diff --git a/.codecov.yml b/.codecov.yml new file mode 100644 index 0000000..e5945be --- /dev/null +++ b/.codecov.yml @@ -0,0 +1,45 @@ +codecov: + notify: + require_ci_to_pass: yes + +coverage: + precision: 4 + round: down + range: "65...90" + + status: + project: + default: + threshold: 0.2 #Allow the coverage to drop by threshold%, and posting a success status. + patch: + default: + target: 0% # trial operation + changes: no + +parsers: + gcov: + branch_detection: + conditional: yes + loop: yes + method: no + macro: no + +comment: + layout: "header, diff" + behavior: default + require_changes: no + +ignore: + - "LICENSES" + - "*_test.go" + - ".git" + - "*.yml" + - "*.md" + - "cmd/.*" + - "docs/.*" + - "vendor/.*" + - "ddl/failtest/.*" + - "ddl/testutil/.*" + - "executor/seqtest/.*" + - "expression/generator/.*" + diff --git a/.dockerignore b/.dockerignore new file mode 100644 index 0000000..e69de29 diff --git a/.editorconfig b/.editorconfig new file mode 100644 index 0000000..43c6a00 --- /dev/null +++ b/.editorconfig @@ -0,0 +1,10 @@ +[*] +end_of_line = lf +insert_final_newline = true +charset = utf-8 + +# tab_size = 4 spaces +[*.go] +indent_style = tab +indent_size = 4 +trim_trailing_whitespace = true diff --git a/.gitattributes b/.gitattributes new file mode 100644 index 0000000..1ad886c --- /dev/null +++ b/.gitattributes @@ -0,0 +1,5 @@ +# Set the default behavior, in case people don't have core.autocrlf set. +* text=auto + +# Declare files that will always have LF line endings on checkout. +*.y text eol=lf diff --git a/.github/workflows/go.yml b/.github/workflows/go.yml new file mode 100644 index 0000000..a9fcc78 --- /dev/null +++ b/.github/workflows/go.yml @@ -0,0 +1,28 @@ +name: Go +on: [push] +jobs: + + build: + name: Build & Test + runs-on: ubuntu-latest + steps: + + - name: Set up Go 1.13 + uses: actions/setup-go@v1 + with: + go-version: 1.13 + id: go + + - name: Check out code into the Go module directory + uses: actions/checkout@v1 + + - name: Get dependencies + run: | + go get -v -t -d ./... + if [ -f Gopkg.toml ]; then + curl https://raw.githubusercontent.com/golang/dep/master/install.sh | sh + dep ensure + fi + + - name: Build & Test + run: make dev diff --git a/.gitignore b/.gitignore new file mode 100644 index 0000000..f9bf740 --- /dev/null +++ b/.gitignore @@ -0,0 +1,20 @@ +bin +/tidb-server/tidb-server +/tidb-server/debug +coverage.out +.idea/ +*.iml +*.swp +*.log +tags +profile.coverprofile +explain_test +cmd/explaintest/explain-test.out +cmd/explaintest/explaintest_tidb-server +*.fail.go +tools/bin/ +vendor +/_tools/ +.DS_Store +.vscode +parser/bin diff --git a/.travis.yml b/.travis.yml new file mode 100644 index 0000000..9acedf8 --- /dev/null +++ b/.travis.yml @@ -0,0 +1,6 @@ +language: go + +go: + - "1.13" +script: + - make dev diff --git a/CODE_OF_CONDUCT.md b/CODE_OF_CONDUCT.md new file mode 100644 index 0000000..80faad0 --- /dev/null +++ b/CODE_OF_CONDUCT.md @@ -0,0 +1,3 @@ +# Contributor Covenant Code of Conduct + +See the [Contributor Covenant Code of Conduct](https://github.com/pingcap/community/blob/master/CODE_OF_CONDUCT.md) diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md new file mode 100644 index 0000000..d3d19f9 --- /dev/null +++ b/CONTRIBUTING.md @@ -0,0 +1,4 @@ +# Contribution Guide + +See the [Contribution Guide](https://github.com/pingcap/community/blob/master/CONTRIBUTING.md) in the +[community](https://github.com/pingcap/community) repo. 
diff --git a/CONTRIBUTORS.md b/CONTRIBUTORS.md new file mode 100644 index 0000000..72df943 --- /dev/null +++ b/CONTRIBUTORS.md @@ -0,0 +1 @@ +Contributor list is moved to [Contributors](https://github.com/pingcap/community/blob/master/architecture/contributor-list.md#tidb-contributors) diff --git a/Dockerfile b/Dockerfile new file mode 100644 index 0000000..20a183e --- /dev/null +++ b/Dockerfile @@ -0,0 +1,37 @@ +# Builder image +FROM golang:1.13-alpine as builder + +RUN apk add --no-cache \ + wget \ + make \ + git \ + gcc \ + musl-dev + +RUN wget -O /usr/local/bin/dumb-init https://github.com/Yelp/dumb-init/releases/download/v1.2.2/dumb-init_1.2.2_amd64 \ + && chmod +x /usr/local/bin/dumb-init + +RUN mkdir -p /go/src/github.com/pingcap/tidb +WORKDIR /go/src/github.com/pingcap/tidb + +# Cache dependencies +COPY go.mod . +COPY go.sum . + +RUN GO111MODULE=on go mod download + +# Build real binaries +COPY . . +RUN make + +# Executable image +FROM alpine + +COPY --from=builder /go/src/github.com/pingcap/tidb/bin/tidb-server /tidb-server +COPY --from=builder /usr/local/bin/dumb-init /usr/local/bin/dumb-init + +WORKDIR / + +EXPOSE 4000 + +ENTRYPOINT ["/usr/local/bin/dumb-init", "/tidb-server"] diff --git a/Jenkinsfile b/Jenkinsfile new file mode 100644 index 0000000..26782c5 --- /dev/null +++ b/Jenkinsfile @@ -0,0 +1,11 @@ +#!groovy + +node { + def TIDB_TEST_BRANCH = "master" + def TIKV_BRANCH = "master" + def PD_BRANCH = "master" + + fileLoader.withGit('git@github.com:pingcap/SRE.git', 'master', 'github-iamxy-ssh', '') { + fileLoader.load('jenkins/ci/pingcap_tidb_branch.groovy').call(TIDB_TEST_BRANCH, TIKV_BRANCH, PD_BRANCH) + } +} diff --git a/LICENSE b/LICENSE new file mode 100644 index 0000000..b67d909 --- /dev/null +++ b/LICENSE @@ -0,0 +1,201 @@ +Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). 
+ + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "{}" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright {} + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/LICENSES/QL-LICENSE b/LICENSES/QL-LICENSE new file mode 100644 index 0000000..c11db18 --- /dev/null +++ b/LICENSES/QL-LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2014 The ql Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the names of the authors nor the names of the +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. 
+ +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. \ No newline at end of file diff --git a/LICENSES/STRUTIL-LICENSE b/LICENSES/STRUTIL-LICENSE new file mode 100644 index 0000000..28713a4 --- /dev/null +++ b/LICENSES/STRUTIL-LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2014 The strutil Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the names of the authors nor the names of the +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. \ No newline at end of file diff --git a/Makefile b/Makefile new file mode 100644 index 0000000..20d830b --- /dev/null +++ b/Makefile @@ -0,0 +1,268 @@ +PROJECT=tidb +GOPATH ?= $(shell go env GOPATH) +P=8 + +# Ensure GOPATH is set before running build process. +ifeq "$(GOPATH)" "" + $(error Please set the environment variable GOPATH before running `make`) +endif +FAIL_ON_STDOUT := awk '{ print } END { if (NR > 0) { exit 1 } }' + +CURDIR := $(shell pwd) +path_to_add := $(addsuffix /bin,$(subst :,/bin:,$(GOPATH))):$(PWD)/tools/bin +export PATH := $(path_to_add):$(PATH) + +GO := GO111MODULE=on go +GOBUILD := $(GO) build $(BUILD_FLAG) -tags codes +GOBUILDCOVERAGE := GOPATH=$(GOPATH) cd tidb-server; $(GO) test -coverpkg="../..." -c . 
+GOTEST := $(GO) test -p $(P) +OVERALLS := GO111MODULE=on overalls + +ARCH := "`uname -s`" +LINUX := "Linux" +MAC := "Darwin" +PACKAGE_LIST := go list ./...| grep -vE "cmd" +PACKAGES := $$($(PACKAGE_LIST)) +PACKAGE_DIRECTORIES := $(PACKAGE_LIST) | sed 's|github.com/pingcap/$(PROJECT)/||' +FILES := $$(find $$($(PACKAGE_DIRECTORIES)) -name "*.go") + +FAILPOINT_ENABLE := $$(find $$PWD/ -type d | grep -vE "(\.git|tools)" | xargs tools/bin/failpoint-ctl enable) +FAILPOINT_DISABLE := $$(find $$PWD/ -type d | grep -vE "(\.git|tools)" | xargs tools/bin/failpoint-ctl disable) + +LDFLAGS += -X "github.com/pingcap/parser/mysql.TiDBReleaseVersion=$(shell git describe --tags --dirty --always)" + +TEST_LDFLAGS = -X "github.com/pingcap/tidb/config.checkBeforeDropLDFlag=1" +COVERAGE_SERVER_LDFLAGS = -X "github.com/pingcap/tidb/tidb-server.isCoverageServer=1" + +CHECK_LDFLAGS += $(LDFLAGS) ${TEST_LDFLAGS} + +TARGET = "" + +# VB = Vector Benchmark +VB_FILE = +VB_FUNC = + + +.PHONY: all build update clean todo test gotest interpreter server dev check checklist parser tidy + +default: server buildsucc + +server-admin-check: server_check buildsucc + +buildsucc: + @echo Build TiDB Server successfully! + +all: dev server benchkv + +parser: + cd parser && make all + +dev: checklist parser check test + +build: parser + $(GOBUILD) + +# Install the check tools. +check-setup:tools/bin/revive tools/bin/goword tools/bin/gometalinter tools/bin/gosec + +check: parser fmt errcheck lint tidy testSuite check-static vet + +# These need to be fixed before they can be ran regularly +check-fail: goword check-slow + +fmt: + @echo "gofmt (simplify)" + @gofmt -s -l -w $(FILES) 2>&1 | $(FAIL_ON_STDOUT) + +goword:tools/bin/goword + tools/bin/goword $(FILES) 2>&1 | $(FAIL_ON_STDOUT) + +gosec:tools/bin/gosec + tools/bin/gosec $$($(PACKAGE_DIRECTORIES)) + +check-static: tools/bin/golangci-lint + tools/bin/golangci-lint run -v --disable-all --deadline=4m \ + --enable=misspell \ + --enable=ineffassign \ + --enable=unused \ + $$($(PACKAGE_DIRECTORIES)) + +check-slow:tools/bin/gometalinter tools/bin/gosec + tools/bin/gometalinter --disable-all \ + --enable errcheck \ + $$($(PACKAGE_DIRECTORIES)) + +errcheck:tools/bin/errcheck + @echo "errcheck" + @GO111MODULE=on tools/bin/errcheck -exclude ./tools/check/errcheck_excludes.txt -ignoretests -blank $(PACKAGES) + +gogenerate: + @echo "go generate ./..." + ./tools/check/check-gogenerate.sh + +lint:tools/bin/revive + @echo "linting" + @tools/bin/revive -formatter friendly -config tools/check/revive.toml -exclude ./parser/... $(FILES) + +vet: + @echo "vet" + $(GO) vet -all $(PACKAGES) 2>&1 | $(FAIL_ON_STDOUT) + +tidy: + @echo "go mod tidy" + ./tools/check/check-tidy.sh + +testSuite: + @echo "testSuite" + ./tools/check/check_testSuite.sh + +clean: + $(GO) clean -i ./... + rm -rf *.out + rm -rf parser + +# Split tests for CI to run `make test` in parallel. +test: test_part_1 test_part_2 + @>&2 echo "Great, all tests passed." + +test_part_1: checklist + +test_part_2: checkdep gotest gogenerate + +upload-coverage: SHELL:=/bin/bash +upload-coverage: +ifeq ("$(TRAVIS_COVERAGE)", "1") + mv overalls.coverprofile coverage.txt + bash <(curl -s https://codecov.io/bash) +endif + +gotest: failpoint-enable +ifeq ("$(TRAVIS_COVERAGE)", "1") + @echo "Running in TRAVIS_COVERAGE mode." + $(GO) get github.com/go-playground/overalls + @export log_level=error; \ + $(OVERALLS) -project=github.com/pingcap/tidb \ + -covermode=count \ + -ignore='.git,vendor,cmd,docs,LICENSES' \ + -concurrency=4 \ + -- -coverpkg=./... 
\ + || { $(FAILPOINT_DISABLE); exit 1; } +else + @echo "Running in native mode." + @export log_level=error; export TZ='Asia/Shanghai'; \ + $(GOTEST) -ldflags '$(TEST_LDFLAGS)' -cover $(PACKAGES) -check.timeout 4s || { $(FAILPOINT_DISABLE); exit 1; } +endif + @$(FAILPOINT_DISABLE) + +race: failpoint-enable + @export log_level=debug; \ + $(GOTEST) -timeout 20m -race $(PACKAGES) || { $(FAILPOINT_DISABLE); exit 1; } + @$(FAILPOINT_DISABLE) + +leak: failpoint-enable + @export log_level=debug; \ + $(GOTEST) -tags leak $(PACKAGES) || { $(FAILPOINT_DISABLE); exit 1; } + @$(FAILPOINT_DISABLE) + +tikv_integration_test: failpoint-enable + $(GOTEST) ./store/tikv/. -with-tikv=true || { $(FAILPOINT_DISABLE); exit 1; } + @$(FAILPOINT_DISABLE) + +RACE_FLAG = +ifeq ("$(WITH_RACE)", "1") + RACE_FLAG = -race + GOBUILD = GOPATH=$(GOPATH) $(GO) build +endif + +CHECK_FLAG = +ifeq ("$(WITH_CHECK)", "1") + CHECK_FLAG = $(TEST_LDFLAGS) +endif + +server: parser +ifeq ($(TARGET), "") + CGO_ENABLED=1 $(GOBUILD) $(RACE_FLAG) -ldflags '$(LDFLAGS) $(CHECK_FLAG)' -o bin/tidb-server tidb-server/main.go +else + CGO_ENABLED=1 $(GOBUILD) $(RACE_FLAG) -ldflags '$(LDFLAGS) $(CHECK_FLAG)' -o '$(TARGET)' tidb-server/main.go +endif + +server_check: +ifeq ($(TARGET), "") + $(GOBUILD) $(RACE_FLAG) -ldflags '$(CHECK_LDFLAGS)' -o bin/tidb-server tidb-server/main.go +else + $(GOBUILD) $(RACE_FLAG) -ldflags '$(CHECK_LDFLAGS)' -o '$(TARGET)' tidb-server/main.go +endif + +linux: +ifeq ($(TARGET), "") + GOOS=linux $(GOBUILD) $(RACE_FLAG) -ldflags '$(LDFLAGS) $(CHECK_FLAG)' -o bin/tidb-server-linux tidb-server/main.go +else + GOOS=linux $(GOBUILD) $(RACE_FLAG) -ldflags '$(LDFLAGS) $(CHECK_FLAG)' -o '$(TARGET)' tidb-server/main.go +endif + +server_coverage: +ifeq ($(TARGET), "") + $(GOBUILDCOVERAGE) $(RACE_FLAG) -ldflags '$(LDFLAGS) $(COVERAGE_SERVER_LDFLAGS) $(CHECK_FLAG)' -o ../bin/tidb-server-coverage +else + $(GOBUILDCOVERAGE) $(RACE_FLAG) -ldflags '$(LDFLAGS) $(COVERAGE_SERVER_LDFLAGS) $(CHECK_FLAG)' -o '$(TARGET)' +endif + +checklist: + cat checklist.md + +failpoint-enable: tools/bin/failpoint-ctl +# Converting gofail failpoints... + @$(FAILPOINT_ENABLE) + +failpoint-disable: tools/bin/failpoint-ctl +# Restoring gofail failpoints... 
+ @$(FAILPOINT_DISABLE) + +checkdep: + $(GO) list -f '{{ join .Imports "\n" }}' github.com/pingcap/tidb/store/tikv | grep ^github.com/pingcap/parser$$ || exit 0; exit 1 + +tools/bin/megacheck: tools/check/go.mod + cd tools/check; \ + $(GO) build -o ../bin/megacheck honnef.co/go/tools/cmd/megacheck + +tools/bin/revive: tools/check/go.mod + cd tools/check; \ + $(GO) build -o ../bin/revive github.com/mgechev/revive + +tools/bin/goword: tools/check/go.mod + cd tools/check; \ + $(GO) build -o ../bin/goword github.com/chzchzchz/goword + +tools/bin/gometalinter: tools/check/go.mod + cd tools/check; \ + $(GO) build -o ../bin/gometalinter gopkg.in/alecthomas/gometalinter.v3 + +tools/bin/gosec: tools/check/go.mod + cd tools/check; \ + $(GO) build -o ../bin/gosec github.com/securego/gosec/cmd/gosec + +tools/bin/errcheck: tools/check/go.mod + cd tools/check; \ + $(GO) build -o ../bin/errcheck github.com/kisielk/errcheck + +tools/bin/failpoint-ctl: go.mod + $(GO) build -o $@ github.com/pingcap/failpoint/failpoint-ctl + +tools/bin/misspell:tools/check/go.mod + $(GO) get -u github.com/client9/misspell/cmd/misspell + +tools/bin/ineffassign:tools/check/go.mod + cd tools/check; \ + $(GO) build -o ../bin/ineffassign github.com/gordonklaus/ineffassign +tools/bin/golangci-lint: + curl -sfL https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh| sh -s -- -b ./tools/bin v1.21.0 + +# Usage: +# +# $ make vectorized-bench VB_FILE=Time VB_FUNC=builtinCurrentDateSig +vectorized-bench: + cd ./expression && \ + go test -v -benchmem \ + -bench=BenchmarkVectorizedBuiltin$(VB_FILE)Func \ + -run=BenchmarkVectorizedBuiltin$(VB_FILE)Func \ + -args "$(VB_FUNC)" diff --git a/README.md b/README.md new file mode 100644 index 0000000..29bb0e0 --- /dev/null +++ b/README.md @@ -0,0 +1,62 @@ +# TinySQL + +TinySQL is a course designed to teach you how to implement a distributed relational database in Go. TinySQL is also the name of the simplified version of [TiDB](https://github.com/pingcap/tidb). + +## Prerequisites + +Experience with Go is required. If you don't have it yet, it is recommended to learn [A Tour of Go](https://tour.golang.org/) first. + +## Course Overview + +Detailed information can be found in the `courses` directory. + +There's a [material list](./courses/material.md) that gathers plenty of materials for studying how a database system works. We pick some of the topics from it and prepare homework to help you gain a better understanding of them. + +This course will take you from idea to implementation, with the essential topics of distributed relational databases covered. + +The course is organized into three parts: + +1. Gives a brief introduction to SQL and relational algebra in preparation for the parts that follow. + +2. Explains the life of a read-only SQL statement, including parsing, execution, and the optimization of SQL plans. + +3. Focuses on SQL statements (including DML and DDL) that change the state of the database: how they are implemented and how they interact with read-only statements. + +## Other courses in this series + +This course only focuses on the SQL layer of a distributed database system. If you are also interested in the KV layer, see [TinyKV](https://github.com/pingcap-incubator/tinykv). + +## Deploy + +Once you finish the project, you can deploy the binary and use a MySQL client to connect to the server. + +### Build + +``` +make +``` + +### Run & Play + +Use `./bin/tidb-server` to start the server and use `mysql -h127.0.0.1 -P4000 -uroot` to connect to it.
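For example, a minimal local session might look like the following sketch; it assumes the build step above produced `bin/tidb-server` and that a MySQL command-line client is installed:

```
./bin/tidb-server
# in another terminal
mysql -h127.0.0.1 -P4000 -uroot
```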
+ +You can also deploy the cluster together with the tinykv project. + +Use the `make` command in the tinykv project to build the `tinyscheduler-server` and `tinykv-server` binaries. + +Then put the binaries into a single directory and run the following commands: + +``` +mkdir -p data +./tinyscheduler-server +./tinykv-server -path=data +./tidb-server --store=tikv --path="127.0.0.1:2379" +``` + +## Contributing + +Contributions are welcome and greatly appreciated. See [CONTRIBUTING.md](https://github.com/pingcap/community/blob/master/CONTRIBUTING.md) for details on submitting patches and the contribution workflow. + +## License + +TinySQL is under the Apache 2.0 license. See the [LICENSE](./LICENSE) file for details. diff --git a/checklist.md b/checklist.md new file mode 100644 index 0000000..6ba599c --- /dev/null +++ b/checklist.md @@ -0,0 +1,28 @@ +# Following the checklist saves the reviewers' time and gets your PR reviewed faster. + +# Self Review +Have you reviewed every line of your changes by yourself? + +# Test +Have you added enough test cases to cover the new feature or bug fix? +Also, add comments to describe your test cases. + +# Naming +Are function names consistent with their behavior? +Is it easy to infer the function's behavior by its name? + +# Comment +Is there any code that might confuse the reviewer? +Add comments to it! You'll be asked to do so anyway. +Make sure there are no grammar or spelling errors in your comments. +Some online grammar checking tools like Grammarly may be helpful. + +# Refactor +Is there any way to refactor the code to make it more readable? +If the refactoring touches a lot of existing code, send another PR to do it. + +# Single Purpose +Make sure the PR does only one thing and nothing else. + +# Diff Size +Make sure the diff size is no more than 500 lines; split it into smaller PRs if it is too large. diff --git a/checkout-pr-branch.sh b/checkout-pr-branch.sh new file mode 100644 index 0000000..2f78588 --- /dev/null +++ b/checkout-pr-branch.sh @@ -0,0 +1,24 @@ +#!/usr/bin/env bash + +# This script is used to check out a TiDB PR branch in a forked repo. +if test -z $1; then + echo -e "Usage:\n" + echo -e "\tcheckout-pr-branch.sh [github-username]:[pr-branch]\n" + echo -e "The argument can be copied directly from the GitHub PR page." + echo -e "The local branch name would be [github-username]/[pr-branch]." + exit 0; +fi + +username=$(echo $1 | cut -d':' -f1) +branch=$(echo $1 | cut -d':' -f2) +local_branch=$username/$branch +fork="https://github.com/$username/tidb" + +exists=`git show-ref refs/heads/$local_branch` +if [ -n "$exists" ]; then + git checkout $local_branch + git pull $fork $branch:$local_branch +else + git fetch $fork $branch:$local_branch + git checkout $local_branch +fi diff --git a/code_review_guide.md b/code_review_guide.md new file mode 100644 index 0000000..1627243 --- /dev/null +++ b/code_review_guide.md @@ -0,0 +1,66 @@ +# Code Review Guide + +## Things to do before you start reviewing the PR + +* Make sure you are familiar with the packages the PR modifies. + +* Make sure you have enough continuous time to review the PR; use 300 LOC per hour as an estimate. + +* Make sure you can follow the updates of the PR in the next few workdays. + +* Read the description of the PR; if it's not easy to understand, ask the coder to improve it. + +* For a bug fix PR, if there is no test case, ask the coder to add tests. + +* For a performance PR, if no benchmark result is provided, ask the coder to add a benchmark result.
+ + +## Things to check during the review process + +* Am I able to understand the purpose of each unit test? + +* Do unit tests actually test that the code is performing the intended functionality? + +* Do unit tests cover all the important code blocks and specially handled errors? + +* Could procedure tests be rewritten to table driven tests? + +* Is the code written following the style guide? + +* Is the same code duplicated more than twice? + +* Do comments exist and describe the intent of the code? + +* Are hacks, workarounds and temporary fixes commented? + +* Does this function do more than the name suggests? + +* Can this function's behavior be inferred by its name? + +* Do tests exist and are they comprehensive? + +* Do unit tests cover all the important code branches? + +* Could the test code be extracted into a table-driven test? + + +## Things to keep in mind when you are writing a review comment + +* Be kind to the coder, not to the code. + +* Ask questions rather than make statements. + +* Treat people who know less than you with respect, deference, and patience. + +* Remember to praise when the code quality exceeds your expectation. + +* It isn't necessarily wrong if the coder's solution is different than yours. + +* Refer to the code style document when necessary. + + +## Things to remember after you submitted the review comment + +* Checkout Github notification regularly to keep track of the updates of the PR. + +* When the PR has been updated, start another round of review or give it a LGTM. diff --git a/config/config.go b/config/config.go new file mode 100644 index 0000000..acbe87f --- /dev/null +++ b/config/config.go @@ -0,0 +1,155 @@ +// Copyright 2017 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package config + +import ( + "fmt" + "strings" + + "github.com/BurntSushi/toml" + zaplog "github.com/pingcap/log" + "github.com/pingcap/tidb/util/logutil" + "go.uber.org/atomic" +) + +// Config number limitations +const ( + // DefTxnTotalSizeLimit is the default value of TxnTxnTotalSizeLimit. + DefTxnTotalSizeLimit = 1024 * 1024 * 1024 +) + +// Valid config maps +var ( + ValidStorage = map[string]bool{ + "mocktikv": true, + "tikv": true, + } + + CheckTableBeforeDrop = false + // checkBeforeDropLDFlag is a go build flag. + checkBeforeDropLDFlag = "None" +) + +// Config contains configuration options. +type Config struct { + Host string `toml:"host" json:"host"` + AdvertiseAddress string `toml:"advertise-address" json:"advertise-address"` + Port uint `toml:"port" json:"port"` + Cors string `toml:"cors" json:"cors"` + Store string `toml:"store" json:"store"` + Path string `toml:"path" json:"path"` + Lease string `toml:"lease" json:"lease"` + Log Log `toml:"log" json:"log"` + Status Status `toml:"status" json:"status"` +} + +// Log is the log section of config. +type Log struct { + // Log level. + Level string `toml:"level" json:"level"` + // File log config. 
+ File logutil.FileLogConfig `toml:"file" json:"file"` +} + +// The ErrConfigValidationFailed error is used so that external callers can do a type assertion +// to defer handling of this specific error when someone does not want strict type checking. +// This is needed only because logging hasn't been set up at the time we parse the config file. +// This should all be ripped out once strict config checking is made the default behavior. +type ErrConfigValidationFailed struct { + confFile string + UndecodedItems []string +} + +func (e *ErrConfigValidationFailed) Error() string { + return fmt.Sprintf("config file %s contained unknown configuration options: %s", e.confFile, strings.Join(e.UndecodedItems, ", ")) +} + +// Status is the status section of the config. +type Status struct { + StatusHost string `toml:"status-host" json:"status-host"` + + StatusPort uint `toml:"status-port" json:"status-port"` + + ReportStatus bool `toml:"report-status" json:"report-status"` +} + +var defaultConf = Config{ + Host: "0.0.0.0", + AdvertiseAddress: "", + Port: 4000, + Cors: "", + Store: "mocktikv", + Path: "/tmp/tinysql", + Lease: "45s", + Log: Log{ + Level: "info", + File: logutil.NewFileLogConfig(logutil.DefaultLogMaxSize), + }, + Status: Status{ + ReportStatus: true, + StatusHost: "0.0.0.0", + StatusPort: 10080, + }, +} + +var ( + globalConf = atomic.Value{} +) + +// NewConfig creates a new config instance with default value. +func NewConfig() *Config { + conf := defaultConf + return &conf +} + +// GetGlobalConfig returns the global configuration for this server. +// It should store configuration from command line and configuration file. +// Other parts of the system can read the global configuration use this function. +func GetGlobalConfig() *Config { + return globalConf.Load().(*Config) +} + +// StoreGlobalConfig stores a new config to the globalConf. It mostly uses in the test to avoid some data races. +func StoreGlobalConfig(config *Config) { + globalConf.Store(config) +} + +// Load loads config options from a toml file. +func (c *Config) Load(confFile string) error { + metaData, err := toml.DecodeFile(confFile, c) + // If any items in confFile file are not mapped into the Config struct, issue + // an error and stop the server from starting. + undecoded := metaData.Undecoded() + if len(undecoded) > 0 && err == nil { + var undecodedItems []string + for _, item := range undecoded { + undecodedItems = append(undecodedItems, item.String()) + } + err = &ErrConfigValidationFailed{confFile, undecodedItems} + } + + return err +} + +// ToLogConfig converts *Log to *logutil.LogConfig. +func (l *Log) ToLogConfig() *logutil.LogConfig { + return logutil.NewLogConfig(l.Level, "test", l.File, false, func(config *zaplog.Config) { config.DisableErrorVerbose = false }) +} + +func init() { + globalConf.Store(&defaultConf) + if checkBeforeDropLDFlag == "1" { + CheckTableBeforeDrop = true + } +} diff --git a/config/config.toml.example b/config/config.toml.example new file mode 100644 index 0000000..c6794c2 --- /dev/null +++ b/config/config.toml.example @@ -0,0 +1,43 @@ +# TiDB Configuration. + +# TiDB server host. +host = "0.0.0.0" + +# tidb server advertise IP. +advertise-address = "" + +# TiDB server port. +port = 4000 + +# Registered store name, [tikv, mocktikv] +store = "mocktikv" + +# TiDB storage path. +path = "/tmp/tinysql" + +# Schema lease duration, very dangerous to change only if you know what you do. +lease = "45s" + +[log] +# Log level: debug, info, warn, error, fatal. +level = "info" + +# File logging. 
+[log.file] +# Log file name. +filename = "" + +# Max log file size in MB (upper limit to 4096MB). +max-size = 300 + +[status] +# If enable status report HTTP service. +report-status = true + +# TiDB status host. +status-host = "0.0.0.0" + +## status-host is the HTTP address for reporting the internal status of a TiDB server, for example: +## API for pprof: http://${status-host}:${status_port}/debug/pprof +# TiDB status port. +status-port = 10080 diff --git a/config/config_test.go b/config/config_test.go new file mode 100644 index 0000000..a64cf86 --- /dev/null +++ b/config/config_test.go @@ -0,0 +1,42 @@ +// Copyright 2017 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package config + +import ( + "path/filepath" + "runtime" + "testing" + + . "github.com/pingcap/check" +) + +var _ = Suite(&testConfigSuite{}) + +type testConfigSuite struct{} + +func TestT(t *testing.T) { + CustomVerboseFlag = true + TestingT(t) +} + +func (s *testConfigSuite) TestConfig(c *C) { + conf := new(Config) + _, localFile, _, _ := runtime.Caller(0) + + configFile := filepath.Join(filepath.Dir(localFile), "config.toml.example") + c.Assert(conf.Load(configFile), IsNil) + + // Make sure the example config is the same as default config. + c.Assert(conf, DeepEquals, GetGlobalConfig()) +} diff --git a/courses/imgs/proj3-part1-1.png b/courses/imgs/proj3-part1-1.png new file mode 100644 index 0000000..f267d03 Binary files /dev/null and b/courses/imgs/proj3-part1-1.png differ diff --git a/courses/imgs/proj3-part2-1.png b/courses/imgs/proj3-part2-1.png new file mode 100644 index 0000000..c88fcf2 Binary files /dev/null and b/courses/imgs/proj3-part2-1.png differ diff --git a/courses/imgs/proj3-part2-2.jpg b/courses/imgs/proj3-part2-2.jpg new file mode 100644 index 0000000..7edd110 Binary files /dev/null and b/courses/imgs/proj3-part2-2.jpg differ diff --git a/courses/imgs/proj3-part2-3.jpg b/courses/imgs/proj3-part2-3.jpg new file mode 100644 index 0000000..5ad466c Binary files /dev/null and b/courses/imgs/proj3-part2-3.jpg differ diff --git a/courses/material.md b/courses/material.md new file mode 100644 index 0000000..3e80e0e --- /dev/null +++ b/courses/material.md @@ -0,0 +1,121 @@ +# 课程材料 + +## Overview + +### 关系代数 + +#### SQL Grammar & Relation Algebra + +##### 课程资料 + +- https://cs186berkeley.net/static/notes/n0-SQLPart1.pdf +- https://cs186berkeley.net/static/notes/n1-SQLPart2.pdf +- https://15445.courses.cs.cmu.edu/fall2019/notes/01-introduction.pdf +- https://15445.courses.cs.cmu.edu/fall2019/notes/02-advancedsql.pdf + +#### Table Codec + +##### 课程资料 +- KV mapping: [关系模型到 Key-Value 模型的映射](https://pingcap.com/blog-cn/tidb-internal-2/#%E5%85%B3%E7%B3%BB%E6%A8%A1%E5%9E%8B%E5%88%B0-key-value-%E6%A8%A1%E5%9E%8B%E7%9A%84%E6%98%A0%E5%B0%84) + +## Parser + +### 课程资料 + +- Lexer & Parser + - http://dinosaur.compilertools.net/ +- `goyacc` + - https://godoc.org/modernc.org/goyacc + - https://pingcap.com/blog-cn/tidb-source-code-reading-5/ + +## Runtime + +### Execution Model: Volcano, Vectorization and Compilation + +#### 课程资料 + +- [Volcano - An Extensible and Parallel Query 
Evaluation System](https://paperhub.s3.amazonaws.com/dace52a42c07f7f8348b08dc2b186061.pdf) +- [MonetDB/X100: Hyper-Pipelining Query Execution](http://cidrdb.org/cidr2005/papers/P19.pdf) +- [Efficiently Compiling Efficient Query Plans for Modern Hardware](http://www.vldb.org/pvldb/vol4/p539-neumann.pdf) +- [Relaxed Operator Fusion for In-Memory Databases: Making Compilation, Vectorization, and Prefetching Work Together At Last](http://www.vldb.org/pvldb/vol11/p1-menon.pdf) + +### Join && Aggregate + +#### 课程资料 + +- https://pingcap.com/blog-cn/tidb-source-code-reading-9/ +- https://pingcap.com/blog-cn/tidb-source-code-reading-11/ +- https://pingcap.com/blog-cn/tidb-source-code-reading-15/ +- https://pingcap.com/blog-cn/tidb-source-code-reading-22/ +- https://15721.courses.cs.cmu.edu/spring2019/papers/17-hashjoins/schuh-sigmod2016.pdf +- https://15721.courses.cs.cmu.edu/spring2019/papers/18-sortmergejoins/p85-balkesen.pdf + +### Region Cache + +#### 课程资料 + +- https://pingcap.com/blog-cn/tidb-source-code-reading-18/ + +### 实现 region cache + +#### gRPC + +##### 课程资料 + +- https://pingcap.com/blog-cn/tidb-source-code-reading-18/ + +## Optimizer + +### Search Strategy: From System R to Cascades + +#### 课程资料 + +- https://15721.courses.cs.cmu.edu/spring2019/papers/22-optimizer1/chaudhuri-pods1998.pdf +- https://15721.courses.cs.cmu.edu/spring2019/papers/22-optimizer1/graefe-ieee1995.pdf +- https://15721.courses.cs.cmu.edu/spring2019/papers/22-optimizer1/p337-soliman.pdf +- https://pingcap.com/blog-cn/tidb-cascades-planner/ + +### Cost Model & Statistics + +#### 课程资料 + +- https://15721.courses.cs.cmu.edu/spring2019/slides/24-costmodels.pdf +- https://15721.courses.cs.cmu.edu/spring2019/papers/24-costmodels/p204-leis.pdf +- https://pingcap.com/blog-cn/tidb-source-code-reading-12/ + +### Access Path Selection + +#### 课程资料 + +- [Access Path Selection in a Relational Database Management System](https://people.eecs.berkeley.edu/~brewer/cs262/3-selinger79.pdf) + +### Subquery Unnesting + +#### 课程资料 + +- [Unnesting Arbitrary Queries](http://www.btw-2015.de/res/proceedings/Hauptband/Wiss/Neumann-Unnesting_Arbitrary_Querie.pdf) + +### Join Reordering + +#### 课程资料 + +- https://15721.courses.cs.cmu.edu/spring2017/papers/14-optimizer1/p539-moerkotte.pdf + +## Transaction + +### 课程资料 + +- Isolation Levels + - [Isolation (database systems)](https://en.wikipedia.org/wiki/Isolation_(database_systems)) + - [Transactions](https://cs186berkeley.net/static/notes/n10-Transactions.pdf) +- Concurrency Control + - [15-445/645 Database Systems (Fall 2019) - Lecture Notes - 16 Concurrency Control Theory](https://15445.courses.cs.cmu.edu/fall2019/notes/16-concurrencycontrol.pdf) + - [15-445/645 Database Systems (Fall 2019) - Lecture Notes - 19 Multi-Version Concurrency Control](https://15445.courses.cs.cmu.edu/fall2019/notes/19-multiversioning.pdf) + +## Online, Asynchronous Schema Change + +### 课程资料 + +- [TiDB 源码阅读系列文章(十七)DDL 源码解析](https://pingcap.com/blog-cn/tidb-source-code-reading-17/) +- https://static.googleusercontent.com/media/research.google.com/zh-CN//pubs/archive/41376.pdf + diff --git a/courses/proj1-README-zh_CN.md b/courses/proj1-README-zh_CN.md new file mode 100644 index 0000000..b8f5da5 --- /dev/null +++ b/courses/proj1-README-zh_CN.md @@ -0,0 +1,17 @@ +# Project 1: Relational Algebra + +## Overview + +在这一部分,我们将介绍关系型数据库是怎样表示以及处理“数据”这一概念的,在 SQL 语法部分我们会从抽象的角度介绍 SQL,在 Table codec 部分将介绍是如何将数据映射到存储--也就是 TinyKV 上的。 + +## 第一部分 + +[SQL 语法](./proj1-part1-README-zh_CN.md) + +## 第二部分 + +[Table 
Codec](./proj1-part2-README-zh_CN.md) + +## 评分 + +由于第一部分的作业为其他平台的试题,Project 1 的评分即是第二部分作业的评分。 diff --git a/courses/proj1-part1-README-zh_CN.md b/courses/proj1-part1-README-zh_CN.md new file mode 100644 index 0000000..6e0f9c9 --- /dev/null +++ b/courses/proj1-part1-README-zh_CN.md @@ -0,0 +1,189 @@ +# SQL 语法 + +## 概览 + +在这一部分我们将介绍利用 SQL 在逻辑上表示以及处理数据。你可以在阅读的同时使用 [A Tour of TiDB](https://tour.pingcap.com/) 来动手操作数据库。 + +## SQL 语法 + +对于数据库来说,关键的问题可能就是如何表示数据以及如何处理这些数据了。在关系型数据库中,数据是以“表”的形式存在的,例如在 [A Tour of TiDB](https://tour.pingcap.com/) 中,有一个 person 表: + +|number|name|region|birthday| +|------:|----:|------:|--------:| +|1|tom|north|2019-10-27| +|2|bob|west|2018-10-27| +|3|jay|north|2018-10-25| +|4|jerry|north|2018-10-23| + +该表一共有 4 行,每行有 4 列信息,这样每一行就成为了一个最小的完整信息单元,利用这样的一个或多个表,我们便可以在上面做出各种操作以得出想要的信息。 + +在表上最简单的操作便是直接输出全部行了,例如: + +```sql +TiDB> select * from person; ++--------+-------+--------+------------+ +| number | name | region | birthday | ++--------+-------+--------+------------+ +| 1 | tom | north | 2019-10-27 | +| 2 | bob | west | 2018-10-27 | +| 3 | jay | north | 2018-10-25 | +| 4 | jerry | north | 2018-10-23 | ++--------+-------+--------+------------+ +4 row in set (0.00 sec) +``` + +还可以制定只输出需要的列,例如: + +```sql +TiDB> select number,name from person; ++--------+-------+ +| number | name | ++--------+-------+ +| 1 | tom | +| 2 | bob | +| 3 | jay | +| 4 | jerry | ++--------+-------+ +4 row in set (0.01 sec) +``` + +当然,有的时候可能我们只对满足某些条件的行感兴趣,例如我们可能只关心位于 north 的人: + +```sql +TiDB> select name, birthday from person where region = 'north'; ++-------+------------+ +| name | birthday | ++-------+------------+ +| tom | 2019-10-27 | +| jay | 2018-10-25 | +| jerry | 2018-10-23 | ++-------+------------+ +3 row in set (0.01 sec) +``` + +通过 where 语句以及各种条件的组合,我们可以只得到满足某些信息的行。 + +有些时候,我们还需要一些概括性的数据,例如表里满足某个条件的一共有多少行,这个时候我们需要聚合函数来计概括后的信息: + + +```sql +TiDB> select count(*) from person where region = 'north'; ++----------+ +| count(*) | ++----------+ +| 3 | ++----------+ +1 row in set (0.01 sec) +``` + +常见的聚合有 max,min,sum 和 count 等。上面的语句只是输出了满足 region = ‘north’ 的行数,如果我们同时也想知道其他所有 region 的总人数呢? 
此时 `group by`就排上用场了: + + + +```sql +TiDB> select region, count(*) from person group by region; ++--------+----------+ +| region | count(*) | ++--------+----------+ +| north | 3 | +| west | 1 | ++--------+----------+ +2 row in set (0.01 sec) +``` + +当然,对于聚合的结果我们可能还是需要过滤一些行,不过此时前面介绍的 where 语句就不能使用了,因为 where 后面的过滤条件是在 group by 之前生效的,在 group by 之后过滤需要使用 having: + +``` +TiDB> select region, count(*) from person group by region having count(*) > 1; ++--------+----------+ +| region | count(*) | ++--------+----------+ +| north | 3 | ++--------+----------+ +1 row in set (0.02 sec) +``` + +此外,还有一些常见的操作例如 order by,limit 等,这里不再一一介绍。 +除了单表上的操作,往往我们可能需要结合多个表的信息,例如还有另外一张 address 表: + +```sql +TiDB> create table address(number int, address varchar(50)); +Execute success (0.05 sec) +TiDB> insert into address values (1, 'a'),(2, 'b'),(3, 'c'), (4, 'd'); +Execute success (0.02 sec) +``` + +最简单的结合两张表的信息便是将分别取两张表中的任意一行结合起来,这样我们一共会有 4*4 种可能的组合,也就是会得到 16 行: + +```sql +TiDB> select name, address from person inner join address; ++-------+---------+ +| name | address | ++-------+---------+ +| tom | a | +| tom | b | +| tom | c | +| tom | d | +| bob | a | +| bob | b | +| bob | c | +| bob | d | +| jay | a | +| jay | b | +| jay | c | +| jay | d | +| jerry | a | +| jerry | b | +| jerry | c | +| jerry | d | ++-------+---------+ +16 row in set (0.02 sec) +``` + +但这样的信息产生的信息爆炸往往是我们不需要的,幸运的是我们可以指定组合任意行的策略,例如如果想要同时知道某个人的地址以及名字,那我们只需要取两张表中有相同 number 值的人接合在一起,这样只会产生 4 行结果: + +```sql +TiDB> select name, address from person inner join address on person.number = address.number; ++-------+---------+ +| name | address | ++-------+---------+ +| tom | a | +| bob | b | +| jay | c | +| jerry | d | ++-------+---------+ +4 row in set (0.02 sec) +``` + +需要注意的是这里的 join 为 inner join,除此以外还有 outer join,在这里就不再赘述。 + + +## 作业描述 + +完成 hackerrank 的下列题目 + + (10%) +[Revising the Select Query II](https://www.hackerrank.com/challenges/revising-the-select-query-2/problem) (10%) +[Revising Aggregations - The Count Function](https://www.hackerrank.com/challenges/revising-aggregations-the-count-function/problem) (10%) + + (10%) +[Revising Aggregations - Averages](https://www.hackerrank.com/challenges/revising-aggregations-the-average-function/problem) (10%) +[African Cities](https://www.hackerrank.com/challenges/african-cities/problem) (10%) + +```sql +SELECT CITY.NAME FROM CITY, COUNTRY WHERE CITY.CountryCode = COUNTRY.Code AND COUNTRY.CONTINENT = 'Africa'; +``` + +[Average Population of Each Continent](https://www.hackerrank.com/challenges/average-population-of-each-continent/problem) (10%) +[Binary Tree Nodes](https://www.hackerrank.com/challenges/binary-search-tree-1/problem) (30%) + +```mysql +SELECT B.N, IF(B.P IS NULL, 'Root', IF((SELECT COUNT(*) FROM BST AS A WHERE A.P = B.N)>0, 'Inner', 'Leaf')) + FROM BST AS B ORDER BY N; +``` + + + +## 评分 + +该作业目前视为课下作业,暂时不计入评分。 diff --git a/courses/proj1-part2-README-zh_CN.md b/courses/proj1-part2-README-zh_CN.md new file mode 100644 index 0000000..00d48a4 --- /dev/null +++ b/courses/proj1-part2-README-zh_CN.md @@ -0,0 +1,74 @@ +# Table Codec + +## 概览 + +在本章我们将介绍表上的数据如何映射到 TinyKV 上。 + +## Table Codec 简介 + +从上一节介绍的数据处理看,我们需要存储具有怎样的性质呢: + +- 首先,从单表操作的角度看,同一张表的数据应当是存放在一起的,这样我们可以避免在处理某一张表时读取到其他表上的数据 +- 其次,对于同一张表内的数据,我们该如何排列呢?从 SQL 过滤条件的角度来看 + - 如果基本上没有过滤条件,那么无论怎么排列都没关系 + - 如果类似 person = “xx” 这样的等值查询比较多,那么一个类似 hash 表的排列是比较优的,当然一个有序数组也可以 + - 如果类似 number >= “xx” 这样的查询比较多,那么一个类似有序数组的排列是比较优的,因为这样我们可以避免读取较多的无用数据 +- 对于同一行上的数据,我们是分开存还是存在一起比较好呢?再一次,这取决于数据访问的模式,如果同一行上的数据总是需要同时被读取,那么存在一起是更好的,在 TinySQL 中我们选择了将同一行上的数据存放在一起。 + 
+从上面的角度看,一个类似有序的数组的排列可能是最简单的方式,因为几乎它的可以用一个统一的方式满足所有的要求。接下来我们再看看 TinyKV,从最简单的角度看,我们可以将它看做一个提供了如下性质的 KV 引擎: + +- Key 和 Value 都是 bytes 数组,也就是说无论原先的类型是什么,我们都要序列化后再存入 +- Scan(startKey),任意给定一个 Key,这个接口可以按顺序返回所有大于等于这个 startKey 数据。 +- Set(key, value),将 key 的值设置为 value。 + +结合上面的讨论,数据的存储方式就呼之欲出了: + +- 由于同一张表的需要存放在一起,那么表的唯一标示应该放在 Key 的最前面,这样同一张表的 Key 就是连续的 +- 对于某一张表,将需要排序的列放在表的唯一标示后面,编码在 Key 里 +- Value 中存放某一行上所有其他的列 + +具体来说,我们会对每个表分配一个 TableID,每一行分配一个 RowID(如果表有整数型的 Primary Key,那么会用 Primary Key 的值当做 RowID),其中 TableID 在整个集群内唯一,RowID 在表内唯一,这些 ID 都是 int64 类型。 +每行数据按照如下规则进行编码成 Key-Value pair: + +``` + Key: tablePrefix_tableID_recordPrefixSep_rowID + Value: [col1, col2, col3, col4] +``` + +其中 Key 的 tablePrefix/recordPrefixSep 都是特定的字符串常量,用于在 KV 空间内区分其他数据。 +对于索引,会为每一个索引分配表内唯一的 indexID,然后按照如下规则编码成 Key-Value pair: + +``` + Key: tablePrefix_tableID_indexPrefixSep_indexID_indexColumnsValue + Value: rowID +``` + +当然,我们还需要考虑非唯一索引,这个时候上面的方法就行不通了,我们需要将 rowID 也编码进 Key 里使之成为唯一的: + +``` + Key: tablePrefix_tableID_indexPrefixSep_indexID_ColumnsValue_rowID + Value:null +``` + +思考:如果从 join 的角度考虑,数据应该怎么映射呢? + +## 理解代码 + +tablecodec 的主要代码位于 [tablecodec.go](https://github.com/pingcap-incubator/tinysql/blob/course/tablecodec/tablecodec.go),这次我们需要关注的代码主要从 [L33 到 L147](https://github.com/pingcap-incubator/tinysql/blob/course/tablecodec/tablecodec.go#L33-L146) 之间。 + +代码一开始,定义了上文提到的三个常量:tablePrefix,recordPrefixSep 和 indexPrefixSep。 + +接下来可以看到 [EncodeRowKeyWithHandle](https://github.com/pingcap-incubator/tinysql/blob/course/tablecodec/tablecodec.go#L64) 和 [EncodeIndexSeekKey](https://github.com/pingcap-incubator/tinysql/blob/course/tablecodec/tablecodec.go#L86) 分别实现了上述所说的行数据和索引数据的编码。 + +## 作业描述 + +根据上述 [EncodeRowKeyWithHandle](https://github.com/pingcap-incubator/tinysql/blob/course/tablecodec/tablecodec.go#L64) 和 [EncodeIndexSeekKey](https://github.com/pingcap-incubator/tinysql/blob/course/tablecodec/tablecodec.go#L86),实现 [DecodeRecordKey](https://github.com/pingcap-incubator/tinysql/blob/course/tablecodec/tablecodec.go#L72) 和 [DecodeIndexKeyPrefix](https://github.com/pingcap-incubator/tinysql/blob/course/tablecodec/tablecodec.go#L95),注意由于参数 `key` 可能是不合法的,需要考虑错误处理。 + +## 测试 + +通过 [tablecodec](https://github.com/pingcap-incubator/tinysql/blob/course/tablecodec) 下所有测试。 + +## 评分 + +`TestDecodeIndexKey` 和 `TestRecordKey` 各占 50% 分数。 + diff --git a/courses/proj2-README-zh_CN.md b/courses/proj2-README-zh_CN.md new file mode 100644 index 0000000..031d4ce --- /dev/null +++ b/courses/proj2-README-zh_CN.md @@ -0,0 +1,21 @@ +# Parser + +## 概览 + +在 Parser 部分,我们将介绍 TinySQL 是如何将文本转化为 AST 的。 + +## Parser 简介 + +可以参考 [TiDB 源码阅读系列之 TiDB SQL Parser 的实现](https://pingcap.com/blog-cn/tidb-source-code-reading-5/)。 + +## 作业描述 + +完成 `JoinTable` 的实现,你可以利用 parser test 里失败的测试确定需要补充哪些语法部分。 + +## 测试 + +通过测试 `TestDMLStmt`。 + +## 评分 + +通过 `TestDMLStmt` 即可满分。 diff --git a/courses/proj3-README-zh_CN.md b/courses/proj3-README-zh_CN.md new file mode 100644 index 0000000..3d06059 --- /dev/null +++ b/courses/proj3-README-zh_CN.md @@ -0,0 +1,42 @@ +# DDL + +## 概述 + +DDL(Data Definition Language) 数据模式定义语言,是用来描述数据表实体的语言。简单来说就是数据库中对库/表/列/索引进行创建/删除/变更操作的部分逻辑实现。这个 Project 中会对 TinySQL 的 DDL 模块在概念和代码两部分上进行一些介绍。 + +## 异步 schema 变更 + +TinySQL 中的异步 schema 变更是参照了 Google F1 中的 schema 变更的算法。F1 的原始论文可以在[http://static.googleusercontent.com/media/research.google.com/zh-CN//pubs/archive/41376.pdf] 中找到。同时在[这里](https://github.com/ngaut/builddatabase/blob/master/f1/schema-change.md)有一份中文版的说明。 + +## 理解代码 + +TinySQL 中的代码主要在包目录 `ddl` 中。其中下述文件包含了 `ddl` 的主要功能 + +| File | Introduction | +| 
:------------- | :------------------------------------------ | +| `ddl.go` | 包含 DDL 接口定义和其实现。 | +| `ddl_api.go` | 提供 create , drop , alter , truncate , rename 等操作的 API,供 Executor 调用。主要功能是封装 DDL 操作的 job 然后存入 DDL job queue,等待 job 执行完成后返回。| +| `ddl_worker.go` | DDL worker 的实现。owner 节点的 worker 从 job queue 中取 job,然后执行,执行完成后将 job 存入 job history queue 中。| +| `syncer.go` | 负责同步 ddl worker 的 owner 和 follower 间的 `schema version`。 每次 DDL 状态变更后 `schema version ID` 都会加 1。| + +`ddl owner` 相关的代码单独放在 `owner` 目录下,实现了 owner 选举等功能。 + +这里我们主要以 `CreateTable` 语句来介绍一下代码中的实现逻辑。 + +`create table` 需要把 table 的元信息([TableInfo](https://github.com/pingcap-incubator/tinysql/blob/course/parser/model/model.go#L180))从 SQL 中解析出来,做一些检查,然后把 table 的元信息持久化保存到 TiKV 中。在 DDL 包中,对外暴露的接口是 [CreateTable](https://github.com/pingcap-incubator/tinysql/blob/course/ddl/ddl_api.go#L846)。这个函数会在执行层中调用。其主要流程如下: + +* 会先 check 一些限制,比如 table name 是否已经存在,table 名是否太长,是否有重复定义的列等等限制。 +* [buildTableInfo](https://github.com/pingcap-incubator/tinysql/blob/course/ddl/ddl_api.go#L712) 获取 global table ID,生成 `tableInfo` , 即 table 的元信息,然后封装成一个 DDL job,这个 job 包含了 `table ID` 和 `tableInfo`,并将这个 job 的 type 标记为 `ActionCreateTable`。 +* [d.doDDLJob(ctx, job)](https://github.com/pingcap-incubator/tinysql/blob/course/ddl/ddl_api.go#L421) 函数中的 [d.addDDLJob(ctx, job)](https://github.com/pingcap-incubator/tinysql/blob/course/ddl/ddl.go#L182) 会先给 job 获取一个 global job ID 然后放到 job queue 中去。 +* DDL 组件启动后,在 [start](https://github.com/pingcap-incubator/tinysql/blob/course/ddl/ddl.go#L285) 函数中会启动一个 `ddl_worker` 协程运行 [start](https://github.com/pingcap-incubator/tinysql/blob/course/ddl/ddl_worker.go#L112) 函数,每隔一段时间调用 [handleDDLJobQueue](https://github.com/pingcap-incubator/tinysql/blob/course/ddl/ddl_worker.go#L300) 函数去尝试处理 DDL job 队列里的 job,`ddl_worker` 会先 check 自己是不是 owner,如果不是 owner,就什么也不做,然后返回;如果是 owner,就调用 [getFirstDDLJob](https://github.com/pingcap-incubator/tinysql/blob/course/ddl/ddl_worker.go#L219) 函数获取 DDL 队列中的第一个 job,然后调 [runDDLJob](https://github.com/pingcap-incubator/tinysql/blob/course/ddl/ddl_worker.go#L427) 函数执行 job。 + * [runDDLJob](https://github.com/pingcap-incubator/tinysql/blob/course/ddl/ddl_worker.go#L452) 函数里面会根据 job 的类型,然后调用对应的执行函数,对于 `create table` 类型的 job,会调用 [onCreateTable](https://github.com/pingcap-incubator/tinysql/blob/course/ddl/table.go#L32) 函数,然后做一些 check 后,会调用 [createTableOrViewWithCheck](https://github.com/pingcap-incubator/tinysql/blob/course/ddl/table.go#L66) 函数,将 `db_ID` 和 `table_ID` 映射为 `key`,`tableInfo` 作为 value 存到 TiKV 里面去,并更新 job 的状态。 +* [finishDDLJob](https://github.com/pingcap-incubator/tinysql/blob/course/ddl/ddl_worker.go#L257) 函数将 job 从 DDL job 队列中移除,然后加入 history ddl job 队列中去。 +* [doDDLJob](https://github.com/pingcap-incubator/tinysql/blob/course/ddl/ddl.go#L449) 函数中检测到 history DDL job 队列中有对应的 job 后,返回。 + +## 作业 + +我们这里实现一下比较简单的 `Drop Column` 涉及的[一些代码](https://github.com/pingcap-incubator/tinysql/blob/course/ddl/column.go#L216)。 + +## 测试 + +通过单元测试 `TestColumnChange` 以及 `TestDropColumn`。 diff --git a/courses/proj4-README-zh_CN.md b/courses/proj4-README-zh_CN.md new file mode 100644 index 0000000..ea49a11 --- /dev/null +++ b/courses/proj4-README-zh_CN.md @@ -0,0 +1,13 @@ +# 优化 + +## 概览 + +在优化这一部分,我们将介绍优化器的两种框架,如何使用统计信息进行数据分布的估算和索引的选择,以及其他一些经典的问题。 + +## 第一部分 搜索框架 System R 和 Cascades + +[第一部分文档](./proj4-part1-README-zh_CN.md) + +## 第二部分 代价估算以及统计信息以及 Join Reorder + +[第二部分文档](./proj4-part2-README-zh_CN.md) diff --git a/courses/proj4-part1-README-zh_CN.md b/courses/proj4-part1-README-zh_CN.md new file mode 100644 index 0000000..4262a31 --- 
/dev/null +++ b/courses/proj4-part1-README-zh_CN.md @@ -0,0 +1,68 @@ +# 搜索框架 System R 和 Cascades + +## 概览 + +在这一小节,我们将详细描述 System R 和 Cascades 两种优化器框架的设计理念以及 TinySQL 中对应的代码实现。 + +## TiDB 中基于 System R 框架的优化器 + +System R 两阶段模型可以说是现代数据库基于代价优化(Cost based optimize)的优化器的鼻祖。TiDB 目前生产环境中仍然采用的这一框架。 + +[DoOptimize](https://github.com/pingcap-incubator/tinysql/blob/master/planner/core/optimizer.go#L76) 是优化器的入口。这里我们会传入原始的 Plan tree。然后在经过逻辑优化和物理优化后,返回一个最终的 Physical plan tree。 + +### 逻辑优化 + +[logicalOpimize](https://github.com/pingcap-incubator/tinysql/blob/master/planner/core/optimizer.go#L95) 是逻辑优化的入口。我们会顺序遍历所有的优化规则,每个优化规则会遍历整个 plan tree,同时对 plan tree 做一些修改,最后返回修改过的 plan tree。 + + +### 物理优化 + +[physicalOptimize](https://github.com/pingcap-incubator/tinysql/blob/master/planner/core/optimizer.go#L112) 是物理优化的入口。这个过程实际上是一个记忆化搜索的过程。 + +其记忆化搜索的过程大致可以用如下伪代码表示: + +``` +// The OrderProp tells whether the output data should be ordered by some column or expression. (e.g. For select * from t order by a, we need to make the data ordered by column a, that is the exactly information that OrderProp should store) +func findBestTask(p LogicalPlan, prop OrderProp) PhysicalPlan { + + if retP, ok := dpTable.Find(p, prop); ok { + return retP + } + + selfPhysicalPlans := p.getPhysicalPlans() + + bestPlanTree := a plan with maximum cost + + for _, pp := range selfPhysicalPlans { + + childProps := pp.GetChildProps(prop) + childPlans := make([]PhysicalPlan, 0, len(p.children)) + for i, cp := range p.children { + childPlans = append(childPlans, findBestTask(cp, childProps[i]) + } + physicalPlanTree, cost := connect(pp, childPlans) + + if physicalPlanTree.cost < bestPlanTree.cost { + bestPlanTree = physicalPlanTree + } + } + return bestPlanTree +} +``` + +实际的执行代码可以在[findBestTask](https://github.com/pingcap-incubator/tinysql/blob/master/planner/core/find_best_task.go#L95) 中查看,其逻辑和上述伪代码基本一致。 + +## Cascades 框架 + +[最初的论文](https://15721.courses.cs.cmu.edu/spring2018/papers/15-optimizer1/graefe-ieee1995.pdf)里对 Cascades 架构的设计理念做了一个比较细致的讲解。 + +[TiDB Cascades Planner 原理解析](https://pingcap.com/blog-cn/tidb-cascades-planner/) 这篇文章对 Cascades 在 TiDB 中的实现做了比较细致的讲解,大家可以通过这篇博客结合 TinySQL 的代码进行学习。TiDB 和 TinySQL 在关键概念释义上是完全一样的。概念名词可以直接在 TinySQL 找到。 + +## 作业 + +完成 `rule_predicate_push_down.go` 中的 TODO 内容。以及 `transformation_rules.go` 中的 TODO 内容。 + +## 评分 + +通过 `transformation_rules_test.go` 中的测试以及 package `core` 下 `TestPredicatePushDown` 的测试 + diff --git a/courses/proj4-part2-README-zh_CN.md b/courses/proj4-part2-README-zh_CN.md new file mode 100644 index 0000000..a4de741 --- /dev/null +++ b/courses/proj4-part2-README-zh_CN.md @@ -0,0 +1,36 @@ +# Join and Access Path Selection + +## 概述 + +在这一部分,我们会需要实现和代价选择相关的一些内容。 + +## 描述数据分布的数据结构 + +当结束启发式规则的筛选之后,我们仍然可能剩余多组索引等待筛选我们就需要知道每个索引究竟会过滤多少行数据。在 TiDB 中我们使用直方图和 Count-Min Sketch 来存储的统计信息,[TiDB 源码阅读系列文章(十二)统计信息(上)](https://pingcap.com/blog-cn/tidb-source-code-reading-12/) 中,我们对直方图和 Count-Min Sketch 的实现原理做了比较详细的介绍。 + +这里我们需要完成 `cmsketch.go` 中的 TODO 内容,并通过 `cmsketch_test.go` 中的测试 + +## Join Reorder + +如果我们不加修改的直接执行用户输入的 Join 的顺序,假设用户输入了 `select * from t1 joni t2 on ... 
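上文提到需要补全 cmsketch.go 中的 TODO。下面给出 Count-Min Sketch 的一个极简示意实现,帮助理解"插入时按多行哈希计数、查询时取各行最小值"的原理;它与 TinySQL 中的实际接口并不相同,哈希方式也做了简化。

```go
package main

import (
	"fmt"
	"hash/fnv"
)

// CMSketch:d 行 w 列的计数矩阵。由于哈希冲突只会让计数偏大,
// 查询取最小值得到的估计永远不会低于真实出现次数。
type CMSketch struct {
	depth, width int
	table        [][]uint32
}

func NewCMSketch(depth, width int) *CMSketch {
	t := make([][]uint32, depth)
	for i := range t {
		t[i] = make([]uint32, width)
	}
	return &CMSketch{depth: depth, width: width, table: t}
}

// pos 用行号当作哈希种子的简化做法,真实实现通常使用一组独立的哈希函数。
func (c *CMSketch) pos(row int, data []byte) int {
	h := fnv.New32a()
	h.Write([]byte{byte(row)})
	h.Write(data)
	return int(h.Sum32() % uint32(c.width))
}

func (c *CMSketch) Insert(data []byte) {
	for i := 0; i < c.depth; i++ {
		c.table[i][c.pos(i, data)]++
	}
}

func (c *CMSketch) Query(data []byte) uint32 {
	min := ^uint32(0)
	for i := 0; i < c.depth; i++ {
		if v := c.table[i][c.pos(i, data)]; v < min {
			min = v
		}
	}
	return min
}

func main() {
	cms := NewCMSketch(4, 1024)
	for i := 0; i < 10; i++ {
		cms.Insert([]byte("a"))
	}
	cms.Insert([]byte("b"))
	fmt.Println("estimate(a) =", cms.Query([]byte("a"))) // 期望约为 10
	fmt.Println("estimate(b) =", cms.Query([]byte("b"))) // 期望约为 1
}
```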
join t3 on ...`,我们会按照先扫描 `t1`,然后和 `t2` 做 Join,然后和 `t3` 做 Join。这个顺序会在很多情况下表现的并不是特别好(一个简单的场景,`t1`, `t2` 特别大,而 `t3` 特别小,那么只要和 `t3` join 时可以过滤部分数据,先 Join `t1` 和 `t2` 势必没有先让 `t3` 和其中一个表 join 在性能上占优势)。 + +你需要实现一个基于 DP 的 Join Reorder 算法,并通过 `rule_join_reorder_dp_test.go` 中的测试。实现的位置在文件 `rule_join_reorder_dp.go` 中。 + +这里我们简单的表述一下这个算法: + +- 使用数字的二进制表示来代表当前参与 Join 的节点情况。11(二进制表示为 1011)表示当前的 Join Group 包含了第 3 号节点,第 1 号节点和第 0 号节点(节点从 0 开始计数)。 +- f[11] 来表示包含了节点 `3, 1, 0` 的最优的 Join Tree。 +- 转移方程则是 `f[group] = min{join{f[sub], f[group ^ sub])}` 这里 `sub` 是 `group` 二进制表示下的任意子集。 + + +## Access Path Selection + +在实际场景中,一个表可能会有多个索引,一个查询中的查询条件也可能涉及到某个表的多个索引。因此就会需要我们去决定选择使用哪个索引。你会需要实现这个过程的某些代码 + +### Skyline pruning + +你可以在 [TiDB proposal](https://github.com/pingcap/tidb/blob/master/docs/design/2019-01-25-skyline-pruning.md) 以及 [Skyline pruning operator](http://skylineresearch.in/skylineintro/The_Skyline_Operator.pdf) 来了解其理论基础。 + +这是一个启发式规则的筛选,用来筛除一些一定会差的选择分支。具体的筛选要求在 TiDB proposal 以及 `TODO` 注释的解释中有更详细的说明。你需要实现并通过 `TestSkylinePruning` 中的所有测试。实现的位置为 `find_best_task.go` 的 TODO 内容。 + + diff --git a/courses/proj5-README-zh_CN.md b/courses/proj5-README-zh_CN.md new file mode 100644 index 0000000..a7568c3 --- /dev/null +++ b/courses/proj5-README-zh_CN.md @@ -0,0 +1,21 @@ +# 执行 +- +## 概览 + +在这一章节我们将介绍执行层最核心的执行模型以及两个比较关键的具体算子的执行算法。 + +## 第一部分 执行模型:火山模型和向量化 + +[第一部分文档](./proj5-part1-README-zh_CN.md) + +## 第二部分 Hash Join + +[第二部分文档](./proj5-part2-README-zh_CN.md) + +## 第三部分 Hash Aggregate + +[第三部分文档](./proj5-part3-README-zh_CN.md) + +## 评分 + +三个子任务的评分细节详见各自的说明文档,其中汇总评分时按照 2:4:4 的比例汇总为 Project 3 的最终评分。 diff --git a/courses/proj5-part1-README-zh_CN.md b/courses/proj5-part1-README-zh_CN.md new file mode 100644 index 0000000..659a5b0 --- /dev/null +++ b/courses/proj5-part1-README-zh_CN.md @@ -0,0 +1,57 @@ +# 执行模型: 火山模型和向量化 +- +## 概览 + +在这一章节我们将介绍执行层最核心的执行模型。 + +## 执行模型介绍 + +在介绍具体模型之前,我们先思考一下为什么需要模型。模型是对现实事物核心特质的简化,可以用来帮助人们理解问题。在前面的 SQL 部分中,可以看到 SQL 的可以表达语义复杂多变,但同时它也具有一定的特性: + +- SQL 是由不同的部分组成的,每个部分有固定的语义 +- 部分与部分之间是有一定关系的,每个部分都是对前一部分结果的进一步处理 + +### 火山模型 +- +让我们先来看一看火山模型。在火山模型中,由不同的执行器组成,每个执行器对应的是 SQL 中的某个部分,例如过滤,聚合等;执行器与执行器之间组成了类似树状的关系,每个算子都实现了三个接口: + +- Open,对当前执行器所需的资源进行初始化 +- Next,从孩子节点(如果存在)取必需的数据,计算并返回一条结果 +- Close,对执行器所需的资源进行释放 + +![Volcano Execution Model](./imgs/proj3-part1-1.png) + +从这里也可以看到,火山模型是符合我们对模型的设想的,每个执行器负责特定的语义,并通过树型结构灵活地组合起来。那么它的缺点是什么呢?如果处理的数据量多,那么每个算子输出的每一行都对应一次 `Next` 调用,框架上的函数调用开销将会非常大。 + +### 向量化 +- +减小函数调用的一个直观想法就是每次 `Next` 返回一批数据,而不是只返回一行。为了支持返回多行的操作,TinySQL 还使用了 `Chunk` 来表示这些行,用于减小内存分配开销、降低内存占用以及实现内存使用量统计/控制。 + +在结果按批返回后,也为计算的向量化带来了可能性,但首先,我们先来了解一下表达式及其计算。 + +## 理解代码 + +### 向量化表达式 + +在 [builtin\_string\_vec.go](https://github.com/pingcap-incubator/tinysql/blob/selection/expression/builtin_string_vec.go) 有三个较为简单的向量化的 string 类型函数,可以结合[向量化的进行计算](https://docs.google.com/document/d/1JKP9YS3wYsuXsYhDgVepJt5y72K6_WxhUVfOLyjpAjc/edit#heading=h.66r4twnr3b1c)阅读。 + +### 火山模型 +- +我们以 Selection 为例来介绍代码。 + +在 [executor.go#L346](https://github.com/pingcap-incubator/tinysql/blob/selection/executor/executor.go#L346) 实现了一个较为简单的执行器 `Selection`,它的作用就是根据 `filters` 过滤掉不需要的行并返回给父亲,可以看到它也实现了常见的 `Open`, `Next` 和 `Close` 接口。可以通过阅读 unBatchedNext 理解一下它的功能。 + +## 作业描述 + +- 实现向量化表达式 [vecEvalInt](https://github.com/pingcap-incubator/tinysql/blob/selection/expression/builtin_string_vec.go#L89),并将 [vectorized](https://github.com/pingcap-incubator/tinysql/blob/selection/expression/builtin_string_vec.go#L84) 的返回值改为 `true` +- 实现向量化 selection 的 
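针对上文 Join Reorder 小节的转移方程 f[group] = min{join(f[sub], f[group ^ sub])},下面用一段简化的 Go 代码演示如何用位运算枚举子集并做动态规划。这里用"中间结果行数之和"作为虚构的代价函数,并不对应 TinySQL 中真实的代价模型与连接条件处理。

```go
package main

import "fmt"

// dpJoinReorder:cardinality[i] 是第 i 个表的行数。
// 状态 group 的每个二进制位表示对应的表是否已经参与 Join。
func dpJoinReorder(cardinality []float64) float64 {
	n := len(cardinality)
	full := (1 << n) - 1
	best := make([]float64, full+1) // f[group]:该组合的最小代价,-1 表示未计算
	size := make([]float64, full+1) // 该组合 Join 后的输出行数(简化为基数乘积)
	for i := 1; i <= full; i++ {
		best[i] = -1
	}
	for i := 0; i < n; i++ {
		best[1<<i] = 0
		size[1<<i] = cardinality[i]
	}
	for group := 1; group <= full; group++ {
		if best[group] >= 0 {
			continue // 单表,无需拆分
		}
		// 枚举 group 的所有非空真子集 sub,对应 f[sub] 和 f[group^sub] 的组合
		for sub := (group - 1) & group; sub > 0; sub = (sub - 1) & group {
			other := group ^ sub
			out := size[sub] * size[other]
			cost := best[sub] + best[other] + out
			if best[group] < 0 || cost < best[group] {
				best[group] = cost
				size[group] = out
			}
		}
	}
	return best[full]
}

func main() {
	// t1、t2 很大,t3 很小:DP 会选出先让 t3 参与 Join 的方案,中间结果更小。
	fmt.Println(dpJoinReorder([]float64{1e6, 1e6, 10}))
}
```

可以看到,在这个简化代价模型下,先让小表参与 Join 的拆分方式代价更低,这正是上文所说 Join 顺序会显著影响性能的原因。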
[Next](https://github.com/pingcap-incubator/tinysql/blob/selection/executor/executor.go#L380) 函数。 + +## 测试 + +通过通过 `expression` 下所有测试和 `executor` 下面的 `TestJoin` 以及 `TestMergeJoin`。 + +你可以通过 `go test package_path -check.f func_name` 来跑一个具体的函数。以 `TestJoin` 为例,你可以使用 `go test github.com/pingcap/pingcap-incubator/tinysql/executor -check.f TestJoin` 来跑这个具体的函数。同时可以将输出文件重定向至文件中来后续 debug。 + +## 评分 + +`expression` 和 `executor` 各占 50%。 diff --git a/courses/proj5-part2-README-zh_CN.md b/courses/proj5-part2-README-zh_CN.md new file mode 100644 index 0000000..2916943 --- /dev/null +++ b/courses/proj5-part2-README-zh_CN.md @@ -0,0 +1,94 @@ +# Hash Join +- +## 概览 + +在这一小节我们将学习 Hash Join 及其实现,并且从这一小节开始我们将接触并发计算 +。Hash Join 是实现 Join 的一种常见方式,除此之外 TinySQL 还实现了与 Merge Sort 思想较类似的 [Merge Join](https://github.com/pingcap-incubator/tinysql/blob/master/executor/merge_join.go#L24),感兴趣可以自行阅读。 + +## Hash Join 算法简介 + +简单来说,对于两张表的 Hash Join,我们会选择选择一个内表来构造哈希表,然后对外 表的每一行数据都去这个哈希表中查找是否有匹配的数据。那怎样提高 Hash Join 的效率呢?在建立好哈希表后,实际上哈希表就是只读的了,那么查找匹配的过程其实是可以并行起来的,也就是说我们可以用多个线程同时查哈希表: + +![Hash Join 1](./imgs/proj3-part2-1.png) + +这样可以大大提高 Hash Join 的效率。 + +## 理解代码 + +从上图也可以看到,其包含的主要过程如下: + +- Main Thread:一个,执行下列任务: + 1. 读取所有的 Inner 表数据并构造哈希表 + 2. 启动 Outer Fetcher 和 Join Worker 开始后台工作,生成 Join 结果。各个 goroutine 的启动过程由 fetchAndProbeHashTable 这个函数完成; + 3. 将 Join Worker 计算出的 Join 结果返回给 NextChunk 接口的调用方。 +- Outer Fetcher:一个,负责读取 Outer 表的数据并分发给各个 Join Worker; +- Join Worker:多个,负责查哈希表、Join 匹配的 Inner 和 Outer 表的数据,并把结果传递给 Main Thread。 + +接下来我们细致的介绍 Hash Join 的各个阶段。 + +### Main Thread 读内表数据并构造哈希表 + +读 Inner 表数据的过程由 fetchAndBuildHashTable 这个函数完成。这个过程会不断调用 Child 的 NextChunk 接口,把每次函数调用所获取的 Chunk 存储到 hashRowContainer 中供接下来的计算使用。 + +我们这里使用的哈希表本质上是一个链表,将具有相同 Key 哈希值的类似链表的方式连在一起,这样后续查找具有相同 Key 的值时只需要遍历链表即可。 + +### Outer Fetcher + +Outer Fetcher 是一个后台 goroutine,他的主要计算逻辑在 fetchOuterSideChunks 这个函数中。它会不断的读大表的数据,并将获得的 Outer 表的数据分发给各个 Join Worker。这里多线程之间的资源交互可以用下图表示: + +![Hash Join 2](./imgs/proj3-part2-2.jpg) + +上图中涉及到了两个 channel: + +1. outerResultChs[i]:每个 Join Worker 一个,Outer Fetcher 将获取到的 Outer Chunk 写入到这个 channel 中供相应的 Join Worker 使用 +2. outerChkResourceCh:当 Join Worker 用完了当前的 Outer Chunk 后,它需要把这个 Chunk 以及自己对应的 outerResultChs[i] 的地址一起写入到 outerChkResourceCh 这个 channel 中,告诉 Outer Fetcher 两个信息: + 1. 我提供了一个 Chunk 给你,你直接用这个 Chunk 去拉 Outer 数据吧,不用再重新申请内存了; + 2. 我的 Outer Chunk 已经用完了,你需要把拉取到的 Outer 数据直接传给我,不要给别人了; + +所以,整体上 Outer Fetcher 的计算逻辑是: + +1. 从 outerChkResourceCh 中获取一个 outerChkResource,存储在变量 outerResource 中 +2. 从 Child 那拉取数据,将数据写入到 outerResource 的 chk 字段中 +3. 将这个 chk 发给需要 Outer 表的数据的 Join Worker 的 outerResultChs[i] 中去,这个信息记录在了 outerResource 的 dest 字段中 + +### Join Worker + +每个 Join Worker 都是一个后台 goroutine,主要计算逻辑在 runJoinWorker 这个函数中。 + +![Hash Join 3](./imgs/proj3-part2-3.jpg) + +上图中涉及到两个 channel: + +1. joinChkResourceCh[i]:每个 Join Worker 一个,用来存 Join 的结果 +2. joinResultCh:Join Worker 将 Join 的结果 Chunk 以及它的 joinChkResourceCh 地址写入到这个 channel 中,告诉 Main Thread 两件事: + 1. 我计算出了一个 Join 的结果 Chunk 给你,你读到这个数据后可以直接返回给你 Next 函数的调用方 + 2. 你用完这个 Chunk 后赶紧还给我,不要给别人,我好继续干活 + + +所以,整体上 Join Worker 的计算逻辑是: + +1. 获取一个 Join Chunk Resource +2. 获取一个 Outer Chunk +3. 查哈希表,将匹配的 Outer Row 和 Inner Rows 写到 Join Chunk 中 +4. 将写满了的 Join Chunk 发送给 Main Thread + +### Main Thread + +主线程的计算逻辑由 NextChunk 这个函数完成。主线程的计算逻辑非常简单: + +1. 从 joinResultCh 中获取一个 Join Chunk +2. 将调用方传下来的 chk 和 Join Chunk 中的数据交换 +3. 
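下面用一段简化的 Go 代码示意上文 Outer Fetcher 与 Join Worker 之间通过 channel 复用 Chunk 的模式。它省略了哈希表与真正的 Join 逻辑,用 []int 代替 chunk.Chunk;channel 的名字沿用了上文的叫法,但整体只是一个概念草图,并非 join.go 的真实实现。

```go
package main

import (
	"fmt"
	"sync"
)

// outerResource 模拟 outerChkResourceCh 中传递的资源:
// 一块可复用的 chunk,以及该 worker 专属的结果 channel(对应 outerResultChs[i])。
type outerResource struct {
	chk  []int
	dest chan *outerResource
}

func main() {
	const workers = 2
	outerChkResourceCh := make(chan *outerResource, workers)
	outerResultChs := make([]chan *outerResource, workers)
	var wg sync.WaitGroup

	// 每个 worker 先把自己的空 chunk 连同结果 channel 注册给 fetcher。
	for i := 0; i < workers; i++ {
		outerResultChs[i] = make(chan *outerResource, 1)
		outerChkResourceCh <- &outerResource{chk: make([]int, 0, 4), dest: outerResultChs[i]}
	}

	// Outer Fetcher:取一个可用资源,复用其内存拉取一批 outer 数据,再发给对应 worker。
	go func() {
		data := []int{1, 2, 3, 4, 5, 6, 7, 8}
		for len(data) > 0 {
			res := <-outerChkResourceCh
			n := 4
			if len(data) < n {
				n = len(data)
			}
			res.chk = append(res.chk[:0], data[:n]...)
			data = data[n:]
			res.dest <- res
		}
		for i := 0; i < workers; i++ {
			close(outerResultChs[i]) // 数据读完,通知各个 worker 退出
		}
	}()

	// Join Worker:处理一批数据后,把 chunk 资源交还给 fetcher 继续复用。
	for i := 0; i < workers; i++ {
		wg.Add(1)
		go func(id int) {
			defer wg.Done()
			for res := range outerResultChs[id] {
				fmt.Printf("join worker %d probes outer rows %v\n", id, res.chk)
				outerChkResourceCh <- res
			}
		}(i)
	}
	wg.Wait()
}
```

真实实现中还需要处理错误传播与 context 取消,这里为突出"资源循环利用"的模式而全部省略。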
把 Join Chunk 还给对应的 Join Worker + +## 作业 + +实现 [runJoinWorker](https://github.com/pingcap-incubator/tinysql/blob/hash-join/executor/join.go#L243) 以及 [fetchAndBuildHashTable](https://github.com/pingcap-incubator/tinysql/blob/hash-join/executor/join.go#L148)。 + +## 测试 + +通过 `join_test.go` 下的所有测试 + +## 评分 + +全部通过可得 100 分。若有测试未通过按比例扣分。 + diff --git a/courses/proj5-part3-README-zh_CN.md b/courses/proj5-part3-README-zh_CN.md new file mode 100644 index 0000000..ada8ba1 --- /dev/null +++ b/courses/proj5-part3-README-zh_CN.md @@ -0,0 +1,94 @@ +# Hash Aggregate + +## 概览 + +在这一小节,我们将深入讲解 Hash Agg 的原理和实现。 + +## Hash Agg 简介 + +在理解 Hash 聚合之前,我们先回顾下聚合。以 `select sum(b) from t group by a` 为例,它的意思就是求那些具有相同 a 列值的 b 列上的和,也就是说最终每个不同的 a 只会输出一行,那么其实最终的结果可以认为是一个类似 map 这样的 map,那么而更新 map 的值也比较简单。 + +怎样进行优化呢?一个观察是 sum 这样的函数是满足交换律和结合律的,也就是说相加的顺序没有关系,例如假设某个具体的 a 值具有 10 行,那么我们可以先计算其中 5 行的 sum(b) 值,再计算另外 5 行 sum(b) 值,再把这样的结果合并起来。 + +对于 avg 这样的函数怎样优化呢?我们可以将它拆成是两个聚合函数,即 sum(b) 和 count(b),两个函数都满足交换律和结合律,因此也可以用上面的方法去优化。 + +图 + +那么一个可行的优化就是我们引入一个中间状态,这个时候每个 partial worker 从孩子节点取一部分数据并预先计算,注意这个时候得到的结果可能是不正确的,然后再将结果交给一个合并 partial worker 结果的 final worker,这样就可以提高整体的执行效率。 + +但上面的模型可能的一个结果就是有可能 a 列上不同值的数目较多,导致 final worker 成为瓶颈。怎样优化这个瓶颈呢?一个想法是将 fianl woker 拆成多个,但这个时候怎样保证正确性呢?即怎样让具有相同 a 值的、由 partial worker 所计算的中间结果映射到同一个 final worker 上?这个时候哈希函数又可以排上用场了,我们可以将 a 上的值进行哈希,然后将具有相同哈希值的中间结果交给同一个 final worker,即: + +图 + +这样我们就得到了 TinySQL 中 Hash Agg 的执行模型。 +为了适应上述并行计算的,TiDB 对于聚合函数的计算阶段进行划分,相应定义了 4 种计算模式:CompleteMode,FinalMode,Partial1Mode,Partial2Mode。不同的计算模式下,所处理的输入值和输出值会有所差异,如下表所示: + +| AggFunctionMode | 输入值 | 输出值 | +| :-------------- | ----: | ----: | +|CompleteMode|原始数据|最终结果| +|FinalMode|中间结果|最终结果| +|Partial1Mode|原始数据|中间结果| +|Partial2Mode|中间结果|进一步聚合的中间结果| + +以上文提到的 `select avg(b) from t group by a` 为例,通过对计算阶段进行划分,可以有多种不同的计算模式的组合,如: + +- CompleteMode + +此时 AVG 函数 的整个计算过程只有一个阶段,如图所示: + +图 + +- Partial1Mode --> FinalMode + +此时我们将 AVG 函数的计算过程拆成两个阶段进行,如图所示: + +图 + +除了上面的两个例子外,还可能聚合被下推到 TinyKV 上进行计算(Partial1Mode),并返回经过预聚合的中间结果。为了充分利用 TinySQL 所在机器的 CPU 和内存资源,加快 TinySQL 层的聚合计算,TinySQL 层的聚合函数计算可以这样进行:Partial2Mode --> FinalMode。 + +## 理解代码 + +TiDB 的并行 Hash Aggregation 算子执行过程中的主要线程有:Main Thead,Data Fetcher,Partial Worker,和 Final Worker: + +- Main Thread 一个: + - 启动 Input Reader,Partial Workers 及 Final Workers + - 等待 Final Worker 的执行结果并返回 +- Data Fetcher 一个: + - 按 batch 读取子节点数据并分发给 Partial Worker +- Partial Worker 多 个: + - 读取 Data Fetcher 发送来的数据,并做预聚合 + - 将预聚合结果根据 Group 值 shuffle 给对应的 Final Worker +- Final Worker 多 个: + - 读取 PartialWorker 发送来的数据,计算最终结果,发送给 Main Thread + +Hash Aggregation 的执行阶段可分为如下图所示的 5 步: + +图 + +1. 启动 Data Fetcher,Partial Workers 及 Final Workers + + 这部分工作由 prepare4ParallelExec 函数完成。该函数会启动一个 Data Fetcher,多个 Partial Worker 以及多个 Final Worker。 + +2. DataFetcher 读取子节点的数据并分发给 Partial Workers + + 这部分工作由 fetchChildData 函数完成。 + +3. Partial Workers 预聚合计算,及根据 Group Key shuffle 给对应的 Final Workers + + 这部分工作由 HashAggPartialWorker.run 函数完成。该函数调用 updatePartialResult 函数对 DataFetcher 发来数据执行预聚合计算,并将预聚合结果存储到 partialResultMap 中。其中 partialResultMap 的 key 为根据 Group-By 的值 encode 的结果,value 为 PartialResult 类型的数组,数组中的每个元素表示该下标处的聚合函数在对应 Group 中的预聚合结果。shuffleIntermData 函数完成根据 Group 值 shuffle 给对应的 Final Worker。 + +4. Final Worker 计算最终结果,发送给 Main Thread + + 这部分工作由 HashAggFinalWorker.run 函数完成。该函数调用 consumeIntermData 函数接收 PartialWorkers 发送来的预聚合结果,进而合并得到最终结果。getFinalResult 函数完成发送最终结果给 Main Thread。 + +5. 
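下面用一段简化的 Go 代码示意上文的两点:avg 拆成 (sum, count) 的中间状态,以及中间结果按 group key 的哈希路由到固定的 final worker 后再合并。为了突出计算阶段的划分,这里按顺序执行而没有真正启动多个 goroutine,数据在 partial worker 之间的划分方式也是随意假设的。

```go
package main

import (
	"fmt"
	"hash/fnv"
)

// partialResult 是 avg 的中间状态:sum 和 count 都满足交换律与结合律,可以安全合并。
type partialResult struct {
	sum   float64
	count int64
}

// row 对应 `select avg(b) from t group by a` 里的一行数据。
type row struct {
	a string
	b float64
}

// hashKey 保证相同的 group key 一定路由到同一个 final worker。
func hashKey(key string, buckets int) int {
	h := fnv.New32a()
	h.Write([]byte(key))
	return int(h.Sum32() % uint32(buckets))
}

func main() {
	data := []row{{"x", 1}, {"x", 3}, {"y", 10}, {"x", 2}, {"y", 20}}
	const numPartial, numFinal = 2, 2

	// Partial1Mode:原始数据 -> 中间结果 (sum, count)。
	partials := make([]map[string]*partialResult, numPartial)
	for i := range partials {
		partials[i] = make(map[string]*partialResult)
	}
	for i, r := range data {
		m := partials[i%numPartial] // 简化:按轮转把数据分给各个 partial worker
		pr, ok := m[r.a]
		if !ok {
			pr = &partialResult{}
			m[r.a] = pr
		}
		pr.sum += r.b
		pr.count++
	}

	// shuffle + FinalMode:中间结果按 key 哈希路由到 final worker 后合并出最终结果。
	finals := make([]map[string]*partialResult, numFinal)
	for i := range finals {
		finals[i] = make(map[string]*partialResult)
	}
	for _, m := range partials {
		for key, pr := range m {
			fm := finals[hashKey(key, numFinal)]
			fr, ok := fm[key]
			if !ok {
				fr = &partialResult{}
				fm[key] = fr
			}
			fr.sum += pr.sum
			fr.count += pr.count
		}
	}

	for i, fm := range finals {
		for key, fr := range fm {
			fmt.Printf("final worker %d: avg(b) where a=%q is %v\n", i, key, fr.sum/float64(fr.count))
		}
	}
}
```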
Main Thread 接收最终结果并返回 + +## 作业描述 + +补充完整 `aggregate.go` 中的 TODO 代码 + +## 评分 + +完成 `aggregate_test.go` 中的测试 + + diff --git a/ddl/callback.go b/ddl/callback.go new file mode 100644 index 0000000..421ab53 --- /dev/null +++ b/ddl/callback.go @@ -0,0 +1,72 @@ +// Copyright 2015 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package ddl + +import ( + "context" + + "github.com/pingcap/tidb/infoschema" + "github.com/pingcap/tidb/parser/model" + "github.com/pingcap/tidb/sessionctx" +) + +// Interceptor is used for DDL. +type Interceptor interface { + // OnGetInfoSchema is an intercept which is called in the function ddl.GetInfoSchema(). It is used in the tests. + OnGetInfoSchema(ctx sessionctx.Context, is infoschema.InfoSchema) infoschema.InfoSchema +} + +// BaseInterceptor implements Interceptor. +type BaseInterceptor struct{} + +// OnGetInfoSchema implements Interceptor.OnGetInfoSchema interface. +func (bi *BaseInterceptor) OnGetInfoSchema(ctx sessionctx.Context, is infoschema.InfoSchema) infoschema.InfoSchema { + return is +} + +// Callback is used for DDL. +type Callback interface { + // OnChanged is called after schema is changed. + OnChanged(err error) error + // OnJobRunBefore is called before running job. + OnJobRunBefore(job *model.Job) + // OnJobUpdated is called after the running job is updated. + OnJobUpdated(job *model.Job) + // OnWatched is called after watching owner is completed. + OnWatched(ctx context.Context) +} + +// BaseCallback implements Callback.OnChanged interface. +type BaseCallback struct { +} + +// OnChanged implements Callback interface. +func (c *BaseCallback) OnChanged(err error) error { + return err +} + +// OnJobRunBefore implements Callback.OnJobRunBefore interface. +func (c *BaseCallback) OnJobRunBefore(job *model.Job) { + // Nothing to do. +} + +// OnJobUpdated implements Callback.OnJobUpdated interface. +func (c *BaseCallback) OnJobUpdated(job *model.Job) { + // Nothing to do. +} + +// OnWatched implements Callback.OnWatched interface. +func (c *BaseCallback) OnWatched(ctx context.Context) { + // Nothing to do. +} diff --git a/ddl/callback_test.go b/ddl/callback_test.go new file mode 100644 index 0000000..033065d --- /dev/null +++ b/ddl/callback_test.go @@ -0,0 +1,94 @@ +// Copyright 2015 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package ddl + +import ( + "context" + + . 
"github.com/pingcap/check" + "github.com/pingcap/log" + "github.com/pingcap/tidb/infoschema" + "github.com/pingcap/tidb/parser/model" + "github.com/pingcap/tidb/sessionctx" + "go.uber.org/zap" +) + +type TestInterceptor struct { + *BaseInterceptor + + OnGetInfoSchemaExported func(ctx sessionctx.Context, is infoschema.InfoSchema) infoschema.InfoSchema +} + +func (ti *TestInterceptor) OnGetInfoSchema(ctx sessionctx.Context, is infoschema.InfoSchema) infoschema.InfoSchema { + if ti.OnGetInfoSchemaExported != nil { + return ti.OnGetInfoSchemaExported(ctx, is) + } + + return ti.BaseInterceptor.OnGetInfoSchema(ctx, is) +} + +type TestDDLCallback struct { + *BaseCallback + + onJobRunBefore func(*model.Job) + OnJobRunBeforeExported func(*model.Job) + onJobUpdated func(*model.Job) + OnJobUpdatedExported func(*model.Job) + onWatched func(ctx context.Context) +} + +func (tc *TestDDLCallback) OnJobRunBefore(job *model.Job) { + log.Info("on job run before", zap.String("job", job.String())) + if tc.OnJobRunBeforeExported != nil { + tc.OnJobRunBeforeExported(job) + return + } + if tc.onJobRunBefore != nil { + tc.onJobRunBefore(job) + return + } + + tc.BaseCallback.OnJobRunBefore(job) +} + +func (tc *TestDDLCallback) OnJobUpdated(job *model.Job) { + log.Info("on job updated", zap.String("job", job.String())) + if tc.OnJobUpdatedExported != nil { + tc.OnJobUpdatedExported(job) + return + } + if tc.onJobUpdated != nil { + tc.onJobUpdated(job) + return + } + + tc.BaseCallback.OnJobUpdated(job) +} + +func (tc *TestDDLCallback) OnWatched(ctx context.Context) { + if tc.onWatched != nil { + tc.onWatched(ctx) + return + } + + tc.BaseCallback.OnWatched(ctx) +} + +func (s *testDDLSuite) TestCallback(c *C) { + cb := &BaseCallback{} + c.Assert(cb.OnChanged(nil), IsNil) + cb.OnJobRunBefore(nil) + cb.OnJobUpdated(nil) + cb.OnWatched(context.TODO()) +} diff --git a/ddl/column.go b/ddl/column.go new file mode 100644 index 0000000..1a141f1 --- /dev/null +++ b/ddl/column.go @@ -0,0 +1,547 @@ +// Copyright 2015 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package ddl + +import ( + "fmt" + "github.com/pingcap/errors" + "github.com/pingcap/failpoint" + "github.com/pingcap/tidb/infoschema" + "github.com/pingcap/tidb/meta" + "github.com/pingcap/tidb/parser/model" + "github.com/pingcap/tidb/parser/mysql" + "github.com/pingcap/tidb/sessionctx" + "github.com/pingcap/tidb/table" + "github.com/pingcap/tidb/util/logutil" + "github.com/pingcap/tidb/util/sqlexec" + "go.uber.org/zap" + "sync/atomic" +) + +// adjustColumnInfoInAddColumn is used to set the correct position of column info when adding column. +// 1. The added column was append at the end of tblInfo.Columns, due to ddl state was not public then. +// It should be moved to the correct position when the ddl state to be changed to public. +// 2. The offset of column should also to be set to the right value. +func adjustColumnInfoInAddColumn(tblInfo *model.TableInfo, offset int) { + oldCols := tblInfo.Columns + newCols := make([]*model.ColumnInfo, 0, len(oldCols)) + newCols = append(newCols, oldCols[:offset]...) 
+ newCols = append(newCols, oldCols[len(oldCols)-1]) + newCols = append(newCols, oldCols[offset:len(oldCols)-1]...) + // Adjust column offset. + offsetChanged := make(map[int]int) + for i := offset + 1; i < len(newCols); i++ { + offsetChanged[newCols[i].Offset] = i + newCols[i].Offset = i + } + newCols[offset].Offset = offset + // Update index column offset info. + // TODO: There may be some corner cases for index column offsets, we may check this later. + for _, idx := range tblInfo.Indices { + for _, col := range idx.Columns { + newOffset, ok := offsetChanged[col.Offset] + if ok { + col.Offset = newOffset + } + } + } + tblInfo.Columns = newCols +} + +// adjustColumnInfoInDropColumn is used to set the correct position of column info when dropping column. +// 1. The offset of column should to be set to the last of the columns. +// 2. The dropped column is moved to the end of tblInfo.Columns, due to it was not public any more. +func adjustColumnInfoInDropColumn(tblInfo *model.TableInfo, offset int) { + oldCols := tblInfo.Columns + // Adjust column offset. + offsetChanged := make(map[int]int) + for i := offset + 1; i < len(oldCols); i++ { + offsetChanged[oldCols[i].Offset] = i - 1 + oldCols[i].Offset = i - 1 + } + oldCols[offset].Offset = len(oldCols) - 1 + // Update index column offset info. + // TODO: There may be some corner cases for index column offsets, we may check this later. + for _, idx := range tblInfo.Indices { + for _, col := range idx.Columns { + newOffset, ok := offsetChanged[col.Offset] + if ok { + col.Offset = newOffset + } + } + } + newCols := make([]*model.ColumnInfo, 0, len(oldCols)) + newCols = append(newCols, oldCols[:offset]...) + newCols = append(newCols, oldCols[offset+1:]...) + newCols = append(newCols, oldCols[offset]) + tblInfo.Columns = newCols +} + +func createColumnInfo(tblInfo *model.TableInfo, colInfo *model.ColumnInfo) (*model.ColumnInfo, int, error) { + // Check column name duplicate. + cols := tblInfo.Columns + position := len(cols) + colInfo.ID = allocateColumnID(tblInfo) + colInfo.State = model.StateNone + // To support add column asynchronous, we should mark its offset as the last column. + // So that we can use origin column offset to get value from row. + colInfo.Offset = len(cols) + + // Append the column info to the end of the tblInfo.Columns. + // It will reorder to the right position in "Columns" when it state change to public. + newCols := make([]*model.ColumnInfo, 0, len(cols)+1) + newCols = append(newCols, cols...) + newCols = append(newCols, colInfo) + + tblInfo.Columns = newCols + return colInfo, position, nil +} + +func checkAddColumn(t *meta.Meta, job *model.Job) (*model.TableInfo, *model.ColumnInfo, *model.ColumnInfo, int, error) { + schemaID := job.SchemaID + tblInfo, err := getTableInfoAndCancelFaultJob(t, job, schemaID) + if err != nil { + return nil, nil, nil, 0, errors.Trace(err) + } + col := &model.ColumnInfo{} + offset := 0 + err = job.DecodeArgs(col, &offset) + if err != nil { + job.State = model.JobStateCancelled + return nil, nil, nil, 0, errors.Trace(err) + } + + columnInfo := model.FindColumnInfo(tblInfo.Columns, col.Name.L) + if columnInfo != nil { + if columnInfo.State == model.StatePublic { + // We already have a column with the same column name. 
+ job.State = model.JobStateCancelled + return nil, nil, nil, 0, infoschema.ErrColumnExists.GenWithStackByArgs(col.Name) + } + } + return tblInfo, columnInfo, col, offset, nil +} + +func onAddColumn(d *ddlCtx, t *meta.Meta, job *model.Job) (ver int64, err error) { + // Handle the rolling back job. + if job.IsRollingback() { + ver, err = onDropColumn(t, job) + if err != nil { + return ver, errors.Trace(err) + } + return ver, nil + } + + failpoint.Inject("errorBeforeDecodeArgs", func(val failpoint.Value) { + if val.(bool) { + failpoint.Return(ver, errors.New("occur an error before decode args")) + } + }) + + tblInfo, columnInfo, col, offset, err := checkAddColumn(t, job) + if err != nil { + return ver, errors.Trace(err) + } + if columnInfo == nil { + columnInfo, offset, err = createColumnInfo(tblInfo, col) + if err != nil { + job.State = model.JobStateCancelled + return ver, errors.Trace(err) + } + logutil.BgLogger().Info("[ddl] run add column job", zap.String("job", job.String()), zap.Reflect("columnInfo", *columnInfo), zap.Int("offset", offset)) + // Set offset arg to job. + if offset != 0 { + job.Args = []interface{}{columnInfo, offset} + } + if err = checkAddColumnTooManyColumns(len(tblInfo.Columns)); err != nil { + job.State = model.JobStateCancelled + return ver, errors.Trace(err) + } + } + + originalState := columnInfo.State + switch columnInfo.State { + case model.StateNone: + // none -> delete only + job.SchemaState = model.StateDeleteOnly + columnInfo.State = model.StateDeleteOnly + ver, err = updateVersionAndTableInfoWithCheck(t, job, tblInfo, originalState != columnInfo.State) + case model.StateDeleteOnly: + // delete only -> write only + job.SchemaState = model.StateWriteOnly + columnInfo.State = model.StateWriteOnly + ver, err = updateVersionAndTableInfo(t, job, tblInfo, originalState != columnInfo.State) + case model.StateWriteOnly: + // write only -> reorganization + job.SchemaState = model.StateWriteReorganization + columnInfo.State = model.StateWriteReorganization + ver, err = updateVersionAndTableInfo(t, job, tblInfo, originalState != columnInfo.State) + case model.StateWriteReorganization: + // reorganization -> public + // Adjust table column offset. + adjustColumnInfoInAddColumn(tblInfo, offset) + columnInfo.State = model.StatePublic + ver, err = updateVersionAndTableInfo(t, job, tblInfo, originalState != columnInfo.State) + if err != nil { + return ver, errors.Trace(err) + } + + // Finish this job. + job.FinishTableJob(model.JobStateDone, model.StatePublic, ver, tblInfo) + default: + err = ErrInvalidDDLState.GenWithStackByArgs("column", columnInfo.State) + } + + return ver, errors.Trace(err) +} + +func onDropColumn(t *meta.Meta, job *model.Job) (ver int64, _ error) { + tblInfo, colInfo, err := checkDropColumn(t, job) + if err != nil { + return ver, errors.Trace(err) + } + + originalState := colInfo.State + // TODO: fill the codes of the case `StatePublic`, `StateWriteOnly` and `StateDeleteOnly`. + // You'll need to find the right place where to put the function `adjustColumnInfoInDropColumn`. + // Also you'll need to take a corner case about the default value. + // (Think about how the not null property and default value will influence the `Drop Column` operation. 
+ switch colInfo.State { + case model.StatePublic: + // To be filled + ver, err = updateVersionAndTableInfoWithCheck(t, job, tblInfo, originalState != colInfo.State) + case model.StateWriteOnly: + // To be filled + ver, err = updateVersionAndTableInfo(t, job, tblInfo, originalState != colInfo.State) + case model.StateDeleteOnly: + // To be filled + ver, err = updateVersionAndTableInfo(t, job, tblInfo, originalState != colInfo.State) + case model.StateDeleteReorganization: + // reorganization -> absent + // All reorganization jobs are done, drop this column. + tblInfo.Columns = tblInfo.Columns[:len(tblInfo.Columns)-1] + colInfo.State = model.StateNone + ver, err = updateVersionAndTableInfo(t, job, tblInfo, originalState != colInfo.State) + if err != nil { + return ver, errors.Trace(err) + } + + // Finish this job. + if job.IsRollingback() { + job.FinishTableJob(model.JobStateRollbackDone, model.StateNone, ver, tblInfo) + } else { + job.FinishTableJob(model.JobStateDone, model.StateNone, ver, tblInfo) + } + default: + err = errInvalidDDLJob.GenWithStackByArgs("table", tblInfo.State) + } + return ver, errors.Trace(err) +} + +func checkDropColumn(t *meta.Meta, job *model.Job) (*model.TableInfo, *model.ColumnInfo, error) { + schemaID := job.SchemaID + tblInfo, err := getTableInfoAndCancelFaultJob(t, job, schemaID) + if err != nil { + return nil, nil, errors.Trace(err) + } + + var colName model.CIStr + err = job.DecodeArgs(&colName) + if err != nil { + job.State = model.JobStateCancelled + return nil, nil, errors.Trace(err) + } + + colInfo := model.FindColumnInfo(tblInfo.Columns, colName.L) + if colInfo == nil { + job.State = model.JobStateCancelled + return nil, nil, ErrCantDropFieldOrKey.GenWithStack("column %s doesn't exist", colName) + } + if err = isDroppableColumn(tblInfo, colName); err != nil { + job.State = model.JobStateCancelled + return nil, nil, errors.Trace(err) + } + return tblInfo, colInfo, nil +} + +func onSetDefaultValue(t *meta.Meta, job *model.Job) (ver int64, _ error) { + newCol := &model.ColumnInfo{} + err := job.DecodeArgs(newCol) + if err != nil { + job.State = model.JobStateCancelled + return ver, errors.Trace(err) + } + + return updateColumnDefaultValue(t, job, newCol, &newCol.Name) +} + +func (w *worker) onModifyColumn(t *meta.Meta, job *model.Job) (ver int64, _ error) { + newCol := &model.ColumnInfo{} + oldColName := &model.CIStr{} + var modifyColumnTp byte + err := job.DecodeArgs(newCol, oldColName, &modifyColumnTp) + if err != nil { + job.State = model.JobStateCancelled + return ver, errors.Trace(err) + } + + return w.doModifyColumn(t, job, newCol, oldColName, modifyColumnTp) +} + +// doModifyColumn updates the column information and reorders all columns. 
+func (w *worker) doModifyColumn(t *meta.Meta, job *model.Job, newCol *model.ColumnInfo, oldName *model.CIStr, modifyColumnTp byte) (ver int64, _ error) { + dbInfo, err := t.GetDatabase(job.SchemaID) + if err != nil { + return ver, errors.Trace(err) + } + + tblInfo, err := getTableInfoAndCancelFaultJob(t, job, job.SchemaID) + if err != nil { + return ver, errors.Trace(err) + } + + oldCol := model.FindColumnInfo(tblInfo.Columns, oldName.L) + if job.IsRollingback() { + ver, err = rollbackModifyColumnJob(t, tblInfo, job, oldCol, modifyColumnTp) + if err != nil { + return ver, errors.Trace(err) + } + job.FinishTableJob(model.JobStateRollbackDone, model.StateNone, ver, tblInfo) + return ver, nil + } + + if oldCol == nil || oldCol.State != model.StatePublic { + job.State = model.JobStateCancelled + return ver, infoschema.ErrColumnNotExists.GenWithStackByArgs(oldName, tblInfo.Name) + } + // If we want to rename the column name, we need to check whether it already exists. + if newCol.Name.L != oldName.L { + c := model.FindColumnInfo(tblInfo.Columns, newCol.Name.L) + if c != nil { + job.State = model.JobStateCancelled + return ver, infoschema.ErrColumnExists.GenWithStackByArgs(newCol.Name) + } + } + + failpoint.Inject("uninitializedOffsetAndState", func(val failpoint.Value) { + if val.(bool) { + if newCol.State != model.StatePublic { + failpoint.Return(ver, errors.New("the column state is wrong")) + } + } + }) + + // Column from null to not null. + if !mysql.HasNotNullFlag(oldCol.Flag) && mysql.HasNotNullFlag(newCol.Flag) { + noPreventNullFlag := !mysql.HasPreventNullInsertFlag(oldCol.Flag) + // Introduce the `mysql.PreventNullInsertFlag` flag to prevent users from inserting or updating null values. + err = modifyColsFromNull2NotNull(w, dbInfo, tblInfo, []*model.ColumnInfo{oldCol}, newCol.Name, oldCol.Tp != newCol.Tp) + if err != nil { + if ErrWarnDataTruncated.Equal(err) || errInvalidUseOfNull.Equal(err) { + job.State = model.JobStateRollingback + } + return ver, err + } + // The column should get into prevent null status first. + if noPreventNullFlag { + return updateVersionAndTableInfoWithCheck(t, job, tblInfo, true) + } + } + + // We need the latest column's offset and state. This information can be obtained from the store. + newCol.Offset = oldCol.Offset + newCol.State = oldCol.State + // Calculate column's new position. + oldPos, newPos := oldCol.Offset, oldCol.Offset + + columnChanged := make(map[string]*model.ColumnInfo) + columnChanged[oldName.L] = newCol + + if newPos == oldPos { + tblInfo.Columns[newPos] = newCol + } else { + cols := tblInfo.Columns + + // Reorder columns in place. + if newPos < oldPos { + copy(cols[newPos+1:], cols[newPos:oldPos]) + } else { + copy(cols[oldPos:], cols[oldPos+1:newPos+1]) + } + cols[newPos] = newCol + + for i, col := range tblInfo.Columns { + if col.Offset != i { + columnChanged[col.Name.L] = col + col.Offset = i + } + } + } + + // Change offset and name in indices. + for _, idx := range tblInfo.Indices { + for _, c := range idx.Columns { + if newCol, ok := columnChanged[c.Name.L]; ok { + c.Name = newCol.Name + c.Offset = newCol.Offset + } + } + } + + ver, err = updateVersionAndTableInfoWithCheck(t, job, tblInfo, true) + if err != nil { + // Modified the type definition of 'null' to 'not null' before this, so rollBack the job when an error occurs. 
+ job.State = model.JobStateRollingback + return ver, errors.Trace(err) + } + + job.FinishTableJob(model.JobStateDone, model.StatePublic, ver, tblInfo) + return ver, nil +} + +// checkForNullValue ensure there are no null values of the column of this table. +// `isDataTruncated` indicates whether the new field and the old field type are the same, in order to be compatible with mysql. +func checkForNullValue(ctx sessionctx.Context, isDataTruncated bool, schema, table, newCol model.CIStr, oldCols ...*model.ColumnInfo) error { + colsStr := "" + for i, col := range oldCols { + if i == 0 { + colsStr += "`" + col.Name.L + "` is null" + } else { + colsStr += " or `" + col.Name.L + "` is null" + } + } + sql := fmt.Sprintf("select 1 from `%s`.`%s` where %s limit 1;", schema.L, table.L, colsStr) + rows, _, err := ctx.(sqlexec.RestrictedSQLExecutor).ExecRestrictedSQL(sql) + if err != nil { + return errors.Trace(err) + } + rowCount := len(rows) + if rowCount != 0 { + if isDataTruncated { + return ErrWarnDataTruncated.GenWithStackByArgs(newCol.L, rowCount) + } + return errInvalidUseOfNull + } + return nil +} + +func updateColumnDefaultValue(t *meta.Meta, job *model.Job, newCol *model.ColumnInfo, oldColName *model.CIStr) (ver int64, _ error) { + tblInfo, err := getTableInfoAndCancelFaultJob(t, job, job.SchemaID) + if err != nil { + return ver, errors.Trace(err) + } + oldCol := model.FindColumnInfo(tblInfo.Columns, oldColName.L) + if oldCol == nil || oldCol.State != model.StatePublic { + job.State = model.JobStateCancelled + return ver, infoschema.ErrColumnNotExists.GenWithStackByArgs(newCol.Name, tblInfo.Name) + } + // The newCol's offset may be the value of the old schema version, so we can't use newCol directly. + oldCol.DefaultValue = newCol.DefaultValue + oldCol.Flag = newCol.Flag + + ver, err = updateVersionAndTableInfo(t, job, tblInfo, true) + if err != nil { + job.State = model.JobStateCancelled + return ver, errors.Trace(err) + } + + job.FinishTableJob(model.JobStateDone, model.StatePublic, ver, tblInfo) + return ver, nil +} + +func isColumnWithIndex(colName string, indices []*model.IndexInfo) bool { + for _, indexInfo := range indices { + for _, col := range indexInfo.Columns { + if col.Name.L == colName { + return true + } + } + } + return false +} + +func allocateColumnID(tblInfo *model.TableInfo) int64 { + tblInfo.MaxColumnID++ + return tblInfo.MaxColumnID +} + +func checkAddColumnTooManyColumns(colNum int) error { + if uint32(colNum) > atomic.LoadUint32(&TableColumnCountLimit) { + return errTooManyFields + } + return nil +} + +// rollbackModifyColumnJob rollbacks the job when an error occurs. +func rollbackModifyColumnJob(t *meta.Meta, tblInfo *model.TableInfo, job *model.Job, oldCol *model.ColumnInfo, modifyColumnTp byte) (ver int64, _ error) { + var err error + if modifyColumnTp == mysql.TypeNull { + // field NotNullFlag flag reset. + tblInfo.Columns[oldCol.Offset].Flag = oldCol.Flag &^ mysql.NotNullFlag + // field PreventNullInsertFlag flag reset. + tblInfo.Columns[oldCol.Offset].Flag = oldCol.Flag &^ mysql.PreventNullInsertFlag + ver, err = updateVersionAndTableInfo(t, job, tblInfo, true) + if err != nil { + return ver, errors.Trace(err) + } + } + return ver, nil +} + +// modifyColsFromNull2NotNull modifies the type definitions of 'null' to 'not null'. +// Introduce the `mysql.PreventNullInsertFlag` flag to prevent users from inserting or updating null values. 
+func modifyColsFromNull2NotNull(w *worker, dbInfo *model.DBInfo, tblInfo *model.TableInfo, cols []*model.ColumnInfo, + newColName model.CIStr, isModifiedType bool) error { + // Get sessionctx from context resource pool. + var ctx sessionctx.Context + ctx, err := w.sessPool.get() + if err != nil { + return errors.Trace(err) + } + defer w.sessPool.put(ctx) + + // If there is a null value inserted, it cannot be modified and needs to be rollback. + err = checkForNullValue(ctx, isModifiedType, dbInfo.Name, tblInfo.Name, newColName, cols...) + if err != nil { + return errors.Trace(err) + } + + // Prevent this field from inserting null values. + for _, col := range cols { + col.Flag |= mysql.PreventNullInsertFlag + } + return nil +} + +func generateOriginDefaultValue(col *model.ColumnInfo) (interface{}, error) { + var err error + odValue := col.GetDefaultValue() + if odValue == nil && mysql.HasNotNullFlag(col.Flag) { + zeroVal := table.GetZeroValue(col) + odValue, err = zeroVal.ToString() + if err != nil { + return nil, errors.Trace(err) + } + } + return odValue, nil +} + +func getColumnInfoByName(tbInfo *model.TableInfo, column string) *model.ColumnInfo { + for _, colInfo := range tbInfo.Cols() { + if colInfo.Name.L == column { + return colInfo + } + } + return nil +} diff --git a/ddl/column_change_test.go b/ddl/column_change_test.go new file mode 100644 index 0000000..9697d56 --- /dev/null +++ b/ddl/column_change_test.go @@ -0,0 +1,389 @@ +// Copyright 2016 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package ddl + +import ( + "context" + "fmt" + "sync" + "time" + + . 
"github.com/pingcap/check" + "github.com/pingcap/errors" + "github.com/pingcap/tidb/kv" + "github.com/pingcap/tidb/meta" + "github.com/pingcap/tidb/meta/autoid" + "github.com/pingcap/tidb/parser/model" + "github.com/pingcap/tidb/sessionctx" + "github.com/pingcap/tidb/table" + "github.com/pingcap/tidb/types" + "github.com/pingcap/tidb/util/mock" + "github.com/pingcap/tidb/util/testutil" +) + +var _ = Suite(&testColumnChangeSuite{}) + +type testColumnChangeSuite struct { + store kv.Storage + dbInfo *model.DBInfo +} + +func (s *testColumnChangeSuite) SetUpSuite(c *C) { + WaitTimeWhenErrorOccured = 1 * time.Microsecond + s.store = testCreateStore(c, "test_column_change") + s.dbInfo = &model.DBInfo{ + Name: model.NewCIStr("test_column_change"), + ID: 1, + } + err := kv.RunInNewTxn(s.store, true, func(txn kv.Transaction) error { + t := meta.NewMeta(txn) + return errors.Trace(t.CreateDatabase(s.dbInfo)) + }) + c.Check(err, IsNil) +} + +func (s *testColumnChangeSuite) TearDownSuite(c *C) { + s.store.Close() +} + +func (s *testColumnChangeSuite) TestColumnChange(c *C) { + d := newDDL( + context.Background(), + WithStore(s.store), + WithLease(testLease), + ) + defer d.Stop() + // create table t (c1 int, c2 int); + tblInfo := testTableInfo(c, d, "t", 2) + ctx := testNewContext(d) + err := ctx.NewTxn(context.Background()) + c.Assert(err, IsNil) + testCreateTable(c, ctx, d, s.dbInfo, tblInfo) + // insert t values (1, 2); + originTable := testGetTable(c, d, s.dbInfo.ID, tblInfo.ID) + row := types.MakeDatums(1, 2) + h, err := originTable.AddRecord(ctx, row) + c.Assert(err, IsNil) + txn, err := ctx.Txn(true) + c.Assert(err, IsNil) + err = txn.Commit(context.Background()) + c.Assert(err, IsNil) + + var mu sync.Mutex + tc := &TestDDLCallback{} + // set up hook + prevState := model.StateNone + var ( + deleteOnlyTable table.Table + writeOnlyTable table.Table + publicTable table.Table + ) + var checkErr error + tc.onJobUpdated = func(job *model.Job) { + if job.SchemaState == prevState { + return + } + hookCtx := mock.NewContext() + hookCtx.Store = s.store + prevState = job.SchemaState + err := hookCtx.NewTxn(context.Background()) + if err != nil { + checkErr = errors.Trace(err) + } + switch job.SchemaState { + case model.StateDeleteOnly: + deleteOnlyTable, err = getCurrentTable(d, s.dbInfo.ID, tblInfo.ID) + if err != nil { + checkErr = errors.Trace(err) + } + case model.StateWriteOnly: + writeOnlyTable, err = getCurrentTable(d, s.dbInfo.ID, tblInfo.ID) + if err != nil { + checkErr = errors.Trace(err) + } + err = s.checkAddWriteOnly(hookCtx, d, deleteOnlyTable, writeOnlyTable, h) + if err != nil { + checkErr = errors.Trace(err) + } + case model.StatePublic: + mu.Lock() + publicTable, err = getCurrentTable(d, s.dbInfo.ID, tblInfo.ID) + if err != nil { + checkErr = errors.Trace(err) + } + err = s.checkAddPublic(hookCtx, d, writeOnlyTable, publicTable) + if err != nil { + checkErr = errors.Trace(err) + } + mu.Unlock() + } + txn, err := hookCtx.Txn(true) + if err != nil { + checkErr = errors.Trace(err) + } + err = txn.Commit(context.Background()) + if err != nil { + checkErr = errors.Trace(err) + } + } + d.SetHook(tc) + defaultValue := int64(3) + job := testCreateColumn(c, ctx, d, s.dbInfo, tblInfo, "c3", defaultValue) + c.Assert(errors.ErrorStack(checkErr), Equals, "") + testCheckJobDone(c, d, job, true) + mu.Lock() + tb := publicTable + mu.Unlock() + s.testColumnDrop(c, ctx, d, tb) + s.testAddColumnNoDefault(c, ctx, d, tblInfo) +} + +func (s *testColumnChangeSuite) testAddColumnNoDefault(c *C, ctx 
sessionctx.Context, d *ddl, tblInfo *model.TableInfo) { + d.Stop() + tc := &TestDDLCallback{} + // set up hook + prevState := model.StateNone + var checkErr error + var writeOnlyTable table.Table + tc.onJobUpdated = func(job *model.Job) { + if job.SchemaState == prevState { + return + } + hookCtx := mock.NewContext() + hookCtx.Store = s.store + prevState = job.SchemaState + err := hookCtx.NewTxn(context.Background()) + if err != nil { + checkErr = errors.Trace(err) + } + switch job.SchemaState { + case model.StateWriteOnly: + writeOnlyTable, err = getCurrentTable(d, s.dbInfo.ID, tblInfo.ID) + if err != nil { + checkErr = errors.Trace(err) + } + case model.StatePublic: + _, err = getCurrentTable(d, s.dbInfo.ID, tblInfo.ID) + if err != nil { + checkErr = errors.Trace(err) + } + _, err = writeOnlyTable.AddRecord(hookCtx, types.MakeDatums(10, 10)) + if err != nil { + checkErr = errors.Trace(err) + } + } + txn, err := hookCtx.Txn(true) + if err != nil { + checkErr = errors.Trace(err) + } + err = txn.Commit(context.TODO()) + if err != nil { + checkErr = errors.Trace(err) + } + } + d.SetHook(tc) + d.start(context.Background(), nil) + job := testCreateColumn(c, ctx, d, s.dbInfo, tblInfo, "c3", nil) + c.Assert(errors.ErrorStack(checkErr), Equals, "") + testCheckJobDone(c, d, job, true) +} + +func (s *testColumnChangeSuite) testColumnDrop(c *C, ctx sessionctx.Context, d *ddl, tbl table.Table) { + d.Stop() + dropCol := tbl.Cols()[2] + tc := &TestDDLCallback{} + // set up hook + prevState := model.StateNone + var checkErr error + tc.onJobUpdated = func(job *model.Job) { + if job.SchemaState == prevState { + return + } + prevState = job.SchemaState + currentTbl, err := getCurrentTable(d, s.dbInfo.ID, tbl.Meta().ID) + if err != nil { + checkErr = errors.Trace(err) + } + for _, col := range currentTbl.Cols() { + if col.ID == dropCol.ID { + checkErr = errors.Errorf("column is not dropped") + } + } + } + d.SetHook(tc) + d.start(context.Background(), nil) + c.Assert(errors.ErrorStack(checkErr), Equals, "") + testDropColumn(c, ctx, d, s.dbInfo, tbl.Meta(), dropCol.Name.L, false) +} + +func (s *testColumnChangeSuite) checkAddWriteOnly(ctx sessionctx.Context, d *ddl, deleteOnlyTable, writeOnlyTable table.Table, h int64) error { + // WriteOnlyTable: insert t values (2, 3) + err := ctx.NewTxn(context.Background()) + if err != nil { + return errors.Trace(err) + } + _, err = writeOnlyTable.AddRecord(ctx, types.MakeDatums(2, 3)) + if err != nil { + return errors.Trace(err) + } + err = ctx.NewTxn(context.Background()) + if err != nil { + return errors.Trace(err) + } + err = checkResult(ctx, writeOnlyTable, writeOnlyTable.WritableCols(), + testutil.RowsWithSep(" ", "1 2 ", "2 3 3")) + if err != nil { + return errors.Trace(err) + } + // This test is for RowWithCols when column state is StateWriteOnly. 
+ row, err := writeOnlyTable.RowWithCols(ctx, h, writeOnlyTable.WritableCols()) + if err != nil { + return errors.Trace(err) + } + got := fmt.Sprintf("%v", row) + expect := fmt.Sprintf("%v", []types.Datum{types.NewDatum(1), types.NewDatum(2), types.NewDatum(nil)}) + if got != expect { + return errors.Errorf("expect %v, got %v", expect, got) + } + // DeleteOnlyTable: select * from t + err = checkResult(ctx, deleteOnlyTable, deleteOnlyTable.WritableCols(), testutil.RowsWithSep(" ", "1 2", "2 3")) + if err != nil { + return errors.Trace(err) + } + // WriteOnlyTable: update t set c1 = 2 where c1 = 1 + h, _, err = writeOnlyTable.Seek(ctx, 0) + if err != nil { + return errors.Trace(err) + } + err = writeOnlyTable.UpdateRecord(ctx, h, types.MakeDatums(1, 2, 3), types.MakeDatums(2, 2, 3), touchedSlice(writeOnlyTable)) + if err != nil { + return errors.Trace(err) + } + err = ctx.NewTxn(context.Background()) + if err != nil { + return errors.Trace(err) + } + // After we update the first row, its default value is also set. + err = checkResult(ctx, writeOnlyTable, writeOnlyTable.WritableCols(), testutil.RowsWithSep(" ", "2 2 3", "2 3 3")) + if err != nil { + return errors.Trace(err) + } + // DeleteOnlyTable: delete from t where c2 = 2 + err = deleteOnlyTable.RemoveRecord(ctx, h, types.MakeDatums(2, 2)) + if err != nil { + return errors.Trace(err) + } + err = ctx.NewTxn(context.Background()) + if err != nil { + return errors.Trace(err) + } + // After delete table has deleted the first row, check the WriteOnly table records. + err = checkResult(ctx, writeOnlyTable, writeOnlyTable.WritableCols(), testutil.RowsWithSep(" ", "2 3 3")) + return errors.Trace(err) +} + +func touchedSlice(t table.Table) []bool { + touched := make([]bool, 0, len(t.WritableCols())) + for range t.WritableCols() { + touched = append(touched, true) + } + return touched +} + +func (s *testColumnChangeSuite) checkAddPublic(sctx sessionctx.Context, d *ddl, writeOnlyTable, publicTable table.Table) error { + ctx := context.TODO() + // publicTable Insert t values (4, 4, 4) + err := sctx.NewTxn(ctx) + if err != nil { + return errors.Trace(err) + } + h, err := publicTable.AddRecord(sctx, types.MakeDatums(4, 4, 4)) + if err != nil { + return errors.Trace(err) + } + err = sctx.NewTxn(ctx) + if err != nil { + return errors.Trace(err) + } + // writeOnlyTable update t set c1 = 3 where c1 = 4 + oldRow, err := writeOnlyTable.RowWithCols(sctx, h, writeOnlyTable.WritableCols()) + if err != nil { + return errors.Trace(err) + } + if len(oldRow) != 3 { + return errors.Errorf("%v", oldRow) + } + newRow := types.MakeDatums(3, 4, oldRow[2].GetValue()) + err = writeOnlyTable.UpdateRecord(sctx, h, oldRow, newRow, touchedSlice(writeOnlyTable)) + if err != nil { + return errors.Trace(err) + } + err = sctx.NewTxn(ctx) + if err != nil { + return errors.Trace(err) + } + // publicTable select * from t, make sure the new c3 value 4 is not overwritten to default value 3. 
+ err = checkResult(sctx, publicTable, publicTable.WritableCols(), testutil.RowsWithSep(" ", "2 3 3", "3 4 4")) + if err != nil { + return errors.Trace(err) + } + return nil +} + +func getCurrentTable(d *ddl, schemaID, tableID int64) (table.Table, error) { + var tblInfo *model.TableInfo + err := kv.RunInNewTxn(d.store, false, func(txn kv.Transaction) error { + t := meta.NewMeta(txn) + var err error + tblInfo, err = t.GetTable(schemaID, tableID) + if err != nil { + return errors.Trace(err) + } + return nil + }) + if err != nil { + return nil, errors.Trace(err) + } + alloc := autoid.NewAllocator(d.store, schemaID, false) + tbl, err := table.TableFromMeta(alloc, tblInfo) + if err != nil { + return nil, errors.Trace(err) + } + return tbl, err +} + +func checkResult(ctx sessionctx.Context, t table.Table, cols []*table.Column, rows [][]interface{}) error { + var gotRows [][]interface{} + err := t.IterRecords(ctx, t.FirstKey(), cols, func(h int64, data []types.Datum, cols []*table.Column) (bool, error) { + gotRows = append(gotRows, datumsToInterfaces(data)) + return true, nil + }) + if err != nil { + return err + } + got := fmt.Sprintf("%v", gotRows) + expect := fmt.Sprintf("%v", rows) + if got != expect { + return errors.Errorf("expect %v, got %v", expect, got) + } + return nil +} + +func datumsToInterfaces(datums []types.Datum) []interface{} { + ifs := make([]interface{}, 0, len(datums)) + for _, d := range datums { + ifs = append(ifs, d.GetValue()) + } + return ifs +} diff --git a/ddl/column_test.go b/ddl/column_test.go new file mode 100644 index 0000000..c97da08 --- /dev/null +++ b/ddl/column_test.go @@ -0,0 +1,929 @@ +// Copyright 2015 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package ddl + +import ( + "context" + "reflect" + "sync" + + . 
"github.com/pingcap/check" + "github.com/pingcap/errors" + "github.com/pingcap/tidb/infoschema" + "github.com/pingcap/tidb/kv" + "github.com/pingcap/tidb/parser" + "github.com/pingcap/tidb/parser/ast" + "github.com/pingcap/tidb/parser/model" + "github.com/pingcap/tidb/parser/mysql" + "github.com/pingcap/tidb/parser/terror" + "github.com/pingcap/tidb/sessionctx" + "github.com/pingcap/tidb/table" + "github.com/pingcap/tidb/table/tables" + "github.com/pingcap/tidb/tablecodec" + "github.com/pingcap/tidb/types" +) + +var _ = Suite(&testColumnSuite{}) + +type testColumnSuite struct { + store kv.Storage + dbInfo *model.DBInfo + + d *ddl +} + +func (s *testColumnSuite) SetUpSuite(c *C) { + s.store = testCreateStore(c, "test_column") + s.d = newDDL( + context.Background(), + WithStore(s.store), + WithLease(testLease), + ) + + s.dbInfo = testSchemaInfo(c, s.d, "test_column") + testCreateSchema(c, testNewContext(s.d), s.d, s.dbInfo) +} + +func (s *testColumnSuite) TearDownSuite(c *C) { + testDropSchema(c, testNewContext(s.d), s.d, s.dbInfo) + s.d.Stop() + + err := s.store.Close() + c.Assert(err, IsNil) +} + +func buildCreateColumnJob(dbInfo *model.DBInfo, tblInfo *model.TableInfo, colName string, + defaultValue interface{}) *model.Job { + col := &model.ColumnInfo{ + Name: model.NewCIStr(colName), + Offset: len(tblInfo.Columns), + DefaultValue: defaultValue, + OriginDefaultValue: defaultValue, + } + col.ID = allocateColumnID(tblInfo) + col.FieldType = *types.NewFieldType(mysql.TypeLong) + + job := &model.Job{ + SchemaID: dbInfo.ID, + TableID: tblInfo.ID, + Type: model.ActionAddColumn, + BinlogInfo: &model.HistoryInfo{}, + Args: []interface{}{col, 0}, + } + return job +} + +func testCreateColumn(c *C, ctx sessionctx.Context, d *ddl, dbInfo *model.DBInfo, tblInfo *model.TableInfo, + colName string, defaultValue interface{}) *model.Job { + job := buildCreateColumnJob(dbInfo, tblInfo, colName, defaultValue) + err := d.doDDLJob(ctx, job) + c.Assert(err, IsNil) + v := getSchemaVer(c, ctx) + checkHistoryJobArgs(c, ctx, job.ID, &historyJobArgs{ver: v, tbl: tblInfo}) + return job +} + +func buildDropColumnJob(dbInfo *model.DBInfo, tblInfo *model.TableInfo, colName string) *model.Job { + return &model.Job{ + SchemaID: dbInfo.ID, + TableID: tblInfo.ID, + Type: model.ActionDropColumn, + BinlogInfo: &model.HistoryInfo{}, + Args: []interface{}{model.NewCIStr(colName)}, + } +} + +func testDropColumn(c *C, ctx sessionctx.Context, d *ddl, dbInfo *model.DBInfo, tblInfo *model.TableInfo, colName string, isError bool) *model.Job { + job := buildDropColumnJob(dbInfo, tblInfo, colName) + err := d.doDDLJob(ctx, job) + if isError { + c.Assert(err, NotNil) + return nil + } + c.Assert(errors.ErrorStack(err), Equals, "") + v := getSchemaVer(c, ctx) + checkHistoryJobArgs(c, ctx, job.ID, &historyJobArgs{ver: v, tbl: tblInfo}) + return job +} + +func (s *testColumnSuite) TestColumn(c *C) { + tblInfo := testTableInfo(c, s.d, "t1", 3) + ctx := testNewContext(s.d) + + testCreateTable(c, ctx, s.d, s.dbInfo, tblInfo) + t := testGetTable(c, s.d, s.dbInfo.ID, tblInfo.ID) + + num := 10 + for i := 0; i < num; i++ { + _, err := t.AddRecord(ctx, types.MakeDatums(i, 10*i, 100*i)) + c.Assert(err, IsNil) + } + + err := ctx.NewTxn(context.Background()) + c.Assert(err, IsNil) + + i := int64(0) + err = t.IterRecords(ctx, t.FirstKey(), t.Cols(), func(h int64, data []types.Datum, cols []*table.Column) (bool, error) { + c.Assert(data, HasLen, 3) + c.Assert(data[0].GetInt64(), Equals, i) + c.Assert(data[1].GetInt64(), Equals, 10*i) + 
c.Assert(data[2].GetInt64(), Equals, 100*i) + i++ + return true, nil + }) + c.Assert(err, IsNil) + c.Assert(i, Equals, int64(num)) + + c.Assert(table.FindCol(t.Cols(), "c4"), IsNil) + + job := testCreateColumn(c, ctx, s.d, s.dbInfo, tblInfo, "c4", 100) + testCheckJobDone(c, s.d, job, true) + + t = testGetTable(c, s.d, s.dbInfo.ID, tblInfo.ID) + c.Assert(table.FindCol(t.Cols(), "c4"), NotNil) + + i = int64(0) + err = t.IterRecords(ctx, t.FirstKey(), t.Cols(), + func(h int64, data []types.Datum, cols []*table.Column) (bool, error) { + c.Assert(data, HasLen, 4) + c.Assert(data[0].GetInt64(), Equals, i) + c.Assert(data[1].GetInt64(), Equals, 10*i) + c.Assert(data[2].GetInt64(), Equals, 100*i) + c.Assert(data[3].GetInt64(), Equals, int64(100)) + i++ + return true, nil + }) + c.Assert(err, IsNil) + c.Assert(i, Equals, int64(num)) + + h, err := t.AddRecord(ctx, types.MakeDatums(11, 12, 13, 14)) + c.Assert(err, IsNil) + err = ctx.NewTxn(context.Background()) + c.Assert(err, IsNil) + values, err := t.RowWithCols(ctx, h, t.Cols()) + c.Assert(err, IsNil) + + c.Assert(values, HasLen, 4) + c.Assert(values[3].GetInt64(), Equals, int64(14)) + + job = testDropColumn(c, ctx, s.d, s.dbInfo, tblInfo, "c4", false) + testCheckJobDone(c, s.d, job, false) + + t = testGetTable(c, s.d, s.dbInfo.ID, tblInfo.ID) + values, err = t.RowWithCols(ctx, h, t.Cols()) + c.Assert(err, IsNil) + + c.Assert(values, HasLen, 3) + c.Assert(values[2].GetInt64(), Equals, int64(13)) + + job = testCreateColumn(c, ctx, s.d, s.dbInfo, tblInfo, "c4", 111) + testCheckJobDone(c, s.d, job, true) + + t = testGetTable(c, s.d, s.dbInfo.ID, tblInfo.ID) + values, err = t.RowWithCols(ctx, h, t.Cols()) + c.Assert(err, IsNil) + + c.Assert(values, HasLen, 4) + c.Assert(values[3].GetInt64(), Equals, int64(111)) + + job = testCreateColumn(c, ctx, s.d, s.dbInfo, tblInfo, "c5", 101) + testCheckJobDone(c, s.d, job, true) + + testDropTable(c, ctx, s.d, s.dbInfo, tblInfo) +} + +func (s *testColumnSuite) checkColumnKVExist(ctx sessionctx.Context, t table.Table, handle int64, col *table.Column, columnValue interface{}, isExist bool) error { + err := ctx.NewTxn(context.Background()) + if err != nil { + return errors.Trace(err) + } + defer func() { + if txn, err1 := ctx.Txn(true); err1 == nil { + txn.Commit(context.Background()) + } + }() + key := t.RecordKey(handle) + txn, err := ctx.Txn(true) + if err != nil { + return errors.Trace(err) + } + data, err := txn.Get(context.TODO(), key) + if !isExist { + if terror.ErrorEqual(err, kv.ErrNotExist) { + return nil + } + } + if err != nil { + return errors.Trace(err) + } + colMap := make(map[int64]*types.FieldType) + colMap[col.ID] = &col.FieldType + rowMap, err := tablecodec.DecodeRow(data, colMap, ctx.GetSessionVars().Location()) + if err != nil { + return errors.Trace(err) + } + val, ok := rowMap[col.ID] + if isExist { + if !ok || val.GetValue() != columnValue { + return errors.Errorf("%v is not equal to %v", val.GetValue(), columnValue) + } + } else { + if ok { + return errors.Errorf("column value should not exists") + } + } + return nil +} + +func (s *testColumnSuite) checkNoneColumn(ctx sessionctx.Context, d *ddl, tblInfo *model.TableInfo, handle int64, col *table.Column, columnValue interface{}) error { + t, err := testGetTableWithError(d, s.dbInfo.ID, tblInfo.ID) + if err != nil { + return errors.Trace(err) + } + err = s.checkColumnKVExist(ctx, t, handle, col, columnValue, false) + if err != nil { + return errors.Trace(err) + } + err = s.testGetColumn(t, col.Name.L, false) + if err != nil { + return 
errors.Trace(err) + } + return nil +} + +func (s *testColumnSuite) checkDeleteOnlyColumn(ctx sessionctx.Context, d *ddl, tblInfo *model.TableInfo, handle int64, col *table.Column, row []types.Datum, columnValue interface{}) error { + t, err := testGetTableWithError(d, s.dbInfo.ID, tblInfo.ID) + if err != nil { + return errors.Trace(err) + } + err = ctx.NewTxn(context.Background()) + if err != nil { + return errors.Trace(err) + } + i := int64(0) + err = t.IterRecords(ctx, t.FirstKey(), t.Cols(), func(h int64, data []types.Datum, cols []*table.Column) (bool, error) { + if !reflect.DeepEqual(data, row) { + return false, errors.Errorf("%v not equal to %v", data, row) + } + i++ + return true, nil + }) + if err != nil { + return errors.Trace(err) + } + if i != 1 { + return errors.Errorf("expect 1, got %v", i) + } + err = s.checkColumnKVExist(ctx, t, handle, col, columnValue, false) + if err != nil { + return errors.Trace(err) + } + // Test add a new row. + err = ctx.NewTxn(context.Background()) + if err != nil { + return errors.Trace(err) + } + + newRow := types.MakeDatums(int64(11), int64(22), int64(33)) + newHandle, err := t.AddRecord(ctx, newRow) + if err != nil { + return errors.Trace(err) + } + err = ctx.NewTxn(context.Background()) + if err != nil { + return errors.Trace(err) + } + + rows := [][]types.Datum{row, newRow} + + i = int64(0) + err = t.IterRecords(ctx, t.FirstKey(), t.Cols(), func(h int64, data []types.Datum, cols []*table.Column) (bool, error) { + if !reflect.DeepEqual(data, rows[i]) { + return false, errors.Errorf("%v not equal to %v", data, rows[i]) + } + i++ + return true, nil + }) + if err != nil { + return errors.Trace(err) + } + if i != 2 { + return errors.Errorf("expect 2, got %v", i) + } + + err = s.checkColumnKVExist(ctx, t, handle, col, columnValue, false) + if err != nil { + return errors.Trace(err) + } + // Test remove a row. 
+ err = ctx.NewTxn(context.Background()) + if err != nil { + return errors.Trace(err) + } + + err = t.RemoveRecord(ctx, newHandle, newRow) + if err != nil { + return errors.Trace(err) + } + err = ctx.NewTxn(context.Background()) + if err != nil { + return errors.Trace(err) + } + i = int64(0) + err = t.IterRecords(ctx, t.FirstKey(), t.Cols(), func(h int64, data []types.Datum, cols []*table.Column) (bool, error) { + i++ + return true, nil + }) + if err != nil { + return errors.Trace(err) + } + + if i != 1 { + return errors.Errorf("expect 1, got %v", i) + } + err = s.checkColumnKVExist(ctx, t, newHandle, col, columnValue, false) + if err != nil { + return errors.Trace(err) + } + err = s.testGetColumn(t, col.Name.L, false) + if err != nil { + return errors.Trace(err) + } + return nil +} + +func (s *testColumnSuite) checkWriteOnlyColumn(ctx sessionctx.Context, d *ddl, tblInfo *model.TableInfo, handle int64, col *table.Column, row []types.Datum, columnValue interface{}) error { + t, err := testGetTableWithError(d, s.dbInfo.ID, tblInfo.ID) + if err != nil { + return errors.Trace(err) + } + err = ctx.NewTxn(context.Background()) + if err != nil { + return errors.Trace(err) + } + + i := int64(0) + err = t.IterRecords(ctx, t.FirstKey(), t.Cols(), func(h int64, data []types.Datum, cols []*table.Column) (bool, error) { + if !reflect.DeepEqual(data, row) { + return false, errors.Errorf("%v not equal to %v", data, row) + } + i++ + return true, nil + }) + if err != nil { + return errors.Trace(err) + } + if i != 1 { + return errors.Errorf("expect 1, got %v", i) + } + + err = s.checkColumnKVExist(ctx, t, handle, col, columnValue, false) + if err != nil { + return errors.Trace(err) + } + + // Test add a new row. + err = ctx.NewTxn(context.Background()) + if err != nil { + return errors.Trace(err) + } + + newRow := types.MakeDatums(int64(11), int64(22), int64(33)) + newHandle, err := t.AddRecord(ctx, newRow) + if err != nil { + return errors.Trace(err) + } + err = ctx.NewTxn(context.Background()) + if err != nil { + return errors.Trace(err) + } + + rows := [][]types.Datum{row, newRow} + + i = int64(0) + err = t.IterRecords(ctx, t.FirstKey(), t.Cols(), func(h int64, data []types.Datum, cols []*table.Column) (bool, error) { + if !reflect.DeepEqual(data, rows[i]) { + return false, errors.Errorf("%v not equal to %v", data, rows[i]) + } + i++ + return true, nil + }) + if err != nil { + return errors.Trace(err) + } + if i != 2 { + return errors.Errorf("expect 2, got %v", i) + } + + err = s.checkColumnKVExist(ctx, t, newHandle, col, columnValue, true) + if err != nil { + return errors.Trace(err) + } + // Test remove a row. 
+ err = ctx.NewTxn(context.Background()) + if err != nil { + return errors.Trace(err) + } + + err = t.RemoveRecord(ctx, newHandle, newRow) + if err != nil { + return errors.Trace(err) + } + err = ctx.NewTxn(context.Background()) + if err != nil { + return errors.Trace(err) + } + + i = int64(0) + err = t.IterRecords(ctx, t.FirstKey(), t.Cols(), func(h int64, data []types.Datum, cols []*table.Column) (bool, error) { + i++ + return true, nil + }) + if err != nil { + return errors.Trace(err) + } + if i != 1 { + return errors.Errorf("expect 1, got %v", i) + } + + err = s.checkColumnKVExist(ctx, t, newHandle, col, columnValue, false) + if err != nil { + return errors.Trace(err) + } + err = s.testGetColumn(t, col.Name.L, false) + if err != nil { + return errors.Trace(err) + } + return nil +} + +func (s *testColumnSuite) checkReorganizationColumn(ctx sessionctx.Context, d *ddl, tblInfo *model.TableInfo, handle int64, col *table.Column, row []types.Datum, columnValue interface{}) error { + t, err := testGetTableWithError(d, s.dbInfo.ID, tblInfo.ID) + if err != nil { + return errors.Trace(err) + } + err = ctx.NewTxn(context.Background()) + if err != nil { + return errors.Trace(err) + } + + i := int64(0) + err = t.IterRecords(ctx, t.FirstKey(), t.Cols(), func(h int64, data []types.Datum, cols []*table.Column) (bool, error) { + if !reflect.DeepEqual(data, row) { + return false, errors.Errorf("%v not equal to %v", data, row) + } + i++ + return true, nil + }) + if err != nil { + return errors.Trace(err) + } + if i != 1 { + return errors.Errorf("expect 1 got %v", i) + } + + // Test add a new row. + err = ctx.NewTxn(context.Background()) + if err != nil { + return errors.Trace(err) + } + + newRow := types.MakeDatums(int64(11), int64(22), int64(33)) + newHandle, err := t.AddRecord(ctx, newRow) + if err != nil { + return errors.Trace(err) + } + err = ctx.NewTxn(context.Background()) + if err != nil { + return errors.Trace(err) + } + + rows := [][]types.Datum{row, newRow} + + i = int64(0) + err = t.IterRecords(ctx, t.FirstKey(), t.Cols(), func(h int64, data []types.Datum, cols []*table.Column) (bool, error) { + if !reflect.DeepEqual(data, rows[i]) { + return false, errors.Errorf("%v not equal to %v", data, rows[i]) + } + i++ + return true, nil + }) + if err != nil { + return errors.Trace(err) + } + if i != 2 { + return errors.Errorf("expect 2, got %v", i) + } + + err = s.checkColumnKVExist(ctx, t, newHandle, col, columnValue, true) + if err != nil { + return errors.Trace(err) + } + + // Test remove a row. 
+ err = ctx.NewTxn(context.Background()) + if err != nil { + return errors.Trace(err) + } + + err = t.RemoveRecord(ctx, newHandle, newRow) + if err != nil { + return errors.Trace(err) + } + err = ctx.NewTxn(context.Background()) + if err != nil { + return errors.Trace(err) + } + + i = int64(0) + err = t.IterRecords(ctx, t.FirstKey(), t.Cols(), func(h int64, data []types.Datum, cols []*table.Column) (bool, error) { + i++ + return true, nil + }) + if err != nil { + return errors.Trace(err) + } + if i != 1 { + return errors.Errorf("expect 1, got %v", i) + } + err = s.testGetColumn(t, col.Name.L, false) + if err != nil { + return errors.Trace(err) + } + return nil +} + +func (s *testColumnSuite) checkPublicColumn(ctx sessionctx.Context, d *ddl, tblInfo *model.TableInfo, handle int64, newCol *table.Column, oldRow []types.Datum, columnValue interface{}) error { + t, err := testGetTableWithError(d, s.dbInfo.ID, tblInfo.ID) + if err != nil { + return errors.Trace(err) + } + err = ctx.NewTxn(context.Background()) + if err != nil { + return errors.Trace(err) + } + + i := int64(0) + updatedRow := append(oldRow, types.NewDatum(columnValue)) + err = t.IterRecords(ctx, t.FirstKey(), t.Cols(), func(h int64, data []types.Datum, cols []*table.Column) (bool, error) { + if !reflect.DeepEqual(data, updatedRow) { + return false, errors.Errorf("%v not equal to %v", data, updatedRow) + } + i++ + return true, nil + }) + if err != nil { + return errors.Trace(err) + } + if i != 1 { + return errors.Errorf("expect 1, got %v", i) + } + + // Test add a new row. + err = ctx.NewTxn(context.Background()) + if err != nil { + return errors.Trace(err) + } + + newRow := types.MakeDatums(int64(11), int64(22), int64(33), int64(44)) + handle, err = t.AddRecord(ctx, newRow) + if err != nil { + return errors.Trace(err) + } + err = ctx.NewTxn(context.Background()) + if err != nil { + return errors.Trace(err) + } + + rows := [][]types.Datum{updatedRow, newRow} + + i = int64(0) + t.IterRecords(ctx, t.FirstKey(), t.Cols(), func(h int64, data []types.Datum, cols []*table.Column) (bool, error) { + if !reflect.DeepEqual(data, rows[i]) { + return false, errors.Errorf("%v not equal to %v", data, rows[i]) + } + i++ + return true, nil + }) + if i != 2 { + return errors.Errorf("expect 2, got %v", i) + } + + // Test remove a row. 
+ err = ctx.NewTxn(context.Background()) + if err != nil { + return errors.Trace(err) + } + + err = t.RemoveRecord(ctx, handle, newRow) + if err != nil { + return errors.Trace(err) + } + + err = ctx.NewTxn(context.Background()) + if err != nil { + return errors.Trace(err) + } + + i = int64(0) + err = t.IterRecords(ctx, t.FirstKey(), t.Cols(), func(h int64, data []types.Datum, cols []*table.Column) (bool, error) { + if !reflect.DeepEqual(data, updatedRow) { + return false, errors.Errorf("%v not equal to %v", data, updatedRow) + } + i++ + return true, nil + }) + if err != nil { + return errors.Trace(err) + } + if i != 1 { + return errors.Errorf("expect 1, got %v", i) + } + + err = s.testGetColumn(t, newCol.Name.L, true) + if err != nil { + return errors.Trace(err) + } + return nil +} + +func (s *testColumnSuite) checkAddColumn(state model.SchemaState, d *ddl, tblInfo *model.TableInfo, handle int64, newCol *table.Column, oldRow []types.Datum, columnValue interface{}) error { + ctx := testNewContext(d) + var err error + switch state { + case model.StateNone: + err = errors.Trace(s.checkNoneColumn(ctx, d, tblInfo, handle, newCol, columnValue)) + case model.StateDeleteOnly: + err = errors.Trace(s.checkDeleteOnlyColumn(ctx, d, tblInfo, handle, newCol, oldRow, columnValue)) + case model.StateWriteOnly: + err = errors.Trace(s.checkWriteOnlyColumn(ctx, d, tblInfo, handle, newCol, oldRow, columnValue)) + case model.StateWriteReorganization, model.StateDeleteReorganization: + err = errors.Trace(s.checkReorganizationColumn(ctx, d, tblInfo, handle, newCol, oldRow, columnValue)) + case model.StatePublic: + err = errors.Trace(s.checkPublicColumn(ctx, d, tblInfo, handle, newCol, oldRow, columnValue)) + } + return err +} + +func (s *testColumnSuite) testGetColumn(t table.Table, name string, isExist bool) error { + col := table.FindCol(t.Cols(), name) + if isExist { + if col == nil { + return errors.Errorf("column should not be nil") + } + } else { + if col != nil { + return errors.Errorf("column should be nil") + } + } + return nil +} + +func (s *testColumnSuite) TestAddColumn(c *C) { + d := newDDL( + context.Background(), + WithStore(s.store), + WithLease(testLease), + ) + tblInfo := testTableInfo(c, d, "t", 3) + ctx := testNewContext(d) + + err := ctx.NewTxn(context.Background()) + c.Assert(err, IsNil) + + testCreateTable(c, ctx, d, s.dbInfo, tblInfo) + t := testGetTable(c, d, s.dbInfo.ID, tblInfo.ID) + + oldRow := types.MakeDatums(int64(1), int64(2), int64(3)) + handle, err := t.AddRecord(ctx, oldRow) + c.Assert(err, IsNil) + + txn, err := ctx.Txn(true) + c.Assert(err, IsNil) + err = txn.Commit(context.Background()) + c.Assert(err, IsNil) + + newColName := "c4" + defaultColValue := int64(4) + + var mu sync.Mutex + var hookErr error + checkOK := false + + tc := &TestDDLCallback{} + tc.onJobUpdated = func(job *model.Job) { + mu.Lock() + defer mu.Unlock() + if checkOK { + return + } + + t, err1 := testGetTableWithError(d, s.dbInfo.ID, tblInfo.ID) + if err1 != nil { + hookErr = errors.Trace(err1) + return + } + newCol := table.FindCol(t.(*tables.TableCommon).Columns, newColName) + if newCol == nil { + return + } + + err1 = s.checkAddColumn(newCol.State, d, tblInfo, handle, newCol, oldRow, defaultColValue) + if err1 != nil { + hookErr = errors.Trace(err1) + return + } + + if newCol.State == model.StatePublic { + checkOK = true + } + } + + d.SetHook(tc) + + // Use local ddl for callback test. 
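+ // Stop the suite-level DDL first so that the local DDL instance, which has the callback hook registered above, is the one that processes the jobs below.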
+ s.d.Stop() + + d.Stop() + d.start(context.Background(), nil) + + job := testCreateColumn(c, ctx, d, s.dbInfo, tblInfo, newColName, defaultColValue) + + testCheckJobDone(c, d, job, true) + mu.Lock() + hErr := hookErr + ok := checkOK + mu.Unlock() + c.Assert(errors.ErrorStack(hErr), Equals, "") + c.Assert(ok, IsTrue) + + err = ctx.NewTxn(context.Background()) + c.Assert(err, IsNil) + + job = testDropTable(c, ctx, d, s.dbInfo, tblInfo) + testCheckJobDone(c, d, job, false) + + txn, err = ctx.Txn(true) + c.Assert(err, IsNil) + err = txn.Commit(context.Background()) + c.Assert(err, IsNil) + + d.Stop() + s.d.start(context.Background(), nil) +} + +func (s *testColumnSuite) TestDropColumn(c *C) { + d := newDDL( + context.Background(), + WithStore(s.store), + WithLease(testLease), + ) + tblInfo := testTableInfo(c, d, "t", 4) + ctx := testNewContext(d) + + err := ctx.NewTxn(context.Background()) + c.Assert(err, IsNil) + + testCreateTable(c, ctx, d, s.dbInfo, tblInfo) + t := testGetTable(c, d, s.dbInfo.ID, tblInfo.ID) + + colName := "c4" + defaultColValue := int64(4) + row := types.MakeDatums(int64(1), int64(2), int64(3)) + _, err = t.AddRecord(ctx, append(row, types.NewDatum(defaultColValue))) + c.Assert(err, IsNil) + + txn, err := ctx.Txn(true) + c.Assert(err, IsNil) + err = txn.Commit(context.Background()) + c.Assert(err, IsNil) + + checkOK := false + var hookErr error + var mu sync.Mutex + + tc := &TestDDLCallback{} + tc.onJobUpdated = func(job *model.Job) { + mu.Lock() + defer mu.Unlock() + if checkOK { + return + } + t, err1 := testGetTableWithError(d, s.dbInfo.ID, tblInfo.ID) + if err1 != nil { + hookErr = errors.Trace(err1) + return + } + col := table.FindCol(t.(*tables.TableCommon).Columns, colName) + if col == nil { + checkOK = true + return + } + } + + d.SetHook(tc) + + // Use local ddl for callback test. 
+ s.d.Stop() + + d.Stop() + d.start(context.Background(), nil) + + job := testDropColumn(c, ctx, s.d, s.dbInfo, tblInfo, colName, false) + testCheckJobDone(c, d, job, false) + mu.Lock() + hErr := hookErr + ok := checkOK + mu.Unlock() + c.Assert(hErr, IsNil) + c.Assert(ok, IsTrue) + + err = ctx.NewTxn(context.Background()) + c.Assert(err, IsNil) + + job = testDropTable(c, ctx, d, s.dbInfo, tblInfo) + testCheckJobDone(c, d, job, false) + + txn, err = ctx.Txn(true) + c.Assert(err, IsNil) + err = txn.Commit(context.Background()) + c.Assert(err, IsNil) + + d.Stop() + s.d.start(context.Background(), nil) +} + +func (s *testColumnSuite) TestModifyColumn(c *C) { + d := newDDL( + context.Background(), + WithStore(s.store), + WithLease(testLease), + ) + defer d.Stop() + tests := []struct { + origin string + to string + err error + }{ + {"int", "bigint", nil}, + {"int", "int unsigned", errUnsupportedModifyColumn.GenWithStackByArgs("length 10 is less than origin 11")}, + {"varchar(10)", "text", nil}, + {"varbinary(10)", "blob", nil}, + {"text", "blob", errUnsupportedModifyCharset.GenWithStackByArgs("charset from utf8mb4 to binary")}, + {"varchar(10)", "varchar(8)", errUnsupportedModifyColumn.GenWithStackByArgs("length 8 is less than origin 10")}, + {"varchar(10)", "varchar(11)", nil}, + {"varchar(10) character set utf8 collate utf8_bin", "varchar(10) character set utf8", nil}, + {"decimal(2,1)", "decimal(3,2)", errUnsupportedModifyColumn.GenWithStackByArgs("can't change decimal column precision")}, + {"decimal(2,1)", "decimal(2,2)", errUnsupportedModifyColumn.GenWithStackByArgs("can't change decimal column precision")}, + {"decimal(2,1)", "decimal(2,1)", nil}, + } + for _, tt := range tests { + ftA := s.colDefStrToFieldType(c, tt.origin) + ftB := s.colDefStrToFieldType(c, tt.to) + err := modifiable(ftA, ftB) + if err == nil { + c.Assert(tt.err, IsNil) + } else { + c.Assert(err.Error(), Equals, tt.err.Error()) + } + } +} + +func (s *testColumnSuite) colDefStrToFieldType(c *C, str string) *types.FieldType { + sqlA := "alter table t modify column a " + str + stmt, err := parser.New().ParseOneStmt(sqlA, "", "") + c.Assert(err, IsNil) + colDef := stmt.(*ast.AlterTableStmt).Specs[0].NewColumns[0] + col, _, err := buildColumnAndConstraint(nil, 0, colDef, nil) + c.Assert(err, IsNil) + return &col.FieldType +} + +func (s *testColumnSuite) TestFieldCase(c *C) { + var fields = []string{"field", "Field"} + var colDefs = make([]*ast.ColumnDef, len(fields)) + colObjects := make([]interface{}, 0, len(fields)) + for i, name := range fields { + colDefs[i] = &ast.ColumnDef{ + Name: &ast.ColumnName{ + Schema: model.NewCIStr("TestSchema"), + Table: model.NewCIStr("TestTable"), + Name: model.NewCIStr(name), + }, + } + colObjects = append(colObjects, colDefs[i]) + } + err := checkDuplicateColumn(colObjects) + c.Assert(err.Error(), Equals, infoschema.ErrColumnExists.GenWithStackByArgs("Field").Error()) +} diff --git a/ddl/db_change_test.go b/ddl/db_change_test.go new file mode 100644 index 0000000..2daa9c4 --- /dev/null +++ b/ddl/db_change_test.go @@ -0,0 +1,535 @@ +// Copyright 2017 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package ddl_test + +import ( + "context" + "fmt" + "strings" + "sync" + "sync/atomic" + "time" + + . "github.com/pingcap/check" + "github.com/pingcap/errors" + "github.com/pingcap/log" + "github.com/pingcap/tidb/ddl" + "github.com/pingcap/tidb/domain" + "github.com/pingcap/tidb/infoschema" + "github.com/pingcap/tidb/kv" + "github.com/pingcap/tidb/parser" + "github.com/pingcap/tidb/parser/model" + "github.com/pingcap/tidb/parser/terror" + "github.com/pingcap/tidb/session" + "github.com/pingcap/tidb/sessionctx" + "github.com/pingcap/tidb/store/mockstore" + "github.com/pingcap/tidb/util/admin" + "github.com/pingcap/tidb/util/testkit" + "go.uber.org/zap" +) + +var _ = Suite(&testStateChangeSuite{}) +var _ = SerialSuites(&serialTestStateChangeSuite{}) + +type serialTestStateChangeSuite struct { + testStateChangeSuiteBase +} + +type testStateChangeSuite struct { + testStateChangeSuiteBase +} + +type testStateChangeSuiteBase struct { + lease time.Duration + store kv.Storage + dom *domain.Domain + se session.Session + p *parser.Parser + preSQL string +} + +func (s *testStateChangeSuiteBase) SetUpSuite(c *C) { + s.lease = 200 * time.Millisecond + ddl.WaitTimeWhenErrorOccured = 1 * time.Microsecond + var err error + s.store, err = mockstore.NewMockTikvStore() + c.Assert(err, IsNil) + session.SetSchemaLease(s.lease) + s.dom, err = session.BootstrapSession(s.store) + c.Assert(err, IsNil) + s.se, err = session.CreateSession4Test(s.store) + c.Assert(err, IsNil) + _, err = s.se.Execute(context.Background(), "create database test_db_state default charset utf8 default collate utf8_bin") + c.Assert(err, IsNil) + _, err = s.se.Execute(context.Background(), "use test_db_state") + c.Assert(err, IsNil) + s.p = parser.New() +} + +func (s *testStateChangeSuiteBase) TearDownSuite(c *C) { + s.se.Execute(context.Background(), "drop database if exists test_db_state") + s.se.Close() + s.dom.Close() + s.store.Close() +} + +type sqlWithErr struct { + sql string + expectErr error +} + +type expectQuery struct { + sql string + rows []string +} + +// TestDeleteOnly tests whether the correct columns are used in PhysicalIndexScan's ToPB function.
+func (s *testStateChangeSuite) TestDeleteOnly(c *C) { + sqls := make([]sqlWithErr, 1) + sqls[0] = sqlWithErr{"insert t set c1 = 'c1_insert', c3 = '2018-02-12', c4 = 1", + errors.Errorf("Can't find column c1")} + dropColumnSQL := "alter table t drop column c1" + s.runTestInSchemaState(c, model.StateDeleteOnly, "", dropColumnSQL, sqls, nil) +} + +func (s *testStateChangeSuiteBase) runTestInSchemaState(c *C, state model.SchemaState, tableName, alterTableSQL string, + sqlWithErrs []sqlWithErr, expectQuery *expectQuery) { + _, err := s.se.Execute(context.Background(), `create table t ( + c1 varchar(64), + c2 varchar(64), + c3 varchar(64), + c4 int primary key, + unique key idx2 (c2, c3))`) + c.Assert(err, IsNil) + defer s.se.Execute(context.Background(), "drop table t") + _, err = s.se.Execute(context.Background(), "insert into t values('a', 'N', '2017-07-01', 8)") + c.Assert(err, IsNil) + + callback := &ddl.TestDDLCallback{} + prevState := model.StateNone + var checkErr error + times := 0 + se, err := session.CreateSession(s.store) + c.Assert(err, IsNil) + _, err = se.Execute(context.Background(), "use test_db_state") + c.Assert(err, IsNil) + callback.OnJobUpdatedExported = func(job *model.Job) { + if job.SchemaState == prevState || checkErr != nil || times >= 3 { + return + } + times++ + if job.SchemaState != state { + return + } + for _, sqlWithErr := range sqlWithErrs { + _, err = se.Execute(context.Background(), sqlWithErr.sql) + if !terror.ErrorEqual(err, sqlWithErr.expectErr) { + checkErr = err + break + } + } + } + d := s.dom.DDL() + originalCallback := d.GetHook() + d.(ddl.DDLForTest).SetHook(callback) + _, err = s.se.Execute(context.Background(), alterTableSQL) + c.Assert(err, IsNil) + c.Assert(errors.ErrorStack(checkErr), Equals, "") + d.(ddl.DDLForTest).SetHook(originalCallback) + + if expectQuery != nil { + tk := testkit.NewTestKit(c, s.store) + tk.MustExec("use test_db_state") + result, err := s.execQuery(tk, expectQuery.sql) + c.Assert(err, IsNil) + err = checkResult(result, testkit.Rows(expectQuery.rows...)) + c.Assert(err, IsNil) + } +} + +func (s *testStateChangeSuiteBase) execQuery(tk *testkit.TestKit, sql string) (*testkit.Result, error) { + comment := Commentf("sql:%s", sql) + rs, err := tk.Exec(sql) + if err != nil { + return nil, err + } + result := tk.ResultSetToResult(rs, comment) + return result, nil +} + +func checkResult(result *testkit.Result, expected [][]interface{}) error { + got := fmt.Sprintf("%s", result.Rows()) + need := fmt.Sprintf("%s", expected) + if got != need { + return fmt.Errorf("need %v, but got %v", need, got) + } + return nil +} + +func (s *testStateChangeSuite) TestParallelAlterModifyColumn(c *C) { + sql := "ALTER TABLE t MODIFY COLUMN b int;" + f := func(c *C, err1, err2 error) { + c.Assert(err1, IsNil) + c.Assert(err2, IsNil) + _, err := s.se.Execute(context.Background(), "select * from t") + c.Assert(err, IsNil) + } + s.testControlParallelExecSQL(c, sql, sql, f) +} + +func (s *testStateChangeSuite) TestParallelAddColumAndSetDefaultValue(c *C) { + _, err := s.se.Execute(context.Background(), "use test_db_state") + c.Assert(err, IsNil) + _, err = s.se.Execute(context.Background(), `create table tx ( + c1 varchar(64), + c2 varchar(64), + primary key idx2 (c2, c1))`) + c.Assert(err, IsNil) + _, err = s.se.Execute(context.Background(), "insert into tx values('a', 'N')") + c.Assert(err, IsNil) + defer s.se.Execute(context.Background(), "drop table tx") + + sql1 := "alter table tx add column cx int" + sql2 := "alter table tx alter c2 set default 'N'" 
+ + f := func(c *C, err1, err2 error) { + c.Assert(err1, IsNil) + c.Assert(err2, IsNil) + _, err := s.se.Execute(context.Background(), "delete from tx where c1='a'") + c.Assert(err, IsNil) + } + s.testControlParallelExecSQL(c, sql1, sql2, f) +} + +func (s *testStateChangeSuite) TestParallelChangeColumnName(c *C) { + sql1 := "ALTER TABLE t CHANGE a aa int;" + sql2 := "ALTER TABLE t CHANGE b aa int;" + f := func(c *C, err1, err2 error) { + // Make sure only a DDL encounters the error of 'duplicate column name'. + var oneErr error + if (err1 != nil && err2 == nil) || (err1 == nil && err2 != nil) { + if err1 != nil { + oneErr = err1 + } else { + oneErr = err2 + } + } + c.Assert(oneErr.Error(), Equals, "[schema:1060]Duplicate column name 'aa'") + } + s.testControlParallelExecSQL(c, sql1, sql2, f) +} + +func (s *testStateChangeSuite) TestParallelAlterAddIndex(c *C) { + sql1 := "ALTER TABLE t add index index_b(b);" + sql2 := "CREATE INDEX index_b ON t (c);" + f := func(c *C, err1, err2 error) { + c.Assert(err1, IsNil) + c.Assert(err2.Error(), Equals, "[ddl:1061]index already exist index_b") + } + s.testControlParallelExecSQL(c, sql1, sql2, f) +} + +func (s *testStateChangeSuite) TestParallelDropColumn(c *C) { + sql := "ALTER TABLE t drop COLUMN c ;" + f := func(c *C, err1, err2 error) { + c.Assert(err1, IsNil) + c.Assert(err2.Error(), Equals, "[ddl:1091]column c doesn't exist") + } + s.testControlParallelExecSQL(c, sql, sql, f) +} + +func (s *testStateChangeSuite) TestParallelDropIndex(c *C) { + sql1 := "alter table t drop index idx1 ;" + sql2 := "alter table t drop index idx2 ;" + f := func(c *C, err1, err2 error) { + c.Assert(err1, IsNil) + c.Assert(err2.Error(), Equals, "[autoid:1075]Incorrect table definition; there can be only one auto column and it must be defined as a key") + } + s.testControlParallelExecSQL(c, sql1, sql2, f) +} + +type checkRet func(c *C, err1, err2 error) + +func (s *testStateChangeSuiteBase) testControlParallelExecSQL(c *C, sql1, sql2 string, f checkRet) { + _, err := s.se.Execute(context.Background(), "use test_db_state") + c.Assert(err, IsNil) + _, err = s.se.Execute(context.Background(), "create table t(a int, b int, c int, d int auto_increment,e int, index idx1(d), index idx2(d,e))") + c.Assert(err, IsNil) + if len(s.preSQL) != 0 { + _, err := s.se.Execute(context.Background(), s.preSQL) + c.Assert(err, IsNil) + } + defer s.se.Execute(context.Background(), "drop table t") + + _, err = s.se.Execute(context.Background(), "drop database if exists t_part") + c.Assert(err, IsNil) + s.se.Execute(context.Background(), `create table t_part (a int key);`) + + callback := &ddl.TestDDLCallback{} + times := 0 + callback.OnJobUpdatedExported = func(job *model.Job) { + if times != 0 { + return + } + var qLen int + for { + kv.RunInNewTxn(s.store, false, func(txn kv.Transaction) error { + jobs, err1 := admin.GetDDLJobs(txn) + if err1 != nil { + return err1 + } + qLen = len(jobs) + return nil + }) + if qLen == 2 { + break + } + time.Sleep(5 * time.Millisecond) + } + times++ + } + d := s.dom.DDL() + originalCallback := d.GetHook() + defer d.(ddl.DDLForTest).SetHook(originalCallback) + d.(ddl.DDLForTest).SetHook(callback) + + wg := sync.WaitGroup{} + var err1 error + var err2 error + se, err := session.CreateSession(s.store) + c.Assert(err, IsNil) + _, err = se.Execute(context.Background(), "use test_db_state") + c.Assert(err, IsNil) + se1, err := session.CreateSession(s.store) + c.Assert(err, IsNil) + _, err = se1.Execute(context.Background(), "use test_db_state") + c.Assert(err, 
IsNil) + wg.Add(2) + ch := make(chan struct{}) + // Make sure the sql1 is put into the DDLJobQueue. + go func() { + var qLen int + for { + kv.RunInNewTxn(s.store, false, func(txn kv.Transaction) error { + jobs, err3 := admin.GetDDLJobs(txn) + if err3 != nil { + return err3 + } + qLen = len(jobs) + return nil + }) + if qLen == 1 { + // Make sure sql2 is executed after the sql1. + close(ch) + break + } + time.Sleep(5 * time.Millisecond) + } + }() + go func() { + defer wg.Done() + _, err1 = se.Execute(context.Background(), sql1) + }() + go func() { + defer wg.Done() + <-ch + _, err2 = se1.Execute(context.Background(), sql2) + }() + + wg.Wait() + f(c, err1, err2) +} + +func (s *testStateChangeSuite) testParallelExecSQL(c *C, sql string) { + se, err := session.CreateSession(s.store) + c.Assert(err, IsNil) + _, err = se.Execute(context.Background(), "use test_db_state") + c.Assert(err, IsNil) + + se1, err1 := session.CreateSession(s.store) + c.Assert(err1, IsNil) + _, err = se1.Execute(context.Background(), "use test_db_state") + c.Assert(err, IsNil) + + var err2, err3 error + wg := sync.WaitGroup{} + + callback := &ddl.TestDDLCallback{} + once := sync.Once{} + callback.OnJobUpdatedExported = func(job *model.Job) { + // sleep a while, let other job enqueue. + once.Do(func() { + time.Sleep(time.Millisecond * 10) + }) + } + + d := s.dom.DDL() + originalCallback := d.GetHook() + defer d.(ddl.DDLForTest).SetHook(originalCallback) + d.(ddl.DDLForTest).SetHook(callback) + + wg.Add(2) + go func() { + defer wg.Done() + _, err2 = se.Execute(context.Background(), sql) + }() + + go func() { + defer wg.Done() + _, err3 = se1.Execute(context.Background(), sql) + }() + wg.Wait() + c.Assert(err2, IsNil) + c.Assert(err3, IsNil) +} + +// TestCreateTableIfNotExists parallel exec create table if not exists xxx. No error returns is expected. +func (s *testStateChangeSuite) TestCreateTableIfNotExists(c *C) { + defer s.se.Execute(context.Background(), "drop table test_not_exists") + s.testParallelExecSQL(c, "create table if not exists test_not_exists(a int);") +} + +// TestCreateDBIfNotExists parallel exec create database if not exists xxx. No error returns is expected. +func (s *testStateChangeSuite) TestCreateDBIfNotExists(c *C) { + defer s.se.Execute(context.Background(), "drop database test_not_exists") + s.testParallelExecSQL(c, "create database if not exists test_not_exists;") +} + +// TestDDLIfNotExists parallel exec some DDLs with `if not exists` clause. No error returns is expected. +func (s *testStateChangeSuite) TestDDLIfNotExists(c *C) { + defer s.se.Execute(context.Background(), "drop table test_not_exists") + _, err := s.se.Execute(context.Background(), "create table if not exists test_not_exists(a int)") + c.Assert(err, IsNil) + + // ADD COLUMN + s.testParallelExecSQL(c, "alter table test_not_exists add column if not exists b int") + + // ADD INDEX + s.testParallelExecSQL(c, "alter table test_not_exists add index if not exists idx_b (b)") + + // CREATE INDEX + s.testParallelExecSQL(c, "create index if not exists idx_b on test_not_exists (b)") +} + +// TestDDLIfExists parallel exec some DDLs with `if exists` clause. No error returns is expected. 
+func (s *testStateChangeSuite) TestDDLIfExists(c *C) { + defer func() { + s.se.Execute(context.Background(), "drop table test_exists") + s.se.Execute(context.Background(), "drop table test_exists_2") + }() + _, err := s.se.Execute(context.Background(), "create table if not exists test_exists (a int key, b int)") + c.Assert(err, IsNil) + + // DROP COLUMN + s.testParallelExecSQL(c, "alter table test_exists drop column if exists b") // only `a` exists now + + // CHANGE COLUMN + s.testParallelExecSQL(c, "alter table test_exists change column if exists a c int") // only `c` exists now + + // MODIFY COLUMN + s.testParallelExecSQL(c, "alter table test_exists modify column if exists a bigint") + + // DROP INDEX + _, err = s.se.Execute(context.Background(), "alter table test_exists add index idx_c (c)") + c.Assert(err, IsNil) + s.testParallelExecSQL(c, "alter table test_exists drop index if exists idx_c") +} + +// TestParallelDDLBeforeRunDDLJob tests a session executing DDL with an outdated information schema. +// This test is used to simulate the following conditions: +// In a cluster, TiDB "a" executes the DDL. +// TiDB "b" fails to load the schema, then TiDB "b" executes the DDL statement associated with the DDL statement executed by "a". +func (s *testStateChangeSuite) TestParallelDDLBeforeRunDDLJob(c *C) { + defer s.se.Execute(context.Background(), "drop table test_table") + _, err := s.se.Execute(context.Background(), "use test_db_state") + c.Assert(err, IsNil) + _, err = s.se.Execute(context.Background(), "create table test_table (c1 int, c2 int default 1, index (c1))") + c.Assert(err, IsNil) + + // Create two sessions. + se, err := session.CreateSession(s.store) + c.Assert(err, IsNil) + _, err = se.Execute(context.Background(), "use test_db_state") + c.Assert(err, IsNil) + se1, err := session.CreateSession(s.store) + c.Assert(err, IsNil) + _, err = se1.Execute(context.Background(), "use test_db_state") + c.Assert(err, IsNil) + + intercept := &ddl.TestInterceptor{} + firstConnID := uint64(1) + finishedCnt := int32(0) + interval := 5 * time.Millisecond + var sessionCnt int32 // sessionCnt is the number of sessions that go into the OnGetInfoSchema function. + intercept.OnGetInfoSchemaExported = func(ctx sessionctx.Context, is infoschema.InfoSchema) infoschema.InfoSchema { + // The following code is for testing. + // Make sure the two sessions get the same information schema before executing DDL. + // After the first session executes its DDL, the second session executes its DDL. + var info infoschema.InfoSchema + atomic.AddInt32(&sessionCnt, 1) + for { + // Make sure there are two sessions running here. + if atomic.LoadInt32(&sessionCnt) == 2 { + info = is + break + } + // Print a log to notify if TestParallelDDLBeforeRunDDLJob hangs up. + log.Info("sleep in TestParallelDDLBeforeRunDDLJob", zap.String("interval", interval.String())) + time.Sleep(interval) + } + + currID := ctx.GetSessionVars().ConnectionID + for { + seCnt := atomic.LoadInt32(&sessionCnt) + // Make sure the two sessions have got the same information schema. The first session can continue to go on, + // or, once the first session has finished this SQL (seCnt == finishedCnt), the other sessions can continue to go on.
+ if currID == firstConnID || seCnt == finishedCnt { + break + } + // Print log to notify if TestParallelDDLBeforeRunDDLJob hang up + log.Info("sleep in TestParallelDDLBeforeRunDDLJob", zap.String("interval", interval.String())) + time.Sleep(interval) + } + + return info + } + d := s.dom.DDL() + d.(ddl.DDLForTest).SetInterceptoror(intercept) + + // Make sure the connection 1 executes a SQL before the connection 2. + // And the connection 2 executes a SQL with an outdated information schema. + wg := sync.WaitGroup{} + wg.Add(2) + go func() { + defer wg.Done() + + se.SetConnectionID(firstConnID) + _, err1 := se.Execute(context.Background(), "alter table test_table drop column c2") + c.Assert(err1, IsNil) + atomic.StoreInt32(&sessionCnt, finishedCnt) + }() + go func() { + defer wg.Done() + + se1.SetConnectionID(2) + _, err2 := se1.Execute(context.Background(), "alter table test_table add column c2 int") + c.Assert(err2, NotNil) + c.Assert(strings.Contains(err2.Error(), "Information schema is changed"), IsTrue) + }() + + wg.Wait() + + intercept = &ddl.TestInterceptor{} + d.(ddl.DDLForTest).SetInterceptoror(intercept) +} diff --git a/ddl/db_integration_test.go b/ddl/db_integration_test.go new file mode 100644 index 0000000..593a400 --- /dev/null +++ b/ddl/db_integration_test.go @@ -0,0 +1,645 @@ +// Copyright 2018 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package ddl_test + +import ( + "context" + "fmt" + "strings" + "sync/atomic" + "time" + + . 
"github.com/pingcap/check" + "github.com/pingcap/errors" + "github.com/pingcap/tidb/ddl" + "github.com/pingcap/tidb/domain" + "github.com/pingcap/tidb/infoschema" + "github.com/pingcap/tidb/kv" + "github.com/pingcap/tidb/meta" + "github.com/pingcap/tidb/parser/ast" + "github.com/pingcap/tidb/parser/model" + "github.com/pingcap/tidb/parser/mysql" + "github.com/pingcap/tidb/parser/terror" + "github.com/pingcap/tidb/session" + "github.com/pingcap/tidb/sessionctx" + "github.com/pingcap/tidb/store/mockstore" + "github.com/pingcap/tidb/store/mockstore/mocktikv" + "github.com/pingcap/tidb/table" + "github.com/pingcap/tidb/types" + "github.com/pingcap/tidb/util/testkit" +) + +var _ = Suite(&testIntegrationSuite1{&testIntegrationSuite{}}) +var _ = Suite(&testIntegrationSuite2{&testIntegrationSuite{}}) +var _ = Suite(&testIntegrationSuite3{&testIntegrationSuite{}}) +var _ = Suite(&testIntegrationSuite4{&testIntegrationSuite{}}) +var _ = Suite(&testIntegrationSuite5{&testIntegrationSuite{}}) + +type testIntegrationSuite struct { + lease time.Duration + cluster *mocktikv.Cluster + mvccStore mocktikv.MVCCStore + store kv.Storage + dom *domain.Domain + ctx sessionctx.Context + tk *testkit.TestKit +} + +func setupIntegrationSuite(s *testIntegrationSuite, c *C) { + var err error + s.lease = 50 * time.Millisecond + ddl.WaitTimeWhenErrorOccured = 0 + + s.cluster = mocktikv.NewCluster() + mocktikv.BootstrapWithSingleStore(s.cluster) + s.mvccStore = mocktikv.MustNewMVCCStore() + s.store, err = mockstore.NewMockTikvStore( + mockstore.WithCluster(s.cluster), + mockstore.WithMVCCStore(s.mvccStore), + ) + c.Assert(err, IsNil) + session.SetSchemaLease(s.lease) + session.DisableStats4Test() + s.dom, err = session.BootstrapSession(s.store) + c.Assert(err, IsNil) + + se, err := session.CreateSession4Test(s.store) + c.Assert(err, IsNil) + s.ctx = se.(sessionctx.Context) + _, err = se.Execute(context.Background(), "create database test_db") + c.Assert(err, IsNil) + s.tk = testkit.NewTestKit(c, s.store) +} + +func tearDownIntegrationSuiteTest(s *testIntegrationSuite, c *C) { + tk := testkit.NewTestKit(c, s.store) + tk.MustExec("use test") + r := tk.MustQuery("show tables") + for _, tb := range r.Rows() { + tableName := tb[0] + tk.MustExec(fmt.Sprintf("drop table %v", tableName)) + } +} + +func tearDownIntegrationSuite(s *testIntegrationSuite, c *C) { + s.dom.Close() + s.store.Close() +} + +func (s *testIntegrationSuite) SetUpSuite(c *C) { + setupIntegrationSuite(s, c) +} + +func (s *testIntegrationSuite) TearDownSuite(c *C) { + tearDownIntegrationSuite(s, c) +} + +type testIntegrationSuite1 struct{ *testIntegrationSuite } +type testIntegrationSuite2 struct{ *testIntegrationSuite } + +func (s *testIntegrationSuite2) TearDownTest(c *C) { + tearDownIntegrationSuiteTest(s.testIntegrationSuite, c) +} + +type testIntegrationSuite3 struct{ *testIntegrationSuite } +type testIntegrationSuite4 struct{ *testIntegrationSuite } +type testIntegrationSuite5 struct{ *testIntegrationSuite } + +// TestInvalidNameWhenCreateTable for issue #3848 +func (s *testIntegrationSuite3) TestInvalidNameWhenCreateTable(c *C) { + tk := testkit.NewTestKit(c, s.store) + + tk.MustExec("USE test;") + + _, err := tk.Exec("create table t(xxx.t.a bigint)") + c.Assert(err, NotNil) + c.Assert(terror.ErrorEqual(err, ddl.ErrWrongDBName), IsTrue, Commentf("err %v", err)) + + _, err = tk.Exec("create table t(test.tttt.a bigint)") + c.Assert(err, NotNil) + c.Assert(terror.ErrorEqual(err, ddl.ErrWrongTableName), IsTrue, Commentf("err %v", err)) + + _, err = 
tk.Exec("create table t(t.tttt.a bigint)") + c.Assert(err, NotNil) + c.Assert(terror.ErrorEqual(err, ddl.ErrWrongDBName), IsTrue, Commentf("err %v", err)) +} + +// TestCreateTableIfNotExists for issue #6879 +func (s *testIntegrationSuite3) TestCreateTableIfNotExists(c *C) { + tk := testkit.NewTestKit(c, s.store) + + tk.MustExec("USE test;") + + tk.MustExec("create table ct1(a bigint)") + tk.MustExec("create table ct(a bigint)") + + // Test duplicate create-table without `LIKE` clause + tk.MustExec("create table if not exists ct(b bigint, c varchar(60));") + warnings := tk.Se.GetSessionVars().StmtCtx.GetWarnings() + c.Assert(len(warnings), GreaterEqual, 1) + lastWarn := warnings[len(warnings)-1] + c.Assert(terror.ErrorEqual(infoschema.ErrTableExists, lastWarn.Err), IsTrue) +} + +// for issue #9910 +func (s *testIntegrationSuite2) TestCreateTableWithKeyWord(c *C) { + tk := testkit.NewTestKit(c, s.store) + + tk.MustExec("USE test;") + + _, err := tk.Exec("create table t1(pump varchar(20), drainer varchar(20), node_id varchar(20), node_state varchar(20));") + c.Assert(err, IsNil) +} + +func (s *testIntegrationSuite1) TestUniqueKeyNullValue(c *C) { + tk := testkit.NewTestKit(c, s.store) + tk.MustExec("USE test") + tk.MustExec("create table t(a int primary key, b varchar(255))") + + tk.MustExec("insert into t values(1, NULL)") + tk.MustExec("insert into t values(2, NULL)") + tk.MustExec("alter table t add unique index b(b);") + res := tk.MustQuery("select count(*) from t use index(b);") + res.Check(testkit.Rows("2")) +} + +func (s *testIntegrationSuite3) TestEndIncluded(c *C) { + tk := testkit.NewTestKit(c, s.store) + + tk.MustExec("USE test") + tk.MustExec("create table t(a int, b int)") + for i := 0; i < ddl.DefaultTaskHandleCnt+1; i++ { + tk.MustExec("insert into t values(1, 1)") + } + tk.MustExec("alter table t add index b(b);") +} + +// TestModifyColumnAfterAddIndex Issue 5134 +func (s *testIntegrationSuite3) TestModifyColumnAfterAddIndex(c *C) { + tk := testkit.NewTestKit(c, s.store) + tk.MustExec("use test") + tk.MustExec("create table city (city VARCHAR(2) KEY);") + tk.MustExec("alter table city change column city city varchar(50);") + tk.MustExec(`insert into city values ("abc"), ("abd");`) +} + +func (s *testIntegrationSuite3) TestIssue2293(c *C) { + tk := testkit.NewTestKit(c, s.store) + tk.MustExec("use test") + tk.MustExec("create table t_issue_2293 (a int)") + tk.MustGetErrCode("alter table t_issue_2293 add b int not null default 'a'", mysql.ErrInvalidDefault) + tk.MustExec("insert into t_issue_2293 value(1)") + tk.MustQuery("select * from t_issue_2293").Check(testkit.Rows("1")) +} + +func (s *testIntegrationSuite1) TestIndexLength(c *C) { + tk := testkit.NewTestKit(c, s.store) + tk.MustExec("use test") + tk.MustExec("create table idx_len(a int(0), b timestamp(0), c datetime(0), d time(0), f float(0), g decimal(0))") + tk.MustExec("create index idx on idx_len(a)") + tk.MustExec("alter table idx_len add index idxa(a)") + tk.MustExec("create index idx1 on idx_len(b)") + tk.MustExec("alter table idx_len add index idxb(b)") + tk.MustExec("create index idx2 on idx_len(c)") + tk.MustExec("alter table idx_len add index idxc(c)") + tk.MustExec("create index idx3 on idx_len(d)") + tk.MustExec("alter table idx_len add index idxd(d)") + tk.MustExec("create index idx4 on idx_len(f)") + tk.MustExec("alter table idx_len add index idxf(f)") + tk.MustExec("create index idx5 on idx_len(g)") + tk.MustExec("alter table idx_len add index idxg(g)") + tk.MustExec("create table idx_len1(a int(0), b 
timestamp(0), c datetime(0), d time(0), f float(0), g decimal(0), index(a), index(b), index(c), index(d), index(f), index(g))") +} + +func (s *testIntegrationSuite3) TestIssue3833(c *C) { + tk := testkit.NewTestKit(c, s.store) + tk.MustExec("use test") + tk.MustExec("create table issue3833 (b char(0), c binary(0), d varchar(0))") + tk.MustGetErrCode("create index idx on issue3833 (b)", mysql.ErrWrongKeyColumn) + tk.MustGetErrCode("alter table issue3833 add index idx (b)", mysql.ErrWrongKeyColumn) + tk.MustGetErrCode("create table issue3833_2 (b char(0), c binary(0), d varchar(0), index(b))", mysql.ErrWrongKeyColumn) + tk.MustGetErrCode("create index idx on issue3833 (c)", mysql.ErrWrongKeyColumn) + tk.MustGetErrCode("alter table issue3833 add index idx (c)", mysql.ErrWrongKeyColumn) + tk.MustGetErrCode("create table issue3833_2 (b char(0), c binary(0), d varchar(0), index(c))", mysql.ErrWrongKeyColumn) + tk.MustGetErrCode("create index idx on issue3833 (d)", mysql.ErrWrongKeyColumn) + tk.MustGetErrCode("alter table issue3833 add index idx (d)", mysql.ErrWrongKeyColumn) + tk.MustGetErrCode("create table issue3833_2 (b char(0), c binary(0), d varchar(0), index(d))", mysql.ErrWrongKeyColumn) +} + +func (s *testIntegrationSuite3) TestTableDDLWithFloatType(c *C) { + s.tk = testkit.NewTestKit(c, s.store) + s.tk.MustExec("use test") + s.tk.MustExec("drop table if exists t") + s.tk.MustGetErrCode("create table t (a decimal(1, 2))", mysql.ErrMBiggerThanD) + s.tk.MustGetErrCode("create table t (a float(1, 2))", mysql.ErrMBiggerThanD) + s.tk.MustGetErrCode("create table t (a double(1, 2))", mysql.ErrMBiggerThanD) + s.tk.MustExec("create table t (a double(1, 1))") + s.tk.MustGetErrCode("alter table t add column b decimal(1, 2)", mysql.ErrMBiggerThanD) + // Adding multiple columns is not supported yet, so there is no test case for it.
+ s.tk.MustGetErrCode("alter table t modify column a float(1, 4)", mysql.ErrMBiggerThanD) + s.tk.MustGetErrCode("alter table t change column a aa float(1, 4)", mysql.ErrMBiggerThanD) + s.tk.MustExec("drop table t") +} + +func (s *testIntegrationSuite1) TestTableDDLWithTimeType(c *C) { + s.tk = testkit.NewTestKit(c, s.store) + s.tk.MustExec("use test") + s.tk.MustExec("drop table if exists t") + s.tk.MustGetErrCode("create table t (a time(7))", mysql.ErrTooBigPrecision) + s.tk.MustGetErrCode("create table t (a datetime(7))", mysql.ErrTooBigPrecision) + s.tk.MustGetErrCode("create table t (a timestamp(7))", mysql.ErrTooBigPrecision) + _, err := s.tk.Exec("create table t (a time(-1))") + c.Assert(err, NotNil) + s.tk.MustExec("create table t (a datetime)") + s.tk.MustGetErrCode("alter table t add column b time(7)", mysql.ErrTooBigPrecision) + s.tk.MustGetErrCode("alter table t add column b datetime(7)", mysql.ErrTooBigPrecision) + s.tk.MustGetErrCode("alter table t add column b timestamp(7)", mysql.ErrTooBigPrecision) + s.tk.MustGetErrCode("alter table t modify column a time(7)", mysql.ErrTooBigPrecision) + s.tk.MustGetErrCode("alter table t modify column a datetime(7)", mysql.ErrTooBigPrecision) + s.tk.MustGetErrCode("alter table t modify column a timestamp(7)", mysql.ErrTooBigPrecision) + s.tk.MustGetErrCode("alter table t change column a aa time(7)", mysql.ErrTooBigPrecision) + s.tk.MustGetErrCode("alter table t change column a aa datetime(7)", mysql.ErrTooBigPrecision) + s.tk.MustGetErrCode("alter table t change column a aa timestamp(7)", mysql.ErrTooBigPrecision) + s.tk.MustExec("alter table t change column a aa datetime(0)") + s.tk.MustExec("drop table t") +} + +func (s *testIntegrationSuite5) TestBackwardCompatibility(c *C) { + tk := testkit.NewTestKit(c, s.store) + tk.MustExec("create database if not exists test_backward_compatibility") + defer tk.MustExec("drop database test_backward_compatibility") + tk.MustExec("use test_backward_compatibility") + tk.MustExec("create table t(a int primary key, b int)") + for i := 0; i < 200; i++ { + tk.MustExec(fmt.Sprintf("insert into t values(%v, %v)", i, i)) + } + + // alter table t add index idx_b(b); + is := s.dom.InfoSchema() + schemaName := model.NewCIStr("test_backward_compatibility") + tableName := model.NewCIStr("t") + schema, ok := is.SchemaByName(schemaName) + c.Assert(ok, IsTrue) + tbl, err := is.TableByName(schemaName, tableName) + c.Assert(err, IsNil) + + // Split the table. + s.cluster.SplitTable(s.mvccStore, tbl.Meta().ID, 100) + + unique := false + indexName := model.NewCIStr("idx_b") + idxColName := &ast.IndexPartSpecification{ + Column: &ast.ColumnName{ + Schema: schemaName, + Table: tableName, + Name: model.NewCIStr("b"), + }, + Length: types.UnspecifiedLength, + } + idxColNames := []*ast.IndexPartSpecification{idxColName} + var indexOption *ast.IndexOption + job := &model.Job{ + SchemaID: schema.ID, + TableID: tbl.Meta().ID, + Type: model.ActionAddIndex, + BinlogInfo: &model.HistoryInfo{}, + Args: []interface{}{unique, indexName, idxColNames, indexOption}, + } + txn, err := s.store.Begin() + c.Assert(err, IsNil) + t := meta.NewMeta(txn) + job.ID, err = t.GenGlobalID() + c.Assert(err, IsNil) + job.Version = 1 + job.StartTS = txn.StartTS() + + // Simulate old TiDB init the add index job, old TiDB will not init the model.Job.ReorgMeta field, + // if we set job.SnapshotVer here, can simulate the behavior. 
+ job.SnapshotVer = txn.StartTS() + err = t.EnQueueDDLJob(job) + c.Assert(err, IsNil) + err = txn.Commit(context.Background()) + c.Assert(err, IsNil) + ticker := time.NewTicker(s.lease) + defer ticker.Stop() + for range ticker.C { + historyJob, err := s.getHistoryDDLJob(job.ID) + c.Assert(err, IsNil) + if historyJob == nil { + + continue + } + c.Assert(historyJob.Error, IsNil) + + if historyJob.IsSynced() { + break + } + } + + // finished add index + +} + +func (s *testIntegrationSuite3) TestMultiRegionGetTableEndHandle(c *C) { + tk := testkit.NewTestKit(c, s.store) + tk.MustExec("drop database if exists test_get_endhandle") + tk.MustExec("create database test_get_endhandle") + tk.MustExec("use test_get_endhandle") + + tk.MustExec("create table t(a bigint PRIMARY KEY, b int)") + for i := 0; i < 1000; i++ { + tk.MustExec(fmt.Sprintf("insert into t values(%v, %v)", i, i)) + } + + // Get table ID for split. + dom := domain.GetDomain(tk.Se) + is := dom.InfoSchema() + tbl, err := is.TableByName(model.NewCIStr("test_get_endhandle"), model.NewCIStr("t")) + c.Assert(err, IsNil) + tblID := tbl.Meta().ID + + d := s.dom.DDL() + testCtx := newTestMaxTableRowIDContext(c, d, tbl) + + // Split the table. + s.cluster.SplitTable(s.mvccStore, tblID, 100) + + maxID, emptyTable := getMaxTableRowID(testCtx, s.store) + c.Assert(emptyTable, IsFalse) + c.Assert(maxID, Equals, int64(999)) + + tk.MustExec("insert into t values(10000, 1000)") + maxID, emptyTable = getMaxTableRowID(testCtx, s.store) + c.Assert(emptyTable, IsFalse) + c.Assert(maxID, Equals, int64(10000)) +} + +type testMaxTableRowIDContext struct { + c *C + d ddl.DDL + tbl table.Table +} + +func newTestMaxTableRowIDContext(c *C, d ddl.DDL, tbl table.Table) *testMaxTableRowIDContext { + return &testMaxTableRowIDContext{ + c: c, + d: d, + tbl: tbl, + } +} + +func getMaxTableRowID(ctx *testMaxTableRowIDContext, store kv.Storage) (int64, bool) { + c := ctx.c + d := ctx.d + tbl := ctx.tbl + curVer, err := store.CurrentVersion() + c.Assert(err, IsNil) + maxID, emptyTable, err := d.GetTableMaxRowID(curVer.Ver, tbl.(table.PhysicalTable)) + c.Assert(err, IsNil) + return maxID, emptyTable +} + +func checkGetMaxTableRowID(ctx *testMaxTableRowIDContext, store kv.Storage, expectEmpty bool, expectMaxID int64) { + c := ctx.c + maxID, emptyTable := getMaxTableRowID(ctx, store) + c.Assert(emptyTable, Equals, expectEmpty) + c.Assert(maxID, Equals, expectMaxID) +} + +func (s *testIntegrationSuite) getHistoryDDLJob(id int64) (*model.Job, error) { + var job *model.Job + + err := kv.RunInNewTxn(s.store, false, func(txn kv.Transaction) error { + t := meta.NewMeta(txn) + var err1 error + job, err1 = t.GetHistoryDDLJob(id) + return errors.Trace(err1) + }) + + return job, errors.Trace(err) +} + +func (s *testIntegrationSuite2) TestAddIndexAfterAddColumn(c *C) { + s.tk = testkit.NewTestKit(c, s.store) + s.tk.MustExec("use test") + + s.tk.MustExec("create table test_add_index_after_add_col(a int, b int not null default '0')") + s.tk.MustExec("insert into test_add_index_after_add_col values(1, 2),(2,2)") + s.tk.MustExec("alter table test_add_index_after_add_col add column c int not null default '0'") + sql := "alter table test_add_index_after_add_col add unique index cc(c) " + s.tk.MustGetErrCode(sql, mysql.ErrDupEntry) + sql = "alter table test_add_index_after_add_col add index idx_test(f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17);" + s.tk.MustGetErrCode(sql, mysql.ErrTooManyKeyParts) +} + +func (s *testIntegrationSuite2) TestAddAnonymousIndex(c *C) { + s.tk = 
testkit.NewTestKit(c, s.store) + s.tk.MustExec("use test") + s.tk.MustExec("create table t_anonymous_index (c1 int, c2 int, C3 int)") + s.tk.MustExec("alter table t_anonymous_index add index (c1, c2)") + // for dropping empty index + _, err := s.tk.Exec("alter table t_anonymous_index drop index") + c.Assert(err, NotNil) + // The index name is c1 when adding index (c1, c2). + s.tk.MustExec("alter table t_anonymous_index drop index c1") + t := testGetTableByName(c, s.ctx, "test", "t_anonymous_index") + c.Assert(t.Indices(), HasLen, 0) + // for adding some indices that the first column name is c1 + s.tk.MustExec("alter table t_anonymous_index add index (c1)") + _, err = s.tk.Exec("alter table t_anonymous_index add index c1 (c2)") + c.Assert(err, NotNil) + t = testGetTableByName(c, s.ctx, "test", "t_anonymous_index") + c.Assert(t.Indices(), HasLen, 1) + idx := t.Indices()[0].Meta().Name.L + c.Assert(idx, Equals, "c1") + // The MySQL will be a warning. + s.tk.MustExec("alter table t_anonymous_index add index c1_3 (c1)") + s.tk.MustExec("alter table t_anonymous_index add index (c1, c2, C3)") + // The MySQL will be a warning. + s.tk.MustExec("alter table t_anonymous_index add index (c1)") + t = testGetTableByName(c, s.ctx, "test", "t_anonymous_index") + c.Assert(t.Indices(), HasLen, 4) + s.tk.MustExec("alter table t_anonymous_index drop index c1") + s.tk.MustExec("alter table t_anonymous_index drop index c1_2") + s.tk.MustExec("alter table t_anonymous_index drop index c1_3") + s.tk.MustExec("alter table t_anonymous_index drop index c1_4") + // for case insensitive + s.tk.MustExec("alter table t_anonymous_index add index (C3)") + s.tk.MustExec("alter table t_anonymous_index drop index c3") + s.tk.MustExec("alter table t_anonymous_index add index c3 (C3)") + s.tk.MustExec("alter table t_anonymous_index drop index C3") + // for anonymous index with column name `primary` + s.tk.MustExec("create table t_primary (`primary` int, key (`primary`))") + t = testGetTableByName(c, s.ctx, "test", "t_primary") + c.Assert(t.Indices()[0].Meta().Name.String(), Equals, "primary_2") + s.tk.MustExec("create table t_primary_2 (`primary` int, key primary_2 (`primary`), key (`primary`))") + t = testGetTableByName(c, s.ctx, "test", "t_primary_2") + c.Assert(t.Indices()[0].Meta().Name.String(), Equals, "primary_2") + c.Assert(t.Indices()[1].Meta().Name.String(), Equals, "primary_3") + s.tk.MustExec("create table t_primary_3 (`primary_2` int, key(`primary_2`), `primary` int, key(`primary`));") + t = testGetTableByName(c, s.ctx, "test", "t_primary_3") + c.Assert(t.Indices()[0].Meta().Name.String(), Equals, "primary_2") + c.Assert(t.Indices()[1].Meta().Name.String(), Equals, "primary_3") +} + +func (s *testIntegrationSuite1) TestAddColumnTooMany(c *C) { + s.tk = testkit.NewTestKit(c, s.store) + s.tk.MustExec("use test") + count := int(atomic.LoadUint32(&ddl.TableColumnCountLimit) - 1) + var cols []string + for i := 0; i < count; i++ { + cols = append(cols, fmt.Sprintf("a%d int", i)) + } + createSQL := fmt.Sprintf("create table t_column_too_many (%s)", strings.Join(cols, ",")) + s.tk.MustExec(createSQL) + s.tk.MustExec("alter table t_column_too_many add column a_512 int") + alterSQL := "alter table t_column_too_many add column a_513 int" + s.tk.MustGetErrCode(alterSQL, mysql.ErrTooManyFields) +} + +func (s *testIntegrationSuite3) TestAlterColumn(c *C) { + s.tk = testkit.NewTestKit(c, s.store) + s.tk.MustExec("use test_db") + + s.tk.MustExec("create table test_alter_column (a int default 111, b varchar(8), c varchar(8) not 
null, d timestamp on update current_timestamp)") + s.tk.MustExec("insert into test_alter_column set b = 'a', c = 'aa'") + s.tk.MustQuery("select a from test_alter_column").Check(testkit.Rows("111")) + ctx := s.tk.Se.(sessionctx.Context) + is := domain.GetDomain(ctx).InfoSchema() + tbl, err := is.TableByName(model.NewCIStr("test_db"), model.NewCIStr("test_alter_column")) + c.Assert(err, IsNil) + tblInfo := tbl.Meta() + colA := tblInfo.Columns[0] + hasNoDefault := mysql.HasNoDefaultValueFlag(colA.Flag) + c.Assert(hasNoDefault, IsFalse) + s.tk.MustExec("alter table test_alter_column alter column a set default 222") + s.tk.MustExec("insert into test_alter_column set b = 'b', c = 'bb'") + s.tk.MustQuery("select a from test_alter_column").Check(testkit.Rows("111", "222")) + is = domain.GetDomain(ctx).InfoSchema() + tbl, err = is.TableByName(model.NewCIStr("test_db"), model.NewCIStr("test_alter_column")) + c.Assert(err, IsNil) + tblInfo = tbl.Meta() + colA = tblInfo.Columns[0] + hasNoDefault = mysql.HasNoDefaultValueFlag(colA.Flag) + c.Assert(hasNoDefault, IsFalse) + s.tk.MustExec("alter table test_alter_column alter column b set default null") + s.tk.MustExec("insert into test_alter_column set c = 'cc'") + s.tk.MustQuery("select b from test_alter_column").Check(testkit.Rows("a", "b", "")) + is = domain.GetDomain(ctx).InfoSchema() + tbl, err = is.TableByName(model.NewCIStr("test_db"), model.NewCIStr("test_alter_column")) + c.Assert(err, IsNil) + tblInfo = tbl.Meta() + colC := tblInfo.Columns[2] + hasNoDefault = mysql.HasNoDefaultValueFlag(colC.Flag) + c.Assert(hasNoDefault, IsTrue) + s.tk.MustExec("alter table test_alter_column alter column c set default 'xx'") + s.tk.MustExec("insert into test_alter_column set a = 123") + s.tk.MustQuery("select c from test_alter_column").Check(testkit.Rows("aa", "bb", "cc", "xx")) + is = domain.GetDomain(ctx).InfoSchema() + tbl, err = is.TableByName(model.NewCIStr("test_db"), model.NewCIStr("test_alter_column")) + c.Assert(err, IsNil) + tblInfo = tbl.Meta() + colC = tblInfo.Columns[2] + hasNoDefault = mysql.HasNoDefaultValueFlag(colC.Flag) + c.Assert(hasNoDefault, IsFalse) + // TODO: After fix issue 2606. + // s.tk.MustExec( "alter table test_alter_column alter column d set default null") + s.tk.MustExec("alter table test_alter_column alter column a drop default") + s.tk.MustExec("insert into test_alter_column set b = 'd', c = 'dd'") + s.tk.MustQuery("select a from test_alter_column").Check(testkit.Rows("111", "222", "222", "123", "")) + + // for failing tests + sql := "alter table db_not_exist.test_alter_column alter column b set default 'c'" + s.tk.MustGetErrCode(sql, mysql.ErrNoSuchTable) + sql = "alter table test_not_exist alter column b set default 'c'" + s.tk.MustGetErrCode(sql, mysql.ErrNoSuchTable) + sql = "alter table test_alter_column alter column col_not_exist set default 'c'" + s.tk.MustGetErrCode(sql, mysql.ErrBadField) + sql = "alter table test_alter_column alter column c set default null" + s.tk.MustGetErrCode(sql, mysql.ErrInvalidDefault) + + // The followings tests whether adding constraints via change / modify column + // is forbidden as expected. 
+ s.tk.MustExec("drop table if exists mc") + s.tk.MustExec("create table mc(a int key, b int, c int)") + _, err = s.tk.Exec("alter table mc modify column a int key") // Adds a new primary key + c.Assert(err, NotNil) + _, err = s.tk.Exec("alter table mc modify column c int unique") // Adds a new unique key + c.Assert(err, NotNil) + result := s.tk.MustQuery("show create table mc") + createSQL := result.Rows()[0][1] + expected := "CREATE TABLE `mc` (\n `a` int(11) NOT NULL,\n `b` int(11) DEFAULT NULL,\n `c` int(11) DEFAULT NULL,\n PRIMARY KEY (`a`)\n) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin" + c.Assert(createSQL, Equals, expected) + + // Change / modify column should preserve index options. + s.tk.MustExec("drop table if exists mc") + s.tk.MustExec("create table mc(a int key, b int, c int unique)") + s.tk.MustExec("alter table mc modify column a bigint") // NOT NULL & PRIMARY KEY should be preserved + s.tk.MustExec("alter table mc modify column b bigint") + s.tk.MustExec("alter table mc modify column c bigint") // Unique should be preserved + result = s.tk.MustQuery("show create table mc") + createSQL = result.Rows()[0][1] + expected = "CREATE TABLE `mc` (\n `a` bigint(20) NOT NULL,\n `b` bigint(20) DEFAULT NULL,\n `c` bigint(20) DEFAULT NULL,\n PRIMARY KEY (`a`),\n UNIQUE KEY `c` (`c`)\n) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin" + c.Assert(createSQL, Equals, expected) + + // Dropping or keeping auto_increment is allowed, however adding is not allowed. + s.tk.MustExec("drop table if exists mc") + s.tk.MustExec("create table mc(a int key auto_increment, b int)") + s.tk.MustExec("alter table mc modify column a bigint auto_increment") // Keeps auto_increment + result = s.tk.MustQuery("show create table mc") + createSQL = result.Rows()[0][1] + expected = "CREATE TABLE `mc` (\n `a` bigint(20) NOT NULL AUTO_INCREMENT,\n `b` int(11) DEFAULT NULL,\n PRIMARY KEY (`a`)\n) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin" + c.Assert(createSQL, Equals, expected) + _, err = s.tk.Exec("alter table mc modify column a bigint") // Droppping auto_increment is not allow when @@tidb_allow_remove_auto_inc == 'off' + c.Assert(err, NotNil) + s.tk.MustExec("set @@tidb_allow_remove_auto_inc = on") + s.tk.MustExec("alter table mc modify column a bigint") // Dropping auto_increment is ok when @@tidb_allow_remove_auto_inc == 'on' + result = s.tk.MustQuery("show create table mc") + createSQL = result.Rows()[0][1] + expected = "CREATE TABLE `mc` (\n `a` bigint(20) NOT NULL,\n `b` int(11) DEFAULT NULL,\n PRIMARY KEY (`a`)\n) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin" + c.Assert(createSQL, Equals, expected) + + _, err = s.tk.Exec("alter table mc modify column a bigint auto_increment") // Adds auto_increment should throw error + c.Assert(err, NotNil) + + s.tk.MustExec("drop table if exists t") + + s.tk.MustExec("drop table if exists multi_unique") + s.tk.MustExec("create table multi_unique (a int unique unique)") + s.tk.MustExec("drop table multi_unique") + s.tk.MustExec("create table multi_unique (a int key primary key unique unique)") + s.tk.MustExec("drop table multi_unique") + s.tk.MustExec("create table multi_unique (a int key unique unique key unique)") + s.tk.MustExec("drop table multi_unique") + s.tk.MustExec("create table multi_unique (a serial serial default value)") + s.tk.MustExec("drop table multi_unique") + s.tk.MustExec("create table multi_unique (a serial serial default value serial default value)") + s.tk.MustExec("drop table multi_unique") +} + 
+func (s *testIntegrationSuite4) TestDropAutoIncrementIndex(c *C) { + tk := testkit.NewTestKit(c, s.store) + tk.MustExec("create database if not exists test") + tk.MustExec("use test") + + tk.MustExec("drop table if exists t1") + tk.MustExec("create table t1 (a int auto_increment, unique key (a))") + dropIndexSQL := "alter table t1 drop index a" + tk.MustGetErrCode(dropIndexSQL, mysql.ErrWrongAutoKey) + + tk.MustExec("drop table if exists t1") + tk.MustExec("create table t1 (a int(11) not null auto_increment, b int(11), c bigint, unique key (a, b, c))") + dropIndexSQL = "alter table t1 drop index a" + tk.MustGetErrCode(dropIndexSQL, mysql.ErrWrongAutoKey) +} diff --git a/ddl/db_test.go b/ddl/db_test.go new file mode 100644 index 0000000..a0a2ee8 --- /dev/null +++ b/ddl/db_test.go @@ -0,0 +1,1191 @@ +// Copyright 2015 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package ddl_test + +import ( + "context" + "fmt" + "strings" + "sync/atomic" + "time" + + . "github.com/pingcap/check" + "github.com/pingcap/errors" + "github.com/pingcap/tidb/ddl" + testddlutil "github.com/pingcap/tidb/ddl/testutil" + "github.com/pingcap/tidb/domain" + "github.com/pingcap/tidb/infoschema" + "github.com/pingcap/tidb/kv" + "github.com/pingcap/tidb/meta/autoid" + "github.com/pingcap/tidb/parser/model" + "github.com/pingcap/tidb/parser/mysql" + "github.com/pingcap/tidb/parser/terror" + "github.com/pingcap/tidb/session" + "github.com/pingcap/tidb/sessionctx" + "github.com/pingcap/tidb/store/mockstore" + "github.com/pingcap/tidb/store/mockstore/mocktikv" + "github.com/pingcap/tidb/table" + "github.com/pingcap/tidb/types" + "github.com/pingcap/tidb/util/admin" + "github.com/pingcap/tidb/util/mock" + "github.com/pingcap/tidb/util/testkit" + "github.com/pingcap/tidb/util/testutil" +) + +var _ = Suite(&testDBSuite1{&testDBSuite{}}) +var _ = Suite(&testDBSuite2{&testDBSuite{}}) +var _ = Suite(&testDBSuite3{&testDBSuite{}}) +var _ = Suite(&testDBSuite4{&testDBSuite{}}) +var _ = Suite(&testDBSuite5{&testDBSuite{}}) + +type testDBSuite struct { + cluster *mocktikv.Cluster + mvccStore mocktikv.MVCCStore + store kv.Storage + dom *domain.Domain + schemaName string + tk *testkit.TestKit + s session.Session + lease time.Duration + autoIDStep int64 +} + +func setUpSuite(s *testDBSuite, c *C) { + var err error + + s.lease = 100 * time.Millisecond + session.SetSchemaLease(s.lease) + session.DisableStats4Test() + s.schemaName = "test_db" + s.autoIDStep = autoid.GetStep() + ddl.WaitTimeWhenErrorOccured = 0 + + s.cluster = mocktikv.NewCluster() + mocktikv.BootstrapWithSingleStore(s.cluster) + s.mvccStore = mocktikv.MustNewMVCCStore() + s.store, err = mockstore.NewMockTikvStore( + mockstore.WithCluster(s.cluster), + mockstore.WithMVCCStore(s.mvccStore), + ) + c.Assert(err, IsNil) + + s.dom, err = session.BootstrapSession(s.store) + c.Assert(err, IsNil) + s.s, err = session.CreateSession4Test(s.store) + c.Assert(err, IsNil) + + _, err = s.s.Execute(context.Background(), "create database test_db") + c.Assert(err, IsNil) + + s.tk = testkit.NewTestKit(c, s.store) +} + +func tearDownSuite(s 
*testDBSuite, c *C) { + s.s.Execute(context.Background(), "drop database if exists test_db") + s.s.Close() + s.dom.Close() + s.store.Close() +} + +func (s *testDBSuite) SetUpSuite(c *C) { + setUpSuite(s, c) +} + +func (s *testDBSuite) TearDownSuite(c *C) { + tearDownSuite(s, c) +} + +type testDBSuite1 struct{ *testDBSuite } +type testDBSuite2 struct{ *testDBSuite } +type testDBSuite3 struct{ *testDBSuite } +type testDBSuite4 struct{ *testDBSuite } +type testDBSuite5 struct{ *testDBSuite } + +func (s *testDBSuite4) TestAddIndexWithPK(c *C) { + s.tk = testkit.NewTestKit(c, s.store) + s.tk.MustExec("use " + s.schemaName) + + s.tk.MustExec("create table test_add_index_with_pk(a int not null, b int not null default '0', primary key(a))") + s.tk.MustExec("insert into test_add_index_with_pk values(1, 2)") + s.tk.MustExec("alter table test_add_index_with_pk add index idx (a)") + s.tk.MustQuery("select a from test_add_index_with_pk").Check(testkit.Rows("1")) + s.tk.MustExec("insert into test_add_index_with_pk values(2, 2)") + s.tk.MustExec("alter table test_add_index_with_pk add index idx1 (a, b)") + s.tk.MustQuery("select * from test_add_index_with_pk").Check(testkit.Rows("1 2", "2 2")) + s.tk.MustExec("create table test_add_index_with_pk1(a int not null, b int not null default '0', c int, d int, primary key(c))") + s.tk.MustExec("insert into test_add_index_with_pk1 values(1, 1, 1, 1)") + s.tk.MustExec("alter table test_add_index_with_pk1 add index idx (c)") + s.tk.MustExec("insert into test_add_index_with_pk1 values(2, 2, 2, 2)") + s.tk.MustQuery("select * from test_add_index_with_pk1").Check(testkit.Rows("1 1 1 1", "2 2 2 2")) + s.tk.MustExec("create table test_add_index_with_pk2(a int not null, b int not null default '0', c int unsigned, d int, primary key(c))") + s.tk.MustExec("insert into test_add_index_with_pk2 values(1, 1, 1, 1)") + s.tk.MustExec("alter table test_add_index_with_pk2 add index idx (c)") + s.tk.MustExec("insert into test_add_index_with_pk2 values(2, 2, 2, 2)") + s.tk.MustQuery("select * from test_add_index_with_pk2").Check(testkit.Rows("1 1 1 1", "2 2 2 2")) +} + +func testGetTableByName(c *C, ctx sessionctx.Context, db, table string) table.Table { + dom := domain.GetDomain(ctx) + // Make sure the table schema is the new schema. + err := dom.Reload() + c.Assert(err, IsNil) + tbl, err := dom.InfoSchema().TableByName(model.NewCIStr(db), model.NewCIStr(table)) + c.Assert(err, IsNil) + return tbl +} + +func (s *testDBSuite) testGetTable(c *C, name string) table.Table { + ctx := s.s.(sessionctx.Context) + return testGetTableByName(c, ctx, s.schemaName, name) +} + +func (s *testDBSuite) testGetDB(c *C, dbName string) *model.DBInfo { + ctx := s.s.(sessionctx.Context) + dom := domain.GetDomain(ctx) + // Make sure the table schema is the new schema. + err := dom.Reload() + c.Assert(err, IsNil) + db, ok := dom.InfoSchema().SchemaByName(model.NewCIStr(dbName)) + c.Assert(ok, IsTrue) + return db +} + +func backgroundExec(s kv.Storage, sql string, done chan error) { + se, err := session.CreateSession4Test(s) + if err != nil { + done <- errors.Trace(err) + return + } + defer se.Close() + _, err = se.Execute(context.Background(), "use test_db") + if err != nil { + done <- errors.Trace(err) + return + } + _, err = se.Execute(context.Background(), sql) + done <- errors.Trace(err) +} + +// TestCancelAddIndex1 tests canceling ddl job when the add index worker is not started. 
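+// It does so by installing a ddl.TestDDLCallback whose OnJobRunBeforeExported hook fires while the
+// add-index job is still in StateWriteReorganization with SnapshotVer == 0, opens its own transaction,
+// and cancels the job through admin.CancelJobs; the ALTER TABLE is then expected to fail with
+// "[ddl:8214]Cancelled DDL job" and to leave no idx_c2 index behind.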
+func (s *testDBSuite4) TestCancelAddIndex1(c *C) { + s.tk = testkit.NewTestKit(c, s.store) + s.mustExec(c, "use test_db") + s.mustExec(c, "drop table if exists t") + s.mustExec(c, "create table t(c1 int, c2 int)") + defer s.mustExec(c, "drop table t;") + + for i := 0; i < 50; i++ { + s.mustExec(c, fmt.Sprintf("insert into t values (%d, %d)", i, i)) + } + + var checkErr error + hook := &ddl.TestDDLCallback{} + hook.OnJobRunBeforeExported = func(job *model.Job) { + if job.Type == model.ActionAddIndex && job.State == model.JobStateRunning && job.SchemaState == model.StateWriteReorganization && job.SnapshotVer == 0 { + jobIDs := []int64{job.ID} + hookCtx := mock.NewContext() + hookCtx.Store = s.store + err := hookCtx.NewTxn(context.Background()) + if err != nil { + checkErr = errors.Trace(err) + return + } + txn, err := hookCtx.Txn(true) + if err != nil { + checkErr = errors.Trace(err) + return + } + errs, err := admin.CancelJobs(txn, jobIDs) + if err != nil { + checkErr = errors.Trace(err) + return + } + + if errs[0] != nil { + checkErr = errors.Trace(errs[0]) + return + } + + checkErr = txn.Commit(context.Background()) + } + } + originalHook := s.dom.DDL().GetHook() + s.dom.DDL().(ddl.DDLForTest).SetHook(hook) + rs, err := s.tk.Exec("alter table t add index idx_c2(c2)") + if rs != nil { + rs.Close() + } + c.Assert(checkErr, IsNil) + c.Assert(err, NotNil) + c.Assert(err.Error(), Equals, "[ddl:8214]Cancelled DDL job") + + s.dom.DDL().(ddl.DDLForTest).SetHook(originalHook) + t := s.testGetTable(c, "t") + for _, idx := range t.Indices() { + c.Assert(strings.EqualFold(idx.Meta().Name.L, "idx_c2"), IsFalse) + } + s.mustExec(c, "alter table t add index idx_c2(c2)") + s.mustExec(c, "alter table t drop index idx_c2") +} + +// TestCancelDropIndex tests cancel ddl job which type is drop index. +func (s *testDBSuite5) TestCancelDropIndex(c *C) { + idxName := "idx_c2" + addIdxSQL := "alter table t add index idx_c2 (c2);" + dropIdxSQL := "alter table t drop index idx_c2;" + testCancelDropIndex(c, s.store, s.dom.DDL(), idxName, addIdxSQL, dropIdxSQL) +} + +// testCancelDropIndex tests cancel ddl job which type is drop index. +func testCancelDropIndex(c *C, store kv.Storage, d ddl.DDL, idxName, addIdxSQL, dropIdxSQL string) { + tk := testkit.NewTestKit(c, store) + tk.MustExec("use test_db") + tk.MustExec("drop table if exists t") + tk.MustExec("create table t(c1 int, c2 int)") + defer tk.MustExec("drop table t;") + for i := 0; i < 5; i++ { + tk.MustExec(fmt.Sprintf("insert into t values (%d, %d)", i, i)) + } + testCases := []struct { + needAddIndex bool + jobState model.JobState + JobSchemaState model.SchemaState + cancelSucc bool + }{ + // model.JobStateNone means the jobs is canceled before the first run. 
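+ // Fields: needAddIndex re-creates the index before the case runs, jobState/JobSchemaState pick the
+ // point at which the hook attempts the cancellation, and cancelSucc is whether that cancellation is
+ // expected to succeed.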
+ {true, model.JobStateNone, model.StateNone, true}, + {false, model.JobStateRunning, model.StateWriteOnly, true}, + {false, model.JobStateRunning, model.StateDeleteOnly, false}, + {true, model.JobStateRunning, model.StateDeleteReorganization, false}, + } + var checkErr error + hook := &ddl.TestDDLCallback{} + var jobID int64 + testCase := &testCases[0] + hook.OnJobRunBeforeExported = func(job *model.Job) { + if (job.Type == model.ActionDropIndex || job.Type == model.ActionDropPrimaryKey) && + job.State == testCase.jobState && job.SchemaState == testCase.JobSchemaState { + jobID = job.ID + jobIDs := []int64{job.ID} + hookCtx := mock.NewContext() + hookCtx.Store = store + err := hookCtx.NewTxn(context.TODO()) + if err != nil { + checkErr = errors.Trace(err) + return + } + txn, err := hookCtx.Txn(true) + if err != nil { + checkErr = errors.Trace(err) + return + } + + errs, err := admin.CancelJobs(txn, jobIDs) + if err != nil { + checkErr = errors.Trace(err) + return + } + if errs[0] != nil { + checkErr = errors.Trace(errs[0]) + return + } + checkErr = txn.Commit(context.Background()) + } + } + originalHook := d.GetHook() + d.(ddl.DDLForTest).SetHook(hook) + ctx := tk.Se.(sessionctx.Context) + for i := range testCases { + testCase = &testCases[i] + if testCase.needAddIndex { + tk.MustExec(addIdxSQL) + } + rs, err := tk.Exec(dropIdxSQL) + if rs != nil { + rs.Close() + } + t := testGetTableByName(c, ctx, "test_db", "t") + indexInfo := t.Meta().FindIndexByName(idxName) + if testCase.cancelSucc { + c.Assert(checkErr, IsNil) + c.Assert(err, NotNil) + c.Assert(err.Error(), Equals, "[ddl:8214]Cancelled DDL job") + c.Assert(indexInfo, NotNil) + c.Assert(indexInfo.State, Equals, model.StatePublic) + } else { + err1 := admin.ErrCannotCancelDDLJob.GenWithStackByArgs(jobID) + c.Assert(err, IsNil) + c.Assert(checkErr, NotNil) + c.Assert(checkErr.Error(), Equals, err1.Error()) + c.Assert(indexInfo, IsNil) + } + } + d.(ddl.DDLForTest).SetHook(originalHook) + tk.MustExec(addIdxSQL) + tk.MustExec(dropIdxSQL) +} + +// TestCancelDropTable tests cancel ddl job which type is drop table. +func (s *testDBSuite2) TestCancelDropTableAndSchema(c *C) { + s.tk = testkit.NewTestKit(c, s.store) + testCases := []struct { + needAddTableOrDB bool + action model.ActionType + jobState model.JobState + JobSchemaState model.SchemaState + cancelSucc bool + }{ + // Check drop table. + // model.JobStateNone means the jobs is canceled before the first run. + {true, model.ActionDropTable, model.JobStateNone, model.StateNone, true}, + {false, model.ActionDropTable, model.JobStateRunning, model.StateWriteOnly, false}, + {true, model.ActionDropTable, model.JobStateRunning, model.StateDeleteOnly, false}, + + // Check drop database. 
+ {true, model.ActionDropSchema, model.JobStateNone, model.StateNone, true}, + {false, model.ActionDropSchema, model.JobStateRunning, model.StateWriteOnly, false}, + {true, model.ActionDropSchema, model.JobStateRunning, model.StateDeleteOnly, false}, + } + var checkErr error + hook := &ddl.TestDDLCallback{} + var jobID int64 + testCase := &testCases[0] + s.mustExec(c, "create database if not exists test_drop_db") + dbInfo := s.testGetDB(c, "test_drop_db") + + hook.OnJobRunBeforeExported = func(job *model.Job) { + if job.Type == testCase.action && job.State == testCase.jobState && job.SchemaState == testCase.JobSchemaState && job.SchemaID == dbInfo.ID { + jobIDs := []int64{job.ID} + jobID = job.ID + hookCtx := mock.NewContext() + hookCtx.Store = s.store + err := hookCtx.NewTxn(context.TODO()) + if err != nil { + checkErr = errors.Trace(err) + return + } + txn, err := hookCtx.Txn(true) + if err != nil { + checkErr = errors.Trace(err) + return + } + errs, err := admin.CancelJobs(txn, jobIDs) + if err != nil { + checkErr = errors.Trace(err) + return + } + if errs[0] != nil { + checkErr = errors.Trace(errs[0]) + return + } + checkErr = txn.Commit(context.Background()) + } + } + originHook := s.dom.DDL().GetHook() + defer s.dom.DDL().(ddl.DDLForTest).SetHook(originHook) + s.dom.DDL().(ddl.DDLForTest).SetHook(hook) + var err error + sql := "" + for i := range testCases { + testCase = &testCases[i] + if testCase.needAddTableOrDB { + s.mustExec(c, "create database if not exists test_drop_db") + s.mustExec(c, "use test_drop_db") + s.mustExec(c, "create table if not exists t(c1 int, c2 int)") + } + + dbInfo = s.testGetDB(c, "test_drop_db") + + if testCase.action == model.ActionDropTable { + sql = "drop table t;" + } else if testCase.action == model.ActionDropSchema { + sql = "drop database test_drop_db;" + } + + _, err = s.tk.Exec(sql) + if testCase.cancelSucc { + c.Assert(checkErr, IsNil) + c.Assert(err, NotNil) + c.Assert(err.Error(), Equals, "[ddl:8214]Cancelled DDL job") + s.mustExec(c, fmt.Sprintf("insert into t values (%d, %d)", i, i)) + } else { + c.Assert(err, IsNil) + c.Assert(checkErr, NotNil) + c.Assert(checkErr.Error(), Equals, admin.ErrCannotCancelDDLJob.GenWithStackByArgs(jobID).Error()) + _, err = s.tk.Exec(fmt.Sprintf("insert into t values (%d, %d)", i, i)) + c.Assert(err, NotNil) + } + } +} + +func (s *testDBSuite3) TestAddAnonymousIndex(c *C) { + s.tk = testkit.NewTestKit(c, s.store) + s.tk.MustExec("use " + s.schemaName) + s.mustExec(c, "create table t_anonymous_index (c1 int, c2 int, C3 int)") + s.mustExec(c, "alter table t_anonymous_index add index (c1, c2)") + // for dropping empty index + _, err := s.tk.Exec("alter table t_anonymous_index drop index") + c.Assert(err, NotNil) + // The index name is c1 when adding index (c1, c2). + s.mustExec(c, "alter table t_anonymous_index drop index c1") + t := s.testGetTable(c, "t_anonymous_index") + c.Assert(t.Indices(), HasLen, 0) + // for adding some indices that the first column name is c1 + s.mustExec(c, "alter table t_anonymous_index add index (c1)") + _, err = s.tk.Exec("alter table t_anonymous_index add index c1 (c2)") + c.Assert(err, NotNil) + t = s.testGetTable(c, "t_anonymous_index") + c.Assert(t.Indices(), HasLen, 1) + idx := t.Indices()[0].Meta().Name.L + c.Assert(idx, Equals, "c1") + // The MySQL will be a warning. + s.mustExec(c, "alter table t_anonymous_index add index c1_3 (c1)") + s.mustExec(c, "alter table t_anonymous_index add index (c1, c2, C3)") + // The MySQL will be a warning. 
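+ // After the next statement the table holds four indexes named c1, c1_2, c1_3 and c1_4 (anonymous
+ // indexes whose first column is c1 get numeric suffixes), all of which are dropped again below.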
+ s.mustExec(c, "alter table t_anonymous_index add index (c1)") + t = s.testGetTable(c, "t_anonymous_index") + c.Assert(t.Indices(), HasLen, 4) + s.mustExec(c, "alter table t_anonymous_index drop index c1") + s.mustExec(c, "alter table t_anonymous_index drop index c1_2") + s.mustExec(c, "alter table t_anonymous_index drop index c1_3") + s.mustExec(c, "alter table t_anonymous_index drop index c1_4") + // for case insensitive + s.mustExec(c, "alter table t_anonymous_index add index (C3)") + s.mustExec(c, "alter table t_anonymous_index drop index c3") + s.mustExec(c, "alter table t_anonymous_index add index c3 (C3)") + s.mustExec(c, "alter table t_anonymous_index drop index C3") + // for anonymous index with column name `primary` + s.mustExec(c, "create table t_primary (`primary` int, key (`primary`))") + t = s.testGetTable(c, "t_primary") + c.Assert(t.Indices()[0].Meta().Name.String(), Equals, "primary_2") + s.mustExec(c, "create table t_primary_2 (`primary` int, key primary_2 (`primary`), key (`primary`))") + t = s.testGetTable(c, "t_primary_2") + c.Assert(t.Indices()[0].Meta().Name.String(), Equals, "primary_2") + c.Assert(t.Indices()[1].Meta().Name.String(), Equals, "primary_3") + s.mustExec(c, "create table t_primary_3 (`primary_2` int, key(`primary_2`), `primary` int, key(`primary`));") + t = s.testGetTable(c, "t_primary_3") + c.Assert(t.Indices()[0].Meta().Name.String(), Equals, "primary_2") + c.Assert(t.Indices()[1].Meta().Name.String(), Equals, "primary_3") +} + +func (s *testDBSuite5) TestAddMultiColumnsIndex(c *C) { + s.tk = testkit.NewTestKit(c, s.store) + s.tk.MustExec("use " + s.schemaName) + + s.tk.MustExec("drop database if exists tidb;") + s.tk.MustExec("create database tidb;") + s.tk.MustExec("use tidb;") + s.tk.MustExec("create table tidb.test (a int auto_increment primary key, b int);") + s.tk.MustExec("insert tidb.test values (1, 2);") + s.tk.MustExec("insert into tidb.test values (2, 2);") + // Test that the b value is nil. + s.tk.MustExec("insert into tidb.test (a) values (3);") + s.tk.MustExec("insert into tidb.test values (4, 4);") + // Test that the b value is nil again. 
+ s.tk.MustExec("insert into tidb.test (a) values (5);") + s.tk.MustExec("insert tidb.test values (6, 6);") + s.tk.MustExec("alter table tidb.test add index idx1 (a, b);") + +} + +func (s *testDBSuite2) TestDropIndex(c *C) { + idxName := "c3_index" + createSQL := "create table test_drop_index (c1 int, c2 int, c3 int, unique key(c1), key c3_index(c3))" + dropIdxSQL := "alter table test_drop_index drop index c3_index;" + testDropIndex(c, s.store, s.lease, createSQL, dropIdxSQL, idxName) +} + +func testDropIndex(c *C, store kv.Storage, lease time.Duration, createSQL, dropIdxSQL, idxName string) { + tk := testkit.NewTestKit(c, store) + tk.MustExec("use test_db") + tk.MustExec("drop table if exists test_drop_index") + tk.MustExec(createSQL) + done := make(chan error, 1) + tk.MustExec("delete from test_drop_index") + + num := 100 + // add some rows + for i := 0; i < num; i++ { + tk.MustExec(fmt.Sprintf("insert into test_drop_index values (%d, %d, %d)", i, i, i)) + } + ctx := tk.Se.(sessionctx.Context) + t := testGetTableByName(c, ctx, "test_db", "test_drop_index") + var c3idx table.Index + for _, tidx := range t.Indices() { + if tidx.Meta().Name.L == idxName { + c3idx = tidx + break + } + } + c.Assert(c3idx, NotNil) + + testddlutil.SessionExecInGoroutine(c, store, dropIdxSQL, done) + + ticker := time.NewTicker(lease / 2) + defer ticker.Stop() +LOOP: + for { + select { + case err := <-done: + if err == nil { + break LOOP + } + c.Assert(err, IsNil, Commentf("err:%v", errors.ErrorStack(err))) + case <-ticker.C: + step := 5 + // add some data + for i := num; i < num+step; i++ { + tk.MustExec(fmt.Sprintf("insert into test_drop_index values (%d, %d, %d)", i, i, i)) + } + num += step + } + } + + rows := tk.MustQuery("explain select c1 from test_drop_index where c3 >= 0") + c.Assert(strings.Contains(fmt.Sprintf("%v", rows), idxName), IsFalse) + + // Check in index, it must be no index in KV. + // Make sure there is no index with name c3_index. 
+ t = testGetTableByName(c, ctx, "test_db", "test_drop_index") + var nidx table.Index + for _, tidx := range t.Indices() { + if tidx.Meta().Name.L == idxName { + nidx = tidx + break + } + } + c.Assert(nidx, IsNil) + tk.MustExec("drop table test_drop_index") +} + +func (s *testDBSuite4) TestAddIndexWithDupCols(c *C) { + s.tk = testkit.NewTestKit(c, s.store) + s.tk.MustExec("use " + s.schemaName) + err1 := infoschema.ErrColumnExists.GenWithStackByArgs("b") + err2 := infoschema.ErrColumnExists.GenWithStackByArgs("B") + + s.tk.MustExec("create table test_add_index_with_dup (a int, b int)") + _, err := s.tk.Exec("create index c on test_add_index_with_dup(b, a, b)") + c.Check(errors.Cause(err1).(*terror.Error).Equal(err), Equals, true) + + _, err = s.tk.Exec("create index c on test_add_index_with_dup(b, a, B)") + c.Check(errors.Cause(err2).(*terror.Error).Equal(err), Equals, true) + + _, err = s.tk.Exec("alter table test_add_index_with_dup add index c (b, a, b)") + c.Check(errors.Cause(err1).(*terror.Error).Equal(err), Equals, true) + + _, err = s.tk.Exec("alter table test_add_index_with_dup add index c (b, a, B)") + c.Check(errors.Cause(err2).(*terror.Error).Equal(err), Equals, true) + + s.tk.MustExec("drop table test_add_index_with_dup") +} + +func (s *testDBSuite1) TestAddColumnTooMany(c *C) { + s.tk = testkit.NewTestKit(c, s.store) + s.tk.MustExec("use test") + count := int(atomic.LoadUint32(&ddl.TableColumnCountLimit) - 1) + var cols []string + for i := 0; i < count; i++ { + cols = append(cols, fmt.Sprintf("a%d int", i)) + } + createSQL := fmt.Sprintf("create table t_column_too_many (%s)", strings.Join(cols, ",")) + s.tk.MustExec(createSQL) + s.tk.MustExec("alter table t_column_too_many add column a_512 int") + alterSQL := "alter table t_column_too_many add column a_513 int" + s.tk.MustGetErrCode(alterSQL, mysql.ErrTooManyFields) +} + +// TestDropColumn tests inserting values that still reference a to-be-dropped column while the drop-column DDL is running. +// Column info taken from the schema when building the insert plan should contain public columns only, +// otherwise it will not be consistent with Table.Col() and the server will panic.
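+// The test runs the drop-column DDL statements and inserts that still assign c4 in separate
+// goroutines: every DDL statement must succeed, while the racing inserts are allowed to fail but must
+// not bring the server down.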
+func (s *testDBSuite2) TestDropColumn(c *C) { + s.tk = testkit.NewTestKit(c, s.store) + s.tk.MustExec("create database drop_col_db") + s.tk.MustExec("use drop_col_db") + num := 25 + multiDDL := make([]string, 0, num) + sql := "create table t2 (c1 int, c2 int, c3 int, " + for i := 4; i < 4+num; i++ { + multiDDL = append(multiDDL, fmt.Sprintf("alter table t2 drop column c%d", i)) + + if i != 3+num { + sql += fmt.Sprintf("c%d int, ", i) + } else { + sql += fmt.Sprintf("c%d int)", i) + } + } + s.tk.MustExec(sql) + dmlDone := make(chan error, num) + ddlDone := make(chan error, num) + + testddlutil.ExecMultiSQLInGoroutine(c, s.store, "drop_col_db", multiDDL, ddlDone) + for i := 0; i < num; i++ { + testddlutil.ExecMultiSQLInGoroutine(c, s.store, "drop_col_db", []string{"insert into t2 set c1 = 1, c2 = 1, c3 = 1, c4 = 1"}, dmlDone) + } + for i := 0; i < num; i++ { + select { + case err := <-ddlDone: + c.Assert(err, IsNil, Commentf("err:%v", errors.ErrorStack(err))) + } + } + + s.tk.MustExec("drop database drop_col_db") +} + +func (s *testDBSuite4) TestChangeColumn(c *C) { + s.tk = testkit.NewTestKit(c, s.store) + s.tk.MustExec("use " + s.schemaName) + + s.mustExec(c, "create table t3 (a int default '0', b varchar(10), d int not null default '0')") + s.mustExec(c, "insert into t3 set b = 'a'") + s.tk.MustQuery("select a from t3").Check(testkit.Rows("0")) + s.mustExec(c, "alter table t3 change a aa bigint") + s.mustExec(c, "insert into t3 set b = 'b'") + s.tk.MustQuery("select aa from t3").Check(testkit.Rows("0", "")) + // for no default flag + s.mustExec(c, "alter table t3 change d dd bigint not null") + ctx := s.tk.Se.(sessionctx.Context) + is := domain.GetDomain(ctx).InfoSchema() + tbl, err := is.TableByName(model.NewCIStr("test_db"), model.NewCIStr("t3")) + c.Assert(err, IsNil) + tblInfo := tbl.Meta() + colD := tblInfo.Columns[2] + hasNoDefault := mysql.HasNoDefaultValueFlag(colD.Flag) + c.Assert(hasNoDefault, IsTrue) + // for the following definitions: 'not null', 'null', 'default value' and 'comment' + s.mustExec(c, "alter table t3 change b b varchar(20) null default 'c'") + is = domain.GetDomain(ctx).InfoSchema() + tbl, err = is.TableByName(model.NewCIStr("test_db"), model.NewCIStr("t3")) + c.Assert(err, IsNil) + tblInfo = tbl.Meta() + colB := tblInfo.Columns[1] + hasNotNull := mysql.HasNotNullFlag(colB.Flag) + c.Assert(hasNotNull, IsFalse) + s.mustExec(c, "insert into t3 set aa = 3, dd = 5") + s.tk.MustQuery("select b from t3").Check(testkit.Rows("a", "b", "c")) + + // for failing tests + sql := "alter table t3 change aa a bigint default ''" + s.tk.MustGetErrCode(sql, mysql.ErrInvalidDefault) + sql = "alter table t3 change a testx.t3.aa bigint" + s.tk.MustGetErrCode(sql, mysql.ErrWrongDBName) + sql = "alter table t3 change t.a aa bigint" + s.tk.MustGetErrCode(sql, mysql.ErrWrongTableName) + s.mustExec(c, "create table t4 (c1 int, c2 int, c3 int default 1, index (c1));") + s.tk.MustExec("insert into t4(c2) values (null);") + sql = "alter table t4 change c1 a1 int not null;" + s.tk.MustGetErrCode(sql, mysql.ErrInvalidUseOfNull) + sql = "alter table t4 change c2 a bigint not null;" + s.tk.MustGetErrCode(sql, mysql.WarnDataTruncated) + // Rename to an existing column. 
+ s.mustExec(c, "alter table t3 add column a bigint") + sql = "alter table t3 change aa a bigint" + s.tk.MustGetErrCode(sql, mysql.ErrDupFieldName) + + s.tk.MustExec("drop table t3") +} + +func (s *testDBSuite) mustExec(c *C, query string) { + s.tk.MustExec(query) +} + +func (s *testDBSuite1) TestCreateTable(c *C) { + s.tk.MustExec("use test") + s.tk.MustExec("CREATE TABLE `t` (`a` double DEFAULT 1.0 DEFAULT now() DEFAULT 2.0 );") + s.tk.MustExec("CREATE TABLE IF NOT EXISTS `t` (`a` double DEFAULT 1.0 DEFAULT now() DEFAULT 2.0 );") + ctx := s.tk.Se.(sessionctx.Context) + is := domain.GetDomain(ctx).InfoSchema() + tbl, err := is.TableByName(model.NewCIStr("test"), model.NewCIStr("t")) + c.Assert(err, IsNil) + cols := tbl.Cols() + + c.Assert(len(cols), Equals, 1) + col := cols[0] + c.Assert(col.Name.L, Equals, "a") + d, ok := col.DefaultValue.(string) + c.Assert(ok, IsTrue) + c.Assert(d, Equals, "2.0") + + s.tk.MustExec("drop table t") + + // test for enum column + failSQL := "create table t_enum (a enum('e','e'));" + s.tk.MustGetErrCode(failSQL, mysql.ErrDuplicatedValueInType) + failSQL = "create table t_enum (a enum('e','E'));" + s.tk.MustGetErrCode(failSQL, mysql.ErrDuplicatedValueInType) + failSQL = "create table t_enum (a enum('abc','Abc'));" + s.tk.MustGetErrCode(failSQL, mysql.ErrDuplicatedValueInType) + // test for set column + failSQL = "create table t_enum (a set('e','e'));" + s.tk.MustGetErrCode(failSQL, mysql.ErrDuplicatedValueInType) + failSQL = "create table t_enum (a set('e','E'));" + s.tk.MustGetErrCode(failSQL, mysql.ErrDuplicatedValueInType) + failSQL = "create table t_enum (a set('abc','Abc'));" + s.tk.MustGetErrCode(failSQL, mysql.ErrDuplicatedValueInType) + _, err = s.tk.Exec("create table t_enum (a enum('B','b'));") + c.Assert(err.Error(), Equals, "[types:1291]Column 'a' has duplicated value 'B' in ENUM") +} + +func (s *testDBSuite5) TestCheckColumnDefaultValue(c *C) { + s.tk = testkit.NewTestKit(c, s.store) + s.tk.MustExec("use test;") + s.tk.MustExec("drop table if exists text_default_text;") + s.tk.MustGetErrCode("create table text_default_text(c1 text not null default '');", mysql.ErrBlobCantHaveDefault) + s.tk.MustGetErrCode("create table text_default_text(c1 text not null default 'scds');", mysql.ErrBlobCantHaveDefault) + + s.tk.MustExec("drop table if exists text_default_blob;") + s.tk.MustGetErrCode("create table text_default_blob(c1 blob not null default '');", mysql.ErrBlobCantHaveDefault) + s.tk.MustGetErrCode("create table text_default_blob(c1 blob not null default 'scds54');", mysql.ErrBlobCantHaveDefault) + + s.tk.MustExec("set sql_mode='';") + s.tk.MustExec("create table text_default_text(c1 text not null default '');") + s.tk.MustQuery(`show create table text_default_text`).Check(testutil.RowsWithSep("|", + "text_default_text CREATE TABLE `text_default_text` (\n"+ + " `c1` text NOT NULL\n"+ + ") ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin", + )) + ctx := s.tk.Se.(sessionctx.Context) + is := domain.GetDomain(ctx).InfoSchema() + tblInfo, err := is.TableByName(model.NewCIStr("test"), model.NewCIStr("text_default_text")) + c.Assert(err, IsNil) + c.Assert(tblInfo.Meta().Columns[0].DefaultValue, Equals, "") + + s.tk.MustExec("create table text_default_blob(c1 blob not null default '');") + s.tk.MustQuery(`show create table text_default_blob`).Check(testutil.RowsWithSep("|", + "text_default_blob CREATE TABLE `text_default_blob` (\n"+ + " `c1` blob NOT NULL\n"+ + ") ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin", + )) + is = 
domain.GetDomain(ctx).InfoSchema() + tblInfo, err = is.TableByName(model.NewCIStr("test"), model.NewCIStr("text_default_blob")) + c.Assert(err, IsNil) + c.Assert(tblInfo.Meta().Columns[0].DefaultValue, Equals, "") +} + +func (s *testDBSuite3) TestColumnModifyingDefinition(c *C) { + s.tk = testkit.NewTestKit(c, s.store) + s.tk.MustExec("use test") + s.tk.MustExec("drop table if exists test2;") + s.tk.MustExec("create table test2 (c1 int, c2 int, c3 int default 1, index (c1));") + s.tk.MustExec("alter table test2 change c2 a int not null;") + ctx := s.tk.Se.(sessionctx.Context) + is := domain.GetDomain(ctx).InfoSchema() + t, err := is.TableByName(model.NewCIStr("test"), model.NewCIStr("test2")) + c.Assert(err, IsNil) + var c2 *table.Column + for _, col := range t.Cols() { + if col.Name.L == "a" { + c2 = col + } + } + c.Assert(mysql.HasNotNullFlag(c2.Flag), IsTrue) + + s.tk.MustExec("drop table if exists test2;") + s.tk.MustExec("create table test2 (c1 int, c2 int, c3 int default 1, index (c1));") + s.tk.MustExec("insert into test2(c2) values (null);") + s.tk.MustGetErrCode("alter table test2 change c2 a int not null", mysql.ErrInvalidUseOfNull) + s.tk.MustGetErrCode("alter table test2 change c1 a1 bigint not null;", mysql.WarnDataTruncated) +} + +func (s *testDBSuite5) TestModifyColumnRollBack(c *C) { + s.tk = testkit.NewTestKit(c, s.store) + s.mustExec(c, "use test_db") + s.mustExec(c, "drop table if exists t1") + s.mustExec(c, "create table t1 (c1 int, c2 int, c3 int default 1, index (c1));") + + var c2 *table.Column + var checkErr error + hook := &ddl.TestDDLCallback{} + hook.OnJobUpdatedExported = func(job *model.Job) { + if checkErr != nil { + return + } + + t := s.testGetTable(c, "t1") + for _, col := range t.Cols() { + if col.Name.L == "c2" { + c2 = col + } + } + if mysql.HasPreventNullInsertFlag(c2.Flag) { + s.tk.MustGetErrCode("insert into t1(c2) values (null);", mysql.ErrBadNull) + } + + hookCtx := mock.NewContext() + hookCtx.Store = s.store + err := hookCtx.NewTxn(context.Background()) + if err != nil { + checkErr = errors.Trace(err) + return + } + + jobIDs := []int64{job.ID} + txn, err := hookCtx.Txn(true) + if err != nil { + checkErr = errors.Trace(err) + return + } + errs, err := admin.CancelJobs(txn, jobIDs) + if err != nil { + checkErr = errors.Trace(err) + return + } + // It only tests cancel one DDL job. 
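+ // admin.CancelJobs reports one error slot per requested job ID, so errs[0] corresponds to the single
+ // modify-column job being cancelled here.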
+ if errs[0] != nil { + checkErr = errors.Trace(errs[0]) + return + } + + txn, err = hookCtx.Txn(true) + if err != nil { + checkErr = errors.Trace(err) + return + } + err = txn.Commit(context.Background()) + if err != nil { + checkErr = errors.Trace(err) + } + } + + originalHook := s.dom.DDL().GetHook() + s.dom.DDL().(ddl.DDLForTest).SetHook(hook) + done := make(chan error, 1) + go backgroundExec(s.store, "alter table t1 change c2 c2 bigint not null;", done) + ticker := time.NewTicker(s.lease / 2) + defer ticker.Stop() +LOOP: + for { + select { + case err := <-done: + c.Assert(err, NotNil) + c.Assert(err.Error(), Equals, "[ddl:8214]Cancelled DDL job") + break LOOP + case <-ticker.C: + s.mustExec(c, "insert into t1(c2) values (null);") + } + } + + t := s.testGetTable(c, "t1") + for _, col := range t.Cols() { + if col.Name.L == "c2" { + c2 = col + } + } + c.Assert(mysql.HasNotNullFlag(c2.Flag), IsFalse) + s.dom.DDL().(ddl.DDLForTest).SetHook(originalHook) + s.mustExec(c, "drop table t1") +} + +func (s *testDBSuite1) TestModifyColumnNullToNotNull(c *C) { + s.tk = testkit.NewTestKit(c, s.store) + tk2 := testkit.NewTestKit(c, s.store) + tk2.MustExec("use test_db") + s.mustExec(c, "use test_db") + s.mustExec(c, "drop table if exists t1") + s.mustExec(c, "create table t1 (c1 int, c2 int);") + + tbl := s.testGetTable(c, "t1") + getModifyColumn := func() *table.Column { + t := s.testGetTable(c, "t1") + for _, col := range t.Cols() { + if col.Name.L == "c2" { + return col + } + } + return nil + } + + originalHook := s.dom.DDL().GetHook() + defer s.dom.DDL().(ddl.DDLForTest).SetHook(originalHook) + + // Check insert null before job first update. + times := 0 + hook := &ddl.TestDDLCallback{} + s.tk.MustExec("delete from t1") + var checkErr error + hook.OnJobRunBeforeExported = func(job *model.Job) { + if tbl.Meta().ID != job.TableID { + return + } + if times == 0 { + _, checkErr = tk2.Exec("insert into t1 values ();") + } + times++ + } + s.dom.DDL().(ddl.DDLForTest).SetHook(hook) + _, err := s.tk.Exec("alter table t1 change c2 c2 int not null;") + c.Assert(checkErr, IsNil) + c.Assert(err, NotNil) + c.Assert(err.Error(), Equals, "[ddl:1138]Invalid use of NULL value") + s.tk.MustQuery("select * from t1").Check(testkit.Rows(" ")) + + // Check insert error when column has PreventNullInsertFlag. + s.tk.MustExec("delete from t1") + hook.OnJobRunBeforeExported = func(job *model.Job) { + if tbl.Meta().ID != job.TableID { + return + } + if job.State != model.JobStateRunning { + return + } + // now c2 has PreventNullInsertFlag, an error is expected. 
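+ // The concurrent insert issued through tk2 is expected to be rejected with
+ // "[table:1048]Column 'c2' cannot be null" even though the NOT NULL change is not public yet.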
+ _, checkErr = tk2.Exec("insert into t1 values ();") + } + s.dom.DDL().(ddl.DDLForTest).SetHook(hook) + s.tk.MustExec("alter table t1 change c2 c2 bigint not null;") + c.Assert(checkErr.Error(), Equals, "[table:1048]Column 'c2' cannot be null") + + c2 := getModifyColumn() + c.Assert(mysql.HasNotNullFlag(c2.Flag), IsTrue) + c.Assert(mysql.HasPreventNullInsertFlag(c2.Flag), IsFalse) + _, err = s.tk.Exec("insert into t1 values ();") + c.Assert(err, NotNil) + c.Assert(err.Error(), Equals, "[table:1364]Field 'c2' doesn't have a default value") +} + +func (s *testDBSuite3) TestTransactionWithWriteOnlyColumn(c *C) { + s.tk = testkit.NewTestKit(c, s.store) + s.mustExec(c, "use test_db") + s.mustExec(c, "drop table if exists t1") + s.mustExec(c, "create table t1 (a int key);") + + transactions := [][]string{ + { + "begin", + "insert into t1 set a=2", + "commit", + }, + } + + originHook := s.dom.DDL().GetHook() + defer s.dom.DDL().(ddl.DDLForTest).SetHook(originHook) + hook := &ddl.TestDDLCallback{} + hook.OnJobRunBeforeExported = func(job *model.Job) { + switch job.SchemaState { + case model.StateWriteOnly: + default: + return + } + // do transaction. + for _, transaction := range transactions { + for _, sql := range transaction { + s.mustExec(c, sql) + } + } + } + s.dom.DDL().(ddl.DDLForTest).SetHook(hook) + done := make(chan error, 1) + // test transaction on add column. + go backgroundExec(s.store, "alter table t1 add column c int not null", done) + err := <-done + c.Assert(err, IsNil) + s.tk.MustQuery("select a from t1").Check(testkit.Rows("2")) + s.mustExec(c, "delete from t1") + + // test transaction on drop column. + go backgroundExec(s.store, "alter table t1 drop column c", done) + err = <-done + c.Assert(err, IsNil) + s.tk.MustQuery("select a from t1").Check(testkit.Rows("2")) +} + +func (s *testDBSuite4) TestAddColumn2(c *C) { + s.tk = testkit.NewTestKit(c, s.store) + s.mustExec(c, "use test_db") + s.mustExec(c, "drop table if exists t1") + s.mustExec(c, "create table t1 (a int key, b int);") + defer s.mustExec(c, "drop table if exists t1, t2") + + originHook := s.dom.DDL().GetHook() + defer s.dom.DDL().(ddl.DDLForTest).SetHook(originHook) + hook := &ddl.TestDDLCallback{} + var writeOnlyTable table.Table + hook.OnJobRunBeforeExported = func(job *model.Job) { + if job.SchemaState == model.StateWriteOnly { + writeOnlyTable, _ = s.dom.InfoSchema().TableByID(job.TableID) + } + } + s.dom.DDL().(ddl.DDLForTest).SetHook(hook) + done := make(chan error, 1) + // test transaction on add column. + go backgroundExec(s.store, "alter table t1 add column c int not null", done) + err := <-done + c.Assert(err, IsNil) + + s.mustExec(c, "insert into t1 values (1,1,1)") + s.tk.MustQuery("select a,b,c from t1").Check(testkit.Rows("1 1 1")) + + // mock for outdated tidb update record. 
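+ // Simulate an update coming from a TiDB instance that still holds the write-only schema: the old row
+ // is removed and re-added through writeOnlyTable with b changed to 2, and the query afterwards checks
+ // that the newly added column c keeps its value.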
+ c.Assert(writeOnlyTable, NotNil) + ctx := context.Background() + err = s.tk.Se.NewTxn(ctx) + c.Assert(err, IsNil) + oldRow, err := writeOnlyTable.RowWithCols(s.tk.Se, 1, writeOnlyTable.WritableCols()) + c.Assert(err, IsNil) + c.Assert(len(oldRow), Equals, 3) + err = writeOnlyTable.RemoveRecord(s.tk.Se, 1, oldRow) + c.Assert(err, IsNil) + _, err = writeOnlyTable.AddRecord(s.tk.Se, types.MakeDatums(oldRow[0].GetInt64(), 2, oldRow[2].GetInt64()), table.IsUpdate) + c.Assert(err, IsNil) + err = s.tk.Se.StmtCommit() + c.Assert(err, IsNil) + err = s.tk.Se.CommitTxn(ctx) + c.Assert(err, IsNil) + + s.tk.MustQuery("select a,b,c from t1").Check(testkit.Rows("1 2 1")) + + // Test for _tidb_rowid + var re *testkit.Result + s.mustExec(c, "create table t2 (a int);") + hook.OnJobRunBeforeExported = func(job *model.Job) { + if job.SchemaState != model.StateWriteOnly { + return + } + // allow write _tidb_rowid first + s.mustExec(c, "set @@tidb_opt_write_row_id=1") + s.mustExec(c, "begin") + s.mustExec(c, "insert into t2 (a,_tidb_rowid) values (1,2);") + re = s.tk.MustQuery(" select a,_tidb_rowid from t2;") + s.mustExec(c, "commit") + + } + s.dom.DDL().(ddl.DDLForTest).SetHook(hook) + + go backgroundExec(s.store, "alter table t2 add column b int not null default 3", done) + err = <-done + c.Assert(err, IsNil) + re.Check(testkit.Rows("1 2")) + s.tk.MustQuery("select a,b,_tidb_rowid from t2").Check(testkit.Rows("1 3 2")) +} + +func (s *testDBSuite4) TestIfNotExists(c *C) { + s.tk = testkit.NewTestKit(c, s.store) + s.tk.MustExec("use test_db") + s.mustExec(c, "drop table if exists t1") + s.mustExec(c, "create table t1 (a int key);") + + // ADD COLUMN + sql := "alter table t1 add column b int" + s.mustExec(c, sql) + s.tk.MustGetErrCode(sql, mysql.ErrDupFieldName) + s.mustExec(c, "alter table t1 add column if not exists b int") + c.Assert(s.tk.Se.GetSessionVars().StmtCtx.WarningCount(), Equals, uint16(1)) + s.tk.MustQuery("show warnings").Check(testutil.RowsWithSep("|", "Note|1060|Duplicate column name 'b'")) + + // ADD INDEX + sql = "alter table t1 add index idx_b (b)" + s.mustExec(c, sql) + s.tk.MustGetErrCode(sql, mysql.ErrDupKeyName) + s.mustExec(c, "alter table t1 add index if not exists idx_b (b)") + c.Assert(s.tk.Se.GetSessionVars().StmtCtx.WarningCount(), Equals, uint16(1)) + s.tk.MustQuery("show warnings").Check(testutil.RowsWithSep("|", "Note|1061|index already exist idx_b")) + + // CREATE INDEX + sql = "create index idx_b on t1 (b)" + s.tk.MustGetErrCode(sql, mysql.ErrDupKeyName) + s.mustExec(c, "create index if not exists idx_b on t1 (b)") + c.Assert(s.tk.Se.GetSessionVars().StmtCtx.WarningCount(), Equals, uint16(1)) + s.tk.MustQuery("show warnings").Check(testutil.RowsWithSep("|", "Note|1061|index already exist idx_b")) +} + +func (s *testDBSuite4) TestIfExists(c *C) { + s.tk = testkit.NewTestKit(c, s.store) + s.tk.MustExec("use test_db") + s.mustExec(c, "drop table if exists t1") + s.mustExec(c, "create table t1 (a int key, b int);") + + // DROP COLUMN + sql := "alter table t1 drop column b" + s.mustExec(c, sql) + s.tk.MustGetErrCode(sql, mysql.ErrCantDropFieldOrKey) + s.mustExec(c, "alter table t1 drop column if exists b") // only `a` exists now + c.Assert(s.tk.Se.GetSessionVars().StmtCtx.WarningCount(), Equals, uint16(1)) + s.tk.MustQuery("show warnings").Check(testutil.RowsWithSep("|", "Note|1091|column b doesn't exist")) + + // CHANGE COLUMN + sql = "alter table t1 change column b c int" + s.tk.MustGetErrCode(sql, mysql.ErrBadField) + s.mustExec(c, "alter table t1 change column if exists b c 
int") + c.Assert(s.tk.Se.GetSessionVars().StmtCtx.WarningCount(), Equals, uint16(1)) + s.tk.MustQuery("show warnings").Check(testutil.RowsWithSep("|", "Note|1054|Unknown column 'b' in 't1'")) + s.mustExec(c, "alter table t1 change column if exists a c int") // only `c` exists now + + // MODIFY COLUMN + sql = "alter table t1 modify column a bigint" + s.tk.MustGetErrCode(sql, mysql.ErrBadField) + s.mustExec(c, "alter table t1 modify column if exists a bigint") + c.Assert(s.tk.Se.GetSessionVars().StmtCtx.WarningCount(), Equals, uint16(1)) + s.tk.MustQuery("show warnings").Check(testutil.RowsWithSep("|", "Note|1054|Unknown column 'a' in 't1'")) + s.mustExec(c, "alter table t1 modify column if exists c bigint") // only `c` exists now + + // DROP INDEX + s.mustExec(c, "alter table t1 add index idx_c (c)") + sql = "alter table t1 drop index idx_c" + s.mustExec(c, sql) + s.tk.MustGetErrCode(sql, mysql.ErrCantDropFieldOrKey) + s.mustExec(c, "alter table t1 drop index if exists idx_c") + c.Assert(s.tk.Se.GetSessionVars().StmtCtx.WarningCount(), Equals, uint16(1)) + s.tk.MustQuery("show warnings").Check(testutil.RowsWithSep("|", "Note|1091|index idx_c doesn't exist")) +} + +func (s *testDBSuite1) TestModifyColumnCharset(c *C) { + s.tk = testkit.NewTestKit(c, s.store) + s.tk.MustExec("use test_db") + s.tk.MustExec("create table t_mcc(a varchar(8) charset utf8, b varchar(8) charset utf8)") + defer s.mustExec(c, "drop table t_mcc;") + + result := s.tk.MustQuery(`show create table t_mcc`) + result.Check(testkit.Rows( + "t_mcc CREATE TABLE `t_mcc` (\n" + + " `a` varchar(8) CHARACTER SET utf8 COLLATE utf8_bin DEFAULT NULL,\n" + + " `b` varchar(8) CHARACTER SET utf8 COLLATE utf8_bin DEFAULT NULL\n" + + ") ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin")) + + s.tk.MustExec("alter table t_mcc modify column a varchar(8);") + t := s.testGetTable(c, "t_mcc") + t.Meta().Version = model.TableInfoVersion0 + // When the table version is TableInfoVersion0, the following statement don't change "b" charset. + // So the behavior is not compatible with MySQL. + s.tk.MustExec("alter table t_mcc modify column b varchar(8);") + result = s.tk.MustQuery(`show create table t_mcc`) + result.Check(testkit.Rows( + "t_mcc CREATE TABLE `t_mcc` (\n" + + " `a` varchar(8) DEFAULT NULL,\n" + + " `b` varchar(8) CHARACTER SET utf8 COLLATE utf8_bin DEFAULT NULL\n" + + ") ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin")) + +} + +func (s *testDBSuite2) TestSkipSchemaChecker(c *C) { + s.tk = testkit.NewTestKit(c, s.store) + tk := s.tk + tk.MustExec("use test") + tk.MustExec("drop table if exists t1") + defer tk.MustExec("drop table if exists t1") + tk.MustExec("create table t1 (a int)") + tk2 := testkit.NewTestKit(c, s.store) + tk2.MustExec("use test") + + // Test can't skip schema checker. + tk.MustExec("begin") + tk.MustExec("insert into t1 set a=1;") + tk2.MustExec("alter table t1 add column b int;") + _, err := tk.Exec("commit") + c.Assert(terror.ErrorEqual(domain.ErrInfoSchemaChanged, err), IsTrue) +} + +func init() { + // Make sure it will only be executed once. + domain.SchemaOutOfDateRetryInterval = int64(50 * time.Millisecond) +} diff --git a/ddl/ddl.go b/ddl/ddl.go new file mode 100644 index 0000000..d56ffb7 --- /dev/null +++ b/ddl/ddl.go @@ -0,0 +1,574 @@ +// Copyright 2013 The ql Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSES/QL-LICENSE file. + +// Copyright 2015 PingCAP, Inc. 
+// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package ddl + +import ( + "context" + "fmt" + "sync" + "time" + + "github.com/google/uuid" + "github.com/ngaut/pools" + "github.com/pingcap/errors" + "github.com/pingcap/failpoint" + "github.com/pingcap/tidb/ddl/util" + "github.com/pingcap/tidb/infoschema" + "github.com/pingcap/tidb/kv" + "github.com/pingcap/tidb/meta" + "github.com/pingcap/tidb/owner" + "github.com/pingcap/tidb/parser/ast" + "github.com/pingcap/tidb/parser/model" + "github.com/pingcap/tidb/parser/mysql" + "github.com/pingcap/tidb/parser/terror" + "github.com/pingcap/tidb/sessionctx" + "github.com/pingcap/tidb/sessionctx/variable" + "github.com/pingcap/tidb/table" + tidbutil "github.com/pingcap/tidb/util" + "github.com/pingcap/tidb/util/logutil" + "go.uber.org/zap" +) + +const ( + // currentVersion is for all new DDL jobs. + currentVersion = 1 + // DDLOwnerKey is the ddl owner path that is saved to etcd, and it's exported for testing. + DDLOwnerKey = "/tidb/ddl/fg/owner" + ddlPrompt = "ddl" +) + +var ( + // TableColumnCountLimit is limit of the number of columns in a table. + // It's exported for testing. + TableColumnCountLimit = uint32(512) +) + +var ( + // errWorkerClosed means we have already closed the DDL worker. + errInvalidWorker = terror.ClassDDL.New(mysql.ErrInvalidDDLWorker, mysql.MySQLErrName[mysql.ErrInvalidDDLWorker]) + // errNotOwner means we are not owner and can't handle DDL jobs. + errNotOwner = terror.ClassDDL.New(mysql.ErrNotOwner, mysql.MySQLErrName[mysql.ErrNotOwner]) + errCantDecodeIndex = terror.ClassDDL.New(mysql.ErrCantDecodeIndex, mysql.MySQLErrName[mysql.ErrCantDecodeIndex]) + errInvalidDDLJob = terror.ClassDDL.New(mysql.ErrInvalidDDLJob, mysql.MySQLErrName[mysql.ErrInvalidDDLJob]) + errCancelledDDLJob = terror.ClassDDL.New(mysql.ErrCancelledDDLJob, mysql.MySQLErrName[mysql.ErrCancelledDDLJob]) + errRunMultiSchemaChanges = terror.ClassDDL.New(mysql.ErrUnsupportedDDLOperation, fmt.Sprintf(mysql.MySQLErrName[mysql.ErrUnsupportedDDLOperation], "multi schema change")) + errWaitReorgTimeout = terror.ClassDDL.New(mysql.ErrLockWaitTimeout, mysql.MySQLErrName[mysql.ErrWaitReorgTimeout]) + errInvalidStoreVer = terror.ClassDDL.New(mysql.ErrInvalidStoreVersion, mysql.MySQLErrName[mysql.ErrInvalidStoreVersion]) + + // We don't support dropping column with index covered now. 
+ errCantDropColWithIndex = terror.ClassDDL.New(mysql.ErrUnsupportedDDLOperation, fmt.Sprintf(mysql.MySQLErrName[mysql.ErrUnsupportedDDLOperation], "drop column with index")) + errUnsupportedAddColumn = terror.ClassDDL.New(mysql.ErrUnsupportedDDLOperation, fmt.Sprintf(mysql.MySQLErrName[mysql.ErrUnsupportedDDLOperation], "add column")) + errUnsupportedModifyColumn = terror.ClassDDL.New(mysql.ErrUnsupportedDDLOperation, fmt.Sprintf(mysql.MySQLErrName[mysql.ErrUnsupportedDDLOperation], "modify column: %s")) + errUnsupportedModifyCharset = terror.ClassDDL.New(mysql.ErrUnsupportedDDLOperation, fmt.Sprintf(mysql.MySQLErrName[mysql.ErrUnsupportedDDLOperation], "modify %s")) + errUnsupportedPKHandle = terror.ClassDDL.New(mysql.ErrUnsupportedDDLOperation, fmt.Sprintf(mysql.MySQLErrName[mysql.ErrUnsupportedDDLOperation], "drop integer primary key")) + errUnsupportedCharset = terror.ClassDDL.New(mysql.ErrUnsupportedDDLOperation, fmt.Sprintf(mysql.MySQLErrName[mysql.ErrUnsupportedDDLOperation], "charset %s and collate %s")) + errUnsupportedShardRowIDBits = terror.ClassDDL.New(mysql.ErrUnsupportedDDLOperation, fmt.Sprintf(mysql.MySQLErrName[mysql.ErrUnsupportedDDLOperation], "shard_row_id_bits for table with primary key as row id")) + errBlobKeyWithoutLength = terror.ClassDDL.New(mysql.ErrBlobKeyWithoutLength, mysql.MySQLErrName[mysql.ErrBlobKeyWithoutLength]) + errIncorrectPrefixKey = terror.ClassDDL.New(mysql.ErrWrongSubKey, mysql.MySQLErrName[mysql.ErrWrongSubKey]) + errTooLongKey = terror.ClassDDL.New(mysql.ErrTooLongKey, + fmt.Sprintf(mysql.MySQLErrName[mysql.ErrTooLongKey], maxPrefixLength)) + errKeyColumnDoesNotExits = terror.ClassDDL.New(mysql.ErrKeyColumnDoesNotExits, mysql.MySQLErrName[mysql.ErrKeyColumnDoesNotExits]) + errInvalidUseOfNull = terror.ClassDDL.New(mysql.ErrInvalidUseOfNull, mysql.MySQLErrName[mysql.ErrInvalidUseOfNull]) + errTooManyFields = terror.ClassDDL.New(mysql.ErrTooManyFields, mysql.MySQLErrName[mysql.ErrTooManyFields]) + errInvalidSplitRegionRanges = terror.ClassDDL.New(mysql.ErrInvalidSplitRegionRanges, mysql.MySQLErrName[mysql.ErrInvalidSplitRegionRanges]) + errReorgPanic = terror.ClassDDL.New(mysql.ErrReorgPanic, mysql.MySQLErrName[mysql.ErrReorgPanic]) + + // errWrongKeyColumn is for table column cannot be indexed. + errWrongKeyColumn = terror.ClassDDL.New(mysql.ErrWrongKeyColumn, mysql.MySQLErrName[mysql.ErrWrongKeyColumn]) + // errJSONUsedAsKey forbids to use JSON as key or index. + errJSONUsedAsKey = terror.ClassDDL.New(mysql.ErrJSONUsedAsKey, mysql.MySQLErrName[mysql.ErrJSONUsedAsKey]) + // errBlobCantHaveDefault forbids to give not null default value to TEXT/BLOB/JSON. + errBlobCantHaveDefault = terror.ClassDDL.New(mysql.ErrBlobCantHaveDefault, mysql.MySQLErrName[mysql.ErrBlobCantHaveDefault]) + errTooLongIndexComment = terror.ClassDDL.New(mysql.ErrTooLongIndexComment, mysql.MySQLErrName[mysql.ErrTooLongIndexComment]) + // ErrInvalidDefaultValue returns for invalid default value for columns. + ErrInvalidDefaultValue = terror.ClassDDL.New(mysql.ErrInvalidDefault, mysql.MySQLErrName[mysql.ErrInvalidDefault]) + // ErrGeneratedColumnRefAutoInc forbids to refer generated columns to auto-increment columns . + ErrGeneratedColumnRefAutoInc = terror.ClassDDL.New(mysql.ErrGeneratedColumnRefAutoInc, mysql.MySQLErrName[mysql.ErrGeneratedColumnRefAutoInc]) + // ErrGeneratedColumnFunctionIsNotAllowed returns for unsupported functions for generated columns. 
+ ErrGeneratedColumnFunctionIsNotAllowed = terror.ClassDDL.New(mysql.ErrGeneratedColumnFunctionIsNotAllowed, mysql.MySQLErrName[mysql.ErrGeneratedColumnFunctionIsNotAllowed]) + errUnsupportedIndexType = terror.ClassDDL.New(mysql.ErrUnsupportedDDLOperation, fmt.Sprintf(mysql.MySQLErrName[mysql.ErrUnsupportedDDLOperation], "index type")) + + // ErrDupKeyName returns for duplicated key name + ErrDupKeyName = terror.ClassDDL.New(mysql.ErrDupKeyName, mysql.MySQLErrName[mysql.ErrDupKeyName]) + // ErrInvalidDDLState returns for invalid ddl model object state. + ErrInvalidDDLState = terror.ClassDDL.New(mysql.ErrInvalidDDLState, fmt.Sprintf(mysql.MySQLErrName[mysql.ErrInvalidDDLState])) + // ErrUnsupportedModifyPrimaryKey returns an error when add or drop the primary key. + // It's exported for testing. + ErrUnsupportedModifyPrimaryKey = terror.ClassDDL.New(mysql.ErrUnsupportedDDLOperation, fmt.Sprintf(mysql.MySQLErrName[mysql.ErrUnsupportedDDLOperation], "%s primary key")) + + // ErrBadField forbids to refer to unknown column. + ErrBadField = terror.ClassDDL.New(mysql.ErrBadField, mysql.MySQLErrName[mysql.ErrBadField]) + // ErrCantRemoveAllFields returns for deleting all columns. + ErrCantRemoveAllFields = terror.ClassDDL.New(mysql.ErrCantRemoveAllFields, mysql.MySQLErrName[mysql.ErrCantRemoveAllFields]) + // ErrCantDropFieldOrKey returns for dropping a non-existent field or key. + ErrCantDropFieldOrKey = terror.ClassDDL.New(mysql.ErrCantDropFieldOrKey, mysql.MySQLErrName[mysql.ErrCantDropFieldOrKey]) + // ErrInvalidOnUpdate returns for invalid ON UPDATE clause. + ErrInvalidOnUpdate = terror.ClassDDL.New(mysql.ErrInvalidOnUpdate, mysql.MySQLErrName[mysql.ErrInvalidOnUpdate]) + // ErrTooLongIdent returns for too long name of database/table/column/index. + ErrTooLongIdent = terror.ClassDDL.New(mysql.ErrTooLongIdent, mysql.MySQLErrName[mysql.ErrTooLongIdent]) + // ErrWrongDBName returns for wrong database name. + ErrWrongDBName = terror.ClassDDL.New(mysql.ErrWrongDBName, mysql.MySQLErrName[mysql.ErrWrongDBName]) + // ErrWrongTableName returns for wrong table name. + ErrWrongTableName = terror.ClassDDL.New(mysql.ErrWrongTableName, mysql.MySQLErrName[mysql.ErrWrongTableName]) + // ErrWrongColumnName returns for wrong column name. + ErrWrongColumnName = terror.ClassDDL.New(mysql.ErrWrongColumnName, mysql.MySQLErrName[mysql.ErrWrongColumnName]) + // ErrInvalidGroupFuncUse returns for using invalid group functions. + ErrInvalidGroupFuncUse = terror.ClassDDL.New(mysql.ErrInvalidGroupFuncUse, mysql.MySQLErrName[mysql.ErrInvalidGroupFuncUse]) + // ErrTableMustHaveColumns returns for missing column when creating a table. + ErrTableMustHaveColumns = terror.ClassDDL.New(mysql.ErrTableMustHaveColumns, mysql.MySQLErrName[mysql.ErrTableMustHaveColumns]) + // ErrWrongNameForIndex returns for wrong index name. + ErrWrongNameForIndex = terror.ClassDDL.New(mysql.ErrWrongNameForIndex, mysql.MySQLErrName[mysql.ErrWrongNameForIndex]) + // ErrUnknownCharacterSet returns unknown character set. + ErrUnknownCharacterSet = terror.ClassDDL.New(mysql.ErrUnknownCharacterSet, mysql.MySQLErrName[mysql.ErrUnknownCharacterSet]) + // ErrCollationCharsetMismatch returns when collation not match the charset. + ErrCollationCharsetMismatch = terror.ClassDDL.New(mysql.ErrCollationCharsetMismatch, mysql.MySQLErrName[mysql.ErrCollationCharsetMismatch]) + // ErrConflictingDeclarations return conflict declarations. 
+ ErrConflictingDeclarations = terror.ClassDDL.New(mysql.ErrConflictingDeclarations, fmt.Sprintf(mysql.MySQLErrName[mysql.ErrConflictingDeclarations], "CHARACTER SET ", "%s", "CHARACTER SET ", "%s")) + // ErrPrimaryCantHaveNull returns All parts of a PRIMARY KEY must be NOT NULL; if you need NULL in a key, use UNIQUE instead + ErrPrimaryCantHaveNull = terror.ClassDDL.New(mysql.ErrPrimaryCantHaveNull, mysql.MySQLErrName[mysql.ErrPrimaryCantHaveNull]) + // ErrErrorOnRename returns error for wrong database name in alter table rename + ErrErrorOnRename = terror.ClassDDL.New(mysql.ErrErrorOnRename, mysql.MySQLErrName[mysql.ErrErrorOnRename]) + + // ErrPartitionMgmtOnNonpartitioned returns it's not a partition table. + ErrPartitionMgmtOnNonpartitioned = terror.ClassDDL.New(mysql.ErrPartitionMgmtOnNonpartitioned, mysql.MySQLErrName[mysql.ErrPartitionMgmtOnNonpartitioned]) + // ErrWarnDataTruncated returns data truncated error. + ErrWarnDataTruncated = terror.ClassDDL.New(mysql.WarnDataTruncated, mysql.MySQLErrName[mysql.WarnDataTruncated]) + // ErrAlterOperationNotSupported returns when alter operations is not supported. + ErrAlterOperationNotSupported = terror.ClassDDL.New(mysql.ErrAlterOperationNotSupportedReason, mysql.MySQLErrName[mysql.ErrAlterOperationNotSupportedReason]) + // ErrTableCantHandleFt returns FULLTEXT keys are not supported by table type + ErrTableCantHandleFt = terror.ClassDDL.New(mysql.ErrTableCantHandleFt, mysql.MySQLErrName[mysql.ErrTableCantHandleFt]) +) + +// DDL is responsible for updating schema in data store and maintaining in-memory InfoSchema cache. +type DDL interface { + CreateSchema(ctx sessionctx.Context, name model.CIStr, charsetInfo *ast.CharsetOpt) error + DropSchema(ctx sessionctx.Context, schema model.CIStr) error + CreateTable(ctx sessionctx.Context, stmt *ast.CreateTableStmt) error + DropTable(ctx sessionctx.Context, tableIdent ast.Ident) (err error) + CreateIndex(ctx sessionctx.Context, tableIdent ast.Ident, keyType ast.IndexKeyType, indexName model.CIStr, + columnNames []*ast.IndexPartSpecification, indexOption *ast.IndexOption, ifNotExists bool) error + DropIndex(ctx sessionctx.Context, tableIdent ast.Ident, indexName model.CIStr, ifExists bool) error + AlterTable(ctx sessionctx.Context, tableIdent ast.Ident, spec []*ast.AlterTableSpec) error + + // GetLease returns current schema lease time. + GetLease() time.Duration + // Stats returns the DDL statistics. + Stats(vars *variable.SessionVars) (map[string]interface{}, error) + // GetScope gets the status variables scope. + GetScope(status string) variable.ScopeFlag + // Stop stops DDL worker. + Stop() error + // SchemaSyncer gets the schema syncer. + SchemaSyncer() util.SchemaSyncer + // OwnerManager gets the owner manager. + OwnerManager() owner.Manager + // GetID gets the ddl ID. + GetID() string + // GetTableMaxRowID gets the max row ID of a normal table or a partition. + GetTableMaxRowID(startTS uint64, tbl table.PhysicalTable) (int64, bool, error) + // GetHook gets the hook. It's exported for testing. + GetHook() Callback +} + +// ddl is used to handle the statements that define the structure or schema of the database. +type ddl struct { + m sync.RWMutex + quitCh chan struct{} + + *ddlCtx + workers map[workerType]*worker + sessPool *sessionPool +} + +// ddlCtx is the context when we use worker to handle DDL jobs. 
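+// A single ddlCtx is shared by every worker of one ddl instance; it carries the storage handle, the
+// owner manager, the schema syncer, the schema lease, and the mutex-guarded hook/interceptor pair that
+// tests replace through the DDLForTest SetHook helper.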
+type ddlCtx struct { + uuid string + store kv.Storage + ownerManager owner.Manager + schemaSyncer util.SchemaSyncer + ddlJobDoneCh chan struct{} + lease time.Duration // lease is schema lease. + infoHandle *infoschema.Handle + + // hook may be modified. + mu struct { + sync.RWMutex + hook Callback + interceptor Interceptor + } +} + +func (dc *ddlCtx) isOwner() bool { + isOwner := dc.ownerManager.IsOwner() + logutil.BgLogger().Debug("[ddl] check whether is the DDL owner", zap.Bool("isOwner", isOwner), zap.String("selfID", dc.uuid)) + if isOwner { + + } + return isOwner +} + +// NewDDL creates a new DDL. +func NewDDL(ctx context.Context, options ...Option) DDL { + return newDDL(ctx, options...) +} + +func newDDL(ctx context.Context, options ...Option) *ddl { + opt := &Options{ + Hook: &BaseCallback{}, + } + for _, o := range options { + o(opt) + } + + id := uuid.New().String() + ctx, cancelFunc := context.WithCancel(ctx) + var manager owner.Manager + var syncer util.SchemaSyncer + if etcdCli := opt.EtcdCli; etcdCli == nil { + // The etcdCli is nil if the store is localstore which is only used for testing. + // So we use mockOwnerManager and MockSchemaSyncer. + manager = owner.NewMockManager(id, cancelFunc) + syncer = NewMockSchemaSyncer() + } else { + manager = owner.NewOwnerManager(etcdCli, ddlPrompt, id, DDLOwnerKey, cancelFunc) + syncer = util.NewSchemaSyncer(etcdCli, id, manager) + } + + ddlCtx := &ddlCtx{ + uuid: id, + store: opt.Store, + lease: opt.Lease, + ddlJobDoneCh: make(chan struct{}, 1), + ownerManager: manager, + schemaSyncer: syncer, + infoHandle: opt.InfoHandle, + } + ddlCtx.mu.hook = opt.Hook + ddlCtx.mu.interceptor = &BaseInterceptor{} + d := &ddl{ + ddlCtx: ddlCtx, + } + + d.start(ctx, opt.ResourcePool) + variable.RegisterStatistics(d) + + return d +} + +// Stop implements DDL.Stop interface. +func (d *ddl) Stop() error { + d.m.Lock() + defer d.m.Unlock() + + d.close() + logutil.BgLogger().Info("[ddl] stop DDL", zap.String("ID", d.uuid)) + return nil +} + +// start campaigns the owner and starts workers. +// ctxPool is used for the worker's delRangeManager and creates sessions. +func (d *ddl) start(ctx context.Context, ctxPool *pools.ResourcePool) { + logutil.BgLogger().Info("[ddl] start DDL", zap.String("ID", d.uuid)) + d.quitCh = make(chan struct{}) + + err := d.ownerManager.CampaignOwner(ctx) + terror.Log(errors.Trace(err)) + + d.workers = make(map[workerType]*worker, 2) + d.sessPool = newSessionPool(ctxPool) + d.workers[generalWorker] = newWorker(generalWorker, d.sessPool) + d.workers[addIdxWorker] = newWorker(addIdxWorker, d.sessPool) + for _, worker := range d.workers { + worker.wg.Add(1) + w := worker + go tidbutil.WithRecovery( + func() { w.start(d.ddlCtx) }, + func(r interface{}) { + if r != nil { + logutil.Logger(w.logCtx).Error("[ddl] DDL worker meet panic", zap.String("ID", d.uuid)) + + } + }) + + // When the start function is called, we will send a fake job to let worker + // checks owner firstly and try to find whether a job exists and run. 
+ asyncNotify(worker.ddlJobCh) + } + + go tidbutil.WithRecovery( + func() { d.schemaSyncer.StartCleanWork() }, + func(r interface{}) { + if r != nil { + logutil.BgLogger().Error("[ddl] DDL syncer clean worker meet panic", + zap.String("ID", d.uuid), zap.Reflect("r", r), zap.Stack("stack trace")) + + } + }) + +} + +func (d *ddl) close() { + if isChanClosed(d.quitCh) { + return + } + + startTime := time.Now() + close(d.quitCh) + d.ownerManager.Cancel() + d.schemaSyncer.CloseCleanWork() + err := d.schemaSyncer.RemoveSelfVersionPath() + if err != nil { + logutil.BgLogger().Error("[ddl] remove self version path failed", zap.Error(err)) + } + + for _, worker := range d.workers { + worker.close() + } + if d.sessPool != nil { + d.sessPool.close() + } + + logutil.BgLogger().Info("[ddl] DDL closed", zap.String("ID", d.uuid), zap.Duration("take time", time.Since(startTime))) +} + +// GetLease implements DDL.GetLease interface. +func (d *ddl) GetLease() time.Duration { + d.m.RLock() + lease := d.lease + d.m.RUnlock() + return lease +} + +// GetInfoSchemaWithInterceptor gets the infoschema binding to d. It's exported for testing. +// Please don't use this function, it is used by TestParallelDDLBeforeRunDDLJob to intercept the calling of d.infoHandle.Get(), use d.infoHandle.Get() instead. +// Otherwise, the TestParallelDDLBeforeRunDDLJob will hang up forever. +func (d *ddl) GetInfoSchemaWithInterceptor(ctx sessionctx.Context) infoschema.InfoSchema { + is := d.infoHandle.Get() + + d.mu.RLock() + defer d.mu.RUnlock() + return d.mu.interceptor.OnGetInfoSchema(ctx, is) +} + +func (d *ddl) genGlobalIDs(count int) ([]int64, error) { + var ret []int64 + err := kv.RunInNewTxn(d.store, true, func(txn kv.Transaction) error { + failpoint.Inject("mockGenGlobalIDFail", func(val failpoint.Value) { + if val.(bool) { + failpoint.Return(errors.New("gofail genGlobalIDs error")) + } + }) + + m := meta.NewMeta(txn) + var err error + ret, err = m.GenGlobalIDs(count) + return err + }) + + return ret, err +} + +// SchemaSyncer implements DDL.SchemaSyncer interface. +func (d *ddl) SchemaSyncer() util.SchemaSyncer { + return d.schemaSyncer +} + +// OwnerManager implements DDL.OwnerManager interface. +func (d *ddl) OwnerManager() owner.Manager { + return d.ownerManager +} + +// GetID implements DDL.GetID interface. +func (d *ddl) GetID() string { + return d.uuid +} + +func checkJobMaxInterval(job *model.Job) time.Duration { + // The job of adding index takes more time to process. + // So it uses the longer time. + if job.Type == model.ActionAddIndex || job.Type == model.ActionAddPrimaryKey { + return 3 * time.Second + } + if job.Type == model.ActionCreateTable || job.Type == model.ActionCreateSchema { + return 500 * time.Millisecond + } + return 1 * time.Second +} + +func (d *ddl) asyncNotifyWorker(jobTp model.ActionType) { + if jobTp == model.ActionAddIndex || jobTp == model.ActionAddPrimaryKey { + asyncNotify(d.workers[addIdxWorker].ddlJobCh) + } else { + asyncNotify(d.workers[generalWorker].ddlJobCh) + } +} + +func (d *ddl) doDDLJob(ctx sessionctx.Context, job *model.Job) error { + // Get a global job ID and put the DDL job in the queue. + err := d.addDDLJob(ctx, job) + if err != nil { + return errors.Trace(err) + } + ctx.GetSessionVars().StmtCtx.IsDDLJobInQueue = true + + // Notice worker that we push a new job and wait the job done. 
+ d.asyncNotifyWorker(job.Type) + logutil.BgLogger().Info("[ddl] start DDL job", zap.String("job", job.String()), zap.String("query", job.Query)) + + var historyJob *model.Job + jobID := job.ID + // For a job from start to end, the state of it will be none -> delete only -> write only -> reorganization -> public + // For every state changes, we will wait as lease 2 * lease time, so here the ticker check is 10 * lease. + // But we use etcd to speed up, normally it takes less than 0.5s now, so we use 0.5s or 1s or 3s as the max value. + ticker := time.NewTicker(chooseLeaseTime(10*d.lease, checkJobMaxInterval(job))) + + defer func() { + ticker.Stop() + }() + for { + select { + case <-d.ddlJobDoneCh: + case <-ticker.C: + } + + historyJob, err = d.getHistoryDDLJob(jobID) + if err != nil { + logutil.BgLogger().Error("[ddl] get history DDL job failed, check again", zap.Error(err)) + continue + } else if historyJob == nil { + logutil.BgLogger().Debug("[ddl] DDL job is not in history, maybe not run", zap.Int64("jobID", jobID)) + continue + } + + // If a job is a history job, the state must be JobStateSynced or JobStateRollbackDone or JobStateCancelled. + if historyJob.IsSynced() { + logutil.BgLogger().Info("[ddl] DDL job is finished", zap.Int64("jobID", jobID)) + return nil + } + + if historyJob.Error != nil { + return errors.Trace(historyJob.Error) + } + panic("When the state is JobStateRollbackDone or JobStateCancelled, historyJob.Error should never be nil") + } +} + +func (d *ddl) callHookOnChanged(err error) error { + d.mu.RLock() + defer d.mu.RUnlock() + + err = d.mu.hook.OnChanged(err) + return errors.Trace(err) +} + +// GetHook implements DDL.GetHook interface. +func (d *ddl) GetHook() Callback { + d.mu.Lock() + defer d.mu.Unlock() + + return d.mu.hook +} + +func init() { + ddlMySQLErrCodes := map[terror.ErrCode]uint16{ + mysql.ErrAlterOperationNotSupportedReason: mysql.ErrAlterOperationNotSupportedReason, + mysql.ErrBadField: mysql.ErrBadField, + mysql.ErrBadNull: mysql.ErrBadNull, + mysql.ErrBlobCantHaveDefault: mysql.ErrBlobCantHaveDefault, + mysql.ErrBlobKeyWithoutLength: mysql.ErrBlobKeyWithoutLength, + mysql.ErrCancelledDDLJob: mysql.ErrCancelledDDLJob, + mysql.ErrCantDecodeIndex: mysql.ErrCantDecodeIndex, + mysql.ErrCantDropFieldOrKey: mysql.ErrCantDropFieldOrKey, + mysql.ErrCantRemoveAllFields: mysql.ErrCantRemoveAllFields, + mysql.ErrCoalesceOnlyOnHashPartition: mysql.ErrCoalesceOnlyOnHashPartition, + mysql.ErrCollationCharsetMismatch: mysql.ErrCollationCharsetMismatch, + mysql.ErrConflictingDeclarations: mysql.ErrConflictingDeclarations, + mysql.ErrDependentByGeneratedColumn: mysql.ErrDependentByGeneratedColumn, + mysql.ErrDropLastPartition: mysql.ErrDropLastPartition, + mysql.ErrDropPartitionNonExistent: mysql.ErrDropPartitionNonExistent, + mysql.ErrDupKeyName: mysql.ErrDupKeyName, + mysql.ErrErrorOnRename: mysql.ErrErrorOnRename, + mysql.ErrFieldNotFoundPart: mysql.ErrFieldNotFoundPart, + mysql.ErrFieldTypeNotAllowedAsPartitionField: mysql.ErrFieldTypeNotAllowedAsPartitionField, + mysql.ErrFileNotFound: mysql.ErrFileNotFound, + mysql.ErrGeneratedColumnFunctionIsNotAllowed: mysql.ErrGeneratedColumnFunctionIsNotAllowed, + mysql.ErrGeneratedColumnNonPrior: mysql.ErrGeneratedColumnNonPrior, + mysql.ErrGeneratedColumnRefAutoInc: mysql.ErrGeneratedColumnRefAutoInc, + mysql.ErrInvalidDDLJob: mysql.ErrInvalidDDLJob, + mysql.ErrInvalidDDLState: mysql.ErrInvalidDDLState, + mysql.ErrInvalidDDLWorker: mysql.ErrInvalidDDLWorker, + mysql.ErrInvalidDefault: mysql.ErrInvalidDefault, + 
mysql.ErrInvalidGroupFuncUse: mysql.ErrInvalidGroupFuncUse, + mysql.ErrInvalidDDLJobFlag: mysql.ErrInvalidDDLJobFlag, + mysql.ErrInvalidDDLJobVersion: mysql.ErrInvalidDDLJobVersion, + mysql.ErrInvalidOnUpdate: mysql.ErrInvalidOnUpdate, + mysql.ErrInvalidSplitRegionRanges: mysql.ErrInvalidSplitRegionRanges, + mysql.ErrInvalidStoreVersion: mysql.ErrInvalidStoreVersion, + mysql.ErrInvalidUseOfNull: mysql.ErrInvalidUseOfNull, + mysql.ErrJSONUsedAsKey: mysql.ErrJSONUsedAsKey, + mysql.ErrKeyColumnDoesNotExits: mysql.ErrKeyColumnDoesNotExits, + mysql.ErrLockWaitTimeout: mysql.ErrLockWaitTimeout, + mysql.ErrNoParts: mysql.ErrNoParts, + mysql.ErrNotOwner: mysql.ErrNotOwner, + mysql.ErrOnlyOnRangeListPartition: mysql.ErrOnlyOnRangeListPartition, + mysql.ErrPartitionColumnList: mysql.ErrPartitionColumnList, + mysql.ErrPartitionFuncNotAllowed: mysql.ErrPartitionFuncNotAllowed, + mysql.ErrPartitionFunctionIsNotAllowed: mysql.ErrPartitionFunctionIsNotAllowed, + mysql.ErrPartitionMaxvalue: mysql.ErrPartitionMaxvalue, + mysql.ErrPartitionMgmtOnNonpartitioned: mysql.ErrPartitionMgmtOnNonpartitioned, + mysql.ErrPartitionRequiresValues: mysql.ErrPartitionRequiresValues, + mysql.ErrPartitionWrongNoPart: mysql.ErrPartitionWrongNoPart, + mysql.ErrPartitionWrongNoSubpart: mysql.ErrPartitionWrongNoSubpart, + mysql.ErrPartitionWrongValues: mysql.ErrPartitionWrongValues, + mysql.ErrPartitionsMustBeDefined: mysql.ErrPartitionsMustBeDefined, + mysql.ErrPrimaryCantHaveNull: mysql.ErrPrimaryCantHaveNull, + mysql.ErrRangeNotIncreasing: mysql.ErrRangeNotIncreasing, + mysql.ErrRowSinglePartitionField: mysql.ErrRowSinglePartitionField, + mysql.ErrSameNamePartition: mysql.ErrSameNamePartition, + mysql.ErrSubpartition: mysql.ErrSubpartition, + mysql.ErrSystemVersioningWrongPartitions: mysql.ErrSystemVersioningWrongPartitions, + mysql.ErrTableCantHandleFt: mysql.ErrTableCantHandleFt, + mysql.ErrTableMustHaveColumns: mysql.ErrTableMustHaveColumns, + mysql.ErrTooLongIdent: mysql.ErrTooLongIdent, + mysql.ErrTooLongIndexComment: mysql.ErrTooLongIndexComment, + mysql.ErrTooLongKey: mysql.ErrTooLongKey, + mysql.ErrTooManyFields: mysql.ErrTooManyFields, + mysql.ErrTooManyPartitions: mysql.ErrTooManyPartitions, + mysql.ErrTooManyValues: mysql.ErrTooManyValues, + mysql.ErrUniqueKeyNeedAllFieldsInPf: mysql.ErrUniqueKeyNeedAllFieldsInPf, + mysql.ErrUnknownCharacterSet: mysql.ErrUnknownCharacterSet, + mysql.ErrUnknownCollation: mysql.ErrUnknownCollation, + mysql.ErrUnknownPartition: mysql.ErrUnknownPartition, + mysql.ErrUnsupportedDDLOperation: mysql.ErrUnsupportedDDLOperation, + mysql.ErrUnsupportedOnGeneratedColumn: mysql.ErrUnsupportedOnGeneratedColumn, + mysql.ErrViewWrongList: mysql.ErrViewWrongList, + mysql.ErrWrongColumnName: mysql.ErrWrongColumnName, + mysql.ErrWrongDBName: mysql.ErrWrongDBName, + mysql.ErrWrongExprInPartitionFunc: mysql.ErrWrongExprInPartitionFunc, + mysql.ErrWrongFKOptionForGeneratedColumn: mysql.ErrWrongFKOptionForGeneratedColumn, + mysql.ErrWrongKeyColumn: mysql.ErrWrongKeyColumn, + mysql.ErrWrongNameForIndex: mysql.ErrWrongNameForIndex, + mysql.ErrWrongObject: mysql.ErrWrongObject, + mysql.ErrWrongPartitionTypeExpectedSystemTime: mysql.ErrWrongPartitionTypeExpectedSystemTime, + mysql.ErrWrongSubKey: mysql.ErrWrongSubKey, + mysql.ErrWrongTableName: mysql.ErrWrongTableName, + mysql.ErrWrongTypeColumnValue: mysql.ErrWrongTypeColumnValue, + mysql.WarnDataTruncated: mysql.WarnDataTruncated, + } + terror.ErrClassToMySQLCodes[terror.ClassDDL] = ddlMySQLErrCodes +} diff --git a/ddl/ddl_api.go b/ddl/ddl_api.go new 
file mode 100644 index 0000000..f1c1f48 --- /dev/null +++ b/ddl/ddl_api.go @@ -0,0 +1,1951 @@ +// Copyright 2013 The ql Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSES/QL-LICENSE file. + +// Copyright 2016 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package ddl + +import ( + "fmt" + "strings" + "sync/atomic" + + "github.com/pingcap/errors" + "github.com/pingcap/tidb/expression" + "github.com/pingcap/tidb/infoschema" + "github.com/pingcap/tidb/parser/ast" + "github.com/pingcap/tidb/parser/charset" + "github.com/pingcap/tidb/parser/model" + "github.com/pingcap/tidb/parser/mysql" + field_types "github.com/pingcap/tidb/parser/types" + "github.com/pingcap/tidb/sessionctx" + "github.com/pingcap/tidb/sessionctx/variable" + "github.com/pingcap/tidb/table" + "github.com/pingcap/tidb/table/tables" + "github.com/pingcap/tidb/types" + "github.com/pingcap/tidb/util/mock" + "github.com/pingcap/tidb/util/set" +) + +func (d *ddl) CreateSchema(ctx sessionctx.Context, schema model.CIStr, charsetInfo *ast.CharsetOpt) (err error) { + is := d.GetInfoSchemaWithInterceptor(ctx) + _, ok := is.SchemaByName(schema) + if ok { + return infoschema.ErrDatabaseExists.GenWithStackByArgs(schema) + } + + if err = checkTooLongSchema(schema); err != nil { + return errors.Trace(err) + } + + genIDs, err := d.genGlobalIDs(1) + if err != nil { + return errors.Trace(err) + } + schemaID := genIDs[0] + dbInfo := &model.DBInfo{ + Name: schema, + } + + if charsetInfo != nil { + err = checkCharsetAndCollation(charsetInfo.Chs, charsetInfo.Col) + if err != nil { + return errors.Trace(err) + } + dbInfo.Charset = charsetInfo.Chs + dbInfo.Collate = charsetInfo.Col + } else { + dbInfo.Charset, dbInfo.Collate = charset.GetDefaultCharsetAndCollate() + } + + job := &model.Job{ + SchemaID: schemaID, + SchemaName: dbInfo.Name.L, + Type: model.ActionCreateSchema, + BinlogInfo: &model.HistoryInfo{}, + Args: []interface{}{dbInfo}, + } + + err = d.doDDLJob(ctx, job) + err = d.callHookOnChanged(err) + return errors.Trace(err) +} + +func (d *ddl) DropSchema(ctx sessionctx.Context, schema model.CIStr) (err error) { + is := d.GetInfoSchemaWithInterceptor(ctx) + old, ok := is.SchemaByName(schema) + if !ok { + return errors.Trace(infoschema.ErrDatabaseNotExists) + } + job := &model.Job{ + SchemaID: old.ID, + SchemaName: old.Name.L, + Type: model.ActionDropSchema, + BinlogInfo: &model.HistoryInfo{}, + } + + err = d.doDDLJob(ctx, job) + err = d.callHookOnChanged(err) + return errors.Trace(err) +} + +func checkTooLongSchema(schema model.CIStr) error { + if len(schema.L) > mysql.MaxDatabaseNameLength { + return ErrTooLongIdent.GenWithStackByArgs(schema) + } + return nil +} + +func checkTooLongTable(table model.CIStr) error { + if len(table.L) > mysql.MaxTableNameLength { + return ErrTooLongIdent.GenWithStackByArgs(table) + } + return nil +} + +func checkTooLongIndex(index model.CIStr) error { + if len(index.L) > mysql.MaxIndexIdentifierLen { + return ErrTooLongIdent.GenWithStackByArgs(index) + } + return nil +} + +func 
setColumnFlagWithConstraint(colMap map[string]*table.Column, v *ast.Constraint) { + switch v.Tp { + case ast.ConstraintPrimaryKey: + for _, key := range v.Keys { + c, ok := colMap[key.Column.Name.L] + if !ok { + continue + } + c.Flag |= mysql.PriKeyFlag + // Primary key can not be NULL. + c.Flag |= mysql.NotNullFlag + } + case ast.ConstraintUniq, ast.ConstraintUniqIndex, ast.ConstraintUniqKey: + for i, key := range v.Keys { + c, ok := colMap[key.Column.Name.L] + if !ok { + continue + } + if i == 0 { + // Only the first column can be set + // if unique index has multi columns, + // the flag should be MultipleKeyFlag. + // See https://dev.mysql.com/doc/refman/5.7/en/show-columns.html + if len(v.Keys) > 1 { + c.Flag |= mysql.MultipleKeyFlag + } else { + c.Flag |= mysql.UniqueKeyFlag + } + } + } + case ast.ConstraintKey, ast.ConstraintIndex: + for i, key := range v.Keys { + c, ok := colMap[key.Column.Name.L] + if !ok { + continue + } + if i == 0 { + // Only the first column can be set. + c.Flag |= mysql.MultipleKeyFlag + } + } + } +} + +func buildColumnsAndConstraints(ctx sessionctx.Context, colDefs []*ast.ColumnDef, + constraints []*ast.Constraint) ([]*table.Column, []*ast.Constraint, error) { + colMap := map[string]*table.Column{} + // outPriKeyConstraint is the primary key constraint out of column definition. such as: create table t1 (id int , age int, primary key(id)); + var outPriKeyConstraint *ast.Constraint + for _, v := range constraints { + if v.Tp == ast.ConstraintPrimaryKey { + outPriKeyConstraint = v + break + } + } + cols := make([]*table.Column, 0, len(colDefs)) + for i, colDef := range colDefs { + col, cts, err := buildColumnAndConstraint(ctx, i, colDef, outPriKeyConstraint) + if err != nil { + return nil, nil, errors.Trace(err) + } + col.State = model.StatePublic + constraints = append(constraints, cts...) + cols = append(cols, col) + colMap[colDef.Name.Name.L] = col + } + // Traverse table Constraints and set col.flag. + for _, v := range constraints { + setColumnFlagWithConstraint(colMap, v) + } + return cols, constraints, nil +} + +// ResolveCharsetCollation will resolve the charset by the order: table charset > database charset > server default charset, +// and it will also resolve the collate by the order: table collate > database collate > server default collate. +func ResolveCharsetCollation(tblCharset, tblCollate, dbCharset, dbCollate string) (string, string, error) { + if len(tblCharset) != 0 { + // tblCollate is not specified by user. + if len(tblCollate) == 0 { + defCollate, err := charset.GetDefaultCollation(tblCharset) + if err != nil { + // return terror is better. + return "", "", ErrUnknownCharacterSet.GenWithStackByArgs(tblCharset) + } + return tblCharset, defCollate, nil + } + return tblCharset, tblCollate, nil + } + + if len(dbCharset) != 0 { + // dbCollate is not specified by user. 
+ if len(dbCollate) == 0 { + defCollate, err := charset.GetDefaultCollation(dbCharset) + if err != nil { + return "", "", ErrUnknownCharacterSet.GenWithStackByArgs(dbCharset) + } + return dbCharset, defCollate, nil + } + return dbCharset, dbCollate, nil + } + + charset, collate := charset.GetDefaultCharsetAndCollate() + return charset, collate, nil +} + +func typesNeedCharset(tp byte) bool { + switch tp { + case mysql.TypeString, mysql.TypeVarchar, mysql.TypeVarString, + mysql.TypeBlob, mysql.TypeTinyBlob, mysql.TypeMediumBlob, mysql.TypeLongBlob, + mysql.TypeEnum, mysql.TypeSet: + return true + } + return false +} + +func setCharsetCollationFlenDecimal(tp *types.FieldType, specifiedCollates []string) error { + tp.Charset = strings.ToLower(tp.Charset) + tp.Collate = strings.ToLower(tp.Collate) + if len(tp.Charset) == 0 { + if typesNeedCharset(tp.Tp) { + if len(specifiedCollates) == 0 { + // Both the charset and collate are not specified. + tp.Charset, tp.Collate = charset.GetDefaultCharsetAndCollate() + } else { + // The charset is not specified but the collate is. + // We should derive charset from it's collate specified rather than getting from table and db. + // It is handled like mysql's logic, use derived charset to judge conflict with next collate. + for _, spc := range specifiedCollates { + derivedCollation, err := charset.GetCollationByName(spc) + if err != nil { + return errors.Trace(err) + } + if len(tp.Charset) == 0 { + tp.Charset = derivedCollation.CharsetName + } else if tp.Charset != derivedCollation.CharsetName { + return ErrCollationCharsetMismatch.GenWithStackByArgs(derivedCollation.Name, tp.Charset) + } + tp.Collate = derivedCollation.Name + } + } + } else { + tp.Charset = charset.CharsetBin + tp.Collate = charset.CharsetBin + } + } else { + if !charset.ValidCharsetAndCollation(tp.Charset, tp.Collate) { + return errUnsupportedCharset.GenWithStackByArgs(tp.Charset, tp.Collate) + } + if len(tp.Collate) == 0 { + if len(specifiedCollates) == 0 { + // The charset is specified, but the collate is not. + var err error + tp.Collate, err = charset.GetDefaultCollation(tp.Charset) + if err != nil { + return errors.Trace(err) + } + } else { + // Both the charset and collate are specified. + for _, spc := range specifiedCollates { + derivedCollation, err := charset.GetCollationByName(spc) + if err != nil { + return errors.Trace(err) + } + if tp.Charset != derivedCollation.CharsetName { + return ErrCollationCharsetMismatch.GenWithStackByArgs(derivedCollation.Name, tp.Charset) + } + tp.Collate = derivedCollation.Name + } + } + } + } + + // Use default value for flen or decimal when they are unspecified. + defaultFlen, defaultDecimal := mysql.GetDefaultFieldLengthAndDecimal(tp.Tp) + if tp.Flen == types.UnspecifiedLength { + tp.Flen = defaultFlen + if mysql.HasUnsignedFlag(tp.Flag) && tp.Tp != mysql.TypeLonglong && mysql.IsIntegerType(tp.Tp) { + // Issue #4684: the flen of unsigned integer(except bigint) is 1 digit shorter than signed integer + // because it has no prefix "+" or "-" character. + tp.Flen-- + } + } + if tp.Decimal == types.UnspecifiedLength { + tp.Decimal = defaultDecimal + } + return nil +} + +// outPriKeyConstraint is the primary key constraint out of column definition. 
such as: create table t1 (id int , age int, primary key(id)); +func buildColumnAndConstraint(ctx sessionctx.Context, offset int, + colDef *ast.ColumnDef, outPriKeyConstraint *ast.Constraint) (*table.Column, []*ast.Constraint, error) { + // specifiedCollates refers to collates in colDef.Options, should handle them together. + specifiedCollates := extractCollateFromOption(colDef) + + if err := setCharsetCollationFlenDecimal(colDef.Tp, specifiedCollates); err != nil { + return nil, nil, errors.Trace(err) + } + col, cts, err := columnDefToCol(ctx, offset, colDef, outPriKeyConstraint) + if err != nil { + return nil, nil, errors.Trace(err) + } + return col, cts, nil +} + +// checkColumnDefaultValue checks the default value of the column. +// In non-strict SQL mode, if the default value of the column is an empty string, the default value can be ignored. +// In strict SQL mode, TEXT/BLOB/JSON can't have not null default values. +// In NO_ZERO_DATE SQL mode, TIMESTAMP/DATE/DATETIME type can't have zero date like '0000-00-00' or '0000-00-00 00:00:00'. +func checkColumnDefaultValue(ctx sessionctx.Context, col *table.Column, value interface{}) (bool, interface{}, error) { + hasDefaultValue := true + if value != nil && (col.Tp == mysql.TypeTinyBlob || col.Tp == mysql.TypeMediumBlob || + col.Tp == mysql.TypeLongBlob || col.Tp == mysql.TypeBlob) { + // In non-strict SQL mode. + if !ctx.GetSessionVars().SQLMode.HasStrictMode() && value == "" { + if col.Tp == mysql.TypeBlob || col.Tp == mysql.TypeLongBlob { + // The TEXT/BLOB default value can be ignored. + hasDefaultValue = false + } + sc := ctx.GetSessionVars().StmtCtx + sc.AppendWarning(errBlobCantHaveDefault.GenWithStackByArgs(col.Name.O)) + return hasDefaultValue, value, nil + } + // In strict SQL mode or default value is not an empty string. + return hasDefaultValue, value, errBlobCantHaveDefault.GenWithStackByArgs(col.Name.O) + } + return hasDefaultValue, value, nil +} + +// columnDefToCol converts ColumnDef to Col and TableConstraints. +// outPriKeyConstraint is the primary key constraint out of column definition. such as: create table t1 (id int , age int, primary key(id)); +func columnDefToCol(ctx sessionctx.Context, offset int, colDef *ast.ColumnDef, outPriKeyConstraint *ast.Constraint) (*table.Column, []*ast.Constraint, error) { + var constraints = make([]*ast.Constraint, 0) + col := table.ToColumn(&model.ColumnInfo{ + Offset: offset, + Name: colDef.Name.Name, + FieldType: *colDef.Tp, + // TODO: remove this version field after there is no old version. + Version: model.CurrLatestColumnInfoVersion, + }) + + var err error + hasDefaultValue := false + hasNullFlag := false + if colDef.Options != nil { + length := types.UnspecifiedLength + + keys := []*ast.IndexPartSpecification{ + { + Column: colDef.Name, + Length: length, + }, + } + + for _, v := range colDef.Options { + switch v.Tp { + case ast.ColumnOptionNotNull: + col.Flag |= mysql.NotNullFlag + case ast.ColumnOptionNull: + col.Flag &= ^mysql.NotNullFlag + removeOnUpdateNowFlag(col) + hasNullFlag = true + case ast.ColumnOptionAutoIncrement: + col.Flag |= mysql.AutoIncrementFlag + case ast.ColumnOptionPrimaryKey: + // Check PriKeyFlag first to avoid extra duplicate constraints. + if col.Flag&mysql.PriKeyFlag == 0 { + constraint := &ast.Constraint{Tp: ast.ConstraintPrimaryKey, Keys: keys} + constraints = append(constraints, constraint) + col.Flag |= mysql.PriKeyFlag + } + case ast.ColumnOptionUniqKey: + // Check UniqueFlag first to avoid extra duplicate constraints. 
+ if col.Flag&mysql.UniqueFlag == 0 { + constraint := &ast.Constraint{Tp: ast.ConstraintUniqKey, Keys: keys} + constraints = append(constraints, constraint) + col.Flag |= mysql.UniqueKeyFlag + } + case ast.ColumnOptionDefaultValue: + hasDefaultValue, err = setDefaultValue(ctx, col, v) + if err != nil { + return nil, nil, errors.Trace(err) + } + removeOnUpdateNowFlag(col) + case ast.ColumnOptionComment: + err := setColumnComment(ctx, col, v) + if err != nil { + return nil, nil, errors.Trace(err) + } + case ast.ColumnOptionCollate: + if field_types.HasCharset(colDef.Tp) { + col.FieldType.Collate = v.StrValue + } + case ast.ColumnOptionFulltext: + ctx.GetSessionVars().StmtCtx.AppendWarning(ErrTableCantHandleFt) + } + } + } + + // Set `NoDefaultValueFlag` if this field doesn't have a default value and + // it is `not null` and not an `AUTO_INCREMENT` field or `TIMESTAMP` field. + setNoDefaultValueFlag(col, hasDefaultValue) + if col.FieldType.EvalType().IsStringKind() && col.Charset == charset.CharsetBin { + col.Flag |= mysql.BinaryFlag + } + if col.Tp == mysql.TypeBit { + // For BIT field, it's charset is binary but does not have binary flag. + col.Flag &= ^mysql.BinaryFlag + col.Flag |= mysql.UnsignedFlag + } + if col.Tp == mysql.TypeYear { + // For Year field, it's charset is binary but does not have binary flag. + col.Flag &= ^mysql.BinaryFlag + col.Flag |= mysql.ZerofillFlag + } + + // If you specify ZEROFILL for a numeric column, MySQL automatically adds the UNSIGNED attribute to the column. + // See https://dev.mysql.com/doc/refman/5.7/en/numeric-type-overview.html for more details. + // But some types like bit and year, won't show its unsigned flag in `show create table`. + if mysql.HasZerofillFlag(col.Flag) { + col.Flag |= mysql.UnsignedFlag + } + err = checkPriKeyConstraint(col, hasDefaultValue, hasNullFlag, outPriKeyConstraint) + if err != nil { + return nil, nil, errors.Trace(err) + } + err = checkColumnValueConstraint(col) + if err != nil { + return nil, nil, errors.Trace(err) + } + err = checkDefaultValue(ctx, col, hasDefaultValue) + if err != nil { + return nil, nil, errors.Trace(err) + } + err = checkColumnFieldLength(col) + if err != nil { + return nil, nil, errors.Trace(err) + } + return col, constraints, nil +} + +func getDefaultValue(ctx sessionctx.Context, col *table.Column, c *ast.ColumnOption) (interface{}, error) { + v, err := expression.EvalAstExpr(ctx, c.Expr) + if err != nil { + return nil, errors.Trace(err) + } + + if v.IsNull() { + return nil, nil + } + + return v.ToString() +} + +func removeOnUpdateNowFlag(c *table.Column) { + // For timestamp Col, if it is set null or default value, + // OnUpdateNowFlag should be removed. + if mysql.HasTimestampFlag(c.Flag) { + c.Flag &= ^mysql.OnUpdateNowFlag + } +} + +func setNoDefaultValueFlag(c *table.Column, hasDefaultValue bool) { + if hasDefaultValue { + return + } + + if !mysql.HasNotNullFlag(c.Flag) { + return + } + + // Check if it is an `AUTO_INCREMENT` field or `TIMESTAMP` field. + if !mysql.HasAutoIncrementFlag(c.Flag) && !mysql.HasTimestampFlag(c.Flag) { + c.Flag |= mysql.NoDefaultValueFlag + } +} + +func checkDefaultValue(ctx sessionctx.Context, c *table.Column, hasDefaultValue bool) error { + if !hasDefaultValue { + return nil + } + + if c.GetDefaultValue() != nil { + if _, err := table.GetColDefaultValue(ctx, c.ToInfo()); err != nil { + return types.ErrInvalidDefault.GenWithStackByArgs(c.Name) + } + return nil + } + // Primary key default null is invalid. 
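+	// For example, a definition like `id INT PRIMARY KEY DEFAULT NULL` reaches this point
+	// with a nil default value and is rejected.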
+ if mysql.HasPriKeyFlag(c.Flag) { + return ErrPrimaryCantHaveNull + } + + // Set not null but default null is invalid. + if mysql.HasNotNullFlag(c.Flag) { + return types.ErrInvalidDefault.GenWithStackByArgs(c.Name) + } + + return nil +} + +// checkPriKeyConstraint check all parts of a PRIMARY KEY must be NOT NULL +func checkPriKeyConstraint(col *table.Column, hasDefaultValue, hasNullFlag bool, outPriKeyConstraint *ast.Constraint) error { + // Primary key should not be null. + if mysql.HasPriKeyFlag(col.Flag) && hasDefaultValue && col.GetDefaultValue() == nil { + return types.ErrInvalidDefault.GenWithStackByArgs(col.Name) + } + // Set primary key flag for outer primary key constraint. + // Such as: create table t1 (id int , age int, primary key(id)) + if !mysql.HasPriKeyFlag(col.Flag) && outPriKeyConstraint != nil { + for _, key := range outPriKeyConstraint.Keys { + if key.Column.Name.L != col.Name.L { + continue + } + col.Flag |= mysql.PriKeyFlag + break + } + } + // Primary key should not be null. + if mysql.HasPriKeyFlag(col.Flag) && hasNullFlag { + return ErrPrimaryCantHaveNull + } + return nil +} + +func checkColumnValueConstraint(col *table.Column) error { + if col.Tp != mysql.TypeEnum && col.Tp != mysql.TypeSet { + return nil + } + valueMap := make(map[string]string, len(col.Elems)) + for i := range col.Elems { + val := strings.ToLower(col.Elems[i]) + if _, ok := valueMap[val]; ok { + tpStr := "ENUM" + if col.Tp == mysql.TypeSet { + tpStr = "SET" + } + return types.ErrDuplicatedValueInType.GenWithStackByArgs(col.Name, valueMap[val], tpStr) + } + valueMap[val] = col.Elems[i] + } + return nil +} + +func checkDuplicateColumn(cols []interface{}) error { + colNames := set.StringSet{} + colName := model.NewCIStr("") + for _, col := range cols { + switch x := col.(type) { + case *ast.ColumnDef: + colName = x.Name.Name + case model.CIStr: + colName = x + default: + colName.O, colName.L = "", "" + } + if colNames.Exist(colName.L) { + return infoschema.ErrColumnExists.GenWithStackByArgs(colName.O) + } + colNames.Insert(colName.L) + } + return nil +} + +func checkTooLongColumn(cols []interface{}) error { + var colName string + for _, col := range cols { + switch x := col.(type) { + case *ast.ColumnDef: + colName = x.Name.Name.O + case model.CIStr: + colName = x.O + default: + colName = "" + } + if len(colName) > mysql.MaxColumnNameLength { + return ErrTooLongIdent.GenWithStackByArgs(colName) + } + } + return nil +} + +func checkTooManyColumns(colDefs []*ast.ColumnDef) error { + if uint32(len(colDefs)) > atomic.LoadUint32(&TableColumnCountLimit) { + return errTooManyFields + } + return nil +} + +// checkColumnsAttributes checks attributes for multiple columns. +func checkColumnsAttributes(colDefs []*ast.ColumnDef) error { + for _, colDef := range colDefs { + if err := checkColumnAttributes(colDef.Name.OrigColName(), colDef.Tp); err != nil { + return errors.Trace(err) + } + } + return nil +} + +func checkColumnFieldLength(col *table.Column) error { + if col.Tp == mysql.TypeVarchar { + if err := IsTooBigFieldLength(col.Flen, col.Name.O, col.Charset); err != nil { + return errors.Trace(err) + } + } + + return nil +} + +// IsTooBigFieldLength check if the varchar type column exceeds the maximum length limit. 
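+// For example, assuming mysql.MaxFieldVarCharLength is 65535 (the MySQL row-size limit) and the
+// utf8mb4 charset has Maxlen 4, the effective limit is 65535/4 = 16383 characters, so a
+// VARCHAR(20000) column declared with CHARACTER SET utf8mb4 is rejected here.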
+func IsTooBigFieldLength(colDefTpFlen int, colDefName, setCharset string) error { + desc, err := charset.GetCharsetDesc(setCharset) + if err != nil { + return errors.Trace(err) + } + maxFlen := mysql.MaxFieldVarCharLength + maxFlen /= desc.Maxlen + if colDefTpFlen != types.UnspecifiedLength && colDefTpFlen > maxFlen { + return types.ErrTooBigFieldLength.GenWithStack("Column length too big for column '%s' (max = %d); use BLOB or TEXT instead", colDefName, maxFlen) + } + return nil +} + +// checkColumnAttributes check attributes for single column. +func checkColumnAttributes(colName string, tp *types.FieldType) error { + switch tp.Tp { + case mysql.TypeNewDecimal, mysql.TypeDouble, mysql.TypeFloat: + if tp.Flen < tp.Decimal { + return types.ErrMBiggerThanD.GenWithStackByArgs(colName) + } + case mysql.TypeDatetime, mysql.TypeDuration, mysql.TypeTimestamp: + if tp.Decimal != int(types.UnspecifiedFsp) && (tp.Decimal < int(types.MinFsp) || tp.Decimal > int(types.MaxFsp)) { + return types.ErrTooBigPrecision.GenWithStackByArgs(tp.Decimal, colName, types.MaxFsp) + } + } + return nil +} + +func checkDuplicateConstraint(namesMap map[string]bool, name string) error { + if name == "" { + return nil + } + nameLower := strings.ToLower(name) + if namesMap[nameLower] { + return ErrDupKeyName.GenWithStack("duplicate key name %s", name) + } + namesMap[nameLower] = true + return nil +} + +func setEmptyConstraintName(namesMap map[string]bool, constr *ast.Constraint) { + if constr.Name == "" && len(constr.Keys) > 0 { + colName := constr.Keys[0].Column.Name.L + constrName := colName + i := 2 + if strings.EqualFold(constrName, mysql.PrimaryKeyName) { + constrName = fmt.Sprintf("%s_%d", constrName, 2) + i = 3 + } + for namesMap[constrName] { + // We loop forever until we find constrName that haven't been used. + constrName = fmt.Sprintf("%s_%d", colName, i) + i++ + } + constr.Name = constrName + namesMap[constrName] = true + } +} + +func checkConstraintNames(constraints []*ast.Constraint) error { + constrNames := map[string]bool{} + + // Check not empty constraint name whether is duplicated. + for _, constr := range constraints { + err := checkDuplicateConstraint(constrNames, constr.Name) + if err != nil { + return errors.Trace(err) + } + } + + // Set empty constraint names. + for _, constr := range constraints { + setEmptyConstraintName(constrNames, constr) + } + + return nil +} + +func buildTableInfo(ctx sessionctx.Context, d *ddl, tableName model.CIStr, cols []*table.Column, constraints []*ast.Constraint) (tbInfo *model.TableInfo, err error) { + tbInfo = &model.TableInfo{ + Name: tableName, + Version: model.CurrLatestTableInfoVersion, + } + // When this function is called by MockTableInfo, we should set a particular table id. + // So the `ddl` structure may be nil. + if d != nil { + genIDs, err := d.genGlobalIDs(1) + if err != nil { + return nil, errors.Trace(err) + } + tbInfo.ID = genIDs[0] + } + for _, v := range cols { + v.ID = allocateColumnID(tbInfo) + tbInfo.Columns = append(tbInfo.Columns, v.ToInfo()) + } + for _, constr := range constraints { + if constr.Tp == ast.ConstraintPrimaryKey { + lastCol, err := checkPKOnGeneratedColumn(tbInfo, constr.Keys) + if err != nil { + return nil, err + } + if len(constr.Keys) == 1 { + switch lastCol.Tp { + case mysql.TypeLong, mysql.TypeLonglong, + mysql.TypeTiny, mysql.TypeShort, mysql.TypeInt24: + tbInfo.PKIsHandle = true + // Avoid creating index for PK handle column. 
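+					// (With PKIsHandle set, the integer primary key serves as the row handle
+					// itself, so a separate index entry would be redundant.)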
+					continue
+				}
+			}
+		}
+
+		if constr.Tp == ast.ConstraintFulltext {
+			sc := ctx.GetSessionVars().StmtCtx
+			sc.AppendWarning(ErrTableCantHandleFt)
+			continue
+		}
+		// build index info.
+		idxInfo, err := buildIndexInfo(tbInfo, model.NewCIStr(constr.Name), constr.Keys, model.StatePublic)
+		if err != nil {
+			return nil, errors.Trace(err)
+		}
+		// Check if the index is primary or unique.
+		switch constr.Tp {
+		case ast.ConstraintPrimaryKey:
+			idxInfo.Primary = true
+			idxInfo.Unique = true
+			idxInfo.Name = model.NewCIStr(mysql.PrimaryKeyName)
+		case ast.ConstraintUniq, ast.ConstraintUniqKey, ast.ConstraintUniqIndex:
+			idxInfo.Unique = true
+		}
+		// set index type.
+		if constr.Option != nil {
+			idxInfo.Comment, err = validateCommentLength(ctx.GetSessionVars(), idxInfo.Name.String(), constr.Option)
+			if err != nil {
+				return nil, errors.Trace(err)
+			}
+			if constr.Option.Tp == model.IndexTypeInvalid {
+				// Use btree as default index type.
+				idxInfo.Tp = model.IndexTypeBtree
+			} else {
+				idxInfo.Tp = constr.Option.Tp
+			}
+		} else {
+			// Use btree as default index type.
+			idxInfo.Tp = model.IndexTypeBtree
+		}
+		idxInfo.ID = allocateIndexID(tbInfo)
+		tbInfo.Indices = append(tbInfo.Indices, idxInfo)
+	}
+	return
+}
+
+// checkTableInfoValid checks whether the built table info is valid by constructing a table from it.
+func checkTableInfoValid(tblInfo *model.TableInfo) error {
+	_, err := tables.TableFromMeta(nil, tblInfo)
+	return err
+}
+
+// BuildTableInfoFromAST builds model.TableInfo from a SQL statement.
+// The SQL string should be a create table statement.
+// Don't use this function to build a partitioned table.
+func BuildTableInfoFromAST(s *ast.CreateTableStmt) (*model.TableInfo, error) {
+	return buildTableInfoWithCheck(mock.NewContext(), nil, s, mysql.DefaultCharset, "")
+}
+
+func buildTableInfoWithCheck(ctx sessionctx.Context, d *ddl, s *ast.CreateTableStmt, dbCharset, dbCollate string) (*model.TableInfo, error) {
+	ident := ast.Ident{Schema: s.Table.Schema, Name: s.Table.Name}
+	colDefs := s.Cols
+	colObjects := make([]interface{}, 0, len(colDefs))
+	for _, col := range colDefs {
+		colObjects = append(colObjects, col)
+	}
+	if err := checkTooLongTable(ident.Name); err != nil {
+		return nil, errors.Trace(err)
+	}
+	if err := checkDuplicateColumn(colObjects); err != nil {
+		return nil, errors.Trace(err)
+	}
+	if err := checkTooLongColumn(colObjects); err != nil {
+		return nil, errors.Trace(err)
+	}
+	if err := checkTooManyColumns(colDefs); err != nil {
+		return nil, errors.Trace(err)
+	}
+
+	if err := checkColumnsAttributes(colDefs); err != nil {
+		return nil, errors.Trace(err)
+	}
+
+	// The column charsets haven't been resolved yet.
+	cols, newConstraints, err := buildColumnsAndConstraints(ctx, colDefs, s.Constraints)
+	if err != nil {
+		return nil, errors.Trace(err)
+	}
+
+	err = checkConstraintNames(newConstraints)
+	if err != nil {
+		return nil, errors.Trace(err)
+	}
+
+	var tbInfo *model.TableInfo
+	tbInfo, err = buildTableInfo(ctx, d, ident.Name, cols, newConstraints)
+	if err != nil {
+		return nil, errors.Trace(err)
+	}
+	tbInfo.Charset, tbInfo.Collate = charset.GetDefaultCharsetAndCollate()
+
+	return tbInfo, nil
+}
+
+func (d *ddl) CreateTable(ctx sessionctx.Context, s *ast.CreateTableStmt) (err error) {
+	ident := ast.Ident{Schema: s.Table.Schema, Name: s.Table.Name}
+	is := d.GetInfoSchemaWithInterceptor(ctx)
+	schema, ok := is.SchemaByName(ident.Schema)
+	if !ok {
+		return infoschema.ErrDatabaseNotExists.GenWithStackByArgs(ident.Schema)
+	}
+	if is.TableExists(ident.Schema, ident.Name) {
+		err = infoschema.ErrTableExists.GenWithStackByArgs(ident)
+		if s.IfNotExists {
+			ctx.GetSessionVars().StmtCtx.AppendNote(err)
+			return nil
+		}
+		return err
+	}
+
+	tbInfo, err := buildTableInfoWithCheck(ctx, d, s, schema.Charset, schema.Collate)
+	if err != nil {
+		return errors.Trace(err)
+	}
+	tbInfo.State = model.StatePublic
+	err = checkTableInfoValid(tbInfo)
+	if err != nil {
+		return err
+	}
+	tbInfo.State = model.StateNone
+
+	job := &model.Job{
+		SchemaID:   schema.ID,
+		TableID:    tbInfo.ID,
+		SchemaName: schema.Name.L,
+		Type:       model.ActionCreateTable,
+		BinlogInfo: &model.HistoryInfo{},
+		Args:       []interface{}{tbInfo},
+	}
+
+	err = d.doDDLJob(ctx, job)
+
+	// The table already exists, but the IF NOT EXISTS flag is set, so we ignore this error.
+	if infoschema.ErrTableExists.Equal(err) && s.IfNotExists {
+		ctx.GetSessionVars().StmtCtx.AppendNote(err)
+		return nil
+	}
+	err = d.callHookOnChanged(err)
+	return errors.Trace(err)
+}
+
+func checkCharsetAndCollation(cs string, co string) error {
+	if !charset.ValidCharsetAndCollation(cs, co) {
+		return ErrUnknownCharacterSet.GenWithStackByArgs(cs)
+	}
+	return nil
+}
+
+// isIgnorableSpec checks if the spec type is ignorable.
+// Some specs are parsed but ignored. This is for compatibility.
+func isIgnorableSpec(tp ast.AlterTableType) bool {
+	// AlterTableLock/AlterTableAlgorithm are ignored.
+	return tp == ast.AlterTableLock || tp == ast.AlterTableAlgorithm
+}
+
+// resolveAlterTableSpec resolves the alter table algorithm and removes ignorable specs from specs.
+// It returns the valid specs and any error encountered.
+func resolveAlterTableSpec(ctx sessionctx.Context, specs []*ast.AlterTableSpec) ([]*ast.AlterTableSpec, error) {
+	validSpecs := make([]*ast.AlterTableSpec, 0, len(specs))
+	for _, spec := range specs {
+		if isIgnorableSpec(spec.Tp) {
+			continue
+		}
+		validSpecs = append(validSpecs, spec)
+	}
+
+	if len(validSpecs) > 1 {
+		// Now we only allow one schema change at a time.
+		return nil, errRunMultiSchemaChanges
+	}
+
+	// Only handle valid specs.
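+	// For example, `ALTER TABLE t ADD COLUMN a INT, ADD COLUMN b INT` yields two valid specs and
+	// is rejected above with errRunMultiSchemaChanges, while
+	// `ALTER TABLE t ADD COLUMN a INT, LOCK = NONE` passes because the LOCK spec is ignorable.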
+ return validSpecs, nil +} + +func (d *ddl) AlterTable(ctx sessionctx.Context, ident ast.Ident, specs []*ast.AlterTableSpec) (err error) { + validSpecs, err := resolveAlterTableSpec(ctx, specs) + if err != nil { + return errors.Trace(err) + } + + for _, spec := range validSpecs { + switch spec.Tp { + case ast.AlterTableAddColumns: + if len(spec.NewColumns) != 1 { + return errRunMultiSchemaChanges + } + err = d.AddColumn(ctx, ident, spec) + case ast.AlterTableDropColumn: + err = d.DropColumn(ctx, ident, spec) + case ast.AlterTableDropIndex: + err = d.DropIndex(ctx, ident, model.NewCIStr(spec.Name), spec.IfExists) + case ast.AlterTableDropPrimaryKey: + err = d.dropIndex(ctx, ident, true, model.NewCIStr(mysql.PrimaryKeyName), spec.IfExists) + case ast.AlterTableAddConstraint: + constr := spec.Constraint + switch spec.Constraint.Tp { + case ast.ConstraintKey, ast.ConstraintIndex: + err = d.CreateIndex(ctx, ident, ast.IndexKeyTypeNone, model.NewCIStr(constr.Name), + spec.Constraint.Keys, constr.Option, constr.IfNotExists) + case ast.ConstraintUniq, ast.ConstraintUniqIndex, ast.ConstraintUniqKey: + err = d.CreateIndex(ctx, ident, ast.IndexKeyTypeUnique, model.NewCIStr(constr.Name), + spec.Constraint.Keys, constr.Option, false) // IfNotExists should be not applied + case ast.ConstraintPrimaryKey: + err = d.CreatePrimaryKey() + case ast.ConstraintFulltext: + ctx.GetSessionVars().StmtCtx.AppendWarning(ErrTableCantHandleFt) + default: + // Nothing to do now. + } + case ast.AlterTableModifyColumn: + err = d.ModifyColumn(ctx, ident, spec) + case ast.AlterTableChangeColumn: + err = d.ChangeColumn(ctx, ident, spec) + case ast.AlterTableAlterColumn: + err = d.AlterColumn(ctx, ident, spec) + case ast.AlterTablePartition: + // Prevent silent succeed if user executes ALTER TABLE x PARTITION BY ... + err = errors.New("alter table partition is unsupported") + default: + // Nothing to do now. + } + + if err != nil { + return errors.Trace(err) + } + } + + return nil +} + +// ShardRowID shards the implicit row ID by adding shard value to the row ID's first few bits. +func (d *ddl) ShardRowID(ctx sessionctx.Context, tableIdent ast.Ident, uVal uint64) error { + schema, t, err := d.getSchemaAndTableByIdent(ctx, tableIdent) + if err != nil { + return errors.Trace(err) + } + if uVal == t.Meta().ShardRowIDBits { + // Nothing need to do. 
+ return nil + } + if uVal > 0 && t.Meta().PKIsHandle { + return errUnsupportedShardRowIDBits + } + err = verifyNoOverflowShardBits(d.sessPool, t, uVal) + if err != nil { + return err + } + job := &model.Job{ + Type: model.ActionShardRowID, + SchemaID: schema.ID, + TableID: t.Meta().ID, + SchemaName: schema.Name.L, + BinlogInfo: &model.HistoryInfo{}, + Args: []interface{}{uVal}, + } + err = d.doDDLJob(ctx, job) + err = d.callHookOnChanged(err) + return errors.Trace(err) +} + +func (d *ddl) getSchemaAndTableByIdent(ctx sessionctx.Context, tableIdent ast.Ident) (dbInfo *model.DBInfo, t table.Table, err error) { + is := d.GetInfoSchemaWithInterceptor(ctx) + schema, ok := is.SchemaByName(tableIdent.Schema) + if !ok { + return nil, nil, infoschema.ErrDatabaseNotExists.GenWithStackByArgs(tableIdent.Schema) + } + t, err = is.TableByName(tableIdent.Schema, tableIdent.Name) + if err != nil { + return nil, nil, infoschema.ErrTableNotExists.GenWithStackByArgs(tableIdent.Schema, tableIdent.Name) + } + return schema, t, nil +} + +func checkUnsupportedColumnConstraint(col *ast.ColumnDef, ti ast.Ident) error { + for _, constraint := range col.Options { + switch constraint.Tp { + case ast.ColumnOptionAutoIncrement: + return errUnsupportedAddColumn.GenWithStack("unsupported add column '%s' constraint AUTO_INCREMENT when altering '%s.%s'", col.Name, ti.Schema, ti.Name) + case ast.ColumnOptionPrimaryKey: + return errUnsupportedAddColumn.GenWithStack("unsupported add column '%s' constraint PRIMARY KEY when altering '%s.%s'", col.Name, ti.Schema, ti.Name) + case ast.ColumnOptionUniqKey: + return errUnsupportedAddColumn.GenWithStack("unsupported add column '%s' constraint UNIQUE KEY when altering '%s.%s'", col.Name, ti.Schema, ti.Name) + } + } + + return nil +} + +// AddColumn will add a new column to the table. +func (d *ddl) AddColumn(ctx sessionctx.Context, ti ast.Ident, spec *ast.AlterTableSpec) error { + specNewColumn := spec.NewColumns[0] + + err := checkUnsupportedColumnConstraint(specNewColumn, ti) + if err != nil { + return errors.Trace(err) + } + + colName := specNewColumn.Name.Name.O + if err = checkColumnAttributes(colName, specNewColumn.Tp); err != nil { + return errors.Trace(err) + } + + schema, t, err := d.getSchemaAndTableByIdent(ctx, ti) + if err != nil { + return errors.Trace(err) + } + if err = checkAddColumnTooManyColumns(len(t.Cols()) + 1); err != nil { + return errors.Trace(err) + } + // Check whether added column has existed. + col := table.FindCol(t.Cols(), colName) + if col != nil { + err = infoschema.ErrColumnExists.GenWithStackByArgs(colName) + if spec.IfNotExists { + ctx.GetSessionVars().StmtCtx.AppendNote(err) + return nil + } + return err + } + + if len(colName) > mysql.MaxColumnNameLength { + return ErrTooLongIdent.GenWithStackByArgs(colName) + } + + // Ignore table constraints now, maybe return error later. + // We use length(t.Cols()) as the default offset firstly, we will change the + // column's offset later. + col, _, err = buildColumnAndConstraint(ctx, len(t.Cols()), specNewColumn, nil) + if err != nil { + return errors.Trace(err) + } + + col.OriginDefaultValue, err = generateOriginDefaultValue(col.ToInfo()) + if err != nil { + return errors.Trace(err) + } + + job := &model.Job{ + SchemaID: schema.ID, + TableID: t.Meta().ID, + SchemaName: schema.Name.L, + Type: model.ActionAddColumn, + BinlogInfo: &model.HistoryInfo{}, + Args: []interface{}{col, 0}, + } + + err = d.doDDLJob(ctx, job) + // column exists, but if_not_exists flags is true, so we ignore this error. 
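+	// For example, `ALTER TABLE t ADD COLUMN IF NOT EXISTS c INT` on a table that already has a
+	// column c turns the duplicate-column error into a note-level warning and the statement succeeds.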
+	if infoschema.ErrColumnExists.Equal(err) && spec.IfNotExists {
+		ctx.GetSessionVars().StmtCtx.AppendNote(err)
+		return nil
+	}
+	err = d.callHookOnChanged(err)
+	return errors.Trace(err)
+}
+
+// DropColumn will drop a column from the table. For now, dropping a column that is covered by an index is not supported.
+func (d *ddl) DropColumn(ctx sessionctx.Context, ti ast.Ident, spec *ast.AlterTableSpec) error {
+	schema, t, err := d.getSchemaAndTableByIdent(ctx, ti)
+	if err != nil {
+		return errors.Trace(err)
+	}
+
+	// Check whether the column to be dropped exists.
+	colName := spec.OldColumnName.Name
+	col := table.FindCol(t.Cols(), colName.L)
+	if col == nil {
+		err = ErrCantDropFieldOrKey.GenWithStack("column %s doesn't exist", colName)
+		if spec.IfExists {
+			ctx.GetSessionVars().StmtCtx.AppendNote(err)
+			return nil
+		}
+		return err
+	}
+
+	tblInfo := t.Meta()
+	if err = isDroppableColumn(tblInfo, colName); err != nil {
+		return errors.Trace(err)
+	}
+	// We don't support dropping the column that serves as the PK handle yet.
+	if col.IsPKHandleColumn(tblInfo) {
+		return errUnsupportedPKHandle
+	}
+
+	job := &model.Job{
+		SchemaID:   schema.ID,
+		TableID:    t.Meta().ID,
+		SchemaName: schema.Name.L,
+		Type:       model.ActionDropColumn,
+		BinlogInfo: &model.HistoryInfo{},
+		Args:       []interface{}{colName},
+	}
+
+	err = d.doDDLJob(ctx, job)
+	// The column does not exist, but the IF EXISTS flag is set, so we ignore this error.
+	if ErrCantDropFieldOrKey.Equal(err) && spec.IfExists {
+		ctx.GetSessionVars().StmtCtx.AppendNote(err)
+		return nil
+	}
+	err = d.callHookOnChanged(err)
+	return errors.Trace(err)
+}
+
+// modifiableCharsetAndCollation returns an error when the charset or collation is not modifiable.
+func modifiableCharsetAndCollation(toCharset, toCollate, origCharset, origCollate string) error {
+	if !charset.ValidCharsetAndCollation(toCharset, toCollate) {
+		return ErrUnknownCharacterSet.GenWithStack("Unknown character set: '%s', collation: '%s'", toCharset, toCollate)
+	}
+	if (origCharset == charset.CharsetUTF8 && toCharset == charset.CharsetUTF8MB4) ||
+		(origCharset == charset.CharsetUTF8 && toCharset == charset.CharsetUTF8) ||
+		(origCharset == charset.CharsetUTF8MB4 && toCharset == charset.CharsetUTF8MB4) {
+		// TiDB only allows changing utf8 to utf8mb4, or changing the collation when the charset is utf8/utf8mb4.
+		return nil
+	}
+
+	if toCharset != origCharset {
+		msg := fmt.Sprintf("charset from %s to %s", origCharset, toCharset)
+		return errUnsupportedModifyCharset.GenWithStackByArgs(msg)
+	}
+	if toCollate != origCollate {
+		msg := fmt.Sprintf("change collate from %s to %s", origCollate, toCollate)
+		return errUnsupportedModifyCharset.GenWithStackByArgs(msg)
+	}
+	return nil
+}
+
+// modifiable checks whether the 'origin' type can be modified to the 'to' type without the need to
+// change or check existing data in the table.
+// The change is allowed only if the two types have the same Charset and Collation, the same sign,
+// are both integer types or both string types, and the new Flen and Decimal are greater than or
+// equal to the original ones.
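+// A few illustrative cases, based on the checks below:
+//   - VARCHAR(10) -> VARCHAR(20) and TINYINT -> BIGINT are accepted (same type family, nothing shrinks);
+//   - VARCHAR(20) -> VARCHAR(10) is rejected because the length shrinks;
+//   - INT -> INT UNSIGNED is rejected because the sign changes;
+//   - DECIMAL(10,2) -> DECIMAL(12,2) is rejected because changing the precision needs a data rewrite;
+//   - ENUM('a','b') -> ENUM('a','b','c') is accepted, but reordering existing ENUM values is not.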
+func modifiable(origin *types.FieldType, to *types.FieldType) error { + unsupportedMsg := fmt.Sprintf("type %v not match origin %v", to.CompactStr(), origin.CompactStr()) + switch origin.Tp { + case mysql.TypeVarchar, mysql.TypeString, mysql.TypeVarString, + mysql.TypeBlob, mysql.TypeTinyBlob, mysql.TypeMediumBlob, mysql.TypeLongBlob: + switch to.Tp { + case mysql.TypeVarchar, mysql.TypeString, mysql.TypeVarString, + mysql.TypeBlob, mysql.TypeTinyBlob, mysql.TypeMediumBlob, mysql.TypeLongBlob: + default: + return errUnsupportedModifyColumn.GenWithStackByArgs(unsupportedMsg) + } + case mysql.TypeTiny, mysql.TypeShort, mysql.TypeInt24, mysql.TypeLong, mysql.TypeLonglong: + switch to.Tp { + case mysql.TypeTiny, mysql.TypeShort, mysql.TypeInt24, mysql.TypeLong, mysql.TypeLonglong: + default: + return errUnsupportedModifyColumn.GenWithStackByArgs(unsupportedMsg) + } + case mysql.TypeEnum: + if origin.Tp != to.Tp { + msg := fmt.Sprintf("cannot modify enum type column's to type %s", to.String()) + return errUnsupportedModifyColumn.GenWithStackByArgs(msg) + } + if len(to.Elems) < len(origin.Elems) { + msg := fmt.Sprintf("the number of enum column's elements is less than the original: %d", len(origin.Elems)) + return errUnsupportedModifyColumn.GenWithStackByArgs(msg) + } + for index, originElem := range origin.Elems { + toElem := to.Elems[index] + if originElem != toElem { + msg := fmt.Sprintf("cannot modify enum column value %s to %s", originElem, toElem) + return errUnsupportedModifyColumn.GenWithStackByArgs(msg) + } + } + case mysql.TypeNewDecimal: + // The root cause is modifying decimal precision needs to rewrite binary representation of that decimal. + if to.Flen != origin.Flen || to.Decimal != origin.Decimal { + return errUnsupportedModifyColumn.GenWithStackByArgs("can't change decimal column precision") + } + default: + if origin.Tp != to.Tp { + return errUnsupportedModifyColumn.GenWithStackByArgs(unsupportedMsg) + } + } + + if to.Flen > 0 && to.Flen < origin.Flen { + msg := fmt.Sprintf("length %d is less than origin %d", to.Flen, origin.Flen) + return errUnsupportedModifyColumn.GenWithStackByArgs(msg) + } + if to.Decimal > 0 && to.Decimal < origin.Decimal { + msg := fmt.Sprintf("decimal %d is less than origin %d", to.Decimal, origin.Decimal) + return errUnsupportedModifyColumn.GenWithStackByArgs(msg) + } + + toUnsigned := mysql.HasUnsignedFlag(to.Flag) + originUnsigned := mysql.HasUnsignedFlag(origin.Flag) + if originUnsigned != toUnsigned { + msg := fmt.Sprintf("can't change unsigned integer to signed or vice versa") + return errUnsupportedModifyColumn.GenWithStackByArgs(msg) + } + + err := modifiableCharsetAndCollation(to.Charset, to.Collate, origin.Charset, origin.Collate) + return errors.Trace(err) +} + +func setDefaultValue(ctx sessionctx.Context, col *table.Column, option *ast.ColumnOption) (bool, error) { + hasDefaultValue := false + value, err := getDefaultValue(ctx, col, option) + if err != nil { + return hasDefaultValue, errors.Trace(err) + } + + if hasDefaultValue, value, err = checkColumnDefaultValue(ctx, col, value); err != nil { + return hasDefaultValue, errors.Trace(err) + } + if err != nil { + return hasDefaultValue, errors.Trace(err) + } + err = col.SetDefaultValue(value) + if err != nil { + return hasDefaultValue, errors.Trace(err) + } + return hasDefaultValue, nil +} + +func setColumnComment(ctx sessionctx.Context, col *table.Column, option *ast.ColumnOption) error { + value, err := expression.EvalAstExpr(ctx, option.Expr) + if err != nil { + return errors.Trace(err) + 
} + col.Comment, err = value.ToString() + return errors.Trace(err) +} + +// processColumnOptions is only used in getModifiableColumnJob. +func processColumnOptions(ctx sessionctx.Context, col *table.Column, options []*ast.ColumnOption) error { + var hasDefaultValue bool + var err error + for _, opt := range options { + switch opt.Tp { + case ast.ColumnOptionDefaultValue: + hasDefaultValue, err = setDefaultValue(ctx, col, opt) + if err != nil { + return errors.Trace(err) + } + case ast.ColumnOptionComment: + err := setColumnComment(ctx, col, opt) + if err != nil { + return errors.Trace(err) + } + case ast.ColumnOptionNotNull: + col.Flag |= mysql.NotNullFlag + case ast.ColumnOptionNull: + col.Flag &= ^mysql.NotNullFlag + case ast.ColumnOptionAutoIncrement: + col.Flag |= mysql.AutoIncrementFlag + case ast.ColumnOptionPrimaryKey, ast.ColumnOptionUniqKey: + return errUnsupportedModifyColumn.GenWithStack("can't change column constraint - %v", opt.Tp) + case ast.ColumnOptionCollate: + col.Collate = opt.StrValue + case ast.ColumnOptionReference: + return errors.Trace(errUnsupportedModifyColumn.GenWithStackByArgs("can't modify with references")) + case ast.ColumnOptionFulltext: + return errors.Trace(errUnsupportedModifyColumn.GenWithStackByArgs("can't modify with full text")) + default: + return errors.Trace(errUnsupportedModifyColumn.GenWithStackByArgs(fmt.Sprintf("unknown column option type: %d", opt.Tp))) + } + } + + // Set `NoDefaultValueFlag` if this field doesn't have a default value and + // it is `not null` and not an `AUTO_INCREMENT` field or `TIMESTAMP` field. + setNoDefaultValueFlag(col, hasDefaultValue) + + if col.Tp == mysql.TypeBit { + col.Flag |= mysql.UnsignedFlag + } + + if hasDefaultValue { + return errors.Trace(checkDefaultValue(ctx, col, true)) + } + + return nil +} + +func (d *ddl) getModifiableColumnJob(ctx sessionctx.Context, ident ast.Ident, originalColName model.CIStr, + spec *ast.AlterTableSpec) (*model.Job, error) { + specNewColumn := spec.NewColumns[0] + is := d.infoHandle.Get() + schema, ok := is.SchemaByName(ident.Schema) + if !ok { + return nil, errors.Trace(infoschema.ErrDatabaseNotExists) + } + t, err := is.TableByName(ident.Schema, ident.Name) + if err != nil { + return nil, errors.Trace(infoschema.ErrTableNotExists.GenWithStackByArgs(ident.Schema, ident.Name)) + } + + col := table.FindCol(t.Cols(), originalColName.L) + if col == nil { + return nil, infoschema.ErrColumnNotExists.GenWithStackByArgs(originalColName, ident.Name) + } + newColName := specNewColumn.Name.Name + // If we want to rename the column name, we need to check whether it already exists. + if newColName.L != originalColName.L { + c := table.FindCol(t.Cols(), newColName.L) + if c != nil { + return nil, infoschema.ErrColumnExists.GenWithStackByArgs(newColName) + } + } + + // Constraints in the new column means adding new constraints. Errors should thrown, + // which will be done by `processColumnOptions` later. + if specNewColumn.Tp == nil { + // Make sure the column definition is simple field type. + return nil, errors.Trace(errUnsupportedModifyColumn) + } + + if err = checkColumnAttributes(specNewColumn.Name.OrigColName(), specNewColumn.Tp); err != nil { + return nil, errors.Trace(err) + } + + newCol := table.ToColumn(&model.ColumnInfo{ + ID: col.ID, + // We use this PR(https://github.com/pingcap/tidb/pull/6274) as the dividing line to define whether it is a new version or an old version TiDB. + // The old version TiDB initializes the column's offset and state here. 
+ // The new version TiDB doesn't initialize the column's offset and state, and it will do the initialization in run DDL function. + // When we do the rolling upgrade the following may happen: + // a new version TiDB builds the DDL job that doesn't be set the column's offset and state, + // and the old version TiDB is the DDL owner, it doesn't get offset and state from the store. Then it will encounter errors. + // So here we set offset and state to support the rolling upgrade. + Offset: col.Offset, + State: col.State, + OriginDefaultValue: col.OriginDefaultValue, + FieldType: *specNewColumn.Tp, + Name: newColName, + Version: col.Version, + }) + + // TODO: Remove it when all table versions are greater than or equal to TableInfoVersion1. + // If newCol's charset is empty and the table's version less than TableInfoVersion1, + // we will not modify the charset of the column. This behavior is not compatible with MySQL. + if len(newCol.FieldType.Charset) == 0 && t.Meta().Version < model.TableInfoVersion1 { + newCol.FieldType.Charset = col.FieldType.Charset + newCol.FieldType.Collate = col.FieldType.Collate + } + // specifiedCollates refers to collates in colDef.Option. When setting charset and collate here we + // should take the collate in colDef.Option into consideration rather than handling it separately + specifiedCollates := extractCollateFromOption(specNewColumn) + + err = setCharsetCollationFlenDecimal(&newCol.FieldType, specifiedCollates) + if err != nil { + return nil, errors.Trace(err) + } + + if err = processColumnOptions(ctx, newCol, specNewColumn.Options); err != nil { + return nil, errors.Trace(err) + } + + if err = modifiable(&col.FieldType, &newCol.FieldType); err != nil { + return nil, errors.Trace(err) + } + + // Copy index related options to the new spec. + indexFlags := col.FieldType.Flag & (mysql.PriKeyFlag | mysql.UniqueKeyFlag | mysql.MultipleKeyFlag) + newCol.FieldType.Flag |= indexFlags + if mysql.HasPriKeyFlag(col.FieldType.Flag) { + newCol.FieldType.Flag |= mysql.NotNullFlag + // TODO: If user explicitly set NULL, we should throw error ErrPrimaryCantHaveNull. + } + + // We don't support modifying column from not_auto_increment to auto_increment. + if !mysql.HasAutoIncrementFlag(col.Flag) && mysql.HasAutoIncrementFlag(newCol.Flag) { + return nil, errUnsupportedModifyColumn.GenWithStackByArgs("can't set auto_increment") + } + // Disallow modifying column from auto_increment to not auto_increment if the session variable `AllowRemoveAutoInc` is false. + if !ctx.GetSessionVars().AllowRemoveAutoInc && mysql.HasAutoIncrementFlag(col.Flag) && !mysql.HasAutoIncrementFlag(newCol.Flag) { + return nil, errUnsupportedModifyColumn.GenWithStackByArgs("can't remove auto_increment without @@tidb_allow_remove_auto_inc enabled") + } + + // We support modifying the type definitions of 'null' to 'not null' now. + var modifyColumnTp byte + if !mysql.HasNotNullFlag(col.Flag) && mysql.HasNotNullFlag(newCol.Flag) { + if err = checkForNullValue(ctx, col.Tp != newCol.Tp, ident.Schema, ident.Name, newCol.Name, col.ColumnInfo); err != nil { + return nil, errors.Trace(err) + } + // `modifyColumnTp` indicates that there is a type modification. 
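+		// mysql.TypeNull is presumably used here only as a marker telling the DDL worker that a
+		// NULL -> NOT NULL change needs extra handling; it is not the column's new type.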
+ modifyColumnTp = mysql.TypeNull + } + + if err = checkColumnFieldLength(newCol); err != nil { + return nil, err + } + + if err = checkColumnWithIndexConstraint(t.Meta(), col.ColumnInfo, newCol.ColumnInfo); err != nil { + return nil, err + } + + job := &model.Job{ + SchemaID: schema.ID, + TableID: t.Meta().ID, + SchemaName: schema.Name.L, + Type: model.ActionModifyColumn, + BinlogInfo: &model.HistoryInfo{}, + Args: []interface{}{&newCol, originalColName, modifyColumnTp}, + } + return job, nil +} + +// checkColumnWithIndexConstraint is used to check the related index constraint of the modified column. +// Index has a max-prefix-length constraint. eg: a varchar(100), index idx(a), modifying column a to a varchar(4000) +// will cause index idx to break the max-prefix-length constraint. +func checkColumnWithIndexConstraint(tbInfo *model.TableInfo, originalCol, newCol *model.ColumnInfo) error { + var columns []*model.ColumnInfo + for _, indexInfo := range tbInfo.Indices { + containColumn := false + for _, col := range indexInfo.Columns { + if col.Name.L == originalCol.Name.L { + containColumn = true + break + } + } + if !containColumn { + continue + } + if columns == nil { + columns = make([]*model.ColumnInfo, 0, len(tbInfo.Columns)) + columns = append(columns, tbInfo.Columns...) + // replace old column with new column. + for i, col := range columns { + if col.Name.L != originalCol.Name.L { + continue + } + columns[i] = newCol.Clone() + columns[i].Name = originalCol.Name + break + } + } + err := checkIndexPrefixLength(columns, indexInfo.Columns) + if err != nil { + return err + } + } + return nil +} + +// ChangeColumn renames an existing column and modifies the column's definition, +// currently we only support limited kind of changes +// that do not need to change or check data on the table. +func (d *ddl) ChangeColumn(ctx sessionctx.Context, ident ast.Ident, spec *ast.AlterTableSpec) error { + specNewColumn := spec.NewColumns[0] + if len(specNewColumn.Name.Schema.O) != 0 && ident.Schema.L != specNewColumn.Name.Schema.L { + return ErrWrongDBName.GenWithStackByArgs(specNewColumn.Name.Schema.O) + } + if len(spec.OldColumnName.Schema.O) != 0 && ident.Schema.L != spec.OldColumnName.Schema.L { + return ErrWrongDBName.GenWithStackByArgs(spec.OldColumnName.Schema.O) + } + if len(specNewColumn.Name.Table.O) != 0 && ident.Name.L != specNewColumn.Name.Table.L { + return ErrWrongTableName.GenWithStackByArgs(specNewColumn.Name.Table.O) + } + if len(spec.OldColumnName.Table.O) != 0 && ident.Name.L != spec.OldColumnName.Table.L { + return ErrWrongTableName.GenWithStackByArgs(spec.OldColumnName.Table.O) + } + + job, err := d.getModifiableColumnJob(ctx, ident, spec.OldColumnName.Name, spec) + if err != nil { + if infoschema.ErrColumnNotExists.Equal(err) && spec.IfExists { + ctx.GetSessionVars().StmtCtx.AppendNote(infoschema.ErrColumnNotExists.GenWithStackByArgs(spec.OldColumnName.Name, ident.Name)) + return nil + } + return errors.Trace(err) + } + + err = d.doDDLJob(ctx, job) + // column not exists, but if_exists flags is true, so we ignore this error. + if infoschema.ErrColumnNotExists.Equal(err) && spec.IfExists { + ctx.GetSessionVars().StmtCtx.AppendNote(err) + return nil + } + err = d.callHookOnChanged(err) + return errors.Trace(err) +} + +// ModifyColumn does modification on an existing column, currently we only support limited kind of changes +// that do not need to change or check data on the table. 
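+// For example, widening a VARCHAR column or turning a nullable column into NOT NULL is accepted here,
+// while any change rejected by `modifiable` (roughly, one that could require rewriting or losing data)
+// never reaches the job queue.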
+func (d *ddl) ModifyColumn(ctx sessionctx.Context, ident ast.Ident, spec *ast.AlterTableSpec) error { + specNewColumn := spec.NewColumns[0] + if len(specNewColumn.Name.Schema.O) != 0 && ident.Schema.L != specNewColumn.Name.Schema.L { + return ErrWrongDBName.GenWithStackByArgs(specNewColumn.Name.Schema.O) + } + if len(specNewColumn.Name.Table.O) != 0 && ident.Name.L != specNewColumn.Name.Table.L { + return ErrWrongTableName.GenWithStackByArgs(specNewColumn.Name.Table.O) + } + + originalColName := specNewColumn.Name.Name + job, err := d.getModifiableColumnJob(ctx, ident, originalColName, spec) + if err != nil { + if infoschema.ErrColumnNotExists.Equal(err) && spec.IfExists { + ctx.GetSessionVars().StmtCtx.AppendNote(infoschema.ErrColumnNotExists.GenWithStackByArgs(originalColName, ident.Name)) + return nil + } + return errors.Trace(err) + } + + err = d.doDDLJob(ctx, job) + // column not exists, but if_exists flags is true, so we ignore this error. + if infoschema.ErrColumnNotExists.Equal(err) && spec.IfExists { + ctx.GetSessionVars().StmtCtx.AppendNote(err) + return nil + } + err = d.callHookOnChanged(err) + return errors.Trace(err) +} + +func (d *ddl) AlterColumn(ctx sessionctx.Context, ident ast.Ident, spec *ast.AlterTableSpec) error { + specNewColumn := spec.NewColumns[0] + is := d.infoHandle.Get() + schema, ok := is.SchemaByName(ident.Schema) + if !ok { + return infoschema.ErrTableNotExists.GenWithStackByArgs(ident.Schema, ident.Name) + } + t, err := is.TableByName(ident.Schema, ident.Name) + if err != nil { + return infoschema.ErrTableNotExists.GenWithStackByArgs(ident.Schema, ident.Name) + } + + colName := specNewColumn.Name.Name + // Check whether alter column has existed. + col := table.FindCol(t.Cols(), colName.L) + if col == nil { + return ErrBadField.GenWithStackByArgs(colName, ident.Name) + } + + // Clean the NoDefaultValueFlag value. + col.Flag &= ^mysql.NoDefaultValueFlag + if len(specNewColumn.Options) == 0 { + err = col.SetDefaultValue(nil) + if err != nil { + return errors.Trace(err) + } + setNoDefaultValueFlag(col, false) + } else { + hasDefaultValue, err := setDefaultValue(ctx, col, specNewColumn.Options[0]) + if err != nil { + return errors.Trace(err) + } + if err = checkDefaultValue(ctx, col, hasDefaultValue); err != nil { + return errors.Trace(err) + } + } + + job := &model.Job{ + SchemaID: schema.ID, + TableID: t.Meta().ID, + SchemaName: schema.Name.L, + Type: model.ActionSetDefaultValue, + BinlogInfo: &model.HistoryInfo{}, + Args: []interface{}{col}, + } + + err = d.doDDLJob(ctx, job) + err = d.callHookOnChanged(err) + return errors.Trace(err) +} + +// AlterTableComment updates the table comment information. +func (d *ddl) AlterTableComment(ctx sessionctx.Context, ident ast.Ident, spec *ast.AlterTableSpec) error { + is := d.infoHandle.Get() + schema, ok := is.SchemaByName(ident.Schema) + if !ok { + return infoschema.ErrDatabaseNotExists.GenWithStackByArgs(ident.Schema) + } + + tb, err := is.TableByName(ident.Schema, ident.Name) + if err != nil { + return errors.Trace(infoschema.ErrTableNotExists.GenWithStackByArgs(ident.Schema, ident.Name)) + } + + job := &model.Job{ + SchemaID: schema.ID, + TableID: tb.Meta().ID, + SchemaName: schema.Name.L, + Type: model.ActionModifyTableComment, + BinlogInfo: &model.HistoryInfo{}, + Args: []interface{}{spec.Comment}, + } + + err = d.doDDLJob(ctx, job) + err = d.callHookOnChanged(err) + return errors.Trace(err) +} + +// AlterTableCharset changes the table charset and collate. 
+func (d *ddl) AlterTableCharsetAndCollate(ctx sessionctx.Context, ident ast.Ident, toCharset, toCollate string) error {
+	// Neither a charset nor a collation was specified, so there is nothing valid to apply.
+	if toCharset == "" && toCollate == "" {
+		return ErrUnknownCharacterSet.GenWithStackByArgs(toCharset)
+	}
+
+	is := d.infoHandle.Get()
+	schema, ok := is.SchemaByName(ident.Schema)
+	if !ok {
+		return infoschema.ErrDatabaseNotExists.GenWithStackByArgs(ident.Schema)
+	}
+
+	tb, err := is.TableByName(ident.Schema, ident.Name)
+	if err != nil {
+		return errors.Trace(infoschema.ErrTableNotExists.GenWithStackByArgs(ident.Schema, ident.Name))
+	}
+
+	if toCharset == "" {
+		// The charset does not change.
+		toCharset = tb.Meta().Charset
+	}
+
+	if toCollate == "" {
+		// Get the default collation of the charset.
+		toCollate, err = charset.GetDefaultCollation(toCharset)
+		if err != nil {
+			return errors.Trace(err)
+		}
+	}
+	doNothing, err := checkAlterTableCharset(tb.Meta(), schema, toCharset, toCollate)
+	if err != nil {
+		return err
+	}
+	if doNothing {
+		return nil
+	}
+
+	job := &model.Job{
+		SchemaID:   schema.ID,
+		TableID:    tb.Meta().ID,
+		SchemaName: schema.Name.L,
+		Type:       model.ActionModifyTableCharsetAndCollate,
+		BinlogInfo: &model.HistoryInfo{},
+		Args:       []interface{}{toCharset, toCollate},
+	}
+	err = d.doDDLJob(ctx, job)
+	err = d.callHookOnChanged(err)
+	return errors.Trace(err)
+}
+
+// checkAlterTableCharset checks whether it is possible to change the charset of the table.
+// This function returns 2 variables:
+// doNothing: if doNothing is true, there is no need to change anything, because the target charset is the same as the table's charset.
+// err: if err is not nil, it is not possible to change the table charset to the target charset.
+func checkAlterTableCharset(tblInfo *model.TableInfo, dbInfo *model.DBInfo, toCharset, toCollate string) (doNothing bool, err error) {
+	origCharset := tblInfo.Charset
+	origCollate := tblInfo.Collate
+	// An old-version schema's charset may be modified when loading the schema if TreatOldVersionUTF8AsUTF8MB4 is enabled.
+	// So even if origCharset equals toCharset, we still need to run the DDL for an old-version schema.
+	if origCharset == toCharset && origCollate == toCollate && tblInfo.Version >= model.TableInfoVersion2 {
+		// Nothing to do.
+		doNothing = true
+		for _, col := range tblInfo.Columns {
+			if col.Charset == charset.CharsetBin {
+				continue
+			}
+			if col.Charset == toCharset && col.Collate == toCollate {
+				continue
+			}
+			doNothing = false
+		}
+		if doNothing {
+			return doNothing, nil
+		}
+	}
+
+	if len(origCharset) == 0 {
+		// The table charset may be "" if the table was created in an old TiDB version, such as v2.0.8.
+		// This DDL will update the table charset to the default charset.
+		origCharset, origCollate, err = ResolveCharsetCollation("", "", dbInfo.Charset, dbInfo.Collate)
+		if err != nil {
+			return doNothing, err
+		}
+	}
+
+	if err = modifiableCharsetAndCollation(toCharset, toCollate, origCharset, origCollate); err != nil {
+		return doNothing, err
+	}
+
+	for _, col := range tblInfo.Columns {
+		if col.Tp == mysql.TypeVarchar {
+			if err = IsTooBigFieldLength(col.Flen, col.Name.O, toCharset); err != nil {
+				return doNothing, err
+			}
+		}
+		if col.Charset == charset.CharsetBin {
+			continue
+		}
+		if len(col.Charset) == 0 {
+			continue
+		}
+		if err = modifiableCharsetAndCollation(toCharset, toCollate, col.Charset, col.Collate); err != nil {
+			return doNothing, err
+		}
+	}
+	return doNothing, nil
+}
+
+// DropTable will proceed even if some table in the list does not exist.
+func (d *ddl) DropTable(ctx sessionctx.Context, ti ast.Ident) (err error) { + schema, tb, err := d.getSchemaAndTableByIdent(ctx, ti) + if err != nil { + return errors.Trace(err) + } + + job := &model.Job{ + SchemaID: schema.ID, + TableID: tb.Meta().ID, + SchemaName: schema.Name.L, + Type: model.ActionDropTable, + BinlogInfo: &model.HistoryInfo{}, + } + + err = d.doDDLJob(ctx, job) + err = d.callHookOnChanged(err) + return errors.Trace(err) +} + +func getAnonymousIndex(t table.Table, colName model.CIStr) model.CIStr { + id := 2 + l := len(t.Indices()) + indexName := colName + for i := 0; i < l; i++ { + if t.Indices()[i].Meta().Name.L == indexName.L { + indexName = model.NewCIStr(fmt.Sprintf("%s_%d", colName.O, id)) + i = -1 + id++ + } + } + return indexName +} + +func (d *ddl) CreatePrimaryKey() error { + return ErrUnsupportedModifyPrimaryKey.GenWithStack("Unsupported add primary key, alter-primary-key is false") +} + +func (d *ddl) CreateIndex(ctx sessionctx.Context, ti ast.Ident, keyType ast.IndexKeyType, indexName model.CIStr, + idxColNames []*ast.IndexPartSpecification, indexOption *ast.IndexOption, ifNotExists bool) error { + + // not support Spatial and FullText index + if keyType == ast.IndexKeyTypeFullText || keyType == ast.IndexKeyTypeSpatial { + return errUnsupportedIndexType.GenWithStack("FULLTEXT and SPATIAL index is not supported") + } + unique := keyType == ast.IndexKeyTypeUnique + schema, t, err := d.getSchemaAndTableByIdent(ctx, ti) + if err != nil { + return errors.Trace(err) + } + + // Deal with anonymous index. + if len(indexName.L) == 0 { + indexName = getAnonymousIndex(t, idxColNames[0].Column.Name) + } + + if indexInfo := t.Meta().FindIndexByName(indexName.L); indexInfo != nil { + err = ErrDupKeyName.GenWithStack("index already exist %s", indexName) + if ifNotExists { + ctx.GetSessionVars().StmtCtx.AppendNote(err) + return nil + } + return err + } + + if err = checkTooLongIndex(indexName); err != nil { + return errors.Trace(err) + } + + tblInfo := t.Meta() + // Check before the job is put to the queue. + // This check is redundant, but useful. If DDL check fail before the job is put + // to job queue, the fail path logic is super fast. + // After DDL job is put to the queue, and if the check fail, TiDB will run the DDL cancel logic. + // The recover step causes DDL wait a few seconds, makes the unit test painfully slow. + _, err = buildIndexColumns(tblInfo.Columns, idxColNames) + if err != nil { + return errors.Trace(err) + } + // May be truncate comment here, when index comment too long and sql_mode is't strict. + if _, err = validateCommentLength(ctx.GetSessionVars(), indexName.String(), indexOption); err != nil { + return errors.Trace(err) + } + + job := &model.Job{ + SchemaID: schema.ID, + TableID: t.Meta().ID, + SchemaName: schema.Name.L, + Type: model.ActionAddIndex, + BinlogInfo: &model.HistoryInfo{}, + Args: []interface{}{unique, indexName, idxColNames, indexOption}, + Priority: ctx.GetSessionVars().DDLReorgPriority, + } + + err = d.doDDLJob(ctx, job) + // key exists, but if_not_exists flags is true, so we ignore this error. 
+ if ErrDupKeyName.Equal(err) && ifNotExists { + ctx.GetSessionVars().StmtCtx.AppendNote(err) + return nil + } + err = d.callHookOnChanged(err) + return errors.Trace(err) +} + +func (d *ddl) DropIndex(ctx sessionctx.Context, ti ast.Ident, indexName model.CIStr, ifExists bool) error { + return d.dropIndex(ctx, ti, false, indexName, ifExists) +} + +func (d *ddl) dropIndex(ctx sessionctx.Context, ti ast.Ident, isPK bool, indexName model.CIStr, ifExists bool) error { + is := d.infoHandle.Get() + schema, ok := is.SchemaByName(ti.Schema) + if !ok { + return errors.Trace(infoschema.ErrDatabaseNotExists) + } + t, err := is.TableByName(ti.Schema, ti.Name) + if err != nil { + return errors.Trace(infoschema.ErrTableNotExists.GenWithStackByArgs(ti.Schema, ti.Name)) + } + + indexInfo := t.Meta().FindIndexByName(indexName.L) + if isPK { + return ErrUnsupportedModifyPrimaryKey.GenWithStack("Unsupported drop primary key when alter-primary-key is false") + } + if indexInfo == nil { + err = ErrCantDropFieldOrKey.GenWithStack("index %s doesn't exist", indexName) + if ifExists { + ctx.GetSessionVars().StmtCtx.AppendNote(err) + return nil + } + return err + } + + // Check for drop index on auto_increment column. + err = checkDropIndexOnAutoIncrementColumn(t.Meta(), indexInfo) + if err != nil { + return errors.Trace(err) + } + + jobTp := model.ActionDropIndex + if isPK { + jobTp = model.ActionDropPrimaryKey + } + + job := &model.Job{ + SchemaID: schema.ID, + TableID: t.Meta().ID, + SchemaName: schema.Name.L, + Type: jobTp, + BinlogInfo: &model.HistoryInfo{}, + Args: []interface{}{indexName}, + } + + err = d.doDDLJob(ctx, job) + // index not exists, but if_exists flags is true, so we ignore this error. + if ErrCantDropFieldOrKey.Equal(err) && ifExists { + ctx.GetSessionVars().StmtCtx.AppendNote(err) + return nil + } + err = d.callHookOnChanged(err) + return errors.Trace(err) +} + +func isDroppableColumn(tblInfo *model.TableInfo, colName model.CIStr) error { + if len(tblInfo.Columns) == 1 { + return ErrCantRemoveAllFields.GenWithStack("can't drop only column %s in table %s", + colName, tblInfo.Name) + } + // We don't support dropping column with index covered now. + // We must drop the index first, then drop the column. + if isColumnWithIndex(colName.L, tblInfo.Indices) { + return errCantDropColWithIndex.GenWithStack("can't drop column %s with index covered now", colName) + } + return nil +} + +// validateCommentLength checks comment length of table, column, index and partition. +// If comment length is more than the standard length truncate it +// and store the comment length upto the standard comment length size. +func validateCommentLength(vars *variable.SessionVars, indexName string, indexOption *ast.IndexOption) (string, error) { + if indexOption == nil { + return "", nil + } + + maxLen := MaxCommentLength + if len(indexOption.Comment) > maxLen { + err := errTooLongIndexComment.GenWithStackByArgs(indexName, maxLen) + if vars.StrictSQLMode { + return "", err + } + vars.StmtCtx.AppendWarning(err) + indexOption.Comment = indexOption.Comment[:maxLen] + } + return indexOption.Comment, nil +} + +// extractCollateFromOption take collates(may multiple) in option into consideration +// when handle charset and collate of a column, rather than handling it separately. 
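+// For example, in `ALTER TABLE t MODIFY c VARCHAR(10) CHARACTER SET utf8mb4 COLLATE utf8mb4_bin` the
+// collation arrives as a ColumnOptionCollate entry; it is collected here and removed from def.Options
+// so that setCharsetCollationFlenDecimal can apply it together with the charset.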
+func extractCollateFromOption(def *ast.ColumnDef) []string { + var specifiedCollates []string + for i := 0; i < len(def.Options); i++ { + op := def.Options[i] + if op.Tp == ast.ColumnOptionCollate { + specifiedCollates = append(specifiedCollates, op.StrValue) + def.Options = append(def.Options[:i], def.Options[i+1:]...) + // maintain the correct index + i-- + } + } + return specifiedCollates +} diff --git a/ddl/ddl_test.go b/ddl/ddl_test.go new file mode 100644 index 0000000..1c3570d --- /dev/null +++ b/ddl/ddl_test.go @@ -0,0 +1,232 @@ +// Copyright 2015 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package ddl + +import ( + "context" + "os" + "testing" + "time" + + . "github.com/pingcap/check" + "github.com/pingcap/log" + "github.com/pingcap/tidb/config" + "github.com/pingcap/tidb/kv" + "github.com/pingcap/tidb/meta" + "github.com/pingcap/tidb/meta/autoid" + "github.com/pingcap/tidb/parser/ast" + "github.com/pingcap/tidb/parser/model" + "github.com/pingcap/tidb/parser/terror" + "github.com/pingcap/tidb/sessionctx" + "github.com/pingcap/tidb/store/mockstore" + "github.com/pingcap/tidb/types" + "github.com/pingcap/tidb/util" + "github.com/pingcap/tidb/util/logutil" + "github.com/pingcap/tidb/util/mock" + "github.com/pingcap/tidb/util/testleak" + "go.uber.org/zap" +) + +type DDLForTest interface { + // SetHook sets the hook. + SetHook(h Callback) + // SetInterceptoror sets the interceptor. + SetInterceptoror(h Interceptor) +} + +// SetHook implements DDL.SetHook interface. +func (d *ddl) SetHook(h Callback) { + d.mu.Lock() + defer d.mu.Unlock() + + d.mu.hook = h +} + +// SetInterceptoror implements DDL.SetInterceptoror interface. +func (d *ddl) SetInterceptoror(i Interceptor) { + d.mu.Lock() + defer d.mu.Unlock() + + d.mu.interceptor = i +} + +// generalWorker returns the general worker. +func (d *ddl) generalWorker() *worker { + return d.workers[generalWorker] +} + +// restartWorkers is like the function of d.start. But it won't initialize the "workers" and create a new worker. +// It only starts the original workers. 
+func (d *ddl) restartWorkers(ctx context.Context) { + d.quitCh = make(chan struct{}) + + err := d.ownerManager.CampaignOwner(ctx) + terror.Log(err) + for _, worker := range d.workers { + worker.wg.Add(1) + worker.quitCh = make(chan struct{}) + w := worker + go util.WithRecovery(func() { w.start(d.ddlCtx) }, + func(r interface{}) { + if r != nil { + log.Error("[ddl] restart DDL worker meet panic", zap.String("worker", w.String()), zap.String("ID", d.uuid)) + } + }) + asyncNotify(worker.ddlJobCh) + } +} + +func TestT(t *testing.T) { + CustomVerboseFlag = true + *CustomParallelSuiteFlag = true + logLevel := os.Getenv("log_level") + logutil.InitLogger(logutil.NewLogConfig(logLevel, "", logutil.EmptyFileLogConfig, false)) + autoid.SetStep(5000) + ReorgWaitTimeout = 30 * time.Millisecond + + cfg := config.GetGlobalConfig() + newCfg := *cfg + config.StoreGlobalConfig(&newCfg) + + testleak.BeforeTest() + TestingT(t) + testleak.AfterTestT(t)() +} + +func testCreateStore(c *C, name string) kv.Storage { + store, err := mockstore.NewMockTikvStore() + c.Assert(err, IsNil) + return store +} + +func testNewContext(d *ddl) sessionctx.Context { + ctx := mock.NewContext() + ctx.Store = d.store + return ctx +} + +func getSchemaVer(c *C, ctx sessionctx.Context) int64 { + err := ctx.NewTxn(context.Background()) + c.Assert(err, IsNil) + txn, err := ctx.Txn(true) + c.Assert(err, IsNil) + m := meta.NewMeta(txn) + ver, err := m.GetSchemaVersion() + c.Assert(err, IsNil) + return ver +} + +type historyJobArgs struct { + ver int64 + db *model.DBInfo + tbl *model.TableInfo + tblIDs map[int64]struct{} +} + +func checkEqualTable(c *C, t1, t2 *model.TableInfo) { + c.Assert(t1.ID, Equals, t2.ID) + c.Assert(t1.Name, Equals, t2.Name) + c.Assert(t1.Charset, Equals, t2.Charset) + c.Assert(t1.Collate, Equals, t2.Collate) + c.Assert(t1.PKIsHandle, DeepEquals, t2.PKIsHandle) + c.Assert(t1.Comment, DeepEquals, t2.Comment) + c.Assert(t1.AutoIncID, DeepEquals, t2.AutoIncID) +} + +func checkHistoryJob(c *C, job *model.Job) { + c.Assert(job.State, Equals, model.JobStateSynced) +} + +func checkHistoryJobArgs(c *C, ctx sessionctx.Context, id int64, args *historyJobArgs) { + txn, err := ctx.Txn(true) + c.Assert(err, IsNil) + t := meta.NewMeta(txn) + historyJob, err := t.GetHistoryDDLJob(id) + c.Assert(err, IsNil) + c.Assert(historyJob.BinlogInfo.FinishedTS, Greater, uint64(0)) + + if args.tbl != nil { + c.Assert(historyJob.BinlogInfo.SchemaVersion, Equals, args.ver) + checkEqualTable(c, historyJob.BinlogInfo.TableInfo, args.tbl) + return + } + + // for handling schema job + c.Assert(historyJob.BinlogInfo.SchemaVersion, Equals, args.ver) + c.Assert(historyJob.BinlogInfo.DBInfo, DeepEquals, args.db) + // only for creating schema job + if args.db != nil && len(args.tblIDs) == 0 { + return + } +} + +func buildCreateIdxJob(dbInfo *model.DBInfo, tblInfo *model.TableInfo, unique bool, indexName string, colName string) *model.Job { + return &model.Job{ + SchemaID: dbInfo.ID, + TableID: tblInfo.ID, + Type: model.ActionAddIndex, + BinlogInfo: &model.HistoryInfo{}, + Args: []interface{}{unique, model.NewCIStr(indexName), + []*ast.IndexPartSpecification{{ + Column: &ast.ColumnName{Name: model.NewCIStr(colName)}, + Length: types.UnspecifiedLength}}}, + } +} + +func testCreateIndex(c *C, ctx sessionctx.Context, d *ddl, dbInfo *model.DBInfo, tblInfo *model.TableInfo, unique bool, indexName string, colName string) *model.Job { + job := buildCreateIdxJob(dbInfo, tblInfo, unique, indexName, colName) + err := d.doDDLJob(ctx, job) + c.Assert(err, IsNil) + 
v := getSchemaVer(c, ctx) + checkHistoryJobArgs(c, ctx, job.ID, &historyJobArgs{ver: v, tbl: tblInfo}) + return job +} + +func testAddColumn(c *C, ctx sessionctx.Context, d *ddl, dbInfo *model.DBInfo, tblInfo *model.TableInfo, args []interface{}) *model.Job { + job := &model.Job{ + SchemaID: dbInfo.ID, + TableID: tblInfo.ID, + Type: model.ActionAddColumn, + Args: args, + BinlogInfo: &model.HistoryInfo{}, + } + err := d.doDDLJob(ctx, job) + c.Assert(err, IsNil) + v := getSchemaVer(c, ctx) + checkHistoryJobArgs(c, ctx, job.ID, &historyJobArgs{ver: v, tbl: tblInfo}) + return job +} + +func buildDropIdxJob(dbInfo *model.DBInfo, tblInfo *model.TableInfo, indexName string) *model.Job { + tp := model.ActionDropIndex + if indexName == "primary" { + tp = model.ActionDropPrimaryKey + } + return &model.Job{ + SchemaID: dbInfo.ID, + TableID: tblInfo.ID, + Type: tp, + BinlogInfo: &model.HistoryInfo{}, + Args: []interface{}{model.NewCIStr(indexName)}, + } +} + +func testDropIndex(c *C, ctx sessionctx.Context, d *ddl, dbInfo *model.DBInfo, tblInfo *model.TableInfo, indexName string) *model.Job { + job := buildDropIdxJob(dbInfo, tblInfo, indexName) + err := d.doDDLJob(ctx, job) + c.Assert(err, IsNil) + v := getSchemaVer(c, ctx) + checkHistoryJobArgs(c, ctx, job.ID, &historyJobArgs{ver: v, tbl: tblInfo}) + return job +} diff --git a/ddl/ddl_worker.go b/ddl/ddl_worker.go new file mode 100644 index 0000000..9f523c5 --- /dev/null +++ b/ddl/ddl_worker.go @@ -0,0 +1,629 @@ +// Copyright 2015 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package ddl + +import ( + "context" + "fmt" + "sync" + "sync/atomic" + "time" + + "github.com/pingcap/errors" + "github.com/pingcap/failpoint" + "github.com/pingcap/tidb/ddl/util" + "github.com/pingcap/tidb/kv" + "github.com/pingcap/tidb/meta" + "github.com/pingcap/tidb/parser/model" + "github.com/pingcap/tidb/parser/terror" + "github.com/pingcap/tidb/sessionctx" + "github.com/pingcap/tidb/sessionctx/variable" + tidbutil "github.com/pingcap/tidb/util" + "github.com/pingcap/tidb/util/admin" + "github.com/pingcap/tidb/util/logutil" + "go.uber.org/zap" +) + +var ( + // ddlWorkerID is used for generating the next DDL worker ID. + ddlWorkerID = int32(0) + // WaitTimeWhenErrorOccured is waiting interval when processing DDL jobs encounter errors. + WaitTimeWhenErrorOccured = 1 * time.Second +) + +type workerType byte + +const ( + // generalWorker is the worker who handles all DDL statements except “add index”. + generalWorker workerType = 0 + // addIdxWorker is the worker who handles the operation of adding indexes. + addIdxWorker workerType = 1 + // waitDependencyJobInterval is the interval when the dependency job doesn't be done. + waitDependencyJobInterval = 200 * time.Millisecond + // noneDependencyJob means a job has no dependency-job. + noneDependencyJob = 0 +) + +// worker is used for handling DDL jobs. +// Now we have two kinds of workers. 
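+// generalWorker runs every statement except "add index", while addIdxWorker runs index additions,
+// which helps keep long index backfills from blocking other DDL jobs.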
+type worker struct { + id int32 + tp workerType + ddlJobCh chan struct{} + quitCh chan struct{} + wg sync.WaitGroup + + sessPool *sessionPool // sessPool is used to new sessions to execute SQL in ddl package. + reorgCtx *reorgCtx // reorgCtx is used for reorganization. + logCtx context.Context +} + +func newWorker(tp workerType, sessPool *sessionPool) *worker { + worker := &worker{ + id: atomic.AddInt32(&ddlWorkerID, 1), + tp: tp, + ddlJobCh: make(chan struct{}, 1), + quitCh: make(chan struct{}), + reorgCtx: &reorgCtx{notifyCancelReorgJob: 0}, + sessPool: sessPool, + } + + worker.logCtx = logutil.WithKeyValue(context.Background(), "worker", worker.String()) + return worker +} + +func (w *worker) typeStr() string { + var str string + switch w.tp { + case generalWorker: + str = "general" + case addIdxWorker: + str = model.AddIndexStr + default: + str = "unknow" + } + return str +} + +func (w *worker) String() string { + return fmt.Sprintf("worker %d, tp %s", w.id, w.typeStr()) +} + +func (w *worker) close() { + startTime := time.Now() + close(w.quitCh) + w.wg.Wait() + logutil.Logger(w.logCtx).Info("[ddl] DDL worker closed", zap.Duration("take time", time.Since(startTime))) +} + +// start is used for async online schema changing, it will try to become the owner firstly, +// then wait or pull the job queue to handle a schema change job. +func (w *worker) start(d *ddlCtx) { + logutil.Logger(w.logCtx).Info("[ddl] start DDL worker") + defer w.wg.Done() + + // We use 4 * lease time to check owner's timeout, so here, we will update owner's status + // every 2 * lease time. If lease is 0, we will use default 1s. + // But we use etcd to speed up, normally it takes less than 1s now, so we use 1s as the max value. + checkTime := chooseLeaseTime(2*d.lease, 1*time.Second) + + ticker := time.NewTicker(checkTime) + defer ticker.Stop() + + for { + select { + case <-ticker.C: + logutil.Logger(w.logCtx).Debug("[ddl] wait to check DDL status again", zap.Duration("interval", checkTime)) + case <-w.ddlJobCh: + case <-w.quitCh: + return + } + + err := w.handleDDLJobQueue(d) + if err != nil { + logutil.Logger(w.logCtx).Error("[ddl] handle DDL job failed", zap.Error(err)) + } + } +} + +func asyncNotify(ch chan struct{}) { + select { + case ch <- struct{}{}: + default: + } +} + +// buildJobDependence sets the curjob's dependency-ID. +// The dependency-job's ID must less than the current job's ID, and we need the largest one in the list. +func buildJobDependence(t *meta.Meta, curJob *model.Job) error { + // Jobs in the same queue are ordered. If we want to find a job's dependency-job, we need to look for + // it from the other queue. So if the job is "ActionAddIndex" job, we need find its dependency-job from DefaultJobList. + var jobs []*model.Job + var err error + switch curJob.Type { + case model.ActionAddIndex, model.ActionAddPrimaryKey: + jobs, err = t.GetAllDDLJobsInQueue(meta.DefaultJobListKey) + default: + jobs, err = t.GetAllDDLJobsInQueue(meta.AddIndexJobListKey) + } + if err != nil { + return errors.Trace(err) + } + + for _, job := range jobs { + if curJob.ID < job.ID { + continue + } + isDependent, err := curJob.IsDependentOn(job) + if err != nil { + return errors.Trace(err) + } + if isDependent { + logutil.BgLogger().Info("[ddl] current DDL job depends on other job", zap.String("currentJob", curJob.String()), zap.String("dependentJob", job.String())) + curJob.DependencyID = job.ID + break + } + } + return nil +} + +// addDDLJob gets a global job ID and puts the DDL job in the DDL queue. 
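+// The ID allocation, dependency detection, and enqueue below happen in a single transaction,
+// so a job is either fully queued or not queued at all.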
+func (d *ddl) addDDLJob(ctx sessionctx.Context, job *model.Job) error {
+	job.Version = currentVersion
+	job.Query, _ = ctx.Value(sessionctx.QueryString).(string)
+	err := kv.RunInNewTxn(d.store, true, func(txn kv.Transaction) error {
+		t := newMetaWithQueueTp(txn, job.Type.String())
+		var err error
+		job.ID, err = t.GenGlobalID()
+		if err != nil {
+			return errors.Trace(err)
+		}
+		job.StartTS = txn.StartTS()
+		if err = buildJobDependence(t, job); err != nil {
+			return errors.Trace(err)
+		}
+		err = t.EnQueueDDLJob(job)
+
+		return errors.Trace(err)
+	})
+
+	return errors.Trace(err)
+}
+
+// getHistoryDDLJob gets the DDL job with the given ID from the history queue.
+func (d *ddl) getHistoryDDLJob(id int64) (*model.Job, error) {
+	var job *model.Job
+
+	err := kv.RunInNewTxn(d.store, false, func(txn kv.Transaction) error {
+		t := meta.NewMeta(txn)
+		var err1 error
+		job, err1 = t.GetHistoryDDLJob(id)
+		return errors.Trace(err1)
+	})
+
+	return job, errors.Trace(err)
+}
+
+// getFirstDDLJob gets the first DDL job from the DDL queue.
+func (w *worker) getFirstDDLJob(t *meta.Meta) (*model.Job, error) {
+	job, err := t.GetDDLJobByIdx(0)
+	return job, errors.Trace(err)
+}
+
+// handleUpdateJobError handles the error when the DDL job is too large to be updated.
+func (w *worker) handleUpdateJobError(t *meta.Meta, job *model.Job, err error) error {
+	if err == nil {
+		return nil
+	}
+	if kv.ErrEntryTooLarge.Equal(err) {
+		logutil.Logger(w.logCtx).Warn("[ddl] update DDL job failed", zap.String("job", job.String()), zap.Error(err))
+		// Reduce this txn entry size.
+		job.BinlogInfo.Clean()
+		job.Error = toTError(err)
+		job.SchemaState = model.StateNone
+		job.State = model.JobStateCancelled
+		err = w.finishDDLJob(t, job)
+	}
+	return errors.Trace(err)
+}
+
+// updateDDLJob updates the DDL job information.
+// Every time the job enters another state except the final state, we must call this function.
+func (w *worker) updateDDLJob(t *meta.Meta, job *model.Job, meetErr bool) error {
+	updateRawArgs := true
+	// If an error occurred while running the job and RawArgs hasn't been decoded by DecodeArgs yet,
+	// we shouldn't replace RawArgs with the marshaled Args.
+	if meetErr && (job.RawArgs != nil && job.Args == nil) {
+		logutil.Logger(w.logCtx).Info("[ddl] meet something wrong before update DDL job, shouldn't update raw args",
+			zap.String("job", job.String()))
+		updateRawArgs = false
+	}
+	return errors.Trace(t.UpdateDDLJob(0, job, updateRawArgs))
+}
+
+// finishDDLJob deletes the finished DDL job in the DDL queue and puts it into the history queue.
+// If the DDL job needs to be handled in the background, it will prepare a background job.
+func (w *worker) finishDDLJob(t *meta.Meta, job *model.Job) (err error) {
+	_, err = t.DeQueueDDLJob()
+	if err != nil {
+		return errors.Trace(err)
+	}
+
+	job.BinlogInfo.FinishedTS = t.StartTS
+	logutil.Logger(w.logCtx).Info("[ddl] finish DDL job", zap.String("job", job.String()))
+	updateRawArgs := true
+	if job.Type == model.ActionAddPrimaryKey && !job.IsCancelled() {
+		// ActionAddPrimaryKey needs to check the warnings information in job.Args.
+		// Notice: warnings is used to support non-strict mode.
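+		// Setting updateRawArgs to false makes AddHistoryDDLJob keep job.RawArgs as-is instead of
+		// re-marshaling job.Args, which holds the warnings mentioned above.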
+ updateRawArgs = false + } + err = t.AddHistoryDDLJob(job, updateRawArgs) + return errors.Trace(err) +} + +func isDependencyJobDone(t *meta.Meta, job *model.Job) (bool, error) { + if job.DependencyID == noneDependencyJob { + return true, nil + } + + historyJob, err := t.GetHistoryDDLJob(job.DependencyID) + if err != nil { + return false, errors.Trace(err) + } + if historyJob == nil { + return false, nil + } + logutil.BgLogger().Info("[ddl] current DDL job dependent job is finished", zap.String("currentJob", job.String()), zap.Int64("dependentJobID", job.DependencyID)) + job.DependencyID = noneDependencyJob + return true, nil +} + +func newMetaWithQueueTp(txn kv.Transaction, tp string) *meta.Meta { + if tp == model.AddIndexStr || tp == model.AddPrimaryKeyStr { + return meta.NewMeta(txn, meta.AddIndexJobListKey) + } + return meta.NewMeta(txn) +} + +// handleDDLJobQueue handles DDL jobs in DDL Job queue. +func (w *worker) handleDDLJobQueue(d *ddlCtx) error { + once := true + waitDependencyJobCnt := 0 + for { + if isChanClosed(w.quitCh) { + return nil + } + + var ( + job *model.Job + schemaVer int64 + runJobErr error + ) + waitTime := 2 * d.lease + err := kv.RunInNewTxn(d.store, false, func(txn kv.Transaction) error { + // We are not owner, return and retry checking later. + if !d.isOwner() { + return nil + } + + var err error + t := newMetaWithQueueTp(txn, w.typeStr()) + // We become the owner. Get the first job and run it. + job, err = w.getFirstDDLJob(t) + if job == nil || err != nil { + return errors.Trace(err) + } + if isDone, err1 := isDependencyJobDone(t, job); err1 != nil || !isDone { + return errors.Trace(err1) + } + + if once { + w.waitSchemaSynced(d, job, waitTime) + once = false + return nil + } + + if job.IsDone() || job.IsRollbackDone() { + if !job.IsRollbackDone() { + job.State = model.JobStateSynced + } + err = w.finishDDLJob(t, job) + return errors.Trace(err) + } + + d.mu.RLock() + d.mu.hook.OnJobRunBefore(job) + d.mu.RUnlock() + + // If running job meets error, we will save this error in job Error + // and retry later if the job is not cancelled. + tidbutil.WithRecovery(func() { + schemaVer, runJobErr = w.runDDLJob(d, t, job) + }, func(r interface{}) { + if r != nil { + // If run ddl job panic, just cancel the ddl jobs. + job.State = model.JobStateCancelling + } + }) + if job.IsCancelled() { + txn.Reset() + err = w.finishDDLJob(t, job) + return errors.Trace(err) + } + err = w.updateDDLJob(t, job, runJobErr != nil) + if err = w.handleUpdateJobError(t, job, err); err != nil { + return errors.Trace(err) + } + return nil + }) + + if runJobErr != nil { + // wait a while to retry again. If we don't wait here, DDL will retry this job immediately, + // which may act like a deadlock. + logutil.Logger(w.logCtx).Info("[ddl] run DDL job failed, sleeps a while then retries it.", + zap.Duration("waitTime", WaitTimeWhenErrorOccured), zap.Error(runJobErr)) + time.Sleep(WaitTimeWhenErrorOccured) + } + + if err != nil { + return errors.Trace(err) + } else if job == nil { + // No job now, return and retry getting later. + return nil + } + w.waitDependencyJobFinished(job, &waitDependencyJobCnt) + + d.mu.RLock() + d.mu.hook.OnJobUpdated(job) + d.mu.RUnlock() + + // Here means the job enters another state (delete only, write only, public, etc...) or is cancelled. + // If the job is done or still running or rolling back, we will wait 2 * lease time to guarantee other servers to update + // the newest schema. 
+ w.waitSchemaChanged(nil, d, waitTime, schemaVer, job) + if job.IsSynced() || job.IsCancelled() { + asyncNotify(d.ddlJobDoneCh) + } + } +} + +// waitDependencyJobFinished waits for the dependency-job to be finished. +// If the dependency job isn't finished yet, we'd better wait a moment. +func (w *worker) waitDependencyJobFinished(job *model.Job, cnt *int) { + if job.DependencyID != noneDependencyJob { + intervalCnt := int(3 * time.Second / waitDependencyJobInterval) + if *cnt%intervalCnt == 0 { + logutil.Logger(w.logCtx).Info("[ddl] DDL job need to wait dependent job, sleeps a while, then retries it.", + zap.Int64("jobID", job.ID), + zap.Int64("dependentJobID", job.DependencyID), + zap.Duration("waitTime", waitDependencyJobInterval)) + } + time.Sleep(waitDependencyJobInterval) + *cnt++ + } else { + *cnt = 0 + } +} + +func chooseLeaseTime(t, max time.Duration) time.Duration { + if t == 0 || t > max { + return max + } + return t +} + +// runDDLJob runs a DDL job. It returns the current schema version in this transaction and the error. +func (w *worker) runDDLJob(d *ddlCtx, t *meta.Meta, job *model.Job) (ver int64, err error) { + // Mock for run ddl job panic. + failpoint.Inject("mockPanicInRunDDLJob", func(val failpoint.Value) {}) + + logutil.Logger(w.logCtx).Info("[ddl] run DDL job", zap.String("job", job.String())) + if job.IsFinished() { + return + } + // The cause of this job state is that the job is cancelled by client. + if job.IsCancelling() { + return convertJob2RollbackJob(w, d, t, job) + } + + if !job.IsRollingback() && !job.IsCancelling() { + job.State = model.JobStateRunning + } + + switch job.Type { + case model.ActionCreateSchema: + ver, err = onCreateSchema(d, t, job) + case model.ActionModifySchemaCharsetAndCollate: + ver, err = onModifySchemaCharsetAndCollate(t, job) + case model.ActionDropSchema: + ver, err = onDropSchema(t, job) + case model.ActionCreateTable: + ver, err = onCreateTable(d, t, job) + case model.ActionDropTable: + ver, err = onDropTableOrView(t, job) + case model.ActionAddColumn: + ver, err = onAddColumn(d, t, job) + case model.ActionDropColumn: + ver, err = onDropColumn(t, job) + case model.ActionModifyColumn: + ver, err = w.onModifyColumn(t, job) + case model.ActionSetDefaultValue: + ver, err = onSetDefaultValue(t, job) + case model.ActionAddIndex: + ver, err = w.onCreateIndex(d, t, job, false) + case model.ActionAddPrimaryKey: + ver, err = w.onCreateIndex(d, t, job, true) + case model.ActionDropIndex, model.ActionDropPrimaryKey: + ver, err = onDropIndex(t, job) + case model.ActionShardRowID: + ver, err = w.onShardRowID(d, t, job) + case model.ActionModifyTableComment: + ver, err = onModifyTableComment(t, job) + case model.ActionModifyTableCharsetAndCollate: + ver, err = onModifyTableCharsetAndCollate(t, job) + default: + // Invalid job, cancel it. + job.State = model.JobStateCancelled + err = errInvalidDDLJob.GenWithStack("invalid ddl job type: %v", job.Type) + } + + // Save errors in job, so that others can know errors happened. + if err != nil { + job.Error = toTError(err) + job.ErrorCount++ + + // If job is cancelled, we shouldn't return an error and shouldn't load DDL variables. + if job.State == model.JobStateCancelled { + logutil.Logger(w.logCtx).Info("[ddl] DDL job is cancelled normally", zap.Error(err)) + return ver, nil + } + logutil.Logger(w.logCtx).Error("[ddl] run DDL job error", zap.Error(err)) + + // Load global ddl variables. 
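+		// Reloading them here keeps the error count limit checked below in line with the latest global setting.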
+ if err1 := loadDDLVars(w); err1 != nil { + logutil.Logger(w.logCtx).Error("[ddl] load DDL global variable failed", zap.Error(err1)) + } + // Check error limit to avoid falling into an infinite loop. + if job.ErrorCount > variable.GetDDLErrorCountLimit() && job.State == model.JobStateRunning && admin.IsJobRollbackable(job) { + logutil.Logger(w.logCtx).Warn("[ddl] DDL job error count exceed the limit, cancelling it now", zap.Int64("jobID", job.ID), zap.Int64("errorCountLimit", variable.GetDDLErrorCountLimit())) + job.State = model.JobStateCancelling + } + } + return +} + +func loadDDLVars(w *worker) error { + // Get sessionctx from context resource pool. + var ctx sessionctx.Context + ctx, err := w.sessPool.get() + if err != nil { + return errors.Trace(err) + } + defer w.sessPool.put(ctx) + return util.LoadDDLVars(ctx) +} + +func toTError(err error) *terror.Error { + originErr := errors.Cause(err) + tErr, ok := originErr.(*terror.Error) + if ok { + return tErr + } + + // TODO: Add the error code. + return terror.ClassDDL.New(terror.CodeUnknown, err.Error()) +} + +// waitSchemaChanged waits for the completion of updating all servers' schema. In order to make sure that happens, +// we wait 2 * lease time. +func (w *worker) waitSchemaChanged(ctx context.Context, d *ddlCtx, waitTime time.Duration, latestSchemaVersion int64, job *model.Job) { + if !job.IsRunning() && !job.IsRollingback() && !job.IsDone() && !job.IsRollbackDone() { + return + } + if waitTime == 0 { + return + } + + timeStart := time.Now() + var err error + defer func() { + + }() + + if latestSchemaVersion == 0 { + logutil.Logger(w.logCtx).Info("[ddl] schema version doesn't change") + return + } + + if ctx == nil { + var cancelFunc context.CancelFunc + ctx, cancelFunc = context.WithTimeout(context.Background(), waitTime) + defer cancelFunc() + } + err = d.schemaSyncer.OwnerUpdateGlobalVersion(ctx, latestSchemaVersion) + if err != nil { + logutil.Logger(w.logCtx).Info("[ddl] update latest schema version failed", zap.Int64("ver", latestSchemaVersion), zap.Error(err)) + if terror.ErrorEqual(err, context.DeadlineExceeded) { + // If err is context.DeadlineExceeded, it means waitTime(2 * lease) is elapsed. So all the schemas are synced by ticker. + // There is no need to use etcd to sync. The function returns directly. + return + } + } + + // OwnerCheckAllVersions returns only when context is timeout(2 * lease) or all TiDB schemas are synced. + err = d.schemaSyncer.OwnerCheckAllVersions(ctx, latestSchemaVersion) + if err != nil { + logutil.Logger(w.logCtx).Info("[ddl] wait latest schema version to deadline", zap.Int64("ver", latestSchemaVersion), zap.Error(err)) + if terror.ErrorEqual(err, context.DeadlineExceeded) { + return + } + d.schemaSyncer.NotifyCleanExpiredPaths() + // Wait until timeout. + select { + case <-ctx.Done(): + return + } + } + logutil.Logger(w.logCtx).Info("[ddl] wait latest schema version changed", + zap.Int64("ver", latestSchemaVersion), + zap.Duration("take time", time.Since(timeStart)), + zap.String("job", job.String())) +} + +// waitSchemaSynced handles the following situation: +// If the job enters a new state, and the worker crashs when it's in the process of waiting for 2 * lease time, +// Then the worker restarts quickly, we may run the job immediately again, +// but in this case we don't wait enough 2 * lease time to let other servers update the schema. 
+// So here we get the latest schema version to make sure all servers' schema version update to the latest schema version +// in a cluster, or to wait for 2 * lease time. +func (w *worker) waitSchemaSynced(d *ddlCtx, job *model.Job, waitTime time.Duration) { + if !job.IsRunning() && !job.IsRollingback() && !job.IsDone() && !job.IsRollbackDone() { + return + } + // TODO: Make ctx exits when the d is close. + ctx, cancelFunc := context.WithTimeout(context.Background(), waitTime) + defer cancelFunc() + + latestSchemaVersion, err := d.schemaSyncer.MustGetGlobalVersion(ctx) + if err != nil { + logutil.Logger(w.logCtx).Warn("[ddl] get global version failed", zap.Error(err)) + return + } + w.waitSchemaChanged(ctx, d, waitTime, latestSchemaVersion, job) +} + +// updateSchemaVersion increments the schema version by 1 and sets SchemaDiff. +func updateSchemaVersion(t *meta.Meta, job *model.Job) (int64, error) { + schemaVersion, err := t.GenSchemaVersion() + if err != nil { + return 0, errors.Trace(err) + } + diff := &model.SchemaDiff{ + Version: schemaVersion, + Type: job.Type, + SchemaID: job.SchemaID, + TableID: job.TableID, + } + err = t.SetSchemaDiff(diff) + return schemaVersion, errors.Trace(err) +} + +func isChanClosed(quitCh chan struct{}) bool { + select { + case <-quitCh: + return true + default: + return false + } +} diff --git a/ddl/ddl_worker_test.go b/ddl/ddl_worker_test.go new file mode 100644 index 0000000..14e61d3 --- /dev/null +++ b/ddl/ddl_worker_test.go @@ -0,0 +1,723 @@ +// Copyright 2015 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package ddl + +import ( + "context" + "sync" + "time" + + . 
"github.com/pingcap/check" + "github.com/pingcap/errors" + "github.com/pingcap/tidb/kv" + "github.com/pingcap/tidb/meta" + "github.com/pingcap/tidb/parser/ast" + "github.com/pingcap/tidb/parser/model" + "github.com/pingcap/tidb/parser/mysql" + "github.com/pingcap/tidb/parser/terror" + "github.com/pingcap/tidb/sessionctx" + "github.com/pingcap/tidb/types" + "github.com/pingcap/tidb/util/admin" + "github.com/pingcap/tidb/util/mock" + "github.com/pingcap/tidb/util/sqlexec" +) + +var _ = Suite(&testDDLSuite{}) + +type testDDLSuite struct{} + +const testLease = 5 * time.Millisecond + +func (s *testDDLSuite) SetUpSuite(c *C) { + WaitTimeWhenErrorOccured = 1 * time.Microsecond +} + +func (s *testDDLSuite) TearDownSuite(c *C) { +} + +func (s *testDDLSuite) TestCheckOwner(c *C) { + store := testCreateStore(c, "test_owner") + defer store.Close() + + d1 := newDDL( + context.Background(), + WithStore(store), + WithLease(testLease), + ) + defer d1.Stop() + time.Sleep(testLease) + testCheckOwner(c, d1, true) + + c.Assert(d1.GetLease(), Equals, testLease) +} + +func (s *testDDLSuite) TestSchemaError(c *C) { + store := testCreateStore(c, "test_schema_error") + defer store.Close() + + d := newDDL( + context.Background(), + WithStore(store), + WithLease(testLease), + ) + defer d.Stop() + ctx := testNewContext(d) + + doDDLJobErr(c, 1, 0, model.ActionCreateSchema, []interface{}{1}, ctx, d) +} + +func (s *testDDLSuite) TestTableError(c *C) { + store := testCreateStore(c, "test_table_error") + defer store.Close() + + d := newDDL( + context.Background(), + WithStore(store), + WithLease(testLease), + ) + defer d.Stop() + ctx := testNewContext(d) + + // Schema ID is wrong, so dropping table is failed. + doDDLJobErr(c, -1, 1, model.ActionDropTable, nil, ctx, d) + // Table ID is wrong, so dropping table is failed. + dbInfo := testSchemaInfo(c, d, "test") + testCreateSchema(c, testNewContext(d), d, dbInfo) + job := doDDLJobErr(c, dbInfo.ID, -1, model.ActionDropTable, nil, ctx, d) + + // Table ID or schema ID is wrong, so getting table is failed. + tblInfo := testTableInfo(c, d, "t", 3) + testCreateTable(c, ctx, d, dbInfo, tblInfo) + err := kv.RunInNewTxn(store, false, func(txn kv.Transaction) error { + job.SchemaID = -1 + job.TableID = -1 + t := meta.NewMeta(txn) + _, err1 := getTableInfoAndCancelFaultJob(t, job, job.SchemaID) + c.Assert(err1, NotNil) + job.SchemaID = dbInfo.ID + _, err1 = getTableInfoAndCancelFaultJob(t, job, job.SchemaID) + c.Assert(err1, NotNil) + return nil + }) + c.Assert(err, IsNil) + + // Args is wrong, so creating table is failed. + doDDLJobErr(c, 1, 1, model.ActionCreateTable, []interface{}{1}, ctx, d) + // Schema ID is wrong, so creating table is failed. + doDDLJobErr(c, -1, tblInfo.ID, model.ActionCreateTable, []interface{}{tblInfo}, ctx, d) + // Table exists, so creating table is failed. 
+ tblInfo.ID = tblInfo.ID + 1 + doDDLJobErr(c, dbInfo.ID, tblInfo.ID, model.ActionCreateTable, []interface{}{tblInfo}, ctx, d) + +} + +func (s *testDDLSuite) TestInvalidDDLJob(c *C) { + store := testCreateStore(c, "test_invalid_ddl_job_type_error") + defer store.Close() + d := newDDL( + context.Background(), + WithStore(store), + WithLease(testLease), + ) + defer d.Stop() + ctx := testNewContext(d) + + job := &model.Job{ + SchemaID: 0, + TableID: 0, + Type: model.ActionNone, + BinlogInfo: &model.HistoryInfo{}, + Args: []interface{}{}, + } + err := d.doDDLJob(ctx, job) + c.Assert(err.Error(), Equals, "[ddl:8204]invalid ddl job type: none") +} + +func (s *testDDLSuite) TestIndexError(c *C) { + store := testCreateStore(c, "test_index_error") + defer store.Close() + + d := newDDL( + context.Background(), + WithStore(store), + WithLease(testLease), + ) + defer d.Stop() + ctx := testNewContext(d) + + // Schema ID is wrong. + doDDLJobErr(c, -1, 1, model.ActionAddIndex, nil, ctx, d) + doDDLJobErr(c, -1, 1, model.ActionDropIndex, nil, ctx, d) + + dbInfo := testSchemaInfo(c, d, "test") + tblInfo := testTableInfo(c, d, "t", 3) + testCreateSchema(c, ctx, d, dbInfo) + testCreateTable(c, ctx, d, dbInfo, tblInfo) + + // for adding index + doDDLJobErr(c, dbInfo.ID, tblInfo.ID, model.ActionAddIndex, []interface{}{1}, ctx, d) + doDDLJobErr(c, dbInfo.ID, tblInfo.ID, model.ActionAddIndex, + []interface{}{false, model.NewCIStr("t"), 1, + []*ast.IndexPartSpecification{{Column: &ast.ColumnName{Name: model.NewCIStr("c")}, Length: 256}}}, ctx, d) + doDDLJobErr(c, dbInfo.ID, tblInfo.ID, model.ActionAddIndex, + []interface{}{false, model.NewCIStr("c1_index"), 1, + []*ast.IndexPartSpecification{{Column: &ast.ColumnName{Name: model.NewCIStr("c")}, Length: 256}}}, ctx, d) + testCreateIndex(c, ctx, d, dbInfo, tblInfo, false, "c1_index", "c1") + doDDLJobErr(c, dbInfo.ID, tblInfo.ID, model.ActionAddIndex, + []interface{}{false, model.NewCIStr("c1_index"), 1, + []*ast.IndexPartSpecification{{Column: &ast.ColumnName{Name: model.NewCIStr("c1")}, Length: 256}}}, ctx, d) + + // for dropping index + doDDLJobErr(c, dbInfo.ID, tblInfo.ID, model.ActionDropIndex, []interface{}{1}, ctx, d) + testDropIndex(c, ctx, d, dbInfo, tblInfo, "c1_index") + doDDLJobErr(c, dbInfo.ID, tblInfo.ID, model.ActionDropIndex, []interface{}{model.NewCIStr("c1_index")}, ctx, d) +} + +func testCheckOwner(c *C, d *ddl, expectedVal bool) { + c.Assert(d.isOwner(), Equals, expectedVal) +} + +func testCheckJobDone(c *C, d *ddl, job *model.Job, isAdd bool) { + kv.RunInNewTxn(d.store, false, func(txn kv.Transaction) error { + t := meta.NewMeta(txn) + historyJob, err := t.GetHistoryDDLJob(job.ID) + c.Assert(err, IsNil) + checkHistoryJob(c, historyJob) + if isAdd { + c.Assert(historyJob.SchemaState, Equals, model.StatePublic) + } else { + c.Assert(historyJob.SchemaState, Equals, model.StateNone) + } + + return nil + }) +} + +func testCheckJobCancelled(c *C, d *ddl, job *model.Job, state *model.SchemaState) { + kv.RunInNewTxn(d.store, false, func(txn kv.Transaction) error { + t := meta.NewMeta(txn) + historyJob, err := t.GetHistoryDDLJob(job.ID) + c.Assert(err, IsNil) + c.Assert(historyJob.IsCancelled() || historyJob.IsRollbackDone(), IsTrue, Commentf("history job %s", historyJob)) + if state != nil { + c.Assert(historyJob.SchemaState, Equals, *state) + } + return nil + }) +} + +func doDDLJobErrWithSchemaState(ctx sessionctx.Context, d *ddl, c *C, schemaID, tableID int64, tp model.ActionType, + args []interface{}, state *model.SchemaState) *model.Job { + job := 
&model.Job{ + SchemaID: schemaID, + TableID: tableID, + Type: tp, + Args: args, + BinlogInfo: &model.HistoryInfo{}, + } + err := d.doDDLJob(ctx, job) + // TODO: Add the detail error check. + c.Assert(err, NotNil, Commentf("err:%v", err)) + testCheckJobCancelled(c, d, job, state) + + return job +} + +func doDDLJobSuccess(ctx sessionctx.Context, d *ddl, c *C, schemaID, tableID int64, tp model.ActionType, + args []interface{}) { + job := &model.Job{ + SchemaID: schemaID, + TableID: tableID, + Type: tp, + Args: args, + BinlogInfo: &model.HistoryInfo{}, + } + err := d.doDDLJob(ctx, job) + c.Assert(err, IsNil) +} + +func doDDLJobErr(c *C, schemaID, tableID int64, tp model.ActionType, args []interface{}, + ctx sessionctx.Context, d *ddl) *model.Job { + return doDDLJobErrWithSchemaState(ctx, d, c, schemaID, tableID, tp, args, nil) +} + +func checkCancelState(txn kv.Transaction, job *model.Job, test *testCancelJob) error { + var checkErr error + addIndexFirstReorg := (test.act == model.ActionAddIndex || test.act == model.ActionAddPrimaryKey) && + job.SchemaState == model.StateWriteReorganization && job.SnapshotVer == 0 + // If the action is adding index and the state is writing reorganization, it wants to test the case of cancelling the job when backfilling indexes. + // When the job satisfies this case of addIndexFirstReorg, the worker hasn't started to backfill indexes. + if test.cancelState == job.SchemaState && !addIndexFirstReorg && !job.IsRollingback() { + errs, err := admin.CancelJobs(txn, test.jobIDs) + if err != nil { + checkErr = errors.Trace(err) + return checkErr + } + // It only tests cancel one DDL job. + if !terror.ErrorEqual(errs[0], test.cancelRetErrs[0]) { + checkErr = errors.Trace(errs[0]) + return checkErr + } + } + return checkErr +} + +type testCancelJob struct { + jobIDs []int64 + cancelRetErrs []error // cancelRetErrs is the first return value of CancelJobs. + act model.ActionType // act is the job action. + cancelState model.SchemaState +} + +func buildCancelJobTests(firstID int64) []testCancelJob { + noErrs := []error{nil} + tests := []testCancelJob{ + {act: model.ActionAddIndex, jobIDs: []int64{firstID + 1}, cancelRetErrs: noErrs, cancelState: model.StateDeleteOnly}, + {act: model.ActionAddIndex, jobIDs: []int64{firstID + 2}, cancelRetErrs: noErrs, cancelState: model.StateWriteOnly}, + {act: model.ActionAddIndex, jobIDs: []int64{firstID + 3}, cancelRetErrs: noErrs, cancelState: model.StateWriteReorganization}, + {act: model.ActionAddIndex, jobIDs: []int64{firstID + 4}, cancelRetErrs: []error{admin.ErrCancelFinishedDDLJob.GenWithStackByArgs(firstID + 4)}, cancelState: model.StatePublic}, + + // Test cancel drop index job , see TestCancelDropIndex. + {act: model.ActionAddColumn, jobIDs: []int64{firstID + 5}, cancelRetErrs: noErrs, cancelState: model.StateDeleteOnly}, + {act: model.ActionAddColumn, jobIDs: []int64{firstID + 6}, cancelRetErrs: noErrs, cancelState: model.StateWriteOnly}, + {act: model.ActionAddColumn, jobIDs: []int64{firstID + 7}, cancelRetErrs: noErrs, cancelState: model.StateWriteReorganization}, + {act: model.ActionAddColumn, jobIDs: []int64{firstID + 8}, cancelRetErrs: []error{admin.ErrCancelFinishedDDLJob.GenWithStackByArgs(firstID + 8)}, cancelState: model.StatePublic}, + + // Test create table, watch out, table id will alloc a globalID. + {act: model.ActionCreateTable, jobIDs: []int64{firstID + 10}, cancelRetErrs: noErrs, cancelState: model.StateNone}, + // Test create database, watch out, database id will alloc a globalID. 
+ {act: model.ActionCreateSchema, jobIDs: []int64{firstID + 12}, cancelRetErrs: noErrs, cancelState: model.StateNone}, + + {act: model.ActionDropColumn, jobIDs: []int64{firstID + 13}, cancelRetErrs: []error{admin.ErrCannotCancelDDLJob.GenWithStackByArgs(firstID + 13)}, cancelState: model.StateDeleteOnly}, + {act: model.ActionDropColumn, jobIDs: []int64{firstID + 14}, cancelRetErrs: []error{admin.ErrCannotCancelDDLJob.GenWithStackByArgs(firstID + 14)}, cancelState: model.StateWriteOnly}, + {act: model.ActionDropColumn, jobIDs: []int64{firstID + 15}, cancelRetErrs: []error{admin.ErrCannotCancelDDLJob.GenWithStackByArgs(firstID + 15)}, cancelState: model.StateWriteReorganization}, + {act: model.ActionRebaseAutoID, jobIDs: []int64{firstID + 16}, cancelRetErrs: noErrs, cancelState: model.StateNone}, + {act: model.ActionShardRowID, jobIDs: []int64{firstID + 17}, cancelRetErrs: noErrs, cancelState: model.StateNone}, + + {act: model.ActionModifyColumn, jobIDs: []int64{firstID + 18}, cancelRetErrs: noErrs, cancelState: model.StateNone}, + {act: model.ActionAddForeignKey, jobIDs: []int64{firstID + 19}, cancelRetErrs: noErrs, cancelState: model.StateNone}, + {act: model.ActionAddForeignKey, jobIDs: []int64{firstID + 20}, cancelRetErrs: []error{admin.ErrCancelFinishedDDLJob.GenWithStackByArgs(firstID + 20)}, cancelState: model.StatePublic}, + {act: model.ActionDropForeignKey, jobIDs: []int64{firstID + 21}, cancelRetErrs: noErrs, cancelState: model.StateNone}, + {act: model.ActionDropForeignKey, jobIDs: []int64{firstID + 22}, cancelRetErrs: []error{admin.ErrCancelFinishedDDLJob.GenWithStackByArgs(firstID + 22)}, cancelState: model.StatePublic}, + + {act: model.ActionRenameTable, jobIDs: []int64{firstID + 23}, cancelRetErrs: noErrs, cancelState: model.StateNone}, + {act: model.ActionRenameTable, jobIDs: []int64{firstID + 24}, cancelRetErrs: []error{admin.ErrCancelFinishedDDLJob.GenWithStackByArgs(firstID + 24)}, cancelState: model.StatePublic}, + + {act: model.ActionModifyTableCharsetAndCollate, jobIDs: []int64{firstID + 19}, cancelRetErrs: noErrs, cancelState: model.StateNone}, + {act: model.ActionModifyTableCharsetAndCollate, jobIDs: []int64{firstID + 20}, cancelRetErrs: []error{admin.ErrCancelFinishedDDLJob.GenWithStackByArgs(firstID + 26)}, cancelState: model.StatePublic}, + {act: model.ActionTruncateTablePartition, jobIDs: []int64{firstID + 27}, cancelRetErrs: noErrs, cancelState: model.StateNone}, + {act: model.ActionTruncateTablePartition, jobIDs: []int64{firstID + 28}, cancelRetErrs: []error{admin.ErrCancelFinishedDDLJob.GenWithStackByArgs(firstID + 28)}, cancelState: model.StatePublic}, + {act: model.ActionModifySchemaCharsetAndCollate, jobIDs: []int64{firstID + 30}, cancelRetErrs: noErrs, cancelState: model.StateNone}, + {act: model.ActionModifySchemaCharsetAndCollate, jobIDs: []int64{firstID + 31}, cancelRetErrs: []error{admin.ErrCancelFinishedDDLJob.GenWithStackByArgs(firstID + 31)}, cancelState: model.StatePublic}, + + {act: model.ActionAddPrimaryKey, jobIDs: []int64{firstID + 32}, cancelRetErrs: noErrs, cancelState: model.StateDeleteOnly}, + {act: model.ActionAddPrimaryKey, jobIDs: []int64{firstID + 33}, cancelRetErrs: noErrs, cancelState: model.StateWriteOnly}, + {act: model.ActionAddPrimaryKey, jobIDs: []int64{firstID + 34}, cancelRetErrs: noErrs, cancelState: model.StateWriteReorganization}, + {act: model.ActionAddPrimaryKey, jobIDs: []int64{firstID + 35}, cancelRetErrs: []error{admin.ErrCancelFinishedDDLJob.GenWithStackByArgs(firstID + 35)}, cancelState: model.StatePublic}, + {act: 
model.ActionDropPrimaryKey, jobIDs: []int64{firstID + 36}, cancelRetErrs: noErrs, cancelState: model.StateWriteOnly}, + {act: model.ActionDropPrimaryKey, jobIDs: []int64{firstID + 37}, cancelRetErrs: []error{admin.ErrCannotCancelDDLJob.GenWithStackByArgs(firstID + 37)}, cancelState: model.StateDeleteOnly}, + } + + return tests +} + +func (s *testDDLSuite) checkAddIdx(c *C, d *ddl, schemaID int64, tableID int64, idxName string, success bool) { + checkIdxExist(c, d, schemaID, tableID, idxName, success) +} + +func checkIdxExist(c *C, d *ddl, schemaID int64, tableID int64, idxName string, expectedExist bool) { + changedTable := testGetTable(c, d, schemaID, tableID) + var found bool + for _, idxInfo := range changedTable.Meta().Indices { + if idxInfo.Name.O == idxName { + found = true + break + } + } + c.Assert(found, Equals, expectedExist) +} + +func (s *testDDLSuite) checkAddColumn(c *C, d *ddl, schemaID int64, tableID int64, colName string, success bool) { + changedTable := testGetTable(c, d, schemaID, tableID) + var found bool + for _, colInfo := range changedTable.Meta().Columns { + if colInfo.Name.O == colName { + found = true + break + } + } + c.Assert(found, Equals, success) +} + +func (s *testDDLSuite) checkCancelDropColumn(c *C, d *ddl, schemaID int64, tableID int64, colName string, success bool) { + changedTable := testGetTable(c, d, schemaID, tableID) + notFound := true + for _, colInfo := range changedTable.Meta().Columns { + if colInfo.Name.O == colName { + notFound = false + break + } + } + c.Assert(notFound, Equals, success) +} + +func (s *testDDLSuite) TestCancelJob(c *C) { + store := testCreateStore(c, "test_cancel_job") + defer store.Close() + d := newDDL( + context.Background(), + WithStore(store), + WithLease(testLease), + ) + defer d.Stop() + dbInfo := testSchemaInfo(c, d, "test_cancel_job") + testCreateSchema(c, testNewContext(d), d, dbInfo) + // create a partition table. + partitionTblInfo := testTableInfo(c, d, "t_partition", 5) + // Skip using sessPool. Make sure adding primary key can be successful. + partitionTblInfo.Columns[0].Flag |= mysql.NotNullFlag + // create table t (c1 int, c2 int, c3 int, c4 int, c5 int); + tblInfo := testTableInfo(c, d, "t", 5) + ctx := testNewContext(d) + err := ctx.NewTxn(context.Background()) + c.Assert(err, IsNil) + testCreateTable(c, ctx, d, dbInfo, partitionTblInfo) + tableAutoID := int64(100) + shardRowIDBits := uint64(5) + tblInfo.AutoIncID = tableAutoID + tblInfo.ShardRowIDBits = shardRowIDBits + job := testCreateTable(c, ctx, d, dbInfo, tblInfo) + // insert t values (1, 2, 3, 4, 5); + originTable := testGetTable(c, d, dbInfo.ID, tblInfo.ID) + row := types.MakeDatums(1, 2, 3, 4, 5) + _, err = originTable.AddRecord(ctx, row) + c.Assert(err, IsNil) + txn, err := ctx.Txn(true) + c.Assert(err, IsNil) + err = txn.Commit(context.Background()) + c.Assert(err, IsNil) + + tc := &TestDDLCallback{} + // set up hook + firstJobID := job.ID + tests := buildCancelJobTests(firstJobID) + var checkErr error + var mu sync.Mutex + var test *testCancelJob + updateTest := func(t *testCancelJob) { + mu.Lock() + test = t + mu.Unlock() + } + hookCancelFunc := func(job *model.Job) { + if job.State == model.JobStateSynced || job.State == model.JobStateCancelled || job.State == model.JobStateCancelling { + return + } + // This hook only valid for the related test job. + // This is use to avoid parallel test fail. 
+ mu.Lock() + if len(test.jobIDs) > 0 && test.jobIDs[0] != job.ID { + mu.Unlock() + return + } + mu.Unlock() + if checkErr != nil { + return + } + + hookCtx := mock.NewContext() + hookCtx.Store = store + err1 := hookCtx.NewTxn(context.Background()) + if err1 != nil { + checkErr = errors.Trace(err1) + return + } + txn, err1 = hookCtx.Txn(true) + if err1 != nil { + checkErr = errors.Trace(err1) + return + } + mu.Lock() + checkErr = checkCancelState(txn, job, test) + mu.Unlock() + if checkErr != nil { + return + } + err1 = txn.Commit(context.Background()) + if err1 != nil { + checkErr = errors.Trace(err1) + return + } + } + tc.onJobUpdated = hookCancelFunc + tc.onJobRunBefore = hookCancelFunc + d.SetHook(tc) + + // for adding index + updateTest(&tests[0]) + idxOrigName := "idx" + validArgs := []interface{}{false, model.NewCIStr(idxOrigName), + []*ast.IndexPartSpecification{{ + Column: &ast.ColumnName{Name: model.NewCIStr("c1")}, + Length: -1, + }}, nil} + + // When the job satisfies this test case, the option will be rollback, so the job's schema state is none. + cancelState := model.StateNone + doDDLJobErrWithSchemaState(ctx, d, c, dbInfo.ID, tblInfo.ID, model.ActionAddIndex, validArgs, &cancelState) + c.Check(errors.ErrorStack(checkErr), Equals, "") + s.checkAddIdx(c, d, dbInfo.ID, tblInfo.ID, idxOrigName, false) + updateTest(&tests[1]) + doDDLJobErrWithSchemaState(ctx, d, c, dbInfo.ID, tblInfo.ID, model.ActionAddIndex, validArgs, &cancelState) + c.Check(errors.ErrorStack(checkErr), Equals, "") + s.checkAddIdx(c, d, dbInfo.ID, tblInfo.ID, idxOrigName, false) + updateTest(&tests[2]) + doDDLJobErrWithSchemaState(ctx, d, c, dbInfo.ID, tblInfo.ID, model.ActionAddIndex, validArgs, &cancelState) + c.Check(errors.ErrorStack(checkErr), Equals, "") + s.checkAddIdx(c, d, dbInfo.ID, tblInfo.ID, idxOrigName, false) + updateTest(&tests[3]) + testCreateIndex(c, ctx, d, dbInfo, tblInfo, false, "idx", "c2") + c.Check(errors.ErrorStack(checkErr), Equals, "") + txn, err = ctx.Txn(true) + c.Assert(err, IsNil) + c.Assert(txn.Commit(context.Background()), IsNil) + s.checkAddIdx(c, d, dbInfo.ID, tblInfo.ID, idxOrigName, true) + + // for add column + updateTest(&tests[4]) + addingColName := "colA" + newColumnDef := &ast.ColumnDef{ + Name: &ast.ColumnName{Name: model.NewCIStr(addingColName)}, + Tp: &types.FieldType{Tp: mysql.TypeLonglong}, + Options: []*ast.ColumnOption{}, + } + col, _, err := buildColumnAndConstraint(ctx, 2, newColumnDef, nil) + c.Assert(err, IsNil) + + addColumnArgs := []interface{}{col, 0} + doDDLJobErrWithSchemaState(ctx, d, c, dbInfo.ID, tblInfo.ID, model.ActionAddColumn, addColumnArgs, &cancelState) + c.Check(errors.ErrorStack(checkErr), Equals, "") + s.checkAddColumn(c, d, dbInfo.ID, tblInfo.ID, addingColName, false) + + updateTest(&tests[5]) + doDDLJobErrWithSchemaState(ctx, d, c, dbInfo.ID, tblInfo.ID, model.ActionAddColumn, addColumnArgs, &cancelState) + c.Check(errors.ErrorStack(checkErr), Equals, "") + s.checkAddColumn(c, d, dbInfo.ID, tblInfo.ID, addingColName, false) + + updateTest(&tests[6]) + doDDLJobErrWithSchemaState(ctx, d, c, dbInfo.ID, tblInfo.ID, model.ActionAddColumn, addColumnArgs, &cancelState) + c.Check(errors.ErrorStack(checkErr), Equals, "") + s.checkAddColumn(c, d, dbInfo.ID, tblInfo.ID, addingColName, false) + + updateTest(&tests[7]) + testAddColumn(c, ctx, d, dbInfo, tblInfo, addColumnArgs) + c.Check(errors.ErrorStack(checkErr), Equals, "") + s.checkAddColumn(c, d, dbInfo.ID, tblInfo.ID, addingColName, true) + + // for create table + tblInfo1 := testTableInfo(c, d, 
"t1", 2) + updateTest(&tests[8]) + doDDLJobErrWithSchemaState(ctx, d, c, dbInfo.ID, tblInfo1.ID, model.ActionCreateTable, []interface{}{tblInfo1}, &cancelState) + c.Check(checkErr, IsNil) + testCheckTableState(c, d, dbInfo, tblInfo1, model.StateNone) + + // for create database + dbInfo1 := testSchemaInfo(c, d, "test_cancel_job1") + updateTest(&tests[9]) + doDDLJobErrWithSchemaState(ctx, d, c, dbInfo1.ID, 0, model.ActionCreateSchema, []interface{}{dbInfo1}, &cancelState) + c.Check(checkErr, IsNil) + testCheckSchemaState(c, d, dbInfo1, model.StateNone) + + // for drop column. + updateTest(&tests[10]) + dropColName := "c3" + s.checkCancelDropColumn(c, d, dbInfo.ID, tblInfo.ID, dropColName, false) + testDropColumn(c, ctx, d, dbInfo, tblInfo, dropColName, false) + c.Check(errors.ErrorStack(checkErr), Equals, "") + s.checkCancelDropColumn(c, d, dbInfo.ID, tblInfo.ID, dropColName, true) + + updateTest(&tests[11]) + dropColName = "c4" + s.checkCancelDropColumn(c, d, dbInfo.ID, tblInfo.ID, dropColName, false) + testDropColumn(c, ctx, d, dbInfo, tblInfo, dropColName, false) + c.Check(errors.ErrorStack(checkErr), Equals, "") + s.checkCancelDropColumn(c, d, dbInfo.ID, tblInfo.ID, dropColName, true) + + updateTest(&tests[12]) + dropColName = "c5" + s.checkCancelDropColumn(c, d, dbInfo.ID, tblInfo.ID, dropColName, false) + testDropColumn(c, ctx, d, dbInfo, tblInfo, dropColName, false) + c.Check(errors.ErrorStack(checkErr), Equals, "") + s.checkCancelDropColumn(c, d, dbInfo.ID, tblInfo.ID, dropColName, true) + + // cancel rebase auto id + updateTest(&tests[13]) + rebaseIDArgs := []interface{}{int64(200)} + doDDLJobErrWithSchemaState(ctx, d, c, dbInfo.ID, tblInfo.ID, model.ActionRebaseAutoID, rebaseIDArgs, &cancelState) + c.Check(errors.ErrorStack(checkErr), Equals, "") + changedTable := testGetTable(c, d, dbInfo.ID, tblInfo.ID) + c.Assert(changedTable.Meta().AutoIncID, Equals, tableAutoID) + + // cancel shard bits + updateTest(&tests[14]) + shardRowIDArgs := []interface{}{uint64(7)} + doDDLJobErrWithSchemaState(ctx, d, c, dbInfo.ID, tblInfo.ID, model.ActionShardRowID, shardRowIDArgs, &cancelState) + c.Check(checkErr, IsNil) + changedTable = testGetTable(c, d, dbInfo.ID, tblInfo.ID) + c.Assert(changedTable.Meta().ShardRowIDBits, Equals, shardRowIDBits) + + // modify column + col.DefaultValue = "1" + updateTest(&tests[15]) + modifyColumnArgs := []interface{}{col, col.Name, byte(0)} + doDDLJobErrWithSchemaState(ctx, d, c, dbInfo.ID, tblInfo.ID, test.act, modifyColumnArgs, &test.cancelState) + c.Check(checkErr, IsNil) + changedTable = testGetTable(c, d, dbInfo.ID, tblInfo.ID) + changedCol := model.FindColumnInfo(changedTable.Meta().Columns, col.Name.L) + c.Assert(changedCol.DefaultValue, IsNil) + + // test modify table charset failed caused by canceled. + test = &tests[22] + modifyTableCharsetArgs := []interface{}{"utf8mb4", "utf8mb4_bin"} + doDDLJobErrWithSchemaState(ctx, d, c, dbInfo.ID, tblInfo.ID, test.act, modifyTableCharsetArgs, &test.cancelState) + c.Check(checkErr, IsNil) + changedTable = testGetTable(c, d, dbInfo.ID, tblInfo.ID) + c.Assert(changedTable.Meta().Charset, Equals, "utf8") + c.Assert(changedTable.Meta().Collate, Equals, "utf8_bin") + + // test modify table charset successfully. 
+ test = &tests[23] + doDDLJobSuccess(ctx, d, c, dbInfo.ID, tblInfo.ID, test.act, modifyTableCharsetArgs) + c.Check(checkErr, IsNil) + changedTable = testGetTable(c, d, dbInfo.ID, tblInfo.ID) + c.Assert(changedTable.Meta().Charset, Equals, "utf8mb4") + c.Assert(changedTable.Meta().Collate, Equals, "utf8mb4_bin") +} + +func (s *testDDLSuite) TestIgnorableSpec(c *C) { + specs := []ast.AlterTableType{ + ast.AlterTableOption, + ast.AlterTableAddColumns, + ast.AlterTableAddConstraint, + ast.AlterTableDropColumn, + ast.AlterTableDropPrimaryKey, + ast.AlterTableDropIndex, + ast.AlterTableModifyColumn, + ast.AlterTableChangeColumn, + ast.AlterTableAlterColumn, + } + for _, spec := range specs { + c.Assert(isIgnorableSpec(spec), IsFalse) + } + + ignorableSpecs := []ast.AlterTableType{ + ast.AlterTableLock, + ast.AlterTableAlgorithm, + } + for _, spec := range ignorableSpecs { + c.Assert(isIgnorableSpec(spec), IsTrue) + } +} + +func (s *testDDLSuite) TestBuildJobDependence(c *C) { + store := testCreateStore(c, "test_set_job_relation") + defer store.Close() + + // Add some non-add-index jobs. + job1 := &model.Job{ID: 1, TableID: 1, Type: model.ActionAddColumn} + job2 := &model.Job{ID: 2, TableID: 1, Type: model.ActionCreateTable} + job3 := &model.Job{ID: 3, TableID: 2, Type: model.ActionDropColumn} + job6 := &model.Job{ID: 6, TableID: 1, Type: model.ActionDropTable} + job7 := &model.Job{ID: 7, TableID: 2, Type: model.ActionModifyColumn} + job9 := &model.Job{ID: 9, SchemaID: 111, Type: model.ActionDropSchema} + kv.RunInNewTxn(store, false, func(txn kv.Transaction) error { + t := meta.NewMeta(txn) + err := t.EnQueueDDLJob(job1) + c.Assert(err, IsNil) + err = t.EnQueueDDLJob(job2) + c.Assert(err, IsNil) + err = t.EnQueueDDLJob(job3) + c.Assert(err, IsNil) + err = t.EnQueueDDLJob(job6) + c.Assert(err, IsNil) + err = t.EnQueueDDLJob(job7) + c.Assert(err, IsNil) + err = t.EnQueueDDLJob(job9) + c.Assert(err, IsNil) + return nil + }) + job4 := &model.Job{ID: 4, TableID: 1, Type: model.ActionAddIndex} + kv.RunInNewTxn(store, false, func(txn kv.Transaction) error { + t := meta.NewMeta(txn) + err := buildJobDependence(t, job4) + c.Assert(err, IsNil) + c.Assert(job4.DependencyID, Equals, int64(2)) + return nil + }) + job5 := &model.Job{ID: 5, TableID: 2, Type: model.ActionAddIndex} + kv.RunInNewTxn(store, false, func(txn kv.Transaction) error { + t := meta.NewMeta(txn) + err := buildJobDependence(t, job5) + c.Assert(err, IsNil) + c.Assert(job5.DependencyID, Equals, int64(3)) + return nil + }) + job8 := &model.Job{ID: 8, TableID: 3, Type: model.ActionAddIndex} + kv.RunInNewTxn(store, false, func(txn kv.Transaction) error { + t := meta.NewMeta(txn) + err := buildJobDependence(t, job8) + c.Assert(err, IsNil) + c.Assert(job8.DependencyID, Equals, int64(0)) + return nil + }) + job10 := &model.Job{ID: 10, SchemaID: 111, TableID: 3, Type: model.ActionAddIndex} + kv.RunInNewTxn(store, false, func(txn kv.Transaction) error { + t := meta.NewMeta(txn) + err := buildJobDependence(t, job10) + c.Assert(err, IsNil) + c.Assert(job10.DependencyID, Equals, int64(9)) + return nil + }) + job12 := &model.Job{ID: 12, SchemaID: 112, TableID: 2, Type: model.ActionAddIndex} + kv.RunInNewTxn(store, false, func(txn kv.Transaction) error { + t := meta.NewMeta(txn) + err := buildJobDependence(t, job12) + c.Assert(err, IsNil) + c.Assert(job12.DependencyID, Equals, int64(7)) + return nil + }) +} + +func (s *testDDLSuite) TestDDLPackageExecuteSQL(c *C) { + store := testCreateStore(c, "test_run_sql") + defer store.Close() + + d := newDDL( + 
context.Background(), + WithStore(store), + WithLease(testLease), + ) + testCheckOwner(c, d, true) + defer d.Stop() + worker := d.generalWorker() + c.Assert(worker, NotNil) + + // In test environment, worker.ctxPool will be nil, and get will return mock.Context. + // We just test that can use it to call sqlexec.SQLExecutor.Execute. + sess, err := worker.sessPool.get() + c.Assert(err, IsNil) + defer worker.sessPool.put(sess) + se := sess.(sqlexec.SQLExecutor) + _, _ = se.Execute(context.Background(), "create table t(a int);") +} diff --git a/ddl/fail_test.go b/ddl/fail_test.go new file mode 100644 index 0000000..06e6269 --- /dev/null +++ b/ddl/fail_test.go @@ -0,0 +1,71 @@ +// Copyright 2018 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package ddl + +import ( + "context" + + . "github.com/pingcap/check" + "github.com/pingcap/failpoint" + "github.com/pingcap/tidb/parser/model" + "github.com/pingcap/tidb/types" +) + +func (s *testColumnChangeSuite) TestFailBeforeDecodeArgs(c *C) { + d := newDDL( + context.Background(), + WithStore(s.store), + WithLease(testLease), + ) + defer d.Stop() + // create table t_fail (c1 int, c2 int); + tblInfo := testTableInfo(c, d, "t_fail", 2) + ctx := testNewContext(d) + err := ctx.NewTxn(context.Background()) + c.Assert(err, IsNil) + testCreateTable(c, ctx, d, s.dbInfo, tblInfo) + // insert t_fail values (1, 2); + originTable := testGetTable(c, d, s.dbInfo.ID, tblInfo.ID) + row := types.MakeDatums(1, 2) + _, err = originTable.AddRecord(ctx, row) + c.Assert(err, IsNil) + txn, err := ctx.Txn(true) + c.Assert(err, IsNil) + err = txn.Commit(context.Background()) + c.Assert(err, IsNil) + + tc := &TestDDLCallback{} + first := true + stateCnt := 0 + tc.onJobRunBefore = func(job *model.Job) { + // It can be other schema states except failed schema state. + // This schema state can only appear once. + if job.SchemaState == model.StateWriteOnly { + stateCnt++ + } else if job.SchemaState == model.StateWriteReorganization { + if first { + c.Assert(failpoint.Enable("github.com/pingcap/tidb/ddl/errorBeforeDecodeArgs", `return(true)`), IsNil) + first = false + } else { + c.Assert(failpoint.Disable("github.com/pingcap/tidb/ddl/errorBeforeDecodeArgs"), IsNil) + } + } + } + d.SetHook(tc) + defaultValue := int64(3) + job := testCreateColumn(c, ctx, d, s.dbInfo, tblInfo, "c3", defaultValue) + // Make sure the schema state only appears once. + c.Assert(stateCnt, Equals, 1) + testCheckJobDone(c, d, job, true) +} diff --git a/ddl/failtest/fail_db_test.go b/ddl/failtest/fail_db_test.go new file mode 100644 index 0000000..88e3073 --- /dev/null +++ b/ddl/failtest/fail_db_test.go @@ -0,0 +1,301 @@ +// Copyright 2018 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package ddl_test + +import ( + "context" + "fmt" + "math/rand" + "os" + "sync/atomic" + "testing" + "time" + + . "github.com/pingcap/check" + "github.com/pingcap/errors" + "github.com/pingcap/failpoint" + "github.com/pingcap/tidb/ddl" + "github.com/pingcap/tidb/ddl/testutil" + ddlutil "github.com/pingcap/tidb/ddl/util" + "github.com/pingcap/tidb/domain" + "github.com/pingcap/tidb/kv" + "github.com/pingcap/tidb/parser" + "github.com/pingcap/tidb/parser/model" + "github.com/pingcap/tidb/session" + "github.com/pingcap/tidb/sessionctx/variable" + "github.com/pingcap/tidb/store/mockstore" + "github.com/pingcap/tidb/store/mockstore/mocktikv" + "github.com/pingcap/tidb/util/logutil" + "github.com/pingcap/tidb/util/testkit" + "github.com/pingcap/tidb/util/testleak" +) + +func TestT(t *testing.T) { + CustomVerboseFlag = true + logLevel := os.Getenv("log_level") + logutil.InitLogger(logutil.NewLogConfig(logLevel, "", logutil.EmptyFileLogConfig, false)) + testleak.BeforeTest() + TestingT(t) + testleak.AfterTestT(t)() +} + +var _ = Suite(&testFailDBSuite{}) + +type testFailDBSuite struct { + cluster *mocktikv.Cluster + mvccStore mocktikv.MVCCStore + lease time.Duration + store kv.Storage + dom *domain.Domain + se session.Session + p *parser.Parser +} + +func (s *testFailDBSuite) SetUpSuite(c *C) { + s.lease = 200 * time.Millisecond + ddl.WaitTimeWhenErrorOccured = 1 * time.Microsecond + var err error + s.cluster = mocktikv.NewCluster() + mocktikv.BootstrapWithSingleStore(s.cluster) + s.mvccStore = mocktikv.MustNewMVCCStore() + s.store, err = mockstore.NewMockTikvStore( + mockstore.WithCluster(s.cluster), + mockstore.WithMVCCStore(s.mvccStore), + ) + c.Assert(err, IsNil) + session.SetSchemaLease(s.lease) + s.dom, err = session.BootstrapSession(s.store) + c.Assert(err, IsNil) + s.se, err = session.CreateSession4Test(s.store) + c.Assert(err, IsNil) + s.p = parser.New() +} + +func (s *testFailDBSuite) TearDownSuite(c *C) { + _, err := s.se.Execute(context.Background(), "drop database if exists test_db_state") + c.Assert(err, IsNil) + s.se.Close() + s.dom.Close() + s.store.Close() +} + +func (s *testFailDBSuite) TestUpdateHandleFailed(c *C) { + c.Assert(failpoint.Enable("github.com/pingcap/tidb/ddl/errorUpdateReorgHandle", `1*return`), IsNil) + defer func() { + c.Assert(failpoint.Disable("github.com/pingcap/tidb/ddl/errorUpdateReorgHandle"), IsNil) + }() + tk := testkit.NewTestKit(c, s.store) + tk.MustExec("create database if not exists test_handle_failed") + defer tk.MustExec("drop database test_handle_failed") + tk.MustExec("use test_handle_failed") + tk.MustExec("create table t(a int primary key, b int)") + tk.MustExec("insert into t values(-1, 1)") + tk.MustExec("alter table t add index idx_b(b)") + result := tk.MustQuery("select count(*) from t use index(idx_b)") + result.Check(testkit.Rows("1")) + +} + +func (s *testFailDBSuite) TestAddIndexFailed(c *C) { + c.Assert(failpoint.Enable("github.com/pingcap/tidb/ddl/mockAddIndexErr", `1*return`), IsNil) + defer func() { + c.Assert(failpoint.Disable("github.com/pingcap/tidb/ddl/mockAddIndexErr"), IsNil) + }() + tk := testkit.NewTestKit(c, s.store) + tk.MustExec("create database if not exists 
test_add_index_failed") + defer tk.MustExec("drop database test_add_index_failed") + tk.MustExec("use test_add_index_failed") + + tk.MustExec("create table t(a bigint PRIMARY KEY, b int)") + for i := 0; i < 1000; i++ { + tk.MustExec(fmt.Sprintf("insert into t values(%v, %v)", i, i)) + } + + // Get table ID for split. + dom := domain.GetDomain(tk.Se) + is := dom.InfoSchema() + tbl, err := is.TableByName(model.NewCIStr("test_add_index_failed"), model.NewCIStr("t")) + c.Assert(err, IsNil) + tblID := tbl.Meta().ID + + // Split the table. + s.cluster.SplitTable(s.mvccStore, tblID, 100) + + tk.MustExec("alter table t add index idx_b(b)") + +} + +// TestFailSchemaSyncer test when the schema syncer is done, +// should prohibit DML executing until the syncer is restartd by loadSchemaInLoop. +func (s *testFailDBSuite) TestFailSchemaSyncer(c *C) { + tk := testkit.NewTestKit(c, s.store) + tk.MustExec("use test") + tk.MustExec("drop table if exists t") + tk.MustExec("create table t(a int)") + defer tk.MustExec("drop table if exists t") + originalRetryTimes := domain.SchemaOutOfDateRetryTimes + domain.SchemaOutOfDateRetryTimes = 1 + defer func() { + domain.SchemaOutOfDateRetryTimes = originalRetryTimes + }() + c.Assert(s.dom.SchemaValidator.IsStarted(), IsTrue) + mockSyncer, ok := s.dom.DDL().SchemaSyncer().(*ddl.MockSchemaSyncer) + c.Assert(ok, IsTrue) + + // make reload failed. + c.Assert(failpoint.Enable("github.com/pingcap/tidb/domain/ErrorMockReloadFailed", `return(true)`), IsNil) + mockSyncer.CloseSession() + // wait the schemaValidator is stopped. + for i := 0; i < 50; i++ { + if !s.dom.SchemaValidator.IsStarted() { + break + } + time.Sleep(20 * time.Millisecond) + } + + c.Assert(s.dom.SchemaValidator.IsStarted(), IsFalse) + _, err := tk.Exec("insert into t values(1)") + c.Assert(err, NotNil) + c.Assert(err.Error(), Equals, "[domain:8027]Information schema is out of date: schema failed to update in 1 lease, please make sure TiDB can connect to TiKV") + c.Assert(failpoint.Disable("github.com/pingcap/tidb/domain/ErrorMockReloadFailed"), IsNil) + // wait the schemaValidator is started. 
+ for i := 0; i < 50; i++ { + if s.dom.SchemaValidator.IsStarted() { + break + } + time.Sleep(100 * time.Millisecond) + } + c.Assert(s.dom.SchemaValidator.IsStarted(), IsTrue) + _, err = tk.Exec("insert into t values(1)") + c.Assert(err, IsNil) +} + +func (s *testFailDBSuite) TestGenGlobalIDFail(c *C) { + defer func() { + c.Assert(failpoint.Disable("github.com/pingcap/tidb/ddl/mockGenGlobalIDFail"), IsNil) + }() + tk := testkit.NewTestKit(c, s.store) + tk.MustExec("create database if not exists gen_global_id_fail") + tk.MustExec("use gen_global_id_fail") + + sql1 := "create table t1(a bigint PRIMARY KEY, b int)" + + testcases := []struct { + sql string + table string + mockErr bool + }{ + {sql1, "t1", true}, + {sql1, "t1", false}, + } + + for idx, test := range testcases { + if test.mockErr { + c.Assert(failpoint.Enable("github.com/pingcap/tidb/ddl/mockGenGlobalIDFail", `return(true)`), IsNil) + _, err := tk.Exec(test.sql) + c.Assert(err, NotNil, Commentf("the %dth test case '%s' fail", idx, test.sql)) + } else { + c.Assert(failpoint.Enable("github.com/pingcap/tidb/ddl/mockGenGlobalIDFail", `return(false)`), IsNil) + tk.MustExec(test.sql) + tk.MustExec(fmt.Sprintf("insert into %s values (%d, 42)", test.table, rand.Intn(65536))) + + } + } + +} + +func batchInsert(tk *testkit.TestKit, tbl string, start, end int) { + dml := fmt.Sprintf("insert into %s values", tbl) + for i := start; i < end; i++ { + dml += fmt.Sprintf("(%d, %d, %d)", i, i, i) + if i != end-1 { + dml += "," + } + } + tk.MustExec(dml) +} + +func (s *testFailDBSuite) TestAddIndexWorkerNum(c *C) { + tk := testkit.NewTestKit(c, s.store) + tk.MustExec("create database if not exists test_db") + tk.MustExec("use test_db") + tk.MustExec("drop table if exists test_add_index") + tk.MustExec("create table test_add_index (c1 bigint, c2 bigint, c3 bigint, primary key(c1))") + + done := make(chan error, 1) + start := -10 + // first add some rows + for i := start; i < 4090; i += 100 { + batchInsert(tk, "test_add_index", i, i+100) + } + + is := s.dom.InfoSchema() + schemaName := model.NewCIStr("test_db") + tableName := model.NewCIStr("test_add_index") + tbl, err := is.TableByName(schemaName, tableName) + c.Assert(err, IsNil) + + splitCount := 100 + // Split table to multi region. 
+ s.cluster.SplitTable(s.mvccStore, tbl.Meta().ID, splitCount) + + err = ddlutil.LoadDDLReorgVars(tk.Se) + c.Assert(err, IsNil) + originDDLAddIndexWorkerCnt := variable.GetDDLReorgWorkerCounter() + lastSetWorkerCnt := originDDLAddIndexWorkerCnt + atomic.StoreInt32(&ddl.TestCheckWorkerNumber, lastSetWorkerCnt) + ddl.TestCheckWorkerNumber = lastSetWorkerCnt + defer tk.MustExec(fmt.Sprintf("set @@global.tidb_ddl_reorg_worker_cnt=%d", originDDLAddIndexWorkerCnt)) + + c.Assert(failpoint.Enable("github.com/pingcap/tidb/ddl/checkIndexWorkerNum", `return(true)`), IsNil) + defer func() { + c.Assert(failpoint.Disable("github.com/pingcap/tidb/ddl/checkIndexWorkerNum"), IsNil) + }() + + testutil.SessionExecInGoroutine(c, s.store, "create index c3_index on test_add_index (c3)", done) + checkNum := 0 + +LOOP: + for { + select { + case err = <-done: + if err == nil { + break LOOP + } + c.Assert(err, IsNil, Commentf("err:%v", errors.ErrorStack(err))) + case <-ddl.TestCheckWorkerNumCh: + lastSetWorkerCnt = int32(rand.Intn(8) + 8) + tk.MustExec(fmt.Sprintf("set @@global.tidb_ddl_reorg_worker_cnt=%d", lastSetWorkerCnt)) + atomic.StoreInt32(&ddl.TestCheckWorkerNumber, lastSetWorkerCnt) + checkNum++ + } + } + c.Assert(checkNum, Greater, 5) + + tk.MustExec("drop table test_add_index") +} + +// TestRunDDLJobPanic tests recover panic when run ddl job panic. +func (s *testFailDBSuite) TestRunDDLJobPanic(c *C) { + defer func() { + c.Assert(failpoint.Disable("github.com/pingcap/tidb/ddl/mockPanicInRunDDLJob"), IsNil) + }() + tk := testkit.NewTestKit(c, s.store) + tk.MustExec("use test") + tk.MustExec("drop table if exists t") + c.Assert(failpoint.Enable("github.com/pingcap/tidb/ddl/mockPanicInRunDDLJob", `1*panic("panic test")`), IsNil) + _, err := tk.Exec("create table t(c1 int, c2 int)") + c.Assert(err, NotNil) + c.Assert(err.Error(), Equals, "[ddl:8214]Cancelled DDL job") +} diff --git a/ddl/index.go b/ddl/index.go new file mode 100644 index 0000000..09eb3c2 --- /dev/null +++ b/ddl/index.go @@ -0,0 +1,1353 @@ +// Copyright 2015 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package ddl + +import ( + "context" + "math" + "strconv" + "sync/atomic" + "time" + + "github.com/pingcap/errors" + "github.com/pingcap/failpoint" + ddlutil "github.com/pingcap/tidb/ddl/util" + "github.com/pingcap/tidb/infoschema" + "github.com/pingcap/tidb/kv" + "github.com/pingcap/tidb/meta" + "github.com/pingcap/tidb/meta/autoid" + "github.com/pingcap/tidb/parser/ast" + "github.com/pingcap/tidb/parser/charset" + "github.com/pingcap/tidb/parser/model" + "github.com/pingcap/tidb/parser/mysql" + + "github.com/pingcap/tidb/sessionctx" + "github.com/pingcap/tidb/sessionctx/variable" + "github.com/pingcap/tidb/store/tikv" + "github.com/pingcap/tidb/table" + "github.com/pingcap/tidb/table/tables" + "github.com/pingcap/tidb/tablecodec" + "github.com/pingcap/tidb/types" + "github.com/pingcap/tidb/util" + "github.com/pingcap/tidb/util/logutil" + "github.com/pingcap/tidb/util/rowDecoder" + "go.uber.org/zap" +) + +const ( + maxPrefixLength = 3072 + // MaxCommentLength is exported for testing. 
+ MaxCommentLength = 1024 +) + +func buildIndexColumns(columns []*model.ColumnInfo, idxColNames []*ast.IndexPartSpecification) ([]*model.IndexColumn, error) { + // Build offsets. + idxColumns := make([]*model.IndexColumn, 0, len(idxColNames)) + + // The sum of length of all index columns. + sumLength := 0 + for _, ic := range idxColNames { + col := model.FindColumnInfo(columns, ic.Column.Name.L) + if col == nil { + return nil, errKeyColumnDoesNotExits.GenWithStack("column does not exist: %s", ic.Column.Name) + } + + if err := checkIndexColumn(col, ic); err != nil { + return nil, err + } + + indexColumnLength, err := getIndexColumnLength(col, ic.Length) + if err != nil { + return nil, err + } + sumLength += indexColumnLength + + // The sum of all lengths must be shorter than the max length for prefix. + if sumLength > maxPrefixLength { + return nil, errors.Trace(errTooLongKey) + } + + idxColumns = append(idxColumns, &model.IndexColumn{ + Name: col.Name, + Offset: col.Offset, + Length: ic.Length, + }) + } + + return idxColumns, nil +} + +func checkPKOnGeneratedColumn(tblInfo *model.TableInfo, idxColNames []*ast.IndexPartSpecification) (*model.ColumnInfo, error) { + var lastCol *model.ColumnInfo + for _, colName := range idxColNames { + lastCol = getColumnInfoByName(tblInfo, colName.Column.Name.L) + if lastCol == nil { + return nil, errKeyColumnDoesNotExits.GenWithStackByArgs(colName.Column.Name) + } + } + + return lastCol, nil +} + +func checkIndexPrefixLength(columns []*model.ColumnInfo, idxColumns []*model.IndexColumn) error { + // The sum of length of all index columns. + sumLength := 0 + for _, ic := range idxColumns { + col := model.FindColumnInfo(columns, ic.Name.L) + if col == nil { + return errKeyColumnDoesNotExits.GenWithStack("column does not exist: %s", ic.Name) + } + + indexColumnLength, err := getIndexColumnLength(col, ic.Length) + if err != nil { + return err + } + sumLength += indexColumnLength + // The sum of all lengths must be shorter than the max length for prefix. + if sumLength > maxPrefixLength { + return errors.Trace(errTooLongKey) + } + } + return nil +} + +func checkIndexColumn(col *model.ColumnInfo, ic *ast.IndexPartSpecification) error { + if col.Flen == 0 && (types.IsTypeChar(col.FieldType.Tp) || types.IsTypeVarchar(col.FieldType.Tp)) { + return errors.Trace(errWrongKeyColumn.GenWithStackByArgs(ic.Column.Name)) + } + + // JSON column cannot index. + if col.FieldType.Tp == mysql.TypeJSON { + return errors.Trace(errJSONUsedAsKey.GenWithStackByArgs(col.Name.O)) + } + + // Length must be specified for BLOB and TEXT column indexes. + if types.IsTypeBlob(col.FieldType.Tp) && ic.Length == types.UnspecifiedLength { + return errors.Trace(errBlobKeyWithoutLength) + } + + // Length can only be specified for specifiable types. + if ic.Length != types.UnspecifiedLength && !types.IsTypePrefixable(col.FieldType.Tp) { + return errors.Trace(errIncorrectPrefixKey) + } + + // Key length must be shorter or equal to the column length. + if ic.Length != types.UnspecifiedLength && + types.IsTypeChar(col.FieldType.Tp) && col.Flen < ic.Length { + return errors.Trace(errIncorrectPrefixKey) + } + + // Specified length must be shorter than the max length for prefix. + if ic.Length > maxPrefixLength { + return errors.Trace(errTooLongKey) + } + return nil +} + +// getIndexColumnLength calculate the bytes number required in an index column. 
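+// A few illustrative examples of the calculation below: an index prefix of length 10 on
+// a VARCHAR column with charset utf8mb4 (Maxlen 4) contributes 4*10 = 40 bytes; a BIGINT
+// column contributes its fixed default length of 8 bytes; a decimal column goes through
+// calcBytesLengthForDecimal, which packs nine decimal digits into four bytes, e.g.
+// calcBytesLengthForDecimal(12) = 12/9*4 + (12%9+1)/2 = 4 + 2 = 6 bytes.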
+func getIndexColumnLength(col *model.ColumnInfo, colLen int) (int, error) { + length := types.UnspecifiedLength + if colLen != types.UnspecifiedLength { + length = colLen + } else if col.Flen != types.UnspecifiedLength { + length = col.Flen + } + + switch col.Tp { + case mysql.TypeBit: + return (length + 7) >> 3, nil + case mysql.TypeVarchar, mysql.TypeString: + // Different charsets occupy different numbers of bytes on each character. + desc, err := charset.GetCharsetDesc(col.Charset) + if err != nil { + return 0, errUnsupportedCharset.GenWithStackByArgs(col.Charset, col.Collate) + } + return desc.Maxlen * length, nil + case mysql.TypeTinyBlob, mysql.TypeMediumBlob, mysql.TypeBlob, mysql.TypeLongBlob: + return length, nil + case mysql.TypeTiny, mysql.TypeInt24, mysql.TypeLong, mysql.TypeLonglong, mysql.TypeDouble, mysql.TypeShort: + return mysql.DefaultLengthOfMysqlTypes[col.Tp], nil + case mysql.TypeFloat: + if length <= mysql.MaxFloatPrecisionLength { + return mysql.DefaultLengthOfMysqlTypes[mysql.TypeFloat], nil + } + return mysql.DefaultLengthOfMysqlTypes[mysql.TypeDouble], nil + case mysql.TypeDecimal, mysql.TypeNewDecimal: + return calcBytesLengthForDecimal(length), nil + case mysql.TypeYear, mysql.TypeDate, mysql.TypeDuration, mysql.TypeDatetime, mysql.TypeTimestamp: + return mysql.DefaultLengthOfMysqlTypes[col.Tp], nil + default: + return length, nil + } +} + +// Decimal using a binary format that packs nine decimal (base 10) digits into four bytes. +func calcBytesLengthForDecimal(m int) int { + return (m / 9 * 4) + ((m%9)+1)/2 +} + +func buildIndexInfo(tblInfo *model.TableInfo, indexName model.CIStr, idxColNames []*ast.IndexPartSpecification, state model.SchemaState) (*model.IndexInfo, error) { + if err := checkTooLongIndex(indexName); err != nil { + return nil, errors.Trace(err) + } + + idxColumns, err := buildIndexColumns(tblInfo.Columns, idxColNames) + if err != nil { + return nil, errors.Trace(err) + } + + // Create index info. 
+ idxInfo := &model.IndexInfo{ + Name: indexName, + Columns: idxColumns, + State: state, + } + return idxInfo, nil +} + +func addIndexColumnFlag(tblInfo *model.TableInfo, indexInfo *model.IndexInfo) { + if indexInfo.Primary { + for _, col := range indexInfo.Columns { + tblInfo.Columns[col.Offset].Flag |= mysql.PriKeyFlag + } + return + } + + col := indexInfo.Columns[0] + if indexInfo.Unique && len(indexInfo.Columns) == 1 { + tblInfo.Columns[col.Offset].Flag |= mysql.UniqueKeyFlag + } else { + tblInfo.Columns[col.Offset].Flag |= mysql.MultipleKeyFlag + } +} + +func dropIndexColumnFlag(tblInfo *model.TableInfo, indexInfo *model.IndexInfo) { + if indexInfo.Primary { + for _, col := range indexInfo.Columns { + tblInfo.Columns[col.Offset].Flag &= ^mysql.PriKeyFlag + } + } else if indexInfo.Unique && len(indexInfo.Columns) == 1 { + tblInfo.Columns[indexInfo.Columns[0].Offset].Flag &= ^mysql.UniqueKeyFlag + } else { + tblInfo.Columns[indexInfo.Columns[0].Offset].Flag &= ^mysql.MultipleKeyFlag + } + + col := indexInfo.Columns[0] + // other index may still cover this col + for _, index := range tblInfo.Indices { + if index.Name.L == indexInfo.Name.L { + continue + } + + if index.Columns[0].Name.L != col.Name.L { + continue + } + + addIndexColumnFlag(tblInfo, index) + } +} + +func getNullColInfos(tblInfo *model.TableInfo, indexInfo *model.IndexInfo) ([]*model.ColumnInfo, error) { + nullCols := make([]*model.ColumnInfo, 0, len(indexInfo.Columns)) + for _, colName := range indexInfo.Columns { + col := model.FindColumnInfo(tblInfo.Columns, colName.Name.L) + if !mysql.HasNotNullFlag(col.Flag) || mysql.HasPreventNullInsertFlag(col.Flag) { + nullCols = append(nullCols, col) + } + } + return nullCols, nil +} + +func checkPrimaryKeyNotNull(w *worker, sqlMode mysql.SQLMode, t *meta.Meta, job *model.Job, + tblInfo *model.TableInfo, indexInfo *model.IndexInfo) (warnings []string, err error) { + if !indexInfo.Primary { + return nil, nil + } + + dbInfo, err := t.GetDatabase(job.SchemaID) + if err != nil { + return nil, err + } + nullCols, err := getNullColInfos(tblInfo, indexInfo) + if err != nil { + return nil, err + } + if len(nullCols) == 0 { + return nil, nil + } + + err = modifyColsFromNull2NotNull(w, dbInfo, tblInfo, nullCols, model.NewCIStr(""), false) + if err == nil { + return nil, nil + } + _, err = convertAddIdxJob2RollbackJob(t, job, tblInfo, indexInfo, err) + // TODO: Support non-strict mode. + // warnings = append(warnings, ErrWarnDataTruncated.GenWithStackByArgs(oldCol.Name.L, 0).Error()) + return nil, err +} + +func (w *worker) onCreateIndex(d *ddlCtx, t *meta.Meta, job *model.Job, isPK bool) (ver int64, err error) { + // Handle the rolling back job. + if job.IsRollingback() { + ver, err = onDropIndex(t, job) + if err != nil { + return ver, errors.Trace(err) + } + return ver, nil + } + + // Handle normal job. + schemaID := job.SchemaID + tblInfo, err := getTableInfoAndCancelFaultJob(t, job, schemaID) + if err != nil { + return ver, errors.Trace(err) + } + + var ( + unique bool + indexName model.CIStr + idxColNames []*ast.IndexPartSpecification + indexOption *ast.IndexOption + sqlMode mysql.SQLMode + warnings []string + ) + if isPK { + // Notice: sqlMode and warnings is used to support non-strict mode. 
+ err = job.DecodeArgs(&unique, &indexName, &idxColNames, &indexOption, &sqlMode, &warnings) + } else { + err = job.DecodeArgs(&unique, &indexName, &idxColNames, &indexOption) + } + if err != nil { + job.State = model.JobStateCancelled + return ver, errors.Trace(err) + } + + indexInfo := tblInfo.FindIndexByName(indexName.L) + if indexInfo != nil && indexInfo.State == model.StatePublic { + job.State = model.JobStateCancelled + err = ErrDupKeyName.GenWithStack("index already exist %s", indexName) + if isPK { + err = infoschema.ErrMultiplePriKey + } + return ver, err + } + + if indexInfo == nil { + indexInfo, err = buildIndexInfo(tblInfo, indexName, idxColNames, model.StateNone) + if err != nil { + job.State = model.JobStateCancelled + return ver, errors.Trace(err) + } + if indexOption != nil { + indexInfo.Comment = indexOption.Comment + if indexOption.Tp == model.IndexTypeInvalid { + // Use btree as default index type. + indexInfo.Tp = model.IndexTypeBtree + } else { + indexInfo.Tp = indexOption.Tp + } + } else { + // Use btree as default index type. + indexInfo.Tp = model.IndexTypeBtree + } + indexInfo.Primary = false + if isPK { + if _, err = checkPKOnGeneratedColumn(tblInfo, idxColNames); err != nil { + job.State = model.JobStateCancelled + return ver, err + } + indexInfo.Primary = true + } + indexInfo.Unique = unique + indexInfo.ID = allocateIndexID(tblInfo) + tblInfo.Indices = append(tblInfo.Indices, indexInfo) + logutil.BgLogger().Info("[ddl] run add index job", zap.String("job", job.String()), zap.Reflect("indexInfo", indexInfo)) + } + originalState := indexInfo.State + switch indexInfo.State { + case model.StateNone: + // none -> delete only + job.SchemaState = model.StateDeleteOnly + indexInfo.State = model.StateDeleteOnly + ver, err = updateVersionAndTableInfoWithCheck(t, job, tblInfo, originalState != indexInfo.State) + case model.StateDeleteOnly: + // delete only -> write only + job.SchemaState = model.StateWriteOnly + indexInfo.State = model.StateWriteOnly + _, err = checkPrimaryKeyNotNull(w, sqlMode, t, job, tblInfo, indexInfo) + if err != nil { + break + } + ver, err = updateVersionAndTableInfo(t, job, tblInfo, originalState != indexInfo.State) + case model.StateWriteOnly: + // write only -> reorganization + job.SchemaState = model.StateWriteReorganization + indexInfo.State = model.StateWriteReorganization + _, err = checkPrimaryKeyNotNull(w, sqlMode, t, job, tblInfo, indexInfo) + if err != nil { + break + } + // Initialize SnapshotVer to 0 for later reorganization check. + job.SnapshotVer = 0 + ver, err = updateVersionAndTableInfo(t, job, tblInfo, originalState != indexInfo.State) + case model.StateWriteReorganization: + // reorganization -> public + tbl, err := getTable(d.store, schemaID, tblInfo) + if err != nil { + return ver, errors.Trace(err) + } + + reorgInfo, err := getReorgInfo(d, t, job, tbl) + if err != nil || reorgInfo.first { + // If we run reorg firstly, we should update the job snapshot version + // and then run the reorg next time. 
+ return ver, errors.Trace(err) + } + + err = w.runReorgJob(t, reorgInfo, d.lease, func() (addIndexErr error) { + defer func() { + r := recover() + if r != nil { + buf := util.GetStack() + logutil.BgLogger().Error("[ddl] add table index panic", zap.Any("panic", r), zap.String("stack", string(buf))) + + addIndexErr = errCancelledDDLJob.GenWithStack("add table `%v` index `%v` panic", tblInfo.Name, indexInfo.Name) + } + }() + return w.addTableIndex(tbl, indexInfo, reorgInfo) + }) + if err != nil { + if errWaitReorgTimeout.Equal(err) { + // if timeout, we should return, check for the owner and re-wait job done. + return ver, nil + } + if kv.ErrKeyExists.Equal(err) || errCancelledDDLJob.Equal(err) || errCantDecodeIndex.Equal(err) { + logutil.BgLogger().Warn("[ddl] run add index job failed, convert job to rollback", zap.String("job", job.String()), zap.Error(err)) + ver, err = convertAddIdxJob2RollbackJob(t, job, tblInfo, indexInfo, err) + } + // Clean up the channel of notifyCancelReorgJob. Make sure it can't affect other jobs. + w.reorgCtx.cleanNotifyReorgCancel() + return ver, errors.Trace(err) + } + // Clean up the channel of notifyCancelReorgJob. Make sure it can't affect other jobs. + w.reorgCtx.cleanNotifyReorgCancel() + + indexInfo.State = model.StatePublic + // Set column index flag. + addIndexColumnFlag(tblInfo, indexInfo) + if isPK { + if err = updateColsNull2NotNull(tblInfo, indexInfo); err != nil { + return ver, errors.Trace(err) + } + } + ver, err = updateVersionAndTableInfo(t, job, tblInfo, originalState != indexInfo.State) + if err != nil { + return ver, errors.Trace(err) + } + // Finish this job. + job.FinishTableJob(model.JobStateDone, model.StatePublic, ver, tblInfo) + default: + err = ErrInvalidDDLState.GenWithStackByArgs("index", tblInfo.State) + } + + return ver, errors.Trace(err) +} + +func onDropIndex(t *meta.Meta, job *model.Job) (ver int64, _ error) { + tblInfo, indexInfo, err := checkDropIndex(t, job) + if err != nil { + return ver, errors.Trace(err) + } + + originalState := indexInfo.State + switch indexInfo.State { + case model.StatePublic: + // public -> write only + job.SchemaState = model.StateWriteOnly + indexInfo.State = model.StateWriteOnly + ver, err = updateVersionAndTableInfo(t, job, tblInfo, originalState != indexInfo.State) + case model.StateWriteOnly: + // write only -> delete only + job.SchemaState = model.StateDeleteOnly + indexInfo.State = model.StateDeleteOnly + ver, err = updateVersionAndTableInfo(t, job, tblInfo, originalState != indexInfo.State) + case model.StateDeleteOnly: + // delete only -> reorganization + job.SchemaState = model.StateDeleteReorganization + indexInfo.State = model.StateDeleteReorganization + ver, err = updateVersionAndTableInfo(t, job, tblInfo, originalState != indexInfo.State) + case model.StateDeleteReorganization: + // reorganization -> absent + newIndices := make([]*model.IndexInfo, 0, len(tblInfo.Indices)) + for _, idx := range tblInfo.Indices { + if idx.Name.L != indexInfo.Name.L { + newIndices = append(newIndices, idx) + } + } + tblInfo.Indices = newIndices + // Set column index flag. + dropIndexColumnFlag(tblInfo, indexInfo) + + ver, err = updateVersionAndTableInfo(t, job, tblInfo, originalState != model.StateNone) + if err != nil { + return ver, errors.Trace(err) + } + + // Finish this job. 
+ if job.IsRollingback() { + job.FinishTableJob(model.JobStateRollbackDone, model.StateNone, ver, tblInfo) + job.Args[0] = indexInfo.ID + // the partition ids were append by convertAddIdxJob2RollbackJob, it is weird, but for the compatibility, + // we should keep appending the partitions in the convertAddIdxJob2RollbackJob. + } else { + job.FinishTableJob(model.JobStateDone, model.StateNone, ver, tblInfo) + job.Args = append(job.Args, indexInfo.ID) + } + default: + err = ErrInvalidDDLState.GenWithStackByArgs("index", indexInfo.State) + } + return ver, errors.Trace(err) +} + +func checkDropIndex(t *meta.Meta, job *model.Job) (*model.TableInfo, *model.IndexInfo, error) { + schemaID := job.SchemaID + tblInfo, err := getTableInfoAndCancelFaultJob(t, job, schemaID) + if err != nil { + return nil, nil, errors.Trace(err) + } + + var indexName model.CIStr + if err = job.DecodeArgs(&indexName); err != nil { + job.State = model.JobStateCancelled + return nil, nil, errors.Trace(err) + } + + indexInfo := tblInfo.FindIndexByName(indexName.L) + if indexInfo == nil { + job.State = model.JobStateCancelled + return nil, nil, ErrCantDropFieldOrKey.GenWithStack("index %s doesn't exist", indexName) + } + + // Double check for drop index on auto_increment column. + err = checkDropIndexOnAutoIncrementColumn(tblInfo, indexInfo) + if err != nil { + job.State = model.JobStateCancelled + return nil, nil, autoid.ErrWrongAutoKey + } + + return tblInfo, indexInfo, nil +} + +func checkDropIndexOnAutoIncrementColumn(tblInfo *model.TableInfo, indexInfo *model.IndexInfo) error { + cols := tblInfo.Columns + for _, idxCol := range indexInfo.Columns { + if !mysql.HasAutoIncrementFlag(cols[idxCol.Offset].Flag) { + continue + } + // check the count of index on auto_increment column. + count := 0 + for _, idx := range tblInfo.Indices { + for _, c := range idx.Columns { + if c.Name.L == idxCol.Name.L { + count++ + break + } + } + } + if count < 2 { + return autoid.ErrWrongAutoKey + } + } + return nil +} + +const ( + // DefaultTaskHandleCnt is default batch size of adding indices. + DefaultTaskHandleCnt = 128 +) + +// indexRecord is the record information of an index. +type indexRecord struct { + handle int64 + key []byte // It's used to lock a record. Record it to reduce the encoding time. + vals []types.Datum // It's the index values. + skip bool // skip indicates that the index key is already exists, we should not add it. +} + +type addIndexWorker struct { + id int + ddlWorker *worker + batchCnt int + sessCtx sessionctx.Context + taskCh chan *reorgIndexTask + resultCh chan *addIndexResult + index table.Index + table table.Table + closed bool + priority int + + // The following attributes are used to reduce memory allocation. + defaultVals []types.Datum + idxRecords []*indexRecord + rowMap map[int64]types.Datum + rowDecoder *decoder.RowDecoder + idxKeyBufs [][]byte + batchCheckKeys []kv.Key + distinctCheckFlags []bool +} + +type reorgIndexTask struct { + physicalTableID int64 + startHandle int64 + endHandle int64 + // endIncluded indicates whether the range include the endHandle. + // When the last handle is math.MaxInt64, set endIncluded to true to + // tell worker backfilling index of endHandle. 
+ endIncluded bool +} + +func (r *reorgIndexTask) String() string { + rightParenthesis := ")" + if r.endIncluded { + rightParenthesis = "]" + } + return "physicalTableID" + strconv.FormatInt(r.physicalTableID, 10) + "_" + "[" + strconv.FormatInt(r.startHandle, 10) + "," + strconv.FormatInt(r.endHandle, 10) + rightParenthesis +} + +type addIndexResult struct { + addedCount int + scanCount int + nextHandle int64 + err error +} + +// addIndexTaskContext is the context of the batch adding indices. +// After finishing the batch adding indices, result in addIndexTaskContext will be merged into addIndexResult. +type addIndexTaskContext struct { + nextHandle int64 + done bool + addedCount int + scanCount int +} + +// mergeAddIndexCtxToResult merge partial result in taskCtx into result. +func mergeAddIndexCtxToResult(taskCtx *addIndexTaskContext, result *addIndexResult) { + result.nextHandle = taskCtx.nextHandle + result.addedCount += taskCtx.addedCount + result.scanCount += taskCtx.scanCount +} + +func newAddIndexWorker(sessCtx sessionctx.Context, worker *worker, id int, t table.PhysicalTable, indexInfo *model.IndexInfo, decodeColMap map[int64]decoder.Column) *addIndexWorker { + index := tables.NewIndex(t.GetPhysicalID(), t.Meta(), indexInfo) + rowDecoder := decoder.NewRowDecoder(t, decodeColMap) + return &addIndexWorker{ + id: id, + ddlWorker: worker, + batchCnt: int(variable.GetDDLReorgBatchSize()), + sessCtx: sessCtx, + taskCh: make(chan *reorgIndexTask, 1), + resultCh: make(chan *addIndexResult, 1), + index: index, + table: t, + rowDecoder: rowDecoder, + priority: kv.PriorityLow, + defaultVals: make([]types.Datum, len(t.Cols())), + rowMap: make(map[int64]types.Datum, len(decodeColMap)), + } +} + +func (w *addIndexWorker) close() { + if !w.closed { + w.closed = true + close(w.taskCh) + } +} + +// getIndexRecord gets index columns values from raw binary value row. +func (w *addIndexWorker) getIndexRecord(handle int64, recordKey []byte, rawRecord []byte) (*indexRecord, error) { + t := w.table + cols := t.Cols() + idxInfo := w.index.Meta() + _, err := w.rowDecoder.DecodeAndEvalRowWithMap(w.sessCtx, handle, rawRecord, time.UTC, time.Local, w.rowMap) + if err != nil { + return nil, errors.Trace(errCantDecodeIndex.GenWithStackByArgs(err)) + } + idxVal := make([]types.Datum, len(idxInfo.Columns)) + for j, v := range idxInfo.Columns { + col := cols[v.Offset] + if col.IsPKHandleColumn(t.Meta()) { + if mysql.HasUnsignedFlag(col.Flag) { + idxVal[j].SetUint64(uint64(handle)) + } else { + idxVal[j].SetInt64(handle) + } + continue + } + idxColumnVal, ok := w.rowMap[col.ID] + if ok { + idxVal[j] = idxColumnVal + // Make sure there is no dirty data. + delete(w.rowMap, col.ID) + continue + } + idxColumnVal, err = tables.GetColDefaultValue(w.sessCtx, col, w.defaultVals) + if err != nil { + return nil, errors.Trace(err) + } + + idxVal[j] = idxColumnVal + } + // If there are generated column, rowDecoder will use column value that not in idxInfo.Columns to calculate + // the generated value, so we need to clear up the reusing map. + w.cleanRowMap() + idxRecord := &indexRecord{handle: handle, key: recordKey, vals: idxVal} + return idxRecord, nil +} + +func (w *addIndexWorker) cleanRowMap() { + for id := range w.rowMap { + delete(w.rowMap, id) + } +} + +// getNextHandle gets next handle of entry that we are going to process. +func (w *addIndexWorker) getNextHandle(taskRange reorgIndexTask, taskDone bool) (nextHandle int64) { + if !taskDone { + // The task is not done. 
So we need to pick the last processed entry's handle and add one. + return w.idxRecords[len(w.idxRecords)-1].handle + 1 + } + + // The task is done. So we need to choose a handle outside this range. + // Some corner cases should be considered: + // - The end of task range is MaxInt64. + // - The end of the task is excluded in the range. + if taskRange.endHandle == math.MaxInt64 || !taskRange.endIncluded { + return taskRange.endHandle + } + + return taskRange.endHandle + 1 +} + +// fetchRowColVals fetch w.batchCnt count rows that need to backfill indices, and build the corresponding indexRecord slice. +// fetchRowColVals returns: +// 1. The corresponding indexRecord slice. +// 2. Next handle of entry that we need to process. +// 3. Boolean indicates whether the task is done. +// 4. error occurs in fetchRowColVals. nil if no error occurs. +func (w *addIndexWorker) fetchRowColVals(txn kv.Transaction, taskRange reorgIndexTask) ([]*indexRecord, int64, bool, error) { + // TODO: use tableScan to prune columns. + w.idxRecords = w.idxRecords[:0] + startTime := time.Now() + + // taskDone means that the added handle is out of taskRange.endHandle. + taskDone := false + err := iterateSnapshotRows(w.sessCtx.GetStore(), w.priority, w.table, txn.StartTS(), taskRange.startHandle, taskRange.endHandle, taskRange.endIncluded, + func(handle int64, recordKey kv.Key, rawRow []byte) (bool, error) { + if !taskRange.endIncluded { + taskDone = handle >= taskRange.endHandle + } else { + taskDone = handle > taskRange.endHandle + } + + if taskDone || len(w.idxRecords) >= w.batchCnt { + return false, nil + } + + idxRecord, err1 := w.getIndexRecord(handle, recordKey, rawRow) + if err1 != nil { + return false, errors.Trace(err1) + } + + w.idxRecords = append(w.idxRecords, idxRecord) + if handle == taskRange.endHandle { + // If taskRange.endIncluded == false, we will not reach here when handle == taskRange.endHandle + taskDone = true + return false, nil + } + return true, nil + }) + + if len(w.idxRecords) == 0 { + taskDone = true + } + + logutil.BgLogger().Debug("[ddl] txn fetches handle info", zap.Uint64("txnStartTS", txn.StartTS()), zap.String("taskRange", taskRange.String()), zap.Duration("takeTime", time.Since(startTime))) + return w.idxRecords, w.getNextHandle(taskRange, taskDone), taskDone, errors.Trace(err) +} + +func (w *addIndexWorker) initBatchCheckBufs(batchCount int) { + if len(w.idxKeyBufs) < batchCount { + w.idxKeyBufs = make([][]byte, batchCount) + } + + w.batchCheckKeys = w.batchCheckKeys[:0] + w.distinctCheckFlags = w.distinctCheckFlags[:0] +} + +func (w *addIndexWorker) checkUniqueKey(txn kv.Transaction, idxRecords []*indexRecord) error { + idxInfo := w.index.Meta() + if !idxInfo.Unique { + // non-unique key need not to check, just overwrite it, + // because in most case, backfilling indices is not exists. + return nil + } + + w.initBatchCheckBufs(len(idxRecords)) + stmtCtx := w.sessCtx.GetSessionVars().StmtCtx + for i, record := range idxRecords { + idxKey, distinct, err := w.index.GenIndexKey(stmtCtx, record.vals, record.handle, w.idxKeyBufs[i]) + if err != nil { + return errors.Trace(err) + } + // save the buffer to reduce memory allocations. 
+ w.idxKeyBufs[i] = idxKey + + w.batchCheckKeys = append(w.batchCheckKeys, idxKey) + w.distinctCheckFlags = append(w.distinctCheckFlags, distinct) + } + + vals := make(map[string][]byte, len(w.batchCheckKeys)) + for _, key := range w.batchCheckKeys { + val, err := txn.Get(context.Background(), key) + if kv.ErrNotExist.Equal(err) { + continue + } + if err != nil { + return err + } + vals[string(key)] = val + } + + // 1. unique-key/primary-key is duplicate and the handle is equal, skip it. + // 2. unique-key/primary-key is duplicate and the handle is not equal, return duplicate error. + // 3. non-unique-key is duplicate, skip it. + for i, key := range w.batchCheckKeys { + if val, found := vals[string(key)]; found { + if w.distinctCheckFlags[i] { + handle, err1 := tables.DecodeHandle(val) + if err1 != nil { + return errors.Trace(err1) + } + + if handle != idxRecords[i].handle { + return errors.Trace(kv.ErrKeyExists) + } + } + idxRecords[i].skip = true + } else { + // The keys in w.batchCheckKeys also maybe duplicate, + // so we need to backfill the not found key into `batchVals` map. + if w.distinctCheckFlags[i] { + vals[string(key)] = tables.EncodeHandle(idxRecords[i].handle) + } + } + } + // Constrains is already checked. + stmtCtx.BatchCheck = true + return nil +} + +// backfillIndexInTxn will backfill table index in a transaction, lock corresponding rowKey, if the value of rowKey is changed, +// indicate that index columns values may changed, index is not allowed to be added, so the txn will rollback and retry. +// backfillIndexInTxn will add w.batchCnt indices once, default value of w.batchCnt is 128. +// TODO: make w.batchCnt can be modified by system variable. +func (w *addIndexWorker) backfillIndexInTxn(handleRange reorgIndexTask) (taskCtx addIndexTaskContext, errInTxn error) { + failpoint.Inject("errorMockPanic", func(val failpoint.Value) { + if val.(bool) { + panic("panic test") + } + }) + + errInTxn = kv.RunInNewTxn(w.sessCtx.GetStore(), true, func(txn kv.Transaction) error { + taskCtx.addedCount = 0 + taskCtx.scanCount = 0 + txn.SetOption(kv.Priority, w.priority) + + idxRecords, nextHandle, taskDone, err := w.fetchRowColVals(txn, handleRange) + if err != nil { + return errors.Trace(err) + } + taskCtx.nextHandle = nextHandle + taskCtx.done = taskDone + + err = w.checkUniqueKey(txn, idxRecords) + if err != nil { + return errors.Trace(err) + } + + for _, idxRecord := range idxRecords { + taskCtx.scanCount++ + // The index is already exists, we skip it, no needs to backfill it. + // The following update, delete, insert on these rows, TiDB can handle it correctly. + if idxRecord.skip { + continue + } + + // Lock the row key to notify us that someone delete or update the row, + // then we should not backfill the index of it, otherwise the adding index is redundant. + err := txn.LockKeys(context.Background(), new(kv.LockCtx), idxRecord.key) + if err != nil { + return errors.Trace(err) + } + + // Create the index. + handle, err := w.index.Create(w.sessCtx, txn, idxRecord.vals, idxRecord.handle) + if err != nil { + if kv.ErrKeyExists.Equal(err) && idxRecord.handle == handle { + // Index already exists, skip it. + continue + } + + return errors.Trace(err) + } + taskCtx.addedCount++ + } + + return nil + }) + + return +} + +// handleBackfillTask backfills range [task.startHandle, task.endHandle) handle's index to table. 
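+// It loops batch by batch: each iteration first checks that the reorg job is still
+// runnable (so a cancelled job is noticed before starting more work), then backfills one
+// batch via backfillIndexInTxn, merges the partial result into the addIndexResult, and
+// advances handleRange.startHandle to taskCtx.nextHandle until the task range is done.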
+func (w *addIndexWorker) handleBackfillTask(d *ddlCtx, task *reorgIndexTask) *addIndexResult { + handleRange := *task + result := &addIndexResult{addedCount: 0, nextHandle: handleRange.startHandle, err: nil} + lastLogCount := 0 + lastLogTime := time.Now() + startTime := lastLogTime + + for { + // Give job chance to be canceled, if we not check it here, + // if there is panic in w.backfillIndexInTxn we will never cancel the job. + // Because reorgIndexTask may run a long time, + // we should check whether this ddl job is still runnable. + err := w.ddlWorker.isReorgRunnable(d) + if err != nil { + result.err = err + return result + } + + taskCtx, err := w.backfillIndexInTxn(handleRange) + if err != nil { + result.err = err + return result + } + mergeAddIndexCtxToResult(&taskCtx, result) + w.ddlWorker.reorgCtx.increaseRowCount(int64(taskCtx.addedCount)) + + if num := result.scanCount - lastLogCount; num >= 30000 { + lastLogCount = result.scanCount + logutil.BgLogger().Info("[ddl] add index worker back fill index", zap.Int("workerID", w.id), zap.Int("addedCount", result.addedCount), + zap.Int("scanCount", result.scanCount), zap.Int64("nextHandle", taskCtx.nextHandle), zap.Float64("speed(rows/s)", float64(num)/time.Since(lastLogTime).Seconds())) + lastLogTime = time.Now() + } + + handleRange.startHandle = taskCtx.nextHandle + if taskCtx.done { + break + } + } + logutil.BgLogger().Info("[ddl] add index worker finish task", zap.Int("workerID", w.id), + zap.String("task", task.String()), zap.Int("addedCount", result.addedCount), zap.Int("scanCount", result.scanCount), zap.Int64("nextHandle", result.nextHandle), zap.String("takeTime", time.Since(startTime).String())) + return result +} + +func (w *addIndexWorker) run(d *ddlCtx) { + logutil.BgLogger().Info("[ddl] add index worker start", zap.Int("workerID", w.id)) + defer func() { + r := recover() + if r != nil { + buf := util.GetStack() + logutil.BgLogger().Error("[ddl] add index worker panic", zap.Any("panic", r), zap.String("stack", string(buf))) + + } + w.resultCh <- &addIndexResult{err: errReorgPanic} + }() + for { + task, more := <-w.taskCh + if !more { + break + } + + logutil.BgLogger().Debug("[ddl] add index worker got task", zap.Int("workerID", w.id), zap.String("task", task.String())) + failpoint.Inject("mockAddIndexErr", func() { + if w.id == 0 { + result := &addIndexResult{addedCount: 0, nextHandle: 0, err: errors.Errorf("mock add index error")} + w.resultCh <- result + failpoint.Continue() + } + }) + + // Dynamic change batch size. + w.batchCnt = int(variable.GetDDLReorgBatchSize()) + result := w.handleBackfillTask(d, task) + w.resultCh <- result + } + logutil.BgLogger().Info("[ddl] add index worker exit", zap.Int("workerID", w.id)) +} + +func makeupDecodeColMap(t table.Table, indexInfo *model.IndexInfo) (map[int64]decoder.Column, error) { + cols := t.Cols() + indexedCols := make([]*table.Column, len(indexInfo.Columns)) + for i, v := range indexInfo.Columns { + indexedCols[i] = cols[v.Offset] + } + + decodeColMap, err := decoder.BuildFullDecodeColMap(indexedCols) + if err != nil { + return nil, err + } + + return decodeColMap, nil +} + +// splitTableRanges uses PD region's key ranges to split the backfilling table key range space, +// to speed up adding index in table with disperse handle. +// The `t` should be a non-partitioned table or a partition. 
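+// For example (illustrative): a handle range [0, 100000] is first turned into the record
+// key range [t.RecordKey(0), t.RecordKey(100000).Next()); on a tikv.Storage backend that
+// key range is then cut at region boundaries by SplitRegionRanges, while on other storage
+// engines the single range is returned unchanged.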
+func splitTableRanges(t table.PhysicalTable, store kv.Storage, startHandle, endHandle int64) ([]kv.KeyRange, error) { + startRecordKey := t.RecordKey(startHandle) + endRecordKey := t.RecordKey(endHandle).Next() + + logutil.BgLogger().Info("[ddl] split table range from PD", zap.Int64("physicalTableID", t.GetPhysicalID()), zap.Int64("startHandle", startHandle), zap.Int64("endHandle", endHandle)) + kvRange := kv.KeyRange{StartKey: startRecordKey, EndKey: endRecordKey} + s, ok := store.(tikv.Storage) + if !ok { + // Only support split ranges in tikv.Storage now. + return []kv.KeyRange{kvRange}, nil + } + + maxSleep := 10000 // ms + bo := tikv.NewBackoffer(context.Background(), maxSleep) + ranges, err := tikv.SplitRegionRanges(bo, s.GetRegionCache(), []kv.KeyRange{kvRange}) + if err != nil { + return nil, errors.Trace(err) + } + if len(ranges) == 0 { + return nil, errors.Trace(errInvalidSplitRegionRanges) + } + return ranges, nil +} + +func decodeHandleRange(keyRange kv.KeyRange) (int64, int64, error) { + _, startHandle, err := tablecodec.DecodeRecordKey(keyRange.StartKey) + if err != nil { + return 0, 0, errors.Trace(err) + } + _, endHandle, err := tablecodec.DecodeRecordKey(keyRange.EndKey) + if err != nil { + return 0, 0, errors.Trace(err) + } + + return startHandle, endHandle, nil +} + +func closeAddIndexWorkers(workers []*addIndexWorker) { + for _, worker := range workers { + worker.close() + } +} + +func (w *worker) waitTaskResults(workers []*addIndexWorker, taskCnt int, totalAddedCount *int64, startHandle int64) (int64, int64, error) { + var ( + addedCount int64 + nextHandle = startHandle + firstErr error + ) + for i := 0; i < taskCnt; i++ { + worker := workers[i] + result := <-worker.resultCh + if firstErr == nil && result.err != nil { + firstErr = result.err + // We should wait all working workers exits, any way. + continue + } + + if result.err != nil { + logutil.BgLogger().Warn("[ddl] add index worker failed", zap.Int("workerID", worker.id), + zap.Error(result.err)) + } + + if firstErr == nil { + *totalAddedCount += int64(result.addedCount) + addedCount += int64(result.addedCount) + nextHandle = result.nextHandle + } + } + + return nextHandle, addedCount, errors.Trace(firstErr) +} + +// handleReorgTasks sends tasks to workers, and waits for all the running workers to return results, +// there are taskCnt running workers. +func (w *worker) handleReorgTasks(reorgInfo *reorgInfo, totalAddedCount *int64, workers []*addIndexWorker, batchTasks []*reorgIndexTask) error { + for i, task := range batchTasks { + workers[i].taskCh <- task + } + + startHandle := batchTasks[0].startHandle + taskCnt := len(batchTasks) + startTime := time.Now() + nextHandle, taskAddedCount, err := w.waitTaskResults(workers, taskCnt, totalAddedCount, startHandle) + elapsedTime := time.Since(startTime) + if err == nil { + err = w.isReorgRunnable(reorgInfo.d) + } + + if err != nil { + // update the reorg handle that has been processed. 
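`waitTaskResults` above deliberately keeps reading from every worker's result channel even after the first failure: it records only the first error, stops advancing the handle and row counters once any error has been seen, and still drains the remaining workers so none of them blocks on a send. A stripped-down sketch of that bookkeeping, with channel and result types invented here:

```go
package main

import (
	"errors"
	"fmt"
)

type taskResult struct {
	added int64
	next  int64 // next handle to continue from
	err   error
}

// collectResults drains exactly one result per worker. The first error wins,
// later errors are only observed, and progress (added rows, next handle) is
// advanced only while no error has been seen yet.
func collectResults(resultChs []chan taskResult, start int64) (next int64, added int64, firstErr error) {
	next = start
	for _, ch := range resultChs {
		res := <-ch
		if res.err != nil {
			if firstErr == nil {
				firstErr = res.err
			}
			continue
		}
		if firstErr == nil {
			added += res.added
			next = res.next
		}
	}
	return next, added, firstErr
}

func main() {
	chs := make([]chan taskResult, 3)
	for i := range chs {
		chs[i] = make(chan taskResult, 1)
	}
	chs[0] <- taskResult{added: 100, next: 100}
	chs[1] <- taskResult{err: errors.New("worker 1 failed")}
	chs[2] <- taskResult{added: 50, next: 200} // drained, but not counted after the error

	next, added, err := collectResults(chs, 0)
	fmt.Println(next, added, err) // 100 100 worker 1 failed
}
```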
+ err1 := kv.RunInNewTxn(reorgInfo.d.store, true, func(txn kv.Transaction) error { + return errors.Trace(reorgInfo.UpdateReorgMeta(txn, nextHandle, reorgInfo.EndHandle, reorgInfo.PhysicalTableID)) + }) + + logutil.BgLogger().Warn("[ddl] add index worker handle batch tasks failed", zap.Int64("totalAddedCount", *totalAddedCount), zap.Int64("startHandle", startHandle), zap.Int64("nextHandle", nextHandle), + zap.Int64("batchAddedCount", taskAddedCount), zap.String("taskFailedError", err.Error()), zap.String("takeTime", elapsedTime.String()), zap.NamedError("updateHandleError", err1)) + return errors.Trace(err) + } + + // nextHandle will be updated periodically in runReorgJob, so no need to update it here. + w.reorgCtx.setNextHandle(nextHandle) + + logutil.BgLogger().Info("[ddl] add index worker handle batch tasks successful", zap.Int64("totalAddedCount", *totalAddedCount), zap.Int64("startHandle", startHandle), + zap.Int64("nextHandle", nextHandle), zap.Int64("batchAddedCount", taskAddedCount), zap.String("takeTime", elapsedTime.String())) + return nil +} + +// sendRangeTaskToWorkers sends tasks to workers, and returns remaining kvRanges that is not handled. +func (w *worker) sendRangeTaskToWorkers(t table.Table, workers []*addIndexWorker, reorgInfo *reorgInfo, totalAddedCount *int64, kvRanges []kv.KeyRange) ([]kv.KeyRange, error) { + batchTasks := make([]*reorgIndexTask, 0, len(workers)) + physicalTableID := reorgInfo.PhysicalTableID + + // Build reorg indices tasks. + for _, keyRange := range kvRanges { + startHandle, endHandle, err := decodeHandleRange(keyRange) + if err != nil { + return nil, errors.Trace(err) + } + + endKey := t.RecordKey(endHandle) + endIncluded := false + if endKey.Cmp(keyRange.EndKey) < 0 { + endIncluded = true + } + task := &reorgIndexTask{physicalTableID, startHandle, endHandle, endIncluded} + batchTasks = append(batchTasks, task) + + if len(batchTasks) >= len(workers) { + break + } + } + + if len(batchTasks) == 0 { + return nil, nil + } + + // Wait tasks finish. + err := w.handleReorgTasks(reorgInfo, totalAddedCount, workers, batchTasks) + if err != nil { + return nil, errors.Trace(err) + } + + if len(batchTasks) < len(kvRanges) { + // there are kvRanges not handled. + remains := kvRanges[len(batchTasks):] + return remains, nil + } + + return nil, nil +} + +var ( + // TestCheckWorkerNumCh use for test adjust add index worker. + TestCheckWorkerNumCh = make(chan struct{}) + // TestCheckWorkerNumber use for test adjust add index worker. + TestCheckWorkerNumber = int32(16) +) + +func loadDDLReorgVars(w *worker) error { + // Get sessionctx from context resource pool. + var ctx sessionctx.Context + ctx, err := w.sessPool.get() + if err != nil { + return errors.Trace(err) + } + defer w.sessPool.put(ctx) + return ddlutil.LoadDDLReorgVars(ctx) +} + +// addPhysicalTableIndex handles the add index reorganization state for a non-partitioned table or a partition. +// For a partitioned table, it should be handled partition by partition. +// +// How to add index in reorganization state? +// Concurrently process the defaultTaskHandleCnt tasks. Each task deals with a handle range of the index record. +// The handle range is split from PD regions now. Each worker deal with a region table key range one time. +// Each handle range by estimation, concurrent processing needs to perform after the handle range has been acquired. +// The operation flow is as follows: +// 1. Open numbers of defaultWorkers goroutines. +// 2. Split table key range from PD regions. +// 3. 
Send tasks to running workers by workers's task channel. Each task deals with a region key ranges. +// 4. Wait all these running tasks finished, then continue to step 3, until all tasks is done. +// The above operations are completed in a transaction. +// Finally, update the concurrent processing of the total number of rows, and store the completed handle value. +func (w *worker) addPhysicalTableIndex(t table.PhysicalTable, indexInfo *model.IndexInfo, reorgInfo *reorgInfo) error { + job := reorgInfo.Job + logutil.BgLogger().Info("[ddl] start to add table index", zap.String("job", job.String()), zap.String("reorgInfo", reorgInfo.String())) + totalAddedCount := job.GetRowCount() + + startHandle, endHandle := reorgInfo.StartHandle, reorgInfo.EndHandle + decodeColMap, err := makeupDecodeColMap(t, indexInfo) + if err != nil { + return errors.Trace(err) + } + + // variable.ddlReorgWorkerCounter can be modified by system variable "tidb_ddl_reorg_worker_cnt". + workerCnt := variable.GetDDLReorgWorkerCounter() + idxWorkers := make([]*addIndexWorker, 0, workerCnt) + defer func() { + closeAddIndexWorkers(idxWorkers) + }() + + for { + kvRanges, err := splitTableRanges(t, reorgInfo.d.store, startHandle, endHandle) + if err != nil { + return errors.Trace(err) + } + + // For dynamic adjust add index worker number. + if err := loadDDLReorgVars(w); err != nil { + logutil.BgLogger().Error("[ddl] load DDL reorganization variable failed", zap.Error(err)) + } + workerCnt = variable.GetDDLReorgWorkerCounter() + // If only have 1 range, we can only start 1 worker. + if len(kvRanges) < int(workerCnt) { + workerCnt = int32(len(kvRanges)) + } + // Enlarge the worker size. + for i := len(idxWorkers); i < int(workerCnt); i++ { + sessCtx := newContext(reorgInfo.d.store) + idxWorker := newAddIndexWorker(sessCtx, w, i, t, indexInfo, decodeColMap) + idxWorker.priority = job.Priority + idxWorkers = append(idxWorkers, idxWorker) + go idxWorkers[i].run(reorgInfo.d) + } + // Shrink the worker size. + if len(idxWorkers) > int(workerCnt) { + workers := idxWorkers[workerCnt:] + idxWorkers = idxWorkers[:workerCnt] + closeAddIndexWorkers(workers) + } + + failpoint.Inject("checkIndexWorkerNum", func(val failpoint.Value) { + if val.(bool) { + num := int(atomic.LoadInt32(&TestCheckWorkerNumber)) + if num != 0 { + if num > len(kvRanges) { + if len(idxWorkers) != len(kvRanges) { + failpoint.Return(errors.Errorf("check index worker num error, len kv ranges is: %v, check index worker num is: %v, actual index num is: %v", len(kvRanges), num, len(idxWorkers))) + } + } else if num != len(idxWorkers) { + failpoint.Return(errors.Errorf("check index worker num error, len kv ranges is: %v, check index worker num is: %v, actual index num is: %v", len(kvRanges), num, len(idxWorkers))) + } + TestCheckWorkerNumCh <- struct{}{} + } + } + }) + + logutil.BgLogger().Info("[ddl] start add index workers to reorg index", zap.Int("workerCnt", len(idxWorkers)), + zap.Int("regionCnt", len(kvRanges)), zap.Int64("startHandle", startHandle), zap.Int64("endHandle", endHandle)) + remains, err := w.sendRangeTaskToWorkers(t, idxWorkers, reorgInfo, &totalAddedCount, kvRanges) + if err != nil { + return errors.Trace(err) + } + + if len(remains) == 0 { + break + } + startHandle, _, err = decodeHandleRange(remains[0]) + if err != nil { + return errors.Trace(err) + } + } + return nil +} + +// addTableIndex handles the add index reorganization state for a table. 
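`addPhysicalTableIndex` above re-reads `tidb_ddl_reorg_worker_cnt` on every round and then grows or shrinks its worker slice to the smaller of the configured count and `len(kvRanges)`: new workers are appended and started, surplus workers are sliced off and closed. A minimal, self-contained version of that resize step; `worker`, `newWorker`, and `close` here are placeholders, not the DDL package's types:

```go
package main

import "fmt"

type worker struct {
	id     int
	taskCh chan int
}

func newWorker(id int) *worker {
	w := &worker{id: id, taskCh: make(chan int, 1)}
	// A real worker would start a goroutine draining w.taskCh here.
	return w
}

func (w *worker) close() { close(w.taskCh) }

// resizePool grows or shrinks the pool so that len(pool) == target.
func resizePool(pool []*worker, target int) []*worker {
	// Enlarge: append and start the missing workers.
	for i := len(pool); i < target; i++ {
		pool = append(pool, newWorker(i))
	}
	// Shrink: close the surplus workers and drop them from the slice.
	if len(pool) > target {
		for _, w := range pool[target:] {
			w.close()
		}
		pool = pool[:target]
	}
	return pool
}

func main() {
	var pool []*worker

	// Round 1: 16 configured workers but only 4 key ranges -> 4 workers.
	configured, ranges := 16, 4
	target := configured
	if ranges < target {
		target = ranges
	}
	pool = resizePool(pool, target)
	fmt.Println("workers:", len(pool)) // workers: 4

	// Round 2: the variable was lowered to 2 at runtime -> shrink the pool.
	pool = resizePool(pool, 2)
	fmt.Println("workers:", len(pool)) // workers: 2
}
```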
+func (w *worker) addTableIndex(t table.Table, idx *model.IndexInfo, reorgInfo *reorgInfo) error { + return w.addPhysicalTableIndex(t.(table.PhysicalTable), idx, reorgInfo) +} + +func allocateIndexID(tblInfo *model.TableInfo) int64 { + tblInfo.MaxIndexID++ + return tblInfo.MaxIndexID +} + +// recordIterFunc is used for low-level record iteration. +type recordIterFunc func(h int64, rowKey kv.Key, rawRecord []byte) (more bool, err error) + +func iterateSnapshotRows(store kv.Storage, priority int, t table.Table, version uint64, startHandle int64, endHandle int64, endIncluded bool, fn recordIterFunc) error { + ver := kv.Version{Ver: version} + + snap, err := store.GetSnapshot(ver) + if err != nil { + return errors.Trace(err) + } + firstKey := t.RecordKey(startHandle) + + // Calculate the exclusive upper bound + var upperBound kv.Key + if endIncluded { + if endHandle == math.MaxInt64 { + upperBound = t.RecordKey(endHandle).PrefixNext() + } else { + // PrefixNext is time costing. Try to avoid it if possible. + upperBound = t.RecordKey(endHandle + 1) + } + } else { + upperBound = t.RecordKey(endHandle) + } + + it, err := snap.Iter(firstKey, upperBound) + if err != nil { + return errors.Trace(err) + } + defer it.Close() + + for it.Valid() { + if !it.Key().HasPrefix(t.RecordPrefix()) { + break + } + + var handle int64 + handle, err = tablecodec.DecodeRowKey(it.Key()) + if err != nil { + return errors.Trace(err) + } + rk := t.RecordKey(handle) + + more, err := fn(handle, rk, it.Value()) + if !more || err != nil { + return errors.Trace(err) + } + + err = kv.NextUntil(it, util.RowKeyPrefixFilter(rk)) + if err != nil { + if kv.ErrNotExist.Equal(err) { + break + } + return errors.Trace(err) + } + } + + return nil +} diff --git a/ddl/index_change_test.go b/ddl/index_change_test.go new file mode 100644 index 0000000..344c5e1 --- /dev/null +++ b/ddl/index_change_test.go @@ -0,0 +1,432 @@ +// Copyright 2016 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package ddl + +import ( + "context" + + . 
"github.com/pingcap/check" + "github.com/pingcap/errors" + "github.com/pingcap/tidb/kv" + "github.com/pingcap/tidb/meta" + "github.com/pingcap/tidb/parser/model" + "github.com/pingcap/tidb/parser/mysql" + "github.com/pingcap/tidb/sessionctx" + "github.com/pingcap/tidb/table" + "github.com/pingcap/tidb/types" +) + +var _ = Suite(&testIndexChangeSuite{}) + +type testIndexChangeSuite struct { + store kv.Storage + dbInfo *model.DBInfo +} + +func (s *testIndexChangeSuite) SetUpSuite(c *C) { + s.store = testCreateStore(c, "test_index_change") + s.dbInfo = &model.DBInfo{ + Name: model.NewCIStr("test_index_change"), + ID: 1, + } + err := kv.RunInNewTxn(s.store, true, func(txn kv.Transaction) error { + t := meta.NewMeta(txn) + return errors.Trace(t.CreateDatabase(s.dbInfo)) + }) + c.Check(err, IsNil, Commentf("err %v", errors.ErrorStack(err))) +} + +func (s *testIndexChangeSuite) TearDownSuite(c *C) { + s.store.Close() +} + +func (s *testIndexChangeSuite) TestIndexChange(c *C) { + d := newDDL( + context.Background(), + WithStore(s.store), + WithLease(testLease), + ) + defer d.Stop() + // create table t (c1 int primary key, c2 int); + tblInfo := testTableInfo(c, d, "t", 2) + tblInfo.Columns[0].Flag = mysql.PriKeyFlag | mysql.NotNullFlag + tblInfo.PKIsHandle = true + ctx := testNewContext(d) + err := ctx.NewTxn(context.Background()) + c.Assert(err, IsNil) + testCreateTable(c, ctx, d, s.dbInfo, tblInfo) + originTable := testGetTable(c, d, s.dbInfo.ID, tblInfo.ID) + + // insert t values (1, 1), (2, 2), (3, 3) + _, err = originTable.AddRecord(ctx, types.MakeDatums(1, 1)) + c.Assert(err, IsNil) + _, err = originTable.AddRecord(ctx, types.MakeDatums(2, 2)) + c.Assert(err, IsNil) + _, err = originTable.AddRecord(ctx, types.MakeDatums(3, 3)) + c.Assert(err, IsNil) + + txn, err := ctx.Txn(true) + c.Assert(err, IsNil) + err = txn.Commit(context.Background()) + c.Assert(err, IsNil) + + tc := &TestDDLCallback{} + // set up hook + prevState := model.StateNone + var ( + deleteOnlyTable table.Table + writeOnlyTable table.Table + publicTable table.Table + checkErr error + ) + tc.onJobUpdated = func(job *model.Job) { + if job.SchemaState == prevState { + return + } + ctx1 := testNewContext(d) + prevState = job.SchemaState + switch job.SchemaState { + case model.StateDeleteOnly: + deleteOnlyTable, err = getCurrentTable(d, s.dbInfo.ID, tblInfo.ID) + if err != nil { + checkErr = errors.Trace(err) + } + case model.StateWriteOnly: + writeOnlyTable, err = getCurrentTable(d, s.dbInfo.ID, tblInfo.ID) + if err != nil { + checkErr = errors.Trace(err) + } + err = s.checkAddWriteOnly(d, ctx1, deleteOnlyTable, writeOnlyTable) + if err != nil { + checkErr = errors.Trace(err) + } + case model.StatePublic: + if job.GetRowCount() != 3 { + checkErr = errors.Errorf("job's row count %d != 3", job.GetRowCount()) + } + publicTable, err = getCurrentTable(d, s.dbInfo.ID, tblInfo.ID) + if err != nil { + checkErr = errors.Trace(err) + } + err = s.checkAddPublic(d, ctx1, writeOnlyTable, publicTable) + if err != nil { + checkErr = errors.Trace(err) + } + } + } + d.SetHook(tc) + testCreateIndex(c, ctx, d, s.dbInfo, originTable.Meta(), false, "c2", "c2") + c.Check(errors.ErrorStack(checkErr), Equals, "") + txn, err = ctx.Txn(true) + c.Assert(err, IsNil) + c.Assert(txn.Commit(context.Background()), IsNil) + d.Stop() + prevState = model.StateNone + var noneTable table.Table + tc.onJobUpdated = func(job *model.Job) { + if job.SchemaState == prevState { + return + } + prevState = job.SchemaState + var err error + ctx1 := testNewContext(d) + switch 
job.SchemaState { + case model.StateWriteOnly: + writeOnlyTable, err = getCurrentTable(d, s.dbInfo.ID, tblInfo.ID) + if err != nil { + checkErr = errors.Trace(err) + } + err = s.checkDropWriteOnly(d, ctx1, publicTable, writeOnlyTable) + if err != nil { + checkErr = errors.Trace(err) + } + case model.StateDeleteOnly: + deleteOnlyTable, err = getCurrentTable(d, s.dbInfo.ID, tblInfo.ID) + if err != nil { + checkErr = errors.Trace(err) + } + err = s.checkDropDeleteOnly(d, ctx1, writeOnlyTable, deleteOnlyTable) + if err != nil { + checkErr = errors.Trace(err) + } + case model.StateNone: + noneTable, err = getCurrentTable(d, s.dbInfo.ID, tblInfo.ID) + if err != nil { + checkErr = errors.Trace(err) + } + if len(noneTable.Indices()) != 0 { + checkErr = errors.New("index should have been dropped") + } + } + } + d.start(context.Background(), nil) + testDropIndex(c, ctx, d, s.dbInfo, publicTable.Meta(), "c2") + c.Check(errors.ErrorStack(checkErr), Equals, "") +} + +func checkIndexExists(ctx sessionctx.Context, tbl table.Table, indexValue interface{}, handle int64, exists bool) error { + idx := tbl.Indices()[0] + txn, err := ctx.Txn(true) + if err != nil { + return errors.Trace(err) + } + doesExist, _, err := idx.Exist(ctx.GetSessionVars().StmtCtx, txn, types.MakeDatums(indexValue), handle) + if err != nil { + return errors.Trace(err) + } + if exists != doesExist { + if exists { + return errors.New("index should exists") + } + return errors.New("index should not exists") + } + return nil +} + +func (s *testIndexChangeSuite) checkAddWriteOnly(d *ddl, ctx sessionctx.Context, delOnlyTbl, writeOnlyTbl table.Table) error { + // DeleteOnlyTable: insert t values (4, 4); + err := ctx.NewTxn(context.Background()) + if err != nil { + return errors.Trace(err) + } + _, err = delOnlyTbl.AddRecord(ctx, types.MakeDatums(4, 4)) + if err != nil { + return errors.Trace(err) + } + err = checkIndexExists(ctx, writeOnlyTbl, 4, 4, false) + if err != nil { + return errors.Trace(err) + } + + // WriteOnlyTable: insert t values (5, 5); + _, err = writeOnlyTbl.AddRecord(ctx, types.MakeDatums(5, 5)) + if err != nil { + return errors.Trace(err) + } + err = checkIndexExists(ctx, writeOnlyTbl, 5, 5, true) + if err != nil { + return errors.Trace(err) + } + + // WriteOnlyTable: update t set c2 = 1 where c1 = 4 and c2 = 4 + err = writeOnlyTbl.UpdateRecord(ctx, 4, types.MakeDatums(4, 4), types.MakeDatums(4, 1), touchedSlice(writeOnlyTbl)) + if err != nil { + return errors.Trace(err) + } + err = checkIndexExists(ctx, writeOnlyTbl, 1, 4, true) + if err != nil { + return errors.Trace(err) + } + + // DeleteOnlyTable: update t set c2 = 3 where c1 = 4 and c2 = 1 + err = delOnlyTbl.UpdateRecord(ctx, 4, types.MakeDatums(4, 1), types.MakeDatums(4, 3), touchedSlice(writeOnlyTbl)) + if err != nil { + return errors.Trace(err) + } + // old value index not exists. + err = checkIndexExists(ctx, writeOnlyTbl, 1, 4, false) + if err != nil { + return errors.Trace(err) + } + // new value index not exists. 
+ err = checkIndexExists(ctx, writeOnlyTbl, 3, 4, false) + if err != nil { + return errors.Trace(err) + } + + // WriteOnlyTable: delete t where c1 = 4 and c2 = 3 + err = writeOnlyTbl.RemoveRecord(ctx, 4, types.MakeDatums(4, 3)) + if err != nil { + return errors.Trace(err) + } + err = checkIndexExists(ctx, writeOnlyTbl, 3, 4, false) + if err != nil { + return errors.Trace(err) + } + + // DeleteOnlyTable: delete t where c1 = 5 + err = delOnlyTbl.RemoveRecord(ctx, 5, types.MakeDatums(5, 5)) + if err != nil { + return errors.Trace(err) + } + err = checkIndexExists(ctx, writeOnlyTbl, 5, 5, false) + if err != nil { + return errors.Trace(err) + } + return nil +} + +func (s *testIndexChangeSuite) checkAddPublic(d *ddl, ctx sessionctx.Context, writeTbl, publicTbl table.Table) error { + // WriteOnlyTable: insert t values (6, 6) + err := ctx.NewTxn(context.Background()) + if err != nil { + return errors.Trace(err) + } + _, err = writeTbl.AddRecord(ctx, types.MakeDatums(6, 6)) + if err != nil { + return errors.Trace(err) + } + err = checkIndexExists(ctx, publicTbl, 6, 6, true) + if err != nil { + return errors.Trace(err) + } + // PublicTable: insert t values (7, 7) + _, err = publicTbl.AddRecord(ctx, types.MakeDatums(7, 7)) + if err != nil { + return errors.Trace(err) + } + err = checkIndexExists(ctx, publicTbl, 7, 7, true) + if err != nil { + return errors.Trace(err) + } + + // WriteOnlyTable: update t set c2 = 5 where c1 = 7 and c2 = 7 + err = writeTbl.UpdateRecord(ctx, 7, types.MakeDatums(7, 7), types.MakeDatums(7, 5), touchedSlice(writeTbl)) + if err != nil { + return errors.Trace(err) + } + err = checkIndexExists(ctx, publicTbl, 5, 7, true) + if err != nil { + return errors.Trace(err) + } + err = checkIndexExists(ctx, publicTbl, 7, 7, false) + if err != nil { + return errors.Trace(err) + } + // WriteOnlyTable: delete t where c1 = 6 + err = writeTbl.RemoveRecord(ctx, 6, types.MakeDatums(6, 6)) + if err != nil { + return errors.Trace(err) + } + err = checkIndexExists(ctx, publicTbl, 6, 6, false) + if err != nil { + return errors.Trace(err) + } + + var rows [][]types.Datum + publicTbl.IterRecords(ctx, publicTbl.FirstKey(), publicTbl.Cols(), + func(h int64, data []types.Datum, cols []*table.Column) (bool, error) { + rows = append(rows, data) + return true, nil + }) + if len(rows) == 0 { + return errors.New("table is empty") + } + for _, row := range rows { + idxVal := row[1].GetInt64() + handle := row[0].GetInt64() + err = checkIndexExists(ctx, publicTbl, idxVal, handle, true) + if err != nil { + return errors.Trace(err) + } + } + txn, err := ctx.Txn(true) + if err != nil { + return errors.Trace(err) + } + return txn.Commit(context.Background()) +} + +func (s *testIndexChangeSuite) checkDropWriteOnly(d *ddl, ctx sessionctx.Context, publicTbl, writeTbl table.Table) error { + // WriteOnlyTable insert t values (8, 8) + err := ctx.NewTxn(context.Background()) + if err != nil { + return errors.Trace(err) + } + _, err = writeTbl.AddRecord(ctx, types.MakeDatums(8, 8)) + if err != nil { + return errors.Trace(err) + } + + err = checkIndexExists(ctx, publicTbl, 8, 8, true) + if err != nil { + return errors.Trace(err) + } + + // WriteOnlyTable update t set c2 = 7 where c1 = 8 and c2 = 8 + err = writeTbl.UpdateRecord(ctx, 8, types.MakeDatums(8, 8), types.MakeDatums(8, 7), touchedSlice(writeTbl)) + if err != nil { + return errors.Trace(err) + } + + err = checkIndexExists(ctx, publicTbl, 7, 8, true) + if err != nil { + return errors.Trace(err) + } + + // WriteOnlyTable delete t where c1 = 8 + err = 
writeTbl.RemoveRecord(ctx, 8, types.MakeDatums(8, 7)) + if err != nil { + return errors.Trace(err) + } + + err = checkIndexExists(ctx, publicTbl, 7, 8, false) + if err != nil { + return errors.Trace(err) + } + txn, err := ctx.Txn(true) + if err != nil { + return errors.Trace(err) + } + return txn.Commit(context.Background()) +} + +func (s *testIndexChangeSuite) checkDropDeleteOnly(d *ddl, ctx sessionctx.Context, writeTbl, delTbl table.Table) error { + // WriteOnlyTable insert t values (9, 9) + err := ctx.NewTxn(context.Background()) + if err != nil { + return errors.Trace(err) + } + _, err = writeTbl.AddRecord(ctx, types.MakeDatums(9, 9)) + if err != nil { + return errors.Trace(err) + } + + err = checkIndexExists(ctx, writeTbl, 9, 9, true) + if err != nil { + return errors.Trace(err) + } + + // DeleteOnlyTable insert t values (10, 10) + _, err = delTbl.AddRecord(ctx, types.MakeDatums(10, 10)) + if err != nil { + return errors.Trace(err) + } + + err = checkIndexExists(ctx, writeTbl, 10, 10, false) + if err != nil { + return errors.Trace(err) + } + + // DeleteOnlyTable update t set c2 = 10 where c1 = 9 + err = delTbl.UpdateRecord(ctx, 9, types.MakeDatums(9, 9), types.MakeDatums(9, 10), touchedSlice(delTbl)) + if err != nil { + return errors.Trace(err) + } + + err = checkIndexExists(ctx, writeTbl, 9, 9, false) + if err != nil { + return errors.Trace(err) + } + + err = checkIndexExists(ctx, writeTbl, 10, 9, false) + if err != nil { + return errors.Trace(err) + } + txn, err := ctx.Txn(true) + if err != nil { + return errors.Trace(err) + } + return txn.Commit(context.Background()) +} diff --git a/ddl/mock.go b/ddl/mock.go new file mode 100644 index 0000000..c6924b6 --- /dev/null +++ b/ddl/mock.go @@ -0,0 +1,121 @@ +// Copyright 2017 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://wwm.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package ddl + +import ( + "context" + "sync/atomic" + "time" + + "github.com/pingcap/errors" + "github.com/pingcap/tidb/ddl/util" + "go.etcd.io/etcd/clientv3" +) + +var _ util.SchemaSyncer = &MockSchemaSyncer{} + +const mockCheckVersInterval = 2 * time.Millisecond + +// MockSchemaSyncer is a mock schema syncer, it is exported for tesing. +type MockSchemaSyncer struct { + selfSchemaVersion int64 + globalVerCh chan clientv3.WatchResponse + mockSession chan struct{} +} + +// NewMockSchemaSyncer creates a new mock SchemaSyncer. +func NewMockSchemaSyncer() util.SchemaSyncer { + return &MockSchemaSyncer{} +} + +// Init implements SchemaSyncer.Init interface. +func (s *MockSchemaSyncer) Init(ctx context.Context) error { + s.globalVerCh = make(chan clientv3.WatchResponse, 1) + s.mockSession = make(chan struct{}, 1) + return nil +} + +// GlobalVersionCh implements SchemaSyncer.GlobalVersionCh interface. +func (s *MockSchemaSyncer) GlobalVersionCh() clientv3.WatchChan { + return s.globalVerCh +} + +// WatchGlobalSchemaVer implements SchemaSyncer.WatchGlobalSchemaVer interface. +func (s *MockSchemaSyncer) WatchGlobalSchemaVer(context.Context) {} + +// UpdateSelfVersion implements SchemaSyncer.UpdateSelfVersion interface. 
+func (s *MockSchemaSyncer) UpdateSelfVersion(ctx context.Context, version int64) error { + atomic.StoreInt64(&s.selfSchemaVersion, version) + return nil +} + +// Done implements SchemaSyncer.Done interface. +func (s *MockSchemaSyncer) Done() <-chan struct{} { + return s.mockSession +} + +// CloseSession mockSession, it is exported for testing. +func (s *MockSchemaSyncer) CloseSession() { + close(s.mockSession) +} + +// Restart implements SchemaSyncer.Restart interface. +func (s *MockSchemaSyncer) Restart(_ context.Context) error { + s.mockSession = make(chan struct{}, 1) + return nil +} + +// RemoveSelfVersionPath implements SchemaSyncer.RemoveSelfVersionPath interface. +func (s *MockSchemaSyncer) RemoveSelfVersionPath() error { return nil } + +// OwnerUpdateGlobalVersion implements SchemaSyncer.OwnerUpdateGlobalVersion interface. +func (s *MockSchemaSyncer) OwnerUpdateGlobalVersion(ctx context.Context, version int64) error { + select { + case s.globalVerCh <- clientv3.WatchResponse{}: + default: + } + return nil +} + +// MustGetGlobalVersion implements SchemaSyncer.MustGetGlobalVersion interface. +func (s *MockSchemaSyncer) MustGetGlobalVersion(ctx context.Context) (int64, error) { + return 0, nil +} + +// OwnerCheckAllVersions implements SchemaSyncer.OwnerCheckAllVersions interface. +func (s *MockSchemaSyncer) OwnerCheckAllVersions(ctx context.Context, latestVer int64) error { + ticker := time.NewTicker(mockCheckVersInterval) + defer ticker.Stop() + + for { + select { + case <-ctx.Done(): + return errors.Trace(ctx.Err()) + case <-ticker.C: + ver := atomic.LoadInt64(&s.selfSchemaVersion) + if ver == latestVer { + return nil + } + } + } +} + +// NotifyCleanExpiredPaths implements SchemaSyncer.NotifyCleanExpiredPaths interface. +func (s *MockSchemaSyncer) NotifyCleanExpiredPaths() bool { return true } + +// StartCleanWork implements SchemaSyncer.StartCleanWork interface. +func (s *MockSchemaSyncer) StartCleanWork() {} + +// CloseCleanWork implements SchemaSyncer.CloseCleanWork interface. +func (s *MockSchemaSyncer) CloseCleanWork() {} diff --git a/ddl/options.go b/ddl/options.go new file mode 100644 index 0000000..dc712c7 --- /dev/null +++ b/ddl/options.go @@ -0,0 +1,78 @@ +// Copyright 2019 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://wwm.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. 
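Two small concurrency idioms carry the `MockSchemaSyncer` above: `OwnerUpdateGlobalVersion` notifies through a buffered channel with `select`/`default` so a slow reader never blocks the owner, and `OwnerCheckAllVersions` polls an atomically stored version on a ticker until it catches up or the context is cancelled. A self-contained sketch of the same pair of idioms, with names invented for the illustration:

```go
package main

import (
	"context"
	"fmt"
	"sync/atomic"
	"time"
)

type versionWatcher struct {
	selfVersion int64
	notifyCh    chan struct{} // buffered with capacity 1
}

// notify signals that a new global version exists. The select/default makes the
// send non-blocking: if a notification is already pending, this one is dropped.
func (w *versionWatcher) notify() {
	select {
	case w.notifyCh <- struct{}{}:
	default:
	}
}

// waitForVersion polls the atomically updated self version until it reaches
// target or the context is cancelled.
func (w *versionWatcher) waitForVersion(ctx context.Context, target int64) error {
	ticker := time.NewTicker(2 * time.Millisecond)
	defer ticker.Stop()
	for {
		select {
		case <-ctx.Done():
			return ctx.Err()
		case <-ticker.C:
			if atomic.LoadInt64(&w.selfVersion) >= target {
				return nil
			}
		}
	}
}

func main() {
	w := &versionWatcher{notifyCh: make(chan struct{}, 1)}
	w.notify()
	w.notify() // dropped: the channel already holds a pending notification

	go func() {
		time.Sleep(10 * time.Millisecond)
		atomic.StoreInt64(&w.selfVersion, 5) // a follower catches up
	}()

	ctx, cancel := context.WithTimeout(context.Background(), time.Second)
	defer cancel()
	fmt.Println(w.waitForVersion(ctx, 5)) // <nil>
}
```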
+ +package ddl + +import ( + "time" + + "github.com/ngaut/pools" + "github.com/pingcap/tidb/infoschema" + "github.com/pingcap/tidb/kv" + "go.etcd.io/etcd/clientv3" +) + +// Option represents an option to initialize the DDL module +type Option func(*Options) + +// Options represents all the options of the DDL module needs +type Options struct { + EtcdCli *clientv3.Client + Store kv.Storage + InfoHandle *infoschema.Handle + Hook Callback + Lease time.Duration + ResourcePool *pools.ResourcePool +} + +// WithEtcdClient specifies the `clientv3.Client` of DDL used to request the etcd service +func WithEtcdClient(client *clientv3.Client) Option { + return func(options *Options) { + options.EtcdCli = client + } +} + +// WithStore specifies the `kv.Storage` of DDL used to request the KV service +func WithStore(store kv.Storage) Option { + return func(options *Options) { + options.Store = store + } +} + +// WithInfoHandle specifies the `infoschema.Handle` +func WithInfoHandle(ih *infoschema.Handle) Option { + return func(options *Options) { + options.InfoHandle = ih + } +} + +// WithHook specifies the `Callback` of DDL used to notify the outer module when events are triggered +func WithHook(callback Callback) Option { + return func(options *Options) { + options.Hook = callback + } +} + +// WithLease specifies the schema lease duration +func WithLease(lease time.Duration) Option { + return func(options *Options) { + options.Lease = lease + } +} + +// WithResourcePool specifies the `pools.ResourcePool` of DDL used +func WithResourcePool(pools *pools.ResourcePool) Option { + return func(options *Options) { + options.ResourcePool = pools + } +} diff --git a/ddl/options_test.go b/ddl/options_test.go new file mode 100644 index 0000000..14a3785 --- /dev/null +++ b/ddl/options_test.go @@ -0,0 +1,60 @@ +// Copyright 2019 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package ddl_test + +import ( + "time" + + "github.com/ngaut/pools" + . 
"github.com/pingcap/check" + "github.com/pingcap/tidb/ddl" + "github.com/pingcap/tidb/infoschema" + "github.com/pingcap/tidb/util/mock" + "go.etcd.io/etcd/clientv3" +) + +type ddlOptionsSuite struct{} + +var _ = Suite(&ddlOptionsSuite{}) + +func (s *ddlOptionsSuite) TestOptions(c *C) { + client, err := clientv3.NewFromURL("test") + c.Assert(err, IsNil) + callback := &ddl.BaseCallback{} + lease := time.Second * 3 + store := &mock.Store{} + infoHandle := infoschema.NewHandle(store) + pools := &pools.ResourcePool{} + + options := []ddl.Option{ + ddl.WithEtcdClient(client), + ddl.WithHook(callback), + ddl.WithLease(lease), + ddl.WithStore(store), + ddl.WithInfoHandle(infoHandle), + ddl.WithResourcePool(pools), + } + + opt := &ddl.Options{} + for _, o := range options { + o(opt) + } + + c.Assert(opt.EtcdCli, Equals, client) + c.Assert(opt.Hook, Equals, callback) + c.Assert(opt.Lease, Equals, lease) + c.Assert(opt.Store, Equals, store) + c.Assert(opt.InfoHandle, Equals, infoHandle) + c.Assert(opt.ResourcePool, Equals, pools) +} diff --git a/ddl/reorg.go b/ddl/reorg.go new file mode 100644 index 0000000..76dd67c --- /dev/null +++ b/ddl/reorg.go @@ -0,0 +1,391 @@ +// Copyright 2015 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package ddl + +import ( + "context" + "fmt" + "math" + "strconv" + "sync/atomic" + "time" + + "github.com/pingcap/errors" + "github.com/pingcap/failpoint" + "github.com/pingcap/tidb/distsql" + "github.com/pingcap/tidb/kv" + "github.com/pingcap/tidb/meta" + "github.com/pingcap/tidb/parser/model" + "github.com/pingcap/tidb/parser/mysql" + "github.com/pingcap/tidb/parser/terror" + "github.com/pingcap/tidb/sessionctx" + "github.com/pingcap/tidb/table" + "github.com/pingcap/tidb/types" + "github.com/pingcap/tidb/util/chunk" + "github.com/pingcap/tidb/util/logutil" + "github.com/pingcap/tidb/util/mock" + "github.com/pingcap/tidb/util/ranger" + "github.com/pingcap/tipb/go-tipb" + "go.uber.org/zap" +) + +// reorgCtx is for reorganization. +type reorgCtx struct { + // doneCh is used to notify. + // If the reorganization job is done, we will use this channel to notify outer. + // TODO: Now we use goroutine to simulate reorganization jobs, later we may + // use a persistent job list. + doneCh chan error + // rowCount is used to simulate a job's row count. + rowCount int64 + // notifyCancelReorgJob is used to notify the backfilling goroutine if the DDL job is cancelled. + // 0: job is not canceled. + // 1: job is canceled. + notifyCancelReorgJob int32 + // doneHandle is used to simulate the handle that has been processed. + doneHandle int64 +} + +// newContext gets a context. It is only used for adding column in reorganization state. +func newContext(store kv.Storage) sessionctx.Context { + c := mock.NewContext() + c.Store = store + c.GetSessionVars().SetStatusFlag(mysql.ServerStatusAutocommit, false) + c.GetSessionVars().StmtCtx.TimeZone = time.UTC + return c +} + +const defaultWaitReorgTimeout = 10 * time.Second + +// ReorgWaitTimeout is the timeout that wait ddl in write reorganization stage. 
+var ReorgWaitTimeout = 5 * time.Second + +func (rc *reorgCtx) notifyReorgCancel() { + atomic.StoreInt32(&rc.notifyCancelReorgJob, 1) +} + +func (rc *reorgCtx) cleanNotifyReorgCancel() { + atomic.StoreInt32(&rc.notifyCancelReorgJob, 0) +} + +func (rc *reorgCtx) isReorgCanceled() bool { + return atomic.LoadInt32(&rc.notifyCancelReorgJob) == 1 +} + +func (rc *reorgCtx) setRowCount(count int64) { + atomic.StoreInt64(&rc.rowCount, count) +} + +func (rc *reorgCtx) setNextHandle(doneHandle int64) { + atomic.StoreInt64(&rc.doneHandle, doneHandle) +} + +func (rc *reorgCtx) increaseRowCount(count int64) { + atomic.AddInt64(&rc.rowCount, count) +} + +func (rc *reorgCtx) getRowCountAndHandle() (int64, int64) { + row := atomic.LoadInt64(&rc.rowCount) + handle := atomic.LoadInt64(&rc.doneHandle) + return row, handle +} + +func (rc *reorgCtx) clean() { + rc.setRowCount(0) + rc.setNextHandle(0) + rc.doneCh = nil +} + +func (w *worker) runReorgJob(t *meta.Meta, reorgInfo *reorgInfo, lease time.Duration, f func() error) error { + job := reorgInfo.Job + if w.reorgCtx.doneCh == nil { + // start a reorganization job + w.wg.Add(1) + w.reorgCtx.doneCh = make(chan error, 1) + // initial reorgCtx + w.reorgCtx.setRowCount(job.GetRowCount()) + w.reorgCtx.setNextHandle(reorgInfo.StartHandle) + go func() { + defer w.wg.Done() + w.reorgCtx.doneCh <- f() + }() + } + + waitTimeout := defaultWaitReorgTimeout + // if lease is 0, we are using a local storage, + // and we can wait the reorganization to be done here. + // if lease > 0, we don't need to wait here because + // we should update some job's progress context and try checking again, + // so we use a very little timeout here. + if lease > 0 { + waitTimeout = ReorgWaitTimeout + } + + // wait reorganization job done or timeout + select { + case err := <-w.reorgCtx.doneCh: + rowCount, _ := w.reorgCtx.getRowCountAndHandle() + logutil.BgLogger().Info("[ddl] run reorg job done", zap.Int64("handled rows", rowCount)) + // Update a job's RowCount. + job.SetRowCount(rowCount) + w.reorgCtx.clean() + return errors.Trace(err) + case <-w.quitCh: + logutil.BgLogger().Info("[ddl] run reorg job quit") + w.reorgCtx.setNextHandle(0) + w.reorgCtx.setRowCount(0) + // We return errWaitReorgTimeout here too, so that outer loop will break. + return errWaitReorgTimeout + case <-time.After(waitTimeout): + rowCount, doneHandle := w.reorgCtx.getRowCountAndHandle() + // Update a job's RowCount. + job.SetRowCount(rowCount) + // Update a reorgInfo's handle. + err := t.UpdateDDLReorgStartHandle(job, doneHandle) + logutil.BgLogger().Info("[ddl] run reorg job wait timeout", zap.Duration("waitTime", waitTimeout), + zap.Int64("totalAddedRowCount", rowCount), zap.Int64("doneHandle", doneHandle), zap.Error(err)) + // If timeout, we will return, check the owner and retry to wait job done again. + return errWaitReorgTimeout + } +} + +func (w *worker) isReorgRunnable(d *ddlCtx) error { + if isChanClosed(w.quitCh) { + // Worker is closed. So it can't do the reorganizational job. + return errInvalidWorker.GenWithStack("worker is closed") + } + + if w.reorgCtx.isReorgCanceled() { + // Job is cancelled. So it can't be done. + return errCancelledDDLJob + } + + if !d.isOwner() { + // If it's not the owner, we will try later, so here just returns an error. + logutil.BgLogger().Info("[ddl] DDL worker is not the DDL owner", zap.String("ID", d.uuid)) + return errors.Trace(errNotOwner) + } + return nil +} + +type reorgInfo struct { + *model.Job + + // StartHandle is the first handle of the adding indices table. 
+ StartHandle int64 + // EndHandle is the last handle of the adding indices table. + EndHandle int64 + d *ddlCtx + first bool + // PhysicalTableID is used for partitioned table. + // DDL reorganize for a partitioned table will handle partitions one by one, + // PhysicalTableID is used to trace the current partition we are handling. + // If the table is not partitioned, PhysicalTableID would be TableID. + PhysicalTableID int64 +} + +func (r *reorgInfo) String() string { + return "StartHandle:" + strconv.FormatInt(r.StartHandle, 10) + "," + + "EndHandle:" + strconv.FormatInt(r.EndHandle, 10) + "," + + "first:" + strconv.FormatBool(r.first) + "," + + "PhysicalTableID:" + strconv.FormatInt(r.PhysicalTableID, 10) +} + +func constructDescTableScanPB(physicalTableID int64, pbColumnInfos []*tipb.ColumnInfo) *tipb.Executor { + tblScan := &tipb.TableScan{ + TableId: physicalTableID, + Columns: pbColumnInfos, + Desc: true, + } + + return &tipb.Executor{Tp: tipb.ExecType_TypeTableScan, TblScan: tblScan} +} + +func constructLimitPB(count uint64) *tipb.Executor { + limitExec := &tipb.Limit{ + Limit: count, + } + return &tipb.Executor{Tp: tipb.ExecType_TypeLimit, Limit: limitExec} +} + +func buildDescTableScanDAG(ctx sessionctx.Context, tbl table.PhysicalTable, columns []*model.ColumnInfo, limit uint64) (*tipb.DAGRequest, error) { + dagReq := &tipb.DAGRequest{} + _, timeZoneOffset := time.Now().In(time.UTC).Zone() + dagReq.TimeZoneOffset = int64(timeZoneOffset) + for i := range columns { + dagReq.OutputOffsets = append(dagReq.OutputOffsets, uint32(i)) + } + dagReq.Flags |= model.FlagInSelectStmt + + pbColumnInfos := model.ColumnsToProto(columns, tbl.Meta().PKIsHandle) + tblScanExec := constructDescTableScanPB(tbl.GetPhysicalID(), pbColumnInfos) + dagReq.Executors = append(dagReq.Executors, tblScanExec) + dagReq.Executors = append(dagReq.Executors, constructLimitPB(limit)) + return dagReq, nil +} + +func getColumnsTypes(columns []*model.ColumnInfo) []*types.FieldType { + colTypes := make([]*types.FieldType, 0, len(columns)) + for _, col := range columns { + colTypes = append(colTypes, &col.FieldType) + } + return colTypes +} + +// buildDescTableScan builds a desc table scan upon tblInfo. +func (d *ddlCtx) buildDescTableScan(ctx context.Context, startTS uint64, tbl table.PhysicalTable, columns []*model.ColumnInfo, limit uint64) (distsql.SelectResult, error) { + sctx := newContext(d.store) + dagPB, err := buildDescTableScanDAG(sctx, tbl, columns, limit) + if err != nil { + return nil, errors.Trace(err) + } + ranges := ranger.FullIntRange(false) + var builder distsql.RequestBuilder + builder.SetTableRanges(tbl.GetPhysicalID(), ranges). + SetDAGRequest(dagPB). + SetStartTS(startTS). + SetKeepOrder(true). + SetConcurrency(1).SetDesc(true) + + builder.Request.NotFillCache = true + + kvReq, err := builder.Build() + if err != nil { + return nil, errors.Trace(err) + } + + return distsql.Select(ctx, sctx, kvReq, getColumnsTypes(columns)) +} + +// GetTableMaxRowID gets the last row id of the table partition. 
+func (d *ddlCtx) GetTableMaxRowID(startTS uint64, tbl table.PhysicalTable) (maxRowID int64, emptyTable bool, err error) { + maxRowID = int64(math.MaxInt64) + var columns []*model.ColumnInfo + if tbl.Meta().PKIsHandle { + for _, col := range tbl.Meta().Columns { + if mysql.HasPriKeyFlag(col.Flag) { + columns = []*model.ColumnInfo{col} + break + } + } + } else { + columns = []*model.ColumnInfo{model.NewExtraHandleColInfo()} + } + + ctx := context.Background() + // build a desc scan of tblInfo, which limit is 1, we can use it to retrieve the last handle of the table. + result, err := d.buildDescTableScan(ctx, startTS, tbl, columns, 1) + if err != nil { + return maxRowID, false, errors.Trace(err) + } + defer terror.Call(result.Close) + + chk := chunk.New(getColumnsTypes(columns), 1, 1) + err = result.Next(ctx, chk) + if err != nil { + return maxRowID, false, errors.Trace(err) + } + + if chk.NumRows() == 0 { + // empty table + return maxRowID, true, nil + } + row := chk.GetRow(0) + maxRowID = row.GetInt64(0) + return maxRowID, false, nil +} + +// getTableRange gets the start and end handle of a table (or partition). +func getTableRange(d *ddlCtx, tbl table.PhysicalTable, snapshotVer uint64, priority int) (startHandle, endHandle int64, err error) { + startHandle = math.MinInt64 + endHandle = math.MaxInt64 + // Get the start handle of this partition. + err = iterateSnapshotRows(d.store, priority, tbl, snapshotVer, math.MinInt64, math.MaxInt64, true, + func(h int64, rowKey kv.Key, rawRecord []byte) (bool, error) { + startHandle = h + return false, nil + }) + if err != nil { + return 0, 0, errors.Trace(err) + } + var emptyTable bool + // Get the end handle of this partition. + endHandle, emptyTable, err = d.GetTableMaxRowID(snapshotVer, tbl) + if err != nil { + return 0, 0, errors.Trace(err) + } + if endHandle < startHandle || emptyTable { + logutil.BgLogger().Info("[ddl] get table range, endHandle < startHandle", zap.String("table", fmt.Sprintf("%v", tbl.Meta())), + zap.Int64("partitionID", tbl.GetPhysicalID()), zap.Int64("endHandle", endHandle), zap.Int64("startHandle", startHandle)) + endHandle = startHandle + } + return +} + +func getReorgInfo(d *ddlCtx, t *meta.Meta, job *model.Job, tbl table.Table) (*reorgInfo, error) { + var ( + err error + start int64 + end int64 + pid int64 + info reorgInfo + ) + + if job.SnapshotVer == 0 { + info.first = true + // get the current version for reorganization if we don't have + var ver kv.Version + ver, err = d.store.CurrentVersion() + if err != nil { + return nil, errors.Trace(err) + } else if ver.Ver <= 0 { + return nil, errInvalidStoreVer.GenWithStack("invalid storage current version %d", ver.Ver) + } + tblInfo := tbl.Meta() + pid = tblInfo.ID + tb := tbl.(table.PhysicalTable) + start, end, err = getTableRange(d, tb, ver.Ver, job.Priority) + if err != nil { + return nil, errors.Trace(err) + } + logutil.BgLogger().Info("[ddl] job get table range", zap.Int64("jobID", job.ID), zap.Int64("physicalTableID", pid), zap.Int64("startHandle", start), zap.Int64("endHandle", end)) + + failpoint.Inject("errorUpdateReorgHandle", func() (*reorgInfo, error) { + return &info, errors.New("occur an error when update reorg handle") + }) + err = t.UpdateDDLReorgHandle(job, start, end, pid) + if err != nil { + return &info, errors.Trace(err) + } + // Update info should after data persistent. 
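`getTableRange` above asks `iterateSnapshotRows` to scan with `endHandle = math.MaxInt64` and `endIncluded = true`, which is exactly the case that forces the overflow guard in that function: converting an inclusive bound to an exclusive one by adding 1 would overflow at `MaxInt64`, so only that case falls back to the more expensive prefix-successor key. The arithmetic reduced to plain integers, as a sketch:

```go
package main

import (
	"fmt"
	"math"
)

// exclusiveUpperBound converts an inclusive end handle into an exclusive one.
// The second return value reports whether a simple "+1" was possible; for
// math.MaxInt64 it is not, which is where the code above switches to the
// key-level PrefixNext instead of adding one to the handle.
func exclusiveUpperBound(endHandle int64, endIncluded bool) (int64, bool) {
	if !endIncluded {
		return endHandle, true // already exclusive
	}
	if endHandle == math.MaxInt64 {
		return endHandle, false // +1 would overflow; needs the prefix-successor key
	}
	return endHandle + 1, true
}

func main() {
	fmt.Println(exclusiveUpperBound(100, true))           // 101 true
	fmt.Println(exclusiveUpperBound(100, false))          // 100 true
	fmt.Println(exclusiveUpperBound(math.MaxInt64, true)) // 9223372036854775807 false
}
```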
+ job.SnapshotVer = ver.Ver + } else { + start, end, pid, err = t.GetDDLReorgHandle(job) + if err != nil { + return nil, errors.Trace(err) + } + } + info.Job = job + info.d = d + info.StartHandle = start + info.EndHandle = end + info.PhysicalTableID = pid + + return &info, errors.Trace(err) +} + +func (r *reorgInfo) UpdateReorgMeta(txn kv.Transaction, startHandle, endHandle, physicalTableID int64) error { + t := meta.NewMeta(txn) + return errors.Trace(t.UpdateDDLReorgHandle(r.Job, startHandle, endHandle, physicalTableID)) +} diff --git a/ddl/reorg_test.go b/ddl/reorg_test.go new file mode 100644 index 0000000..e39ab6b --- /dev/null +++ b/ddl/reorg_test.go @@ -0,0 +1,221 @@ +// Copyright 2015 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package ddl + +import ( + "context" + "time" + + . "github.com/pingcap/check" + "github.com/pingcap/tidb/kv" + "github.com/pingcap/tidb/meta" + "github.com/pingcap/tidb/parser/model" + "github.com/pingcap/tidb/types" +) + +type testCtxKeyType int + +func (k testCtxKeyType) String() string { + return "test_ctx_key" +} + +const testCtxKey testCtxKeyType = 0 + +func (s *testDDLSuite) TestReorg(c *C) { + store := testCreateStore(c, "test_reorg") + defer store.Close() + + d := newDDL( + context.Background(), + WithStore(store), + WithLease(testLease), + ) + defer d.Stop() + + time.Sleep(testLease) + + ctx := testNewContext(d) + + ctx.SetValue(testCtxKey, 1) + c.Assert(ctx.Value(testCtxKey), Equals, 1) + ctx.ClearValue(testCtxKey) + + err := ctx.NewTxn(context.Background()) + c.Assert(err, IsNil) + txn, err := ctx.Txn(true) + c.Assert(err, IsNil) + err = txn.Set([]byte("a"), []byte("b")) + c.Assert(err, IsNil) + err = txn.Rollback() + c.Assert(err, IsNil) + + err = ctx.NewTxn(context.Background()) + c.Assert(err, IsNil) + txn, err = ctx.Txn(true) + c.Assert(err, IsNil) + err = txn.Set([]byte("a"), []byte("b")) + c.Assert(err, IsNil) + err = txn.Commit(context.Background()) + c.Assert(err, IsNil) + + rowCount := int64(10) + handle := int64(100) + f := func() error { + d.generalWorker().reorgCtx.setRowCount(rowCount) + d.generalWorker().reorgCtx.setNextHandle(handle) + time.Sleep(1*ReorgWaitTimeout + 100*time.Millisecond) + return nil + } + job := &model.Job{ + ID: 1, + SnapshotVer: 1, // Make sure it is not zero. So the reorgInfo's first is false. + } + err = ctx.NewTxn(context.Background()) + c.Assert(err, IsNil) + txn, err = ctx.Txn(true) + c.Assert(err, IsNil) + m := meta.NewMeta(txn) + rInfo := &reorgInfo{ + Job: job, + } + err = d.generalWorker().runReorgJob(m, rInfo, d.lease, f) + c.Assert(err, NotNil) + + // The longest to wait for 5 seconds to make sure the function of f is returned. + for i := 0; i < 1000; i++ { + time.Sleep(5 * time.Millisecond) + err = d.generalWorker().runReorgJob(m, rInfo, d.lease, f) + if err == nil { + c.Assert(job.RowCount, Equals, rowCount) + c.Assert(d.generalWorker().reorgCtx.rowCount, Equals, int64(0)) + + // Test whether reorgInfo's Handle is update. 
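`getReorgInfo` above amounts to a small checkpoint protocol: on the first run (`SnapshotVer == 0`) it computes the full handle range, persists it, and only then records the snapshot version on the job; on later runs it reloads the persisted handles and continues where the previous owner stopped. A toy version of that resume logic, with an in-memory map standing in for the `meta` store and all names invented for the sketch:

```go
package main

import "fmt"

type checkpoint struct {
	start, end int64
	snapshot   uint64
}

type job struct {
	id          int64
	snapshotVer uint64
}

// reorgRange returns the handle range to process for j, persisting a fresh
// checkpoint on the first run and reloading the stored one on resume.
func reorgRange(store map[int64]checkpoint, j *job, currentSnapshot uint64, fullRange func() (int64, int64)) (int64, int64) {
	if j.snapshotVer == 0 {
		start, end := fullRange()
		// Persist first, then record the snapshot version on the job, so a crash
		// in between is retried rather than resumed from a missing checkpoint.
		store[j.id] = checkpoint{start: start, end: end, snapshot: currentSnapshot}
		j.snapshotVer = currentSnapshot
		return start, end
	}
	cp := store[j.id]
	return cp.start, cp.end
}

func main() {
	store := map[int64]checkpoint{}
	j := &job{id: 7}

	start, end := reorgRange(store, j, 42, func() (int64, int64) { return 1, 1000 })
	fmt.Println(start, end) // 1 1000

	// Simulate a restart: the running owner had advanced the checkpoint...
	store[j.id] = checkpoint{start: 500, end: 1000, snapshot: 42}
	// ...so the next call resumes from the stored handles.
	fmt.Println(reorgRange(store, j, 42, nil)) // 500 1000
}
```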
+ err = txn.Commit(context.Background()) + c.Assert(err, IsNil) + err = ctx.NewTxn(context.Background()) + c.Assert(err, IsNil) + + m = meta.NewMeta(txn) + info, err1 := getReorgInfo(d.ddlCtx, m, job, nil) + c.Assert(err1, IsNil) + c.Assert(info.StartHandle, Equals, handle) + c.Assert(d.generalWorker().reorgCtx.doneHandle, Equals, int64(0)) + break + } + } + c.Assert(err, IsNil) + + d.Stop() + err = d.generalWorker().runReorgJob(m, rInfo, d.lease, func() error { + time.Sleep(4 * testLease) + return nil + }) + c.Assert(err, NotNil) + txn, err = ctx.Txn(true) + c.Assert(err, IsNil) + err = txn.Commit(context.Background()) + c.Assert(err, IsNil) + + d.start(context.Background(), nil) + job = &model.Job{ + ID: 2, + SchemaID: 1, + Type: model.ActionCreateSchema, + Args: []interface{}{model.NewCIStr("test")}, + SnapshotVer: 1, // Make sure it is not zero. So the reorgInfo's first is false. + } + + var info *reorgInfo + err = kv.RunInNewTxn(d.store, false, func(txn kv.Transaction) error { + t := meta.NewMeta(txn) + var err1 error + info, err1 = getReorgInfo(d.ddlCtx, t, job, nil) + c.Assert(err1, IsNil) + err1 = info.UpdateReorgMeta(txn, 1, 0, 0) + c.Assert(err1, IsNil) + return nil + }) + c.Assert(err, IsNil) + + err = kv.RunInNewTxn(d.store, false, func(txn kv.Transaction) error { + t := meta.NewMeta(txn) + var err1 error + info, err1 = getReorgInfo(d.ddlCtx, t, job, nil) + c.Assert(err1, IsNil) + c.Assert(info.StartHandle, Greater, int64(0)) + return nil + }) + c.Assert(err, IsNil) +} + +func (s *testDDLSuite) TestReorgOwner(c *C) { + store := testCreateStore(c, "test_reorg_owner") + defer store.Close() + + d1 := newDDL( + context.Background(), + WithStore(store), + WithLease(testLease), + ) + defer d1.Stop() + + ctx := testNewContext(d1) + + testCheckOwner(c, d1, true) + + d2 := newDDL( + context.Background(), + WithStore(store), + WithLease(testLease), + ) + defer d2.Stop() + + dbInfo := testSchemaInfo(c, d1, "test") + testCreateSchema(c, ctx, d1, dbInfo) + + tblInfo := testTableInfo(c, d1, "t", 3) + testCreateTable(c, ctx, d1, dbInfo, tblInfo) + t := testGetTable(c, d1, dbInfo.ID, tblInfo.ID) + + num := 10 + for i := 0; i < num; i++ { + _, err := t.AddRecord(ctx, types.MakeDatums(i, i, i)) + c.Assert(err, IsNil) + } + + txn, err := ctx.Txn(true) + c.Assert(err, IsNil) + err = txn.Commit(context.Background()) + c.Assert(err, IsNil) + + tc := &TestDDLCallback{} + tc.onJobRunBefore = func(job *model.Job) { + if job.SchemaState == model.StateDeleteReorganization { + d1.Stop() + } + } + + d1.SetHook(tc) + + testDropSchema(c, ctx, d1, dbInfo) + + err = kv.RunInNewTxn(d1.store, false, func(txn kv.Transaction) error { + t := meta.NewMeta(txn) + db, err1 := t.GetDatabase(dbInfo.ID) + c.Assert(err1, IsNil) + c.Assert(db, IsNil) + return nil + }) + c.Assert(err, IsNil) +} diff --git a/ddl/rollingback.go b/ddl/rollingback.go new file mode 100644 index 0000000..a86ac34 --- /dev/null +++ b/ddl/rollingback.go @@ -0,0 +1,266 @@ +// Copyright 2018 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package ddl + +import ( + "github.com/pingcap/errors" + "github.com/pingcap/tidb/kv" + "github.com/pingcap/tidb/meta" + "github.com/pingcap/tidb/parser/ast" + "github.com/pingcap/tidb/parser/model" + "github.com/pingcap/tidb/parser/mysql" + "github.com/pingcap/tidb/util/logutil" + "go.uber.org/zap" +) + +func updateColsNull2NotNull(tblInfo *model.TableInfo, indexInfo *model.IndexInfo) error { + nullCols, err := getNullColInfos(tblInfo, indexInfo) + if err != nil { + return errors.Trace(err) + } + + for _, col := range nullCols { + col.Flag |= mysql.NotNullFlag + col.Flag = col.Flag &^ mysql.PreventNullInsertFlag + } + return nil +} + +func convertAddIdxJob2RollbackJob(t *meta.Meta, job *model.Job, tblInfo *model.TableInfo, indexInfo *model.IndexInfo, err error) (int64, error) { + job.State = model.JobStateRollingback + + if indexInfo.Primary { + nullCols, err := getNullColInfos(tblInfo, indexInfo) + if err != nil { + return 0, errors.Trace(err) + } + for _, col := range nullCols { + // Field PreventNullInsertFlag flag reset. + col.Flag = col.Flag &^ mysql.PreventNullInsertFlag + } + } + + job.Args = []interface{}{indexInfo.Name} + // If add index job rollbacks in write reorganization state, its need to delete all keys which has been added. + // Its work is the same as drop index job do. + // The write reorganization state in add index job that likes write only state in drop index job. + // So the next state is delete only state. + originalState := indexInfo.State + indexInfo.State = model.StateDeleteOnly + job.SchemaState = model.StateDeleteOnly + ver, err1 := updateVersionAndTableInfo(t, job, tblInfo, originalState != indexInfo.State) + if err1 != nil { + return ver, errors.Trace(err1) + } + + if kv.ErrKeyExists.Equal(err) { + return ver, kv.ErrKeyExists.GenWithStackByArgs("", indexInfo.Name.O) + } + + return ver, errors.Trace(err) +} + +// convertNotStartAddIdxJob2RollbackJob converts the add index job that are not started workers to rollingbackJob, +// to rollback add index operations. job.SnapshotVer == 0 indicates the workers are not started. 
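The rollback paths above toggle column attributes stored as bits in `col.Flag`: `|=` sets a flag, and `&^=` (Go's AND NOT operator) clears one, which is how `PreventNullInsertFlag` is stripped while `NotNullFlag` is kept or restored. The operator pair in isolation, with made-up flag values:

```go
package main

import "fmt"

// Flag bits, loosely modelled on the mysql flag constants used above;
// the concrete values here are invented for the demo.
const (
	notNullFlag           uint = 1 << 0
	preventNullInsertFlag uint = 1 << 1
)

func main() {
	var flag uint

	flag |= notNullFlag | preventNullInsertFlag // set both bits
	fmt.Printf("%02b\n", flag)                  // 11

	flag &^= preventNullInsertFlag // clear one bit, leave the rest untouched
	fmt.Printf("%02b\n", flag)     // 01

	fmt.Println(flag&notNullFlag != 0)           // true: still NOT NULL
	fmt.Println(flag&preventNullInsertFlag != 0) // false: insert guard removed
}
```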
+func convertNotStartAddIdxJob2RollbackJob(t *meta.Meta, job *model.Job, occuredErr error) (ver int64, err error) { + schemaID := job.SchemaID + tblInfo, err := getTableInfoAndCancelFaultJob(t, job, schemaID) + if err != nil { + return ver, errors.Trace(err) + } + + var ( + unique bool + indexName model.CIStr + idxColNames []*ast.IndexPartSpecification + indexOption *ast.IndexOption + ) + err = job.DecodeArgs(&unique, &indexName, &idxColNames, &indexOption) + if err != nil { + job.State = model.JobStateCancelled + return ver, errors.Trace(err) + } + + indexInfo := tblInfo.FindIndexByName(indexName.L) + if indexInfo == nil { + job.State = model.JobStateCancelled + return ver, errCancelledDDLJob + } + return convertAddIdxJob2RollbackJob(t, job, tblInfo, indexInfo, occuredErr) +} + +func rollingbackAddColumn(t *meta.Meta, job *model.Job) (ver int64, err error) { + job.State = model.JobStateRollingback + tblInfo, columnInfo, col, _, err := checkAddColumn(t, job) + if err != nil { + return ver, errors.Trace(err) + } + if columnInfo == nil { + job.State = model.JobStateCancelled + return ver, errCancelledDDLJob + } + + originalState := columnInfo.State + columnInfo.State = model.StateDeleteOnly + job.SchemaState = model.StateDeleteOnly + + job.Args = []interface{}{col.Name} + ver, err = updateVersionAndTableInfo(t, job, tblInfo, originalState != columnInfo.State) + if err != nil { + return ver, errors.Trace(err) + } + return ver, errCancelledDDLJob +} + +func rollingbackDropColumn(t *meta.Meta, job *model.Job) (ver int64, err error) { + tblInfo, colInfo, err := checkDropColumn(t, job) + if err != nil { + return ver, errors.Trace(err) + } + + // StatePublic means when the job is not running yet. + if colInfo.State == model.StatePublic { + job.State = model.JobStateCancelled + job.FinishTableJob(model.JobStateRollbackDone, model.StatePublic, ver, tblInfo) + return ver, errCancelledDDLJob + } + // In the state of drop column `write only -> delete only -> reorganization`, + // We can not rollback now, so just continue to drop column. + job.State = model.JobStateRunning + return ver, nil +} + +func rollingbackDropIndex(t *meta.Meta, job *model.Job) (ver int64, err error) { + tblInfo, indexInfo, err := checkDropIndex(t, job) + if err != nil { + return ver, errors.Trace(err) + } + + originalState := indexInfo.State + switch indexInfo.State { + case model.StateDeleteOnly, model.StateDeleteReorganization, model.StateNone: + // We can not rollback now, so just continue to drop index. + // Normally won't fetch here, because there is check when cancel ddl jobs. see function: isJobRollbackable. + job.State = model.JobStateRunning + return ver, nil + case model.StatePublic, model.StateWriteOnly: + job.State = model.JobStateRollbackDone + indexInfo.State = model.StatePublic + default: + return ver, ErrInvalidDDLState.GenWithStackByArgs("index", indexInfo.State) + } + + job.SchemaState = indexInfo.State + job.Args = []interface{}{indexInfo.Name} + ver, err = updateVersionAndTableInfo(t, job, tblInfo, originalState != indexInfo.State) + if err != nil { + return ver, errors.Trace(err) + } + job.FinishTableJob(model.JobStateRollbackDone, model.StatePublic, ver, tblInfo) + return ver, errCancelledDDLJob +} + +func rollingbackAddIndex(w *worker, d *ddlCtx, t *meta.Meta, job *model.Job, isPK bool) (ver int64, err error) { + // If the value of SnapshotVer isn't zero, it means the work is backfilling the indexes. 
+	if job.SchemaState == model.StateWriteReorganization && job.SnapshotVer != 0 {
+		// The add index workers are started. We need to ask them to exit.
+		logutil.Logger(w.logCtx).Info("[ddl] run the cancelling DDL job", zap.String("job", job.String()))
+		w.reorgCtx.notifyReorgCancel()
+		ver, err = w.onCreateIndex(d, t, job, isPK)
+	} else {
+		// The add index workers are not started, so remove the indexInfo from tableInfo.
+		ver, err = convertNotStartAddIdxJob2RollbackJob(t, job, errCancelledDDLJob)
+	}
+	return
+}
+
+func rollingbackDropTableOrView(t *meta.Meta, job *model.Job) error {
+	tblInfo, err := checkTableExistAndCancelNonExistJob(t, job, job.SchemaID)
+	if err != nil {
+		return errors.Trace(err)
+	}
+	// To simplify the rollback logic, the job cannot be canceled after it starts to run.
+	// Normally we won't reach here, because canceling DDL jobs is checked first; see function isJobRollbackable.
+	if tblInfo.State == model.StatePublic {
+		job.State = model.JobStateCancelled
+		return errCancelledDDLJob
+	}
+	job.State = model.JobStateRunning
+	return nil
+}
+
+func rollingbackDropSchema(t *meta.Meta, job *model.Job) error {
+	dbInfo, err := checkSchemaExistAndCancelNotExistJob(t, job)
+	if err != nil {
+		return errors.Trace(err)
+	}
+	// To simplify the rollback logic, the job cannot be canceled after it starts to run.
+	// Normally we won't reach here, because canceling DDL jobs is checked first; see function isJobRollbackable.
+	if dbInfo.State == model.StatePublic {
+		job.State = model.JobStateCancelled
+		return errCancelledDDLJob
+	}
+	job.State = model.JobStateRunning
+	return nil
+}
+
+func cancelOnlyNotHandledJob(job *model.Job) (ver int64, err error) {
+	// We can only cancel a job that has not been handled yet.
+	if job.SchemaState == model.StateNone {
+		job.State = model.JobStateCancelled
+		return ver, errCancelledDDLJob
+	}
+
+	job.State = model.JobStateRunning
+
+	return ver, nil
+}
+
+func convertJob2RollbackJob(w *worker, d *ddlCtx, t *meta.Meta, job *model.Job) (ver int64, err error) {
+	switch job.Type {
+	case model.ActionAddColumn:
+		ver, err = rollingbackAddColumn(t, job)
+	case model.ActionAddIndex:
+		ver, err = rollingbackAddIndex(w, d, t, job, false)
+	case model.ActionAddPrimaryKey:
+		ver, err = rollingbackAddIndex(w, d, t, job, true)
+	case model.ActionDropColumn:
+		ver, err = rollingbackDropColumn(t, job)
+	case model.ActionDropIndex, model.ActionDropPrimaryKey:
+		ver, err = rollingbackDropIndex(t, job)
+	case model.ActionDropTable:
+		err = rollingbackDropTableOrView(t, job)
+	case model.ActionDropSchema:
+		err = rollingbackDropSchema(t, job)
+	case model.ActionShardRowID,
+		model.ActionModifyColumn,
+		model.ActionModifyTableCharsetAndCollate, model.ActionModifySchemaCharsetAndCollate:
+		ver, err = cancelOnlyNotHandledJob(job)
+	default:
+		job.State = model.JobStateCancelled
+		err = errCancelledDDLJob
+	}
+
+	if err != nil {
+		if job.State != model.JobStateRollingback && job.State != model.JobStateCancelled {
+			logutil.Logger(w.logCtx).Error("[ddl] run DDL job failed", zap.String("job", job.String()), zap.Error(err))
+		} else {
+			logutil.Logger(w.logCtx).Info("[ddl] the DDL job is cancelled normally", zap.String("job", job.String()), zap.Error(err))
+		}
+
+		job.Error = toTError(err)
+		job.ErrorCount++
+	}
+	return
+}
diff --git a/ddl/schema.go b/ddl/schema.go
new file mode 100644
index 0000000..0252af0
--- /dev/null
+++ b/ddl/schema.go
@@ -0,0 +1,212 @@
+// Copyright 2015 PingCAP, Inc.
+// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package ddl + +import ( + "github.com/pingcap/errors" + "github.com/pingcap/tidb/infoschema" + "github.com/pingcap/tidb/meta" + "github.com/pingcap/tidb/parser/model" +) + +func onCreateSchema(d *ddlCtx, t *meta.Meta, job *model.Job) (ver int64, _ error) { + schemaID := job.SchemaID + dbInfo := &model.DBInfo{} + if err := job.DecodeArgs(dbInfo); err != nil { + // Invalid arguments, cancel this job. + job.State = model.JobStateCancelled + return ver, errors.Trace(err) + } + + dbInfo.ID = schemaID + dbInfo.State = model.StateNone + + err := checkSchemaNotExists(d, t, schemaID, dbInfo) + if err != nil { + if infoschema.ErrDatabaseExists.Equal(err) { + // The database already exists, can't create it, we should cancel this job now. + job.State = model.JobStateCancelled + } + return ver, errors.Trace(err) + } + + ver, err = updateSchemaVersion(t, job) + if err != nil { + return ver, errors.Trace(err) + } + + switch dbInfo.State { + case model.StateNone: + // none -> public + dbInfo.State = model.StatePublic + err = t.CreateDatabase(dbInfo) + if err != nil { + return ver, errors.Trace(err) + } + // Finish this job. + job.FinishDBJob(model.JobStateDone, model.StatePublic, ver, dbInfo) + return ver, nil + default: + // We can't enter here. + return ver, errors.Errorf("invalid db state %v", dbInfo.State) + } +} + +func checkSchemaNotExists(d *ddlCtx, t *meta.Meta, schemaID int64, dbInfo *model.DBInfo) error { + // d.infoHandle maybe nil in some test. + if d.infoHandle == nil { + return checkSchemaNotExistsFromStore(t, schemaID, dbInfo) + } + // Try to use memory schema info to check first. + currVer, err := t.GetSchemaVersion() + if err != nil { + return err + } + is := d.infoHandle.Get() + if is.SchemaMetaVersion() == currVer { + return checkSchemaNotExistsFromInfoSchema(is, schemaID, dbInfo) + } + return checkSchemaNotExistsFromStore(t, schemaID, dbInfo) +} + +func checkSchemaNotExistsFromInfoSchema(is infoschema.InfoSchema, schemaID int64, dbInfo *model.DBInfo) error { + // Check database exists by name. + if is.SchemaExists(dbInfo.Name) { + return infoschema.ErrDatabaseExists.GenWithStackByArgs(dbInfo.Name) + } + // Check database exists by ID. 
+ if _, ok := is.SchemaByID(schemaID); ok { + return infoschema.ErrDatabaseExists.GenWithStackByArgs(dbInfo.Name) + } + return nil +} + +func checkSchemaNotExistsFromStore(t *meta.Meta, schemaID int64, dbInfo *model.DBInfo) error { + dbs, err := t.ListDatabases() + if err != nil { + return errors.Trace(err) + } + + for _, db := range dbs { + if db.Name.L == dbInfo.Name.L { + if db.ID != schemaID { + return infoschema.ErrDatabaseExists.GenWithStackByArgs(db.Name) + } + dbInfo = db + } + } + return nil +} + +func onModifySchemaCharsetAndCollate(t *meta.Meta, job *model.Job) (ver int64, _ error) { + var toCharset, toCollate string + if err := job.DecodeArgs(&toCharset, &toCollate); err != nil { + job.State = model.JobStateCancelled + return ver, errors.Trace(err) + } + + dbInfo, err := t.GetDatabase(job.SchemaID) + if err != nil { + job.State = model.JobStateCancelled + return ver, errors.Trace(err) + } + + if dbInfo.Charset == toCharset && dbInfo.Collate == toCollate { + job.FinishDBJob(model.JobStateDone, model.StatePublic, ver, dbInfo) + return ver, nil + } + + dbInfo.Charset = toCharset + dbInfo.Collate = toCollate + + if err = t.UpdateDatabase(dbInfo); err != nil { + return ver, errors.Trace(err) + } + if ver, err = updateSchemaVersion(t, job); err != nil { + return ver, errors.Trace(err) + } + job.FinishDBJob(model.JobStateDone, model.StatePublic, ver, dbInfo) + return ver, nil +} + +func onDropSchema(t *meta.Meta, job *model.Job) (ver int64, _ error) { + dbInfo, err := checkSchemaExistAndCancelNotExistJob(t, job) + if err != nil { + return ver, errors.Trace(err) + } + + ver, err = updateSchemaVersion(t, job) + if err != nil { + return ver, errors.Trace(err) + } + switch dbInfo.State { + case model.StatePublic: + // public -> write only + job.SchemaState = model.StateWriteOnly + dbInfo.State = model.StateWriteOnly + err = t.UpdateDatabase(dbInfo) + case model.StateWriteOnly: + // write only -> delete only + job.SchemaState = model.StateDeleteOnly + dbInfo.State = model.StateDeleteOnly + err = t.UpdateDatabase(dbInfo) + case model.StateDeleteOnly: + dbInfo.State = model.StateNone + var tables []*model.TableInfo + tables, err = t.ListTables(job.SchemaID) + if err != nil { + return ver, errors.Trace(err) + } + + err = t.UpdateDatabase(dbInfo) + if err != nil { + return ver, errors.Trace(err) + } + if err = t.DropDatabase(dbInfo.ID); err != nil { + break + } + + // Finish this job. + if len(tables) > 0 { + job.Args = append(job.Args, getIDs(tables)) + } + job.FinishDBJob(model.JobStateDone, model.StateNone, ver, dbInfo) + default: + // We can't enter here. + err = errors.Errorf("invalid db state %v", dbInfo.State) + } + + return ver, errors.Trace(err) +} + +func checkSchemaExistAndCancelNotExistJob(t *meta.Meta, job *model.Job) (*model.DBInfo, error) { + dbInfo, err := t.GetDatabase(job.SchemaID) + if err != nil { + return nil, errors.Trace(err) + } + if dbInfo == nil { + job.State = model.JobStateCancelled + return nil, infoschema.ErrDatabaseDropExists.GenWithStackByArgs("") + } + return dbInfo, nil +} + +func getIDs(tables []*model.TableInfo) []int64 { + ids := make([]int64, 0, len(tables)) + for _, t := range tables { + ids = append(ids, t.ID) + } + + return ids +} diff --git a/ddl/schema_test.go b/ddl/schema_test.go new file mode 100644 index 0000000..9705818 --- /dev/null +++ b/ddl/schema_test.go @@ -0,0 +1,280 @@ +// Copyright 2015 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package ddl + +import ( + "context" + "time" + + . "github.com/pingcap/check" + "github.com/pingcap/tidb/infoschema" + "github.com/pingcap/tidb/kv" + "github.com/pingcap/tidb/meta" + "github.com/pingcap/tidb/parser/model" + "github.com/pingcap/tidb/parser/terror" + "github.com/pingcap/tidb/sessionctx" + "github.com/pingcap/tidb/types" + "github.com/pingcap/tidb/util/mock" +) + +var _ = Suite(&testSchemaSuite{}) + +type testSchemaSuite struct{} + +func (s *testSchemaSuite) SetUpSuite(c *C) { +} + +func (s *testSchemaSuite) TearDownSuite(c *C) { +} + +func testSchemaInfo(c *C, d *ddl, name string) *model.DBInfo { + dbInfo := &model.DBInfo{ + Name: model.NewCIStr(name), + } + genIDs, err := d.genGlobalIDs(1) + c.Assert(err, IsNil) + dbInfo.ID = genIDs[0] + return dbInfo +} + +func testCreateSchema(c *C, ctx sessionctx.Context, d *ddl, dbInfo *model.DBInfo) *model.Job { + job := &model.Job{ + SchemaID: dbInfo.ID, + Type: model.ActionCreateSchema, + BinlogInfo: &model.HistoryInfo{}, + Args: []interface{}{dbInfo}, + } + err := d.doDDLJob(ctx, job) + c.Assert(err, IsNil) + + v := getSchemaVer(c, ctx) + dbInfo.State = model.StatePublic + checkHistoryJobArgs(c, ctx, job.ID, &historyJobArgs{ver: v, db: dbInfo}) + dbInfo.State = model.StateNone + return job +} + +func buildDropSchemaJob(dbInfo *model.DBInfo) *model.Job { + return &model.Job{ + SchemaID: dbInfo.ID, + Type: model.ActionDropSchema, + BinlogInfo: &model.HistoryInfo{}, + } +} + +func testDropSchema(c *C, ctx sessionctx.Context, d *ddl, dbInfo *model.DBInfo) (*model.Job, int64) { + job := buildDropSchemaJob(dbInfo) + err := d.doDDLJob(ctx, job) + c.Assert(err, IsNil) + ver := getSchemaVer(c, ctx) + return job, ver +} + +func isDDLJobDone(c *C, t *meta.Meta) bool { + job, err := t.GetDDLJobByIdx(0) + c.Assert(err, IsNil) + if job == nil { + return true + } + + time.Sleep(testLease) + return false +} + +func testCheckSchemaState(c *C, d *ddl, dbInfo *model.DBInfo, state model.SchemaState) { + isDropped := true + + for { + kv.RunInNewTxn(d.store, false, func(txn kv.Transaction) error { + t := meta.NewMeta(txn) + info, err := t.GetDatabase(dbInfo.ID) + c.Assert(err, IsNil) + + if state == model.StateNone { + isDropped = isDDLJobDone(c, t) + if !isDropped { + return nil + } + c.Assert(info, IsNil) + return nil + } + + c.Assert(info.Name, DeepEquals, dbInfo.Name) + c.Assert(info.State, Equals, state) + return nil + }) + + if isDropped { + break + } + } +} + +func (s *testSchemaSuite) TestSchema(c *C) { + store := testCreateStore(c, "test_schema") + defer store.Close() + d := newDDL( + context.Background(), + WithStore(store), + WithLease(testLease), + ) + defer d.Stop() + ctx := testNewContext(d) + dbInfo := testSchemaInfo(c, d, "test") + + // create a database. + job := testCreateSchema(c, ctx, d, dbInfo) + testCheckSchemaState(c, d, dbInfo, model.StatePublic) + testCheckJobDone(c, d, job, true) + + /*** to drop the schema with two tables. ***/ + // create table t with 100 records. 
+ tblInfo1 := testTableInfo(c, d, "t", 3) + tJob1 := testCreateTable(c, ctx, d, dbInfo, tblInfo1) + testCheckTableState(c, d, dbInfo, tblInfo1, model.StatePublic) + testCheckJobDone(c, d, tJob1, true) + tbl1 := testGetTable(c, d, dbInfo.ID, tblInfo1.ID) + for i := 1; i <= 100; i++ { + _, err := tbl1.AddRecord(ctx, types.MakeDatums(i, i, i)) + c.Assert(err, IsNil) + } + // create table t1 with 1034 records. + tblInfo2 := testTableInfo(c, d, "t1", 3) + tJob2 := testCreateTable(c, ctx, d, dbInfo, tblInfo2) + testCheckTableState(c, d, dbInfo, tblInfo2, model.StatePublic) + testCheckJobDone(c, d, tJob2, true) + tbl2 := testGetTable(c, d, dbInfo.ID, tblInfo2.ID) + for i := 1; i <= 1034; i++ { + _, err := tbl2.AddRecord(ctx, types.MakeDatums(i, i, i)) + c.Assert(err, IsNil) + } + job, v := testDropSchema(c, ctx, d, dbInfo) + testCheckSchemaState(c, d, dbInfo, model.StateNone) + ids := make(map[int64]struct{}) + ids[tblInfo1.ID] = struct{}{} + ids[tblInfo2.ID] = struct{}{} + checkHistoryJobArgs(c, ctx, job.ID, &historyJobArgs{ver: v, db: dbInfo, tblIDs: ids}) + + // Drop a non-existent database. + job = &model.Job{ + SchemaID: dbInfo.ID, + Type: model.ActionDropSchema, + BinlogInfo: &model.HistoryInfo{}, + } + err := d.doDDLJob(ctx, job) + c.Assert(terror.ErrorEqual(err, infoschema.ErrDatabaseDropExists), IsTrue, Commentf("err %v", err)) + + // Drop a database without a table. + dbInfo1 := testSchemaInfo(c, d, "test1") + job = testCreateSchema(c, ctx, d, dbInfo1) + testCheckSchemaState(c, d, dbInfo1, model.StatePublic) + testCheckJobDone(c, d, job, true) + job, _ = testDropSchema(c, ctx, d, dbInfo1) + testCheckSchemaState(c, d, dbInfo1, model.StateNone) + testCheckJobDone(c, d, job, false) +} + +func (s *testSchemaSuite) TestSchemaWaitJob(c *C) { + store := testCreateStore(c, "test_schema_wait") + defer store.Close() + + d1 := newDDL( + context.Background(), + WithStore(store), + WithLease(testLease), + ) + defer d1.Stop() + + testCheckOwner(c, d1, true) + + d2 := newDDL( + context.Background(), + WithStore(store), + WithLease(testLease*4), + ) + defer d2.Stop() + ctx := testNewContext(d2) + + // d2 must not be owner. + d2.ownerManager.RetireOwner() + + dbInfo := testSchemaInfo(c, d2, "test") + testCreateSchema(c, ctx, d2, dbInfo) + testCheckSchemaState(c, d2, dbInfo, model.StatePublic) + + // d2 must not be owner. 
+ c.Assert(d2.ownerManager.IsOwner(), IsFalse) + + genIDs, err := d2.genGlobalIDs(1) + c.Assert(err, IsNil) + schemaID := genIDs[0] + doDDLJobErr(c, schemaID, 0, model.ActionCreateSchema, []interface{}{dbInfo}, ctx, d2) +} + +func testRunInterruptedJob(c *C, d *ddl, job *model.Job) { + ctx := mock.NewContext() + ctx.Store = d.store + done := make(chan error, 1) + go func() { + done <- d.doDDLJob(ctx, job) + }() + + ticker := time.NewTicker(d.lease * 1) + defer ticker.Stop() + +LOOP: + for { + select { + case <-ticker.C: + d.Stop() + d.restartWorkers(context.Background()) + time.Sleep(time.Millisecond * 20) + case err := <-done: + c.Assert(err, IsNil) + break LOOP + } + } +} + +func (s *testSchemaSuite) TestSchemaResume(c *C) { + store := testCreateStore(c, "test_schema_resume") + defer store.Close() + + d1 := newDDL( + context.Background(), + WithStore(store), + WithLease(testLease), + ) + defer d1.Stop() + + testCheckOwner(c, d1, true) + + dbInfo := testSchemaInfo(c, d1, "test") + job := &model.Job{ + SchemaID: dbInfo.ID, + Type: model.ActionCreateSchema, + BinlogInfo: &model.HistoryInfo{}, + Args: []interface{}{dbInfo}, + } + testRunInterruptedJob(c, d1, job) + testCheckSchemaState(c, d1, dbInfo, model.StatePublic) + + job = &model.Job{ + SchemaID: dbInfo.ID, + Type: model.ActionDropSchema, + BinlogInfo: &model.HistoryInfo{}, + } + testRunInterruptedJob(c, d1, job) + testCheckSchemaState(c, d1, dbInfo, model.StateNone) +} diff --git a/ddl/serial_test.go b/ddl/serial_test.go new file mode 100644 index 0000000..71467f5 --- /dev/null +++ b/ddl/serial_test.go @@ -0,0 +1,313 @@ +// Copyright 2019 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package ddl_test + +import ( + "context" + "fmt" + "math" + "sync" + "time" + + . 
"github.com/pingcap/check" + "github.com/pingcap/errors" + "github.com/pingcap/failpoint" + "github.com/pingcap/tidb/config" + "github.com/pingcap/tidb/ddl" + ddlutil "github.com/pingcap/tidb/ddl/util" + "github.com/pingcap/tidb/domain" + "github.com/pingcap/tidb/kv" + "github.com/pingcap/tidb/meta" + "github.com/pingcap/tidb/parser/model" + "github.com/pingcap/tidb/parser/mysql" + "github.com/pingcap/tidb/session" + "github.com/pingcap/tidb/sessionctx/variable" + "github.com/pingcap/tidb/store/mockstore" + "github.com/pingcap/tidb/store/mockstore/mocktikv" + "github.com/pingcap/tidb/util/admin" + "github.com/pingcap/tidb/util/mock" + "github.com/pingcap/tidb/util/testkit" +) + +var _ = SerialSuites(&testSerialSuite{}) + +type testSerialSuite struct { + store kv.Storage + cluster *mocktikv.Cluster + mvccStore mocktikv.MVCCStore + dom *domain.Domain +} + +func (s *testSerialSuite) SetUpSuite(c *C) { + session.SetSchemaLease(200 * time.Millisecond) + session.DisableStats4Test() + + cfg := config.GetGlobalConfig() + newCfg := *cfg + config.StoreGlobalConfig(&newCfg) + + s.cluster = mocktikv.NewCluster() + s.mvccStore = mocktikv.MustNewMVCCStore() + + ddl.WaitTimeWhenErrorOccured = 1 * time.Microsecond + var err error + s.store, err = mockstore.NewMockTikvStore() + c.Assert(err, IsNil) + + s.dom, err = session.BootstrapSession(s.store) + c.Assert(err, IsNil) +} + +func (s *testSerialSuite) TearDownSuite(c *C) { + if s.dom != nil { + s.dom.Close() + } + if s.store != nil { + s.store.Close() + } +} + +func (s *testSerialSuite) TestMultiRegionGetTableEndHandle(c *C) { + tk := testkit.NewTestKit(c, s.store) + tk.MustExec("drop database if exists test_get_endhandle") + tk.MustExec("create database test_get_endhandle") + tk.MustExec("use test_get_endhandle") + + tk.MustExec("create table t(a bigint PRIMARY KEY, b int)") + for i := 0; i < 1000; i++ { + tk.MustExec(fmt.Sprintf("insert into t values(%v, %v)", i, i)) + } + + // Get table ID for split. + dom := domain.GetDomain(tk.Se) + is := dom.InfoSchema() + tbl, err := is.TableByName(model.NewCIStr("test_get_endhandle"), model.NewCIStr("t")) + c.Assert(err, IsNil) + tblID := tbl.Meta().ID + + d := s.dom.DDL() + testCtx := newTestMaxTableRowIDContext(c, d, tbl) + + // Split the table. + s.cluster.SplitTable(s.mvccStore, tblID, 100) + + maxID, emptyTable := getMaxTableRowID(testCtx, s.store) + c.Assert(emptyTable, IsFalse) + c.Assert(maxID, Equals, int64(999)) + + tk.MustExec("insert into t values(10000, 1000)") + maxID, emptyTable = getMaxTableRowID(testCtx, s.store) + c.Assert(emptyTable, IsFalse) + c.Assert(maxID, Equals, int64(10000)) + + tk.MustExec("insert into t values(-1, 1000)") + maxID, emptyTable = getMaxTableRowID(testCtx, s.store) + c.Assert(emptyTable, IsFalse) + c.Assert(maxID, Equals, int64(10000)) +} + +func (s *testSerialSuite) TestGetTableEndHandle(c *C) { + // TestGetTableEndHandle test ddl.GetTableMaxRowID method, which will return the max row id of the table. + tk := testkit.NewTestKit(c, s.store) + tk.MustExec("drop database if exists test_get_endhandle") + tk.MustExec("create database test_get_endhandle") + tk.MustExec("use test_get_endhandle") + // Test PK is handle. 
+ tk.MustExec("create table t(a bigint PRIMARY KEY, b int)") + + is := s.dom.InfoSchema() + d := s.dom.DDL() + tbl, err := is.TableByName(model.NewCIStr("test_get_endhandle"), model.NewCIStr("t")) + c.Assert(err, IsNil) + + testCtx := newTestMaxTableRowIDContext(c, d, tbl) + // test empty table + checkGetMaxTableRowID(testCtx, s.store, true, int64(math.MaxInt64)) + + tk.MustExec("insert into t values(-1, 1)") + checkGetMaxTableRowID(testCtx, s.store, false, int64(-1)) + + tk.MustExec("insert into t values(9223372036854775806, 1)") + checkGetMaxTableRowID(testCtx, s.store, false, int64(9223372036854775806)) + + tk.MustExec("insert into t values(9223372036854775807, 1)") + checkGetMaxTableRowID(testCtx, s.store, false, int64(9223372036854775807)) + + tk.MustExec("insert into t values(10, 1)") + tk.MustExec("insert into t values(102149142, 1)") + checkGetMaxTableRowID(testCtx, s.store, false, int64(9223372036854775807)) + + tk.MustExec("create table t1(a bigint PRIMARY KEY, b int)") + + for i := 0; i < 1000; i++ { + tk.MustExec(fmt.Sprintf("insert into t1 values(%v, %v)", i, i)) + } + is = s.dom.InfoSchema() + testCtx.tbl, err = is.TableByName(model.NewCIStr("test_get_endhandle"), model.NewCIStr("t1")) + c.Assert(err, IsNil) + checkGetMaxTableRowID(testCtx, s.store, false, int64(999)) + + // Test PK is not handle + tk.MustExec("create table t2(a varchar(255))") + + is = s.dom.InfoSchema() + testCtx.tbl, err = is.TableByName(model.NewCIStr("test_get_endhandle"), model.NewCIStr("t2")) + c.Assert(err, IsNil) + checkGetMaxTableRowID(testCtx, s.store, true, int64(math.MaxInt64)) + + for i := 0; i < 1000; i++ { + tk.MustExec(fmt.Sprintf("insert into t2 values(%v)", i)) + } + + result := tk.MustQuery("select MAX(_tidb_rowid) from t2") + maxID, emptyTable := getMaxTableRowID(testCtx, s.store) + result.Check(testkit.Rows(fmt.Sprintf("%v", maxID))) + c.Assert(emptyTable, IsFalse) + + tk.MustExec("insert into t2 values(100000)") + result = tk.MustQuery("select MAX(_tidb_rowid) from t2") + maxID, emptyTable = getMaxTableRowID(testCtx, s.store) + result.Check(testkit.Rows(fmt.Sprintf("%v", maxID))) + c.Assert(emptyTable, IsFalse) + + tk.MustExec(fmt.Sprintf("insert into t2 values(%v)", math.MaxInt64-1)) + result = tk.MustQuery("select MAX(_tidb_rowid) from t2") + maxID, emptyTable = getMaxTableRowID(testCtx, s.store) + result.Check(testkit.Rows(fmt.Sprintf("%v", maxID))) + c.Assert(emptyTable, IsFalse) + + tk.MustExec(fmt.Sprintf("insert into t2 values(%v)", math.MaxInt64)) + result = tk.MustQuery("select MAX(_tidb_rowid) from t2") + maxID, emptyTable = getMaxTableRowID(testCtx, s.store) + result.Check(testkit.Rows(fmt.Sprintf("%v", maxID))) + c.Assert(emptyTable, IsFalse) + + tk.MustExec("insert into t2 values(100)") + result = tk.MustQuery("select MAX(_tidb_rowid) from t2") + maxID, emptyTable = getMaxTableRowID(testCtx, s.store) + result.Check(testkit.Rows(fmt.Sprintf("%v", maxID))) + c.Assert(emptyTable, IsFalse) +} + +// TestCancelAddIndex1 tests canceling ddl job when the add index worker is not started. 
+func (s *testSerialSuite) TestCancelAddIndexPanic(c *C) { + c.Assert(failpoint.Enable("github.com/pingcap/tidb/ddl/errorMockPanic", `return(true)`), IsNil) + defer func() { + c.Assert(failpoint.Disable("github.com/pingcap/tidb/ddl/errorMockPanic"), IsNil) + }() + tk := testkit.NewTestKit(c, s.store) + tk.MustExec("use test") + tk.MustExec("drop table if exists t") + tk.MustExec("create table t(c1 int, c2 int)") + defer tk.MustExec("drop table t;") + for i := 0; i < 5; i++ { + tk.MustExec(fmt.Sprintf("insert into t values (%d, %d)", i, i)) + } + var checkErr error + oldReorgWaitTimeout := ddl.ReorgWaitTimeout + ddl.ReorgWaitTimeout = 50 * time.Millisecond + defer func() { ddl.ReorgWaitTimeout = oldReorgWaitTimeout }() + hook := &ddl.TestDDLCallback{} + hook.OnJobRunBeforeExported = func(job *model.Job) { + if job.Type == model.ActionAddIndex && job.State == model.JobStateRunning && job.SchemaState == model.StateWriteReorganization && job.SnapshotVer != 0 { + jobIDs := []int64{job.ID} + hookCtx := mock.NewContext() + hookCtx.Store = s.store + err := hookCtx.NewTxn(context.Background()) + if err != nil { + checkErr = errors.Trace(err) + return + } + txn, err := hookCtx.Txn(true) + if err != nil { + checkErr = errors.Trace(err) + return + } + errs, err := admin.CancelJobs(txn, jobIDs) + if err != nil { + checkErr = errors.Trace(err) + return + } + if errs[0] != nil { + checkErr = errors.Trace(errs[0]) + return + } + txn, err = hookCtx.Txn(true) + if err != nil { + checkErr = errors.Trace(err) + return + } + checkErr = txn.Commit(context.Background()) + } + } + origHook := s.dom.DDL().GetHook() + defer s.dom.DDL().(ddl.DDLForTest).SetHook(origHook) + s.dom.DDL().(ddl.DDLForTest).SetHook(hook) + rs, err := tk.Exec("alter table t add index idx_c2(c2)") + if rs != nil { + rs.Close() + } + c.Assert(checkErr, IsNil) + c.Assert(err, NotNil) + c.Assert(err.Error(), Equals, "[ddl:8214]Cancelled DDL job") +} + +func (s *testSerialSuite) TestCancelJobByErrorCountLimit(c *C) { + tk := testkit.NewTestKit(c, s.store) + c.Assert(failpoint.Enable("github.com/pingcap/tidb/ddl/mockExceedErrorLimit", `return(true)`), IsNil) + defer func() { + c.Assert(failpoint.Disable("github.com/pingcap/tidb/ddl/mockExceedErrorLimit"), IsNil) + }() + tk.MustExec("use test") + tk.MustExec("drop table if exists t") + + limit := variable.GetDDLErrorCountLimit() + tk.MustExec("set @@global.tidb_ddl_error_count_limit = 16") + err := ddlutil.LoadDDLVars(tk.Se) + c.Assert(err, IsNil) + defer tk.MustExec(fmt.Sprintf("set @@global.tidb_ddl_error_count_limit = %d", limit)) + + _, err = tk.Exec("create table t (a int)") + c.Assert(err, NotNil) + c.Assert(err.Error(), Equals, "[ddl:8214]Cancelled DDL job") +} + +func (s *testSerialSuite) TestCanceledJobTakeTime(c *C) { + tk := testkit.NewTestKit(c, s.store) + tk.MustExec("use test") + tk.MustExec("create table t_cjtt(a int)") + + hook := &ddl.TestDDLCallback{} + once := sync.Once{} + hook.OnJobUpdatedExported = func(job *model.Job) { + once.Do(func() { + err := kv.RunInNewTxn(s.store, false, func(txn kv.Transaction) error { + t := meta.NewMeta(txn) + return t.DropTableOrView(job.SchemaID, job.TableID, true) + }) + c.Assert(err, IsNil) + }) + } + origHook := s.dom.DDL().GetHook() + s.dom.DDL().(ddl.DDLForTest).SetHook(hook) + defer s.dom.DDL().(ddl.DDLForTest).SetHook(origHook) + + originalWT := ddl.WaitTimeWhenErrorOccured + ddl.WaitTimeWhenErrorOccured = 1 * time.Second + defer func() { ddl.WaitTimeWhenErrorOccured = originalWT }() + startTime := time.Now() + tk.MustGetErrCode("alter 
table t_cjtt add column b int", mysql.ErrNoSuchTable) + sub := time.Since(startTime) + c.Assert(sub, Less, ddl.WaitTimeWhenErrorOccured) +} diff --git a/ddl/session_pool.go b/ddl/session_pool.go new file mode 100644 index 0000000..56ea7d4 --- /dev/null +++ b/ddl/session_pool.go @@ -0,0 +1,88 @@ +// Copyright 2018 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package ddl + +import ( + "sync" + + "github.com/ngaut/pools" + "github.com/pingcap/errors" + "github.com/pingcap/tidb/parser/mysql" + "github.com/pingcap/tidb/sessionctx" + "github.com/pingcap/tidb/util/logutil" + "github.com/pingcap/tidb/util/mock" +) + +// sessionPool is used to new session. +type sessionPool struct { + mu struct { + sync.Mutex + closed bool + } + resPool *pools.ResourcePool +} + +func newSessionPool(resPool *pools.ResourcePool) *sessionPool { + return &sessionPool{resPool: resPool} +} + +// get gets sessionctx from context resource pool. +// Please remember to call put after you finished using sessionctx. +func (sg *sessionPool) get() (sessionctx.Context, error) { + if sg.resPool == nil { + return mock.NewContext(), nil + } + + sg.mu.Lock() + if sg.mu.closed { + sg.mu.Unlock() + return nil, errors.Errorf("sessionPool is closed.") + } + sg.mu.Unlock() + + // no need to protect sg.resPool + resource, err := sg.resPool.Get() + if err != nil { + return nil, errors.Trace(err) + } + + ctx := resource.(sessionctx.Context) + ctx.GetSessionVars().SetStatusFlag(mysql.ServerStatusAutocommit, true) + ctx.GetSessionVars().InRestrictedSQL = true + return ctx, nil +} + +// put returns sessionctx to context resource pool. +func (sg *sessionPool) put(ctx sessionctx.Context) { + if sg.resPool == nil { + return + } + + // no need to protect sg.resPool, even the sg.resPool is closed, the ctx still need to + // put into resPool, because when resPool is closing, it will wait all the ctx returns, then resPool finish closing. + sg.resPool.Put(ctx.(pools.Resource)) +} + +// close clean up the sessionPool. +func (sg *sessionPool) close() { + sg.mu.Lock() + defer sg.mu.Unlock() + // prevent closing resPool twice. + if sg.mu.closed || sg.resPool == nil { + return + } + logutil.BgLogger().Info("[ddl] closing sessionPool") + sg.resPool.Close() + sg.mu.closed = true +} diff --git a/ddl/stat.go b/ddl/stat.go new file mode 100644 index 0000000..7a3bd67 --- /dev/null +++ b/ddl/stat.go @@ -0,0 +1,88 @@ +// Copyright 2015 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package ddl + +import ( + "github.com/pingcap/errors" + "github.com/pingcap/tidb/kv" + "github.com/pingcap/tidb/sessionctx/variable" + "github.com/pingcap/tidb/util/admin" +) + +var ( + serverID = "server_id" + ddlSchemaVersion = "ddl_schema_version" + ddlJobID = "ddl_job_id" + ddlJobAction = "ddl_job_action" + ddlJobStartTS = "ddl_job_start_ts" + ddlJobState = "ddl_job_state" + ddlJobError = "ddl_job_error" + ddlJobRows = "ddl_job_row_count" + ddlJobSchemaState = "ddl_job_schema_state" + ddlJobSchemaID = "ddl_job_schema_id" + ddlJobTableID = "ddl_job_table_id" + ddlJobSnapshotVer = "ddl_job_snapshot_ver" + ddlJobReorgHandle = "ddl_job_reorg_handle" + ddlJobArgs = "ddl_job_args" +) + +// GetScope gets the status variables scope. +func (d *ddl) GetScope(status string) variable.ScopeFlag { + // Now ddl status variables scope are all default scope. + return variable.DefaultStatusVarScopeFlag +} + +// Stats returns the DDL statistics. +func (d *ddl) Stats(vars *variable.SessionVars) (map[string]interface{}, error) { + m := make(map[string]interface{}) + m[serverID] = d.uuid + var ddlInfo *admin.DDLInfo + + err := kv.RunInNewTxn(d.store, false, func(txn kv.Transaction) error { + var err1 error + ddlInfo, err1 = admin.GetDDLInfo(txn) + if err1 != nil { + return errors.Trace(err1) + } + return errors.Trace(err1) + }) + if err != nil { + return nil, errors.Trace(err) + } + + m[ddlSchemaVersion] = ddlInfo.SchemaVer + // TODO: Get the owner information. + if len(ddlInfo.Jobs) == 0 { + return m, nil + } + // TODO: Add all job information if needed. + job := ddlInfo.Jobs[0] + m[ddlJobID] = job.ID + m[ddlJobAction] = job.Type.String() + m[ddlJobStartTS] = job.StartTS / 1e9 // unit: second + m[ddlJobState] = job.State.String() + m[ddlJobRows] = job.RowCount + if job.Error == nil { + m[ddlJobError] = "" + } else { + m[ddlJobError] = job.Error.Error() + } + m[ddlJobSchemaState] = job.SchemaState.String() + m[ddlJobSchemaID] = job.SchemaID + m[ddlJobTableID] = job.TableID + m[ddlJobSnapshotVer] = job.SnapshotVer + m[ddlJobReorgHandle] = ddlInfo.ReorgHandle + m[ddlJobArgs] = job.Args + return m, nil +} diff --git a/ddl/stat_test.go b/ddl/stat_test.go new file mode 100644 index 0000000..010b31d --- /dev/null +++ b/ddl/stat_test.go @@ -0,0 +1,98 @@ +// Copyright 2015 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package ddl + +import ( + "context" + "time" + + . 
"github.com/pingcap/check" + "github.com/pingcap/tidb/parser/model" + "github.com/pingcap/tidb/util/mock" +) + +var _ = Suite(&testStatSuite{}) + +type testStatSuite struct { +} + +func (s *testStatSuite) SetUpSuite(c *C) { +} + +func (s *testStatSuite) TearDownSuite(c *C) { +} + +func (s *testStatSuite) getDDLSchemaVer(c *C, d *ddl) int64 { + m, err := d.Stats(nil) + c.Assert(err, IsNil) + v := m[ddlSchemaVersion] + return v.(int64) +} + +func (s *testStatSuite) TestStat(c *C) { + store := testCreateStore(c, "test_stat") + defer store.Close() + + d := newDDL( + context.Background(), + WithStore(store), + WithLease(testLease), + ) + defer d.Stop() + + time.Sleep(testLease) + + dbInfo := testSchemaInfo(c, d, "test") + testCreateSchema(c, testNewContext(d), d, dbInfo) + + // TODO: Get this information from etcd. + // m, err := d.Stats(nil) + // c.Assert(err, IsNil) + // c.Assert(m[ddlOwnerID], Equals, d.uuid) + + job := &model.Job{ + SchemaID: dbInfo.ID, + Type: model.ActionDropSchema, + BinlogInfo: &model.HistoryInfo{}, + Args: []interface{}{dbInfo.Name}, + } + + ctx := mock.NewContext() + ctx.Store = store + done := make(chan error, 1) + go func() { + done <- d.doDDLJob(ctx, job) + }() + + ticker := time.NewTicker(d.lease * 1) + defer ticker.Stop() + + ver := s.getDDLSchemaVer(c, d) +LOOP: + for { + select { + case <-ticker.C: + d.close() + c.Assert(s.getDDLSchemaVer(c, d), GreaterEqual, ver) + d.restartWorkers(context.Background()) + time.Sleep(time.Millisecond * 20) + case err := <-done: + c.Assert(err, IsNil) + // TODO: Get this information from etcd. + // m, err := d.Stats(nil) + // c.Assert(err, IsNil) + break LOOP + } + } +} diff --git a/ddl/table.go b/ddl/table.go new file mode 100644 index 0000000..4a20451 --- /dev/null +++ b/ddl/table.go @@ -0,0 +1,375 @@ +// Copyright 2015 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package ddl + +import ( + "fmt" + "github.com/pingcap/errors" + "github.com/pingcap/failpoint" + "github.com/pingcap/tidb/infoschema" + "github.com/pingcap/tidb/kv" + "github.com/pingcap/tidb/meta" + "github.com/pingcap/tidb/meta/autoid" + "github.com/pingcap/tidb/parser/charset" + "github.com/pingcap/tidb/parser/model" + field_types "github.com/pingcap/tidb/parser/types" + "github.com/pingcap/tidb/table" + "github.com/pingcap/tidb/table/tables" + "github.com/pingcap/tidb/tablecodec" +) + +func onCreateTable(d *ddlCtx, t *meta.Meta, job *model.Job) (ver int64, _ error) { + failpoint.Inject("mockExceedErrorLimit", func(val failpoint.Value) { + if val.(bool) { + failpoint.Return(ver, errors.New("mock do job error")) + } + }) + + schemaID := job.SchemaID + tbInfo := &model.TableInfo{} + if err := job.DecodeArgs(tbInfo); err != nil { + // Invalid arguments, cancel this job. 
+ job.State = model.JobStateCancelled + return ver, errors.Trace(err) + } + + tbInfo.State = model.StateNone + err := checkTableNotExists(d, t, schemaID, tbInfo.Name.L) + if err != nil { + if infoschema.ErrDatabaseNotExists.Equal(err) || infoschema.ErrTableExists.Equal(err) { + job.State = model.JobStateCancelled + } + return ver, errors.Trace(err) + } + + ver, err = updateSchemaVersion(t, job) + if err != nil { + return ver, errors.Trace(err) + } + + switch tbInfo.State { + case model.StateNone: + // none -> public + tbInfo.State = model.StatePublic + tbInfo.UpdateTS = t.StartTS + err = createTableOrViewWithCheck(t, job, schemaID, tbInfo) + if err != nil { + return ver, errors.Trace(err) + } + // Finish this job. + job.FinishTableJob(model.JobStateDone, model.StatePublic, ver, tbInfo) + return ver, nil + default: + return ver, ErrInvalidDDLState.GenWithStackByArgs("table", tbInfo.State) + } +} + +func createTableOrViewWithCheck(t *meta.Meta, job *model.Job, schemaID int64, tbInfo *model.TableInfo) error { + err := checkTableInfoValid(tbInfo) + if err != nil { + job.State = model.JobStateCancelled + return errors.Trace(err) + } + return t.CreateTableOrView(schemaID, tbInfo) +} + +func onDropTableOrView(t *meta.Meta, job *model.Job) (ver int64, _ error) { + tblInfo, err := checkTableExistAndCancelNonExistJob(t, job, job.SchemaID) + if err != nil { + return ver, errors.Trace(err) + } + + originalState := job.SchemaState + switch tblInfo.State { + case model.StatePublic: + // public -> write only + job.SchemaState = model.StateWriteOnly + tblInfo.State = model.StateWriteOnly + ver, err = updateVersionAndTableInfo(t, job, tblInfo, originalState != tblInfo.State) + case model.StateWriteOnly: + // write only -> delete only + job.SchemaState = model.StateDeleteOnly + tblInfo.State = model.StateDeleteOnly + ver, err = updateVersionAndTableInfo(t, job, tblInfo, originalState != tblInfo.State) + case model.StateDeleteOnly: + tblInfo.State = model.StateNone + ver, err = updateVersionAndTableInfo(t, job, tblInfo, originalState != tblInfo.State) + if err != nil { + return ver, errors.Trace(err) + } + if err = t.DropTableOrView(job.SchemaID, job.TableID, true); err != nil { + break + } + // Finish this job. 
+ job.FinishTableJob(model.JobStateDone, model.StateNone, ver, tblInfo) + startKey := tablecodec.EncodeTablePrefix(job.TableID) + job.Args = append(job.Args, startKey) + default: + err = ErrInvalidDDLState.GenWithStackByArgs("table", tblInfo.State) + } + + return ver, errors.Trace(err) +} + +func getTable(store kv.Storage, schemaID int64, tblInfo *model.TableInfo) (table.Table, error) { + alloc := autoid.NewAllocator(store, tblInfo.GetDBID(schemaID), tblInfo.IsAutoIncColUnsigned()) + tbl, err := table.TableFromMeta(alloc, tblInfo) + return tbl, errors.Trace(err) +} + +func getTableInfoAndCancelFaultJob(t *meta.Meta, job *model.Job, schemaID int64) (*model.TableInfo, error) { + tblInfo, err := checkTableExistAndCancelNonExistJob(t, job, schemaID) + if err != nil { + return nil, errors.Trace(err) + } + + if tblInfo.State != model.StatePublic { + job.State = model.JobStateCancelled + return nil, ErrInvalidDDLState.GenWithStack("table %s is not in public, but %s", tblInfo.Name, tblInfo.State) + } + + return tblInfo, nil +} + +func checkTableExistAndCancelNonExistJob(t *meta.Meta, job *model.Job, schemaID int64) (*model.TableInfo, error) { + tblInfo, err := getTableInfo(t, job.TableID, schemaID) + if err == nil { + return tblInfo, nil + } + if infoschema.ErrDatabaseNotExists.Equal(err) || infoschema.ErrTableNotExists.Equal(err) { + job.State = model.JobStateCancelled + } + return nil, err +} + +func getTableInfo(t *meta.Meta, tableID, schemaID int64) (*model.TableInfo, error) { + // Check this table's database. + tblInfo, err := t.GetTable(schemaID, tableID) + if err != nil { + if meta.ErrDBNotExists.Equal(err) { + return nil, errors.Trace(infoschema.ErrDatabaseNotExists.GenWithStackByArgs( + fmt.Sprintf("(Schema ID %d)", schemaID), + )) + } + return nil, errors.Trace(err) + } + + // Check the table. + if tblInfo == nil { + return nil, errors.Trace(infoschema.ErrTableNotExists.GenWithStackByArgs( + fmt.Sprintf("(Schema ID %d)", schemaID), + fmt.Sprintf("(Table ID %d)", tableID), + )) + } + return tblInfo, nil +} + +func (w *worker) onShardRowID(d *ddlCtx, t *meta.Meta, job *model.Job) (ver int64, _ error) { + var shardRowIDBits uint64 + err := job.DecodeArgs(&shardRowIDBits) + if err != nil { + job.State = model.JobStateCancelled + return ver, errors.Trace(err) + } + tblInfo, err := getTableInfoAndCancelFaultJob(t, job, job.SchemaID) + if err != nil { + job.State = model.JobStateCancelled + return ver, errors.Trace(err) + } + if shardRowIDBits < tblInfo.ShardRowIDBits { + tblInfo.ShardRowIDBits = shardRowIDBits + } else { + tbl, err := getTable(d.store, job.SchemaID, tblInfo) + if err != nil { + return ver, errors.Trace(err) + } + err = verifyNoOverflowShardBits(w.sessPool, tbl, shardRowIDBits) + if err != nil { + job.State = model.JobStateCancelled + return ver, err + } + tblInfo.ShardRowIDBits = shardRowIDBits + // MaxShardRowIDBits use to check the overflow of auto ID. + tblInfo.MaxShardRowIDBits = shardRowIDBits + } + ver, err = updateVersionAndTableInfo(t, job, tblInfo, true) + if err != nil { + job.State = model.JobStateCancelled + return ver, errors.Trace(err) + } + job.FinishTableJob(model.JobStateDone, model.StatePublic, ver, tblInfo) + return ver, nil +} + +func verifyNoOverflowShardBits(s *sessionPool, tbl table.Table, shardRowIDBits uint64) error { + ctx, err := s.get() + if err != nil { + return errors.Trace(err) + } + defer s.put(ctx) + + // Check next global max auto ID first. 
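+	// (The shard bits occupy the highest bits of the 64-bit row ID below the sign
+	// bit, so the next global auto ID must still fit in the remaining bits;
+	// tables.OverflowShardBits below is assumed to encode exactly that check.)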
+ autoIncID, err := tbl.Allocator(ctx).NextGlobalAutoID(tbl.Meta().ID) + if err != nil { + return errors.Trace(err) + } + if tables.OverflowShardBits(autoIncID, shardRowIDBits) { + return autoid.ErrAutoincReadFailed.GenWithStack("shard_row_id_bits %d will cause next global auto ID %v overflow", shardRowIDBits, autoIncID) + } + return nil +} + +func onModifyTableComment(t *meta.Meta, job *model.Job) (ver int64, _ error) { + var comment string + if err := job.DecodeArgs(&comment); err != nil { + job.State = model.JobStateCancelled + return ver, errors.Trace(err) + } + + tblInfo, err := getTableInfoAndCancelFaultJob(t, job, job.SchemaID) + if err != nil { + return ver, errors.Trace(err) + } + + tblInfo.Comment = comment + ver, err = updateVersionAndTableInfo(t, job, tblInfo, true) + if err != nil { + return ver, errors.Trace(err) + } + job.FinishTableJob(model.JobStateDone, model.StatePublic, ver, tblInfo) + return ver, nil +} + +func onModifyTableCharsetAndCollate(t *meta.Meta, job *model.Job) (ver int64, _ error) { + var toCharset, toCollate string + if err := job.DecodeArgs(&toCharset, &toCollate); err != nil { + job.State = model.JobStateCancelled + return ver, errors.Trace(err) + } + + dbInfo, err := checkSchemaExistAndCancelNotExistJob(t, job) + if err != nil { + return ver, errors.Trace(err) + } + + tblInfo, err := getTableInfoAndCancelFaultJob(t, job, job.SchemaID) + if err != nil { + return ver, errors.Trace(err) + } + + // double check. + _, err = checkAlterTableCharset(tblInfo, dbInfo, toCharset, toCollate) + if err != nil { + job.State = model.JobStateCancelled + return ver, errors.Trace(err) + } + + tblInfo.Charset = toCharset + tblInfo.Collate = toCollate + // update column charset. + for _, col := range tblInfo.Columns { + if field_types.HasCharset(&col.FieldType) { + col.Charset = toCharset + col.Collate = toCollate + } else { + col.Charset = charset.CharsetBin + col.Collate = charset.CharsetBin + } + } + + ver, err = updateVersionAndTableInfo(t, job, tblInfo, true) + if err != nil { + return ver, errors.Trace(err) + } + job.FinishTableJob(model.JobStateDone, model.StatePublic, ver, tblInfo) + return ver, nil +} + +func checkTableNotExists(d *ddlCtx, t *meta.Meta, schemaID int64, tableName string) error { + // d.infoHandle maybe nil in some test. + if d.infoHandle == nil || !d.infoHandle.IsValid() { + return checkTableNotExistsFromStore(t, schemaID, tableName) + } + // Try to use memory schema info to check first. + currVer, err := t.GetSchemaVersion() + if err != nil { + return err + } + is := d.infoHandle.Get() + if is.SchemaMetaVersion() == currVer { + return checkTableNotExistsFromInfoSchema(is, schemaID, tableName) + } + + return checkTableNotExistsFromStore(t, schemaID, tableName) +} + +func checkTableNotExistsFromInfoSchema(is infoschema.InfoSchema, schemaID int64, tableName string) error { + // Check this table's database. + schema, ok := is.SchemaByID(schemaID) + if !ok { + return infoschema.ErrDatabaseNotExists.GenWithStackByArgs("") + } + if is.TableExists(schema.Name, model.NewCIStr(tableName)) { + return infoschema.ErrTableExists.GenWithStackByArgs(tableName) + } + return nil +} + +func checkTableNotExistsFromStore(t *meta.Meta, schemaID int64, tableName string) error { + // Check this table's database. + tables, err := t.ListTables(schemaID) + if err != nil { + if meta.ErrDBNotExists.Equal(err) { + return infoschema.ErrDatabaseNotExists.GenWithStackByArgs("") + } + return errors.Trace(err) + } + + // Check the table. 
+ for _, tbl := range tables { + if tbl.Name.L == tableName { + return infoschema.ErrTableExists.GenWithStackByArgs(tbl.Name) + } + } + + return nil +} + +// updateVersionAndTableInfoWithCheck checks table info validate and updates the schema version and the table information +func updateVersionAndTableInfoWithCheck(t *meta.Meta, job *model.Job, tblInfo *model.TableInfo, shouldUpdateVer bool) ( + ver int64, err error) { + err = checkTableInfoValid(tblInfo) + if err != nil { + job.State = model.JobStateCancelled + return ver, errors.Trace(err) + } + return updateVersionAndTableInfo(t, job, tblInfo, shouldUpdateVer) + +} + +// updateVersionAndTableInfo updates the schema version and the table information. +func updateVersionAndTableInfo(t *meta.Meta, job *model.Job, tblInfo *model.TableInfo, shouldUpdateVer bool) ( + ver int64, err error) { + if shouldUpdateVer { + ver, err = updateSchemaVersion(t, job) + if err != nil { + return 0, errors.Trace(err) + } + } + + if tblInfo.State == model.StatePublic { + tblInfo.UpdateTS = t.StartTS + } + return ver, t.UpdateTable(job.SchemaID, tblInfo) +} diff --git a/ddl/table_test.go b/ddl/table_test.go new file mode 100644 index 0000000..d93cd07 --- /dev/null +++ b/ddl/table_test.go @@ -0,0 +1,224 @@ +// Copyright 2015 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package ddl + +import ( + "context" + "fmt" + + . "github.com/pingcap/check" + "github.com/pingcap/errors" + "github.com/pingcap/tidb/kv" + "github.com/pingcap/tidb/meta" + "github.com/pingcap/tidb/meta/autoid" + "github.com/pingcap/tidb/parser/model" + "github.com/pingcap/tidb/parser/mysql" + "github.com/pingcap/tidb/sessionctx" + "github.com/pingcap/tidb/table" + "github.com/pingcap/tidb/types" +) + +var _ = Suite(&testTableSuite{}) + +type testTableSuite struct { + store kv.Storage + dbInfo *model.DBInfo + + d *ddl +} + +// testTableInfo creates a test table with num int columns and with no index. 
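+// For example, testTableInfo(c, d, "t", 3) builds a table "t" with int columns
+// c1, c2 and c3 (charset utf8, collation utf8_bin), which the tests in this
+// package then fill via AddRecord.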
+func testTableInfo(c *C, d *ddl, name string, num int) *model.TableInfo { + tblInfo := &model.TableInfo{ + Name: model.NewCIStr(name), + } + genIDs, err := d.genGlobalIDs(1) + c.Assert(err, IsNil) + tblInfo.ID = genIDs[0] + + cols := make([]*model.ColumnInfo, num) + for i := range cols { + col := &model.ColumnInfo{ + Name: model.NewCIStr(fmt.Sprintf("c%d", i+1)), + Offset: i, + DefaultValue: i + 1, + State: model.StatePublic, + } + + col.FieldType = *types.NewFieldType(mysql.TypeLong) + col.ID = allocateColumnID(tblInfo) + cols[i] = col + } + tblInfo.Columns = cols + tblInfo.Charset = "utf8" + tblInfo.Collate = "utf8_bin" + return tblInfo +} + +func testCreateTable(c *C, ctx sessionctx.Context, d *ddl, dbInfo *model.DBInfo, tblInfo *model.TableInfo) *model.Job { + job := &model.Job{ + SchemaID: dbInfo.ID, + TableID: tblInfo.ID, + Type: model.ActionCreateTable, + BinlogInfo: &model.HistoryInfo{}, + Args: []interface{}{tblInfo}, + } + err := d.doDDLJob(ctx, job) + c.Assert(err, IsNil) + + v := getSchemaVer(c, ctx) + tblInfo.State = model.StatePublic + checkHistoryJobArgs(c, ctx, job.ID, &historyJobArgs{ver: v, tbl: tblInfo}) + tblInfo.State = model.StateNone + return job +} + +func testDropTable(c *C, ctx sessionctx.Context, d *ddl, dbInfo *model.DBInfo, tblInfo *model.TableInfo) *model.Job { + job := &model.Job{ + SchemaID: dbInfo.ID, + TableID: tblInfo.ID, + Type: model.ActionDropTable, + BinlogInfo: &model.HistoryInfo{}, + } + err := d.doDDLJob(ctx, job) + c.Assert(err, IsNil) + + v := getSchemaVer(c, ctx) + checkHistoryJobArgs(c, ctx, job.ID, &historyJobArgs{ver: v, tbl: tblInfo}) + return job +} + +func testCheckTableState(c *C, d *ddl, dbInfo *model.DBInfo, tblInfo *model.TableInfo, state model.SchemaState) { + err := kv.RunInNewTxn(d.store, false, func(txn kv.Transaction) error { + t := meta.NewMeta(txn) + info, err := t.GetTable(dbInfo.ID, tblInfo.ID) + c.Assert(err, IsNil) + + if state == model.StateNone { + c.Assert(info, IsNil) + return nil + } + + c.Assert(info.Name, DeepEquals, tblInfo.Name) + c.Assert(info.State, Equals, state) + return nil + }) + c.Assert(err, IsNil) +} + +func testGetTable(c *C, d *ddl, schemaID int64, tableID int64) table.Table { + tbl, err := testGetTableWithError(d, schemaID, tableID) + c.Assert(err, IsNil) + return tbl +} + +func testGetTableWithError(d *ddl, schemaID, tableID int64) (table.Table, error) { + var tblInfo *model.TableInfo + err := kv.RunInNewTxn(d.store, false, func(txn kv.Transaction) error { + t := meta.NewMeta(txn) + var err1 error + tblInfo, err1 = t.GetTable(schemaID, tableID) + if err1 != nil { + return errors.Trace(err1) + } + return nil + }) + if err != nil { + return nil, errors.Trace(err) + } + if tblInfo == nil { + return nil, errors.New("table not found") + } + alloc := autoid.NewAllocator(d.store, schemaID, false) + tbl, err := table.TableFromMeta(alloc, tblInfo) + if err != nil { + return nil, errors.Trace(err) + } + return tbl, nil +} + +func (s *testTableSuite) SetUpSuite(c *C) { + s.store = testCreateStore(c, "test_table") + s.d = newDDL( + context.Background(), + WithStore(s.store), + WithLease(testLease), + ) + + s.dbInfo = testSchemaInfo(c, s.d, "test") + testCreateSchema(c, testNewContext(s.d), s.d, s.dbInfo) +} + +func (s *testTableSuite) TearDownSuite(c *C) { + testDropSchema(c, testNewContext(s.d), s.d, s.dbInfo) + s.d.Stop() + s.store.Close() +} + +func (s *testTableSuite) TestTable(c *C) { + d := s.d + + ctx := testNewContext(d) + + tblInfo := testTableInfo(c, d, "t", 3) + job := testCreateTable(c, ctx, d, 
s.dbInfo, tblInfo) + testCheckTableState(c, d, s.dbInfo, tblInfo, model.StatePublic) + testCheckJobDone(c, d, job, true) + + // Create an existing table. + newTblInfo := testTableInfo(c, d, "t", 3) + doDDLJobErr(c, s.dbInfo.ID, newTblInfo.ID, model.ActionCreateTable, []interface{}{newTblInfo}, ctx, d) + + count := 2000 + tbl := testGetTable(c, d, s.dbInfo.ID, tblInfo.ID) + for i := 1; i <= count; i++ { + _, err := tbl.AddRecord(ctx, types.MakeDatums(i, i, i)) + c.Assert(err, IsNil) + } + + job = testDropTable(c, ctx, d, s.dbInfo, tblInfo) + testCheckJobDone(c, d, job, false) + + // for truncate table + tblInfo = testTableInfo(c, d, "tt", 3) + job = testCreateTable(c, ctx, d, s.dbInfo, tblInfo) + testCheckTableState(c, d, s.dbInfo, tblInfo, model.StatePublic) + testCheckJobDone(c, d, job, true) +} + +func (s *testTableSuite) TestTableResume(c *C) { + d := s.d + + testCheckOwner(c, d, true) + + tblInfo := testTableInfo(c, d, "t1", 3) + job := &model.Job{ + SchemaID: s.dbInfo.ID, + TableID: tblInfo.ID, + Type: model.ActionCreateTable, + BinlogInfo: &model.HistoryInfo{}, + Args: []interface{}{tblInfo}, + } + testRunInterruptedJob(c, d, job) + testCheckTableState(c, d, s.dbInfo, tblInfo, model.StatePublic) + + job = &model.Job{ + SchemaID: s.dbInfo.ID, + TableID: tblInfo.ID, + Type: model.ActionDropTable, + BinlogInfo: &model.HistoryInfo{}, + } + testRunInterruptedJob(c, d, job) + testCheckTableState(c, d, s.dbInfo, tblInfo, model.StateNone) +} diff --git a/ddl/testutil/testutil.go b/ddl/testutil/testutil.go new file mode 100644 index 0000000..c690b05 --- /dev/null +++ b/ddl/testutil/testutil.go @@ -0,0 +1,57 @@ +// Copyright 2018 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package testutil + +import ( + "context" + + "github.com/pingcap/check" + "github.com/pingcap/errors" + "github.com/pingcap/tidb/kv" + "github.com/pingcap/tidb/session" +) + +// SessionExecInGoroutine export for testing. +func SessionExecInGoroutine(c *check.C, s kv.Storage, sql string, done chan error) { + ExecMultiSQLInGoroutine(c, s, "test_db", []string{sql}, done) +} + +// ExecMultiSQLInGoroutine exports for testing. +func ExecMultiSQLInGoroutine(c *check.C, s kv.Storage, dbName string, multiSQL []string, done chan error) { + go func() { + se, err := session.CreateSession4Test(s) + if err != nil { + done <- errors.Trace(err) + return + } + defer se.Close() + _, err = se.Execute(context.Background(), "use "+dbName) + if err != nil { + done <- errors.Trace(err) + return + } + for _, sql := range multiSQL { + rs, err := se.Execute(context.Background(), sql) + if err != nil { + done <- errors.Trace(err) + return + } + if rs != nil { + done <- errors.Errorf("RecordSet should be empty.") + return + } + done <- nil + } + }() +} diff --git a/ddl/util/syncer.go b/ddl/util/syncer.go new file mode 100644 index 0000000..cccd40b --- /dev/null +++ b/ddl/util/syncer.go @@ -0,0 +1,475 @@ +// Copyright 2017 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package util
+
+import (
+	"context"
+	"fmt"
+	"math"
+	"strconv"
+	"sync"
+	"sync/atomic"
+	"time"
+	"unsafe"
+
+	"github.com/pingcap/errors"
+	"github.com/pingcap/failpoint"
+	"github.com/pingcap/tidb/parser/terror"
+
+	"github.com/pingcap/tidb/owner"
+	"github.com/pingcap/tidb/util/logutil"
+	"go.etcd.io/etcd/clientv3"
+	"go.etcd.io/etcd/clientv3/concurrency"
+	"go.etcd.io/etcd/etcdserver/api/v3rpc/rpctypes"
+	"go.uber.org/zap"
+)
+
+const (
+	// DDLAllSchemaVersions is the path on etcd that is used to store all servers' current schema versions.
+	// It's exported for testing.
+	DDLAllSchemaVersions = "/tidb/ddl/all_schema_versions"
+	// DDLGlobalSchemaVersion is the path on etcd that is used to store the latest schema version.
+	// It's exported for testing.
+	DDLGlobalSchemaVersion = "/tidb/ddl/global_schema_version"
+	// InitialVersion is the initial schema version for every server.
+	// It's exported for testing.
+	InitialVersion       = "0"
+	putKeyNoRetry        = 1
+	keyOpDefaultRetryCnt = 3
+	putKeyRetryUnlimited = math.MaxInt64
+	keyOpDefaultTimeout  = 2 * time.Second
+	keyOpRetryInterval   = 30 * time.Millisecond
+	checkVersInterval    = 20 * time.Millisecond
+
+	ddlPrompt = "ddl-syncer"
+)
+
+var (
+	// CheckVersFirstWaitTime is the waiting time before the owner checks all the servers' schema versions,
+	// and it's an exported variable for testing.
+	CheckVersFirstWaitTime = 50 * time.Millisecond
+	// SyncerSessionTTL is the etcd session's TTL in seconds.
+	// It's an exported variable for testing.
+	SyncerSessionTTL = 90
+)
+
+// SchemaSyncer is used to synchronize schema version between the DDL worker leader and followers through etcd.
+type SchemaSyncer interface {
+	// Init sets the global schema version path on etcd if it doesn't exist,
+	// then watches this path, and initializes the self schema version on etcd.
+	Init(ctx context.Context) error
+	// UpdateSelfVersion updates the current version to the self path on etcd.
+	UpdateSelfVersion(ctx context.Context, version int64) error
+	// RemoveSelfVersionPath removes the self path from etcd.
+	RemoveSelfVersionPath() error
+	// OwnerUpdateGlobalVersion updates the latest version to the global path on etcd until updating is successful or the ctx is done.
+	OwnerUpdateGlobalVersion(ctx context.Context, version int64) error
+	// GlobalVersionCh gets the chan for watching global version.
+	GlobalVersionCh() clientv3.WatchChan
+	// WatchGlobalSchemaVer watches the global schema version.
+	WatchGlobalSchemaVer(ctx context.Context)
+	// MustGetGlobalVersion gets the global version. The only reason it fails is that ctx is done.
+	MustGetGlobalVersion(ctx context.Context) (int64, error)
+	// Done returns a channel that closes when the syncer is no longer being refreshed.
+	Done() <-chan struct{}
+	// Restart restarts the syncer when it's no longer being refreshed.
+	Restart(ctx context.Context) error
+	// OwnerCheckAllVersions checks whether all followers' schema versions are equal to
+	// the latest schema version. If not, it waits for a while and checks again until the processing time reaches 2 * lease.
+ // It returns until all servers' versions are equal to the latest version or the ctx is done. + OwnerCheckAllVersions(ctx context.Context, latestVer int64) error + // NotifyCleanExpiredPaths informs to clean up expired paths. + // The returned value is used for testing. + NotifyCleanExpiredPaths() bool + // StartCleanWork starts to clean up tasks. + StartCleanWork() + // CloseCleanWork ends cleanup tasks. + CloseCleanWork() +} + +type ownerChecker interface { + IsOwner() bool +} + +type schemaVersionSyncer struct { + selfSchemaVerPath string + etcdCli *clientv3.Client + session unsafe.Pointer + mu struct { + sync.RWMutex + globalVerCh clientv3.WatchChan + } + + // for clean worker + ownerChecker ownerChecker + notifyCleanExpiredPathsCh chan struct{} + quiteCh chan struct{} +} + +// NewSchemaSyncer creates a new SchemaSyncer. +func NewSchemaSyncer(etcdCli *clientv3.Client, id string, oc ownerChecker) SchemaSyncer { + return &schemaVersionSyncer{ + etcdCli: etcdCli, + selfSchemaVerPath: fmt.Sprintf("%s/%s", DDLAllSchemaVersions, id), + ownerChecker: oc, + notifyCleanExpiredPathsCh: make(chan struct{}, 1), + quiteCh: make(chan struct{}), + } +} + +// PutKVToEtcd puts key value to etcd. +// etcdCli is client of etcd. +// retryCnt is retry time when an error occurs. +// opts is configures of etcd Operations. +func PutKVToEtcd(ctx context.Context, etcdCli *clientv3.Client, retryCnt int, key, val string, + opts ...clientv3.OpOption) error { + var err error + for i := 0; i < retryCnt; i++ { + if isContextDone(ctx) { + return errors.Trace(ctx.Err()) + } + + childCtx, cancel := context.WithTimeout(ctx, keyOpDefaultTimeout) + _, err = etcdCli.Put(childCtx, key, val, opts...) + cancel() + if err == nil { + return nil + } + logutil.BgLogger().Warn("[ddl] etcd-cli put kv failed", zap.String("key", key), zap.String("value", val), zap.Error(err), zap.Int("retryCnt", i)) + time.Sleep(keyOpRetryInterval) + } + return errors.Trace(err) +} + +// Init implements SchemaSyncer.Init interface. +func (s *schemaVersionSyncer) Init(ctx context.Context) error { + var err error + _, err = s.etcdCli.Txn(ctx). + If(clientv3.Compare(clientv3.CreateRevision(DDLGlobalSchemaVersion), "=", 0)). + Then(clientv3.OpPut(DDLGlobalSchemaVersion, InitialVersion)). + Commit() + if err != nil { + return errors.Trace(err) + } + logPrefix := fmt.Sprintf("[%s] %s", ddlPrompt, s.selfSchemaVerPath) + session, err := owner.NewSession(ctx, logPrefix, s.etcdCli, owner.NewSessionDefaultRetryCnt, SyncerSessionTTL) + if err != nil { + return errors.Trace(err) + } + s.storeSession(session) + + s.mu.Lock() + s.mu.globalVerCh = s.etcdCli.Watch(ctx, DDLGlobalSchemaVersion) + s.mu.Unlock() + + err = PutKVToEtcd(ctx, s.etcdCli, keyOpDefaultRetryCnt, s.selfSchemaVerPath, InitialVersion, + clientv3.WithLease(s.loadSession().Lease())) + return errors.Trace(err) +} + +func (s *schemaVersionSyncer) loadSession() *concurrency.Session { + return (*concurrency.Session)(atomic.LoadPointer(&s.session)) +} + +func (s *schemaVersionSyncer) storeSession(session *concurrency.Session) { + atomic.StorePointer(&s.session, (unsafe.Pointer)(session)) +} + +// Done implements SchemaSyncer.Done interface. +func (s *schemaVersionSyncer) Done() <-chan struct{} { + failpoint.Inject("ErrorMockSessionDone", func(val failpoint.Value) { + if val.(bool) { + err := s.loadSession().Close() + logutil.BgLogger().Info("close session failed", zap.Error(err)) + } + }) + + return s.loadSession().Done() +} + +// Restart implements SchemaSyncer.Restart interface. 
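`PutKVToEtcd` bounds the number of attempts, checks whether the caller's context is already cancelled before each try, wraps every attempt in a child context with `keyOpDefaultTimeout`, and sleeps `keyOpRetryInterval` between failures. A minimal, dependency-free sketch of that retry shape (the helper name and the values below are illustrative, not part of this patch):

```go
package main

import (
	"context"
	"errors"
	"fmt"
	"time"
)

// retryWithTimeout retries op up to retryCnt times, giving each attempt its
// own deadline, and stops early if the parent context is already done.
func retryWithTimeout(ctx context.Context, retryCnt int, attemptTimeout, interval time.Duration,
	op func(context.Context) error) error {
	var err error
	for i := 0; i < retryCnt; i++ {
		select {
		case <-ctx.Done():
			return ctx.Err()
		default:
		}
		childCtx, cancel := context.WithTimeout(ctx, attemptTimeout)
		err = op(childCtx)
		cancel()
		if err == nil {
			return nil
		}
		fmt.Printf("attempt %d failed: %v\n", i, err)
		time.Sleep(interval)
	}
	return err
}

func main() {
	calls := 0
	err := retryWithTimeout(context.Background(), 3, 2*time.Second, 30*time.Millisecond,
		func(context.Context) error {
			calls++
			if calls < 3 {
				return errors.New("transient error")
			}
			return nil
		})
	fmt.Println(err, calls) // <nil> 3
}
```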
+func (s *schemaVersionSyncer) Restart(ctx context.Context) error { + var err error + defer func() { + + }() + + logPrefix := fmt.Sprintf("[%s] %s", ddlPrompt, s.selfSchemaVerPath) + // NewSession's context will affect the exit of the session. + session, err := owner.NewSession(ctx, logPrefix, s.etcdCli, owner.NewSessionRetryUnlimited, SyncerSessionTTL) + if err != nil { + return errors.Trace(err) + } + s.storeSession(session) + + childCtx, cancel := context.WithTimeout(ctx, keyOpDefaultTimeout) + defer cancel() + err = PutKVToEtcd(childCtx, s.etcdCli, putKeyRetryUnlimited, s.selfSchemaVerPath, InitialVersion, + clientv3.WithLease(s.loadSession().Lease())) + + return errors.Trace(err) +} + +// GlobalVersionCh implements SchemaSyncer.GlobalVersionCh interface. +func (s *schemaVersionSyncer) GlobalVersionCh() clientv3.WatchChan { + s.mu.RLock() + defer s.mu.RUnlock() + return s.mu.globalVerCh +} + +// WatchGlobalSchemaVer implements SchemaSyncer.WatchGlobalSchemaVer interface. +func (s *schemaVersionSyncer) WatchGlobalSchemaVer(ctx context.Context) { + // Make sure the globalVerCh doesn't receive the information of 'close' before we finish the rewatch. + s.mu.Lock() + s.mu.globalVerCh = nil + s.mu.Unlock() + + go func() { + ch := s.etcdCli.Watch(ctx, DDLGlobalSchemaVersion) + + s.mu.Lock() + s.mu.globalVerCh = ch + s.mu.Unlock() + logutil.BgLogger().Info("[ddl] syncer watch global schema finished") + }() +} + +// UpdateSelfVersion implements SchemaSyncer.UpdateSelfVersion interface. +func (s *schemaVersionSyncer) UpdateSelfVersion(ctx context.Context, version int64) error { + ver := strconv.FormatInt(version, 10) + err := PutKVToEtcd(ctx, s.etcdCli, putKeyNoRetry, s.selfSchemaVerPath, ver, + clientv3.WithLease(s.loadSession().Lease())) + return errors.Trace(err) +} + +// OwnerUpdateGlobalVersion implements SchemaSyncer.OwnerUpdateGlobalVersion interface. +func (s *schemaVersionSyncer) OwnerUpdateGlobalVersion(ctx context.Context, version int64) error { + ver := strconv.FormatInt(version, 10) + // TODO: If the version is larger than the original global version, we need set the version. + // Otherwise, we'd better set the original global version. + err := PutKVToEtcd(ctx, s.etcdCli, putKeyRetryUnlimited, DDLGlobalSchemaVersion, ver) + return errors.Trace(err) +} + +// RemoveSelfVersionPath implements SchemaSyncer.RemoveSelfVersionPath interface. +func (s *schemaVersionSyncer) RemoveSelfVersionPath() error { + err := DeleteKeyFromEtcd(s.selfSchemaVerPath, s.etcdCli, keyOpDefaultRetryCnt, keyOpDefaultTimeout) + return errors.Trace(err) +} + +// DeleteKeyFromEtcd deletes key value from etcd. +func DeleteKeyFromEtcd(key string, etcdCli *clientv3.Client, retryCnt int, timeout time.Duration) error { + var err error + ctx := context.Background() + for i := 0; i < retryCnt; i++ { + childCtx, cancel := context.WithTimeout(ctx, timeout) + _, err = etcdCli.Delete(childCtx, key) + cancel() + if err == nil { + return nil + } + logutil.BgLogger().Warn("[ddl] etcd-cli delete key failed", zap.String("key", key), zap.Error(err), zap.Int("retryCnt", i)) + } + return errors.Trace(err) +} + +// MustGetGlobalVersion implements SchemaSyncer.MustGetGlobalVersion interface. 
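A follower typically drains `GlobalVersionCh()` and falls back to `WatchGlobalSchemaVer` when etcd closes the watch channel; the real consumer lives in the schema-reload loop rather than in this file. A hedged sketch of just the channel handling, written against the `SchemaSyncer` interface added here (the wrapper function, its ticker, and its parameter names are assumptions):

```go
package example

import (
	"context"
	"strconv"
	"time"

	"github.com/pingcap/tidb/ddl/util"
)

// watchSchemaVersion consumes global schema-version updates. ev.Kv.Value holds
// the decimal version written by OwnerUpdateGlobalVersion.
func watchSchemaVersion(ctx context.Context, syncer util.SchemaSyncer, lease time.Duration) {
	ticker := time.NewTicker(lease)
	defer ticker.Stop()
	for {
		select {
		case <-ctx.Done():
			return
		case <-ticker.C:
			// A periodic full reload would go here; it also lets the loop pick
			// up a re-created watch channel after WatchGlobalSchemaVer.
		case resp, ok := <-syncer.GlobalVersionCh():
			if !ok {
				// etcd closed the watch channel; ask the syncer to rewatch.
				syncer.WatchGlobalSchemaVer(ctx)
				continue
			}
			for _, ev := range resp.Events {
				if ver, err := strconv.ParseInt(string(ev.Kv.Value), 10, 64); err == nil {
					// Load the new schema, then acknowledge it on our own path.
					_ = syncer.UpdateSelfVersion(ctx, ver)
				}
			}
		}
	}
}
```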
+func (s *schemaVersionSyncer) MustGetGlobalVersion(ctx context.Context) (int64, error) { + var ( + err error + ver int + resp *clientv3.GetResponse + ) + failedCnt := 0 + intervalCnt := int(time.Second / keyOpRetryInterval) + for { + if err != nil { + if failedCnt%intervalCnt == 0 { + logutil.BgLogger().Info("[ddl] syncer get global version failed", zap.Error(err)) + } + time.Sleep(keyOpRetryInterval) + failedCnt++ + } + + if isContextDone(ctx) { + err = errors.Trace(ctx.Err()) + return 0, err + } + + resp, err = s.etcdCli.Get(ctx, DDLGlobalSchemaVersion) + if err != nil { + continue + } + if len(resp.Kvs) > 0 { + ver, err = strconv.Atoi(string(resp.Kvs[0].Value)) + if err == nil { + return int64(ver), nil + } + } + } +} + +func isContextDone(ctx context.Context) bool { + select { + case <-ctx.Done(): + return true + default: + } + return false +} + +// OwnerCheckAllVersions implements SchemaSyncer.OwnerCheckAllVersions interface. +func (s *schemaVersionSyncer) OwnerCheckAllVersions(ctx context.Context, latestVer int64) error { + time.Sleep(CheckVersFirstWaitTime) + notMatchVerCnt := 0 + intervalCnt := int(time.Second / checkVersInterval) + updatedMap := make(map[string]struct{}) + + var err error + for { + if isContextDone(ctx) { + // ctx is canceled or timeout. + err = errors.Trace(ctx.Err()) + return err + } + + resp, err := s.etcdCli.Get(ctx, DDLAllSchemaVersions, clientv3.WithPrefix()) + if err != nil { + logutil.BgLogger().Info("[ddl] syncer check all versions failed, continue checking.", zap.Error(err)) + continue + } + + succ := true + for _, kv := range resp.Kvs { + if _, ok := updatedMap[string(kv.Key)]; ok { + continue + } + + ver, err := strconv.Atoi(string(kv.Value)) + if err != nil { + logutil.BgLogger().Info("[ddl] syncer check all versions, convert value to int failed, continue checking.", zap.String("ddl", string(kv.Key)), zap.String("value", string(kv.Value)), zap.Error(err)) + succ = false + break + } + if int64(ver) < latestVer { + if notMatchVerCnt%intervalCnt == 0 { + logutil.BgLogger().Info("[ddl] syncer check all versions, someone is not synced, continue checking", + zap.String("ddl", string(kv.Key)), zap.Int("currentVer", ver), zap.Int64("latestVer", latestVer)) + } + succ = false + notMatchVerCnt++ + break + } + updatedMap[string(kv.Key)] = struct{}{} + } + if succ { + return nil + } + time.Sleep(checkVersInterval) + } +} + +const ( + opDefaultRetryCnt = 10 + failedGetTTLLimit = 20 + opDefaultTimeout = 3 * time.Second + opRetryInterval = 500 * time.Millisecond +) + +// NeededCleanTTL is exported for testing. 
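`MustGetGlobalVersion` retries indefinitely until the context is done, and logs only once per `intervalCnt` consecutive failures so a flaky etcd does not flood the log. A self-contained sketch of that loop shape (the helper and values below are illustrative, not part of the patch):

```go
package main

import (
	"context"
	"errors"
	"fmt"
	"time"
)

// mustGet retries get until it succeeds or ctx is done, rate-limiting the
// failure log to roughly once per second.
func mustGet(ctx context.Context, get func() (int64, error)) (int64, error) {
	const retryInterval = 30 * time.Millisecond
	intervalCnt := int(time.Second / retryInterval)
	failedCnt := 0
	var err error
	for {
		if err != nil {
			if failedCnt%intervalCnt == 0 {
				fmt.Println("get failed, keep retrying:", err)
			}
			failedCnt++
			time.Sleep(retryInterval)
		}
		select {
		case <-ctx.Done():
			return 0, ctx.Err()
		default:
		}
		var ver int64
		if ver, err = get(); err == nil {
			return ver, nil
		}
	}
}

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 200*time.Millisecond)
	defer cancel()
	calls := 0
	ver, err := mustGet(ctx, func() (int64, error) {
		calls++
		if calls < 4 {
			return 0, errors.New("etcd not ready")
		}
		return 123, nil
	})
	fmt.Println(ver, err) // 123 <nil>
}
```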
+var NeededCleanTTL = int64(-60) + +func (s *schemaVersionSyncer) StartCleanWork() { + for { + select { + case <-s.notifyCleanExpiredPathsCh: + if !s.ownerChecker.IsOwner() { + continue + } + + for i := 0; i < opDefaultRetryCnt; i++ { + childCtx, cancelFunc := context.WithTimeout(context.Background(), opDefaultTimeout) + resp, err := s.etcdCli.Leases(childCtx) + cancelFunc() + if err != nil { + logutil.BgLogger().Info("[ddl] syncer clean expired paths, failed to get leases.", zap.Error(err)) + continue + } + + if isFinished := s.doCleanExpirePaths(resp.Leases); isFinished { + break + } + time.Sleep(opRetryInterval) + } + case <-s.quiteCh: + return + } + } +} + +func (s *schemaVersionSyncer) CloseCleanWork() { + close(s.quiteCh) +} + +func (s *schemaVersionSyncer) NotifyCleanExpiredPaths() bool { + var isNotified bool + select { + case s.notifyCleanExpiredPathsCh <- struct{}{}: + isNotified = true + default: + terror.Log(errors.New("channel is full, failed to notify clean expired paths")) + } + + return isNotified +} + +func (s *schemaVersionSyncer) doCleanExpirePaths(leases []clientv3.LeaseStatus) bool { + failedGetIDs := 0 + failedRevokeIDs := 0 + // TODO: Now LeaseStatus only has lease ID. + for _, lease := range leases { + // The DDL owner key uses '%x', so here print it too. + leaseID := fmt.Sprintf("%x, %d", lease.ID, lease.ID) + childCtx, cancelFunc := context.WithTimeout(context.Background(), opDefaultTimeout) + ttlResp, err := s.etcdCli.TimeToLive(childCtx, lease.ID) + cancelFunc() + if err != nil { + logutil.BgLogger().Info("[ddl] syncer clean expired paths, failed to get one TTL.", zap.String("leaseID", leaseID), zap.Error(err)) + failedGetIDs++ + continue + } + + if failedGetIDs > failedGetTTLLimit { + return false + } + if ttlResp.TTL >= NeededCleanTTL { + continue + } + + childCtx, cancelFunc = context.WithTimeout(context.Background(), opDefaultTimeout) + _, err = s.etcdCli.Revoke(childCtx, lease.ID) + cancelFunc() + if err != nil && terror.ErrorEqual(err, rpctypes.ErrLeaseNotFound) { + logutil.BgLogger().Warn("[ddl] syncer clean expired paths, failed to revoke lease.", zap.String("leaseID", leaseID), + zap.Int64("TTL", ttlResp.TTL), zap.Error(err)) + failedRevokeIDs++ + } + logutil.BgLogger().Warn("[ddl] syncer clean expired paths,", zap.String("leaseID", leaseID), zap.Int64("TTL", ttlResp.TTL)) + } + + if failedGetIDs == 0 && failedRevokeIDs == 0 { + return true + } + return false +} diff --git a/ddl/util/syncer_test.go b/ddl/util/syncer_test.go new file mode 100644 index 0000000..b44f119 --- /dev/null +++ b/ddl/util/syncer_test.go @@ -0,0 +1,256 @@ +// Copyright 2019 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package util_test + +import ( + "context" + "fmt" + "sync" + "testing" + "time" + + . "github.com/pingcap/check" + "github.com/pingcap/errors" + . "github.com/pingcap/tidb/ddl" + . 
"github.com/pingcap/tidb/ddl/util" + "github.com/pingcap/tidb/owner" + "github.com/pingcap/tidb/parser/terror" + "github.com/pingcap/tidb/store/mockstore" + "go.etcd.io/etcd/clientv3" + "go.etcd.io/etcd/etcdserver" + "go.etcd.io/etcd/integration" + "go.etcd.io/etcd/mvcc/mvccpb" + goctx "golang.org/x/net/context" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" +) + +func TestT(t *testing.T) { + TestingT(t) +} + +const minInterval = 10 * time.Nanosecond // It's used to test timeout. + +func TestSyncerSimple(t *testing.T) { + testLease := 5 * time.Millisecond + origin := CheckVersFirstWaitTime + CheckVersFirstWaitTime = 0 + defer func() { + CheckVersFirstWaitTime = origin + }() + + store, err := mockstore.NewMockTikvStore() + if err != nil { + t.Fatal(err) + } + defer store.Close() + + clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1}) + defer clus.Terminate(t) + cli := clus.RandClient() + ctx := goctx.Background() + d := NewDDL( + ctx, + WithEtcdClient(cli), + WithStore(store), + WithLease(testLease), + ) + defer d.Stop() + + // for init function + if err = d.SchemaSyncer().Init(ctx); err != nil { + t.Fatalf("schema version syncer init failed %v", err) + } + resp, err := cli.Get(ctx, DDLAllSchemaVersions, clientv3.WithPrefix()) + if err != nil { + t.Fatalf("client get version failed %v", err) + } + key := DDLAllSchemaVersions + "/" + d.OwnerManager().ID() + checkRespKV(t, 1, key, InitialVersion, resp.Kvs...) + // for MustGetGlobalVersion function + globalVer, err := d.SchemaSyncer().MustGetGlobalVersion(ctx) + if err != nil { + t.Fatalf("client get global version failed %v", err) + } + if InitialVersion != fmt.Sprintf("%d", globalVer) { + t.Fatalf("client get global version %d isn't equal to init version %s", globalVer, InitialVersion) + } + childCtx, _ := goctx.WithTimeout(ctx, minInterval) + _, err = d.SchemaSyncer().MustGetGlobalVersion(childCtx) + if !isTimeoutError(err) { + t.Fatalf("client get global version result not match, err %v", err) + } + + d1 := NewDDL( + ctx, + WithEtcdClient(cli), + WithStore(store), + WithLease(testLease), + ) + defer d1.Stop() + if err = d1.SchemaSyncer().Init(ctx); err != nil { + t.Fatalf("schema version syncer init failed %v", err) + } + + // for watchCh + wg := sync.WaitGroup{} + wg.Add(1) + currentVer := int64(123) + go func() { + defer wg.Done() + select { + case resp := <-d.SchemaSyncer().GlobalVersionCh(): + if len(resp.Events) < 1 { + t.Fatalf("get chan events count less than 1") + } + checkRespKV(t, 1, DDLGlobalSchemaVersion, fmt.Sprintf("%v", currentVer), resp.Events[0].Kv) + case <-time.After(100 * time.Millisecond): + t.Fatalf("get udpate version failed") + } + }() + + // for update latestSchemaVersion + err = d.SchemaSyncer().OwnerUpdateGlobalVersion(ctx, currentVer) + if err != nil { + t.Fatalf("update latest schema version failed %v", err) + } + + wg.Wait() + + // for CheckAllVersions + childCtx, cancel := goctx.WithTimeout(ctx, 200*time.Millisecond) + err = d.SchemaSyncer().OwnerCheckAllVersions(childCtx, currentVer) + if err == nil { + t.Fatalf("check result not match") + } + cancel() + + // for UpdateSelfVersion + err = d.SchemaSyncer().UpdateSelfVersion(context.Background(), currentVer) + if err != nil { + t.Fatalf("update self version failed %v", errors.ErrorStack(err)) + } + err = d1.SchemaSyncer().UpdateSelfVersion(context.Background(), currentVer) + if err != nil { + t.Fatalf("update self version failed %v", errors.ErrorStack(err)) + } + childCtx, _ = goctx.WithTimeout(ctx, minInterval) + err = 
d1.SchemaSyncer().UpdateSelfVersion(childCtx, currentVer) + if !isTimeoutError(err) { + t.Fatalf("update self version result not match, err %v", err) + } + + // for CheckAllVersions + err = d.SchemaSyncer().OwnerCheckAllVersions(context.Background(), currentVer-1) + if err != nil { + t.Fatalf("check all versions failed %v", err) + } + err = d.SchemaSyncer().OwnerCheckAllVersions(context.Background(), currentVer) + if err != nil { + t.Fatalf("check all versions failed %v", err) + } + childCtx, _ = goctx.WithTimeout(ctx, minInterval) + err = d.SchemaSyncer().OwnerCheckAllVersions(childCtx, currentVer) + if !isTimeoutError(err) { + t.Fatalf("check all versions result not match, err %v", err) + } + + // for StartCleanWork + go d.SchemaSyncer().StartCleanWork() + ttl := 10 + // Make sure NeededCleanTTL > ttl, then we definitely clean the ttl. + NeededCleanTTL = int64(11) + ttlKey := "session_ttl_key" + ttlVal := "session_ttl_val" + session, err := owner.NewSession(ctx, "", cli, owner.NewSessionDefaultRetryCnt, ttl) + if err != nil { + t.Fatalf("new session failed %v", err) + } + err = PutKVToEtcd(context.Background(), cli, 5, ttlKey, ttlVal, clientv3.WithLease(session.Lease())) + if err != nil { + t.Fatalf("put kv to etcd failed %v", err) + } + // Make sure the ttlKey is exist in etcd. + resp, err = cli.Get(ctx, ttlKey) + if err != nil { + t.Fatalf("client get version failed %v", err) + } + checkRespKV(t, 1, ttlKey, ttlVal, resp.Kvs...) + d.SchemaSyncer().NotifyCleanExpiredPaths() + // Make sure the clean worker is done. + notifiedCnt := 1 + for i := 0; i < 100; i++ { + isNotified := d.SchemaSyncer().NotifyCleanExpiredPaths() + if isNotified { + notifiedCnt++ + } + // notifyCleanExpiredPathsCh's length is 1, + // so when notifiedCnt is 3, we can make sure the clean worker is done at least once. + if notifiedCnt == 3 { + break + } + time.Sleep(20 * time.Millisecond) + } + if notifiedCnt != 3 { + t.Fatal("clean worker don't finish") + } + // Make sure the ttlKey is removed in etcd. + resp, err = cli.Get(ctx, ttlKey) + if err != nil { + t.Fatalf("client get version failed %v", err) + } + checkRespKV(t, 0, ttlKey, "", resp.Kvs...) + + // for RemoveSelfVersionPath + resp, err = cli.Get(goctx.Background(), key) + if err != nil { + t.Fatalf("get key %s failed %v", key, err) + } + currVer := fmt.Sprintf("%v", currentVer) + checkRespKV(t, 1, key, currVer, resp.Kvs...) + d.SchemaSyncer().RemoveSelfVersionPath() + resp, err = cli.Get(goctx.Background(), key) + if err != nil { + t.Fatalf("get key %s failed %v", key, err) + } + if len(resp.Kvs) != 0 { + t.Fatalf("remove key %s failed %v", key, err) + } +} + +func isTimeoutError(err error) bool { + if terror.ErrorEqual(err, goctx.DeadlineExceeded) || grpc.Code(errors.Cause(err)) == codes.DeadlineExceeded || + terror.ErrorEqual(err, etcdserver.ErrTimeout) { + return true + } + return false +} + +func checkRespKV(t *testing.T, kvCount int, key, val string, + kvs ...*mvccpb.KeyValue) { + if len(kvs) != kvCount { + t.Fatalf("resp key %s kvs %v length is != %d", key, kvs, kvCount) + } + if kvCount == 0 { + return + } + + kv := kvs[0] + if string(kv.Key) != key { + t.Fatalf("key resp %s, exported %s", kv.Key, key) + } + if val != val { + t.Fatalf("val resp %s, exported %s", kv.Value, val) + } +} diff --git a/ddl/util/util.go b/ddl/util/util.go new file mode 100644 index 0000000..b588859 --- /dev/null +++ b/ddl/util/util.go @@ -0,0 +1,59 @@ +// Copyright 2017 PingCAP, Inc. 
+// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package util + +import ( + "fmt" + + "github.com/pingcap/errors" + "github.com/pingcap/tidb/sessionctx" + "github.com/pingcap/tidb/sessionctx/variable" + "github.com/pingcap/tidb/util/sqlexec" +) + +// LoadDDLReorgVars loads ddl reorg variable from mysql.global_variables. +func LoadDDLReorgVars(ctx sessionctx.Context) error { + return LoadGlobalVars(ctx, []string{variable.TiDBDDLReorgWorkerCount, variable.TiDBDDLReorgBatchSize}) +} + +// LoadDDLVars loads ddl variable from mysql.global_variables. +func LoadDDLVars(ctx sessionctx.Context) error { + return LoadGlobalVars(ctx, []string{variable.TiDBDDLErrorCountLimit}) +} + +const loadGlobalVarsSQL = "select HIGH_PRIORITY variable_name, variable_value from mysql.global_variables where variable_name in (%s)" + +// LoadGlobalVars loads global variable from mysql.global_variables. +func LoadGlobalVars(ctx sessionctx.Context, varNames []string) error { + if sctx, ok := ctx.(sqlexec.RestrictedSQLExecutor); ok { + nameList := "" + for i, name := range varNames { + if i > 0 { + nameList += ", " + } + nameList += fmt.Sprintf("'%s'", name) + } + sql := fmt.Sprintf(loadGlobalVarsSQL, nameList) + rows, _, err := sctx.ExecRestrictedSQL(sql) + if err != nil { + return errors.Trace(err) + } + for _, row := range rows { + varName := row.GetString(0) + varValue := row.GetString(1) + variable.SetLocalSystemVar(varName, varValue) + } + } + return nil +} diff --git a/distsql/distsql.go b/distsql/distsql.go new file mode 100644 index 0000000..d571a5b --- /dev/null +++ b/distsql/distsql.go @@ -0,0 +1,60 @@ +// Copyright 2017 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package distsql + +import ( + "context" + + "github.com/pingcap/errors" + "github.com/pingcap/tidb/kv" + "github.com/pingcap/tidb/sessionctx" + "github.com/pingcap/tidb/types" +) + +// Select sends a DAG request, returns SelectResult. +// In kvReq, KeyRanges is required, Concurrency/KeepOrder/Desc/IsolationLevel/Priority are optional. +func Select(ctx context.Context, sctx sessionctx.Context, kvReq *kv.Request, fieldTypes []*types.FieldType) (SelectResult, error) { + // For testing purpose. + if hook := ctx.Value("CheckSelectRequestHook"); hook != nil { + hook.(func(*kv.Request))(kvReq) + } + + resp := sctx.GetClient().Send(ctx, kvReq, sctx.GetSessionVars().KVVars) + if resp == nil { + err := errors.New("client returns nil response") + return nil, err + } + + return &selectResult{ + label: "dag", + resp: resp, + rowLen: len(fieldTypes), + fieldTypes: fieldTypes, + ctx: sctx, + }, nil +} + +// Analyze do a analyze request. 
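`LoadGlobalVars` single-quotes each variable name and joins the list with commas before substituting it into `loadGlobalVarsSQL`. A standalone sketch of just that string assembly (the helper and the example variable names are for illustration only):

```go
package main

import (
	"fmt"
	"strings"
)

const loadGlobalVarsSQL = "select HIGH_PRIORITY variable_name, variable_value from mysql.global_variables where variable_name in (%s)"

// buildLoadVarsSQL mirrors the name-list construction in LoadGlobalVars.
func buildLoadVarsSQL(varNames []string) string {
	quoted := make([]string, 0, len(varNames))
	for _, name := range varNames {
		quoted = append(quoted, fmt.Sprintf("'%s'", name))
	}
	return fmt.Sprintf(loadGlobalVarsSQL, strings.Join(quoted, ", "))
}

func main() {
	fmt.Println(buildLoadVarsSQL([]string{"tidb_ddl_reorg_worker_cnt", "tidb_ddl_reorg_batch_size"}))
	// select HIGH_PRIORITY variable_name, variable_value from mysql.global_variables
	// where variable_name in ('tidb_ddl_reorg_worker_cnt', 'tidb_ddl_reorg_batch_size')
}
```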
+func Analyze(ctx context.Context, client kv.Client, kvReq *kv.Request, vars *kv.Variables) (SelectResult, error) { + resp := client.Send(ctx, kvReq, vars) + if resp == nil { + return nil, errors.New("client returns nil response") + } + + result := &selectResult{ + label: "analyze", + resp: resp, + } + return result, nil +} diff --git a/distsql/distsql_test.go b/distsql/distsql_test.go new file mode 100644 index 0000000..981df11 --- /dev/null +++ b/distsql/distsql_test.go @@ -0,0 +1,180 @@ +// Copyright 2018 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package distsql + +import ( + "context" + "sync" + "time" + + "github.com/cznic/mathutil" + . "github.com/pingcap/check" + "github.com/pingcap/tidb/kv" + "github.com/pingcap/tidb/parser/charset" + "github.com/pingcap/tidb/parser/mysql" + "github.com/pingcap/tidb/sessionctx" + "github.com/pingcap/tidb/sessionctx/variable" + "github.com/pingcap/tidb/types" + "github.com/pingcap/tidb/util/chunk" + "github.com/pingcap/tidb/util/codec" + "github.com/pingcap/tipb/go-tipb" +) + +func (s *testSuite) createSelectNormal(batch, totalRows int, c *C) (*selectResult, []*types.FieldType) { + request, err := (&RequestBuilder{}).SetKeyRanges(nil). + SetDAGRequest(&tipb.DAGRequest{}). + SetDesc(false). + SetKeepOrder(false). + SetFromSessionVars(variable.NewSessionVars()). + Build() + c.Assert(err, IsNil) + + /// 4 int64 types. + colTypes := []*types.FieldType{ + { + Tp: mysql.TypeLonglong, + Flen: mysql.MaxIntWidth, + Decimal: 0, + Flag: mysql.BinaryFlag, + Charset: charset.CharsetBin, + Collate: charset.CollationBin, + }, + } + colTypes = append(colTypes, colTypes[0]) + colTypes = append(colTypes, colTypes[0]) + colTypes = append(colTypes, colTypes[0]) + + // Test Next. + var response SelectResult + response, err = Select(context.TODO(), s.sctx, request, colTypes) + + c.Assert(err, IsNil) + result, ok := response.(*selectResult) + c.Assert(ok, IsTrue) + c.Assert(result.label, Equals, "dag") + c.Assert(result.rowLen, Equals, len(colTypes)) + + resp, ok := result.resp.(*mockResponse) + c.Assert(ok, IsTrue) + resp.total = totalRows + resp.batch = batch + + return result, colTypes +} + +func (s *testSuite) TestSelectNormal(c *C) { + response, colTypes := s.createSelectNormal(1, 2, c) + + // Test Next. + chk := chunk.New(colTypes, 32, 32) + numAllRows := 0 + for { + err := response.Next(context.TODO(), chk) + c.Assert(err, IsNil) + numAllRows += chk.NumRows() + if chk.NumRows() == 0 { + break + } + } + c.Assert(numAllRows, Equals, 2) + err := response.Close() + c.Assert(err, IsNil) +} + +func (s *testSuite) TestAnalyze(c *C) { + request, err := (&RequestBuilder{}).SetKeyRanges(nil). + SetAnalyzeRequest(&tipb.AnalyzeReq{}). + SetKeepOrder(true). 
+ Build() + c.Assert(err, IsNil) + + response, err := Analyze(context.TODO(), s.sctx.GetClient(), request, kv.DefaultVars) + c.Assert(err, IsNil) + + result, ok := response.(*selectResult) + c.Assert(ok, IsTrue) + c.Assert(result.label, Equals, "analyze") + + bytes, err := response.NextRaw(context.TODO()) + c.Assert(err, IsNil) + c.Assert(len(bytes), Equals, 16) + + err = response.Close() + c.Assert(err, IsNil) +} + +// mockResponse implements kv.Response interface. +// Used only for test. +type mockResponse struct { + count int + total int + batch int + ctx sessionctx.Context + sync.Mutex +} + +// Close implements kv.Response interface. +func (resp *mockResponse) Close() error { + resp.Lock() + defer resp.Unlock() + + resp.count = 0 + return nil +} + +// Next implements kv.Response interface. +func (resp *mockResponse) Next(ctx context.Context) (kv.ResultSubset, error) { + resp.Lock() + defer resp.Unlock() + + if resp.count >= resp.total { + return nil, nil + } + numRows := mathutil.Min(resp.batch, resp.total-resp.count) + resp.count += numRows + + var chunks []tipb.Chunk + datum := types.NewIntDatum(1) + bytes := make([]byte, 0, 100) + bytes, _ = codec.EncodeValue(nil, bytes, datum, datum, datum, datum) + chunks = make([]tipb.Chunk, numRows) + for i := range chunks { + chkData := make([]byte, len(bytes)) + copy(chkData, bytes) + chunks[i] = tipb.Chunk{RowsData: chkData} + } + + respPB := &tipb.SelectResponse{ + Chunks: chunks, + OutputCounts: []int64{1}, + } + respBytes, err := respPB.Marshal() + if err != nil { + panic(err) + } + return &mockResultSubset{respBytes}, nil +} + +// mockResultSubset implements kv.ResultSubset interface. +// Used only for test. +type mockResultSubset struct{ data []byte } + +// GetData implements kv.ResultSubset interface. +func (r *mockResultSubset) GetData() []byte { return r.data } + +// MemSize implements kv.ResultSubset interface. +func (r *mockResultSubset) MemSize() int64 { return int64(cap(r.data)) } + +// RespTime implements kv.ResultSubset interface. +func (r *mockResultSubset) RespTime() time.Duration { return 0 } diff --git a/distsql/request_builder.go b/distsql/request_builder.go new file mode 100644 index 0000000..5270f70 --- /dev/null +++ b/distsql/request_builder.go @@ -0,0 +1,227 @@ +// Copyright 2018 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package distsql + +import ( + "math" + + "github.com/pingcap/tidb/kv" + "github.com/pingcap/tidb/sessionctx/stmtctx" + "github.com/pingcap/tidb/sessionctx/variable" + "github.com/pingcap/tidb/tablecodec" + "github.com/pingcap/tidb/util/codec" + "github.com/pingcap/tidb/util/ranger" + "github.com/pingcap/tipb/go-tipb" +) + +// RequestBuilder is used to build a "kv.Request". +// It is called before we issue a kv request by "Select". +type RequestBuilder struct { + kv.Request + err error +} + +// Build builds a "kv.Request". +func (builder *RequestBuilder) Build() (*kv.Request, error) { + return &builder.Request, builder.err +} + +// SetTableRanges sets "KeyRanges" for "kv.Request" by converting "tableRanges" +// to "KeyRanges" firstly. 
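`RequestBuilder` embeds a `kv.Request` and remembers the first setter error, so callers chain the setters freely and check the error once at `Build`. A sketch of the intended call pattern, mirroring the request-builder tests in this patch (the wrapper function and its parameter names are assumptions, not part of the patch):

```go
package example

import (
	"context"

	"github.com/pingcap/tidb/distsql"
	"github.com/pingcap/tidb/sessionctx"
	"github.com/pingcap/tidb/types"
	"github.com/pingcap/tidb/util/ranger"
	"github.com/pingcap/tipb/go-tipb"
)

// buildAndSelect chains the setters, surfaces the accumulated error once at
// Build, then hands the kv.Request to Select.
func buildAndSelect(ctx context.Context, sctx sessionctx.Context, tid int64,
	ranges []*ranger.Range, fieldTypes []*types.FieldType, startTS uint64) (distsql.SelectResult, error) {
	kvReq, err := (&distsql.RequestBuilder{}).
		SetTableRanges(tid, ranges).
		SetDAGRequest(&tipb.DAGRequest{}).
		SetStartTS(startTS).
		SetDesc(false).
		SetKeepOrder(false).
		SetFromSessionVars(sctx.GetSessionVars()).
		Build()
	if err != nil {
		return nil, err
	}
	return distsql.Select(ctx, sctx, kvReq, fieldTypes)
}
```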
+func (builder *RequestBuilder) SetTableRanges(tid int64, tableRanges []*ranger.Range) *RequestBuilder { + if builder.err == nil { + builder.Request.KeyRanges = TableRangesToKVRanges(tid, tableRanges) + } + return builder +} + +// SetIndexRanges sets "KeyRanges" for "kv.Request" by converting index range +// "ranges" to "KeyRanges" firstly. +func (builder *RequestBuilder) SetIndexRanges(sc *stmtctx.StatementContext, tid, idxID int64, ranges []*ranger.Range) *RequestBuilder { + if builder.err == nil { + builder.Request.KeyRanges, builder.err = IndexRangesToKVRanges(sc, tid, idxID, ranges) + } + return builder +} + +// SetTableHandles sets "KeyRanges" for "kv.Request" by converting table handles +// "handles" to "KeyRanges" firstly. +func (builder *RequestBuilder) SetTableHandles(tid int64, handles []int64) *RequestBuilder { + builder.Request.KeyRanges = TableHandlesToKVRanges(tid, handles) + return builder +} + +// SetDAGRequest sets the request type to "ReqTypeDAG" and construct request data. +func (builder *RequestBuilder) SetDAGRequest(dag *tipb.DAGRequest) *RequestBuilder { + if builder.err == nil { + builder.Request.Tp = kv.ReqTypeDAG + builder.Request.Data, builder.err = dag.Marshal() + } + + return builder +} + +// SetAnalyzeRequest sets the request type to "ReqTypeAnalyze" and cosntruct request data. +func (builder *RequestBuilder) SetAnalyzeRequest(ana *tipb.AnalyzeReq) *RequestBuilder { + if builder.err == nil { + builder.Request.Tp = kv.ReqTypeAnalyze + builder.Request.Data, builder.err = ana.Marshal() + builder.Request.NotFillCache = true + builder.Request.IsolationLevel = kv.RC + } + + return builder +} + +// SetKeyRanges sets "KeyRanges" for "kv.Request". +func (builder *RequestBuilder) SetKeyRanges(keyRanges []kv.KeyRange) *RequestBuilder { + builder.Request.KeyRanges = keyRanges + return builder +} + +// SetStartTS sets "StartTS" for "kv.Request". +func (builder *RequestBuilder) SetStartTS(startTS uint64) *RequestBuilder { + builder.Request.StartTs = startTS + return builder +} + +// SetDesc sets "Desc" for "kv.Request". +func (builder *RequestBuilder) SetDesc(desc bool) *RequestBuilder { + builder.Request.Desc = desc + return builder +} + +// SetKeepOrder sets "KeepOrder" for "kv.Request". +func (builder *RequestBuilder) SetKeepOrder(order bool) *RequestBuilder { + builder.Request.KeepOrder = order + return builder +} + +func (builder *RequestBuilder) getIsolationLevel() kv.IsoLevel { + switch builder.Tp { + case kv.ReqTypeAnalyze: + return kv.RC + } + return kv.SI +} + +// SetFromSessionVars sets the following fields for "kv.Request" from session variables: +// "Concurrency", "IsolationLevel", "NotFillCache", "ReplicaRead". +func (builder *RequestBuilder) SetFromSessionVars(sv *variable.SessionVars) *RequestBuilder { + builder.Request.Concurrency = sv.DistSQLScanConcurrency + builder.Request.IsolationLevel = builder.getIsolationLevel() + builder.Request.NotFillCache = sv.StmtCtx.NotFillCache + builder.Request.ReplicaRead = sv.GetReplicaRead() + return builder +} + +// SetConcurrency sets "Concurrency" for "kv.Request". +func (builder *RequestBuilder) SetConcurrency(concurrency int) *RequestBuilder { + builder.Request.Concurrency = concurrency + return builder +} + +// TableRangesToKVRanges converts table ranges to "KeyRange". 
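For analyze traffic the builder adjusts several fields itself: `SetAnalyzeRequest` switches the type to `kv.ReqTypeAnalyze`, sets `NotFillCache`, and forces `RC` isolation, which `getIsolationLevel` preserves even if `SetFromSessionVars` runs afterwards. A small usage sketch mirroring `TestRequestBuilder5` later in this patch (the wrapper function is illustrative):

```go
package example

import (
	"github.com/pingcap/tidb/distsql"
	"github.com/pingcap/tidb/kv"
	"github.com/pingcap/tipb/go-tipb"
)

// buildAnalyzeRequest builds an analyze-type kv.Request over the given ranges;
// isolation level and cache behavior come from SetAnalyzeRequest itself.
func buildAnalyzeRequest(keyRanges []kv.KeyRange, concurrency int) (*kv.Request, error) {
	return (&distsql.RequestBuilder{}).
		SetKeyRanges(keyRanges).
		SetAnalyzeRequest(&tipb.AnalyzeReq{}).
		SetKeepOrder(true).
		SetConcurrency(concurrency).
		Build()
}
```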
+func TableRangesToKVRanges(tid int64, ranges []*ranger.Range) []kv.KeyRange { + krs := make([]kv.KeyRange, 0, len(ranges)) + for _, ran := range ranges { + low, high := encodeHandleKey(ran) + startKey := tablecodec.EncodeRowKey(tid, low) + endKey := tablecodec.EncodeRowKey(tid, high) + krs = append(krs, kv.KeyRange{StartKey: startKey, EndKey: endKey}) + } + return krs +} + +func encodeHandleKey(ran *ranger.Range) ([]byte, []byte) { + low := codec.EncodeInt(nil, ran.LowVal[0].GetInt64()) + high := codec.EncodeInt(nil, ran.HighVal[0].GetInt64()) + if ran.LowExclude { + low = []byte(kv.Key(low).PrefixNext()) + } + if !ran.HighExclude { + high = []byte(kv.Key(high).PrefixNext()) + } + return low, high +} + +// TableHandlesToKVRanges converts sorted handle to kv ranges. +// For continuous handles, we should merge them to a single key range. +func TableHandlesToKVRanges(tid int64, handles []int64) []kv.KeyRange { + krs := make([]kv.KeyRange, 0, len(handles)) + i := 0 + for i < len(handles) { + j := i + 1 + for ; j < len(handles) && handles[j-1] != math.MaxInt64; j++ { + if handles[j] != handles[j-1]+1 { + break + } + } + low := codec.EncodeInt(nil, handles[i]) + high := codec.EncodeInt(nil, handles[j-1]) + high = []byte(kv.Key(high).PrefixNext()) + startKey := tablecodec.EncodeRowKey(tid, low) + endKey := tablecodec.EncodeRowKey(tid, high) + krs = append(krs, kv.KeyRange{StartKey: startKey, EndKey: endKey}) + i = j + } + return krs +} + +// IndexRangesToKVRanges converts index ranges to "KeyRange". +func IndexRangesToKVRanges(sc *stmtctx.StatementContext, tid, idxID int64, ranges []*ranger.Range) ([]kv.KeyRange, error) { + krs := make([]kv.KeyRange, 0, len(ranges)) + for _, ran := range ranges { + low, high, err := encodeIndexKey(sc, ran) + if err != nil { + return nil, err + } + startKey := tablecodec.EncodeIndexSeekKey(tid, idxID, low) + endKey := tablecodec.EncodeIndexSeekKey(tid, idxID, high) + krs = append(krs, kv.KeyRange{StartKey: startKey, EndKey: endKey}) + } + return krs, nil +} + +func encodeIndexKey(sc *stmtctx.StatementContext, ran *ranger.Range) ([]byte, []byte, error) { + low, err := codec.EncodeKey(sc, nil, ran.LowVal...) + if err != nil { + return nil, nil, err + } + if ran.LowExclude { + low = []byte(kv.Key(low).PrefixNext()) + } + high, err := codec.EncodeKey(sc, nil, ran.HighVal...) + if err != nil { + return nil, nil, err + } + + if !ran.HighExclude { + high = []byte(kv.Key(high).PrefixNext()) + } + + var hasNull bool + for _, highVal := range ran.HighVal { + if highVal.IsNull() { + hasNull = true + break + } + } + + if hasNull { + // Append 0 to make unique-key range [null, null] to be a scan rather than point-get. + high = []byte(kv.Key(high).Next()) + } + return low, high, nil +} diff --git a/distsql/request_builder_test.go b/distsql/request_builder_test.go new file mode 100644 index 0000000..6168921 --- /dev/null +++ b/distsql/request_builder_test.go @@ -0,0 +1,540 @@ +// Copyright 2018 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package distsql + +import ( + "os" + "testing" + + . 
"github.com/pingcap/check" + "github.com/pingcap/tidb/kv" + "github.com/pingcap/tidb/sessionctx" + "github.com/pingcap/tidb/sessionctx/stmtctx" + "github.com/pingcap/tidb/sessionctx/variable" + "github.com/pingcap/tidb/tablecodec" + "github.com/pingcap/tidb/types" + "github.com/pingcap/tidb/util/codec" + "github.com/pingcap/tidb/util/logutil" + "github.com/pingcap/tidb/util/mock" + "github.com/pingcap/tidb/util/ranger" + "github.com/pingcap/tidb/util/testleak" + "github.com/pingcap/tipb/go-tipb" +) + +var _ = Suite(&testSuite{}) + +func TestT(t *testing.T) { + CustomVerboseFlag = true + logLevel := os.Getenv("log_level") + logutil.InitLogger(logutil.NewLogConfig(logLevel, logutil.DefaultLogFormat, logutil.EmptyFileLogConfig, false)) + TestingT(t) +} + +var _ = Suite(&testSuite{}) + +type testSuite struct { + sctx sessionctx.Context +} + +func (s *testSuite) SetUpSuite(c *C) { + ctx := mock.NewContext() + ctx.GetSessionVars().StmtCtx = &stmtctx.StatementContext{} + ctx.Store = &mock.Store{ + Client: &mock.Client{ + MockResponse: &mockResponse{ + ctx: ctx, + batch: 1, + total: 2, + }, + }, + } + s.sctx = ctx +} + +func (s *testSuite) TearDownSuite(c *C) { +} + +func (s *testSuite) SetUpTest(c *C) { + testleak.BeforeTest() + ctx := s.sctx.(*mock.Context) + store := ctx.Store.(*mock.Store) + store.Client = &mock.Client{ + MockResponse: &mockResponse{ + ctx: ctx, + batch: 1, + total: 2, + }, + } +} + +func (s *testSuite) TearDownTest(c *C) { + testleak.AfterTest(c)() +} + +type handleRange struct { + start int64 + end int64 +} + +func (s *testSuite) getExpectedRanges(tid int64, hrs []*handleRange) []kv.KeyRange { + krs := make([]kv.KeyRange, 0, len(hrs)) + for _, hr := range hrs { + low := codec.EncodeInt(nil, hr.start) + high := codec.EncodeInt(nil, hr.end) + high = []byte(kv.Key(high).PrefixNext()) + startKey := tablecodec.EncodeRowKey(tid, low) + endKey := tablecodec.EncodeRowKey(tid, high) + krs = append(krs, kv.KeyRange{StartKey: startKey, EndKey: endKey}) + } + return krs +} + +func (s *testSuite) TestTableHandlesToKVRanges(c *C) { + handles := []int64{0, 2, 3, 4, 5, 10, 11, 100, 9223372036854775806, 9223372036854775807} + + // Build expected key ranges. + hrs := make([]*handleRange, 0, len(handles)) + hrs = append(hrs, &handleRange{start: 0, end: 0}) + hrs = append(hrs, &handleRange{start: 2, end: 5}) + hrs = append(hrs, &handleRange{start: 10, end: 11}) + hrs = append(hrs, &handleRange{start: 100, end: 100}) + hrs = append(hrs, &handleRange{start: 9223372036854775806, end: 9223372036854775807}) + + // Build key ranges. + expect := s.getExpectedRanges(1, hrs) + actual := TableHandlesToKVRanges(1, handles) + + // Compare key ranges and expected key ranges. 
+ c.Assert(len(actual), Equals, len(expect)) + for i := range actual { + c.Assert(actual[i].StartKey, DeepEquals, expect[i].StartKey) + c.Assert(actual[i].EndKey, DeepEquals, expect[i].EndKey) + } +} + +func (s *testSuite) TestTableRangesToKVRanges(c *C) { + ranges := []*ranger.Range{ + { + LowVal: []types.Datum{types.NewIntDatum(1)}, + HighVal: []types.Datum{types.NewIntDatum(2)}, + }, + { + LowVal: []types.Datum{types.NewIntDatum(2)}, + HighVal: []types.Datum{types.NewIntDatum(4)}, + LowExclude: true, + HighExclude: true, + }, + { + LowVal: []types.Datum{types.NewIntDatum(4)}, + HighVal: []types.Datum{types.NewIntDatum(19)}, + HighExclude: true, + }, + { + LowVal: []types.Datum{types.NewIntDatum(19)}, + HighVal: []types.Datum{types.NewIntDatum(32)}, + LowExclude: true, + }, + { + LowVal: []types.Datum{types.NewIntDatum(34)}, + HighVal: []types.Datum{types.NewIntDatum(34)}, + LowExclude: true, + }, + } + + actual := TableRangesToKVRanges(13, ranges) + expect := []kv.KeyRange{ + { + StartKey: kv.Key{0x74, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xd, 0x5f, 0x72, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x1}, + EndKey: kv.Key{0x74, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xd, 0x5f, 0x72, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x3}, + }, + { + StartKey: kv.Key{0x74, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xd, 0x5f, 0x72, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x3}, + EndKey: kv.Key{0x74, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xd, 0x5f, 0x72, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x4}, + }, + { + StartKey: kv.Key{0x74, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xd, 0x5f, 0x72, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x4}, + EndKey: kv.Key{0x74, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xd, 0x5f, 0x72, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x13}, + }, + { + StartKey: kv.Key{0x74, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xd, 0x5f, 0x72, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x14}, + EndKey: kv.Key{0x74, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xd, 0x5f, 0x72, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x21}, + }, + { + StartKey: kv.Key{0x74, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xd, 0x5f, 0x72, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x23}, + EndKey: kv.Key{0x74, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xd, 0x5f, 0x72, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x23}, + }, + } + for i := 0; i < len(actual); i++ { + c.Assert(actual[i], DeepEquals, expect[i]) + } +} + +func (s *testSuite) TestIndexRangesToKVRanges(c *C) { + ranges := []*ranger.Range{ + { + LowVal: []types.Datum{types.NewIntDatum(1)}, + HighVal: []types.Datum{types.NewIntDatum(2)}, + }, + { + LowVal: []types.Datum{types.NewIntDatum(2)}, + HighVal: []types.Datum{types.NewIntDatum(4)}, + LowExclude: true, + HighExclude: true, + }, + { + LowVal: []types.Datum{types.NewIntDatum(4)}, + HighVal: []types.Datum{types.NewIntDatum(19)}, + HighExclude: true, + }, + { + LowVal: []types.Datum{types.NewIntDatum(19)}, + HighVal: []types.Datum{types.NewIntDatum(32)}, + LowExclude: true, + }, + { + LowVal: []types.Datum{types.NewIntDatum(34)}, + HighVal: []types.Datum{types.NewIntDatum(34)}, + LowExclude: true, + }, + } + + expect := []kv.KeyRange{ + { + StartKey: kv.Key{0x74, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xc, 0x5f, 0x69, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xf, 0x3, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x1}, + EndKey: kv.Key{0x74, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xc, 0x5f, 0x69, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xf, 0x3, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x3}, + }, + { + StartKey: kv.Key{0x74, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xc, 0x5f, 0x69, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xf, 0x3, 0x80, 0x0, 0x0, 
0x0, 0x0, 0x0, 0x0, 0x3}, + EndKey: kv.Key{0x74, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xc, 0x5f, 0x69, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xf, 0x3, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x4}, + }, + { + StartKey: kv.Key{0x74, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xc, 0x5f, 0x69, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xf, 0x3, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x4}, + EndKey: kv.Key{0x74, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xc, 0x5f, 0x69, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xf, 0x3, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x13}, + }, + { + StartKey: kv.Key{0x74, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xc, 0x5f, 0x69, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xf, 0x3, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x14}, + EndKey: kv.Key{0x74, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xc, 0x5f, 0x69, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xf, 0x3, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x21}, + }, + { + StartKey: kv.Key{0x74, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xc, 0x5f, 0x69, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xf, 0x3, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x23}, + EndKey: kv.Key{0x74, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xc, 0x5f, 0x69, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xf, 0x3, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x23}, + }, + } + + actual, err := IndexRangesToKVRanges(new(stmtctx.StatementContext), 12, 15, ranges) + c.Assert(err, IsNil) + for i := range actual { + c.Assert(actual[i], DeepEquals, expect[i]) + } +} + +func (s *testSuite) TestRequestBuilder1(c *C) { + ranges := []*ranger.Range{ + { + LowVal: []types.Datum{types.NewIntDatum(1)}, + HighVal: []types.Datum{types.NewIntDatum(2)}, + }, + { + LowVal: []types.Datum{types.NewIntDatum(2)}, + HighVal: []types.Datum{types.NewIntDatum(4)}, + LowExclude: true, + HighExclude: true, + }, + { + LowVal: []types.Datum{types.NewIntDatum(4)}, + HighVal: []types.Datum{types.NewIntDatum(19)}, + HighExclude: true, + }, + { + LowVal: []types.Datum{types.NewIntDatum(19)}, + HighVal: []types.Datum{types.NewIntDatum(32)}, + LowExclude: true, + }, + { + LowVal: []types.Datum{types.NewIntDatum(34)}, + HighVal: []types.Datum{types.NewIntDatum(34)}, + LowExclude: true, + }, + } + + actual, err := (&RequestBuilder{}).SetTableRanges(12, ranges). + SetDAGRequest(&tipb.DAGRequest{}). + SetDesc(false). + SetKeepOrder(false). + SetFromSessionVars(variable.NewSessionVars()). 
+ Build() + c.Assert(err, IsNil) + expect := &kv.Request{ + Tp: 103, + StartTs: 0x0, + Data: []uint8{0x18, 0x0, 0x20, 0x0, 0x40, 0x0, 0x5a, 0x0}, + KeyRanges: []kv.KeyRange{ + { + StartKey: kv.Key{0x74, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xc, 0x5f, 0x72, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x1}, + EndKey: kv.Key{0x74, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xc, 0x5f, 0x72, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x3}, + }, + { + StartKey: kv.Key{0x74, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xc, 0x5f, 0x72, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x3}, + EndKey: kv.Key{0x74, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xc, 0x5f, 0x72, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x4}, + }, + { + StartKey: kv.Key{0x74, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xc, 0x5f, 0x72, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x4}, + EndKey: kv.Key{0x74, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xc, 0x5f, 0x72, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x13}, + }, + { + StartKey: kv.Key{0x74, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xc, 0x5f, 0x72, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x14}, + EndKey: kv.Key{0x74, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xc, 0x5f, 0x72, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x21}, + }, + { + StartKey: kv.Key{0x74, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xc, 0x5f, 0x72, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x23}, + EndKey: kv.Key{0x74, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xc, 0x5f, 0x72, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x23}, + }, + }, + KeepOrder: false, + Desc: false, + Concurrency: 15, + IsolationLevel: 0, + NotFillCache: false, + SyncLog: false, + ReplicaRead: kv.ReplicaReadLeader, + } + c.Assert(actual, DeepEquals, expect) +} + +func (s *testSuite) TestRequestBuilder2(c *C) { + ranges := []*ranger.Range{ + { + LowVal: []types.Datum{types.NewIntDatum(1)}, + HighVal: []types.Datum{types.NewIntDatum(2)}, + }, + { + LowVal: []types.Datum{types.NewIntDatum(2)}, + HighVal: []types.Datum{types.NewIntDatum(4)}, + LowExclude: true, + HighExclude: true, + }, + { + LowVal: []types.Datum{types.NewIntDatum(4)}, + HighVal: []types.Datum{types.NewIntDatum(19)}, + HighExclude: true, + }, + { + LowVal: []types.Datum{types.NewIntDatum(19)}, + HighVal: []types.Datum{types.NewIntDatum(32)}, + LowExclude: true, + }, + { + LowVal: []types.Datum{types.NewIntDatum(34)}, + HighVal: []types.Datum{types.NewIntDatum(34)}, + LowExclude: true, + }, + } + + actual, err := (&RequestBuilder{}).SetIndexRanges(new(stmtctx.StatementContext), 12, 15, ranges). + SetDAGRequest(&tipb.DAGRequest{}). + SetDesc(false). + SetKeepOrder(false). + SetFromSessionVars(variable.NewSessionVars()). 
+ Build() + c.Assert(err, IsNil) + expect := &kv.Request{ + Tp: 103, + StartTs: 0x0, + Data: []uint8{0x18, 0x0, 0x20, 0x0, 0x40, 0x0, 0x5a, 0x0}, + KeyRanges: []kv.KeyRange{ + { + StartKey: kv.Key{0x74, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xc, 0x5f, 0x69, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xf, 0x3, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x1}, + EndKey: kv.Key{0x74, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xc, 0x5f, 0x69, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xf, 0x3, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x3}, + }, + { + StartKey: kv.Key{0x74, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xc, 0x5f, 0x69, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xf, 0x3, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x3}, + EndKey: kv.Key{0x74, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xc, 0x5f, 0x69, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xf, 0x3, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x4}, + }, + { + StartKey: kv.Key{0x74, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xc, 0x5f, 0x69, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xf, 0x3, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x4}, + EndKey: kv.Key{0x74, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xc, 0x5f, 0x69, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xf, 0x3, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x13}, + }, + { + StartKey: kv.Key{0x74, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xc, 0x5f, 0x69, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xf, 0x3, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x14}, + EndKey: kv.Key{0x74, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xc, 0x5f, 0x69, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xf, 0x3, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x21}, + }, + { + StartKey: kv.Key{0x74, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xc, 0x5f, 0x69, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xf, 0x3, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x23}, + EndKey: kv.Key{0x74, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xc, 0x5f, 0x69, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xf, 0x3, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x23}, + }, + }, + KeepOrder: false, + Desc: false, + Concurrency: 15, + IsolationLevel: 0, + NotFillCache: false, + SyncLog: false, + ReplicaRead: kv.ReplicaReadLeader, + } + c.Assert(actual, DeepEquals, expect) +} + +func (s *testSuite) TestRequestBuilder3(c *C) { + handles := []int64{0, 2, 3, 4, 5, 10, 11, 100} + + actual, err := (&RequestBuilder{}).SetTableHandles(15, handles). + SetDAGRequest(&tipb.DAGRequest{}). + SetDesc(false). + SetKeepOrder(false). + SetFromSessionVars(variable.NewSessionVars()). 
+ Build() + c.Assert(err, IsNil) + expect := &kv.Request{ + Tp: 103, + StartTs: 0x0, + Data: []uint8{0x18, 0x0, 0x20, 0x0, 0x40, 0x0, 0x5a, 0x0}, + KeyRanges: []kv.KeyRange{ + { + StartKey: kv.Key{0x74, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xf, 0x5f, 0x72, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}, + EndKey: kv.Key{0x74, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xf, 0x5f, 0x72, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x1}, + }, + { + StartKey: kv.Key{0x74, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xf, 0x5f, 0x72, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2}, + EndKey: kv.Key{0x74, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xf, 0x5f, 0x72, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x6}, + }, + { + StartKey: kv.Key{0x74, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xf, 0x5f, 0x72, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xa}, + EndKey: kv.Key{0x74, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xf, 0x5f, 0x72, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xc}, + }, + { + StartKey: kv.Key{0x74, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xf, 0x5f, 0x72, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x64}, + EndKey: kv.Key{0x74, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xf, 0x5f, 0x72, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x65}, + }, + }, + KeepOrder: false, + Desc: false, + Concurrency: 15, + IsolationLevel: 0, + NotFillCache: false, + SyncLog: false, + ReplicaRead: kv.ReplicaReadLeader, + } + c.Assert(actual, DeepEquals, expect) +} + +func (s *testSuite) TestRequestBuilder4(c *C) { + keyRanges := []kv.KeyRange{ + { + StartKey: kv.Key{0x74, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xf, 0x5f, 0x72, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}, + EndKey: kv.Key{0x74, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xf, 0x5f, 0x72, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x1}, + }, + { + StartKey: kv.Key{0x74, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xf, 0x5f, 0x72, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2}, + EndKey: kv.Key{0x74, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xf, 0x5f, 0x72, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x6}, + }, + { + StartKey: kv.Key{0x74, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xf, 0x5f, 0x72, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xa}, + EndKey: kv.Key{0x74, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xf, 0x5f, 0x72, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xc}, + }, + { + StartKey: kv.Key{0x74, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xf, 0x5f, 0x72, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x64}, + EndKey: kv.Key{0x74, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xf, 0x5f, 0x72, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x65}, + }, + } + + actual, err := (&RequestBuilder{}).SetKeyRanges(keyRanges). + SetDAGRequest(&tipb.DAGRequest{}). + SetDesc(false). + SetKeepOrder(false). + SetFromSessionVars(variable.NewSessionVars()). 
+ Build() + c.Assert(err, IsNil) + expect := &kv.Request{ + Tp: 103, + StartTs: 0x0, + Data: []uint8{0x18, 0x0, 0x20, 0x0, 0x40, 0x0, 0x5a, 0x0}, + KeyRanges: keyRanges, + KeepOrder: false, + Desc: false, + Concurrency: 15, + IsolationLevel: 0, + NotFillCache: false, + SyncLog: false, + ReplicaRead: kv.ReplicaReadLeader, + } + c.Assert(actual, DeepEquals, expect) +} + +func (s *testSuite) TestRequestBuilder5(c *C) { + keyRanges := []kv.KeyRange{ + { + StartKey: kv.Key{0x74, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xf, 0x5f, 0x72, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}, + EndKey: kv.Key{0x74, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xf, 0x5f, 0x72, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x1}, + }, + { + StartKey: kv.Key{0x74, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xf, 0x5f, 0x72, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2}, + EndKey: kv.Key{0x74, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xf, 0x5f, 0x72, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x6}, + }, + { + StartKey: kv.Key{0x74, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xf, 0x5f, 0x72, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xa}, + EndKey: kv.Key{0x74, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xf, 0x5f, 0x72, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xc}, + }, + { + StartKey: kv.Key{0x74, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xf, 0x5f, 0x72, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x64}, + EndKey: kv.Key{0x74, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xf, 0x5f, 0x72, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x65}, + }, + } + + actual, err := (&RequestBuilder{}).SetKeyRanges(keyRanges). + SetAnalyzeRequest(&tipb.AnalyzeReq{}). + SetKeepOrder(true). + SetConcurrency(15). + Build() + c.Assert(err, IsNil) + expect := &kv.Request{ + Tp: 104, + StartTs: 0x0, + Data: []uint8{0x8, 0x0, 0x18, 0x0, 0x20, 0x0}, + KeyRanges: keyRanges, + KeepOrder: true, + Desc: false, + Concurrency: 15, + IsolationLevel: kv.RC, + NotFillCache: true, + SyncLog: false, + } + c.Assert(actual, DeepEquals, expect) +} + +func (s *testSuite) TestRequestBuilder7(c *C) { + vars := variable.NewSessionVars() + vars.SetReplicaRead(kv.ReplicaReadFollower) + + concurrency := 10 + + actual, err := (&RequestBuilder{}). + SetFromSessionVars(vars). + SetConcurrency(concurrency). + Build() + c.Assert(err, IsNil) + + expect := &kv.Request{ + Tp: 0, + StartTs: 0x0, + KeepOrder: false, + Desc: false, + Concurrency: concurrency, + IsolationLevel: 0, + NotFillCache: false, + SyncLog: false, + ReplicaRead: kv.ReplicaReadFollower, + } + + c.Assert(actual, DeepEquals, expect) +} diff --git a/distsql/select_result.go b/distsql/select_result.go new file mode 100644 index 0000000..c547e17 --- /dev/null +++ b/distsql/select_result.go @@ -0,0 +1,159 @@ +// Copyright 2018 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package distsql + +import ( + "context" + "time" + + "github.com/pingcap/errors" + "github.com/pingcap/tidb/kv" + "github.com/pingcap/tidb/parser/terror" + "github.com/pingcap/tidb/sessionctx" + "github.com/pingcap/tidb/types" + "github.com/pingcap/tidb/util/chunk" + "github.com/pingcap/tidb/util/codec" + "github.com/pingcap/tipb/go-tipb" +) + +var _ SelectResult = (*selectResult)(nil) + +// SelectResult is an iterator of coprocessor partial results. +type SelectResult interface { + // NextRaw gets the next raw result. + NextRaw(context.Context) ([]byte, error) + // Next reads the data into chunk. + Next(context.Context, *chunk.Chunk) error + // Close closes the iterator. + Close() error +} + +type selectResult struct { + label string + resp kv.Response + + rowLen int + fieldTypes []*types.FieldType + ctx sessionctx.Context + + selectResp *tipb.SelectResponse + selectRespSize int // record the selectResp.Size() when it is initialized. + respChkIdx int + + partialCount int64 // number of partial results. + + fetchDuration time.Duration + durationReported bool +} + +func (r *selectResult) fetchResp(ctx context.Context) error { + for { + r.respChkIdx = 0 + startTime := time.Now() + resultSubset, err := r.resp.Next(ctx) + duration := time.Since(startTime) + r.fetchDuration += duration + if err != nil { + return errors.Trace(err) + } + if resultSubset == nil { + r.selectResp = nil + if !r.durationReported { + // final round of fetch + // TODO: Add a label to distinguish between success or failure. + // https://github.com/pingcap/tidb/issues/11397 + + r.durationReported = true + } + return nil + } + r.selectResp = new(tipb.SelectResponse) + err = r.selectResp.Unmarshal(resultSubset.GetData()) + if err != nil { + return errors.Trace(err) + } + r.selectRespSize = r.selectResp.Size() + if err := r.selectResp.Error; err != nil { + return terror.ClassTiKV.New(terror.ErrCode(err.Code), err.Msg) + } + sc := r.ctx.GetSessionVars().StmtCtx + for _, warning := range r.selectResp.Warnings { + sc.AppendWarning(terror.ClassTiKV.New(terror.ErrCode(warning.Code), warning.Msg)) + } + r.partialCount++ + if len(r.selectResp.Chunks) != 0 { + break + } + } + return nil +} + +func (r *selectResult) Next(ctx context.Context, chk *chunk.Chunk) error { + chk.Reset() + if r.selectResp == nil || r.respChkIdx == len(r.selectResp.Chunks) { + err := r.fetchResp(ctx) + if err != nil { + return err + } + if r.selectResp == nil { + return nil + } + } + for !chk.IsFull() { + if r.respChkIdx == len(r.selectResp.Chunks) { + err := r.fetchResp(ctx) + if err != nil || r.selectResp == nil { + return err + } + } + err := r.readRowsData(chk) + if err != nil { + return err + } + if len(r.selectResp.Chunks[r.respChkIdx].RowsData) == 0 { + r.respChkIdx++ + } + } + return nil +} + +// NextRaw returns the next raw partial result. 
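A caller drains a `SelectResult` by reusing one chunk and calling `Next` until the chunk comes back empty, as `TestSelectNormal` does above. A hedged consumption sketch (the wrapper function and its names are illustrative):

```go
package example

import (
	"context"

	"github.com/pingcap/tidb/distsql"
	"github.com/pingcap/tidb/types"
	"github.com/pingcap/tidb/util/chunk"
)

// drainSelectResult reads every row from result into a reused chunk and
// returns the total row count; the result is closed before returning.
func drainSelectResult(ctx context.Context, result distsql.SelectResult, fieldTypes []*types.FieldType) (int, error) {
	defer func() { _ = result.Close() }()
	chk := chunk.New(fieldTypes, 32, 32)
	rows := 0
	for {
		if err := result.Next(ctx, chk); err != nil {
			return rows, err
		}
		if chk.NumRows() == 0 {
			return rows, nil // all partial results consumed
		}
		for i := 0; i < chk.NumRows(); i++ {
			_ = chk.GetRow(i) // read columns with Row.GetInt64(col), etc.
			rows++
		}
	}
}
```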
+func (r *selectResult) NextRaw(ctx context.Context) (data []byte, err error) { + resultSubset, err := r.resp.Next(ctx) + r.partialCount++ + if resultSubset != nil && err == nil { + data = resultSubset.GetData() + } + return data, err +} + +func (r *selectResult) readRowsData(chk *chunk.Chunk) (err error) { + rowsData := r.selectResp.Chunks[r.respChkIdx].RowsData + decoder := codec.NewDecoder(chk, r.ctx.GetSessionVars().Location()) + for !chk.IsFull() && len(rowsData) > 0 { + for i := 0; i < r.rowLen; i++ { + rowsData, err = decoder.DecodeOne(rowsData, i, r.fieldTypes[i]) + if err != nil { + return err + } + } + } + r.selectResp.Chunks[r.respChkIdx].RowsData = rowsData + return nil +} + +// Close closes selectResult. +func (r *selectResult) Close() error { + return r.resp.Close() +} diff --git a/docs/MAINTAINERS.md b/docs/MAINTAINERS.md new file mode 100644 index 0000000..29021c7 --- /dev/null +++ b/docs/MAINTAINERS.md @@ -0,0 +1,8 @@ +## Maintainers + +- [dongxu](https://github.com/c4pt0r) +- [Ewan Chou](https://github.com/coocood) +- [goroutine](https://github.com/ngaut) +- [qiuyesuifeng](https://github.com/qiuyesuifeng) +- [Shen Li](https://github.com/shenli) +- [siddontang](https://github.com/siddontang) diff --git a/docs/QUICKSTART.md b/docs/QUICKSTART.md new file mode 100644 index 0000000..1ab4328 --- /dev/null +++ b/docs/QUICKSTART.md @@ -0,0 +1,58 @@ +# Quick Start + +#### Run TiDB with Docker (Standalone mode) + +You can quickly test TiDB with Docker, the source repository contains the Dockerfile which contains local tidb-server. + +To install Docker on your system, you can read the document on https://docs.docker.com/ + +``` +docker pull pingcap/tidb:latest +docker run --name tidb-server -d -p 4000:4000 pingcap/tidb:latest +``` + +`docker pull` may take a while to download images ~560M. + +Then you can use official mysql client to connect to TiDB. + +``` +mysql -h 127.0.0.1 -P 4000 -u root -D test --prompt="tidb> " +``` + +Notice: OS X user may use `docker-machine ip` to connect it. + +#### __Or run TiDB on TiKV cluster__ + +Read the documents for [Ansible deployment](https://github.com/pingcap/docs/blob/master/op-guide/ansible-deployment.md) or [docker deployment](https://github.com/pingcap/docs/blob/master/op-guide/docker-deployment.md). + +#### __Pre-requirement__ + +Go environment. Currently a 64-bit version of go >= 1.9 is required. +``` +git clone https://github.com/pingcap/tidb.git $GOPATH/src/github.com/pingcap/tidb +cd $GOPATH/src/github.com/pingcap/tidb +make +``` + +#### __Run as MySQL protocol server__ + +``` +make +cd bin && ./tidb-server +``` + +In case you want to compile a specific location: + +``` +make server TARGET=$GOPATH/bin/tidb-server +``` + +The default server port is `4000` and can be changed by flag `-P `. + +Run `./tidb-server -h` to see more flag options. + +After you started tidb-server, you can use official `mysql` client to connect to TiDB. 
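+
+If you prefer to connect from code instead, the sketch below uses Go's standard `database/sql`
+package together with the `github.com/go-sql-driver/mysql` driver. It is an illustrative example
+rather than part of this repository, and it assumes the default port `4000` and the default empty
+root password:
+
+```
+package main
+
+import (
+    "database/sql"
+    "fmt"
+    "log"
+
+    // TiDB speaks the MySQL wire protocol, so the regular MySQL driver works.
+    _ "github.com/go-sql-driver/mysql"
+)
+
+func main() {
+    // DSN format: user[:password]@tcp(host:port)/database
+    db, err := sql.Open("mysql", "root@tcp(127.0.0.1:4000)/test")
+    if err != nil {
+        log.Fatal(err)
+    }
+    defer db.Close()
+
+    var version string
+    // tidb_version() reports the build information of the connected TiDB server.
+    if err := db.QueryRow("SELECT tidb_version()").Scan(&version); err != nil {
+        log.Fatal(err)
+    }
+    fmt.Println(version)
+}
+```
+
+With the official `mysql` command-line client: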
+ +``` +mysql -h 127.0.0.1 -P 4000 -u root -D test --prompt="tidb> " +``` diff --git a/docs/architecture.png b/docs/architecture.png new file mode 100644 index 0000000..51d4f57 Binary files /dev/null and b/docs/architecture.png differ diff --git a/docs/logo.png b/docs/logo.png new file mode 100644 index 0000000..762a135 Binary files /dev/null and b/docs/logo.png differ diff --git a/docs/logo_with_text.png b/docs/logo_with_text.png new file mode 100644 index 0000000..722bbf8 Binary files /dev/null and b/docs/logo_with_text.png differ diff --git a/docs/tidb_http_api.md b/docs/tidb_http_api.md new file mode 100644 index 0000000..be3b305 --- /dev/null +++ b/docs/tidb_http_api.md @@ -0,0 +1,441 @@ +# TiDB HTTP API + +`TiDBIP` is the ip of the TiDB server. `10080` is the default status port, and you can edit it in tidb.toml when starting the TiDB server. + +1. Get the current status of TiDB, including the connections, version and git_hash + + ```shell + curl http://{TiDBIP}:10080/status + ``` + + ```shell + $curl http://127.0.0.1:10080/status + { + "connections": 0, + "git_hash": "f572e33854e1c0f942f031e9656d0004f99995c6", + "version": "5.7.25-TiDB-v2.1.0-rc.3-355-gf572e3385-dirty" + } + ``` + + + + ```shell + curl http://{TiDBIP}:10080/metrics + ``` + +1. Get the metadata of all regions + + ```shell + curl http://{TiDBIP}:10080/regions/meta + ``` + + ```shell + $curl http://127.0.0.1:10080/regions/meta + [ + { + "leader": { + "id": 5, + "store_id": 1 + }, + "peers": [ + { + "id": 5, + "store_id": 1 + } + ], + "region_epoch": { + "conf_ver": 1, + "version": 2 + }, + "region_id": 4 + } + ] + ``` + +1. Get the table/index of hot regions + + ```shell + curl http://{TiDBIP}:10080/regions/hot + ``` + + ```shell + $curl http://127.0.0.1:10080/regions/hot + { + "read": [ + + ], + "write": [ + { + "db_name": "sbtest1", + "table_name": "sbtest13", + "index_name": "", + "flow_bytes": 220718, + "max_hot_degree": 12, + "region_count": 1 + } + ] + } + ``` + +1. Get the information of a specific region by ID + + ```shell + curl http://{TiDBIP}:10080/regions/{regionID} + ``` + + ```shell + $curl http://127.0.0.1:10080/regions/4001 + { + "end_key": "dIAAAAAAAAEk", + "frames": [ + { + "db_name": "test", + "is_record": true, + "table_id": 286, + "table_name": "t1" + } + ], + "region_id": 4001, + "start_key": "dIAAAAAAAAEe" + } + ``` + +1. Get regions Information from db.table + + ```shell + curl http://{TiDBIP}:10080/tables/{db}/{table}/regions + ``` + + ```shell + $curl http://127.0.0.1:10080/tables/test/t1/regions + { + "id": 286, + "indices": [], + "name": "t1", + "record_regions": [ + { + "leader": { + "id": 4002, + "store_id": 1 + }, + "peers": [ + { + "id": 4002, + "store_id": 1 + } + ], + "region_epoch": { + "conf_ver": 1, + "version": 83 + }, + "region_id": 4001 + } + ] + } + ``` + +1. Get schema Information about all db + + ```shell + curl http://{TiDBIP}:10080/schema + ``` + + ```shell + $curl http://127.0.0.1:10080/schema + [ + { + "charset": "utf8mb4", + "collate": "utf8mb4_bin", + "db_name": { + "L": "test", + "O": "test" + }, + "id": 266, + "state": 5 + }, + . + . + . + ] + ``` + +1. Get schema Information about db + + ```shell + curl http://{TiDBIP}:10080/schema/{db} + ``` + +1. Get schema Information about db.table, and you can get schema info by tableID (tableID is the **unique** identifier of table in TiDB) + + ```shell + curl http://{TiDBIP}:10080/schema/{db}/{table} + + curl http://{TiDBIP}:10080/schema?table_id={tableID} + ``` + +1. 
Get database information, table information and tidb info schema version by tableID. + + ```shell + curl http://{TiDBIP}:10080/db-table/{tableID} + ``` + +1. Get MVCC Information of the key with a specified handle ID + + ```shell + curl http://{TiDBIP}:10080/mvcc/key/{db}/{table}/{handle} + ``` + + ```shell + $curl http://127.0.0.1:10080/mvcc/key/test/t1/1 + { + "info": { + "writes": [ + { + "commit_ts": 405179368526053380, + "short_value": "CAICAkE=", + "start_ts": 405179368526053377 + } + ] + } + } + ``` + +1. Get MVCC Information of the first key in the table with a specified start ts + + ```shell + curl http://{TiDBIP}:10080/mvcc/txn/{startTS}/{db}/{table} + ``` + + ```shell + $curl http://127.0.0.1:10080/mvcc/txn/405179368526053377/test/t1 + { + "info": { + "writes": [ + { + "commit_ts": 405179368526053380, + "short_value": "CAICAkE=", + "start_ts": 405179368526053377 + } + ] + }, + "key": "dIAAAAAAAAEzX3KAAAAAAAAAAQ==" + } + ``` + +1. Get MVCC Information by a hex value + + ```shell + curl http://{TiDBIP}:10080/mvcc/hex/{hexKey} + ``` + +1. Get MVCC Information of a specified index key, argument example: column_name_1=column_value_1&column_name_2=column_value2... + + ```shell + curl http://{TiDBIP}:10080/mvcc/index/{db}/{table}/{index}/{handle}?${c1}={v1}&${c2}=${v2} + ``` + + *Hint: For the index column which column type is timezone dependent, e.g. `timestamp`, convert its value to UTC +timezone.* + + ```shell + $curl http://127.0.0.1:10080/mvcc/index/test/t1/idx/1\?a\=A + { + "info": { + "writes": [ + { + "commit_ts": 405179523374252037, + "short_value": "MA==", + "start_ts": 405179523374252036 + } + ] + } + } + ``` + +1. Scatter regions of the specified table, add a `scatter-range` scheduler for the PD and the range is same as the table range. + + ```shell + curl -X POST http://{TiDBIP}:10080/tables/{db}/{table}/scatter + ``` + + **Note**: The `scatter-range` scheduler may conflict with the global scheduler, do not use it for long periods on the larger table. + +1. Stop scatter the regions, disable the `scatter-range` scheduler for the specified table. + + ```shell + curl -X POST http://{TiDBIP}:10080/tables/{db}/{table}/stop-scatter + ``` + +1. Get TiDB server settings + + ```shell + curl http://{TiDBIP}:10080/settings + ``` + +1. Get TiDB server information. + + ```shell + curl http://{TiDBIP}:10080/info + ``` + + ```shell + $curl http://127.0.0.1:10080/info + { + "ddl_id": "f7e73ed5-63b4-4cb4-ba7c-42b32dc74e77", + "git_hash": "f572e33854e1c0f942f031e9656d0004f99995c6", + "ip": "", + "is_owner": true, + "lease": "45s", + "listening_port": 4000, + "status_port": 10080, + "version": "5.7.25-TiDB-v2.1.0-rc.3-355-gf572e3385-dirty" + } + ``` + +1. Get TiDB cluster all servers information. 
+ + ```shell + curl http://{TiDBIP}:10080/info/all + ``` + + ```shell + $curl http://127.0.0.1:10080/info/all + { + "servers_num": 2, + "owner_id": "29a65ec0-d931-4f9e-a212-338eaeffab96", + "is_all_server_version_consistent": true, + "all_servers_info": { + "29a65ec0-d931-4f9e-a212-338eaeffab96": { + "version": "5.7.25-TiDB-v4.0.0-alpha-669-g8f2a09a52-dirty", + "git_hash": "8f2a09a52fdcaf9d9bfd775d2c6023f363dc121e", + "ddl_id": "29a65ec0-d931-4f9e-a212-338eaeffab96", + "ip": "", + "listening_port": 4000, + "status_port": 10080, + "lease": "45s", + "binlog_status": "Off" + }, + "cd13c9eb-c3ee-4887-af9b-e64f3162d92c": { + "version": "5.7.25-TiDB-v4.0.0-alpha-669-g8f2a09a52-dirty", + "git_hash": "8f2a09a52fdcaf9d9bfd775d2c6023f363dc121e", + "ddl_id": "cd13c9eb-c3ee-4887-af9b-e64f3162d92c", + "ip": "", + "listening_port": 4001, + "status_port": 10081, + "lease": "45s", + "binlog_status": "Off" + } + } + } + ``` + +1. Enable/Disable TiDB server general log + + ```shell + curl -X POST -d "tidb_general_log=1" http://{TiDBIP}:10080/settings + curl -X POST -d "tidb_general_log=0" http://{TiDBIP}:10080/settings + ``` + +1. Change TiDB server log level + + ```shell + curl -X POST -d "log_level=debug" http://{TiDBIP}:10080/settings + curl -X POST -d "log_level=info" http://{TiDBIP}:10080/settings + ``` + +1. Change TiDB DDL slow log threshold + + The unit is millisecond. + + ```shell + curl -X POST -d "ddl_slow_threshold=300" http://{TiDBIP}:10080/settings + ``` + +1. Get the column value by an encoded row and some information that can be obtained from a column of the table schema information. + + Argument example: rowBin=base64_encoded_row_value + + ```shell + curl http://{TiDBIP}:10080/tables/{colID}/{colFlag}/{colLen}?rowBin={val} + ``` + + *Hint: For the column which field type is timezone dependent, e.g. `timestamp`, convert its value to UTC timezone.* + +1. Resign the ddl owner, let tidb start a new ddl owner election. + + ```shell + curl -X POST http://{TiDBIP}:10080/ddl/owner/resign + ``` + +1. Get all TiDB DDL job history information. + + ```shell + curl http://{TiDBIP}:10080/ddl/history + ``` + +1. Get count {number} TiDB DDL job history information. + + ```shell + curl http://{TiDBIP}:10080/ddl/history?limit={number} + ``` + + **Note**: If you request a tidb that is not ddl owner, the response will be `This node is not a ddl owner, can't be resigned.` + +1. Download TiDB debug info + + ```shell + curl http://{TiDBIP}:10080/debug/zip?seconds=60 --output debug.zip + ``` + + zip file will include: + + - Go heap pprof(after GC) + - Go cpu pprof(10s) + - Go mutex pprof + - Full goroutine + - TiDB config and version + + Param: + + - seconds: profile time(s), default is 10s. + +1. Get statistics data of specified table. + + ```shell + curl http://{TiDBIP}:10080/stats/dump/{db}/{table} + ``` + +1. Get statistics data of specific table and timestamp. + + ```shell + curl http://{TiDBIP}:10080/stats/dump/{db}/{table}/{yyyyMMddHHmmss} + ``` + ```shell + curl http://{TiDBIP}:10080/stats/dump/{db}/{table}/{yyyy-MM-dd HH:mm:ss} + ``` + +1. Resume the binlog writing when Pump is recovered. 
+ + ```shell + curl http://{TiDBIP}:10080/binlog/recover + ``` + + Return value: + + * timeout, return status code: 400, message: `timeout` + * If it returns normally, status code: 200, message example: + ```text + { +   "Skipped": false, +   "SkippedCommitterCounter": 0 + } + ``` + `Skipped`: false indicates that the current binlog is not in the skipped state, otherwise, it is in the skipped state + `SkippedCommitterCounter`: Represents how many transactions are currently being committed in the skipped state. By default, the API will return after waiting until all skipped-binlog transactions are committed. If this value is greater than 0, it means that you need to wait until them are committed . + + Param: + + * op=nowait: return after binlog status is recoverd, do not wait until the skipped-binlog transactions are committed. + * op=reset: reset `SkippedCommitterCounter` to 0 to avoid the problem that `SkippedCommitterCounter` is not cleared due to some unusual cases. + * op=status: Get the current status of binlog recovery. + * seconds={num}: Specify the interface request timeout time in seconds. If not specified, the default is 1800 seconds. diff --git a/domain/domain.go b/domain/domain.go new file mode 100644 index 0000000..d3c6ab5 --- /dev/null +++ b/domain/domain.go @@ -0,0 +1,698 @@ +// Copyright 2015 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package domain + +import ( + "context" + "os" + "sync" + "sync/atomic" + "time" + "unsafe" + + "github.com/ngaut/pools" + "github.com/pingcap/errors" + "github.com/pingcap/failpoint" + "github.com/pingcap/tidb/ddl" + "github.com/pingcap/tidb/infoschema" + "github.com/pingcap/tidb/kv" + "github.com/pingcap/tidb/meta" + "github.com/pingcap/tidb/parser/model" + "github.com/pingcap/tidb/parser/mysql" + "github.com/pingcap/tidb/parser/terror" + "github.com/pingcap/tidb/sessionctx" + "github.com/pingcap/tidb/sessionctx/variable" + "github.com/pingcap/tidb/statistics" + "github.com/pingcap/tidb/store/tikv" + "github.com/pingcap/tidb/util" + "github.com/pingcap/tidb/util/logutil" + "go.etcd.io/etcd/clientv3" + "go.uber.org/zap" + "google.golang.org/grpc" + "google.golang.org/grpc/keepalive" +) + +// Domain represents a storage space. Different domains can use the same database name. +// Multiple domains can be used in parallel without synchronization. +type Domain struct { + store kv.Storage + infoHandle *infoschema.Handle + statsHandle unsafe.Pointer + statsLease time.Duration + ddl ddl.DDL + m sync.Mutex + SchemaValidator SchemaValidator + sysSessionPool *sessionPool + exit chan struct{} + etcdClient *clientv3.Client + gvc GlobalVariableCache + wg sync.WaitGroup +} + +// loadInfoSchema loads infoschema at startTS into handle, usedSchemaVersion is the currently used +// infoschema version, if it is the same as the schema version at startTS, we don't need to reload again. +// It returns the latest schema version, the changed table IDs, whether it's a full load and an error. 
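+// It first tries to apply only the schema diffs between usedSchemaVersion and the version at
+// startTS; when that is not possible (for example the used version is too old or a diff is
+// missing), it falls back to a full load of all schemas.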
+func (do *Domain) loadInfoSchema(handle *infoschema.Handle, usedSchemaVersion int64, startTS uint64) (int64, []int64, bool, error) { + var fullLoad bool + snapshot, err := do.store.GetSnapshot(kv.NewVersion(startTS)) + if err != nil { + return 0, nil, fullLoad, err + } + m := meta.NewSnapshotMeta(snapshot) + neededSchemaVersion, err := m.GetSchemaVersion() + if err != nil { + return 0, nil, fullLoad, err + } + if usedSchemaVersion != 0 && usedSchemaVersion == neededSchemaVersion { + return neededSchemaVersion, nil, fullLoad, nil + } + + // Update self schema version to etcd. + defer func() { + // There are two possibilities for not updating the self schema version to etcd. + // 1. Failed to loading schema information. + // 2. When users use history read feature, the neededSchemaVersion isn't the latest schema version. + if err != nil || neededSchemaVersion < do.InfoSchema().SchemaMetaVersion() { + logutil.BgLogger().Info("do not update self schema version to etcd", + zap.Int64("usedSchemaVersion", usedSchemaVersion), + zap.Int64("neededSchemaVersion", neededSchemaVersion), zap.Error(err)) + return + } + + err = do.ddl.SchemaSyncer().UpdateSelfVersion(context.Background(), neededSchemaVersion) + if err != nil { + logutil.BgLogger().Info("update self version failed", + zap.Int64("usedSchemaVersion", usedSchemaVersion), + zap.Int64("neededSchemaVersion", neededSchemaVersion), zap.Error(err)) + } + }() + + startTime := time.Now() + ok, tblIDs, err := do.tryLoadSchemaDiffs(m, usedSchemaVersion, neededSchemaVersion) + if err != nil { + // We can fall back to full load, don't need to return the error. + logutil.BgLogger().Error("failed to load schema diff", zap.Error(err)) + } + if ok { + logutil.BgLogger().Info("diff load InfoSchema success", + zap.Int64("usedSchemaVersion", usedSchemaVersion), + zap.Int64("neededSchemaVersion", neededSchemaVersion), + zap.Duration("start time", time.Since(startTime)), + zap.Int64s("tblIDs", tblIDs)) + return neededSchemaVersion, tblIDs, fullLoad, nil + } + + fullLoad = true + schemas, err := do.fetchAllSchemasWithTables(m) + if err != nil { + return 0, nil, fullLoad, err + } + + newISBuilder, err := infoschema.NewBuilder(handle).InitWithDBInfos(schemas, neededSchemaVersion) + if err != nil { + return 0, nil, fullLoad, err + } + logutil.BgLogger().Info("full load InfoSchema success", + zap.Int64("usedSchemaVersion", usedSchemaVersion), + zap.Int64("neededSchemaVersion", neededSchemaVersion), + zap.Duration("start time", time.Since(startTime))) + newISBuilder.Build() + return neededSchemaVersion, nil, fullLoad, nil +} + +func (do *Domain) fetchAllSchemasWithTables(m *meta.Meta) ([]*model.DBInfo, error) { + allSchemas, err := m.ListDatabases() + if err != nil { + return nil, err + } + splittedSchemas := do.splitForConcurrentFetch(allSchemas) + doneCh := make(chan error, len(splittedSchemas)) + for _, schemas := range splittedSchemas { + go do.fetchSchemasWithTables(schemas, m, doneCh) + } + for range splittedSchemas { + err = <-doneCh + if err != nil { + return nil, err + } + } + return allSchemas, nil +} + +const fetchSchemaConcurrency = 8 + +func (do *Domain) splitForConcurrentFetch(schemas []*model.DBInfo) [][]*model.DBInfo { + groupSize := (len(schemas) + fetchSchemaConcurrency - 1) / fetchSchemaConcurrency + splitted := make([][]*model.DBInfo, 0, fetchSchemaConcurrency) + schemaCnt := len(schemas) + for i := 0; i < schemaCnt; i += groupSize { + end := i + groupSize + if end > schemaCnt { + end = schemaCnt + } + splitted = append(splitted, schemas[i:end]) + } 
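+ // The final group may contain fewer than groupSize schemas.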
+ return splitted +} + +func (do *Domain) fetchSchemasWithTables(schemas []*model.DBInfo, m *meta.Meta, done chan error) { + for _, di := range schemas { + if di.State != model.StatePublic { + // schema is not public, can't be used outside. + continue + } + tables, err := m.ListTables(di.ID) + if err != nil { + done <- err + return + } + di.Tables = make([]*model.TableInfo, 0, len(tables)) + for _, tbl := range tables { + if tbl.State != model.StatePublic { + // schema is not public, can't be used outside. + continue + } + infoschema.ConvertCharsetCollateToLowerCaseIfNeed(tbl) + di.Tables = append(di.Tables, tbl) + } + } + done <- nil +} + +const ( + initialVersion = 0 + maxNumberOfDiffsToLoad = 100 +) + +func isTooOldSchema(usedVersion, newVersion int64) bool { + if usedVersion == initialVersion || newVersion-usedVersion > maxNumberOfDiffsToLoad { + return true + } + return false +} + +// tryLoadSchemaDiffs tries to only load latest schema changes. +// Return true if the schema is loaded successfully. +// Return false if the schema can not be loaded by schema diff, then we need to do full load. +// The second returned value is the delta updated table IDs. +func (do *Domain) tryLoadSchemaDiffs(m *meta.Meta, usedVersion, newVersion int64) (bool, []int64, error) { + // If there isn't any used version, or used version is too old, we do full load. + // And when users use history read feature, we will set usedVersion to initialVersion, then full load is needed. + if isTooOldSchema(usedVersion, newVersion) { + return false, nil, nil + } + var diffs []*model.SchemaDiff + for usedVersion < newVersion { + usedVersion++ + diff, err := m.GetSchemaDiff(usedVersion) + if err != nil { + return false, nil, err + } + if diff == nil { + // If diff is missing for any version between used and new version, we fall back to full reload. + return false, nil, nil + } + diffs = append(diffs, diff) + } + builder := infoschema.NewBuilder(do.infoHandle).InitWithOldInfoSchema() + tblIDs := make([]int64, 0, len(diffs)) + for _, diff := range diffs { + ids, err := builder.ApplyDiff(m, diff) + if err != nil { + return false, nil, err + } + tblIDs = append(tblIDs, ids...) + } + builder.Build() + return true, tblIDs, nil +} + +// InfoSchema gets information schema from domain. +func (do *Domain) InfoSchema() infoschema.InfoSchema { + return do.infoHandle.Get() +} + +// DDL gets DDL from domain. +func (do *Domain) DDL() ddl.DDL { + return do.ddl +} + +// Store gets KV store from domain. +func (do *Domain) Store() kv.Storage { + return do.store +} + +// GetScope gets the status variables scope. +func (do *Domain) GetScope(status string) variable.ScopeFlag { + // Now domain status variables scope are all default scope. + return variable.DefaultStatusVarScopeFlag +} + +// Reload reloads InfoSchema. +// It's public in order to do the test. +func (do *Domain) Reload() error { + failpoint.Inject("ErrorMockReloadFailed", func(val failpoint.Value) { + if val.(bool) { + failpoint.Return(errors.New("mock reload failed")) + } + }) + + // Lock here for only once at the same time. 
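+ // Concurrent Reload calls are serialized here, so the schema is loaded at most once at a time.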
+ do.m.Lock() + defer do.m.Unlock() + + startTime := time.Now() + + var err error + var neededSchemaVersion int64 + + ver, err := do.store.CurrentVersion() + if err != nil { + return err + } + + schemaVersion := int64(0) + oldInfoSchema := do.infoHandle.Get() + if oldInfoSchema != nil { + schemaVersion = oldInfoSchema.SchemaMetaVersion() + } + + var ( + fullLoad bool + changedTableIDs []int64 + ) + neededSchemaVersion, changedTableIDs, fullLoad, err = do.loadInfoSchema(do.infoHandle, schemaVersion, ver.Ver) + + if err != nil { + + return err + } + + if fullLoad { + logutil.BgLogger().Info("full load and reset schema validator") + do.SchemaValidator.Reset() + } + do.SchemaValidator.Update(ver.Ver, schemaVersion, neededSchemaVersion, changedTableIDs) + + lease := do.DDL().GetLease() + sub := time.Since(startTime) + // Reload interval is lease / 2, if load schema time elapses more than this interval, + // some query maybe responded by ErrInfoSchemaExpired error. + if sub > (lease/2) && lease > 0 { + logutil.BgLogger().Warn("loading schema takes a long time", zap.Duration("take time", sub)) + } + + return nil +} + +func (do *Domain) loadSchemaInLoop(lease time.Duration) { + defer do.wg.Done() + // Lease renewal can run at any frequency. + // Use lease/2 here as recommend by paper. + ticker := time.NewTicker(lease / 2) + defer ticker.Stop() + defer recoverInDomain("loadSchemaInLoop", true) + syncer := do.ddl.SchemaSyncer() + + for { + select { + case <-ticker.C: + err := do.Reload() + if err != nil { + logutil.BgLogger().Error("reload schema in loop failed", zap.Error(err)) + } + case _, ok := <-syncer.GlobalVersionCh(): + err := do.Reload() + if err != nil { + logutil.BgLogger().Error("reload schema in loop failed", zap.Error(err)) + } + if !ok { + logutil.BgLogger().Warn("reload schema in loop, schema syncer need rewatch") + // Make sure the rewatch doesn't affect load schema, so we watch the global schema version asynchronously. + syncer.WatchGlobalSchemaVer(context.Background()) + } + case <-syncer.Done(): + // The schema syncer stops, we need stop the schema validator to synchronize the schema version. + logutil.BgLogger().Info("reload schema in loop, schema syncer need restart") + // The etcd is responsible for schema synchronization, we should ensure there is at most two different schema version + // in the TiDB cluster, to make the data/schema be consistent. If we lost connection/session to etcd, the cluster + // will treats this TiDB as a down instance, and etcd will remove the key of `/tidb/ddl/all_schema_versions/tidb-id`. + // Say the schema version now is 1, the owner is changing the schema version to 2, it will not wait for this down TiDB syncing the schema, + // then continue to change the TiDB schema to version 3. Unfortunately, this down TiDB schema version will still be version 1. + // And version 1 is not consistent to version 3. So we need to stop the schema validator to prohibit the DML executing. + do.SchemaValidator.Stop() + err := do.mustRestartSyncer() + if err != nil { + logutil.BgLogger().Error("reload schema in loop, schema syncer restart failed", zap.Error(err)) + break + } + // The schema maybe changed, must reload schema then the schema validator can restart. + exitLoop := do.mustReload() + // domain is cosed. 
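+ // exitLoop is true only when the domain has been closed (see mustReload), so stop this loop.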
+ if exitLoop { + logutil.BgLogger().Error("domain is closed, exit loadSchemaInLoop") + return + } + do.SchemaValidator.Restart() + logutil.BgLogger().Info("schema syncer restarted") + case <-do.exit: + return + } + } +} + +// mustRestartSyncer tries to restart the SchemaSyncer. +// It returns until it's successful or the domain is stoped. +func (do *Domain) mustRestartSyncer() error { + ctx := context.Background() + syncer := do.ddl.SchemaSyncer() + + for { + err := syncer.Restart(ctx) + if err == nil { + return nil + } + // If the domain has stopped, we return an error immediately. + if do.isClose() { + return err + } + time.Sleep(time.Second) + logutil.BgLogger().Info("restart the schema syncer failed", zap.Error(err)) + } +} + +// mustReload tries to Reload the schema, it returns until it's successful or the domain is closed. +// it returns false when it is successful, returns true when the domain is closed. +func (do *Domain) mustReload() (exitLoop bool) { + for { + err := do.Reload() + if err == nil { + logutil.BgLogger().Info("mustReload succeed") + return false + } + + // If the domain is closed, we returns immediately. + logutil.BgLogger().Info("reload the schema failed", zap.Error(err)) + if do.isClose() { + return true + } + time.Sleep(200 * time.Millisecond) + } +} + +func (do *Domain) isClose() bool { + select { + case <-do.exit: + logutil.BgLogger().Info("domain is closed") + return true + default: + } + return false +} + +// Close closes the Domain and release its resource. +func (do *Domain) Close() { + startTime := time.Now() + if do.ddl != nil { + terror.Log(do.ddl.Stop()) + } + close(do.exit) + if do.etcdClient != nil { + terror.Log(errors.Trace(do.etcdClient.Close())) + } + do.sysSessionPool.Close() + do.wg.Wait() + logutil.BgLogger().Info("domain closed", zap.Duration("take time", time.Since(startTime))) +} + +type ddlCallback struct { + ddl.BaseCallback + do *Domain +} + +func (c *ddlCallback) OnChanged(err error) error { + if err != nil { + return err + } + logutil.BgLogger().Info("performing DDL change, must reload") + + err = c.do.Reload() + if err != nil { + logutil.BgLogger().Error("performing DDL change failed", zap.Error(err)) + } + + return nil +} + +const resourceIdleTimeout = 3 * time.Minute // resources in the ResourcePool will be recycled after idleTimeout + +// NewDomain creates a new domain. Should not create multiple domains for the same store. +func NewDomain(store kv.Storage, ddlLease time.Duration, statsLease time.Duration, factory pools.Factory) *Domain { + capacity := 200 // capacity of the sysSessionPool size + return &Domain{ + store: store, + SchemaValidator: NewSchemaValidator(ddlLease), + exit: make(chan struct{}), + sysSessionPool: newSessionPool(capacity, factory), + statsLease: statsLease, + infoHandle: infoschema.NewHandle(store), + } +} + +// Init initializes a domain. 
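+// It creates the etcd client when the store is etcd-backed, starts the DDL module, performs the
+// initial schema load, and, when ddlLease is greater than zero, starts the background goroutine
+// that keeps the information schema up to date.
+//
+// A minimal, illustrative bootstrap (store, factory and sysFactory are assumed to be provided by
+// the caller) looks like:
+//
+//    do := NewDomain(store, ddlLease, statsLease, factory)
+//    if err := do.Init(ddlLease, sysFactory); err != nil {
+//        // handle the error
+//    }
+//    defer do.Close()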
+func (do *Domain) Init(ddlLease time.Duration, sysFactory func(*Domain) (pools.Resource, error)) error { + if ebd, ok := do.store.(tikv.EtcdBackend); ok { + if addrs := ebd.EtcdAddrs(); addrs != nil { + cli, err := clientv3.New(clientv3.Config{ + Endpoints: addrs, + AutoSyncInterval: 30 * time.Second, + DialTimeout: 5 * time.Second, + DialOptions: []grpc.DialOption{ + grpc.WithBackoffMaxDelay(time.Second * 3), + grpc.WithKeepaliveParams(keepalive.ClientParameters{ + Time: time.Duration(10) * time.Second, + Timeout: time.Duration(3) * time.Second, + PermitWithoutStream: true, + }), + }, + TLS: ebd.TLSConfig(), + }) + if err != nil { + return errors.Trace(err) + } + do.etcdClient = cli + } + } + + // TODO: Here we create new sessions with sysFac in DDL, + // which will use `do` as Domain instead of call `domap.Get`. + // That's because `domap.Get` requires a lock, but before + // we initialize Domain finish, we can't require that again. + // After we remove the lazy logic of creating Domain, we + // can simplify code here. + sysFac := func() (pools.Resource, error) { + return sysFactory(do) + } + sysCtxPool := pools.NewResourcePool(sysFac, 2, 2, resourceIdleTimeout) + ctx := context.Background() + callback := &ddlCallback{do: do} + d := do.ddl + do.ddl = ddl.NewDDL( + ctx, + ddl.WithEtcdClient(do.etcdClient), + ddl.WithStore(do.store), + ddl.WithInfoHandle(do.infoHandle), + ddl.WithHook(callback), + ddl.WithLease(ddlLease), + ddl.WithResourcePool(sysCtxPool), + ) + failpoint.Inject("MockReplaceDDL", func(val failpoint.Value) { + if val.(bool) { + if err := do.ddl.Stop(); err != nil { + logutil.BgLogger().Error("stop DDL failed", zap.Error(err)) + } + do.ddl = d + } + }) + + err := do.ddl.SchemaSyncer().Init(ctx) + if err != nil { + return err + } + err = do.Reload() + if err != nil { + return err + } + + // Only when the store is local that the lease value is 0. + // If the store is local, it doesn't need loadSchemaInLoop. + if ddlLease > 0 { + do.wg.Add(1) + // Local store needs to get the change information for every DDL state in each session. + go do.loadSchemaInLoop(ddlLease) + } + return nil +} + +type sessionPool struct { + resources chan pools.Resource + factory pools.Factory + mu struct { + sync.RWMutex + closed bool + } +} + +func newSessionPool(cap int, factory pools.Factory) *sessionPool { + return &sessionPool{ + resources: make(chan pools.Resource, cap), + factory: factory, + } +} + +func (p *sessionPool) Get() (resource pools.Resource, err error) { + var ok bool + select { + case resource, ok = <-p.resources: + if !ok { + err = errors.New("session pool closed") + } + default: + resource, err = p.factory() + } + return +} + +func (p *sessionPool) Put(resource pools.Resource) { + p.mu.RLock() + defer p.mu.RUnlock() + if p.mu.closed { + resource.Close() + return + } + + select { + case p.resources <- resource: + default: + resource.Close() + } +} + +func (p *sessionPool) Close() { + p.mu.Lock() + if p.mu.closed { + p.mu.Unlock() + return + } + p.mu.closed = true + close(p.resources) + p.mu.Unlock() + + for r := range p.resources { + r.Close() + } +} + +// SysSessionPool returns the system session pool. +func (do *Domain) SysSessionPool() *sessionPool { + return do.sysSessionPool +} + +// GetEtcdClient returns the etcd client. +func (do *Domain) GetEtcdClient() *clientv3.Client { + return do.etcdClient +} + +// StatsHandle returns the statistic handle. 
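+// The handle is stored behind an atomic pointer, so the getter is safe for concurrent use.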
+func (do *Domain) StatsHandle() *statistics.Handle { + return (*statistics.Handle)(atomic.LoadPointer(&do.statsHandle)) +} + +// CreateStatsHandle is used only for test. +func (do *Domain) CreateStatsHandle(ctx sessionctx.Context) { + atomic.StorePointer(&do.statsHandle, unsafe.Pointer(statistics.NewHandle(ctx, do.statsLease))) +} + +// UpdateTableStatsLoop creates a goroutine loads stats info and updates stats info in a loop. +// It will also start a goroutine to analyze tables automatically. +// It should be called only once in BootstrapSession. +func (do *Domain) UpdateTableStatsLoop(ctx sessionctx.Context) error { + ctx.GetSessionVars().InRestrictedSQL = true + statsHandle := statistics.NewHandle(ctx, do.statsLease) + atomic.StorePointer(&do.statsHandle, unsafe.Pointer(statsHandle)) + if err := statsHandle.Update(do.InfoSchema()); err != nil { + return err + } + // Negative stats lease indicates that it is in test, it does not need update. + if do.statsLease >= 0 { + do.wg.Add(1) + go do.loadStatsWorker() + } + return nil +} + +func (do *Domain) loadStatsWorker() { + defer recoverInDomain("loadStatsWorker", false) + defer do.wg.Done() + lease := do.statsLease + if lease == 0 { + lease = 3 * time.Second + } + loadTicker := time.NewTicker(lease) + defer loadTicker.Stop() + statsHandle := do.StatsHandle() + for { + select { + case <-loadTicker.C: + err := statsHandle.Update(do.InfoSchema()) + if err != nil { + logutil.BgLogger().Debug("update stats info failed", zap.Error(err)) + } + case <-do.exit: + return + } + } +} + +func recoverInDomain(funcName string, quit bool) { + r := recover() + if r == nil { + return + } + buf := util.GetStack() + logutil.BgLogger().Error("recover in domain failed", zap.String("funcName", funcName), + zap.Any("error", r), zap.String("buffer", string(buf))) + + if quit { + time.Sleep(time.Second * 15) + os.Exit(1) + } +} + +var ( + // ErrInfoSchemaExpired returns the error that information schema is out of date. + ErrInfoSchemaExpired = terror.ClassDomain.New(mysql.ErrInfoSchemaExpired, mysql.MySQLErrName[mysql.ErrInfoSchemaExpired]) + // ErrInfoSchemaChanged returns the error that information schema is changed. + ErrInfoSchemaChanged = terror.ClassDomain.New(mysql.ErrInfoSchemaChanged, + mysql.MySQLErrName[mysql.ErrInfoSchemaChanged]+". "+kv.TxnRetryableMark) +) + +func init() { + // Map error codes to mysql error codes. + domainMySQLErrCodes := map[terror.ErrCode]uint16{ + mysql.ErrInfoSchemaExpired: mysql.ErrInfoSchemaExpired, + mysql.ErrInfoSchemaChanged: mysql.ErrInfoSchemaChanged, + } + terror.ErrClassToMySQLCodes[terror.ClassDomain] = domainMySQLErrCodes +} diff --git a/domain/domain_test.go b/domain/domain_test.go new file mode 100644 index 0000000..4049a6b --- /dev/null +++ b/domain/domain_test.go @@ -0,0 +1,71 @@ +// Copyright 2015 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package domain + +import ( + "github.com/ngaut/pools" + . 
"github.com/pingcap/check" + "github.com/pingcap/errors" + "github.com/pingcap/tidb/parser/mysql" + "testing" +) + +func TestT(t *testing.T) { + CustomVerboseFlag = true + TestingT(t) +} + +var _ = Suite(&testSuite{}) + +type testSuite struct { +} + +func mockFactory() (pools.Resource, error) { + return nil, errors.New("mock factory should not be called") +} + +func sysMockFactory(dom *Domain) (pools.Resource, error) { + return nil, nil +} + +type testResource struct { + status int +} + +func (tr *testResource) Close() { tr.status = 1 } + +func (*testSuite) TestSessionPool(c *C) { + f := func() (pools.Resource, error) { return &testResource{}, nil } + pool := newSessionPool(1, f) + tr, err := pool.Get() + c.Assert(err, IsNil) + tr1, err := pool.Get() + c.Assert(err, IsNil) + pool.Put(tr) + // Capacity is 1, so tr1 is closed. + pool.Put(tr1) + c.Assert(tr1.(*testResource).status, Equals, 1) + pool.Close() + + pool.Close() + pool.Put(tr1) + tr, err = pool.Get() + c.Assert(err.Error(), Equals, "session pool closed") + c.Assert(tr, IsNil) +} + +func (*testSuite) TestErrorCode(c *C) { + c.Assert(int(ErrInfoSchemaExpired.ToSQLError().Code), Equals, mysql.ErrInfoSchemaExpired) + c.Assert(int(ErrInfoSchemaChanged.ToSQLError().Code), Equals, mysql.ErrInfoSchemaChanged) +} diff --git a/domain/domainctx.go b/domain/domainctx.go new file mode 100644 index 0000000..ac49ac0 --- /dev/null +++ b/domain/domainctx.go @@ -0,0 +1,40 @@ +// Copyright 2015 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package domain + +import "github.com/pingcap/tidb/sessionctx" + +// domainKeyType is a dummy type to avoid naming collision in context. +type domainKeyType int + +// String defines a Stringer function for debugging and pretty printing. +func (k domainKeyType) String() string { + return "domain" +} + +const domainKey domainKeyType = 0 + +// BindDomain binds domain to context. +func BindDomain(ctx sessionctx.Context, domain *Domain) { + ctx.SetValue(domainKey, domain) +} + +// GetDomain gets domain from context. +func GetDomain(ctx sessionctx.Context) *Domain { + v, ok := ctx.Value(domainKey).(*Domain) + if !ok { + return nil + } + return v +} diff --git a/domain/domainctx_test.go b/domain/domainctx_test.go new file mode 100644 index 0000000..c510b23 --- /dev/null +++ b/domain/domainctx_test.go @@ -0,0 +1,40 @@ +// Copyright 2015 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package domain + +import ( + . 
"github.com/pingcap/check" + "github.com/pingcap/tidb/util/mock" + "github.com/pingcap/tidb/util/testleak" +) + +var _ = Suite(&testDomainCtxSuite{}) + +type testDomainCtxSuite struct { +} + +func (s *testDomainCtxSuite) TestDomain(c *C) { + defer testleak.AfterTest(c)() + ctx := mock.NewContext() + + c.Assert(domainKey.String(), Not(Equals), "") + + BindDomain(ctx, nil) + v := GetDomain(ctx) + c.Assert(v, IsNil) + + ctx.ClearValue(domainKey) + v = GetDomain(ctx) + c.Assert(v, IsNil) +} diff --git a/domain/global_vars_cache.go b/domain/global_vars_cache.go new file mode 100644 index 0000000..2a6090d --- /dev/null +++ b/domain/global_vars_cache.go @@ -0,0 +1,68 @@ +// Copyright 2018 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package domain + +import ( + "sync" + "time" + + "github.com/pingcap/tidb/parser/ast" + "github.com/pingcap/tidb/util/chunk" +) + +// GlobalVariableCache caches global variables. +type GlobalVariableCache struct { + sync.RWMutex + lastModify time.Time + rows []chunk.Row + fields []*ast.ResultField + + // Unit test may like to disable it. + disable bool +} + +const globalVariableCacheExpiry time.Duration = 2 * time.Second + +// Update updates the global variable cache. +func (gvc *GlobalVariableCache) Update(rows []chunk.Row, fields []*ast.ResultField) { + gvc.Lock() + gvc.lastModify = time.Now() + gvc.rows = rows + gvc.fields = fields + gvc.Unlock() +} + +// Get gets the global variables from cache. +func (gvc *GlobalVariableCache) Get() (succ bool, rows []chunk.Row, fields []*ast.ResultField) { + gvc.RLock() + defer gvc.RUnlock() + if time.Since(gvc.lastModify) < globalVariableCacheExpiry { + succ, rows, fields = !gvc.disable, gvc.rows, gvc.fields + return + } + succ = false + return +} + +// Disable disables the global variabe cache, used in test only. +func (gvc *GlobalVariableCache) Disable() { + gvc.Lock() + defer gvc.Unlock() + gvc.disable = true +} + +// GetGlobalVarsCache gets the global variable cache. +func (do *Domain) GetGlobalVarsCache() *GlobalVariableCache { + return &do.gvc +} diff --git a/domain/global_vars_cache_test.go b/domain/global_vars_cache_test.go new file mode 100644 index 0000000..398d3de --- /dev/null +++ b/domain/global_vars_cache_test.go @@ -0,0 +1,98 @@ +// Copyright 2019 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package domain + +import ( + "time" + + . 
"github.com/pingcap/check" + "github.com/pingcap/tidb/parser/ast" + "github.com/pingcap/tidb/parser/charset" + "github.com/pingcap/tidb/parser/model" + "github.com/pingcap/tidb/parser/mysql" + "github.com/pingcap/tidb/store/mockstore" + "github.com/pingcap/tidb/types" + "github.com/pingcap/tidb/util/chunk" + "github.com/pingcap/tidb/util/testleak" +) + +var _ = Suite(&testGVCSuite{}) + +type testGVCSuite struct{} + +func (gvcSuite *testGVCSuite) TestSimple(c *C) { + defer testleak.AfterTest(c)() + testleak.BeforeTest() + + store, err := mockstore.NewMockTikvStore() + c.Assert(err, IsNil) + defer store.Close() + ddlLease := 50 * time.Millisecond + dom := NewDomain(store, ddlLease, 0, mockFactory) + err = dom.Init(ddlLease, sysMockFactory) + c.Assert(err, IsNil) + defer dom.Close() + + // Get empty global vars cache. + gvc := dom.GetGlobalVarsCache() + succ, rows, fields := gvc.Get() + c.Assert(succ, IsFalse) + c.Assert(rows, IsNil) + c.Assert(fields, IsNil) + // Get a variable from global vars cache. + rf := getResultField("c", 1, 0) + rf1 := getResultField("c1", 2, 1) + ft := &types.FieldType{ + Tp: mysql.TypeString, + Charset: charset.CharsetBin, + Collate: charset.CollationBin, + } + ft1 := &types.FieldType{ + Tp: mysql.TypeString, + Charset: charset.CharsetBin, + Collate: charset.CollationBin, + } + ck := chunk.NewChunkWithCapacity([]*types.FieldType{ft, ft1}, 1024) + ck.AppendString(0, "variable1") + ck.AppendString(1, "value1") + row := ck.GetRow(0) + gvc.Update([]chunk.Row{row}, []*ast.ResultField{rf, rf1}) + succ, rows, fields = gvc.Get() + c.Assert(succ, IsTrue) + c.Assert(rows[0], Equals, row) + c.Assert(fields, DeepEquals, []*ast.ResultField{rf, rf1}) + // Disable the cache. + gvc.Disable() + succ, rows, fields = gvc.Get() + c.Assert(succ, IsFalse) + c.Assert(rows[0], Equals, row) + c.Assert(fields, DeepEquals, []*ast.ResultField{rf, rf1}) +} + +func getResultField(colName string, id, offset int) *ast.ResultField { + return &ast.ResultField{ + Column: &model.ColumnInfo{ + Name: model.NewCIStr(colName), + ID: int64(id), + Offset: offset, + FieldType: types.FieldType{ + Tp: mysql.TypeString, + Charset: charset.CharsetUTF8, + Collate: charset.CollationUTF8, + }, + }, + TableAsName: model.NewCIStr("tbl"), + DBName: model.NewCIStr("test"), + } +} diff --git a/domain/schema_checker.go b/domain/schema_checker.go new file mode 100644 index 0000000..58d130d --- /dev/null +++ b/domain/schema_checker.go @@ -0,0 +1,63 @@ +// Copyright 2018 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package domain + +import ( + "sync/atomic" + "time" +) + +// SchemaChecker is used for checking schema-validity. +type SchemaChecker struct { + SchemaValidator + schemaVer int64 + relatedTableIDs []int64 +} + +var ( + // SchemaOutOfDateRetryInterval is the backoff time before retrying. + SchemaOutOfDateRetryInterval = int64(500 * time.Millisecond) + // SchemaOutOfDateRetryTimes is the max retry count when the schema is out of date. + SchemaOutOfDateRetryTimes = int32(10) +) + +// NewSchemaChecker creates a new schema checker. 
+func NewSchemaChecker(do *Domain, schemaVer int64, relatedTableIDs []int64) *SchemaChecker { + return &SchemaChecker{ + SchemaValidator: do.SchemaValidator, + schemaVer: schemaVer, + relatedTableIDs: relatedTableIDs, + } +} + +// Check checks the validity of the schema version. +func (s *SchemaChecker) Check(txnTS uint64) error { + schemaOutOfDateRetryInterval := atomic.LoadInt64(&SchemaOutOfDateRetryInterval) + schemaOutOfDateRetryTimes := int(atomic.LoadInt32(&SchemaOutOfDateRetryTimes)) + for i := 0; i < schemaOutOfDateRetryTimes; i++ { + result := s.SchemaValidator.Check(txnTS, s.schemaVer, s.relatedTableIDs) + switch result { + case ResultSucc: + return nil + case ResultFail: + + return ErrInfoSchemaChanged + case ResultUnknown: + time.Sleep(time.Duration(schemaOutOfDateRetryInterval)) + } + + } + + return ErrInfoSchemaExpired +} diff --git a/domain/schema_checker_test.go b/domain/schema_checker_test.go new file mode 100644 index 0000000..02dba12 --- /dev/null +++ b/domain/schema_checker_test.go @@ -0,0 +1,64 @@ +// Copyright 2018 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package domain + +import ( + "time" + + . "github.com/pingcap/check" + "github.com/pingcap/tidb/parser/terror" +) + +func (s *testSuite) TestSchemaCheckerSimple(c *C) { + lease := 5 * time.Millisecond + validator := NewSchemaValidator(lease) + checker := &SchemaChecker{SchemaValidator: validator} + + // Add some schema versions and delta table IDs. + ts := uint64(time.Now().UnixNano()) + validator.Update(ts, 0, 2, []int64{1}) + validator.Update(ts, 2, 4, []int64{2}) + + // checker's schema version is the same as the current schema version. + checker.schemaVer = 4 + err := checker.Check(ts) + c.Assert(err, IsNil) + + // checker's schema version is less than the current schema version, and it doesn't exist in validator's items. + // checker's related table ID isn't in validator's changed table IDs. + checker.schemaVer = 2 + checker.relatedTableIDs = []int64{3} + err = checker.Check(ts) + c.Assert(err, IsNil) + // The checker's schema version isn't in validator's items. + checker.schemaVer = 1 + checker.relatedTableIDs = []int64{3} + err = checker.Check(ts) + c.Assert(terror.ErrorEqual(err, ErrInfoSchemaChanged), IsTrue) + // checker's related table ID is in validator's changed table IDs. + checker.relatedTableIDs = []int64{2} + err = checker.Check(ts) + c.Assert(terror.ErrorEqual(err, ErrInfoSchemaChanged), IsTrue) + + // validator's latest schema version is expired. + time.Sleep(lease + time.Microsecond) + checker.schemaVer = 4 + checker.relatedTableIDs = []int64{3} + err = checker.Check(ts) + c.Assert(err, IsNil) + nowTS := uint64(time.Now().UnixNano()) + // Use checker.SchemaValidator.Check instead of checker.Check here because backoff make CI slow. 
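+ // Once the lease has expired, the validator returns ResultUnknown immediately, whereas
+ // checker.Check would sleep and retry several times before giving up.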
+ result := checker.SchemaValidator.Check(nowTS, checker.schemaVer, checker.relatedTableIDs) + c.Assert(result, Equals, ResultUnknown) +} diff --git a/domain/schema_validator.go b/domain/schema_validator.go new file mode 100644 index 0000000..3338dd0 --- /dev/null +++ b/domain/schema_validator.go @@ -0,0 +1,274 @@ +// Copyright 2016 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package domain + +import ( + "sync" + "time" + + "github.com/pingcap/tidb/sessionctx/variable" + "github.com/pingcap/tidb/store/tikv/oracle" + "github.com/pingcap/tidb/util/logutil" + "go.uber.org/zap" +) + +type checkResult int + +const ( + // ResultSucc means schemaValidator's check is passing. + ResultSucc checkResult = iota + // ResultFail means schemaValidator's check is fail. + ResultFail + // ResultUnknown means schemaValidator doesn't know the check would be success or fail. + ResultUnknown +) + +// SchemaValidator is the interface for checking the validity of schema version. +type SchemaValidator interface { + // Update the schema validator, add a new item, delete the expired deltaSchemaInfos. + // The latest schemaVer is valid within leaseGrantTime plus lease duration. + // Add the changed table IDs to the new schema information, + // which is produced when the oldSchemaVer is updated to the newSchemaVer. + Update(leaseGrantTime uint64, oldSchemaVer, newSchemaVer int64, changedTableIDs []int64) + // Check is it valid for a transaction to use schemaVer and related tables, at timestamp txnTS. + Check(txnTS uint64, schemaVer int64, relatedTableIDs []int64) checkResult + // Stop stops checking the valid of transaction. + Stop() + // Restart restarts the schema validator after it is stopped. + Restart() + // Reset resets SchemaValidator to initial state. + Reset() + // IsStarted indicates whether SchemaValidator is started. + IsStarted() bool +} + +type deltaSchemaInfo struct { + schemaVersion int64 + relatedTableIDs []int64 +} + +type schemaValidator struct { + isStarted bool + mux sync.RWMutex + lease time.Duration + latestSchemaVer int64 + latestSchemaExpire time.Time + // deltaSchemaInfos is a queue that maintain the history of changes. + deltaSchemaInfos []deltaSchemaInfo +} + +// NewSchemaValidator returns a SchemaValidator structure. 
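+// The returned validator starts in the started state; its delta queue is bounded by
+// variable.GetMaxDeltaSchemaCount() (see enqueue).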
+func NewSchemaValidator(lease time.Duration) SchemaValidator { + return &schemaValidator{ + isStarted: true, + lease: lease, + deltaSchemaInfos: make([]deltaSchemaInfo, 0, variable.DefTiDBMaxDeltaSchemaCount), + } +} + +func (s *schemaValidator) IsStarted() bool { + s.mux.RLock() + isStarted := s.isStarted + s.mux.RUnlock() + return isStarted +} + +func (s *schemaValidator) LatestSchemaVersion() int64 { + s.mux.RLock() + latestSchemaVer := s.latestSchemaVer + s.mux.RUnlock() + return latestSchemaVer +} + +func (s *schemaValidator) Stop() { + logutil.BgLogger().Info("the schema validator stops") + + s.mux.Lock() + defer s.mux.Unlock() + s.isStarted = false + s.latestSchemaVer = 0 + s.deltaSchemaInfos = s.deltaSchemaInfos[:0] +} + +func (s *schemaValidator) Restart() { + + logutil.BgLogger().Info("the schema validator restarts") + s.mux.Lock() + defer s.mux.Unlock() + s.isStarted = true +} + +func (s *schemaValidator) Reset() { + + s.mux.Lock() + defer s.mux.Unlock() + s.isStarted = true + s.latestSchemaVer = 0 + s.deltaSchemaInfos = s.deltaSchemaInfos[:0] +} + +func (s *schemaValidator) Update(leaseGrantTS uint64, oldVer, currVer int64, changedTableIDs []int64) { + s.mux.Lock() + defer s.mux.Unlock() + + if !s.isStarted { + logutil.BgLogger().Info("the schema validator stopped before updating") + return + } + + // Renew the lease. + s.latestSchemaVer = currVer + leaseGrantTime := oracle.GetTimeFromTS(leaseGrantTS) + leaseExpire := leaseGrantTime.Add(s.lease - time.Millisecond) + s.latestSchemaExpire = leaseExpire + + // Update the schema deltaItem information. + if currVer != oldVer { + logutil.BgLogger().Debug("update schema validator", zap.Int64("oldVer", oldVer), + zap.Int64("currVer", currVer), zap.Int64s("changedTableIDs", changedTableIDs)) + s.enqueue(currVer, changedTableIDs) + } +} + +func hasRelatedTableID(relatedTableIDs, updateTableIDs []int64) bool { + for _, tblID := range updateTableIDs { + for _, relatedTblID := range relatedTableIDs { + if tblID == relatedTblID { + return true + } + } + } + return false +} + +// isRelatedTablesChanged returns the result whether relatedTableIDs is changed +// from usedVer to the latest schema version. +// NOTE, this function should be called under lock! +func (s *schemaValidator) isRelatedTablesChanged(currVer int64, tableIDs []int64) bool { + if len(s.deltaSchemaInfos) == 0 { + + logutil.BgLogger().Info("schema change history is empty", zap.Int64("currVer", currVer)) + return true + } + newerDeltas := s.findNewerDeltas(currVer) + if len(newerDeltas) == len(s.deltaSchemaInfos) { + + logutil.BgLogger().Info("the schema version is much older than the latest version", zap.Int64("currVer", currVer), + zap.Int64("latestSchemaVer", s.latestSchemaVer), zap.Reflect("deltas", newerDeltas)) + return true + } + for _, item := range newerDeltas { + if hasRelatedTableID(item.relatedTableIDs, tableIDs) { + return true + } + } + return false +} + +func (s *schemaValidator) findNewerDeltas(currVer int64) []deltaSchemaInfo { + q := s.deltaSchemaInfos + pos := len(q) + for i := len(q) - 1; i >= 0 && q[i].schemaVersion > currVer; i-- { + pos = i + } + return q[pos:] +} + +// Check checks schema validity, returns true if use schemaVer and related tables at txnTS is legal. 
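+// ResultSucc means the transaction may proceed, ResultFail means one of the related tables has
+// changed since schemaVer and the transaction must retry, and ResultUnknown means the validator
+// cannot decide, for example because it is stopped or the schema lease has expired.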
+func (s *schemaValidator) Check(txnTS uint64, schemaVer int64, relatedTableIDs []int64) checkResult { + s.mux.RLock() + defer s.mux.RUnlock() + if !s.isStarted { + logutil.BgLogger().Info("the schema validator stopped before checking") + return ResultUnknown + } + if s.lease == 0 { + return ResultSucc + } + + // Schema changed, result decided by whether related tables change. + if schemaVer < s.latestSchemaVer { + // The DDL relatedTableIDs is empty. + if len(relatedTableIDs) == 0 { + logutil.BgLogger().Info("the related table ID is empty", zap.Int64("schemaVer", schemaVer), + zap.Int64("latestSchemaVer", s.latestSchemaVer)) + return ResultFail + } + + if s.isRelatedTablesChanged(schemaVer, relatedTableIDs) { + return ResultFail + } + return ResultSucc + } + + // Schema unchanged, maybe success or the schema validator is unavailable. + t := oracle.GetTimeFromTS(txnTS) + if t.After(s.latestSchemaExpire) { + return ResultUnknown + } + return ResultSucc +} + +func (s *schemaValidator) enqueue(schemaVersion int64, relatedTableIDs []int64) { + maxCnt := int(variable.GetMaxDeltaSchemaCount()) + if maxCnt <= 0 { + logutil.BgLogger().Info("the schema validator enqueue", zap.Int("delta max count", maxCnt)) + return + } + + delta := deltaSchemaInfo{schemaVersion, relatedTableIDs} + if len(s.deltaSchemaInfos) == 0 { + s.deltaSchemaInfos = append(s.deltaSchemaInfos, delta) + return + } + + lastOffset := len(s.deltaSchemaInfos) - 1 + // The first item we needn't to merge, because we hope to cover more versions. + if lastOffset != 0 && ids(s.deltaSchemaInfos[lastOffset].relatedTableIDs).containIn(delta.relatedTableIDs) { + s.deltaSchemaInfos[lastOffset] = delta + } else { + s.deltaSchemaInfos = append(s.deltaSchemaInfos, delta) + } + + if len(s.deltaSchemaInfos) > maxCnt { + logutil.BgLogger().Info("the schema validator enqueue, queue is too long", + zap.Int("delta max count", maxCnt), zap.Int64("remove schema version", s.deltaSchemaInfos[0].schemaVersion)) + s.deltaSchemaInfos = s.deltaSchemaInfos[1:] + } +} + +type ids []int64 + +// containIn is checks if a is included in b. +func (a ids) containIn(b []int64) bool { + if len(a) > len(b) { + return false + } + + var isEqual bool + for _, i := range a { + isEqual = false + for _, j := range b { + if i == j { + isEqual = true + break + } + } + if !isEqual { + return false + } + } + + return true +} diff --git a/domain/schema_validator_test.go b/domain/schema_validator_test.go new file mode 100644 index 0000000..7ddc976 --- /dev/null +++ b/domain/schema_validator_test.go @@ -0,0 +1,205 @@ +// Copyright 2016 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package domain + +import ( + "math/rand" + "sync" + "time" + + . 
"github.com/pingcap/check" + "github.com/pingcap/tidb/sessionctx/variable" + "github.com/pingcap/tidb/store/tikv/oracle" + "github.com/pingcap/tidb/util/testleak" +) + +type leaseGrantItem struct { + leaseGrantTS uint64 + oldVer int64 + schemaVer int64 +} + +func (*testSuite) TestSchemaValidator(c *C) { + defer testleak.AfterTest(c)() + + lease := 10 * time.Millisecond + leaseGrantCh := make(chan leaseGrantItem) + oracleCh := make(chan uint64) + exit := make(chan struct{}) + var wg sync.WaitGroup + wg.Add(1) + go serverFunc(lease, leaseGrantCh, oracleCh, exit, &wg) + + validator := NewSchemaValidator(lease).(*schemaValidator) + c.Assert(validator.IsStarted(), IsTrue) + + for i := 0; i < 10; i++ { + delay := time.Duration(100+rand.Intn(900)) * time.Microsecond + time.Sleep(delay) + // Reload can run arbitrarily, at any time. + item := <-leaseGrantCh + validator.Update(item.leaseGrantTS, item.oldVer, item.schemaVer, nil) + } + + // Take a lease, check it's valid. + item := <-leaseGrantCh + validator.Update(item.leaseGrantTS, item.oldVer, item.schemaVer, []int64{10}) + valid := validator.Check(item.leaseGrantTS, item.schemaVer, []int64{10}) + c.Assert(valid, Equals, ResultSucc) + + // Stop the validator, validator's items value is nil. + validator.Stop() + c.Assert(validator.IsStarted(), IsFalse) + isTablesChanged := validator.isRelatedTablesChanged(item.schemaVer, []int64{10}) + c.Assert(isTablesChanged, IsTrue) + valid = validator.Check(item.leaseGrantTS, item.schemaVer, []int64{10}) + c.Assert(valid, Equals, ResultUnknown) + validator.Restart() + + // Increase the current time by 2 leases, check schema is invalid. + ts := uint64(time.Now().Add(2 * lease).UnixNano()) // Make sure that ts has timed out a lease. + valid = validator.Check(ts, item.schemaVer, []int64{10}) + c.Assert(valid, Equals, ResultUnknown, Commentf("validator latest schema ver %v, time %v, item schema ver %v, ts %v", + validator.latestSchemaVer, validator.latestSchemaExpire, 0, oracle.GetTimeFromTS(ts))) + // Make sure newItem's version is greater than item.schema. + newItem := getGreaterVersionItem(c, lease, leaseGrantCh, item.schemaVer) + currVer := newItem.schemaVer + validator.Update(newItem.leaseGrantTS, newItem.oldVer, currVer, nil) + valid = validator.Check(ts, item.schemaVer, nil) + c.Assert(valid, Equals, ResultFail, Commentf("currVer %d, newItem %v", currVer, item)) + valid = validator.Check(ts, item.schemaVer, []int64{0}) + c.Assert(valid, Equals, ResultFail, Commentf("currVer %d, newItem %v", currVer, item)) + // Check the latest schema version must changed. + c.Assert(item.schemaVer, Less, validator.latestSchemaVer) + + // Make sure newItem's version is greater than currVer. + newItem = getGreaterVersionItem(c, lease, leaseGrantCh, currVer) + // Update current schema version to newItem's version and the delta table IDs is 1, 2, 3. + validator.Update(ts, currVer, newItem.schemaVer, []int64{1, 2, 3}) + // Make sure the updated table IDs don't be covered with the same schema version. + validator.Update(ts, newItem.schemaVer, newItem.schemaVer, nil) + isTablesChanged = validator.isRelatedTablesChanged(currVer, nil) + c.Assert(isTablesChanged, IsFalse) + isTablesChanged = validator.isRelatedTablesChanged(currVer, []int64{2}) + c.Assert(isTablesChanged, IsTrue, Commentf("currVer %d, newItem %v", currVer, newItem)) + // The current schema version is older than the oldest schema version. 
+ isTablesChanged = validator.isRelatedTablesChanged(-1, nil) + c.Assert(isTablesChanged, IsTrue, Commentf("currVer %d, newItem %v", currVer, newItem)) + + // All schema versions is expired. + ts = uint64(time.Now().Add(2 * lease).UnixNano()) + valid = validator.Check(ts, newItem.schemaVer, nil) + c.Assert(valid, Equals, ResultUnknown) + + close(exit) + wg.Wait() +} + +func getGreaterVersionItem(c *C, lease time.Duration, leaseGrantCh chan leaseGrantItem, currVer int64) leaseGrantItem { + var newItem leaseGrantItem + for i := 0; i < 10; i++ { + time.Sleep(lease / 2) + newItem = <-leaseGrantCh + if newItem.schemaVer > currVer { + break + } + } + c.Assert(newItem.schemaVer, Greater, currVer, Commentf("currVer %d, newItem %v", currVer, newItem)) + + return newItem +} + +// serverFunc plays the role as a remote server, runs in a separate goroutine. +// It can grant lease and provide timestamp oracle. +// Caller should communicate with it through channel to mock network. +func serverFunc(lease time.Duration, requireLease chan leaseGrantItem, oracleCh chan uint64, exit chan struct{}, wg *sync.WaitGroup) { + defer wg.Done() + var version int64 + leaseTS := uint64(time.Now().UnixNano()) + ticker := time.NewTicker(lease) + defer ticker.Stop() + for { + select { + case now := <-ticker.C: + version++ + leaseTS = uint64(now.UnixNano()) + case requireLease <- leaseGrantItem{ + leaseGrantTS: leaseTS, + oldVer: version - 1, + schemaVer: version, + }: + case oracleCh <- uint64(time.Now().UnixNano()): + case <-exit: + return + } + } +} + +func (*testSuite) TestEnqueue(c *C) { + lease := 10 * time.Millisecond + originalCnt := variable.GetMaxDeltaSchemaCount() + defer variable.SetMaxDeltaSchemaCount(originalCnt) + + validator := NewSchemaValidator(lease).(*schemaValidator) + c.Assert(validator.IsStarted(), IsTrue) + // maxCnt is 0. + variable.SetMaxDeltaSchemaCount(0) + validator.enqueue(1, []int64{11}) + c.Assert(validator.deltaSchemaInfos, HasLen, 0) + + // maxCnt is 10. + variable.SetMaxDeltaSchemaCount(10) + ds := []deltaSchemaInfo{ + {0, []int64{1}}, + {1, []int64{1}}, + {2, []int64{1}}, + {3, []int64{2, 2}}, + {4, []int64{2}}, + {5, []int64{1, 4}}, + {6, []int64{1, 4}}, + {7, []int64{3, 1, 3}}, + {8, []int64{1, 2, 3}}, + {9, []int64{1, 2, 3}}, + } + for _, d := range ds { + validator.enqueue(d.schemaVersion, d.relatedTableIDs) + } + validator.enqueue(10, []int64{1}) + ret := []deltaSchemaInfo{ + {0, []int64{1}}, + {2, []int64{1}}, + {3, []int64{2, 2}}, + {4, []int64{2}}, + {6, []int64{1, 4}}, + {9, []int64{1, 2, 3}}, + {10, []int64{1}}, + } + c.Assert(validator.deltaSchemaInfos, DeepEquals, ret) + // The Items' relatedTableIDs have different order. + validator.enqueue(11, []int64{1, 2, 3, 4}) + validator.enqueue(12, []int64{4, 1, 2, 3, 1}) + validator.enqueue(13, []int64{4, 1, 3, 2, 5}) + ret[len(ret)-1] = deltaSchemaInfo{13, []int64{4, 1, 3, 2, 5}} + c.Assert(validator.deltaSchemaInfos, DeepEquals, ret) + // The length of deltaSchemaInfos is greater then maxCnt. 
+ validator.enqueue(14, []int64{1}) + validator.enqueue(15, []int64{2}) + validator.enqueue(16, []int64{3}) + validator.enqueue(17, []int64{4}) + ret = append(ret, deltaSchemaInfo{14, []int64{1}}) + ret = append(ret, deltaSchemaInfo{15, []int64{2}}) + ret = append(ret, deltaSchemaInfo{16, []int64{3}}) + ret = append(ret, deltaSchemaInfo{17, []int64{4}}) + c.Assert(validator.deltaSchemaInfos, DeepEquals, ret[1:]) +} diff --git a/err1 b/err1 new file mode 100644 index 0000000..4190c05 --- /dev/null +++ b/err1 @@ -0,0 +1,113 @@ +cat checklist.md +# Following the checklist saves the reviewers' time and gets your PR reviewed faster. + +# Self Review +Have you reviewed every line of your changes by yourself? + +# Test +Have you added enough test cases to cover the new feature or bug fix? +Also, add comments to describe your test cases. + +# Naming +Do function names keep consistent with its behavior? +Is it easy to infer the function's behavior by its name? + +# Comment +Is there any code that confuses the reviewer? +Add comments on them! You'll be asked to do so anyway. +Make sure there is no syntax or spelling error in your comments. +Some online syntax checking tools like Grammarly may be helpful. + +# Refactor +Is there any way to refactor the code to make it more readable? +If the refactoring touches a lot of existing code, send another PR to do it. + +# Single Purpose +Make sure the PR does only one thing and nothing else. + +# Diff Size +Make sure the diff size is no more than 500, split it into small PRs if it is too large. +GO111MODULE=on go list -f '{{ join .Imports "\n" }}' github.com/pingcap/tidb/store/tikv | grep ^github.com/pingcap/parser$ || exit 0; exit 1 +GO111MODULE=on go build -o tools/bin/failpoint-ctl github.com/pingcap/failpoint/failpoint-ctl +Running in native mode. +ok github.com/pingcap/tidb/config 0.028s coverage: 44.4% of statements +ok github.com/pingcap/tidb/ddl 7.268s coverage: 76.9% of statements +ok github.com/pingcap/tidb/ddl/failtest 1.698s coverage: [no statements] +? github.com/pingcap/tidb/ddl/testutil [no test files] +ok github.com/pingcap/tidb/ddl/util 1.345s coverage: 64.4% of statements +ok github.com/pingcap/tidb/distsql 0.037s coverage: 87.3% of statements +ok github.com/pingcap/tidb/domain 0.067s coverage: 60.0% of statements +ok github.com/pingcap/tidb/executor 3.869s coverage: 74.7% of statements +ok github.com/pingcap/tidb/executor/aggfuncs 0.050s coverage: 82.6% of statements +ok github.com/pingcap/tidb/executor/seqtest 0.283s coverage: [no statements] +ok github.com/pingcap/tidb/expression 6.127s coverage: 75.6% of statements +ok github.com/pingcap/tidb/expression/aggregation 0.044s coverage: 46.7% of statements +? github.com/pingcap/tidb/expression/generator/helper [no test files] +ok github.com/pingcap/tidb/infoschema 0.270s coverage: 40.3% of statements +ok github.com/pingcap/tidb/kv 0.235s coverage: 81.8% of statements +ok github.com/pingcap/tidb/kv/memdb 0.407s coverage: 96.9% of statements +ok github.com/pingcap/tidb/meta 0.036s coverage: 85.5% of statements +ok github.com/pingcap/tidb/meta/autoid 0.062s coverage: 91.4% of statements +ok github.com/pingcap/tidb/owner 12.977s coverage: 68.4% of statements +ok github.com/pingcap/tidb/parser 0.054s coverage: 71.7% of statements +ok github.com/pingcap/tidb/parser/ast 0.028s coverage: 51.3% of statements +ok github.com/pingcap/tidb/parser/charset 0.020s coverage: 74.6% of statements +ok github.com/pingcap/tidb/parser/format 0.022s coverage: 70.9% of statements +? 
github.com/pingcap/tidb/parser/goyacc [no test files] +ok github.com/pingcap/tidb/parser/model 0.029s coverage: 53.5% of statements +ok github.com/pingcap/tidb/parser/mysql 0.024s coverage: 17.1% of statements +? github.com/pingcap/tidb/parser/opcode [no test files] +ok github.com/pingcap/tidb/parser/terror 0.028s coverage: 72.3% of statements +ok github.com/pingcap/tidb/parser/types 0.019s coverage: 64.0% of statements +? github.com/pingcap/tidb/planner [no test files] +ok github.com/pingcap/tidb/planner/cascades 0.394s coverage: 87.2% of statements +ok github.com/pingcap/tidb/planner/core 2.246s coverage: 78.4% of statements +ok github.com/pingcap/tidb/planner/implementation 0.020s coverage: 5.3% of statements +ok github.com/pingcap/tidb/planner/memo 0.025s coverage: 85.2% of statements +? github.com/pingcap/tidb/planner/property [no test files] +? github.com/pingcap/tidb/planner/util [no test files] +ok github.com/pingcap/tidb/server 0.510s coverage: 62.4% of statements +ok github.com/pingcap/tidb/session 2.513s coverage: 73.8% of statements +ok github.com/pingcap/tidb/sessionctx 0.032s coverage: 83.3% of statements +ok github.com/pingcap/tidb/sessionctx/stmtctx 0.045s coverage: 12.8% of statements +ok github.com/pingcap/tidb/sessionctx/variable 0.035s coverage: 36.5% of statements +ok github.com/pingcap/tidb/statistics 4.599s coverage: 81.5% of statements +ok github.com/pingcap/tidb/store 5.280s coverage: 95.5% of statements +? github.com/pingcap/tidb/store/mockoracle [no test files] +? github.com/pingcap/tidb/store/mockstore [no test files] +ok github.com/pingcap/tidb/store/mockstore/mocktikv 0.141s coverage: 36.5% of statements +ok github.com/pingcap/tidb/store/tikv 31.811s coverage: 71.3% of statements +? github.com/pingcap/tidb/store/tikv/oracle [no test files] +ok github.com/pingcap/tidb/store/tikv/oracle/oracles 0.078s coverage: 33.8% of statements +? github.com/pingcap/tidb/store/tikv/tikvrpc [no test files] +ok github.com/pingcap/tidb/structure 0.046s coverage: 85.8% of statements +ok github.com/pingcap/tidb/table 0.042s coverage: 50.0% of statements +ok github.com/pingcap/tidb/table/tables 0.372s coverage: 78.0% of statements +ok github.com/pingcap/tidb/tablecodec 0.030s coverage: 54.7% of statements +ok github.com/pingcap/tidb/tidb-server 0.033s coverage: 1.8% of statements +ok github.com/pingcap/tidb/types 0.250s coverage: 73.2% of statements +? github.com/pingcap/tidb/types/parser_driver [no test files] +ok github.com/pingcap/tidb/util 0.054s coverage: 86.9% of statements +ok github.com/pingcap/tidb/util/admin 0.043s coverage: 85.0% of statements +ok github.com/pingcap/tidb/util/arena 0.026s coverage: 100.0% of statements +ok github.com/pingcap/tidb/util/chunk 0.029s coverage: 47.8% of statements +ok github.com/pingcap/tidb/util/codec 0.036s coverage: 82.1% of statements +ok github.com/pingcap/tidb/util/disjointset 0.027s coverage: 100.0% of statements +ok github.com/pingcap/tidb/util/format 0.025s coverage: 83.6% of statements +ok github.com/pingcap/tidb/util/hack 0.022s coverage: 92.3% of statements +ok github.com/pingcap/tidb/util/logutil 0.029s coverage: 87.5% of statements +ok github.com/pingcap/tidb/util/math 1.391s coverage: 33.3% of statements +ok github.com/pingcap/tidb/util/mock 0.025s coverage: 18.1% of statements +ok github.com/pingcap/tidb/util/mvmap 0.022s coverage: 86.6% of statements +ok github.com/pingcap/tidb/util/ranger 0.210s coverage: 79.3% of statements +? 
github.com/pingcap/tidb/util/rowDecoder [no test files] +ok github.com/pingcap/tidb/util/rowcodec 0.072s coverage: 84.4% of statements +ok github.com/pingcap/tidb/util/set 0.021s coverage: 100.0% of statements +? github.com/pingcap/tidb/util/signal [no test files] +? github.com/pingcap/tidb/util/sqlexec [no test files] +ok github.com/pingcap/tidb/util/stringutil 0.023s coverage: 94.4% of statements +ok github.com/pingcap/tidb/util/testkit 0.020s coverage: 8.4% of statements +? github.com/pingcap/tidb/util/testleak [no test files] +ok github.com/pingcap/tidb/util/testutil 0.015s coverage: 15.3% of statements +go generate ./... +./tools/check/check-gogenerate.sh diff --git a/err2 b/err2 new file mode 100644 index 0000000..71dbdc3 --- /dev/null +++ b/err2 @@ -0,0 +1 @@ +Great, all tests passed. diff --git a/executor/adapter.go b/executor/adapter.go new file mode 100644 index 0000000..2ef0dd3 --- /dev/null +++ b/executor/adapter.go @@ -0,0 +1,259 @@ +// Copyright 2015 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package executor + +import ( + "context" + "fmt" + "strings" + + "github.com/pingcap/errors" + "github.com/pingcap/tidb/expression" + "github.com/pingcap/tidb/infoschema" + "github.com/pingcap/tidb/parser/ast" + "github.com/pingcap/tidb/parser/model" + "github.com/pingcap/tidb/parser/mysql" + "github.com/pingcap/tidb/parser/terror" + plannercore "github.com/pingcap/tidb/planner/core" + "github.com/pingcap/tidb/sessionctx" + "github.com/pingcap/tidb/types" + "github.com/pingcap/tidb/util/chunk" + "github.com/pingcap/tidb/util/logutil" + "github.com/pingcap/tidb/util/sqlexec" + "github.com/pingcap/tidb/util/stringutil" + "go.uber.org/zap" +) + +// recordSet wraps an executor, implements sqlexec.RecordSet interface +type recordSet struct { + fields []*ast.ResultField + executor Executor + stmt *ExecStmt + lastErr error + txnStartTS uint64 +} + +func (a *recordSet) Fields() []*ast.ResultField { + if len(a.fields) == 0 { + a.fields = colNames2ResultFields(a.executor.Schema(), a.stmt.OutputNames, a.stmt.Ctx.GetSessionVars().CurrentDB) + } + return a.fields +} + +func colNames2ResultFields(schema *expression.Schema, names []*types.FieldName, defaultDB string) []*ast.ResultField { + rfs := make([]*ast.ResultField, 0, schema.Len()) + defaultDBCIStr := model.NewCIStr(defaultDB) + for i := 0; i < schema.Len(); i++ { + dbName := names[i].DBName + if dbName.L == "" && names[i].TblName.L != "" { + dbName = defaultDBCIStr + } + origColName := names[i].OrigColName + if origColName.L == "" { + origColName = names[i].ColName + } + rf := &ast.ResultField{ + Column: &model.ColumnInfo{Name: origColName, FieldType: *schema.Columns[i].RetType}, + ColumnAsName: names[i].ColName, + Table: &model.TableInfo{Name: names[i].OrigTblName}, + TableAsName: names[i].TblName, + DBName: dbName, + } + // This is for compatibility. + // See issue https://github.com/pingcap/tidb/issues/10513 . 
+		if len(rf.ColumnAsName.O) > mysql.MaxAliasIdentifierLen {
+			rf.ColumnAsName.O = rf.ColumnAsName.O[:mysql.MaxAliasIdentifierLen]
+		}
+		// Usually the length of O equals the length of L.
+		// Add this length check to avoid a panic.
+		if len(rf.ColumnAsName.L) > mysql.MaxAliasIdentifierLen {
+			rf.ColumnAsName.L = rf.ColumnAsName.L[:mysql.MaxAliasIdentifierLen]
+		}
+		rfs = append(rfs, rf)
+	}
+	return rfs
+}
+
+// Next uses recordSet's executor to fetch the next available chunk for later use.
+// If the chunk contains no rows, the current query has finished, so we store the
+// statement's found-rows count into the session variable LastFoundRows to prepare
+// for the next query.
+// If stmt is not nil and the chunk contains rows, we simply add the number of rows
+// in the chunk to the statement's found-rows count.
+func (a *recordSet) Next(ctx context.Context, req *chunk.Chunk) (err error) {
+	defer func() {
+		r := recover()
+		if r == nil {
+			return
+		}
+		err = errors.Errorf("%v", r)
+		logutil.Logger(ctx).Error("execute sql panic", zap.String("sql", a.stmt.Text), zap.Stack("stack"))
+	}()
+
+	err = Next(ctx, a.executor, req)
+	if err != nil {
+		a.lastErr = err
+		return err
+	}
+	numRows := req.NumRows()
+	if numRows == 0 {
+		if a.stmt != nil {
+			a.stmt.Ctx.GetSessionVars().LastFoundRows = a.stmt.Ctx.GetSessionVars().StmtCtx.FoundRows()
+		}
+		return nil
+	}
+	if a.stmt != nil {
+		a.stmt.Ctx.GetSessionVars().StmtCtx.AddFoundRows(uint64(numRows))
+	}
+	return nil
+}
+
+// NewChunk creates a chunk based on the top-level executor's newFirstChunk().
+func (a *recordSet) NewChunk() *chunk.Chunk {
+	return newFirstChunk(a.executor)
+}
+
+func (a *recordSet) Close() error {
+	err := a.executor.Close()
+	sessVars := a.stmt.Ctx.GetSessionVars()
+	sessVars.PrevStmt = FormatSQL(a.stmt.OriginText())
+	return err
+}
+
+// ExecStmt implements the sqlexec.Statement interface. It adapts a planner.Plan
+// to an sqlexec.Statement.
+type ExecStmt struct {
+	// InfoSchema stores a reference to the schema information.
+	InfoSchema infoschema.InfoSchema
+	// Plan stores a reference to the final physical plan.
+	Plan plannercore.Plan
+	// Text represents the original query text.
+	Text string
+
+	StmtNode ast.StmtNode
+
+	Ctx sessionctx.Context
+
+	// LowerPriority represents whether to lower the execution priority of a query.
+	LowerPriority bool
+
+	// OutputNames will be set if a cached plan is used.
+	OutputNames []*types.FieldName
+}
+
+// OriginText returns the original statement text as a string.
+func (a *ExecStmt) OriginText() string {
+	return a.Text
+}
+
+// IsReadOnly returns true if a statement is read only.
+func (a *ExecStmt) IsReadOnly() bool {
+	return ast.IsReadOnly(a.StmtNode)
+}
+
+// Exec builds an Executor from a plan. If the Executor doesn't return a result
+// set, like for INSERT and UPDATE statements, it is executed inside this function.
+// If the Executor returns a result set, execution happens after this function
+// returns, in the returned sqlexec.RecordSet's Next method.
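+//
+// A minimal sketch of a possible caller (illustrative only; stmt, rs and chk are
+// placeholder names, not code from this package):
+//
+//	rs, err := stmt.Exec(ctx)
+//	if err != nil || rs == nil {
+//		return err // no result set, e.g. INSERT/UPDATE, or an error
+//	}
+//	defer rs.Close()
+//	chk := rs.NewChunk()
+//	for {
+//		if err := rs.Next(ctx, chk); err != nil {
+//			return err
+//		}
+//		if chk.NumRows() == 0 {
+//			break // drained
+//		}
+//		// consume the rows in chk ...
+//	}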
+func (a *ExecStmt) Exec(ctx context.Context) (_ sqlexec.RecordSet, err error) { + defer func() { + r := recover() + if r == nil { + return + } + if _, ok := r.(string); !ok { + panic(r) + } + err = errors.Errorf("%v", r) + logutil.Logger(ctx).Error("execute sql panic", zap.String("sql", a.Text), zap.Stack("stack")) + }() + + sctx := a.Ctx + e, err := a.buildExecutor() + if err != nil { + return nil, err + } + + if err = e.Open(ctx); err != nil { + terror.Call(e.Close) + return nil, err + } + + if handled, result, err := a.handleNoDelay(ctx, e); handled { + return result, err + } + + var txnStartTS uint64 + txn, err := sctx.Txn(false) + if err != nil { + return nil, err + } + if txn.Valid() { + txnStartTS = txn.StartTS() + } + return &recordSet{ + executor: e, + stmt: a, + txnStartTS: txnStartTS, + }, nil +} + +func (a *ExecStmt) handleNoDelay(ctx context.Context, e Executor) (bool, sqlexec.RecordSet, error) { + toCheck := e + + // If the executor doesn't return any result to the client, we execute it without delay. + if toCheck.Schema().Len() == 0 { + r, err := a.handleNoDelayExecutor(ctx, e) + return true, r, err + } + + return false, nil, nil +} + +func (a *ExecStmt) handleNoDelayExecutor(ctx context.Context, e Executor) (sqlexec.RecordSet, error) { + var err error + defer func() { + terror.Log(e.Close()) + }() + + err = Next(ctx, e, newFirstChunk(e)) + if err != nil { + return nil, err + } + return nil, err +} + +// buildExecutor build a executor from plan, prepared statement may need additional procedure. +func (a *ExecStmt) buildExecutor() (Executor, error) { + ctx := a.Ctx + + b := newExecutorBuilder(ctx, a.InfoSchema) + e := b.build(a.Plan) + if b.err != nil { + return nil, errors.Trace(b.err) + } + + return e, nil +} + +// QueryReplacer replaces new line and tab for grep result including query string. +var QueryReplacer = strings.NewReplacer("\r", " ", "\n", " ", "\t", " ") + +// FormatSQL is used to format the original SQL, e.g. truncating long SQL, appending prepared arguments. +func FormatSQL(sql string) stringutil.StringerFunc { + return func() string { + length := len(sql) + if uint64(length) > logutil.DefaultQueryLogMaxLen { + sql = fmt.Sprintf("%.*q(len:%d)", logutil.DefaultQueryLogMaxLen, sql, length) + } + return QueryReplacer.Replace(sql) + } +} diff --git a/executor/adapter_test.go b/executor/adapter_test.go new file mode 100644 index 0000000..bf33a3b --- /dev/null +++ b/executor/adapter_test.go @@ -0,0 +1,37 @@ +// Copyright 2019 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package executor_test + +import ( + "time" + + . 
"github.com/pingcap/check" + "github.com/pingcap/tidb/util/testkit" +) + +func (s *testSuiteP2) TestQueryTime(c *C) { + tk := testkit.NewTestKit(c, s.store) + tk.MustExec("use test") + + costTime := time.Since(tk.Se.GetSessionVars().StartTime) + c.Assert(costTime < 1*time.Second, IsTrue) + + tk.MustExec("drop table if exists t") + tk.MustExec("create table t(a int)") + tk.MustExec("insert into t values(1), (1), (1), (1), (1)") + tk.MustExec("select * from t t1 join t t2 on t1.a = t2.a") + + costTime = time.Since(tk.Se.GetSessionVars().StartTime) + c.Assert(costTime < 1*time.Second, IsTrue) +} diff --git a/executor/aggfuncs/aggfunc_test.go b/executor/aggfuncs/aggfunc_test.go new file mode 100644 index 0000000..93adb31 --- /dev/null +++ b/executor/aggfuncs/aggfunc_test.go @@ -0,0 +1,205 @@ +// Copyright 2018 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package aggfuncs_test + +import ( + "fmt" + "testing" + "time" + + . "github.com/pingcap/check" + "github.com/pingcap/tidb/executor/aggfuncs" + "github.com/pingcap/tidb/expression" + "github.com/pingcap/tidb/expression/aggregation" + "github.com/pingcap/tidb/parser" + "github.com/pingcap/tidb/parser/mysql" + "github.com/pingcap/tidb/sessionctx" + "github.com/pingcap/tidb/types" + _ "github.com/pingcap/tidb/types/parser_driver" + "github.com/pingcap/tidb/util/chunk" + "github.com/pingcap/tidb/util/mock" +) + +var _ = Suite(&testSuite{}) + +func TestT(t *testing.T) { + CustomVerboseFlag = true + *CustomParallelSuiteFlag = true + TestingT(t) +} + +type testSuite struct { + *parser.Parser + ctx sessionctx.Context +} + +func (s *testSuite) SetUpSuite(c *C) { + s.Parser = parser.New() + s.ctx = mock.NewContext() + s.ctx.GetSessionVars().StmtCtx.TimeZone = time.Local +} + +func (s *testSuite) TearDownSuite(c *C) { +} + +func (s *testSuite) SetUpTest(c *C) { + s.ctx.GetSessionVars().PlanColumnID = 0 +} + +func (s *testSuite) TearDownTest(c *C) { + s.ctx.GetSessionVars().StmtCtx.SetWarnings(nil) +} + +type aggTest struct { + dataType *types.FieldType + numRows int + dataGen func(i int) types.Datum + funcName string + results []types.Datum +} + +func (s *testSuite) testMergePartialResult(c *C, p aggTest) { + srcChk := chunk.NewChunkWithCapacity([]*types.FieldType{p.dataType}, p.numRows) + for i := 0; i < p.numRows; i++ { + dt := p.dataGen(i) + srcChk.AppendDatum(0, &dt) + } + iter := chunk.NewIterator4Chunk(srcChk) + + args := []expression.Expression{&expression.Column{RetType: p.dataType, Index: 0}} + desc, err := aggregation.NewAggFuncDesc(s.ctx, p.funcName, args) + c.Assert(err, IsNil) + partialDesc, finalDesc := desc.Split([]int{0, 1}) + + // build partial func for partial phase. + partialFunc := aggfuncs.Build(s.ctx, partialDesc, 0) + partialResult := partialFunc.AllocPartialResult() + + // build final func for final phase. + finalFunc := aggfuncs.Build(s.ctx, finalDesc, 0) + finalPr := finalFunc.AllocPartialResult() + resultChk := chunk.NewChunkWithCapacity([]*types.FieldType{p.dataType}, 1) + + // update partial result. 
+ for row := iter.Begin(); row != iter.End(); row = iter.Next() { + partialFunc.UpdatePartialResult(s.ctx, []chunk.Row{row}, partialResult) + } + partialFunc.AppendFinalResult2Chunk(s.ctx, partialResult, resultChk) + dt := resultChk.GetRow(0).GetDatum(0, p.dataType) + result, err := dt.CompareDatum(s.ctx.GetSessionVars().StmtCtx, &p.results[0]) + c.Assert(err, IsNil) + c.Assert(result, Equals, 0) + + err = finalFunc.MergePartialResult(s.ctx, partialResult, finalPr) + c.Assert(err, IsNil) + partialFunc.ResetPartialResult(partialResult) + + iter.Begin() + iter.Next() + for row := iter.Next(); row != iter.End(); row = iter.Next() { + partialFunc.UpdatePartialResult(s.ctx, []chunk.Row{row}, partialResult) + } + resultChk.Reset() + partialFunc.AppendFinalResult2Chunk(s.ctx, partialResult, resultChk) + dt = resultChk.GetRow(0).GetDatum(0, p.dataType) + result, err = dt.CompareDatum(s.ctx.GetSessionVars().StmtCtx, &p.results[1]) + c.Assert(err, IsNil) + c.Assert(result, Equals, 0) + err = finalFunc.MergePartialResult(s.ctx, partialResult, finalPr) + c.Assert(err, IsNil) + + resultChk.Reset() + err = finalFunc.AppendFinalResult2Chunk(s.ctx, finalPr, resultChk) + c.Assert(err, IsNil) + + dt = resultChk.GetRow(0).GetDatum(0, p.dataType) + result, err = dt.CompareDatum(s.ctx.GetSessionVars().StmtCtx, &p.results[2]) + c.Assert(err, IsNil) + c.Assert(result, Equals, 0) +} + +func buildAggTester(funcName string, tp byte, numRows int, results ...interface{}) aggTest { + return buildAggTesterWithFieldType(funcName, types.NewFieldType(tp), numRows, results...) +} + +func buildAggTesterWithFieldType(funcName string, ft *types.FieldType, numRows int, results ...interface{}) aggTest { + pt := aggTest{ + dataType: ft, + numRows: numRows, + funcName: funcName, + dataGen: getDataGenFunc(ft), + } + for _, result := range results { + pt.results = append(pt.results, types.NewDatum(result)) + } + return pt +} + +func getDataGenFunc(ft *types.FieldType) func(i int) types.Datum { + switch ft.Tp { + case mysql.TypeLonglong: + return func(i int) types.Datum { return types.NewIntDatum(int64(i)) } + case mysql.TypeFloat: + return func(i int) types.Datum { return types.NewFloat32Datum(float32(i)) } + case mysql.TypeDouble: + return func(i int) types.Datum { return types.NewFloat64Datum(float64(i)) } + case mysql.TypeString: + return func(i int) types.Datum { return types.NewStringDatum(fmt.Sprintf("%d", i)) } + } + return nil +} + +func (s *testSuite) testAggFunc(c *C, p aggTest) { + srcChk := chunk.NewChunkWithCapacity([]*types.FieldType{p.dataType}, p.numRows) + for i := 0; i < p.numRows; i++ { + dt := p.dataGen(i) + srcChk.AppendDatum(0, &dt) + } + srcChk.AppendDatum(0, &types.Datum{}) + + args := []expression.Expression{&expression.Column{RetType: p.dataType, Index: 0}} + desc, err := aggregation.NewAggFuncDesc(s.ctx, p.funcName, args) + c.Assert(err, IsNil) + finalFunc := aggfuncs.Build(s.ctx, desc, 0) + finalPr := finalFunc.AllocPartialResult() + resultChk := chunk.NewChunkWithCapacity([]*types.FieldType{desc.RetTp}, 1) + + iter := chunk.NewIterator4Chunk(srcChk) + for row := iter.Begin(); row != iter.End(); row = iter.Next() { + finalFunc.UpdatePartialResult(s.ctx, []chunk.Row{row}, finalPr) + } + finalFunc.AppendFinalResult2Chunk(s.ctx, finalPr, resultChk) + dt := resultChk.GetRow(0).GetDatum(0, desc.RetTp) + result, err := dt.CompareDatum(s.ctx.GetSessionVars().StmtCtx, &p.results[1]) + c.Assert(err, IsNil) + c.Assert(result, Equals, 0) + + // test the empty input + resultChk.Reset() + 
finalFunc.ResetPartialResult(finalPr) + finalFunc.AppendFinalResult2Chunk(s.ctx, finalPr, resultChk) + dt = resultChk.GetRow(0).GetDatum(0, desc.RetTp) + result, err = dt.CompareDatum(s.ctx.GetSessionVars().StmtCtx, &p.results[0]) + c.Assert(err, IsNil) + c.Assert(result, Equals, 0) + + // test the empty input + resultChk.Reset() + finalFunc.ResetPartialResult(finalPr) + finalFunc.AppendFinalResult2Chunk(s.ctx, finalPr, resultChk) + dt = resultChk.GetRow(0).GetDatum(0, desc.RetTp) + result, err = dt.CompareDatum(s.ctx.GetSessionVars().StmtCtx, &p.results[0]) + c.Assert(err, IsNil) + c.Assert(result, Equals, 0) +} diff --git a/executor/aggfuncs/aggfuncs.go b/executor/aggfuncs/aggfuncs.go new file mode 100644 index 0000000..8cbcbcf --- /dev/null +++ b/executor/aggfuncs/aggfuncs.go @@ -0,0 +1,111 @@ +// Copyright 2018 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package aggfuncs + +import ( + "unsafe" + + "github.com/pingcap/tidb/expression" + "github.com/pingcap/tidb/sessionctx" + "github.com/pingcap/tidb/util/chunk" +) + +// All the AggFunc implementations are listed here for navigation. +var ( + // All the AggFunc implementations for "COUNT" are listed here. + _ AggFunc = (*countPartial)(nil) + _ AggFunc = (*countOriginal4Int)(nil) + _ AggFunc = (*countOriginal4Real)(nil) + _ AggFunc = (*countOriginal4String)(nil) + + // All the AggFunc implementations for "FIRSTROW" are listed here. + _ AggFunc = (*firstRow4Int)(nil) + _ AggFunc = (*firstRow4String)(nil) + _ AggFunc = (*firstRow4Float32)(nil) + _ AggFunc = (*firstRow4Float64)(nil) + + // All the AggFunc implementations for "MAX"/"MIN" are listed here. + _ AggFunc = (*maxMin4Int)(nil) + _ AggFunc = (*maxMin4Uint)(nil) + _ AggFunc = (*maxMin4Float32)(nil) + _ AggFunc = (*maxMin4Float64)(nil) + _ AggFunc = (*maxMin4String)(nil) + + // All the AggFunc implementations for "AVG" are listed here. + _ AggFunc = (*avgOriginal4Int64)(nil) + _ AggFunc = (*avgPartial4Int64)(nil) + + _ AggFunc = (*avgOriginal4Float64)(nil) + _ AggFunc = (*avgPartial4Float64)(nil) + + // All the AggFunc implementations for "SUM" are listed here. + _ AggFunc = (*sum4Int64)(nil) + _ AggFunc = (*sum4Float64)(nil) +) + +// PartialResult represents data structure to store the partial result for the +// aggregate functions. Here we use unsafe.Pointer to allow the partial result +// to be any type. +type PartialResult unsafe.Pointer + +// AggFunc is the interface to evaluate the aggregate functions. +type AggFunc interface { + // AllocPartialResult allocates a specific data structure to store the + // partial result, initializes it, and converts it to PartialResult to + // return back. Aggregate operator implementation, no matter it's a hash + // or stream, should hold this allocated PartialResult for the further + // operations like: "ResetPartialResult", "UpdatePartialResult". + AllocPartialResult() PartialResult + + // ResetPartialResult resets the partial result to the original state for a + // specific aggregate function. 
It converts the input PartialResult to the
+	// specific data structure which stores the partial result and then resets
+	// every field to its proper original state.
+	ResetPartialResult(pr PartialResult)
+
+	// UpdatePartialResult updates the specific partial result for an aggregate
+	// function using the input rows, which all belong to the same data group.
+	// It converts the PartialResult to the specific data structure which stores
+	// the partial result, then iterates over the input rows and updates that
+	// partial result according to the functionality and the state of the
+	// aggregate function.
+	UpdatePartialResult(sctx sessionctx.Context, rowsInGroup []chunk.Row, pr PartialResult) error
+
+	// MergePartialResult will be called in the final phase when executing in
+	// parallel. It converts the PartialResults `src` and `dst` to the same
+	// specific data structure which stores the partial results, and then
+	// evaluates the final result using the partial results as input values.
+	MergePartialResult(sctx sessionctx.Context, src, dst PartialResult) error
+
+	// AppendFinalResult2Chunk finalizes the partial result and appends the
+	// final result to the input chunk. Like other operations, it converts the
+	// input PartialResult to the specific data structure which stores the
+	// partial result, then calculates the final result and appends it to the
+	// chunk provided.
+	AppendFinalResult2Chunk(sctx sessionctx.Context, pr PartialResult, chk *chunk.Chunk) error
+}
+
+type baseAggFunc struct {
+	// args stores the input arguments for an aggregate function. We should
+	// call arg.EvalXXX to get the actual input data for this function.
+	args []expression.Expression
+
+	// ordinal stores the ordinal of the column in the output chunk, which is
+	// used to append the final result of this function.
+	ordinal int
+}
+
+func (*baseAggFunc) MergePartialResult(sctx sessionctx.Context, src, dst PartialResult) error {
+	return nil
+}
diff --git a/executor/aggfuncs/builder.go b/executor/aggfuncs/builder.go
new file mode 100644
index 0000000..91d832d
--- /dev/null
+++ b/executor/aggfuncs/builder.go
@@ -0,0 +1,172 @@
+// Copyright 2018 PingCAP, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package aggfuncs
+
+import (
+	"github.com/pingcap/tidb/expression/aggregation"
+	"github.com/pingcap/tidb/parser/ast"
+	"github.com/pingcap/tidb/parser/mysql"
+	"github.com/pingcap/tidb/sessionctx"
+	"github.com/pingcap/tidb/types"
+)
+
+// Build is used to build a specific AggFunc implementation according to the
+// input aggFuncDesc.
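+//
+// A minimal sketch of the intended lifecycle, based on the driver code in
+// aggfunc_test.go (sctx, args, rows and chk are assumed to be prepared by the
+// caller):
+//
+//	desc, err := aggregation.NewAggFuncDesc(sctx, ast.AggFuncCount, args)
+//	if err != nil {
+//		return err
+//	}
+//	fn := aggfuncs.Build(sctx, desc, 0) // may be nil for unsupported combinations
+//	pr := fn.AllocPartialResult()
+//	if err := fn.UpdatePartialResult(sctx, rows, pr); err != nil {
+//		return err
+//	}
+//	if err := fn.AppendFinalResult2Chunk(sctx, pr, chk); err != nil {
+//		return err
+//	}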
+func Build(ctx sessionctx.Context, aggFuncDesc *aggregation.AggFuncDesc, ordinal int) AggFunc { + switch aggFuncDesc.Name { + case ast.AggFuncCount: + return buildCount(aggFuncDesc, ordinal) + case ast.AggFuncSum: + return buildSum(aggFuncDesc, ordinal) + case ast.AggFuncAvg: + return buildAvg(aggFuncDesc, ordinal) + case ast.AggFuncFirstRow: + return buildFirstRow(aggFuncDesc, ordinal) + case ast.AggFuncMax: + return buildMaxMin(aggFuncDesc, ordinal, true) + case ast.AggFuncMin: + return buildMaxMin(aggFuncDesc, ordinal, false) + } + return nil +} + +// buildCount builds the AggFunc implementation for function "COUNT". +func buildCount(aggFuncDesc *aggregation.AggFuncDesc, ordinal int) AggFunc { + base := baseAggFunc{ + args: aggFuncDesc.Args, + ordinal: ordinal, + } + + switch aggFuncDesc.Mode { + case aggregation.CompleteMode, aggregation.Partial1Mode: + switch aggFuncDesc.Args[0].GetType().EvalType() { + case types.ETInt: + return &countOriginal4Int{baseCount{base}} + case types.ETReal: + return &countOriginal4Real{baseCount{base}} + case types.ETString: + return &countOriginal4String{baseCount{base}} + } + case aggregation.Partial2Mode, aggregation.FinalMode: + return &countPartial{baseCount{base}} + } + + return nil +} + +// buildSum builds the AggFunc implementation for function "SUM". +func buildSum(aggFuncDesc *aggregation.AggFuncDesc, ordinal int) AggFunc { + base := baseSumAggFunc{ + baseAggFunc: baseAggFunc{ + args: aggFuncDesc.Args, + ordinal: ordinal, + }, + } + switch aggFuncDesc.RetTp.EvalType() { + case types.ETInt: + return &sum4Int64{base} + default: + return &sum4Float64{base} + } +} + +// buildAvg builds the AggFunc implementation for function "AVG". +func buildAvg(aggFuncDesc *aggregation.AggFuncDesc, ordinal int) AggFunc { + base := baseAggFunc{ + args: aggFuncDesc.Args, + ordinal: ordinal, + } + switch aggFuncDesc.Mode { + // Build avg functions which consume the original data and update their + // partial results. + case aggregation.CompleteMode, aggregation.Partial1Mode: + switch aggFuncDesc.RetTp.EvalType() { + case types.ETInt: + return &avgOriginal4Int64{baseAvgInt64{base}} + default: + return &avgOriginal4Float64{baseAvgFloat64{base}} + } + + // Build avg functions which consume the partial result of other avg + // functions and update their partial results. + case aggregation.Partial2Mode, aggregation.FinalMode: + switch aggFuncDesc.RetTp.Tp { + case mysql.TypeLonglong: + return &avgPartial4Int64{baseAvgInt64{base}} + case mysql.TypeDouble: + return &avgPartial4Float64{baseAvgFloat64{base}} + } + } + return nil +} + +// buildFirstRow builds the AggFunc implementation for function "FIRST_ROW". +func buildFirstRow(aggFuncDesc *aggregation.AggFuncDesc, ordinal int) AggFunc { + base := baseAggFunc{ + args: aggFuncDesc.Args, + ordinal: ordinal, + } + + evalType, fieldType := aggFuncDesc.RetTp.EvalType(), aggFuncDesc.RetTp + if fieldType.Tp == mysql.TypeBit { + evalType = types.ETString + } + switch evalType { + case types.ETInt: + return &firstRow4Int{base} + case types.ETReal: + switch fieldType.Tp { + case mysql.TypeFloat: + return &firstRow4Float32{base} + case mysql.TypeDouble: + return &firstRow4Float64{base} + } + case types.ETString: + return &firstRow4String{base} + } + return nil +} + +// buildMaxMin builds the AggFunc implementation for function "MAX" and "MIN". 
+func buildMaxMin(aggFuncDesc *aggregation.AggFuncDesc, ordinal int, isMax bool) AggFunc { + base := baseMaxMinAggFunc{ + baseAggFunc: baseAggFunc{ + args: aggFuncDesc.Args, + ordinal: ordinal, + }, + isMax: isMax, + } + + evalType, fieldType := aggFuncDesc.RetTp.EvalType(), aggFuncDesc.RetTp + if fieldType.Tp == mysql.TypeBit { + evalType = types.ETString + } + switch evalType { + case types.ETInt: + if mysql.HasUnsignedFlag(fieldType.Flag) { + return &maxMin4Uint{base} + } + return &maxMin4Int{base} + case types.ETReal: + switch fieldType.Tp { + case mysql.TypeFloat: + return &maxMin4Float32{base} + case mysql.TypeDouble: + return &maxMin4Float64{base} + } + case types.ETString: + return &maxMin4String{base} + } + return nil +} diff --git a/executor/aggfuncs/func_avg.go b/executor/aggfuncs/func_avg.go new file mode 100644 index 0000000..ee16b3a --- /dev/null +++ b/executor/aggfuncs/func_avg.go @@ -0,0 +1,218 @@ +// Copyright 2018 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package aggfuncs + +import ( + "github.com/pingcap/tidb/sessionctx" + "github.com/pingcap/tidb/types" + "github.com/pingcap/tidb/util/chunk" +) + +// All the following avg function implementations return the decimal result, +// which store the partial results in "partialResult4AvgInt64". +// +// "baseAvgDecimal" is wrapped by: +// - "avgOriginal4Int64" +// - "avgPartial4Int64" +type baseAvgInt64 struct { + baseAggFunc +} + +type partialResult4AvgInt64 struct { + sum int64 + count int64 +} + +func (e *baseAvgInt64) AllocPartialResult() PartialResult { + return PartialResult(&partialResult4AvgInt64{}) +} + +func (e *baseAvgInt64) ResetPartialResult(pr PartialResult) { + p := (*partialResult4AvgInt64)(pr) + p.sum = 0 + p.count = int64(0) +} + +func (e *baseAvgInt64) AppendFinalResult2Chunk(sctx sessionctx.Context, pr PartialResult, chk *chunk.Chunk) error { + p := (*partialResult4AvgInt64)(pr) + if p.count == 0 { + chk.AppendNull(e.ordinal) + return nil + } + chk.AppendInt64(e.ordinal, p.sum/p.count) + return nil +} + +type avgOriginal4Int64 struct { + baseAvgInt64 +} + +func (e *avgOriginal4Int64) UpdatePartialResult(sctx sessionctx.Context, rowsInGroup []chunk.Row, pr PartialResult) error { + p := (*partialResult4AvgInt64)(pr) + for _, row := range rowsInGroup { + input, isNull, err := e.args[0].EvalInt(sctx, row) + if err != nil { + return err + } + if isNull { + continue + } + + newSum, err := types.AddInt64(p.sum, input) + if err != nil { + return err + } + p.sum = newSum + p.count++ + } + return nil +} + +type avgPartial4Int64 struct { + baseAvgInt64 +} + +func (e *avgPartial4Int64) UpdatePartialResult(sctx sessionctx.Context, rowsInGroup []chunk.Row, pr PartialResult) error { + p := (*partialResult4AvgInt64)(pr) + for _, row := range rowsInGroup { + inputSum, isNull, err := e.args[1].EvalInt(sctx, row) + if err != nil { + return err + } + if isNull { + continue + } + + inputCount, isNull, err := e.args[0].EvalInt(sctx, row) + if err != nil { + return err + } + if isNull { + continue + } + + newSum, err := types.AddInt64(p.sum, inputSum) + if err != nil { + 
return err + } + p.sum = newSum + p.count += inputCount + } + return nil +} + +func (e *avgPartial4Int64) MergePartialResult(sctx sessionctx.Context, src PartialResult, dst PartialResult) error { + p1, p2 := (*partialResult4AvgInt64)(src), (*partialResult4AvgInt64)(dst) + if p1.count == 0 { + return nil + } + newSum, err := types.AddInt64(p1.sum, p2.sum) + if err != nil { + return err + } + p2.sum = newSum + p2.count += p1.count + return nil +} + +// All the following avg function implementations return the float64 result, +// which store the partial results in "partialResult4AvgFloat64". +// +// "baseAvgFloat64" is wrapped by: +// - "avgOriginal4Float64" +// - "avgPartial4Float64" +type baseAvgFloat64 struct { + baseAggFunc +} + +type partialResult4AvgFloat64 struct { + sum float64 + count int64 +} + +func (e *baseAvgFloat64) AllocPartialResult() PartialResult { + return (PartialResult)(&partialResult4AvgFloat64{}) +} + +func (e *baseAvgFloat64) ResetPartialResult(pr PartialResult) { + p := (*partialResult4AvgFloat64)(pr) + p.sum = 0 + p.count = 0 +} + +func (e *baseAvgFloat64) AppendFinalResult2Chunk(sctx sessionctx.Context, pr PartialResult, chk *chunk.Chunk) error { + p := (*partialResult4AvgFloat64)(pr) + if p.count == 0 { + chk.AppendNull(e.ordinal) + } else { + chk.AppendFloat64(e.ordinal, p.sum/float64(p.count)) + } + return nil +} + +type avgOriginal4Float64 struct { + baseAvgFloat64 +} + +func (e *avgOriginal4Float64) UpdatePartialResult(sctx sessionctx.Context, rowsInGroup []chunk.Row, pr PartialResult) error { + p := (*partialResult4AvgFloat64)(pr) + for _, row := range rowsInGroup { + input, isNull, err := e.args[0].EvalReal(sctx, row) + if err != nil { + return err + } + if isNull { + continue + } + + p.sum += input + p.count++ + } + return nil +} + +type avgPartial4Float64 struct { + baseAvgFloat64 +} + +func (e *avgPartial4Float64) UpdatePartialResult(sctx sessionctx.Context, rowsInGroup []chunk.Row, pr PartialResult) error { + p := (*partialResult4AvgFloat64)(pr) + for _, row := range rowsInGroup { + inputSum, isNull, err := e.args[1].EvalReal(sctx, row) + if err != nil { + return err + } + if isNull { + continue + } + + inputCount, isNull, err := e.args[0].EvalInt(sctx, row) + if err != nil { + return err + } + if isNull { + continue + } + p.sum += inputSum + p.count += inputCount + } + return nil +} + +func (e *avgPartial4Float64) MergePartialResult(sctx sessionctx.Context, src PartialResult, dst PartialResult) error { + p1, p2 := (*partialResult4AvgFloat64)(src), (*partialResult4AvgFloat64)(dst) + p2.sum += p1.sum + p2.count += p1.count + return nil +} diff --git a/executor/aggfuncs/func_avg_test.go b/executor/aggfuncs/func_avg_test.go new file mode 100644 index 0000000..307bdfb --- /dev/null +++ b/executor/aggfuncs/func_avg_test.go @@ -0,0 +1,40 @@ +// Copyright 2018 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package aggfuncs_test + +import ( + . 
"github.com/pingcap/check" + "github.com/pingcap/tidb/parser/ast" + "github.com/pingcap/tidb/parser/mysql" +) + +func (s *testSuite) TestMergePartialResult4Avg(c *C) { + tests := []aggTest{ + buildAggTester(ast.AggFuncAvg, mysql.TypeLonglong, 5, 2.0, 3.0, 2), + buildAggTester(ast.AggFuncAvg, mysql.TypeDouble, 5, 2.0, 3.0, 2.375), + } + for _, test := range tests { + s.testMergePartialResult(c, test) + } +} + +func (s *testSuite) TestAvg(c *C) { + tests := []aggTest{ + buildAggTester(ast.AggFuncAvg, mysql.TypeLonglong, 5, nil, 2.0), + buildAggTester(ast.AggFuncAvg, mysql.TypeDouble, 5, nil, 2.0), + } + for _, test := range tests { + s.testAggFunc(c, test) + } +} diff --git a/executor/aggfuncs/func_count.go b/executor/aggfuncs/func_count.go new file mode 100644 index 0000000..f407098 --- /dev/null +++ b/executor/aggfuncs/func_count.go @@ -0,0 +1,119 @@ +package aggfuncs + +import ( + "github.com/pingcap/tidb/sessionctx" + "github.com/pingcap/tidb/util/chunk" +) + +type baseCount struct { + baseAggFunc +} + +type partialResult4Count = int64 + +func (e *baseCount) AllocPartialResult() PartialResult { + return PartialResult(new(partialResult4Count)) +} + +func (e *baseCount) ResetPartialResult(pr PartialResult) { + p := (*partialResult4Count)(pr) + *p = 0 +} + +func (e *baseCount) AppendFinalResult2Chunk(sctx sessionctx.Context, pr PartialResult, chk *chunk.Chunk) error { + p := (*partialResult4Count)(pr) + chk.AppendInt64(e.ordinal, *p) + return nil +} + +type countOriginal4Int struct { + baseCount +} + +func (e *countOriginal4Int) UpdatePartialResult(sctx sessionctx.Context, rowsInGroup []chunk.Row, pr PartialResult) error { + p := (*partialResult4Count)(pr) + + for _, row := range rowsInGroup { + _, isNull, err := e.args[0].EvalInt(sctx, row) + if err != nil { + return err + } + if isNull { + continue + } + + *p++ + } + + return nil +} + +type countOriginal4Real struct { + baseCount +} + +func (e *countOriginal4Real) UpdatePartialResult(sctx sessionctx.Context, rowsInGroup []chunk.Row, pr PartialResult) error { + p := (*partialResult4Count)(pr) + + for _, row := range rowsInGroup { + _, isNull, err := e.args[0].EvalReal(sctx, row) + if err != nil { + return err + } + if isNull { + continue + } + + *p++ + } + + return nil +} + +type countOriginal4String struct { + baseCount +} + +func (e *countOriginal4String) UpdatePartialResult(sctx sessionctx.Context, rowsInGroup []chunk.Row, pr PartialResult) error { + p := (*partialResult4Count)(pr) + + for _, row := range rowsInGroup { + _, isNull, err := e.args[0].EvalString(sctx, row) + if err != nil { + return err + } + if isNull { + continue + } + + *p++ + } + + return nil +} + +type countPartial struct { + baseCount +} + +func (e *countPartial) UpdatePartialResult(sctx sessionctx.Context, rowsInGroup []chunk.Row, pr PartialResult) error { + p := (*partialResult4Count)(pr) + for _, row := range rowsInGroup { + input, isNull, err := e.args[0].EvalInt(sctx, row) + if err != nil { + return err + } + if isNull { + continue + } + + *p += input + } + return nil +} + +func (*countPartial) MergePartialResult(sctx sessionctx.Context, src, dst PartialResult) error { + p1, p2 := (*partialResult4Count)(src), (*partialResult4Count)(dst) + *p2 += *p1 + return nil +} diff --git a/executor/aggfuncs/func_count_test.go b/executor/aggfuncs/func_count_test.go new file mode 100644 index 0000000..1882e47 --- /dev/null +++ b/executor/aggfuncs/func_count_test.go @@ -0,0 +1,37 @@ +// Copyright 2018 PingCAP, Inc. 
+// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package aggfuncs_test + +import ( + . "github.com/pingcap/check" + "github.com/pingcap/tidb/parser/ast" + "github.com/pingcap/tidb/parser/mysql" +) + +func (s *testSuite) TestMergePartialResult4Count(c *C) { + tester := buildAggTester(ast.AggFuncCount, mysql.TypeLonglong, 5, 5, 3, 8) + s.testMergePartialResult(c, tester) +} + +func (s *testSuite) TestCount(c *C) { + tests := []aggTest{ + buildAggTester(ast.AggFuncCount, mysql.TypeLonglong, 5, 0, 5), + buildAggTester(ast.AggFuncCount, mysql.TypeFloat, 5, 0, 5), + buildAggTester(ast.AggFuncCount, mysql.TypeDouble, 5, 0, 5), + buildAggTester(ast.AggFuncCount, mysql.TypeString, 5, 0, 5), + } + for _, test := range tests { + s.testAggFunc(c, test) + } +} diff --git a/executor/aggfuncs/func_first_row.go b/executor/aggfuncs/func_first_row.go new file mode 100644 index 0000000..f9b6951 --- /dev/null +++ b/executor/aggfuncs/func_first_row.go @@ -0,0 +1,238 @@ +// Copyright 2018 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package aggfuncs + +import ( + "github.com/pingcap/tidb/sessionctx" + "github.com/pingcap/tidb/util/chunk" + "github.com/pingcap/tidb/util/stringutil" +) + +type basePartialResult4FirstRow struct { + // isNull indicates whether the first row is null. + isNull bool + // gotFirstRow indicates whether the first row has been got, + // if so, we would avoid evaluating the values of the remained rows. 
+ gotFirstRow bool +} + +type partialResult4FirstRowInt struct { + basePartialResult4FirstRow + + val int64 +} + +type partialResult4FirstRowFloat32 struct { + basePartialResult4FirstRow + + val float32 +} + +type partialResult4FirstRowFloat64 struct { + basePartialResult4FirstRow + + val float64 +} + +type partialResult4FirstRowString struct { + basePartialResult4FirstRow + + val string +} + +type firstRow4Int struct { + baseAggFunc +} + +func (e *firstRow4Int) AllocPartialResult() PartialResult { + return PartialResult(new(partialResult4FirstRowInt)) +} + +func (e *firstRow4Int) ResetPartialResult(pr PartialResult) { + p := (*partialResult4FirstRowInt)(pr) + p.val, p.isNull, p.gotFirstRow = 0, false, false +} + +func (e *firstRow4Int) UpdatePartialResult(sctx sessionctx.Context, rowsInGroup []chunk.Row, pr PartialResult) error { + p := (*partialResult4FirstRowInt)(pr) + if p.gotFirstRow { + return nil + } + for _, row := range rowsInGroup { + input, isNull, err := e.args[0].EvalInt(sctx, row) + if err != nil { + return err + } + p.gotFirstRow, p.isNull, p.val = true, isNull, input + break + } + return nil +} + +func (*firstRow4Int) MergePartialResult(sctx sessionctx.Context, src PartialResult, dst PartialResult) error { + p1, p2 := (*partialResult4FirstRowInt)(src), (*partialResult4FirstRowInt)(dst) + if !p2.gotFirstRow { + *p2 = *p1 + } + return nil +} + +func (e *firstRow4Int) AppendFinalResult2Chunk(sctx sessionctx.Context, pr PartialResult, chk *chunk.Chunk) error { + p := (*partialResult4FirstRowInt)(pr) + if p.isNull || !p.gotFirstRow { + chk.AppendNull(e.ordinal) + return nil + } + chk.AppendInt64(e.ordinal, p.val) + return nil +} + +type firstRow4Float32 struct { + baseAggFunc +} + +func (e *firstRow4Float32) AllocPartialResult() PartialResult { + return PartialResult(new(partialResult4FirstRowFloat32)) +} + +func (e *firstRow4Float32) ResetPartialResult(pr PartialResult) { + p := (*partialResult4FirstRowFloat32)(pr) + p.isNull, p.gotFirstRow = false, false +} + +func (e *firstRow4Float32) UpdatePartialResult(sctx sessionctx.Context, rowsInGroup []chunk.Row, pr PartialResult) error { + p := (*partialResult4FirstRowFloat32)(pr) + if p.gotFirstRow { + return nil + } + for _, row := range rowsInGroup { + input, isNull, err := e.args[0].EvalReal(sctx, row) + if err != nil { + return err + } + p.gotFirstRow, p.isNull, p.val = true, isNull, float32(input) + break + } + return nil +} +func (*firstRow4Float32) MergePartialResult(sctx sessionctx.Context, src PartialResult, dst PartialResult) error { + p1, p2 := (*partialResult4FirstRowFloat32)(src), (*partialResult4FirstRowFloat32)(dst) + if !p2.gotFirstRow { + *p2 = *p1 + } + return nil +} + +func (e *firstRow4Float32) AppendFinalResult2Chunk(sctx sessionctx.Context, pr PartialResult, chk *chunk.Chunk) error { + p := (*partialResult4FirstRowFloat32)(pr) + if p.isNull || !p.gotFirstRow { + chk.AppendNull(e.ordinal) + return nil + } + chk.AppendFloat32(e.ordinal, p.val) + return nil +} + +type firstRow4Float64 struct { + baseAggFunc +} + +func (e *firstRow4Float64) AllocPartialResult() PartialResult { + return PartialResult(new(partialResult4FirstRowFloat64)) +} + +func (e *firstRow4Float64) ResetPartialResult(pr PartialResult) { + p := (*partialResult4FirstRowFloat64)(pr) + p.isNull, p.gotFirstRow = false, false +} + +func (e *firstRow4Float64) UpdatePartialResult(sctx sessionctx.Context, rowsInGroup []chunk.Row, pr PartialResult) error { + p := (*partialResult4FirstRowFloat64)(pr) + if p.gotFirstRow { + return nil + } + for _, row := 
range rowsInGroup { + input, isNull, err := e.args[0].EvalReal(sctx, row) + if err != nil { + return err + } + p.gotFirstRow, p.isNull, p.val = true, isNull, input + break + } + return nil +} + +func (*firstRow4Float64) MergePartialResult(sctx sessionctx.Context, src PartialResult, dst PartialResult) error { + p1, p2 := (*partialResult4FirstRowFloat64)(src), (*partialResult4FirstRowFloat64)(dst) + if !p2.gotFirstRow { + *p2 = *p1 + } + return nil +} +func (e *firstRow4Float64) AppendFinalResult2Chunk(sctx sessionctx.Context, pr PartialResult, chk *chunk.Chunk) error { + p := (*partialResult4FirstRowFloat64)(pr) + if p.isNull || !p.gotFirstRow { + chk.AppendNull(e.ordinal) + return nil + } + chk.AppendFloat64(e.ordinal, p.val) + return nil +} + +type firstRow4String struct { + baseAggFunc +} + +func (e *firstRow4String) AllocPartialResult() PartialResult { + return PartialResult(new(partialResult4FirstRowString)) +} + +func (e *firstRow4String) ResetPartialResult(pr PartialResult) { + p := (*partialResult4FirstRowString)(pr) + p.isNull, p.gotFirstRow = false, false +} + +func (e *firstRow4String) UpdatePartialResult(sctx sessionctx.Context, rowsInGroup []chunk.Row, pr PartialResult) error { + p := (*partialResult4FirstRowString)(pr) + if p.gotFirstRow { + return nil + } + for _, row := range rowsInGroup { + input, isNull, err := e.args[0].EvalString(sctx, row) + if err != nil { + return err + } + p.gotFirstRow, p.isNull, p.val = true, isNull, stringutil.Copy(input) + break + } + return nil +} + +func (*firstRow4String) MergePartialResult(sctx sessionctx.Context, src PartialResult, dst PartialResult) error { + p1, p2 := (*partialResult4FirstRowString)(src), (*partialResult4FirstRowString)(dst) + if !p2.gotFirstRow { + *p2 = *p1 + } + return nil +} + +func (e *firstRow4String) AppendFinalResult2Chunk(sctx sessionctx.Context, pr PartialResult, chk *chunk.Chunk) error { + p := (*partialResult4FirstRowString)(pr) + if p.isNull || !p.gotFirstRow { + chk.AppendNull(e.ordinal) + return nil + } + chk.AppendString(e.ordinal, p.val) + return nil +} diff --git a/executor/aggfuncs/func_first_row_test.go b/executor/aggfuncs/func_first_row_test.go new file mode 100644 index 0000000..fc0b954 --- /dev/null +++ b/executor/aggfuncs/func_first_row_test.go @@ -0,0 +1,32 @@ +// Copyright 2019 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package aggfuncs_test + +import ( + . 
"github.com/pingcap/check" + "github.com/pingcap/tidb/parser/ast" + "github.com/pingcap/tidb/parser/mysql" +) + +func (s *testSuite) TestMergePartialResult4FirstRow(c *C) { + tests := []aggTest{ + buildAggTester(ast.AggFuncFirstRow, mysql.TypeLonglong, 5, 0, 2, 0), + buildAggTester(ast.AggFuncFirstRow, mysql.TypeFloat, 5, 0.0, 2.0, 0.0), + buildAggTester(ast.AggFuncFirstRow, mysql.TypeDouble, 5, 0.0, 2.0, 0.0), + buildAggTester(ast.AggFuncFirstRow, mysql.TypeString, 5, "0", "2", "0"), + } + for _, test := range tests { + s.testMergePartialResult(c, test) + } +} diff --git a/executor/aggfuncs/func_max_min.go b/executor/aggfuncs/func_max_min.go new file mode 100644 index 0000000..a8dc55d --- /dev/null +++ b/executor/aggfuncs/func_max_min.go @@ -0,0 +1,378 @@ +// Copyright 2018 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package aggfuncs + +import ( + "github.com/pingcap/tidb/sessionctx" + "github.com/pingcap/tidb/types" + "github.com/pingcap/tidb/util/chunk" + "github.com/pingcap/tidb/util/stringutil" +) + +type partialResult4MaxMinInt struct { + val int64 + // isNull is used to indicates: + // 1. whether the partial result is the initialization value which should not be compared during evaluation; + // 2. whether all the values of arg are all null, if so, we should return null as the default value for MAX/MIN. 
+ isNull bool +} + +type partialResult4MaxMinUint struct { + val uint64 + isNull bool +} + +type partialResult4MaxMinFloat32 struct { + val float32 + isNull bool +} + +type partialResult4MaxMinFloat64 struct { + val float64 + isNull bool +} + +type partialResult4MaxMinString struct { + val string + isNull bool +} + +type baseMaxMinAggFunc struct { + baseAggFunc + + isMax bool +} + +type maxMin4Int struct { + baseMaxMinAggFunc +} + +func (e *maxMin4Int) AllocPartialResult() PartialResult { + p := new(partialResult4MaxMinInt) + p.isNull = true + return PartialResult(p) +} + +func (e *maxMin4Int) ResetPartialResult(pr PartialResult) { + p := (*partialResult4MaxMinInt)(pr) + p.val = 0 + p.isNull = true +} + +func (e *maxMin4Int) AppendFinalResult2Chunk(sctx sessionctx.Context, pr PartialResult, chk *chunk.Chunk) error { + p := (*partialResult4MaxMinInt)(pr) + if p.isNull { + chk.AppendNull(e.ordinal) + return nil + } + chk.AppendInt64(e.ordinal, p.val) + return nil +} + +func (e *maxMin4Int) UpdatePartialResult(sctx sessionctx.Context, rowsInGroup []chunk.Row, pr PartialResult) error { + p := (*partialResult4MaxMinInt)(pr) + for _, row := range rowsInGroup { + input, isNull, err := e.args[0].EvalInt(sctx, row) + if err != nil { + return err + } + if isNull { + continue + } + if p.isNull { + p.val = input + p.isNull = false + continue + } + if e.isMax && input > p.val || !e.isMax && input < p.val { + p.val = input + } + } + return nil +} + +func (e *maxMin4Int) MergePartialResult(sctx sessionctx.Context, src, dst PartialResult) error { + p1, p2 := (*partialResult4MaxMinInt)(src), (*partialResult4MaxMinInt)(dst) + if p1.isNull { + return nil + } + if p2.isNull { + *p2 = *p1 + return nil + } + if e.isMax && p1.val > p2.val || !e.isMax && p1.val < p2.val { + p2.val, p2.isNull = p1.val, false + } + return nil +} + +type maxMin4Uint struct { + baseMaxMinAggFunc +} + +func (e *maxMin4Uint) AllocPartialResult() PartialResult { + p := new(partialResult4MaxMinUint) + p.isNull = true + return PartialResult(p) +} + +func (e *maxMin4Uint) ResetPartialResult(pr PartialResult) { + p := (*partialResult4MaxMinUint)(pr) + p.val = 0 + p.isNull = true +} + +func (e *maxMin4Uint) AppendFinalResult2Chunk(sctx sessionctx.Context, pr PartialResult, chk *chunk.Chunk) error { + p := (*partialResult4MaxMinUint)(pr) + if p.isNull { + chk.AppendNull(e.ordinal) + return nil + } + chk.AppendUint64(e.ordinal, p.val) + return nil +} + +func (e *maxMin4Uint) UpdatePartialResult(sctx sessionctx.Context, rowsInGroup []chunk.Row, pr PartialResult) error { + p := (*partialResult4MaxMinUint)(pr) + for _, row := range rowsInGroup { + input, isNull, err := e.args[0].EvalInt(sctx, row) + if err != nil { + return err + } + if isNull { + continue + } + uintVal := uint64(input) + if p.isNull { + p.val = uintVal + p.isNull = false + continue + } + if e.isMax && uintVal > p.val || !e.isMax && uintVal < p.val { + p.val = uintVal + } + } + return nil +} + +func (e *maxMin4Uint) MergePartialResult(sctx sessionctx.Context, src, dst PartialResult) error { + p1, p2 := (*partialResult4MaxMinUint)(src), (*partialResult4MaxMinUint)(dst) + if p1.isNull { + return nil + } + if p2.isNull { + *p2 = *p1 + return nil + } + if e.isMax && p1.val > p2.val || !e.isMax && p1.val < p2.val { + p2.val, p2.isNull = p1.val, false + } + return nil +} + +// maxMin4Float32 gets a float32 input and returns a float32 result. 
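+// The argument is evaluated as float64 and then converted to float32 before the comparison, so
+// both the comparison and the stored extreme value use FLOAT precision.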
+type maxMin4Float32 struct { + baseMaxMinAggFunc +} + +func (e *maxMin4Float32) AllocPartialResult() PartialResult { + p := new(partialResult4MaxMinFloat32) + p.isNull = true + return PartialResult(p) +} + +func (e *maxMin4Float32) ResetPartialResult(pr PartialResult) { + p := (*partialResult4MaxMinFloat32)(pr) + p.val = 0 + p.isNull = true +} + +func (e *maxMin4Float32) AppendFinalResult2Chunk(sctx sessionctx.Context, pr PartialResult, chk *chunk.Chunk) error { + p := (*partialResult4MaxMinFloat32)(pr) + if p.isNull { + chk.AppendNull(e.ordinal) + return nil + } + chk.AppendFloat32(e.ordinal, p.val) + return nil +} + +func (e *maxMin4Float32) UpdatePartialResult(sctx sessionctx.Context, rowsInGroup []chunk.Row, pr PartialResult) error { + p := (*partialResult4MaxMinFloat32)(pr) + for _, row := range rowsInGroup { + input, isNull, err := e.args[0].EvalReal(sctx, row) + if err != nil { + return err + } + if isNull { + continue + } + f := float32(input) + if p.isNull { + p.val = f + p.isNull = false + continue + } + if e.isMax && f > p.val || !e.isMax && f < p.val { + p.val = f + } + } + return nil +} + +func (e *maxMin4Float32) MergePartialResult(sctx sessionctx.Context, src, dst PartialResult) error { + p1, p2 := (*partialResult4MaxMinFloat32)(src), (*partialResult4MaxMinFloat32)(dst) + if p1.isNull { + return nil + } + if p2.isNull { + *p2 = *p1 + return nil + } + if e.isMax && p1.val > p2.val || !e.isMax && p1.val < p2.val { + p2.val, p2.isNull = p1.val, false + } + return nil +} + +type maxMin4Float64 struct { + baseMaxMinAggFunc +} + +func (e *maxMin4Float64) AllocPartialResult() PartialResult { + p := new(partialResult4MaxMinFloat64) + p.isNull = true + return PartialResult(p) +} + +func (e *maxMin4Float64) ResetPartialResult(pr PartialResult) { + p := (*partialResult4MaxMinFloat64)(pr) + p.val = 0 + p.isNull = true +} + +func (e *maxMin4Float64) AppendFinalResult2Chunk(sctx sessionctx.Context, pr PartialResult, chk *chunk.Chunk) error { + p := (*partialResult4MaxMinFloat64)(pr) + if p.isNull { + chk.AppendNull(e.ordinal) + return nil + } + chk.AppendFloat64(e.ordinal, p.val) + return nil +} + +func (e *maxMin4Float64) UpdatePartialResult(sctx sessionctx.Context, rowsInGroup []chunk.Row, pr PartialResult) error { + p := (*partialResult4MaxMinFloat64)(pr) + for _, row := range rowsInGroup { + input, isNull, err := e.args[0].EvalReal(sctx, row) + if err != nil { + return err + } + if isNull { + continue + } + if p.isNull { + p.val = input + p.isNull = false + continue + } + if e.isMax && input > p.val || !e.isMax && input < p.val { + p.val = input + } + } + return nil +} + +func (e *maxMin4Float64) MergePartialResult(sctx sessionctx.Context, src, dst PartialResult) error { + p1, p2 := (*partialResult4MaxMinFloat64)(src), (*partialResult4MaxMinFloat64)(dst) + if p1.isNull { + return nil + } + if p2.isNull { + *p2 = *p1 + return nil + } + if e.isMax && p1.val > p2.val || !e.isMax && p1.val < p2.val { + p2.val, p2.isNull = p1.val, false + } + return nil +} + +type maxMin4String struct { + baseMaxMinAggFunc +} + +func (e *maxMin4String) AllocPartialResult() PartialResult { + p := new(partialResult4MaxMinString) + p.isNull = true + return PartialResult(p) +} + +func (e *maxMin4String) ResetPartialResult(pr PartialResult) { + p := (*partialResult4MaxMinString)(pr) + p.isNull = true +} + +func (e *maxMin4String) AppendFinalResult2Chunk(sctx sessionctx.Context, pr PartialResult, chk *chunk.Chunk) error { + p := (*partialResult4MaxMinString)(pr) + if p.isNull { + chk.AppendNull(e.ordinal) + 
return nil + } + chk.AppendString(e.ordinal, p.val) + return nil +} + +func (e *maxMin4String) UpdatePartialResult(sctx sessionctx.Context, rowsInGroup []chunk.Row, pr PartialResult) error { + p := (*partialResult4MaxMinString)(pr) + for _, row := range rowsInGroup { + input, isNull, err := e.args[0].EvalString(sctx, row) + if err != nil { + return err + } + if isNull { + continue + } + if p.isNull { + // The string returned by `EvalString` may be referenced to an underlying buffer, + // for example ‘Chunk’, which could be reset and reused multiply times. + // We have to deep copy that string to avoid some potential risks + // when the content of that underlying buffer changed. + p.val = stringutil.Copy(input) + p.isNull = false + continue + } + cmp := types.CompareString(input, p.val) + if e.isMax && cmp == 1 || !e.isMax && cmp == -1 { + p.val = stringutil.Copy(input) + } + } + return nil +} + +func (e *maxMin4String) MergePartialResult(sctx sessionctx.Context, src, dst PartialResult) error { + p1, p2 := (*partialResult4MaxMinString)(src), (*partialResult4MaxMinString)(dst) + if p1.isNull { + return nil + } + if p2.isNull { + *p2 = *p1 + return nil + } + cmp := types.CompareString(p1.val, p2.val) + if e.isMax && cmp > 0 || !e.isMax && cmp < 0 { + p2.val, p2.isNull = p1.val, false + } + return nil +} diff --git a/executor/aggfuncs/func_max_min_test.go b/executor/aggfuncs/func_max_min_test.go new file mode 100644 index 0000000..f82cf7f --- /dev/null +++ b/executor/aggfuncs/func_max_min_test.go @@ -0,0 +1,63 @@ +// Copyright 2018 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package aggfuncs_test + +import ( + . 
"github.com/pingcap/check" + "github.com/pingcap/tidb/parser/ast" + "github.com/pingcap/tidb/parser/mysql" + "github.com/pingcap/tidb/types" +) + +func (s *testSuite) TestMergePartialResult4MaxMin(c *C) { + unsignedType := types.NewFieldType(mysql.TypeLonglong) + unsignedType.Flag |= mysql.UnsignedFlag + tests := []aggTest{ + buildAggTester(ast.AggFuncMax, mysql.TypeLonglong, 5, 4, 4, 4), + buildAggTesterWithFieldType(ast.AggFuncMax, unsignedType, 5, 4, 4, 4), + buildAggTester(ast.AggFuncMax, mysql.TypeFloat, 5, 4.0, 4.0, 4.0), + buildAggTester(ast.AggFuncMax, mysql.TypeDouble, 5, 4.0, 4.0, 4.0), + buildAggTester(ast.AggFuncMax, mysql.TypeString, 5, "4", "4", "4"), + + buildAggTester(ast.AggFuncMin, mysql.TypeLonglong, 5, 0, 2, 0), + buildAggTesterWithFieldType(ast.AggFuncMin, unsignedType, 5, 0, 2, 0), + buildAggTester(ast.AggFuncMin, mysql.TypeFloat, 5, 0.0, 2.0, 0.0), + buildAggTester(ast.AggFuncMin, mysql.TypeDouble, 5, 0.0, 2.0, 0.0), + buildAggTester(ast.AggFuncMin, mysql.TypeString, 5, "0", "2", "0"), + } + for _, test := range tests { + s.testMergePartialResult(c, test) + } +} + +func (s *testSuite) TestMaxMin(c *C) { + unsignedType := types.NewFieldType(mysql.TypeLonglong) + unsignedType.Flag |= mysql.UnsignedFlag + tests := []aggTest{ + buildAggTester(ast.AggFuncMax, mysql.TypeLonglong, 5, nil, 4), + buildAggTesterWithFieldType(ast.AggFuncMax, unsignedType, 5, nil, 4), + buildAggTester(ast.AggFuncMax, mysql.TypeFloat, 5, nil, 4.0), + buildAggTester(ast.AggFuncMax, mysql.TypeDouble, 5, nil, 4.0), + buildAggTester(ast.AggFuncMax, mysql.TypeString, 5, nil, "4", "4"), + + buildAggTester(ast.AggFuncMin, mysql.TypeLonglong, 5, nil, 0), + buildAggTesterWithFieldType(ast.AggFuncMin, unsignedType, 5, nil, 0), + buildAggTester(ast.AggFuncMin, mysql.TypeFloat, 5, nil, 0.0), + buildAggTester(ast.AggFuncMin, mysql.TypeDouble, 5, nil, 0.0), + buildAggTester(ast.AggFuncMin, mysql.TypeString, 5, nil, "0"), + } + for _, test := range tests { + s.testAggFunc(c, test) + } +} diff --git a/executor/aggfuncs/func_sum.go b/executor/aggfuncs/func_sum.go new file mode 100644 index 0000000..656fc96 --- /dev/null +++ b/executor/aggfuncs/func_sum.go @@ -0,0 +1,154 @@ +// Copyright 2018 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package aggfuncs + +import ( + "github.com/pingcap/tidb/sessionctx" + "github.com/pingcap/tidb/types" + "github.com/pingcap/tidb/util/chunk" +) + +type partialResult4SumFloat64 struct { + val float64 + isNull bool +} + +type partialResult4Int64 struct { + val int64 + isNull bool +} + +type baseSumAggFunc struct { + baseAggFunc +} + +type sum4Float64 struct { + baseSumAggFunc +} + +func (e *sum4Float64) AllocPartialResult() PartialResult { + p := new(partialResult4SumFloat64) + p.isNull = true + return PartialResult(p) +} + +func (e *sum4Float64) ResetPartialResult(pr PartialResult) { + p := (*partialResult4SumFloat64)(pr) + p.val = 0 + p.isNull = true +} + +func (e *sum4Float64) AppendFinalResult2Chunk(sctx sessionctx.Context, pr PartialResult, chk *chunk.Chunk) error { + p := (*partialResult4SumFloat64)(pr) + if p.isNull { + chk.AppendNull(e.ordinal) + return nil + } + chk.AppendFloat64(e.ordinal, p.val) + return nil +} + +func (e *sum4Float64) UpdatePartialResult(sctx sessionctx.Context, rowsInGroup []chunk.Row, pr PartialResult) error { + p := (*partialResult4SumFloat64)(pr) + for _, row := range rowsInGroup { + input, isNull, err := e.args[0].EvalReal(sctx, row) + if err != nil { + return err + } + if isNull { + continue + } + if p.isNull { + p.val = input + p.isNull = false + continue + } + p.val += input + } + return nil +} + +func (e *sum4Float64) MergePartialResult(sctx sessionctx.Context, src, dst PartialResult) error { + p1, p2 := (*partialResult4SumFloat64)(src), (*partialResult4SumFloat64)(dst) + if p1.isNull { + return nil + } + p2.val += p1.val + p2.isNull = false + return nil +} + +type sum4Int64 struct { + baseSumAggFunc +} + +func (e *sum4Int64) AllocPartialResult() PartialResult { + p := new(partialResult4Int64) + p.isNull = true + return PartialResult(p) +} + +func (e *sum4Int64) ResetPartialResult(pr PartialResult) { + p := (*partialResult4Int64)(pr) + p.isNull = true +} + +func (e *sum4Int64) AppendFinalResult2Chunk(sctx sessionctx.Context, pr PartialResult, chk *chunk.Chunk) error { + p := (*partialResult4Int64)(pr) + if p.isNull { + chk.AppendNull(e.ordinal) + return nil + } + chk.AppendInt64(e.ordinal, p.val) + return nil +} + +func (e *sum4Int64) UpdatePartialResult(sctx sessionctx.Context, rowsInGroup []chunk.Row, pr PartialResult) error { + p := (*partialResult4Int64)(pr) + for _, row := range rowsInGroup { + input, isNull, err := e.args[0].EvalInt(sctx, row) + if err != nil { + return err + } + if isNull { + continue + } + if p.isNull { + p.val = input + p.isNull = false + continue + } + + newSum, err := types.AddInt64(p.val, input) + if err != nil { + return err + } + p.val = newSum + } + return nil +} + +func (e *sum4Int64) MergePartialResult(sctx sessionctx.Context, src, dst PartialResult) error { + p1, p2 := (*partialResult4Int64)(src), (*partialResult4Int64)(dst) + if p1.isNull { + return nil + } + newSum, err := types.AddInt64(p1.val, p2.val) + if err != nil { + return err + } + p2.val = newSum + p2.isNull = false + return nil +} diff --git a/executor/aggfuncs/func_sum_test.go b/executor/aggfuncs/func_sum_test.go new file mode 100644 index 0000000..22e4816 --- /dev/null +++ b/executor/aggfuncs/func_sum_test.go @@ -0,0 +1,40 @@ +// Copyright 2018 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package aggfuncs_test + +import ( + . "github.com/pingcap/check" + "github.com/pingcap/tidb/parser/ast" + "github.com/pingcap/tidb/parser/mysql" +) + +func (s *testSuite) TestMergePartialResult4Sum(c *C) { + tests := []aggTest{ + buildAggTester(ast.AggFuncSum, mysql.TypeLonglong, 5, int64(10), int64(9), int64(19)), + buildAggTester(ast.AggFuncSum, mysql.TypeDouble, 5, 10.0, 9.0, 19.0), + } + for _, test := range tests { + s.testMergePartialResult(c, test) + } +} + +func (s *testSuite) TestSum(c *C) { + tests := []aggTest{ + buildAggTester(ast.AggFuncSum, mysql.TypeLonglong, 5, nil, int64(10)), + buildAggTester(ast.AggFuncSum, mysql.TypeDouble, 5, nil, 10.0), + } + for _, test := range tests { + s.testAggFunc(c, test) + } +} diff --git a/executor/aggregate.go b/executor/aggregate.go new file mode 100644 index 0000000..35c2000 --- /dev/null +++ b/executor/aggregate.go @@ -0,0 +1,588 @@ +// Copyright 2016 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package executor + +import ( + "context" + "sync" + + "github.com/cznic/mathutil" + "github.com/pingcap/errors" + "github.com/pingcap/tidb/executor/aggfuncs" + "github.com/pingcap/tidb/expression" + "github.com/pingcap/tidb/parser/mysql" + "github.com/pingcap/tidb/sessionctx" + "github.com/pingcap/tidb/sessionctx/stmtctx" + "github.com/pingcap/tidb/types" + "github.com/pingcap/tidb/util/chunk" + "github.com/pingcap/tidb/util/codec" + "github.com/pingcap/tidb/util/logutil" + "github.com/pingcap/tidb/util/set" + "go.uber.org/zap" +) + +type aggPartialResultMapper map[string][]aggfuncs.PartialResult + +// baseHashAggWorker stores the common attributes of HashAggFinalWorker and HashAggPartialWorker. +type baseHashAggWorker struct { + ctx sessionctx.Context + finishCh <-chan struct{} + aggFuncs []aggfuncs.AggFunc + maxChunkSize int +} + +func newBaseHashAggWorker(ctx sessionctx.Context, finishCh <-chan struct{}, aggFuncs []aggfuncs.AggFunc, maxChunkSize int) baseHashAggWorker { + return baseHashAggWorker{ + ctx: ctx, + finishCh: finishCh, + aggFuncs: aggFuncs, + maxChunkSize: maxChunkSize, + } +} + +// HashAggPartialWorker indicates the partial workers of parallel hash agg execution, +// the number of the worker can be set by `tidb_hashagg_partial_concurrency`. +type HashAggPartialWorker struct { + baseHashAggWorker + + inputCh chan *chunk.Chunk + outputChs []chan *HashAggIntermData + globalOutputCh chan *AfFinalResult + giveBackCh chan<- *HashAggInput + partialResultsMap aggPartialResultMapper + groupByItems []expression.Expression + groupKey [][]byte + // chk stores the input data from child, + // and is reused by childExec and partial worker. 
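+	// getChildInput swaps the columns of the chunk received from inputCh into chk and hands the
+	// emptied chunk back to the data fetcher through giveBackCh, so chunks are recycled rather
+	// than reallocated for every batch.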
+ chk *chunk.Chunk +} + +// HashAggFinalWorker indicates the final workers of parallel hash agg execution, +// the number of the worker can be set by `tidb_hashagg_final_concurrency`. +type HashAggFinalWorker struct { + baseHashAggWorker + + rowBuffer []types.Datum + mutableRow chunk.MutRow + partialResultMap aggPartialResultMapper + groupSet set.StringSet + inputCh chan *HashAggIntermData + outputCh chan *AfFinalResult + finalResultHolderCh chan *chunk.Chunk + groupKeys [][]byte +} + +// AfFinalResult indicates aggregation functions final result. +type AfFinalResult struct { + chk *chunk.Chunk + err error + giveBackCh chan *chunk.Chunk +} + +// HashAggExec deals with all the aggregate functions. +// It is built from the Aggregate Plan. When Next() is called, it reads all the data from Src +// and updates all the items in PartialAggFuncs. +// The parallel execution flow is as the following graph shows: +// +// +-------------+ +// | Main Thread | +// +------+------+ +// ^ +// | +// + +// +-+- +-+ +// | | ...... | | finalOutputCh +// +++- +-+ +// ^ +// | +// +---------------+ +// | | +// +--------------+ +--------------+ +// | final worker | ...... | final worker | +// +------------+-+ +-+------------+ +// ^ ^ +// | | +// +-+ +-+ ...... +-+ +// | | | | | | +// ... ... ... partialOutputChs +// | | | | | | +// +++ +++ +++ +// ^ ^ ^ +// +-+ | | | +// | | +--------o----+ | +// inputCh +-+ | +-----------------+---+ +// | | | | +// ... +---+------------+ +----+-----------+ +// | | | partial worker | ...... | partial worker | +// +++ +--------------+-+ +-+--------------+ +// | ^ ^ +// | | | +// +----v---------+ +++ +-+ +++ +// | data fetcher | +------> | | | | ...... | | partialInputChs +// +--------------+ +-+ +-+ +-+ +type HashAggExec struct { + baseExecutor + + sc *stmtctx.StatementContext + PartialAggFuncs []aggfuncs.AggFunc + FinalAggFuncs []aggfuncs.AggFunc + GroupByItems []expression.Expression + + finishCh chan struct{} + finalOutputCh chan *AfFinalResult + partialOutputChs []chan *HashAggIntermData + inputCh chan *HashAggInput + partialInputChs []chan *chunk.Chunk + partialWorkers []HashAggPartialWorker + finalWorkers []HashAggFinalWorker + defaultVal *chunk.Chunk + + // isChildReturnEmpty indicates whether the child executor only returns an empty input. + isChildReturnEmpty bool + prepared bool + executed bool +} + +// HashAggInput indicates the input of hash agg exec. +type HashAggInput struct { + chk *chunk.Chunk + // giveBackCh is bound with specific partial worker, + // it's used to reuse the `chk`, + // and tell the data-fetcher which partial worker it should send data to. + giveBackCh chan<- *chunk.Chunk +} + +// HashAggIntermData indicates the intermediate data of aggregation execution. +type HashAggIntermData struct { + groupKeys []string + cursor int + partialResultMap aggPartialResultMapper +} + +// getPartialResultBatch fetches a batch of partial results from HashAggIntermData. +func (d *HashAggIntermData) getPartialResultBatch(sc *stmtctx.StatementContext, prs [][]aggfuncs.PartialResult, aggFuncs []aggfuncs.AggFunc, maxChunkSize int) (_ [][]aggfuncs.PartialResult, groupKeys []string, reachEnd bool) { + keyStart := d.cursor + for ; d.cursor < len(d.groupKeys) && len(prs) < maxChunkSize; d.cursor++ { + prs = append(prs, d.partialResultMap[d.groupKeys[d.cursor]]) + } + if d.cursor == len(d.groupKeys) { + reachEnd = true + } + return prs, d.groupKeys[keyStart:d.cursor], reachEnd +} + +// Close implements the Executor Close interface. 
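+// It closes finishCh to tell every worker to exit and then drains all worker channels so that no
+// goroutine stays blocked on a send after the executor is closed.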
+func (e *HashAggExec) Close() error { + // `Close` may be called after `Open` without calling `Next` in test. + if !e.prepared { + close(e.inputCh) + for _, ch := range e.partialOutputChs { + close(ch) + } + for _, ch := range e.partialInputChs { + close(ch) + } + close(e.finalOutputCh) + } + close(e.finishCh) + for _, ch := range e.partialOutputChs { + for range ch { + } + } + for _, ch := range e.partialInputChs { + for range ch { + } + } + for range e.finalOutputCh { + } + e.executed = false + + return e.baseExecutor.Close() +} + +// Open implements the Executor Open interface. +func (e *HashAggExec) Open(ctx context.Context) error { + if err := e.baseExecutor.Open(ctx); err != nil { + return err + } + e.prepared = false + + e.initForParallelExec(e.ctx) + return nil +} + +func (e *HashAggExec) initForParallelExec(ctx sessionctx.Context) { + sessionVars := e.ctx.GetSessionVars() + finalConcurrency := sessionVars.HashAggFinalConcurrency + partialConcurrency := sessionVars.HashAggPartialConcurrency + e.isChildReturnEmpty = true + e.finalOutputCh = make(chan *AfFinalResult, finalConcurrency) + e.inputCh = make(chan *HashAggInput, partialConcurrency) + e.finishCh = make(chan struct{}, 1) + + e.partialInputChs = make([]chan *chunk.Chunk, partialConcurrency) + for i := range e.partialInputChs { + e.partialInputChs[i] = make(chan *chunk.Chunk, 1) + } + e.partialOutputChs = make([]chan *HashAggIntermData, finalConcurrency) + for i := range e.partialOutputChs { + e.partialOutputChs[i] = make(chan *HashAggIntermData, partialConcurrency) + } + + e.partialWorkers = make([]HashAggPartialWorker, partialConcurrency) + e.finalWorkers = make([]HashAggFinalWorker, finalConcurrency) + + // Init partial workers. + for i := 0; i < partialConcurrency; i++ { + w := HashAggPartialWorker{ + baseHashAggWorker: newBaseHashAggWorker(e.ctx, e.finishCh, e.PartialAggFuncs, e.maxChunkSize), + inputCh: e.partialInputChs[i], + outputChs: e.partialOutputChs, + giveBackCh: e.inputCh, + globalOutputCh: e.finalOutputCh, + partialResultsMap: make(aggPartialResultMapper), + groupByItems: e.GroupByItems, + chk: newFirstChunk(e.children[0]), + groupKey: make([][]byte, 0, 8), + } + + e.partialWorkers[i] = w + e.inputCh <- &HashAggInput{ + chk: newFirstChunk(e.children[0]), + giveBackCh: w.inputCh, + } + } + + // Init final workers. 
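+	// Each final worker consumes its own partialOutputChs[i] and recycles its result chunk through
+	// finalResultHolderCh once the main thread has read it.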
+ for i := 0; i < finalConcurrency; i++ { + e.finalWorkers[i] = HashAggFinalWorker{ + baseHashAggWorker: newBaseHashAggWorker(e.ctx, e.finishCh, e.FinalAggFuncs, e.maxChunkSize), + partialResultMap: make(aggPartialResultMapper), + groupSet: set.NewStringSet(), + inputCh: e.partialOutputChs[i], + outputCh: e.finalOutputCh, + finalResultHolderCh: make(chan *chunk.Chunk, 1), + rowBuffer: make([]types.Datum, 0, e.Schema().Len()), + mutableRow: chunk.MutRowFromTypes(retTypes(e)), + groupKeys: make([][]byte, 0, 8), + } + e.finalWorkers[i].finalResultHolderCh <- newFirstChunk(e) + } +} + +func (w *HashAggPartialWorker) getChildInput() bool { + select { + case <-w.finishCh: + return false + case chk, ok := <-w.inputCh: + if !ok { + return false + } + w.chk.SwapColumns(chk) + w.giveBackCh <- &HashAggInput{ + chk: chk, + giveBackCh: w.inputCh, + } + } + return true +} + +func recoveryHashAgg(output chan *AfFinalResult, r interface{}) { + err := errors.Errorf("%v", r) + output <- &AfFinalResult{err: errors.Errorf("%v", r)} + logutil.BgLogger().Error("parallel hash aggregation panicked", zap.Error(err)) +} + +func (w *HashAggPartialWorker) run(ctx sessionctx.Context, waitGroup *sync.WaitGroup, finalConcurrency int) { + needShuffle, sc := false, ctx.GetSessionVars().StmtCtx + defer func() { + if r := recover(); r != nil { + recoveryHashAgg(w.globalOutputCh, r) + } + if needShuffle { + w.shuffleIntermData(sc, finalConcurrency) + } + waitGroup.Done() + }() + for { + if !w.getChildInput() { + return + } + if err := w.updatePartialResult(ctx, sc, w.chk, len(w.partialResultsMap)); err != nil { + w.globalOutputCh <- &AfFinalResult{err: err} + return + } + // The intermData can be promised to be not empty if reaching here, + // so we set needShuffle to be true. + needShuffle = true + } +} + +func (w *HashAggPartialWorker) updatePartialResult(ctx sessionctx.Context, sc *stmtctx.StatementContext, chk *chunk.Chunk, finalConcurrency int) (err error) { + w.groupKey, err = getGroupKey(w.ctx, chk, w.groupKey, w.groupByItems) + if err != nil { + return err + } + + partialResults := w.getPartialResult(sc, w.groupKey, w.partialResultsMap) + numRows := chk.NumRows() + rows := make([]chunk.Row, 1) + for i := 0; i < numRows; i++ { + for j, af := range w.aggFuncs { + rows[0] = chk.GetRow(i) + if err = af.UpdatePartialResult(ctx, rows, partialResults[i][j]); err != nil { + return err + } + } + } + return nil +} + +// shuffleIntermData shuffles the intermediate data of partial workers to corresponded final workers. +// We only support parallel execution for single-machine, so process of encode and decode can be skipped. +func (w *HashAggPartialWorker) shuffleIntermData(sc *stmtctx.StatementContext, finalConcurrency int) { + // TODO: implement the method body. Shuffle the data to final workers. +} + +// getGroupKey evaluates the group items and args of aggregate functions. 
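+// Each row's group key is the concatenation of its hash-encoded group-by values, so rows that agree
+// on every group-by expression map to the same []byte key in the partial result map.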
+func getGroupKey(ctx sessionctx.Context, input *chunk.Chunk, groupKey [][]byte, groupByItems []expression.Expression) ([][]byte, error) { + numRows := input.NumRows() + avlGroupKeyLen := mathutil.Min(len(groupKey), numRows) + for i := 0; i < avlGroupKeyLen; i++ { + groupKey[i] = groupKey[i][:0] + } + for i := avlGroupKeyLen; i < numRows; i++ { + groupKey = append(groupKey, make([]byte, 0, 10*len(groupByItems))) + } + + for _, item := range groupByItems { + tp := item.GetType() + buf, err := expression.GetColumn(tp.EvalType(), numRows) + if err != nil { + return nil, err + } + + if err := expression.VecEval(ctx, item, input, buf); err != nil { + expression.PutColumn(buf) + return nil, err + } + // This check is used to avoid error during the execution of `EncodeDecimal`. + if item.GetType().Tp == mysql.TypeNewDecimal { + newTp := *tp + newTp.Flen = 0 + tp = &newTp + } + groupKey, err = codec.HashGroupKey(ctx.GetSessionVars().StmtCtx, input.NumRows(), buf, groupKey, tp) + if err != nil { + expression.PutColumn(buf) + return nil, err + } + expression.PutColumn(buf) + } + return groupKey, nil +} + +func (w baseHashAggWorker) getPartialResult(sc *stmtctx.StatementContext, groupKey [][]byte, mapper aggPartialResultMapper) [][]aggfuncs.PartialResult { + n := len(groupKey) + partialResults := make([][]aggfuncs.PartialResult, n) + for i := 0; i < n; i++ { + var ok bool + if partialResults[i], ok = mapper[string(groupKey[i])]; ok { + continue + } + for _, af := range w.aggFuncs { + partialResults[i] = append(partialResults[i], af.AllocPartialResult()) + } + mapper[string(groupKey[i])] = partialResults[i] + } + return partialResults +} + +func (w *HashAggFinalWorker) getPartialInput() (input *HashAggIntermData, ok bool) { + select { + case <-w.finishCh: + return nil, false + case input, ok = <-w.inputCh: + if !ok { + return nil, false + } + } + return +} + +func (w *HashAggFinalWorker) consumeIntermData(sctx sessionctx.Context) (err error) { + // TODO: implement the method body. This method consumes the data given by the partial workers. 
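+	// A complete implementation would loop over getPartialInput, expand every HashAggIntermData batch
+	// via getPartialResultBatch, register unseen group keys in groupSet, and merge the decoded partial
+	// results into partialResultMap with the aggregate functions' MergePartialResult.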
+ return nil +} + +func (w *HashAggFinalWorker) getFinalResult(sctx sessionctx.Context) { + result, finished := w.receiveFinalResultHolder() + if finished { + return + } + w.groupKeys = w.groupKeys[:0] + for groupKey := range w.groupSet { + w.groupKeys = append(w.groupKeys, []byte(groupKey)) + } + partialResults := w.getPartialResult(sctx.GetSessionVars().StmtCtx, w.groupKeys, w.partialResultMap) + for i := 0; i < len(w.groupSet); i++ { + for j, af := range w.aggFuncs { + if err := af.AppendFinalResult2Chunk(sctx, partialResults[i][j], result); err != nil { + logutil.BgLogger().Error("HashAggFinalWorker failed to append final result to Chunk", zap.Error(err)) + } + } + if len(w.aggFuncs) == 0 { + result.SetNumVirtualRows(result.NumRows() + 1) + } + if result.IsFull() { + w.outputCh <- &AfFinalResult{chk: result, giveBackCh: w.finalResultHolderCh} + result, finished = w.receiveFinalResultHolder() + if finished { + return + } + } + } + w.outputCh <- &AfFinalResult{chk: result, giveBackCh: w.finalResultHolderCh} +} + +func (w *HashAggFinalWorker) receiveFinalResultHolder() (*chunk.Chunk, bool) { + select { + case <-w.finishCh: + return nil, true + case result, ok := <-w.finalResultHolderCh: + return result, !ok + } +} + +func (w *HashAggFinalWorker) run(ctx sessionctx.Context, waitGroup *sync.WaitGroup) { + defer func() { + if r := recover(); r != nil { + recoveryHashAgg(w.outputCh, r) + } + waitGroup.Done() + }() + if err := w.consumeIntermData(ctx); err != nil { + w.outputCh <- &AfFinalResult{err: err} + } + w.getFinalResult(ctx) +} + +// Next implements the Executor Next interface. +func (e *HashAggExec) Next(ctx context.Context, req *chunk.Chunk) error { + req.Reset() + return e.parallelExec(ctx, req) +} + +func (e *HashAggExec) fetchChildData(ctx context.Context) { + var ( + input *HashAggInput + chk *chunk.Chunk + ok bool + err error + ) + defer func() { + if r := recover(); r != nil { + recoveryHashAgg(e.finalOutputCh, r) + } + for i := range e.partialInputChs { + close(e.partialInputChs[i]) + } + }() + for { + select { + case <-e.finishCh: + return + case input, ok = <-e.inputCh: + if !ok { + return + } + chk = input.chk + } + err = Next(ctx, e.children[0], chk) + if err != nil { + e.finalOutputCh <- &AfFinalResult{err: err} + return + } + if chk.NumRows() == 0 { + return + } + input.giveBackCh <- chk + } +} + +func (e *HashAggExec) waitPartialWorkerAndCloseOutputChs(waitGroup *sync.WaitGroup) { + waitGroup.Wait() + for _, ch := range e.partialOutputChs { + close(ch) + } +} + +func (e *HashAggExec) waitFinalWorkerAndCloseFinalOutput(waitGroup *sync.WaitGroup) { + waitGroup.Wait() + close(e.finalOutputCh) +} + +func (e *HashAggExec) prepare4ParallelExec(ctx context.Context) { + go e.fetchChildData(ctx) + + partialWorkerWaitGroup := &sync.WaitGroup{} + partialWorkerWaitGroup.Add(len(e.partialWorkers)) + for i := range e.partialWorkers { + go e.partialWorkers[i].run(e.ctx, partialWorkerWaitGroup, len(e.finalWorkers)) + } + go e.waitPartialWorkerAndCloseOutputChs(partialWorkerWaitGroup) + + finalWorkerWaitGroup := &sync.WaitGroup{} + finalWorkerWaitGroup.Add(len(e.finalWorkers)) + for i := range e.finalWorkers { + go e.finalWorkers[i].run(e.ctx, finalWorkerWaitGroup) + } + go e.waitFinalWorkerAndCloseFinalOutput(finalWorkerWaitGroup) +} + +// HashAggExec employs one input reader, M partial workers and N final workers to execute parallelly. +// The parallel execution flow is: +// 1. input reader reads data from child executor and send them to partial workers. +// 2. 
partial worker receives the input data, updates the partial results, and shuffle the partial results to the final workers. +// 3. final worker receives partial results from all the partial workers, evaluates the final results and sends the final results to the main thread. +func (e *HashAggExec) parallelExec(ctx context.Context, chk *chunk.Chunk) error { + if !e.prepared { + e.prepare4ParallelExec(ctx) + e.prepared = true + } + + if e.executed { + return nil + } + for { + result, ok := <-e.finalOutputCh + if !ok { + e.executed = true + if e.isChildReturnEmpty && e.defaultVal != nil { + chk.Append(e.defaultVal, 0, 1) + } + return nil + } + if result.err != nil { + return result.err + } + chk.SwapColumns(result.chk) + result.chk.Reset() + result.giveBackCh <- result.chk + if chk.NumRows() > 0 { + e.isChildReturnEmpty = false + return nil + } + } +} diff --git a/executor/aggregate_test.go b/executor/aggregate_test.go new file mode 100644 index 0000000..970ef7e --- /dev/null +++ b/executor/aggregate_test.go @@ -0,0 +1,99 @@ +// Copyright 2015 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package executor_test + +import ( + . "github.com/pingcap/check" + "github.com/pingcap/tidb/util/testkit" + "github.com/pingcap/tidb/util/testutil" +) + +type testSuiteAgg struct { + *baseTestSuite + testData testutil.TestData +} + +func (s *testSuiteAgg) SetUpSuite(c *C) { + s.baseTestSuite.SetUpSuite(c) + var err error + s.testData, err = testutil.LoadTestSuiteData("testdata", "agg_suite") + c.Assert(err, IsNil) +} + +func (s *testSuiteAgg) TearDownSuite(c *C) { + s.baseTestSuite.TearDownSuite(c) + c.Assert(s.testData.GenerateOutputIfNeeded(), IsNil) +} + +func (s *testSuiteAgg) TestSelectDistinct(c *C) { + tk := testkit.NewTestKit(c, s.store) + tk.MustExec("use test") + s.fillData(tk, "select_distinct_test") + + tk.MustExec("begin") + r := tk.MustQuery("select distinct name from select_distinct_test;") + r.Check(testkit.Rows("hello")) + tk.MustExec("commit") + +} + +func (s *testSuiteAgg) TestAggPushDown(c *C) { + tk := testkit.NewTestKit(c, s.store) + tk.MustExec("use test") + tk.MustExec("drop table if exists t") + tk.MustExec("create table t (a int, b int, c int)") + tk.MustExec("alter table t add index idx(a, b, c)") + // test for empty table + tk.MustQuery("select count(a) from t group by a;").Check(testkit.Rows()) + tk.MustQuery("select count(a) from t;").Check(testkit.Rows("0")) + // test for one row + tk.MustExec("insert t values(0,0,0)") + tk.MustQuery("select distinct b from t").Check(testkit.Rows("0")) + tk.MustQuery("select count(b) from t group by a;").Check(testkit.Rows("1")) + // test for rows + tk.MustExec("insert t values(1,1,1),(3,3,6),(3,2,5),(2,1,4),(1,1,3),(1,1,2);") + tk.MustQuery("select count(a) from t where b>0 group by a, b;").Sort().Check(testkit.Rows("1", "1", "1", "3")) + tk.MustQuery("select count(a) from t where b>0 group by a, b order by a;").Check(testkit.Rows("3", "1", "1", "1")) + tk.MustQuery("select count(a) from t where b>0 group by a, b order by a limit 1;").Check(testkit.Rows("3")) +} + +func (s *testSuiteAgg) 
TestAggEliminator(c *C) {
+	tk := testkit.NewTestKitWithInit(c, s.store)
+
+	tk.MustExec("create table t(a int primary key, b int)")
+	tk.MustQuery("select min(a), min(a) from t").Check(testkit.Rows("<nil> <nil>"))
+	tk.MustExec("insert into t values(1, -1), (2, -2), (3, 1), (4, NULL)")
+	tk.MustQuery("select max(a) from t").Check(testkit.Rows("4"))
+	tk.MustQuery("select min(b) from t").Check(testkit.Rows("-2"))
+	tk.MustQuery("select max(b*b) from t").Check(testkit.Rows("4"))
+	tk.MustQuery("select min(b*b) from t").Check(testkit.Rows("1"))
+}
+
+func (s *testSuiteAgg) TestInjectProjBelowTopN(c *C) {
+	tk := testkit.NewTestKitWithInit(c, s.store)
+	tk.MustExec("drop table if exists t;")
+	tk.MustExec("create table t (i int);")
+	tk.MustExec("insert into t values (1), (1), (1),(2),(3),(2),(3),(2),(3);")
+	var (
+		input []string
+		output [][]string
+	)
+	s.testData.GetTestCases(c, &input, &output)
+	for i, tt := range input {
+		s.testData.OnRecord(func() {
+			output[i] = s.testData.ConvertRowsToStrings(tk.MustQuery(tt).Rows())
+		})
+		tk.MustQuery(tt).Check(testkit.Rows(output[i]...))
+	}
+}
diff --git a/executor/analyze.go b/executor/analyze.go
new file mode 100644
index 0000000..34bc833
--- /dev/null
+++ b/executor/analyze.go
@@ -0,0 +1,475 @@
+// Copyright 2017 PingCAP, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package executor
+
+import (
+	"context"
+	"math"
+	"runtime"
+	"strconv"
+	"sync"
+
+	"github.com/pingcap/errors"
+	"github.com/pingcap/failpoint"
+	"github.com/pingcap/tidb/distsql"
+	"github.com/pingcap/tidb/domain"
+	"github.com/pingcap/tidb/infoschema"
+	"github.com/pingcap/tidb/parser/model"
+	"github.com/pingcap/tidb/parser/mysql"
+	"github.com/pingcap/tidb/sessionctx"
+	"github.com/pingcap/tidb/sessionctx/variable"
+	"github.com/pingcap/tidb/statistics"
+	"github.com/pingcap/tidb/tablecodec"
+	"github.com/pingcap/tidb/util/chunk"
+	"github.com/pingcap/tidb/util/logutil"
+	"github.com/pingcap/tidb/util/ranger"
+	"github.com/pingcap/tipb/go-tipb"
+	"go.uber.org/zap"
+)
+
+var _ Executor = &AnalyzeExec{}
+
+// AnalyzeExec represents Analyze executor.
+type AnalyzeExec struct {
+	baseExecutor
+	tasks []*analyzeTask
+	wg *sync.WaitGroup
+}
+
+var (
+	// RandSeed is the seed for the rand package.
+	// It's public for test.
+	RandSeed = int64(1)
+)
+
+const (
+	defaultMaxSampleSize = 10000
+	maxRegionSampleSize = 1000
+	maxSketchSize = 10000
+	defaultCMSketchWidth = 2048
+	defaultCMSketchDepth = 5
+	defaultNumBuckets = 256
+)
+
+// Next implements the Executor Next interface.
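+// It starts one analyze worker per tidb_build_stats_concurrency, dispatches every collected task
+// through taskCh, saves each returned histogram and CM sketch via the statistics handle, and finally
+// refreshes the cached table statistics.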
+func (e *AnalyzeExec) Next(ctx context.Context, req *chunk.Chunk) error { + concurrency, err := getBuildStatsConcurrency(e.ctx) + if err != nil { + return err + } + taskCh := make(chan *analyzeTask, len(e.tasks)) + resultCh := make(chan analyzeResult, len(e.tasks)) + e.wg.Add(concurrency) + for i := 0; i < concurrency; i++ { + go e.analyzeWorker(taskCh, resultCh, i == 0) + } + for _, task := range e.tasks { + taskCh <- task + } + close(taskCh) + statsHandle := domain.GetDomain(e.ctx).StatsHandle() + panicCnt := 0 + for panicCnt < concurrency { + result, ok := <-resultCh + if !ok { + break + } + if result.Err != nil { + err = result.Err + if err == errAnalyzeWorkerPanic { + panicCnt++ + } else { + logutil.Logger(ctx).Error("analyze failed", zap.Error(err)) + } + continue + } + for i, hg := range result.Hist { + err1 := statsHandle.SaveStatsToStorage(result.PhysicalTableID, result.Count, result.IsIndex, hg, result.Cms[i]) + if err1 != nil { + err = err1 + logutil.Logger(ctx).Error("save stats to storage failed", zap.Error(err)) + continue + } + } + } + if err != nil { + return err + } + return statsHandle.Update(infoschema.GetInfoSchema(e.ctx)) +} + +func getBuildStatsConcurrency(ctx sessionctx.Context) (int, error) { + sessionVars := ctx.GetSessionVars() + concurrency, err := variable.GetSessionSystemVar(sessionVars, variable.TiDBBuildStatsConcurrency) + if err != nil { + return 0, err + } + c, err := strconv.ParseInt(concurrency, 10, 64) + return int(c), err +} + +type taskType int + +const ( + colTask taskType = iota + idxTask +) + +type analyzeTask struct { + taskType taskType + idxExec *AnalyzeIndexExec + colExec *AnalyzeColumnsExec +} + +var errAnalyzeWorkerPanic = errors.New("analyze worker panic") + +func (e *AnalyzeExec) analyzeWorker(taskCh <-chan *analyzeTask, resultCh chan<- analyzeResult, isCloseChanThread bool) { + var task *analyzeTask + defer func() { + if r := recover(); r != nil { + buf := make([]byte, 4096) + stackSize := runtime.Stack(buf, false) + buf = buf[:stackSize] + logutil.BgLogger().Error("analyze worker panicked", zap.String("stack", string(buf))) + + resultCh <- analyzeResult{ + Err: errAnalyzeWorkerPanic, + } + } + e.wg.Done() + if isCloseChanThread { + e.wg.Wait() + close(resultCh) + } + }() + for { + var ok bool + task, ok = <-taskCh + if !ok { + break + } + switch task.taskType { + case colTask: + resultCh <- analyzeColumnsPushdown(task.colExec) + case idxTask: + resultCh <- analyzeIndexPushdown(task.idxExec) + } + } +} + +func analyzeIndexPushdown(idxExec *AnalyzeIndexExec) analyzeResult { + ranges := ranger.FullRange() + // For single-column index, we do not load null rows from TiKV, so the built histogram would not include + // null values, and its `NullCount` would be set by result of another distsql call to get null rows. + // For multi-column index, we cannot define null for the rows, so we still use full range, and the rows + // containing null fields would exist in built histograms. Note that, the `NullCount` of histograms for + // multi-column index is always 0 then. 
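+	// For the single-column case the null count is filled in later from the extra NullRange request
+	// issued by AnalyzeIndexExec.open; see buildStats.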
+ if len(idxExec.idxInfo.Columns) == 1 { + ranges = ranger.FullNotNullRange() + } + hist, cms, err := idxExec.buildStats(ranges, true) + if err != nil { + return analyzeResult{Err: err} + } + result := analyzeResult{ + PhysicalTableID: idxExec.physicalTableID, + Hist: []*statistics.Histogram{hist}, + Cms: []*statistics.CMSketch{cms}, + IsIndex: 1, + } + result.Count = hist.NullCount + if hist.Len() > 0 { + result.Count += hist.Buckets[hist.Len()-1].Count + } + return result +} + +// AnalyzeIndexExec represents analyze index push down executor. +type AnalyzeIndexExec struct { + ctx sessionctx.Context + physicalTableID int64 + idxInfo *model.IndexInfo + concurrency int + analyzePB *tipb.AnalyzeReq + result distsql.SelectResult + countNullRes distsql.SelectResult +} + +// fetchAnalyzeResult builds and dispatches the `kv.Request` from given ranges, and stores the `SelectResult` +// in corresponding fields based on the input `isNullRange` argument, which indicates if the range is the +// special null range for single-column index to get the null count. +func (e *AnalyzeIndexExec) fetchAnalyzeResult(ranges []*ranger.Range, isNullRange bool) error { + var builder distsql.RequestBuilder + kvReq, err := builder.SetIndexRanges(e.ctx.GetSessionVars().StmtCtx, e.physicalTableID, e.idxInfo.ID, ranges). + SetAnalyzeRequest(e.analyzePB). + SetStartTS(math.MaxUint64). + SetKeepOrder(true). + SetConcurrency(e.concurrency). + Build() + if err != nil { + return err + } + ctx := context.TODO() + result, err := distsql.Analyze(ctx, e.ctx.GetClient(), kvReq, e.ctx.GetSessionVars().KVVars) + if err != nil { + return err + } + if isNullRange { + e.countNullRes = result + } else { + e.result = result + } + return nil +} + +func (e *AnalyzeIndexExec) open(ranges []*ranger.Range, considerNull bool) error { + err := e.fetchAnalyzeResult(ranges, false) + if err != nil { + return err + } + if considerNull && len(e.idxInfo.Columns) == 1 { + ranges = ranger.NullRange() + err = e.fetchAnalyzeResult(ranges, true) + if err != nil { + return err + } + } + return nil +} + +func (e *AnalyzeIndexExec) buildStatsFromResult(result distsql.SelectResult, needCMS bool) (*statistics.Histogram, *statistics.CMSketch, error) { + failpoint.Inject("buildStatsFromResult", func(val failpoint.Value) { + if val.(bool) { + failpoint.Return(nil, nil, errors.New("mock buildStatsFromResult error")) + } + }) + hist := &statistics.Histogram{} + var cms *statistics.CMSketch + if needCMS { + cms = statistics.NewCMSketch(int32(defaultCMSketchDepth), int32(defaultCMSketchWidth)) + } + for { + data, err := result.NextRaw(context.TODO()) + if err != nil { + return nil, nil, err + } + if data == nil { + break + } + resp := &tipb.AnalyzeIndexResp{} + err = resp.Unmarshal(data) + if err != nil { + return nil, nil, err + } + respHist := statistics.HistogramFromProto(resp.Hist) + hist, err = statistics.MergeHistograms(e.ctx.GetSessionVars().StmtCtx, hist, respHist, defaultNumBuckets) + if err != nil { + return nil, nil, err + } + if needCMS { + if resp.Cms == nil { + logutil.Logger(context.TODO()).Warn("nil CMS in response", zap.String("table", e.idxInfo.Table.O), zap.String("index", e.idxInfo.Name.O)) + } else if err := cms.MergeCMSketch(statistics.CMSketchFromProto(resp.Cms)); err != nil { + return nil, nil, err + } + } + } + return hist, cms, nil +} + +func (e *AnalyzeIndexExec) buildStats(ranges []*ranger.Range, considerNull bool) (hist *statistics.Histogram, cms *statistics.CMSketch, err error) { + if err = e.open(ranges, considerNull); err != nil { + 
return nil, nil, err + } + defer func() { + err1 := closeAll(e.result, e.countNullRes) + if err == nil { + err = err1 + } + }() + hist, cms, err = e.buildStatsFromResult(e.result, true) + if err != nil { + return nil, nil, err + } + if e.countNullRes != nil { + nullHist, _, err := e.buildStatsFromResult(e.countNullRes, false) + if err != nil { + return nil, nil, err + } + if l := nullHist.Len(); l > 0 { + hist.NullCount = nullHist.Buckets[l-1].Count + } + } + hist.ID = e.idxInfo.ID + return hist, cms, nil +} + +func analyzeColumnsPushdown(colExec *AnalyzeColumnsExec) analyzeResult { + var ranges []*ranger.Range + if colExec.pkInfo != nil { + ranges = ranger.FullIntRange(mysql.HasUnsignedFlag(colExec.pkInfo.Flag)) + } else { + ranges = ranger.FullIntRange(false) + } + hists, cms, err := colExec.buildStats(ranges) + if err != nil { + return analyzeResult{Err: err} + } + result := analyzeResult{ + PhysicalTableID: colExec.physicalTableID, + Hist: hists, + Cms: cms, + } + hist := hists[0] + result.Count = hist.NullCount + if hist.Len() > 0 { + result.Count += hist.Buckets[hist.Len()-1].Count + } + return result +} + +// AnalyzeColumnsExec represents Analyze columns push down executor. +type AnalyzeColumnsExec struct { + ctx sessionctx.Context + physicalTableID int64 + colsInfo []*model.ColumnInfo + pkInfo *model.ColumnInfo + concurrency int + analyzePB *tipb.AnalyzeReq + resultHandler *tableResultHandler +} + +func (e *AnalyzeColumnsExec) open(ranges []*ranger.Range) error { + e.resultHandler = &tableResultHandler{} + firstPartRanges, secondPartRanges := splitRanges(ranges, true, false) + firstResult, err := e.buildResp(firstPartRanges) + if err != nil { + return err + } + if len(secondPartRanges) == 0 { + e.resultHandler.open(nil, firstResult) + return nil + } + var secondResult distsql.SelectResult + secondResult, err = e.buildResp(secondPartRanges) + if err != nil { + return err + } + e.resultHandler.open(firstResult, secondResult) + + return nil +} + +func (e *AnalyzeColumnsExec) buildResp(ranges []*ranger.Range) (distsql.SelectResult, error) { + var builder distsql.RequestBuilder + // Always set KeepOrder of the request to be true, in order to compute + // correct `correlation` of columns. + kvReq, err := builder.SetTableRanges(e.physicalTableID, ranges). + SetAnalyzeRequest(e.analyzePB). + SetStartTS(math.MaxUint64). + SetKeepOrder(true). + SetConcurrency(e.concurrency). 
+ Build() + if err != nil { + return nil, err + } + ctx := context.TODO() + return distsql.Analyze(ctx, e.ctx.GetClient(), kvReq, e.ctx.GetSessionVars().KVVars) +} + +func (e *AnalyzeColumnsExec) buildStats(ranges []*ranger.Range) (hists []*statistics.Histogram, cms []*statistics.CMSketch, err error) { + if err = e.open(ranges); err != nil { + return nil, nil, err + } + defer func() { + if err1 := e.resultHandler.Close(); err1 != nil { + hists = nil + cms = nil + err = err1 + } + }() + pkHist := &statistics.Histogram{} + collectors := make([]*statistics.SampleCollector, len(e.colsInfo)) + for i := range collectors { + collectors[i] = &statistics.SampleCollector{ + IsMerger: true, + FMSketch: statistics.NewFMSketch(maxSketchSize), + MaxSampleSize: int64(defaultMaxSampleSize), + CMSketch: statistics.NewCMSketch(int32(defaultCMSketchDepth), int32(defaultCMSketchWidth)), + } + } + for { + data, err1 := e.resultHandler.nextRaw(context.TODO()) + if err1 != nil { + return nil, nil, err1 + } + if data == nil { + break + } + resp := &tipb.AnalyzeColumnsResp{} + err = resp.Unmarshal(data) + if err != nil { + return nil, nil, err + } + sc := e.ctx.GetSessionVars().StmtCtx + if e.pkInfo != nil { + respHist := statistics.HistogramFromProto(resp.PkHist) + pkHist, err = statistics.MergeHistograms(sc, pkHist, respHist, defaultNumBuckets) + if err != nil { + return nil, nil, err + } + } + for i, rc := range resp.Collectors { + respSample := statistics.SampleCollectorFromProto(rc) + collectors[i].MergeSampleCollector(sc, respSample) + } + } + timeZone := e.ctx.GetSessionVars().Location() + if e.pkInfo != nil { + pkHist.ID = e.pkInfo.ID + err = pkHist.DecodeTo(&e.pkInfo.FieldType, timeZone) + if err != nil { + return nil, nil, err + } + hists = append(hists, pkHist) + cms = append(cms, nil) + } + for i, col := range e.colsInfo { + for j, s := range collectors[i].Samples { + collectors[i].Samples[j].Ordinal = j + collectors[i].Samples[j].Value, err = tablecodec.DecodeColumnValue(s.Value.GetBytes(), &col.FieldType, timeZone) + if err != nil { + return nil, nil, err + } + } + hg, err := statistics.BuildColumn(e.ctx, int64(defaultNumBuckets), col.ID, collectors[i], &col.FieldType) + if err != nil { + return nil, nil, err + } + hists = append(hists, hg) + cms = append(cms, collectors[i].CMSketch) + } + return hists, cms, nil +} + +// analyzeResult is used to represent analyze result. +type analyzeResult struct { + // PhysicalTableID is the id of a partition or a table. + PhysicalTableID int64 + Hist []*statistics.Histogram + Cms []*statistics.CMSketch + Count int64 + IsIndex int + Err error +} diff --git a/executor/analyze_test.go b/executor/analyze_test.go new file mode 100644 index 0000000..a54c2d9 --- /dev/null +++ b/executor/analyze_test.go @@ -0,0 +1,41 @@ +// Copyright 2018 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package executor_test + +import ( + . 
"github.com/pingcap/check" + "github.com/pingcap/tidb/kv" + "github.com/pingcap/tidb/sessionctx" + "github.com/pingcap/tidb/util/testkit" +) + +func (s *testSuite1) TestAnalyzeReplicaReadFollower(c *C) { + tk := testkit.NewTestKit(c, s.store) + tk.MustExec("use test") + tk.MustExec("drop table if exists t") + tk.MustExec("create table t(a int)") + ctx := tk.Se.(sessionctx.Context) + ctx.GetSessionVars().SetReplicaRead(kv.ReplicaReadFollower) + tk.MustExec("analyze table t") +} + +func (s *testSuite1) TestAnalyzeRestrict(c *C) { + tk := testkit.NewTestKit(c, s.store) + tk.MustExec("use test") + tk.MustExec("drop table if exists t") + tk.MustExec("create table t(a int)") + ctx := tk.Se.(sessionctx.Context) + ctx.GetSessionVars().InRestrictedSQL = true + tk.MustExec("analyze table t") +} diff --git a/executor/batch_checker.go b/executor/batch_checker.go new file mode 100644 index 0000000..97d5556 --- /dev/null +++ b/executor/batch_checker.go @@ -0,0 +1,184 @@ +// Copyright 2018 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package executor + +import ( + "context" + "strconv" + + "github.com/pingcap/tidb/kv" + "github.com/pingcap/tidb/parser/model" + "github.com/pingcap/tidb/sessionctx" + "github.com/pingcap/tidb/table" + "github.com/pingcap/tidb/table/tables" + "github.com/pingcap/tidb/tablecodec" + "github.com/pingcap/tidb/types" +) + +type keyValue struct { + key kv.Key + value []byte +} + +type keyValueWithDupInfo struct { + newKV keyValue + dupErr error +} + +type toBeCheckedRow struct { + row []types.Datum + rowValue []byte + handleKey *keyValueWithDupInfo + uniqueKeys []*keyValueWithDupInfo + // t is the table or partition this row belongs to. + t table.Table +} + +// encodeNewRow encodes a new row to value. +func encodeNewRow(ctx sessionctx.Context, t table.Table, row []types.Datum) ([]byte, error) { + colIDs := make([]int64, 0, len(row)) + skimmedRow := make([]types.Datum, 0, len(row)) + for _, col := range t.Cols() { + if !tables.CanSkip(t.Meta(), col, row[col.Offset]) { + colIDs = append(colIDs, col.ID) + skimmedRow = append(skimmedRow, row[col.Offset]) + } + } + rd := &ctx.GetSessionVars().RowEncoder + newRowValue, err := tablecodec.EncodeRow(ctx.GetSessionVars().StmtCtx, skimmedRow, colIDs, nil, nil, rd) + if err != nil { + return nil, err + } + return newRowValue, nil +} + +// getKeysNeedCheck gets keys converted from to-be-insert rows to record keys and unique index keys, +// which need to be checked whether they are duplicate keys. +func getKeysNeedCheck(ctx context.Context, sctx sessionctx.Context, t table.Table, rows [][]types.Datum) ([]toBeCheckedRow, error) { + nUnique := 0 + for _, v := range t.WritableIndices() { + if v.Meta().Unique { + nUnique++ + } + } + toBeCheckRows := make([]toBeCheckedRow, 0, len(rows)) + + var handleCol *table.Column + // Get handle column if PK is handle. 
+ if t.Meta().PKIsHandle { + for _, col := range t.Cols() { + if col.IsPKHandleColumn(t.Meta()) { + handleCol = col + break + } + } + } + + var err error + for _, row := range rows { + toBeCheckRows, err = getKeysNeedCheckOneRow(sctx, t, row, nUnique, handleCol, toBeCheckRows) + if err != nil { + return nil, err + } + } + return toBeCheckRows, nil +} + +func getKeysNeedCheckOneRow(ctx sessionctx.Context, t table.Table, row []types.Datum, nUnique int, handleCol *table.Column, result []toBeCheckedRow) ([]toBeCheckedRow, error) { + var err error + var handleKey *keyValueWithDupInfo + uniqueKeys := make([]*keyValueWithDupInfo, 0, nUnique) + newRowValue, err := encodeNewRow(ctx, t, row) + if err != nil { + return nil, err + } + // Append record keys and errors. + if handleCol != nil { + handle := row[handleCol.Offset].GetInt64() + handleKey = &keyValueWithDupInfo{ + newKV: keyValue{ + key: t.RecordKey(handle), + value: newRowValue, + }, + dupErr: kv.ErrKeyExists.FastGenByArgs(strconv.FormatInt(handle, 10), "PRIMARY"), + } + } + + // append unique keys and errors + for _, v := range t.WritableIndices() { + if !v.Meta().Unique { + continue + } + colVals, err1 := v.FetchValues(row, nil) + if err1 != nil { + return nil, err1 + } + // Pass handle = 0 to GenIndexKey, + // due to we only care about distinct key. + key, distinct, err1 := v.GenIndexKey(ctx.GetSessionVars().StmtCtx, + colVals, 0, nil) + if err1 != nil { + return nil, err1 + } + // Skip the non-distinct keys. + if !distinct { + continue + } + colValStr, err1 := types.DatumsToString(colVals, false) + if err1 != nil { + return nil, err1 + } + uniqueKeys = append(uniqueKeys, &keyValueWithDupInfo{ + newKV: keyValue{ + key: key, + }, + dupErr: kv.ErrKeyExists.FastGenByArgs(colValStr, v.Meta().Name), + }) + } + result = append(result, toBeCheckedRow{ + row: row, + rowValue: newRowValue, + handleKey: handleKey, + uniqueKeys: uniqueKeys, + t: t, + }) + return result, nil +} + +// getOldRow gets the table record row from storage for batch check. +// t could be a normal table or a partition, but it must not be a PartitionedTable. +func getOldRow(ctx context.Context, sctx sessionctx.Context, txn kv.Transaction, t table.Table, handle int64) ([]types.Datum, error) { + oldValue, err := txn.Get(ctx, t.RecordKey(handle)) + if err != nil { + return nil, err + } + + cols := t.WritableCols() + oldRow, oldRowMap, err := tables.DecodeRawRowData(sctx, t.Meta(), handle, cols, oldValue) + if err != nil { + return nil, err + } + for _, col := range cols { + if col.State != model.StatePublic && oldRow[col.Offset].IsNull() { + _, found := oldRowMap[col.ID] + if !found { + oldRow[col.Offset], err = table.GetColOriginDefaultValue(sctx, col.ToInfo()) + if err != nil { + return nil, err + } + } + } + } + return oldRow, nil +} diff --git a/executor/benchmark_test.go b/executor/benchmark_test.go new file mode 100644 index 0000000..1028af7 --- /dev/null +++ b/executor/benchmark_test.go @@ -0,0 +1,457 @@ +// Copyright 2019 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. 
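The batch checker completed above prepares, for every to-be-inserted row, its record key plus one key per unique index, and pairs each key with the duplicate-key error to report if that key already exists. A rough sketch of that shape, with toy types and fake key encodings standing in for TiDB's kv keys:

```go
package main

import "fmt"

// keyWithDupErr pairs an encoded key with the duplicate-key error to surface
// if that key turns out to already exist (compare keyValueWithDupInfo above).
type keyWithDupErr struct {
	key    string
	dupErr error
}

// toCheck is what gets collected per row: the record (handle) key plus one
// entry per unique index.
type toCheck struct {
	handleKey  *keyWithDupErr
	uniqueKeys []keyWithDupErr
}

// buildChecks is a toy stand-in for getKeysNeedCheckOneRow: handle is the
// integer primary key and uniqueVals maps a unique-index name to its value.
// The key formats are fake; TiDB uses tablecodec record and index keys.
func buildChecks(handle int64, uniqueVals map[string]string) toCheck {
	tc := toCheck{
		handleKey: &keyWithDupErr{
			key:    fmt.Sprintf("r_%d", handle),
			dupErr: fmt.Errorf("duplicate entry '%d' for key 'PRIMARY'", handle),
		},
	}
	for idx, v := range uniqueVals {
		tc.uniqueKeys = append(tc.uniqueKeys, keyWithDupErr{
			key:    fmt.Sprintf("i_%s_%s", idx, v),
			dupErr: fmt.Errorf("duplicate entry '%s' for key '%s'", v, idx),
		})
	}
	return tc
}

func main() {
	tc := buildChecks(7, map[string]string{"uk_name": "alice"})
	fmt.Println(tc.handleKey.key, len(tc.uniqueKeys))
}
```

The next file in the diff, executor/benchmark_test.go, continues below with its package clause.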
+ +package executor + +import ( + "context" + "fmt" + "math/rand" + "sort" + "strings" + "testing" + + "github.com/pingcap/tidb/expression" + "github.com/pingcap/tidb/expression/aggregation" + "github.com/pingcap/tidb/parser/ast" + "github.com/pingcap/tidb/parser/mysql" + "github.com/pingcap/tidb/planner/core" + "github.com/pingcap/tidb/sessionctx" + "github.com/pingcap/tidb/sessionctx/variable" + "github.com/pingcap/tidb/types" + "github.com/pingcap/tidb/util/chunk" + "github.com/pingcap/tidb/util/mock" + "github.com/pingcap/tidb/util/stringutil" +) + +var ( + _ Executor = &mockDataSource{} +) + +type mockDataSourceParameters struct { + schema *expression.Schema + genDataFunc func(row int, typ *types.FieldType) interface{} + ndvs []int // number of distinct values on columns[i] and zero represents no limit + orders []bool // columns[i] should be ordered if orders[i] is true + rows int // number of rows the DataSource should output + ctx sessionctx.Context +} + +type mockDataSource struct { + baseExecutor + p mockDataSourceParameters + genData []*chunk.Chunk + chunks []*chunk.Chunk + chunkPtr int +} + +func (mds *mockDataSource) genColDatums(col int) (results []interface{}) { + typ := mds.retFieldTypes[col] + order := false + if col < len(mds.p.orders) { + order = mds.p.orders[col] + } + rows := mds.p.rows + NDV := 0 + if col < len(mds.p.ndvs) { + NDV = mds.p.ndvs[col] + } + results = make([]interface{}, 0, rows) + if mds.p.genDataFunc != nil { + for i := 0; i < rows; i++ { + results = append(results, mds.p.genDataFunc(i, typ)) + } + } else if NDV == 0 { + for i := 0; i < rows; i++ { + results = append(results, mds.randDatum(typ)) + } + } else { + datumSet := make(map[string]bool, NDV) + datums := make([]interface{}, 0, NDV) + for len(datums) < NDV { + d := mds.randDatum(typ) + str := fmt.Sprintf("%v", d) + if datumSet[str] { + continue + } + datumSet[str] = true + datums = append(datums, d) + } + + for i := 0; i < rows; i++ { + results = append(results, datums[rand.Intn(NDV)]) + } + } + + if order { + sort.Slice(results, func(i, j int) bool { + switch typ.Tp { + case mysql.TypeLong, mysql.TypeLonglong: + return results[i].(int64) < results[j].(int64) + case mysql.TypeDouble: + return results[i].(float64) < results[j].(float64) + case mysql.TypeVarString: + return results[i].(string) < results[j].(string) + default: + panic("not implement") + } + }) + } + + return +} + +func (mds *mockDataSource) randDatum(typ *types.FieldType) interface{} { + switch typ.Tp { + case mysql.TypeLong, mysql.TypeLonglong: + return int64(rand.Int()) + case mysql.TypeDouble: + return rand.Float64() + case mysql.TypeVarString: + return rawData + default: + panic("not implement") + } +} + +func (mds *mockDataSource) prepareChunks() { + mds.chunks = make([]*chunk.Chunk, len(mds.genData)) + for i := range mds.chunks { + mds.chunks[i] = mds.genData[i].CopyConstruct() + } + mds.chunkPtr = 0 +} + +func (mds *mockDataSource) Next(ctx context.Context, req *chunk.Chunk) error { + if mds.chunkPtr >= len(mds.chunks) { + req.Reset() + return nil + } + dataChk := mds.chunks[mds.chunkPtr] + dataChk.SwapColumns(req) + mds.chunkPtr++ + return nil +} + +func buildMockDataSource(opt mockDataSourceParameters) *mockDataSource { + baseExec := newBaseExecutor(opt.ctx, opt.schema, nil) + m := &mockDataSource{baseExec, opt, nil, nil, 0} + types := retTypes(m) + colData := make([][]interface{}, len(types)) + for i := 0; i < len(types); i++ { + colData[i] = m.genColDatums(i) + } + + m.genData = make([]*chunk.Chunk, 
(m.p.rows+m.maxChunkSize-1)/m.maxChunkSize) + for i := range m.genData { + m.genData[i] = chunk.NewChunkWithCapacity(retTypes(m), m.maxChunkSize) + } + + for i := 0; i < m.p.rows; i++ { + idx := i / m.maxChunkSize + retTypes := retTypes(m) + for colIdx := 0; colIdx < len(types); colIdx++ { + switch retTypes[colIdx].Tp { + case mysql.TypeLong, mysql.TypeLonglong: + m.genData[idx].AppendInt64(colIdx, colData[colIdx][i].(int64)) + case mysql.TypeDouble: + m.genData[idx].AppendFloat64(colIdx, colData[colIdx][i].(float64)) + case mysql.TypeVarString: + m.genData[idx].AppendString(colIdx, colData[colIdx][i].(string)) + default: + panic("not implement") + } + } + } + return m +} + +type aggTestCase struct { + aggFunc string // sum, avg, count .... + groupByNDV int // the number of distinct group-by keys + rows int + concurrency int + ctx sessionctx.Context +} + +func (a aggTestCase) columns() []*expression.Column { + return []*expression.Column{ + {Index: 0, RetType: types.NewFieldType(mysql.TypeDouble)}, + {Index: 1, RetType: types.NewFieldType(mysql.TypeLonglong)}, + } +} + +func (a aggTestCase) String() string { + return fmt.Sprintf("(aggFunc:%v, ndv:%v, rows:%v, concurrency:%v)", + a.aggFunc, a.groupByNDV, a.rows, a.concurrency) +} + +func defaultAggTestCase() *aggTestCase { + ctx := mock.NewContext() + ctx.GetSessionVars().InitChunkSize = variable.DefInitChunkSize + ctx.GetSessionVars().MaxChunkSize = variable.DefMaxChunkSize + return &aggTestCase{ast.AggFuncSum, 1000, 10000000, 4, ctx} +} + +func buildHashAggExecutor(ctx sessionctx.Context, src Executor, schema *expression.Schema, + aggFuncs []*aggregation.AggFuncDesc, groupItems []expression.Expression) Executor { + plan := new(core.PhysicalHashAgg) + plan.AggFuncs = aggFuncs + plan.GroupByItems = groupItems + plan.SetSchema(schema) + plan.Init(ctx, nil) + plan.SetChildren(nil) + b := newExecutorBuilder(ctx, nil) + exec := b.build(plan) + hashAgg := exec.(*HashAggExec) + hashAgg.children[0] = src + return exec +} + +func buildAggExecutor(b *testing.B, testCase *aggTestCase, child Executor) Executor { + ctx := testCase.ctx + if err := ctx.GetSessionVars().SetSystemVar(variable.TiDBHashAggFinalConcurrency, fmt.Sprintf("%v", testCase.concurrency)); err != nil { + b.Fatal(err) + } + if err := ctx.GetSessionVars().SetSystemVar(variable.TiDBHashAggPartialConcurrency, fmt.Sprintf("%v", testCase.concurrency)); err != nil { + b.Fatal(err) + } + + childCols := testCase.columns() + schema := expression.NewSchema(childCols...) 
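The mockDataSource above generates all of its column data and chunks up front, so each benchmark iteration only replays copies of pre-built chunks and the timed loop measures the executor under test rather than data generation. A stripped-down sketch of the same pattern, using plain int64 batches instead of chunk.Chunk:

```go
package main

import "fmt"

// batch is a toy stand-in for chunk.Chunk: just a slice of int64 values.
type batch []int64

// mockSource generates all batches once, then hands out fresh copies on each
// run, so a benchmark loop measures only the executor consuming the data.
type mockSource struct {
	gen     []batch // generated once, like mockDataSource.genData
	batches []batch // reset before every benchmark iteration
	ptr     int
}

func (m *mockSource) prepare() {
	m.batches = make([]batch, len(m.gen))
	for i, b := range m.gen {
		m.batches[i] = append(batch(nil), b...) // cheap copy, like CopyConstruct
	}
	m.ptr = 0
}

// next returns the following batch, or nil once the data is exhausted,
// mirroring mockDataSource.Next returning an empty chunk at the end.
func (m *mockSource) next() batch {
	if m.ptr >= len(m.batches) {
		return nil
	}
	b := m.batches[m.ptr]
	m.ptr++
	return b
}

func main() {
	src := &mockSource{gen: []batch{{1, 2, 3}, {4, 5}}}
	src.prepare()
	for b := src.next(); b != nil; b = src.next() {
		fmt.Println(b)
	}
}
```

buildAggExecutor, continued below, then wires the group-by expression and aggregation descriptor into the HashAggExec being benchmarked.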
+ groupBy := []expression.Expression{childCols[1]} + aggFunc, err := aggregation.NewAggFuncDesc(testCase.ctx, testCase.aggFunc, []expression.Expression{childCols[0]}) + if err != nil { + b.Fatal(err) + } + aggFuncs := []*aggregation.AggFuncDesc{aggFunc} + aggExec := buildHashAggExecutor(testCase.ctx, child, schema, aggFuncs, groupBy) + return aggExec +} + +func benchmarkAggExecWithCase(b *testing.B, casTest *aggTestCase) { + cols := casTest.columns() + dataSource := buildMockDataSource(mockDataSourceParameters{ + schema: expression.NewSchema(cols...), + ndvs: []int{0, casTest.groupByNDV}, + orders: []bool{false}, + rows: casTest.rows, + ctx: casTest.ctx, + }) + + b.ResetTimer() + for i := 0; i < b.N; i++ { + b.StopTimer() // prepare a new agg-executor + aggExec := buildAggExecutor(b, casTest, dataSource) + tmpCtx := context.Background() + chk := newFirstChunk(aggExec) + dataSource.prepareChunks() + + b.StartTimer() + if err := aggExec.Open(tmpCtx); err != nil { + b.Fatal(err) + } + for { + if err := aggExec.Next(tmpCtx, chk); err != nil { + b.Fatal(b) + } + if chk.NumRows() == 0 { + break + } + } + + if err := aggExec.Close(); err != nil { + b.Fatal(err) + } + b.StopTimer() + } +} + +func BenchmarkAggRows(b *testing.B) { + rows := []int{100000, 1000000, 10000000} + concurrencies := []int{1, 4, 8, 15, 20, 30, 40} + for _, row := range rows { + for _, con := range concurrencies { + cas := defaultAggTestCase() + cas.rows = row + cas.concurrency = con + b.Run(fmt.Sprintf("%v", cas), func(b *testing.B) { + benchmarkAggExecWithCase(b, cas) + }) + } + } +} + +func BenchmarkAggGroupByNDV(b *testing.B) { + NDVs := []int{10, 100, 1000, 10000, 100000, 1000000, 10000000} + for _, NDV := range NDVs { + cas := defaultAggTestCase() + cas.groupByNDV = NDV + b.Run(fmt.Sprintf("%v", cas), func(b *testing.B) { + benchmarkAggExecWithCase(b, cas) + }) + } +} + +func BenchmarkAggConcurrency(b *testing.B) { + concs := []int{1, 4, 8, 15, 20, 30, 40} + for _, con := range concs { + cas := defaultAggTestCase() + cas.concurrency = con + b.Run(fmt.Sprintf("%v", cas), func(b *testing.B) { + benchmarkAggExecWithCase(b, cas) + }) + } +} + +func BenchmarkAggDistinct(b *testing.B) { + rows := []int{100000, 1000000, 10000000} + for _, row := range rows { + cas := defaultAggTestCase() + cas.rows = row + b.Run(fmt.Sprintf("%v", cas), func(b *testing.B) { + benchmarkAggExecWithCase(b, cas) + }) + } +} + +var rawData = strings.Repeat("x", 5*1024) + +type hashJoinTestCase struct { + rows int + cols []*types.FieldType + concurrency int + ctx sessionctx.Context + keyIdx []int +} + +func (tc hashJoinTestCase) columns() []*expression.Column { + ret := make([]*expression.Column, 0) + for i, t := range tc.cols { + column := &expression.Column{Index: i, RetType: t} + ret = append(ret, column) + } + return ret +} + +func (tc hashJoinTestCase) String() string { + return fmt.Sprintf("(rows:%v, concurency:%v, joinKeyIdx: %v)", + tc.rows, tc.concurrency, tc.keyIdx) +} + +func defaultHashJoinTestCase(cols []*types.FieldType) *hashJoinTestCase { + ctx := mock.NewContext() + ctx.GetSessionVars().InitChunkSize = variable.DefInitChunkSize + ctx.GetSessionVars().MaxChunkSize = variable.DefMaxChunkSize + ctx.GetSessionVars().IndexLookupJoinConcurrency = 4 + tc := &hashJoinTestCase{rows: 100000, concurrency: 4, ctx: ctx, keyIdx: []int{0, 1}} + tc.cols = cols + return tc +} + +func prepare4HashJoin(testCase *hashJoinTestCase, innerExec, outerExec Executor) *HashJoinExec { + cols0 := testCase.columns() + cols1 := testCase.columns() + joinSchema := 
expression.NewSchema(cols0...) + joinSchema.Append(cols1...) + joinKeys := make([]*expression.Column, 0, len(testCase.keyIdx)) + for _, keyIdx := range testCase.keyIdx { + joinKeys = append(joinKeys, cols0[keyIdx]) + } + e := &HashJoinExec{ + baseExecutor: newBaseExecutor(testCase.ctx, joinSchema, stringutil.StringerStr("HashJoin"), innerExec, outerExec), + concurrency: uint(testCase.concurrency), + joinType: 0, // InnerJoin + innerKeys: joinKeys, + outerKeys: joinKeys, + innerSideExec: innerExec, + outerSideExec: outerExec, + innerSideEstCount: float64(testCase.rows), + } + defaultValues := make([]types.Datum, e.innerSideExec.Schema().Len()) + lhsTypes, rhsTypes := retTypes(innerExec), retTypes(outerExec) + e.joiners = make([]joiner, e.concurrency) + for i := uint(0); i < e.concurrency; i++ { + e.joiners[i] = newJoiner(testCase.ctx, e.joinType, true, defaultValues, + nil, lhsTypes, rhsTypes) + } + return e +} + +func benchmarkHashJoinExecWithCase(b *testing.B, casTest *hashJoinTestCase) { + opt := mockDataSourceParameters{ + schema: expression.NewSchema(casTest.columns()...), + rows: casTest.rows, + ctx: casTest.ctx, + genDataFunc: func(row int, typ *types.FieldType) interface{} { + switch typ.Tp { + case mysql.TypeLong, mysql.TypeLonglong: + return int64(row) + case mysql.TypeVarString: + return rawData + case mysql.TypeDouble: + return float64(row) + default: + panic("not implement") + } + }, + } + dataSource1 := buildMockDataSource(opt) + dataSource2 := buildMockDataSource(opt) + + b.ResetTimer() + for i := 0; i < b.N; i++ { + b.StopTimer() + exec := prepare4HashJoin(casTest, dataSource1, dataSource2) + tmpCtx := context.Background() + chk := newFirstChunk(exec) + dataSource1.prepareChunks() + dataSource2.prepareChunks() + + b.StartTimer() + if err := exec.Open(tmpCtx); err != nil { + b.Fatal(err) + } + for { + if err := exec.Next(tmpCtx, chk); err != nil { + b.Fatal(err) + } + if chk.NumRows() == 0 { + break + } + } + + if err := exec.Close(); err != nil { + b.Fatal(err) + } + b.StopTimer() + } +} + +func BenchmarkHashJoinExec(b *testing.B) { + cols := []*types.FieldType{ + types.NewFieldType(mysql.TypeLonglong), + types.NewFieldType(mysql.TypeVarString), + } + + b.ReportAllocs() + cas := defaultHashJoinTestCase(cols) + b.Run(fmt.Sprintf("%v", cas), func(b *testing.B) { + benchmarkHashJoinExecWithCase(b, cas) + }) + + cas.keyIdx = []int{0} + b.Run(fmt.Sprintf("%v", cas), func(b *testing.B) { + benchmarkHashJoinExecWithCase(b, cas) + }) +} diff --git a/executor/builder.go b/executor/builder.go new file mode 100644 index 0000000..ca17146 --- /dev/null +++ b/executor/builder.go @@ -0,0 +1,967 @@ +// Copyright 2015 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. 
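The benchmarks above follow the standard table-driven sub-benchmark pattern: each parameter combination gets its own b.Run case, and per-iteration setup is kept out of the timing with StopTimer/StartTimer. A small self-contained example of that pattern (the benchmark name and data are illustrative):

```go
package example

import (
	"fmt"
	"testing"
)

// Each parameter combination becomes its own sub-benchmark via b.Run, and
// per-iteration setup is excluded from the measurement with StopTimer and
// StartTimer, mirroring benchmarkAggExecWithCase and
// benchmarkHashJoinExecWithCase above.
func BenchmarkSum(b *testing.B) {
	for _, n := range []int{1000, 100000} {
		b.Run(fmt.Sprintf("rows:%d", n), func(b *testing.B) {
			for i := 0; i < b.N; i++ {
				b.StopTimer()
				data := make([]int64, n) // untimed setup
				for j := range data {
					data[j] = int64(j)
				}
				b.StartTimer()

				var sum int64
				for _, v := range data {
					sum += v
				}
				_ = sum
			}
		})
	}
}
```

The executor builder, the next file in the diff, begins below.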
+ +package executor + +import ( + "context" + "math" + "sort" + "sync" + "time" + + "github.com/cznic/mathutil" + "github.com/cznic/sortutil" + "github.com/pingcap/errors" + "github.com/pingcap/tidb/distsql" + "github.com/pingcap/tidb/domain" + "github.com/pingcap/tidb/executor/aggfuncs" + "github.com/pingcap/tidb/expression" + "github.com/pingcap/tidb/expression/aggregation" + "github.com/pingcap/tidb/infoschema" + "github.com/pingcap/tidb/parser/ast" + "github.com/pingcap/tidb/parser/model" + plannercore "github.com/pingcap/tidb/planner/core" + "github.com/pingcap/tidb/sessionctx" + "github.com/pingcap/tidb/table" + "github.com/pingcap/tidb/types" + "github.com/pingcap/tidb/util/admin" + "github.com/pingcap/tidb/util/chunk" + "github.com/pingcap/tipb/go-tipb" +) + +// executorBuilder builds an Executor from a Plan. +// The InfoSchema must not change during execution. +type executorBuilder struct { + ctx sessionctx.Context + is infoschema.InfoSchema + startTS uint64 // cached when the first time getStartTS() is called + // err is set when there is error happened during Executor building process. + err error +} + +func newExecutorBuilder(ctx sessionctx.Context, is infoschema.InfoSchema) *executorBuilder { + return &executorBuilder{ + ctx: ctx, + is: is, + } +} + +// MockPhysicalPlan is used to return a specified executor in when build. +// It is mainly used for testing. +type MockPhysicalPlan interface { + plannercore.PhysicalPlan + GetExecutor() Executor +} + +func (b *executorBuilder) build(p plannercore.Plan) Executor { + switch v := p.(type) { + case nil: + return nil + case *plannercore.DDL: + return b.buildDDL(v) + case *plannercore.Delete: + return b.buildDelete(v) + case *plannercore.Explain: + return b.buildExplain(v) + case *plannercore.Insert: + return b.buildInsert(v) + case *plannercore.PhysicalLimit: + return b.buildLimit(v) + case *plannercore.ShowDDL: + return b.buildShowDDL(v) + case *plannercore.PhysicalShowDDLJobs: + return b.buildShowDDLJobs(v) + case *plannercore.PhysicalShow: + return b.buildShow(v) + case *plannercore.Simple: + return b.buildSimple(v) + case *plannercore.Set: + return b.buildSet(v) + case *plannercore.PhysicalSort: + return b.buildSort(v) + case *plannercore.PhysicalTopN: + return b.buildTopN(v) + case *plannercore.PhysicalUnionScan: + return b.buildUnionScanExec(v) + case *plannercore.PhysicalHashJoin: + return b.buildHashJoin(v) + case *plannercore.PhysicalMergeJoin: + return b.buildMergeJoin(v) + case *plannercore.PhysicalSelection: + return b.buildSelection(v) + case *plannercore.PhysicalHashAgg: + return b.buildHashAgg(v) + case *plannercore.PhysicalProjection: + return b.buildProjection(v) + case *plannercore.PhysicalMemTable: + return b.buildMemTable(v) + case *plannercore.PhysicalTableDual: + return b.buildTableDual(v) + case *plannercore.Analyze: + return b.buildAnalyze(v) + case *plannercore.PhysicalTableReader: + return b.buildTableReader(v) + case *plannercore.PhysicalIndexReader: + return b.buildIndexReader(v) + case *plannercore.PhysicalIndexLookUpReader: + return b.buildIndexLookUpReader(v) + default: + if mp, ok := p.(MockPhysicalPlan); ok { + return mp.GetExecutor() + } + + b.err = ErrUnknownPlan.GenWithStack("Unknown Plan %T", p) + return nil + } +} + +func (b *executorBuilder) buildShowDDL(v *plannercore.ShowDDL) Executor { + // We get DDLInfo here because for Executors that returns result set, + // next will be called after transaction has been committed. + // We need the transaction to get DDLInfo. 
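executorBuilder.build above is a single dispatch point: it switches on the concrete plan type, delegates to a per-plan build method, and records the first error in b.err instead of returning it from every call. A compact sketch of that shape, with made-up plan and executor types:

```go
package main

import "fmt"

type plan interface{ name() string }

type executor interface{ open() error }

type limitPlan struct{ count int }

func (limitPlan) name() string { return "Limit" }

type limitExec struct{ count int }

func (limitExec) open() error { return nil }

// builder dispatches on the concrete plan type and records the first error
// in b.err, the same shape as executorBuilder.build above.
type builder struct{ err error }

func (b *builder) build(p plan) executor {
	switch v := p.(type) {
	case nil:
		return nil
	case limitPlan:
		return limitExec{count: v.count}
	default:
		b.err = fmt.Errorf("unknown plan %T", p)
		return nil
	}
}

func main() {
	b := &builder{}
	e := b.build(limitPlan{count: 10})
	fmt.Printf("%#v err=%v\n", e, b.err)
}
```

buildShowDDL, whose body continues below, is one of those per-plan branches.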
+ e := &ShowDDLExec{ + baseExecutor: newBaseExecutor(b.ctx, v.Schema(), v.ExplainID()), + } + + var err error + ownerManager := domain.GetDomain(e.ctx).DDL().OwnerManager() + ctx, cancel := context.WithTimeout(context.Background(), 3*time.Second) + e.ddlOwnerID, err = ownerManager.GetOwnerID(ctx) + cancel() + if err != nil { + b.err = err + return nil + } + txn, err := e.ctx.Txn(true) + if err != nil { + b.err = err + return nil + } + + ddlInfo, err := admin.GetDDLInfo(txn) + if err != nil { + b.err = err + return nil + } + e.ddlInfo = ddlInfo + e.selfID = ownerManager.ID() + return e +} + +func (b *executorBuilder) buildShowDDLJobs(v *plannercore.PhysicalShowDDLJobs) Executor { + e := &ShowDDLJobsExec{ + jobNumber: v.JobNumber, + is: b.is, + baseExecutor: newBaseExecutor(b.ctx, v.Schema(), v.ExplainID()), + } + return e +} + +func (b *executorBuilder) buildLimit(v *plannercore.PhysicalLimit) Executor { + childExec := b.build(v.Children()[0]) + if b.err != nil { + return nil + } + n := int(mathutil.MinUint64(v.Count, uint64(b.ctx.GetSessionVars().MaxChunkSize))) + base := newBaseExecutor(b.ctx, v.Schema(), v.ExplainID(), childExec) + base.initCap = n + e := &LimitExec{ + baseExecutor: base, + begin: v.Offset, + end: v.Offset + v.Count, + } + return e +} + +func (b *executorBuilder) buildShow(v *plannercore.PhysicalShow) Executor { + e := &ShowExec{ + baseExecutor: newBaseExecutor(b.ctx, v.Schema(), v.ExplainID()), + Tp: v.Tp, + DBName: model.NewCIStr(v.DBName), + Table: v.Table, + Column: v.Column, + IndexName: v.IndexName, + IfNotExists: v.IfNotExists, + Flag: v.Flag, + Full: v.Full, + GlobalScope: v.GlobalScope, + is: b.is, + } + return e +} + +func (b *executorBuilder) buildSimple(v *plannercore.Simple) Executor { + base := newBaseExecutor(b.ctx, v.Schema(), v.ExplainID()) + base.initCap = chunk.ZeroCapacity + e := &SimpleExec{ + baseExecutor: base, + Statement: v.Statement, + is: b.is, + } + return e +} + +func (b *executorBuilder) buildSet(v *plannercore.Set) Executor { + base := newBaseExecutor(b.ctx, v.Schema(), v.ExplainID()) + base.initCap = chunk.ZeroCapacity + e := &SetExecutor{ + baseExecutor: base, + vars: v.VarAssigns, + } + return e +} + +func (b *executorBuilder) buildInsert(v *plannercore.Insert) Executor { + b.startTS = b.ctx.GetSessionVars().TxnCtx.GetForUpdateTS() + selectExec := b.build(v.SelectPlan) + if b.err != nil { + return nil + } + var baseExec baseExecutor + if selectExec != nil { + baseExec = newBaseExecutor(b.ctx, nil, v.ExplainID(), selectExec) + } else { + baseExec = newBaseExecutor(b.ctx, nil, v.ExplainID()) + } + baseExec.initCap = chunk.ZeroCapacity + + ivs := &InsertValues{ + baseExecutor: baseExec, + Table: v.Table, + Columns: v.Columns, + Lists: v.Lists, + SetList: v.SetList, + allAssignmentsAreConstant: v.AllAssignmentsAreConstant, + hasRefCols: v.NeedFillDefaultValue, + SelectExec: selectExec, + } + err := ivs.initInsertColumns() + if err != nil { + b.err = err + return nil + } + + if v.IsReplace { + return b.buildReplace(ivs) + } + insert := &InsertExec{ + InsertValues: ivs, + } + return insert +} + +func (b *executorBuilder) buildReplace(vals *InsertValues) Executor { + replaceExec := &ReplaceExec{ + InsertValues: vals, + } + return replaceExec +} + +func (b *executorBuilder) buildDDL(v *plannercore.DDL) Executor { + e := &DDLExec{ + baseExecutor: newBaseExecutor(b.ctx, v.Schema(), v.ExplainID()), + stmt: v.Statement, + is: b.is, + } + return e +} + +// buildExplain builds a explain executor. `e.rows` collects final result to `ExplainExec`. 
+func (b *executorBuilder) buildExplain(v *plannercore.Explain) Executor {
+    explainExec := &ExplainExec{
+        baseExecutor: newBaseExecutor(b.ctx, v.Schema(), v.ExplainID()),
+        explain:      v,
+    }
+    return explainExec
+}
+
+func (b *executorBuilder) buildUnionScanExec(v *plannercore.PhysicalUnionScan) Executor {
+    reader := b.build(v.Children()[0])
+    if b.err != nil {
+        return nil
+    }
+    return b.buildUnionScanFromReader(reader, v)
+}
+
+// buildUnionScanFromReader builds a union scan executor from the child executor.
+// Note that this function may be called by inner workers of index lookup join concurrently.
+// Be careful to avoid data races.
+func (b *executorBuilder) buildUnionScanFromReader(reader Executor, v *plannercore.PhysicalUnionScan) Executor {
+    us := &UnionScanExec{baseExecutor: newBaseExecutor(b.ctx, v.Schema(), v.ExplainID(), reader)}
+    // Get the handle column index of the plan below.
+    us.belowHandleIndex = v.HandleCol.Index
+    us.mutableRow = chunk.MutRowFromTypes(retTypes(us))
+    switch x := reader.(type) {
+    case *TableReaderExecutor:
+        us.desc = x.desc
+        // A union scan can only appear in a write transaction, so DirtyDB should have a non-nil value now, thus
+        // GetDirtyDB() is safe here. If this table has been modified in the transaction, a non-nil DirtyTable
+        // can be found in DirtyDB now, so GetDirtyTable is safe; if this table has not been modified in the
+        // transaction, an empty DirtyTable is inserted into DirtyDB, and it does not matter if multiple
+        // goroutines write an empty DirtyTable to DirtyDB for this table concurrently. Although DirtyDB looks
+        // free of data races in all these cases, a Go map panics when accessed in parallel,
+        // so we lock it when getting the dirty table.
+        physicalTableID := getPhysicalTableID(x.table)
+        us.dirty = GetDirtyDB(b.ctx).GetDirtyTable(physicalTableID)
+        us.conditions = v.Conditions
+        us.columns = x.columns
+        us.table = x.table
+    case *IndexReaderExecutor:
+        us.desc = x.desc
+        for _, ic := range x.index.Columns {
+            for i, col := range x.columns {
+                if col.Name.L == ic.Name.L {
+                    us.usedIndex = append(us.usedIndex, i)
+                    break
+                }
+            }
+        }
+        physicalTableID := getPhysicalTableID(x.table)
+        us.dirty = GetDirtyDB(b.ctx).GetDirtyTable(physicalTableID)
+        us.conditions = v.Conditions
+        us.columns = x.columns
+        us.table = x.table
+    case *IndexLookUpExecutor:
+        us.desc = x.desc
+        for _, ic := range x.index.Columns {
+            for i, col := range x.columns {
+                if col.Name.L == ic.Name.L {
+                    us.usedIndex = append(us.usedIndex, i)
+                    break
+                }
+            }
+        }
+        physicalTableID := getPhysicalTableID(x.table)
+        us.dirty = GetDirtyDB(b.ctx).GetDirtyTable(physicalTableID)
+        us.conditions = v.Conditions
+        us.columns = x.columns
+        us.table = x.table
+    default:
+        // The mem table will not be written by SQL directly, so we can omit the union scan to avoid error reporting.
+        return reader
+    }
+    return us
+}
+
+// buildMergeJoin builds the MergeJoinExec executor.
+func (b *executorBuilder) buildMergeJoin(v *plannercore.PhysicalMergeJoin) Executor { + leftExec := b.build(v.Children()[0]) + if b.err != nil { + return nil + } + + rightExec := b.build(v.Children()[1]) + if b.err != nil { + return nil + } + + defaultValues := v.DefaultValues + if defaultValues == nil { + if v.JoinType == plannercore.RightOuterJoin { + defaultValues = make([]types.Datum, leftExec.Schema().Len()) + } else { + defaultValues = make([]types.Datum, rightExec.Schema().Len()) + } + } + + e := &MergeJoinExec{ + stmtCtx: b.ctx.GetSessionVars().StmtCtx, + baseExecutor: newBaseExecutor(b.ctx, v.Schema(), v.ExplainID(), leftExec, rightExec), + compareFuncs: v.CompareFuncs, + joiner: newJoiner( + b.ctx, + v.JoinType, + v.JoinType == plannercore.RightOuterJoin, + defaultValues, + v.OtherConditions, + retTypes(leftExec), + retTypes(rightExec), + ), + isOuterJoin: v.JoinType.IsOuterJoin(), + } + + leftKeys := v.LeftJoinKeys + rightKeys := v.RightJoinKeys + + e.outerIdx = 0 + innerFilter := v.RightConditions + + e.innerTable = &mergeJoinInnerTable{ + reader: rightExec, + joinKeys: rightKeys, + } + + e.outerTable = &mergeJoinOuterTable{ + reader: leftExec, + filter: v.LeftConditions, + keys: leftKeys, + } + + if v.JoinType == plannercore.RightOuterJoin { + e.outerIdx = 1 + e.outerTable.reader = rightExec + e.outerTable.filter = v.RightConditions + e.outerTable.keys = rightKeys + + innerFilter = v.LeftConditions + e.innerTable.reader = leftExec + e.innerTable.joinKeys = leftKeys + } + + // optimizer should guarantee that filters on inner table are pushed down + // to tikv or extracted to a Selection. + if len(innerFilter) != 0 { + b.err = errors.Annotate(ErrBuildExecutor, "merge join's inner filter should be empty.") + return nil + } + return e +} + +func (b *executorBuilder) buildHashJoin(v *plannercore.PhysicalHashJoin) Executor { + leftExec := b.build(v.Children()[0]) + if b.err != nil { + return nil + } + + rightExec := b.build(v.Children()[1]) + if b.err != nil { + return nil + } + + e := &HashJoinExec{ + baseExecutor: newBaseExecutor(b.ctx, v.Schema(), v.ExplainID(), leftExec, rightExec), + concurrency: v.Concurrency, + joinType: v.JoinType, + innerSideEstCount: v.Children()[v.InnerChildIdx].StatsCount(), + } + + defaultValues := v.DefaultValues + lhsTypes, rhsTypes := retTypes(leftExec), retTypes(rightExec) + if v.InnerChildIdx == 0 { + if len(v.LeftConditions) > 0 { + b.err = errors.Annotate(ErrBuildExecutor, "join's inner condition should be empty") + return nil + } + e.innerSideExec = leftExec + e.outerSideExec = rightExec + e.outerSideFilter = v.RightConditions + e.innerKeys = v.LeftJoinKeys + e.outerKeys = v.RightJoinKeys + if defaultValues == nil { + defaultValues = make([]types.Datum, e.innerSideExec.Schema().Len()) + } + } else { + if len(v.RightConditions) > 0 { + b.err = errors.Annotate(ErrBuildExecutor, "join's inner condition should be empty") + return nil + } + e.innerSideExec = rightExec + e.outerSideExec = leftExec + e.outerSideFilter = v.LeftConditions + e.innerKeys = v.RightJoinKeys + e.outerKeys = v.LeftJoinKeys + if defaultValues == nil { + defaultValues = make([]types.Datum, e.innerSideExec.Schema().Len()) + } + } + e.joiners = make([]joiner, e.concurrency) + for i := uint(0); i < e.concurrency; i++ { + e.joiners[i] = newJoiner(b.ctx, v.JoinType, v.InnerChildIdx == 0, defaultValues, + v.OtherConditions, lhsTypes, rhsTypes) + } + return e +} + +func (b *executorBuilder) buildHashAgg(v *plannercore.PhysicalHashAgg) Executor { + src := b.build(v.Children()[0]) + if 
b.err != nil { + return nil + } + sessionVars := b.ctx.GetSessionVars() + e := &HashAggExec{ + baseExecutor: newBaseExecutor(b.ctx, v.Schema(), v.ExplainID(), src), + sc: sessionVars.StmtCtx, + PartialAggFuncs: make([]aggfuncs.AggFunc, 0, len(v.AggFuncs)), + GroupByItems: v.GroupByItems, + } + // We take `create table t(a int, b int);` as example. + // + // 1. If all the aggregation functions are FIRST_ROW, we do not need to set the defaultVal for them: + // e.g. + // mysql> select distinct a, b from t; + // 0 rows in set (0.00 sec) + // + // 2. If there exists group by items, we do not need to set the defaultVal for them either: + // e.g. + // mysql> select avg(a) from t group by b; + // Empty set (0.00 sec) + // + // mysql> select avg(a) from t group by a; + // +--------+ + // | avg(a) | + // +--------+ + // | NULL | + // +--------+ + // 1 row in set (0.00 sec) + if len(v.GroupByItems) != 0 || aggregation.IsAllFirstRow(v.AggFuncs) { + e.defaultVal = nil + } else { + e.defaultVal = chunk.NewChunkWithCapacity(retTypes(e), 1) + } + partialOrdinal := 0 + for i, aggDesc := range v.AggFuncs { + + ordinal := []int{partialOrdinal} + partialOrdinal++ + if aggDesc.Name == ast.AggFuncAvg { + ordinal = append(ordinal, partialOrdinal+1) + partialOrdinal++ + } + partialAggDesc, finalDesc := aggDesc.Split(ordinal) + partialAggFunc := aggfuncs.Build(b.ctx, partialAggDesc, i) + finalAggFunc := aggfuncs.Build(b.ctx, finalDesc, i) + e.PartialAggFuncs = append(e.PartialAggFuncs, partialAggFunc) + e.FinalAggFuncs = append(e.FinalAggFuncs, finalAggFunc) + if e.defaultVal != nil { + value := aggDesc.GetDefaultValue() + e.defaultVal.AppendDatum(i, &value) + } + } + return e +} + +func (b *executorBuilder) buildSelection(v *plannercore.PhysicalSelection) Executor { + childExec := b.build(v.Children()[0]) + if b.err != nil { + return nil + } + e := &SelectionExec{ + baseExecutor: newBaseExecutor(b.ctx, v.Schema(), v.ExplainID(), childExec), + filters: v.Conditions, + } + return e +} + +func (b *executorBuilder) buildProjection(v *plannercore.PhysicalProjection) Executor { + childExec := b.build(v.Children()[0]) + if b.err != nil { + return nil + } + e := &ProjectionExec{ + baseExecutor: newBaseExecutor(b.ctx, v.Schema(), v.ExplainID(), childExec), + numWorkers: b.ctx.GetSessionVars().ProjectionConcurrency, + evaluatorSuit: expression.NewEvaluatorSuite(v.Exprs), + } + + // If the calculation row count for this Projection operator is smaller + // than a Chunk size, we turn back to the un-parallel Projection + // implementation to reduce the goroutine overhead. + if int64(v.StatsCount()) < int64(b.ctx.GetSessionVars().MaxChunkSize) { + e.numWorkers = 0 + } + return e +} + +func (b *executorBuilder) buildTableDual(v *plannercore.PhysicalTableDual) Executor { + if v.RowCount != 0 && v.RowCount != 1 { + b.err = errors.Errorf("buildTableDual failed, invalid row count for dual table: %v", v.RowCount) + return nil + } + base := newBaseExecutor(b.ctx, v.Schema(), v.ExplainID()) + base.initCap = v.RowCount + e := &TableDualExec{ + baseExecutor: base, + numDualRows: v.RowCount, + } + return e +} + +func (b *executorBuilder) getStartTS() (uint64, error) { + if b.startTS != 0 { + // Return the cached value. 
+ return b.startTS, nil + } + + txn, err := b.ctx.Txn(true) + if err != nil { + return 0, err + } + b.startTS = txn.StartTS() + if b.startTS == 0 { + return 0, errors.Trace(ErrGetStartTS) + } + return b.startTS, nil +} + +func (b *executorBuilder) buildMemTable(v *plannercore.PhysicalMemTable) Executor { + var e Executor + tb, _ := b.is.TableByID(v.Table.ID) + e = &TableScanExec{ + baseExecutor: newBaseExecutor(b.ctx, v.Schema(), v.ExplainID()), + t: tb, + columns: v.Columns, + seekHandle: math.MinInt64, + isVirtualTable: !tb.Type().IsNormalTable(), + } + return e +} + +func (b *executorBuilder) buildSort(v *plannercore.PhysicalSort) Executor { + childExec := b.build(v.Children()[0]) + if b.err != nil { + return nil + } + sortExec := SortExec{ + baseExecutor: newBaseExecutor(b.ctx, v.Schema(), v.ExplainID(), childExec), + ByItems: v.ByItems, + schema: v.Schema(), + } + return &sortExec +} + +func (b *executorBuilder) buildTopN(v *plannercore.PhysicalTopN) Executor { + childExec := b.build(v.Children()[0]) + if b.err != nil { + return nil + } + sortExec := SortExec{ + baseExecutor: newBaseExecutor(b.ctx, v.Schema(), v.ExplainID(), childExec), + ByItems: v.ByItems, + schema: v.Schema(), + } + return &TopNExec{ + SortExec: sortExec, + limit: &plannercore.PhysicalLimit{Count: v.Count, Offset: v.Offset}, + } +} + +func (b *executorBuilder) buildDelete(v *plannercore.Delete) Executor { + tblID2table := make(map[int64]table.Table) + for _, info := range v.TblColPosInfos { + tblID2table[info.TblID], _ = b.is.TableByID(info.TblID) + } + b.startTS = b.ctx.GetSessionVars().TxnCtx.GetForUpdateTS() + selExec := b.build(v.SelectPlan) + if b.err != nil { + return nil + } + base := newBaseExecutor(b.ctx, v.Schema(), v.ExplainID(), selExec) + base.initCap = chunk.ZeroCapacity + deleteExec := &DeleteExec{ + baseExecutor: base, + tblID2Table: tblID2table, + tblColPosInfos: v.TblColPosInfos, + } + return deleteExec +} + +func (b *executorBuilder) buildAnalyzeIndexPushdown(task plannercore.AnalyzeIndexTask) *analyzeTask { + sc := b.ctx.GetSessionVars().StmtCtx + e := &AnalyzeIndexExec{ + ctx: b.ctx, + physicalTableID: task.PhysicalTableID, + idxInfo: task.IndexInfo, + concurrency: b.ctx.GetSessionVars().IndexSerialScanConcurrency, + analyzePB: &tipb.AnalyzeReq{ + Tp: tipb.AnalyzeType_TypeIndex, + Flags: sc.PushDownFlags(), + }, + } + e.analyzePB.IdxReq = &tipb.AnalyzeIndexReq{ + BucketSize: int64(defaultNumBuckets), + NumColumns: int32(len(task.IndexInfo.Columns)), + } + depth := int32(defaultCMSketchDepth) + width := int32(defaultCMSketchWidth) + e.analyzePB.IdxReq.CmsketchDepth = &depth + e.analyzePB.IdxReq.CmsketchWidth = &width + return &analyzeTask{taskType: idxTask, idxExec: e} +} + +func (b *executorBuilder) buildAnalyzeColumnsPushdown(task plannercore.AnalyzeColumnsTask) *analyzeTask { + cols := task.ColsInfo + if task.PKInfo != nil { + cols = append([]*model.ColumnInfo{task.PKInfo}, cols...) 
+ } + + sc := b.ctx.GetSessionVars().StmtCtx + e := &AnalyzeColumnsExec{ + ctx: b.ctx, + physicalTableID: task.PhysicalTableID, + colsInfo: task.ColsInfo, + pkInfo: task.PKInfo, + concurrency: b.ctx.GetSessionVars().DistSQLScanConcurrency, + analyzePB: &tipb.AnalyzeReq{ + Tp: tipb.AnalyzeType_TypeColumn, + Flags: sc.PushDownFlags(), + }, + } + depth := int32(defaultCMSketchDepth) + width := int32(defaultCMSketchWidth) + e.analyzePB.ColReq = &tipb.AnalyzeColumnsReq{ + BucketSize: int64(defaultNumBuckets), + SampleSize: maxRegionSampleSize, + SketchSize: maxSketchSize, + ColumnsInfo: model.ColumnsToProto(cols, task.PKInfo != nil), + CmsketchDepth: &depth, + CmsketchWidth: &width, + } + b.err = plannercore.SetPBColumnsDefaultValue(b.ctx, e.analyzePB.ColReq.ColumnsInfo, cols) + return &analyzeTask{taskType: colTask, colExec: e} +} + +func (b *executorBuilder) buildAnalyze(v *plannercore.Analyze) Executor { + e := &AnalyzeExec{ + baseExecutor: newBaseExecutor(b.ctx, v.Schema(), v.ExplainID()), + tasks: make([]*analyzeTask, 0, len(v.ColTasks)+len(v.IdxTasks)), + wg: &sync.WaitGroup{}, + } + for _, task := range v.ColTasks { + e.tasks = append(e.tasks, b.buildAnalyzeColumnsPushdown(task)) + if b.err != nil { + return nil + } + } + for _, task := range v.IdxTasks { + e.tasks = append(e.tasks, b.buildAnalyzeIndexPushdown(task)) + if b.err != nil { + return nil + } + } + return e +} + +func constructDistExec(sctx sessionctx.Context, plans []plannercore.PhysicalPlan) ([]*tipb.Executor, error) { + executors := make([]*tipb.Executor, 0, len(plans)) + for _, p := range plans { + execPB, err := p.ToPB(sctx) + if err != nil { + return nil, err + } + executors = append(executors, execPB) + } + return executors, nil +} + +func (b *executorBuilder) constructDAGReq(plans []plannercore.PhysicalPlan) (dagReq *tipb.DAGRequest, err error) { + dagReq = &tipb.DAGRequest{} + sc := b.ctx.GetSessionVars().StmtCtx + dagReq.Flags = sc.PushDownFlags() + dagReq.Executors, err = constructDistExec(b.ctx, plans) + return dagReq, err +} + +func buildNoRangeTableReader(b *executorBuilder, v *plannercore.PhysicalTableReader) (*TableReaderExecutor, error) { + dagReq, err := b.constructDAGReq(v.TablePlans) + if err != nil { + return nil, err + } + ts := v.TablePlans[0].(*plannercore.PhysicalTableScan) + tbl, _ := b.is.TableByID(ts.Table.ID) + startTS, err := b.getStartTS() + if err != nil { + return nil, err + } + e := &TableReaderExecutor{ + baseExecutor: newBaseExecutor(b.ctx, v.Schema(), v.ExplainID()), + dagPB: dagReq, + startTS: startTS, + table: tbl, + keepOrder: ts.KeepOrder, + desc: ts.Desc, + columns: ts.Columns, + plans: v.TablePlans, + } + + for i := range v.Schema().Columns { + dagReq.OutputOffsets = append(dagReq.OutputOffsets, uint32(i)) + } + + return e, nil +} + +// buildTableReader builds a table reader executor. It first build a no range table reader, +// and then update it ranges from table scan plan. 
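constructDAGReq above serializes the pushed-down plan chain into an ordered list of tipb executors, and the readers then append one output offset per schema column so the coprocessor knows which columns to send back. A toy sketch of that request shape; the pbExec/dagRequest types here are illustrative, not tipb's real messages:

```go
package main

import "fmt"

// Toy stand-ins for the tipb messages; the real ones carry executor bodies,
// push-down flags, time zone, and more.
type pbExec struct{ tp string }

type dagRequest struct {
	executors     []pbExec
	outputOffsets []uint32
}

// buildDAG flattens a pushed-down plan chain into an ordered executor list
// and asks for every schema column back, like constructDAGReq plus the
// OutputOffsets loop in buildNoRangeTableReader above.
func buildDAG(planChain []string, schemaLen int) dagRequest {
	req := dagRequest{}
	for _, p := range planChain {
		req.executors = append(req.executors, pbExec{tp: p})
	}
	for i := 0; i < schemaLen; i++ {
		req.outputOffsets = append(req.outputOffsets, uint32(i))
	}
	return req
}

func main() {
	req := buildDAG([]string{"TableScan", "Selection"}, 3)
	fmt.Println(len(req.executors), req.outputOffsets)
}
```

buildTableReader, continued below, then fills in the scan ranges from the physical table scan.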
+func (b *executorBuilder) buildTableReader(v *plannercore.PhysicalTableReader) *TableReaderExecutor { + ret, err := buildNoRangeTableReader(b, v) + if err != nil { + b.err = err + return nil + } + + ts := v.TablePlans[0].(*plannercore.PhysicalTableScan) + ret.ranges = ts.Ranges + sctx := b.ctx.GetSessionVars().StmtCtx + sctx.TableIDs = append(sctx.TableIDs, ts.Table.ID) + return ret +} + +func buildNoRangeIndexReader(b *executorBuilder, v *plannercore.PhysicalIndexReader) (*IndexReaderExecutor, error) { + dagReq, err := b.constructDAGReq(v.IndexPlans) + if err != nil { + return nil, err + } + is := v.IndexPlans[0].(*plannercore.PhysicalIndexScan) + tbl, _ := b.is.TableByID(is.Table.ID) + startTS, err := b.getStartTS() + if err != nil { + return nil, err + } + e := &IndexReaderExecutor{ + baseExecutor: newBaseExecutor(b.ctx, v.Schema(), v.ExplainID()), + dagPB: dagReq, + startTS: startTS, + physicalTableID: is.Table.ID, + table: tbl, + index: is.Index, + keepOrder: is.KeepOrder, + desc: is.Desc, + columns: is.Columns, + idxCols: is.IdxCols, + colLens: is.IdxColLens, + plans: v.IndexPlans, + outputColumns: v.OutputColumns, + } + + for _, col := range v.OutputColumns { + dagReq.OutputOffsets = append(dagReq.OutputOffsets, uint32(col.Index)) + } + + return e, nil +} + +func (b *executorBuilder) buildIndexReader(v *plannercore.PhysicalIndexReader) *IndexReaderExecutor { + ret, err := buildNoRangeIndexReader(b, v) + if err != nil { + b.err = err + return nil + } + + is := v.IndexPlans[0].(*plannercore.PhysicalIndexScan) + ret.ranges = is.Ranges + sctx := b.ctx.GetSessionVars().StmtCtx + sctx.IndexNames = append(sctx.IndexNames, is.Table.Name.O+":"+is.Index.Name.O) + return ret +} + +func buildNoRangeIndexLookUpReader(b *executorBuilder, v *plannercore.PhysicalIndexLookUpReader) (*IndexLookUpExecutor, error) { + indexReq, err := b.constructDAGReq(v.IndexPlans) + if err != nil { + return nil, err + } + tableReq, err := b.constructDAGReq(v.TablePlans) + if err != nil { + return nil, err + } + is := v.IndexPlans[0].(*plannercore.PhysicalIndexScan) + indexReq.OutputOffsets = []uint32{uint32(len(is.Index.Columns))} + tbl, _ := b.is.TableByID(is.Table.ID) + + for i := 0; i < v.Schema().Len(); i++ { + tableReq.OutputOffsets = append(tableReq.OutputOffsets, uint32(i)) + } + + ts := v.TablePlans[0].(*plannercore.PhysicalTableScan) + startTS, err := b.getStartTS() + if err != nil { + return nil, err + } + e := &IndexLookUpExecutor{ + baseExecutor: newBaseExecutor(b.ctx, v.Schema(), v.ExplainID()), + dagPB: indexReq, + startTS: startTS, + table: tbl, + index: is.Index, + keepOrder: is.KeepOrder, + desc: is.Desc, + tableRequest: tableReq, + columns: ts.Columns, + dataReaderBuilder: &dataReaderBuilder{executorBuilder: b}, + idxCols: is.IdxCols, + colLens: is.IdxColLens, + idxPlans: v.IndexPlans, + tblPlans: v.TablePlans, + } + + if v.ExtraHandleCol != nil { + e.handleIdx = v.ExtraHandleCol.Index + } + return e, nil +} + +func (b *executorBuilder) buildIndexLookUpReader(v *plannercore.PhysicalIndexLookUpReader) *IndexLookUpExecutor { + ret, err := buildNoRangeIndexLookUpReader(b, v) + if err != nil { + b.err = err + return nil + } + + is := v.IndexPlans[0].(*plannercore.PhysicalIndexScan) + ts := v.TablePlans[0].(*plannercore.PhysicalTableScan) + + ret.ranges = is.Ranges + sctx := b.ctx.GetSessionVars().StmtCtx + sctx.IndexNames = append(sctx.IndexNames, is.Table.Name.O+":"+is.Index.Name.O) + sctx.TableIDs = append(sctx.TableIDs, ts.Table.ID) + return ret +} + +// dataReaderBuilder build an executor. 
+// The executor can be used to read data in the ranges which are constructed by datums. +// Differences from executorBuilder: +// 1. dataReaderBuilder calculate data range from argument, rather than plan. +// 2. the result executor is already opened. +type dataReaderBuilder struct { + plannercore.Plan + *executorBuilder +} + +func (builder *dataReaderBuilder) buildTableReaderFromHandles(ctx context.Context, e *TableReaderExecutor, handles []int64) (Executor, error) { + if e.dagPB.CollectExecutionSummaries == nil { + colExec := true + e.dagPB.CollectExecutionSummaries = &colExec + } + startTS, err := builder.getStartTS() + if err != nil { + return nil, err + } + + sort.Sort(sortutil.Int64Slice(handles)) + var b distsql.RequestBuilder + kvReq, err := b.SetTableHandles(getPhysicalTableID(e.table), handles). + SetDAGRequest(e.dagPB). + SetStartTS(startTS). + SetDesc(e.desc). + SetKeepOrder(e.keepOrder). + SetFromSessionVars(e.ctx.GetSessionVars()). + Build() + if err != nil { + return nil, err + } + e.kvRanges = append(e.kvRanges, kvReq.KeyRanges...) + e.resultHandler = &tableResultHandler{} + result, err := distsql.Select(ctx, builder.ctx, kvReq, retTypes(e)) + if err != nil { + return nil, err + } + e.resultHandler.open(nil, result) + return e, nil +} + +func getPhysicalTableID(t table.Table) int64 { + if p, ok := t.(table.PhysicalTable); ok { + return p.GetPhysicalID() + } + return t.Meta().ID +} diff --git a/executor/compiler.go b/executor/compiler.go new file mode 100644 index 0000000..af91fcc --- /dev/null +++ b/executor/compiler.go @@ -0,0 +1,50 @@ +// Copyright 2015 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package executor + +import ( + "context" + + "github.com/pingcap/tidb/infoschema" + "github.com/pingcap/tidb/parser/ast" + "github.com/pingcap/tidb/planner" + plannercore "github.com/pingcap/tidb/planner/core" + "github.com/pingcap/tidb/sessionctx" +) + +// Compiler compiles an ast.StmtNode to a physical plan. +type Compiler struct { + Ctx sessionctx.Context +} + +// Compile compiles an ast.StmtNode to a physical plan. +func (c *Compiler) Compile(ctx context.Context, stmtNode ast.StmtNode) (*ExecStmt, error) { + infoSchema := infoschema.GetInfoSchema(c.Ctx) + if err := plannercore.Preprocess(c.Ctx, stmtNode, infoSchema); err != nil { + return nil, err + } + + finalPlan, names, err := planner.Optimize(ctx, c.Ctx, stmtNode, infoSchema) + if err != nil { + return nil, err + } + return &ExecStmt{ + InfoSchema: infoSchema, + Plan: finalPlan, + Text: stmtNode.Text(), + StmtNode: stmtNode, + Ctx: c.Ctx, + OutputNames: names, + }, nil +} diff --git a/executor/ddl.go b/executor/ddl.go new file mode 100644 index 0000000..aa29b5c --- /dev/null +++ b/executor/ddl.go @@ -0,0 +1,246 @@ +// Copyright 2016 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package executor + +import ( + "context" + "strings" + + "github.com/pingcap/errors" + "github.com/pingcap/tidb/domain" + "github.com/pingcap/tidb/infoschema" + "github.com/pingcap/tidb/parser/ast" + "github.com/pingcap/tidb/parser/model" + "github.com/pingcap/tidb/parser/mysql" + "github.com/pingcap/tidb/sessionctx/variable" + "github.com/pingcap/tidb/types" + "github.com/pingcap/tidb/util/chunk" + "github.com/pingcap/tidb/util/logutil" + "go.uber.org/zap" +) + +// DDLExec represents a DDL executor. +// It grabs a DDL instance from Domain, calling the DDL methods to do the work. +type DDLExec struct { + baseExecutor + + stmt ast.StmtNode + is infoschema.InfoSchema + done bool +} + +// toErr converts the error to the ErrInfoSchemaChanged when the schema is outdated. +func (e *DDLExec) toErr(err error) error { + // The err may be cause by schema changed, here we distinguish the ErrInfoSchemaChanged error from other errors. + dom := domain.GetDomain(e.ctx) + checker := domain.NewSchemaChecker(dom, e.is.SchemaMetaVersion(), nil) + txn, err1 := e.ctx.Txn(true) + if err1 != nil { + logutil.BgLogger().Error("active txn failed", zap.Error(err)) + return err1 + } + schemaInfoErr := checker.Check(txn.StartTS()) + if schemaInfoErr != nil { + return errors.Trace(schemaInfoErr) + } + return err +} + +// Next implements the Executor Next interface. +func (e *DDLExec) Next(ctx context.Context, req *chunk.Chunk) (err error) { + if e.done { + return nil + } + e.done = true + + // For each DDL, we should commit the previous transaction and create a new transaction. + if err = e.ctx.NewTxn(ctx); err != nil { + return err + } + defer func() { e.ctx.GetSessionVars().StmtCtx.IsDDLJobInQueue = false }() + + switch x := e.stmt.(type) { + case *ast.AlterTableStmt: + err = e.executeAlterTable(x) + case *ast.CreateIndexStmt: + err = e.executeCreateIndex(x) + case *ast.CreateDatabaseStmt: + err = e.executeCreateDatabase(x) + case *ast.CreateTableStmt: + err = e.executeCreateTable(x) + case *ast.DropIndexStmt: + err = e.executeDropIndex(x) + case *ast.DropDatabaseStmt: + err = e.executeDropDatabase(x) + case *ast.DropTableStmt: + err = e.executeDropTableOrView(x) + } + if err != nil { + // If the owner return ErrTableNotExists error when running this DDL, it may be caused by schema changed, + // otherwise, ErrTableNotExists can be returned before putting this DDL job to the job queue. + if (e.ctx.GetSessionVars().StmtCtx.IsDDLJobInQueue && infoschema.ErrTableNotExists.Equal(err)) || + !e.ctx.GetSessionVars().StmtCtx.IsDDLJobInQueue { + return e.toErr(err) + } + return err + + } + + dom := domain.GetDomain(e.ctx) + // Update InfoSchema in TxnCtx, so it will pass schema check. + is := dom.InfoSchema() + txnCtx := e.ctx.GetSessionVars().TxnCtx + txnCtx.InfoSchema = is + txnCtx.SchemaVersion = is.SchemaMetaVersion() + // DDL will force commit old transaction, after DDL, in transaction status should be false. 
+ e.ctx.GetSessionVars().SetStatusFlag(mysql.ServerStatusInTrans, false) + return nil +} + +func (e *DDLExec) executeCreateDatabase(s *ast.CreateDatabaseStmt) error { + var opt *ast.CharsetOpt + if len(s.Options) != 0 { + opt = &ast.CharsetOpt{} + for _, val := range s.Options { + switch val.Tp { + case ast.DatabaseOptionCharset: + opt.Chs = val.Value + case ast.DatabaseOptionCollate: + opt.Col = val.Value + } + } + } + err := domain.GetDomain(e.ctx).DDL().CreateSchema(e.ctx, model.NewCIStr(s.Name), opt) + if err != nil { + if infoschema.ErrDatabaseExists.Equal(err) && s.IfNotExists { + err = nil + } + } + return err +} + +func (e *DDLExec) executeCreateTable(s *ast.CreateTableStmt) error { + err := domain.GetDomain(e.ctx).DDL().CreateTable(e.ctx, s) + return err +} + +func (e *DDLExec) executeCreateIndex(s *ast.CreateIndexStmt) error { + ident := ast.Ident{Schema: s.Table.Schema, Name: s.Table.Name} + err := domain.GetDomain(e.ctx).DDL().CreateIndex(e.ctx, ident, s.KeyType, model.NewCIStr(s.IndexName), + s.IndexPartSpecifications, s.IndexOption, s.IfNotExists) + return err +} + +func (e *DDLExec) executeDropDatabase(s *ast.DropDatabaseStmt) error { + dbName := model.NewCIStr(s.Name) + + // Protect important system table from been dropped by a mistake. + // I can hardly find a case that a user really need to do this. + if dbName.L == "mysql" { + return errors.New("Drop 'mysql' database is forbidden") + } + + err := domain.GetDomain(e.ctx).DDL().DropSchema(e.ctx, dbName) + if infoschema.ErrDatabaseNotExists.Equal(err) { + if s.IfExists { + err = nil + } else { + err = infoschema.ErrDatabaseDropExists.GenWithStackByArgs(s.Name) + } + } + sessionVars := e.ctx.GetSessionVars() + if err == nil && strings.ToLower(sessionVars.CurrentDB) == dbName.L { + sessionVars.CurrentDB = "" + err = variable.SetSessionSystemVar(sessionVars, variable.CharsetDatabase, types.NewStringDatum("utf8")) + if err != nil { + return err + } + err = variable.SetSessionSystemVar(sessionVars, variable.CollationDatabase, types.NewStringDatum("utf8_unicode_ci")) + if err != nil { + return err + } + } + return err +} + +// If one drop those tables by mistake, it's difficult to recover. +// In the worst case, the whole TiDB cluster fails to bootstrap, so we prevent user from dropping them. +var systemTables = map[string]struct{}{ + "tidb": {}, + "gc_delete_range": {}, + "gc_delete_range_done": {}, +} + +func isSystemTable(schema, table string) bool { + if schema != "mysql" { + return false + } + if _, ok := systemTables[table]; ok { + return true + } + return false +} + +func (e *DDLExec) executeDropTableOrView(s *ast.DropTableStmt) error { + var notExistTables []string + for _, tn := range s.Tables { + fullti := ast.Ident{Schema: tn.Schema, Name: tn.Name} + _, ok := e.is.SchemaByName(tn.Schema) + if !ok { + // TODO: we should return special error for table not exist, checking "not exist" is not enough, + // because some other errors may contain this error string too. + notExistTables = append(notExistTables, fullti.String()) + continue + } + _, err := e.is.TableByName(tn.Schema, tn.Name) + if err != nil && infoschema.ErrTableNotExists.Equal(err) { + notExistTables = append(notExistTables, fullti.String()) + continue + } else if err != nil { + return err + } + + // Protect important system table from been dropped by a mistake. + // I can hardly find a case that a user really need to do this. 
+ if isSystemTable(tn.Schema.L, tn.Name.L) { + return errors.Errorf("Drop tidb system table '%s.%s' is forbidden", tn.Schema.L, tn.Name.L) + } + + err = domain.GetDomain(e.ctx).DDL().DropTable(e.ctx, fullti) + if infoschema.ErrDatabaseNotExists.Equal(err) || infoschema.ErrTableNotExists.Equal(err) { + notExistTables = append(notExistTables, fullti.String()) + } else if err != nil { + return err + } + } + if len(notExistTables) > 0 && !s.IfExists { + return infoschema.ErrTableDropExists.GenWithStackByArgs(strings.Join(notExistTables, ",")) + } + return nil +} + +func (e *DDLExec) executeDropIndex(s *ast.DropIndexStmt) error { + ti := ast.Ident{Schema: s.Table.Schema, Name: s.Table.Name} + err := domain.GetDomain(e.ctx).DDL().DropIndex(e.ctx, ti, model.NewCIStr(s.IndexName), s.IfExists) + if (infoschema.ErrDatabaseNotExists.Equal(err) || infoschema.ErrTableNotExists.Equal(err)) && s.IfExists { + err = nil + } + return err +} + +func (e *DDLExec) executeAlterTable(s *ast.AlterTableStmt) error { + ti := ast.Ident{Schema: s.Table.Schema, Name: s.Table.Name} + err := domain.GetDomain(e.ctx).DDL().AlterTable(e.ctx, ti, s.Specs) + return err +} diff --git a/executor/ddl_test.go b/executor/ddl_test.go new file mode 100644 index 0000000..3218486 --- /dev/null +++ b/executor/ddl_test.go @@ -0,0 +1,265 @@ +// Copyright 2016 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package executor_test + +import ( + "fmt" + . 
"github.com/pingcap/check" + ddlutil "github.com/pingcap/tidb/ddl/util" + "github.com/pingcap/tidb/domain" + "github.com/pingcap/tidb/parser/model" + "github.com/pingcap/tidb/parser/mysql" + "github.com/pingcap/tidb/parser/terror" + plannercore "github.com/pingcap/tidb/planner/core" + "github.com/pingcap/tidb/sessionctx/variable" + "github.com/pingcap/tidb/util/testkit" + "math" + "strings" +) + +func (s *testSuite6) TestCreateDropDatabase(c *C) { + tk := testkit.NewTestKit(c, s.store) + tk.MustExec("create database if not exists drop_test;") + tk.MustExec("drop database if exists drop_test;") + tk.MustExec("create database drop_test;") + tk.MustExec("use drop_test;") + tk.MustExec("drop database drop_test;") + _, err := tk.Exec("drop table t;") + c.Assert(err.Error(), Equals, plannercore.ErrNoDB.Error()) + err = tk.ExecToErr("select * from t;") + c.Assert(err.Error(), Equals, plannercore.ErrNoDB.Error()) + + _, err = tk.Exec("drop database mysql") + c.Assert(err, NotNil) +} + +func (s *testSuite6) TestCreateDropTable(c *C) { + tk := testkit.NewTestKit(c, s.store) + tk.MustExec("use test") + tk.MustExec("create table if not exists drop_test (a int)") + tk.MustExec("drop table if exists drop_test") + tk.MustExec("create table drop_test (a int)") + tk.MustExec("drop table drop_test") + + _, err := tk.Exec("drop table mysql.gc_delete_range") + c.Assert(err, NotNil) +} + +func (s *testSuite6) TestCreateDropIndex(c *C) { + tk := testkit.NewTestKit(c, s.store) + tk.MustExec("use test") + tk.MustExec("create table if not exists drop_test (a int)") + tk.MustExec("create index idx_a on drop_test (a)") + tk.MustExec("drop index idx_a on drop_test") + tk.MustExec("drop table drop_test") +} + +func (s *testSuite6) TestAddNotNullColumnNoDefault(c *C) { + tk := testkit.NewTestKit(c, s.store) + tk.MustExec("use test") + tk.MustExec("create table nn (c1 int)") + tk.MustExec("insert nn values (1), (2)") + tk.MustExec("alter table nn add column c2 int not null") + + tbl, err := domain.GetDomain(tk.Se).InfoSchema().TableByName(model.NewCIStr("test"), model.NewCIStr("nn")) + c.Assert(err, IsNil) + col2 := tbl.Meta().Columns[1] + c.Assert(col2.DefaultValue, IsNil) + c.Assert(col2.OriginDefaultValue, Equals, "0") + + tk.MustQuery("select * from nn").Check(testkit.Rows("1 0", "2 0")) + _, err = tk.Exec("insert nn (c1) values (3)") + c.Check(err, NotNil) + tk.MustExec("set sql_mode=''") + tk.MustExec("insert nn (c1) values (3)") + tk.MustQuery("select * from nn").Check(testkit.Rows("1 0", "2 0", "3 0")) +} + +func (s *testSuite6) TestAlterTableModifyColumn(c *C) { + tk := testkit.NewTestKit(c, s.store) + tk.MustExec("use test") + tk.MustExec("drop table if exists mc") + tk.MustExec("create table mc(c1 int, c2 varchar(10), c3 bit)") + _, err := tk.Exec("alter table mc modify column c1 short") + c.Assert(err, NotNil) + tk.MustExec("alter table mc modify column c1 bigint") + + _, err = tk.Exec("alter table mc modify column c2 blob") + c.Assert(err, NotNil) + + _, err = tk.Exec("alter table mc modify column c2 varchar(8)") + c.Assert(err, NotNil) + tk.MustExec("alter table mc modify column c2 varchar(11)") + tk.MustExec("alter table mc modify column c2 text(13)") + tk.MustExec("alter table mc modify column c2 text") + tk.MustExec("alter table mc modify column c3 bit") + result := tk.MustQuery("show create table mc") + createSQL := result.Rows()[0][1] + expected := "CREATE TABLE `mc` (\n `c1` bigint(20) DEFAULT NULL,\n `c2` text DEFAULT NULL,\n `c3` bit(1) DEFAULT NULL\n) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 
COLLATE=utf8mb4_bin" + c.Assert(createSQL, Equals, expected) +} + +func (s *testSuite6) TestTooLargeIdentifierLength(c *C) { + tk := testkit.NewTestKit(c, s.store) + + // for database. + dbName1, dbName2 := strings.Repeat("a", mysql.MaxDatabaseNameLength), strings.Repeat("a", mysql.MaxDatabaseNameLength+1) + tk.MustExec(fmt.Sprintf("create database %s", dbName1)) + tk.MustExec(fmt.Sprintf("drop database %s", dbName1)) + _, err := tk.Exec(fmt.Sprintf("create database %s", dbName2)) + c.Assert(err.Error(), Equals, fmt.Sprintf("[ddl:1059]Identifier name '%s' is too long", dbName2)) + + // for table. + tk.MustExec("use test") + tableName1, tableName2 := strings.Repeat("b", mysql.MaxTableNameLength), strings.Repeat("b", mysql.MaxTableNameLength+1) + tk.MustExec(fmt.Sprintf("create table %s(c int)", tableName1)) + tk.MustExec(fmt.Sprintf("drop table %s", tableName1)) + _, err = tk.Exec(fmt.Sprintf("create table %s(c int)", tableName2)) + c.Assert(err.Error(), Equals, fmt.Sprintf("[ddl:1059]Identifier name '%s' is too long", tableName2)) + + // for column. + tk.MustExec("drop table if exists t;") + columnName1, columnName2 := strings.Repeat("c", mysql.MaxColumnNameLength), strings.Repeat("c", mysql.MaxColumnNameLength+1) + tk.MustExec(fmt.Sprintf("create table t(%s int)", columnName1)) + tk.MustExec("drop table t") + _, err = tk.Exec(fmt.Sprintf("create table t(%s int)", columnName2)) + c.Assert(err.Error(), Equals, fmt.Sprintf("[ddl:1059]Identifier name '%s' is too long", columnName2)) + + // for index. + tk.MustExec("create table t(c int);") + indexName1, indexName2 := strings.Repeat("d", mysql.MaxIndexIdentifierLen), strings.Repeat("d", mysql.MaxIndexIdentifierLen+1) + tk.MustExec(fmt.Sprintf("create index %s on t(c)", indexName1)) + tk.MustExec(fmt.Sprintf("drop index %s on t", indexName1)) + _, err = tk.Exec(fmt.Sprintf("create index %s on t(c)", indexName2)) + c.Assert(err.Error(), Equals, fmt.Sprintf("[ddl:1059]Identifier name '%s' is too long", indexName2)) + + // for create table with index. 
+ tk.MustExec("drop table t;") + _, err = tk.Exec(fmt.Sprintf("create table t(c int, index %s(c));", indexName2)) + c.Assert(err.Error(), Equals, fmt.Sprintf("[ddl:1059]Identifier name '%s' is too long", indexName2)) +} + +func (s *testSuite6) TestMaxHandleAddIndex(c *C) { + tk := testkit.NewTestKit(c, s.store) + + tk.MustExec("use test") + tk.MustExec("create table t(a bigint PRIMARY KEY, b int)") + tk.MustExec(fmt.Sprintf("insert into t values(%v, 1)", math.MaxInt64)) + tk.MustExec(fmt.Sprintf("insert into t values(%v, 1)", math.MinInt64)) + tk.MustExec("alter table t add index idx_b(b)") + + tk.MustExec("create table t1(a bigint UNSIGNED PRIMARY KEY, b int)") + tk.MustExec(fmt.Sprintf("insert into t1 values(%v, 1)", uint64(math.MaxUint64))) + tk.MustExec(fmt.Sprintf("insert into t1 values(%v, 1)", 0)) + tk.MustExec("alter table t1 add index idx_b(b)") + +} + +func (s *testSuite6) TestSetDDLReorgWorkerCnt(c *C) { + tk := testkit.NewTestKit(c, s.store) + tk.MustExec("use test") + err := ddlutil.LoadDDLReorgVars(tk.Se) + c.Assert(err, IsNil) + c.Assert(variable.GetDDLReorgWorkerCounter(), Equals, int32(variable.DefTiDBDDLReorgWorkerCount)) + tk.MustExec("set @@global.tidb_ddl_reorg_worker_cnt = 1") + err = ddlutil.LoadDDLReorgVars(tk.Se) + c.Assert(err, IsNil) + c.Assert(variable.GetDDLReorgWorkerCounter(), Equals, int32(1)) + tk.MustExec("set @@global.tidb_ddl_reorg_worker_cnt = 100") + err = ddlutil.LoadDDLReorgVars(tk.Se) + c.Assert(err, IsNil) + c.Assert(variable.GetDDLReorgWorkerCounter(), Equals, int32(100)) + _, err = tk.Exec("set @@global.tidb_ddl_reorg_worker_cnt = invalid_val") + c.Assert(terror.ErrorEqual(err, variable.ErrWrongTypeForVar), IsTrue, Commentf("err %v", err)) + tk.MustExec("set @@global.tidb_ddl_reorg_worker_cnt = 100") + err = ddlutil.LoadDDLReorgVars(tk.Se) + c.Assert(err, IsNil) + c.Assert(variable.GetDDLReorgWorkerCounter(), Equals, int32(100)) + _, err = tk.Exec("set @@global.tidb_ddl_reorg_worker_cnt = -1") + c.Assert(terror.ErrorEqual(err, variable.ErrWrongValueForVar), IsTrue, Commentf("err %v", err)) + + tk.MustExec("set @@global.tidb_ddl_reorg_worker_cnt = 100") + res := tk.MustQuery("select @@global.tidb_ddl_reorg_worker_cnt") + res.Check(testkit.Rows("100")) + + res = tk.MustQuery("select @@global.tidb_ddl_reorg_worker_cnt") + res.Check(testkit.Rows("100")) + tk.MustExec("set @@global.tidb_ddl_reorg_worker_cnt = 100") + res = tk.MustQuery("select @@global.tidb_ddl_reorg_worker_cnt") + res.Check(testkit.Rows("100")) +} + +func (s *testSuite6) TestSetDDLReorgBatchSize(c *C) { + tk := testkit.NewTestKit(c, s.store) + tk.MustExec("use test") + err := ddlutil.LoadDDLReorgVars(tk.Se) + c.Assert(err, IsNil) + c.Assert(variable.GetDDLReorgBatchSize(), Equals, int32(variable.DefTiDBDDLReorgBatchSize)) + + tk.MustExec("set @@global.tidb_ddl_reorg_batch_size = 1") + tk.MustQuery("show warnings;").Check(testkit.Rows("Warning 1292 Truncated incorrect tidb_ddl_reorg_batch_size value: '1'")) + err = ddlutil.LoadDDLReorgVars(tk.Se) + c.Assert(err, IsNil) + c.Assert(variable.GetDDLReorgBatchSize(), Equals, int32(variable.MinDDLReorgBatchSize)) + tk.MustExec(fmt.Sprintf("set @@global.tidb_ddl_reorg_batch_size = %v", variable.MaxDDLReorgBatchSize+1)) + tk.MustQuery("show warnings;").Check(testkit.Rows(fmt.Sprintf("Warning 1292 Truncated incorrect tidb_ddl_reorg_batch_size value: '%d'", variable.MaxDDLReorgBatchSize+1))) + err = ddlutil.LoadDDLReorgVars(tk.Se) + c.Assert(err, IsNil) + c.Assert(variable.GetDDLReorgBatchSize(), Equals, int32(variable.MaxDDLReorgBatchSize)) + 
_, err = tk.Exec("set @@global.tidb_ddl_reorg_batch_size = invalid_val") + c.Assert(terror.ErrorEqual(err, variable.ErrWrongTypeForVar), IsTrue, Commentf("err %v", err)) + tk.MustExec("set @@global.tidb_ddl_reorg_batch_size = 100") + err = ddlutil.LoadDDLReorgVars(tk.Se) + c.Assert(err, IsNil) + c.Assert(variable.GetDDLReorgBatchSize(), Equals, int32(100)) + tk.MustExec("set @@global.tidb_ddl_reorg_batch_size = -1") + tk.MustQuery("show warnings;").Check(testkit.Rows("Warning 1292 Truncated incorrect tidb_ddl_reorg_batch_size value: '-1'")) + + tk.MustExec("set @@global.tidb_ddl_reorg_batch_size = 100") + res := tk.MustQuery("select @@global.tidb_ddl_reorg_batch_size") + res.Check(testkit.Rows("100")) + + res = tk.MustQuery("select @@global.tidb_ddl_reorg_batch_size") + res.Check(testkit.Rows(fmt.Sprintf("%v", 100))) + tk.MustExec("set @@global.tidb_ddl_reorg_batch_size = 1000") + res = tk.MustQuery("select @@global.tidb_ddl_reorg_batch_size") + res.Check(testkit.Rows("1000")) +} + +func (s *testSuite6) TestSetDDLErrorCountLimit(c *C) { + tk := testkit.NewTestKit(c, s.store) + tk.MustExec("use test") + err := ddlutil.LoadDDLVars(tk.Se) + c.Assert(err, IsNil) + c.Assert(variable.GetDDLErrorCountLimit(), Equals, int64(variable.DefTiDBDDLErrorCountLimit)) + + tk.MustExec("set @@global.tidb_ddl_error_count_limit = -1") + tk.MustQuery("show warnings;").Check(testkit.Rows("Warning 1292 Truncated incorrect tidb_ddl_error_count_limit value: '-1'")) + err = ddlutil.LoadDDLVars(tk.Se) + c.Assert(err, IsNil) + c.Assert(variable.GetDDLErrorCountLimit(), Equals, int64(0)) + tk.MustExec(fmt.Sprintf("set @@global.tidb_ddl_error_count_limit = %v", uint64(math.MaxInt64)+1)) + tk.MustQuery("show warnings;").Check(testkit.Rows(fmt.Sprintf("Warning 1292 Truncated incorrect tidb_ddl_error_count_limit value: '%d'", uint64(math.MaxInt64)+1))) + err = ddlutil.LoadDDLVars(tk.Se) + c.Assert(err, IsNil) + c.Assert(variable.GetDDLErrorCountLimit(), Equals, int64(math.MaxInt64)) + _, err = tk.Exec("set @@global.tidb_ddl_error_count_limit = invalid_val") + c.Assert(terror.ErrorEqual(err, variable.ErrWrongTypeForVar), IsTrue, Commentf("err %v", err)) + tk.MustExec("set @@global.tidb_ddl_error_count_limit = 100") + err = ddlutil.LoadDDLVars(tk.Se) + c.Assert(err, IsNil) + c.Assert(variable.GetDDLErrorCountLimit(), Equals, int64(100)) + res := tk.MustQuery("select @@global.tidb_ddl_error_count_limit") + res.Check(testkit.Rows("100")) +} diff --git a/executor/delete.go b/executor/delete.go new file mode 100644 index 0000000..b8c5825 --- /dev/null +++ b/executor/delete.go @@ -0,0 +1,115 @@ +// Copyright 2018 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package executor + +import ( + "context" + + plannercore "github.com/pingcap/tidb/planner/core" + "github.com/pingcap/tidb/sessionctx" + "github.com/pingcap/tidb/table" + "github.com/pingcap/tidb/types" + "github.com/pingcap/tidb/util/chunk" +) + +// DeleteExec represents a delete executor. 
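+// It reads rows from its single child executor, extracts the handle column of each row and
+// removes the matching record through table.Table.RemoveRecord (see deleteSingleTableByChunk
+// and removeRow below); only the single-table DELETE path is implemented here.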
+// See https://dev.mysql.com/doc/refman/5.7/en/delete.html +type DeleteExec struct { + baseExecutor + + tblID2Table map[int64]table.Table + + // tblColPosInfos stores relationship between column ordinal to its table handle. + // the columns ordinals is present in ordinal range format, @see plannercore.TblColPosInfos + tblColPosInfos plannercore.TblColPosInfoSlice +} + +// Next implements the Executor Next interface. +func (e *DeleteExec) Next(ctx context.Context, req *chunk.Chunk) error { + req.Reset() + return e.deleteSingleTableByChunk(ctx) +} + +func (e *DeleteExec) deleteOneRow(tbl table.Table, handleIndex int, isExtraHandle bool, row []types.Datum) error { + end := len(row) + if isExtraHandle { + end-- + } + handle := row[handleIndex].GetInt64() + err := e.removeRow(e.ctx, tbl, handle, row[:end]) + if err != nil { + return err + } + + return nil +} + +func (e *DeleteExec) deleteSingleTableByChunk(ctx context.Context) error { + var ( + tbl table.Table + isExtrahandle bool + handleIndex int + rowCount int + ) + for _, info := range e.tblColPosInfos { + tbl = e.tblID2Table[info.TblID] + handleIndex = info.HandleOrdinal + isExtrahandle = handleIsExtra(e.children[0].Schema().Columns[info.HandleOrdinal]) + } + + fields := retTypes(e.children[0]) + chk := newFirstChunk(e.children[0]) + for { + iter := chunk.NewIterator4Chunk(chk) + + err := Next(ctx, e.children[0], chk) + if err != nil { + return err + } + if chk.NumRows() == 0 { + break + } + + for chunkRow := iter.Begin(); chunkRow != iter.End(); chunkRow = iter.Next() { + datumRow := chunkRow.GetDatumRow(fields) + err = e.deleteOneRow(tbl, handleIndex, isExtrahandle, datumRow) + if err != nil { + return err + } + rowCount++ + } + chk = chunk.Renew(chk, e.maxChunkSize) + } + + return nil +} + +func (e *DeleteExec) removeRow(ctx sessionctx.Context, t table.Table, h int64, data []types.Datum) error { + err := t.RemoveRecord(ctx, h, data) + if err != nil { + return err + } + ctx.GetSessionVars().StmtCtx.AddAffectedRows(1) + return nil +} + +// Close implements the Executor Close interface. +func (e *DeleteExec) Close() error { + return e.children[0].Close() +} + +// Open implements the Executor Open interface. +func (e *DeleteExec) Open(ctx context.Context) error { + return e.children[0].Open(ctx) +} diff --git a/executor/distsql.go b/executor/distsql.go new file mode 100644 index 0000000..c3939b1 --- /dev/null +++ b/executor/distsql.go @@ -0,0 +1,637 @@ +// Copyright 2017 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. 
+
+package executor
+
+import (
+	"context"
+	"go.uber.org/zap"
+	"math"
+	"runtime"
+	"sort"
+	"sync"
+	"sync/atomic"
+
+	"github.com/pingcap/errors"
+	"github.com/pingcap/tidb/distsql"
+	"github.com/pingcap/tidb/expression"
+	"github.com/pingcap/tidb/kv"
+	"github.com/pingcap/tidb/parser/model"
+	"github.com/pingcap/tidb/parser/mysql"
+	"github.com/pingcap/tidb/parser/terror"
+	plannercore "github.com/pingcap/tidb/planner/core"
+	"github.com/pingcap/tidb/table"
+	"github.com/pingcap/tidb/types"
+	"github.com/pingcap/tidb/util/chunk"
+	"github.com/pingcap/tidb/util/logutil"
+	"github.com/pingcap/tidb/util/ranger"
+	"github.com/pingcap/tidb/util/stringutil"
+	"github.com/pingcap/tipb/go-tipb"
+)
+
+var (
+	_ Executor = &TableReaderExecutor{}
+	_ Executor = &IndexReaderExecutor{}
+	_ Executor = &IndexLookUpExecutor{}
+)
+
+// LookupTableTaskChannelSize represents the channel size of the index double read taskChan.
+var LookupTableTaskChannelSize int32 = 50
+
+// lookupTableTask is created from a partial result of an index request which
+// contains the handles in those index keys.
+type lookupTableTask struct {
+	handles []int64
+	rowIdx  []int // rowIdx represents the handle index for every row. Only used when keeping order.
+	rows    []chunk.Row
+	idxRows *chunk.Chunk
+	cursor  int
+
+	doneCh chan error
+
+	// indexOrder map is used to save the original index order for the handles.
+	// Without this map, the original index order might be lost.
+	// The handles fetched from the index are originally ordered by the index, but we need them
+	// ordered by handle value to build the table request.
+	indexOrder map[int64]int
+	// duplicatedIndexOrder map is like indexOrder, but it's used when checkIndexValue isn't nil and
+	// the same handle occurs multiple times in the index.
+	duplicatedIndexOrder map[int64]int
+}
+
+func (task *lookupTableTask) Len() int {
+	return len(task.rows)
+}
+
+func (task *lookupTableTask) Less(i, j int) bool {
+	return task.rowIdx[i] < task.rowIdx[j]
+}
+
+func (task *lookupTableTask) Swap(i, j int) {
+	task.rowIdx[i], task.rowIdx[j] = task.rowIdx[j], task.rowIdx[i]
+	task.rows[i], task.rows[j] = task.rows[j], task.rows[i]
+}
+
+// Closeable is an interface for closeable structures.
+type Closeable interface {
+	// Close closes the object.
+	Close() error
+}
+
+// closeAll closes all objects even if an object returns an error.
+// If multiple objects return errors, the first error will be returned.
+func closeAll(objs ...Closeable) error {
+	var err error
+	for _, obj := range objs {
+		if obj != nil {
+			err1 := obj.Close()
+			if err == nil && err1 != nil {
+				err = err1
+			}
+		}
+	}
+	if err != nil {
+		return errors.Trace(err)
+	}
+	return nil
+}
+
+// handleIsExtra checks whether this column is an extra handle column generated during the plan building phase.
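+// The extra handle is the implicit row-handle column (model.ExtraHandleID) that the planner
+// appends when it needs the handle but the table has no integer primary key to serve as one.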
+func handleIsExtra(col *expression.Column) bool { + if col != nil && col.ID == model.ExtraHandleID { + return true + } + return false +} + +func splitRanges(ranges []*ranger.Range, keepOrder bool, desc bool) ([]*ranger.Range, []*ranger.Range) { + if len(ranges) == 0 || ranges[0].LowVal[0].Kind() == types.KindInt64 { + return ranges, nil + } + idx := sort.Search(len(ranges), func(i int) bool { return ranges[i].HighVal[0].GetUint64() > math.MaxInt64 }) + if idx == len(ranges) { + return ranges, nil + } + if ranges[idx].LowVal[0].GetUint64() > math.MaxInt64 { + signedRanges := ranges[0:idx] + unsignedRanges := ranges[idx:] + if !keepOrder { + return append(unsignedRanges, signedRanges...), nil + } + if desc { + return unsignedRanges, signedRanges + } + return signedRanges, unsignedRanges + } + signedRanges := make([]*ranger.Range, 0, idx+1) + unsignedRanges := make([]*ranger.Range, 0, len(ranges)-idx) + signedRanges = append(signedRanges, ranges[0:idx]...) + if !(ranges[idx].LowVal[0].GetUint64() == math.MaxInt64 && ranges[idx].LowExclude) { + signedRanges = append(signedRanges, &ranger.Range{ + LowVal: ranges[idx].LowVal, + LowExclude: ranges[idx].LowExclude, + HighVal: []types.Datum{types.NewUintDatum(math.MaxInt64)}, + }) + } + if !(ranges[idx].HighVal[0].GetUint64() == math.MaxInt64+1 && ranges[idx].HighExclude) { + unsignedRanges = append(unsignedRanges, &ranger.Range{ + LowVal: []types.Datum{types.NewUintDatum(math.MaxInt64 + 1)}, + HighVal: ranges[idx].HighVal, + HighExclude: ranges[idx].HighExclude, + }) + } + if idx < len(ranges) { + unsignedRanges = append(unsignedRanges, ranges[idx+1:]...) + } + if !keepOrder { + return append(unsignedRanges, signedRanges...), nil + } + if desc { + return unsignedRanges, signedRanges + } + return signedRanges, unsignedRanges +} + +// IndexReaderExecutor sends dag request and reads index data from kv layer. +type IndexReaderExecutor struct { + baseExecutor + + // For a partitioned table, the IndexReaderExecutor works on a partition, so + // the type of this table field is actually `table.PhysicalTable`. + table table.Table + index *model.IndexInfo + physicalTableID int64 + keepOrder bool + desc bool + ranges []*ranger.Range + // kvRanges are only used for union scan. + kvRanges []kv.KeyRange + dagPB *tipb.DAGRequest + startTS uint64 + + // result returns one or more distsql.PartialResult and each PartialResult is returned by one region. + result distsql.SelectResult + // columns are only required by union scan. + columns []*model.ColumnInfo + // outputColumns are only required by union scan. + outputColumns []*expression.Column + + idxCols []*expression.Column + colLens []int + plans []plannercore.PhysicalPlan +} + +// Close clears all resources hold by current object. +func (e *IndexReaderExecutor) Close() error { + err := e.result.Close() + e.result = nil + return err +} + +// Next implements the Executor Next interface. +func (e *IndexReaderExecutor) Next(ctx context.Context, req *chunk.Chunk) error { + err := e.result.Next(ctx, req) + return err +} + +// Open implements the Executor Open interface. 
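+// It converts the planner's index ranges into kv key ranges for this index via
+// distsql.IndexRangesToKVRanges and then builds and sends the distsql request (see open below).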
+func (e *IndexReaderExecutor) Open(ctx context.Context) error { + var err error + kvRanges, err := distsql.IndexRangesToKVRanges(e.ctx.GetSessionVars().StmtCtx, e.physicalTableID, e.index.ID, e.ranges) + if err != nil { + return err + } + return e.open(ctx, kvRanges) +} + +func (e *IndexReaderExecutor) open(ctx context.Context, kvRanges []kv.KeyRange) error { + var err error + e.kvRanges = kvRanges + + var builder distsql.RequestBuilder + kvReq, err := builder.SetKeyRanges(kvRanges). + SetDAGRequest(e.dagPB). + SetStartTS(e.startTS). + SetDesc(e.desc). + SetKeepOrder(e.keepOrder). + SetFromSessionVars(e.ctx.GetSessionVars()). + Build() + if err != nil { + return err + } + e.result, err = distsql.Select(ctx, e.ctx, kvReq, retTypes(e)) + return err +} + +// IndexLookUpExecutor implements double read for index scan. +type IndexLookUpExecutor struct { + baseExecutor + + table table.Table + index *model.IndexInfo + keepOrder bool + desc bool + ranges []*ranger.Range + dagPB *tipb.DAGRequest + startTS uint64 + // handleIdx is the index of handle, which is only used for case of keeping order. + handleIdx int + tableRequest *tipb.DAGRequest + // columns are only required by union scan. + columns []*model.ColumnInfo + *dataReaderBuilder + // All fields above are immutable. + + idxWorkerWg sync.WaitGroup + tblWorkerWg sync.WaitGroup + finished chan struct{} + + kvRanges []kv.KeyRange + workerStarted bool + + resultCh chan *lookupTableTask + resultCurr *lookupTableTask + + idxPlans []plannercore.PhysicalPlan + tblPlans []plannercore.PhysicalPlan + idxCols []*expression.Column + colLens []int +} + +// Open implements the Executor Open interface. +func (e *IndexLookUpExecutor) Open(ctx context.Context) error { + var err error + e.kvRanges, err = distsql.IndexRangesToKVRanges(e.ctx.GetSessionVars().StmtCtx, getPhysicalTableID(e.table), e.index.ID, e.ranges) + if err != nil { + return err + } + err = e.open(ctx) + return err +} + +func (e *IndexLookUpExecutor) open(ctx context.Context) error { + e.finished = make(chan struct{}) + e.resultCh = make(chan *lookupTableTask, atomic.LoadInt32(&LookupTableTaskChannelSize)) + return nil +} + +func (e *IndexLookUpExecutor) startWorkers(ctx context.Context, initBatchSize int) error { + // indexWorker will write to workCh and tableWorker will read from workCh, + // so fetching index and getting table data can run concurrently. + workCh := make(chan *lookupTableTask, 1) + if err := e.startIndexWorker(ctx, e.kvRanges, workCh, initBatchSize); err != nil { + return err + } + e.startTableWorker(ctx, workCh) + e.workerStarted = true + return nil +} + +// startIndexWorker launch a background goroutine to fetch handles, send the results to workCh. +func (e *IndexLookUpExecutor) startIndexWorker(ctx context.Context, kvRanges []kv.KeyRange, workCh chan<- *lookupTableTask, initBatchSize int) error { + var builder distsql.RequestBuilder + kvReq, err := builder.SetKeyRanges(kvRanges). + SetDAGRequest(e.dagPB). + SetStartTS(e.startTS). + SetDesc(e.desc). + SetKeepOrder(e.keepOrder). + SetFromSessionVars(e.ctx.GetSessionVars()). 
+ Build() + if err != nil { + return err + } + tps := []*types.FieldType{types.NewFieldType(mysql.TypeLonglong)} + result, err := distsql.Select(ctx, e.ctx, kvReq, tps) + if err != nil { + return err + } + worker := &indexWorker{ + idxLookup: e, + workCh: workCh, + finished: e.finished, + resultCh: e.resultCh, + keepOrder: e.keepOrder, + batchSize: initBatchSize, + maxBatchSize: e.ctx.GetSessionVars().IndexLookupSize, + maxChunkSize: e.maxChunkSize, + } + if worker.batchSize > worker.maxBatchSize { + worker.batchSize = worker.maxBatchSize + } + e.idxWorkerWg.Add(1) + go func() { + ctx1, cancel := context.WithCancel(ctx) + _, err = worker.fetchHandles(ctx1, result) + if err != nil { + logutil.Logger(ctx).Error("Fetch handles failed", zap.Error(err)) + } + cancel() + if err := result.Close(); err != nil { + logutil.Logger(ctx).Error("close Select result failed", zap.Error(err)) + } + close(workCh) + close(e.resultCh) + e.idxWorkerWg.Done() + }() + return nil +} + +// startTableWorker launchs some background goroutines which pick tasks from workCh and execute the task. +func (e *IndexLookUpExecutor) startTableWorker(ctx context.Context, workCh <-chan *lookupTableTask) { + lookupConcurrencyLimit := e.ctx.GetSessionVars().IndexLookupConcurrency + e.tblWorkerWg.Add(lookupConcurrencyLimit) + for i := 0; i < lookupConcurrencyLimit; i++ { + worker := &tableWorker{ + idxLookup: e, + workCh: workCh, + finished: e.finished, + buildTblReader: e.buildTableReader, + keepOrder: e.keepOrder, + handleIdx: e.handleIdx, + } + ctx1, cancel := context.WithCancel(ctx) + go func() { + worker.pickAndExecTask(ctx1) + cancel() + e.tblWorkerWg.Done() + }() + } +} + +func (e *IndexLookUpExecutor) buildTableReader(ctx context.Context, handles []int64) (Executor, error) { + tableReaderExec := &TableReaderExecutor{ + baseExecutor: newBaseExecutor(e.ctx, e.schema, stringutil.MemoizeStr(func() string { return e.id.String() + "_tableReader" })), + table: e.table, + dagPB: e.tableRequest, + startTS: e.startTS, + columns: e.columns, + plans: e.tblPlans, + } + tableReader, err := e.dataReaderBuilder.buildTableReaderFromHandles(ctx, tableReaderExec, handles) + if err != nil { + logutil.Logger(ctx).Error("build table reader from handles failed", zap.Error(err)) + return nil, err + } + return tableReader, nil +} + +// Close implements Exec Close interface. +func (e *IndexLookUpExecutor) Close() error { + if !e.workerStarted || e.finished == nil { + return nil + } + + close(e.finished) + // Drain the resultCh and discard the result, in case that Next() doesn't fully + // consume the data, background worker still writing to resultCh and block forever. + for range e.resultCh { + } + e.idxWorkerWg.Wait() + e.tblWorkerWg.Wait() + e.finished = nil + e.workerStarted = false + return nil +} + +// Next implements Exec Next interface. 
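+// On the first call it lazily starts the index worker and table workers (see startWorkers),
+// then it drains finished lookupTableTask results into req until req is full.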
+func (e *IndexLookUpExecutor) Next(ctx context.Context, req *chunk.Chunk) error { + if !e.workerStarted { + if err := e.startWorkers(ctx, req.RequiredRows()); err != nil { + return err + } + } + req.Reset() + for { + resultTask, err := e.getResultTask() + if err != nil { + return err + } + if resultTask == nil { + return nil + } + for resultTask.cursor < len(resultTask.rows) { + req.AppendRow(resultTask.rows[resultTask.cursor]) + resultTask.cursor++ + if req.IsFull() { + return nil + } + } + } +} + +func (e *IndexLookUpExecutor) getResultTask() (*lookupTableTask, error) { + if e.resultCurr != nil && e.resultCurr.cursor < len(e.resultCurr.rows) { + return e.resultCurr, nil + } + task, ok := <-e.resultCh + if !ok { + return nil, nil + } + if err := <-task.doneCh; err != nil { + return nil, err + } + + e.resultCurr = task + return e.resultCurr, nil +} + +// indexWorker is used by IndexLookUpExecutor to maintain index lookup background goroutines. +type indexWorker struct { + idxLookup *IndexLookUpExecutor + workCh chan<- *lookupTableTask + finished <-chan struct{} + resultCh chan<- *lookupTableTask + keepOrder bool + + // batchSize is for lightweight startup. It will be increased exponentially until reaches the max batch size value. + batchSize int + maxBatchSize int + maxChunkSize int +} + +// fetchHandles fetches a batch of handles from index data and builds the index lookup tasks. +// The tasks are sent to workCh to be further processed by tableWorker, and sent to e.resultCh +// at the same time to keep data ordered. +func (w *indexWorker) fetchHandles(ctx context.Context, result distsql.SelectResult) (count uint64, err error) { + defer func() { + if r := recover(); r != nil { + buf := make([]byte, 4096) + stackSize := runtime.Stack(buf, false) + buf = buf[:stackSize] + logutil.Logger(ctx).Error("indexWorker in IndexLookupExecutor panicked", zap.String("stack", string(buf))) + err4Panic := errors.Errorf("%v", r) + doneCh := make(chan error, 1) + doneCh <- err4Panic + w.resultCh <- &lookupTableTask{ + doneCh: doneCh, + } + if err != nil { + err = errors.Trace(err4Panic) + } + } + }() + chk := chunk.NewChunkWithCapacity([]*types.FieldType{types.NewFieldType(mysql.TypeLonglong)}, w.idxLookup.maxChunkSize) + for { + handles, retChunk, scannedKeys, err := w.extractTaskHandles(ctx, chk, result, count) + if err != nil { + doneCh := make(chan error, 1) + doneCh <- err + w.resultCh <- &lookupTableTask{ + doneCh: doneCh, + } + return count, err + } + count += scannedKeys + if len(handles) == 0 { + return count, nil + } + task := w.buildTableTask(handles, retChunk) + select { + case <-ctx.Done(): + return count, nil + case <-w.finished: + return count, nil + case w.workCh <- task: + w.resultCh <- task + } + } +} + +func (w *indexWorker) extractTaskHandles(ctx context.Context, chk *chunk.Chunk, idxResult distsql.SelectResult, count uint64) ( + handles []int64, retChk *chunk.Chunk, scannedKeys uint64, err error) { + handleOffset := chk.NumCols() - 1 + handles = make([]int64, 0, w.batchSize) + for len(handles) < w.batchSize { + requiredRows := w.batchSize - len(handles) + chk.SetRequiredRows(requiredRows, w.maxChunkSize) + err = errors.Trace(idxResult.Next(ctx, chk)) + if err != nil { + return handles, nil, scannedKeys, err + } + if chk.NumRows() == 0 { + return handles, retChk, scannedKeys, nil + } + for i := 0; i < chk.NumRows(); i++ { + scannedKeys++ + h := chk.GetRow(i).GetInt64(handleOffset) + handles = append(handles, h) + } + } + w.batchSize *= 2 + if w.batchSize > w.maxBatchSize { + w.batchSize 
= w.maxBatchSize + } + return handles, retChk, scannedKeys, nil +} + +func (w *indexWorker) buildTableTask(handles []int64, retChk *chunk.Chunk) *lookupTableTask { + var indexOrder map[int64]int + var duplicatedIndexOrder map[int64]int + if w.keepOrder { + // Save the index order. + indexOrder = make(map[int64]int, len(handles)) + for i, h := range handles { + indexOrder[h] = i + } + } + + task := &lookupTableTask{ + handles: handles, + indexOrder: indexOrder, + duplicatedIndexOrder: duplicatedIndexOrder, + idxRows: retChk, + } + + task.doneCh = make(chan error, 1) + return task +} + +// tableWorker is used by IndexLookUpExecutor to maintain table lookup background goroutines. +type tableWorker struct { + idxLookup *IndexLookUpExecutor + workCh <-chan *lookupTableTask + finished <-chan struct{} + buildTblReader func(ctx context.Context, handles []int64) (Executor, error) + keepOrder bool + handleIdx int +} + +// pickAndExecTask picks tasks from workCh, and execute them. +func (w *tableWorker) pickAndExecTask(ctx context.Context) { + var task *lookupTableTask + var ok bool + defer func() { + if r := recover(); r != nil { + buf := make([]byte, 4096) + stackSize := runtime.Stack(buf, false) + buf = buf[:stackSize] + logutil.Logger(ctx).Error("tableWorker in IndexLookUpExecutor panicked", zap.String("stack", string(buf))) + task.doneCh <- errors.Errorf("%v", r) + } + }() + for { + // Don't check ctx.Done() on purpose. If background worker get the signal and all + // exit immediately, session's goroutine doesn't know this and still calling Next(), + // it may block reading task.doneCh forever. + select { + case task, ok = <-w.workCh: + if !ok { + return + } + case <-w.finished: + return + } + err := w.executeTask(ctx, task) + task.doneCh <- err + } +} + +// executeTask executes the table look up tasks. We will construct a table reader and send request by handles. +// Then we hold the returning rows and finish this task. +func (w *tableWorker) executeTask(ctx context.Context, task *lookupTableTask) error { + tableReader, err := w.buildTblReader(ctx, task.handles) + if err != nil { + logutil.Logger(ctx).Error("build table reader failed", zap.Error(err)) + return err + } + defer terror.Call(tableReader.Close) + + handleCnt := len(task.handles) + task.rows = make([]chunk.Row, 0, handleCnt) + for { + chk := newFirstChunk(tableReader) + err = Next(ctx, tableReader, chk) + if err != nil { + logutil.Logger(ctx).Error("table reader fetch next chunk failed", zap.Error(err)) + return err + } + if chk.NumRows() == 0 { + break + } + iter := chunk.NewIterator4Chunk(chk) + for row := iter.Begin(); row != iter.End(); row = iter.Next() { + task.rows = append(task.rows, row) + } + } + + if w.keepOrder { + task.rowIdx = make([]int, 0, len(task.rows)) + for i := range task.rows { + handle := task.rows[i].GetInt64(w.handleIdx) + task.rowIdx = append(task.rowIdx, task.indexOrder[handle]) + } + sort.Sort(task) + } + + return nil +} diff --git a/executor/distsql_test.go b/executor/distsql_test.go new file mode 100644 index 0000000..ca53b67 --- /dev/null +++ b/executor/distsql_test.go @@ -0,0 +1,160 @@ +// Copyright 2017 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package executor_test + +import ( + "bytes" + "context" + "fmt" + "runtime/pprof" + "strings" + + . "github.com/pingcap/check" + "github.com/pingcap/tidb/domain" + "github.com/pingcap/tidb/parser/model" + "github.com/pingcap/tidb/store/tikv" + "github.com/pingcap/tidb/tablecodec" + "github.com/pingcap/tidb/util/testkit" +) + +func checkGoroutineExists(keyword string) bool { + buf := new(bytes.Buffer) + profile := pprof.Lookup("goroutine") + profile.WriteTo(buf, 1) + str := buf.String() + return strings.Contains(str, keyword) +} + +func (s *testSuite3) TestCopClientSend(c *C) { + c.Skip("not stable") + if _, ok := s.store.GetClient().(*tikv.CopClient); !ok { + // Make sure the store is tikv store. + return + } + tk := testkit.NewTestKit(c, s.store) + tk.MustExec("use test") + tk.MustExec("create table copclient (id int primary key)") + + // Insert 1000 rows. + var values []string + for i := 0; i < 1000; i++ { + values = append(values, fmt.Sprintf("(%d)", i)) + } + tk.MustExec("insert copclient values " + strings.Join(values, ",")) + + // Get table ID for split. + dom := domain.GetDomain(tk.Se) + is := dom.InfoSchema() + tbl, err := is.TableByName(model.NewCIStr("test"), model.NewCIStr("copclient")) + c.Assert(err, IsNil) + tblID := tbl.Meta().ID + + // Split the table. + s.cluster.SplitTable(s.mvccStore, tblID, 100) + + ctx := context.Background() + // Send coprocessor request when the table split. + rs, err := tk.Exec("select sum(id) from copclient") + c.Assert(err, IsNil) + req := rs.NewChunk() + err = rs.Next(ctx, req) + c.Assert(err, IsNil) + c.Assert(req.GetRow(0).GetInt64(0), Equals, int64(499500)) + rs.Close() + + // Split one region. + key := tablecodec.EncodeRowKeyWithHandle(tblID, 500) + region, _ := s.cluster.GetRegionByKey([]byte(key)) + peerID := s.cluster.AllocID() + s.cluster.Split(region.GetId(), s.cluster.AllocID(), key, []uint64{peerID}, peerID) + + // Check again. + rs, err = tk.Exec("select sum(id) from copclient") + c.Assert(err, IsNil) + req = rs.NewChunk() + err = rs.Next(ctx, req) + c.Assert(err, IsNil) + c.Assert(req.GetRow(0).GetInt64(0), Equals, int64(499500)) + rs.Close() + + // Check there is no goroutine leak. 
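+	// checkGoroutineExists scans the pprof goroutine profile, so once the result set below is
+	// fully drained and closed, no "(*copIterator).work" goroutine should remain.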
+ rs, err = tk.Exec("select * from copclient order by id") + c.Assert(err, IsNil) + req = rs.NewChunk() + err = rs.Next(ctx, req) + c.Assert(err, IsNil) + rs.Close() + keyword := "(*copIterator).work" + c.Check(checkGoroutineExists(keyword), IsFalse) +} + +func (s *testSuite3) TestBigIntPK(c *C) { + tk := testkit.NewTestKit(c, s.store) + tk.MustExec("use test") + tk.MustExec("create table t(a bigint unsigned primary key, b int, c int, index idx(a, b))") + tk.MustExec("insert into t values(1, 1, 1), (9223372036854775807, 2, 2)") + tk.MustQuery("select * from t use index(idx) order by a").Check(testkit.Rows("1 1 1", "9223372036854775807 2 2")) +} + +func (s *testSuite3) TestUniqueKeyNullValueSelect(c *C) { + tk := testkit.NewTestKit(c, s.store) + tk.MustExec("use test") + tk.MustExec("drop table if exists t") + // test null in unique-key + tk.MustExec("create table t (id int default null, c varchar(20), unique id (id));") + tk.MustExec("insert t (c) values ('a'), ('b'), ('c');") + res := tk.MustQuery("select * from t where id is null;") + res.Check(testkit.Rows(" a", " b", " c")) + + // test null in mul unique-key + tk.MustExec("drop table t") + tk.MustExec("create table t (id int default null, b int default 1, c varchar(20), unique id_c(id, b));") + tk.MustExec("insert t (c) values ('a'), ('b'), ('c');") + res = tk.MustQuery("select * from t where id is null and b = 1;") + res.Check(testkit.Rows(" 1 a", " 1 b", " 1 c")) + + tk.MustExec("drop table t") + // test null in non-unique-key + tk.MustExec("create table t (id int default null, c varchar(20), key id (id));") + tk.MustExec("insert t (c) values ('a'), ('b'), ('c');") + res = tk.MustQuery("select * from t where id is null;") + res.Check(testkit.Rows(" a", " b", " c")) +} + +// TestIssue10178 contains tests for https://github.com/pingcap/tidb/issues/10178 . 
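+// The values 9223372036854775807 and 18446744073709551615 are MaxInt64 and MaxUint64, so the
+// queries below exercise an unsigned primary key around the signed/unsigned 64-bit boundary.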
+func (s *testSuite3) TestIssue10178(c *C) { + tk := testkit.NewTestKit(c, s.store) + tk.MustExec("use test") + tk.MustExec("drop table if exists t") + tk.MustExec("create table t(a bigint unsigned primary key)") + tk.MustExec("insert into t values(9223372036854775807), (18446744073709551615)") + tk.MustQuery("select max(a) from t").Check(testkit.Rows("18446744073709551615")) + tk.MustQuery("select * from t where a > 9223372036854775807").Check(testkit.Rows("18446744073709551615")) + tk.MustQuery("select * from t where a < 9223372036854775808").Check(testkit.Rows("9223372036854775807")) +} + +func (s *testSuite3) TestPushLimitDownIndexLookUpReader(c *C) { + tk := testkit.NewTestKit(c, s.store) + tk.MustExec("use test") + tk.MustExec("drop table if exists tbl") + tk.MustExec("create table tbl(a int, b int, c int, key idx_b_c(b,c))") + tk.MustExec("insert into tbl values(1,1,1),(2,2,2),(3,3,3),(4,4,4),(5,5,5)") + tk.MustQuery("select * from tbl use index(idx_b_c) where b > 1 limit 2,1").Check(testkit.Rows("4 4 4")) + tk.MustQuery("select * from tbl use index(idx_b_c) where b > 4 limit 2,1").Check(testkit.Rows()) + tk.MustQuery("select * from tbl use index(idx_b_c) where b > 3 limit 2,1").Check(testkit.Rows()) + tk.MustQuery("select * from tbl use index(idx_b_c) where b > 2 limit 2,1").Check(testkit.Rows("5 5 5")) + tk.MustQuery("select * from tbl use index(idx_b_c) where b > 1 limit 1").Check(testkit.Rows("2 2 2")) + tk.MustQuery("select * from tbl use index(idx_b_c) where b > 1 order by b desc limit 2,1").Check(testkit.Rows("3 3 3")) + tk.MustQuery("select * from tbl use index(idx_b_c) where b > 1 and c > 1 limit 2,1").Check(testkit.Rows("4 4 4")) +} diff --git a/executor/errors.go b/executor/errors.go new file mode 100644 index 0000000..d9ddcbf --- /dev/null +++ b/executor/errors.go @@ -0,0 +1,73 @@ +// Copyright 2018 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package executor + +import ( + "github.com/pingcap/tidb/parser/mysql" + "github.com/pingcap/tidb/parser/terror" +) + +// Error instances. 
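+// They reuse the MySQL error codes defined in parser/mysql so that clients receive standard
+// MySQL error numbers; the init function below registers the class-to-code mapping.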
+var ( + ErrGetStartTS = terror.ClassExecutor.New(mysql.ErrGetStartTS, mysql.MySQLErrName[mysql.ErrGetStartTS]) + ErrUnknownPlan = terror.ClassExecutor.New(mysql.ErrUnknownPlan, mysql.MySQLErrName[mysql.ErrUnknownPlan]) + ErrPrepareMulti = terror.ClassExecutor.New(mysql.ErrPrepareMulti, mysql.MySQLErrName[mysql.ErrPrepareMulti]) + ErrPrepareDDL = terror.ClassExecutor.New(mysql.ErrPrepareDDL, mysql.MySQLErrName[mysql.ErrPrepareDDL]) + ErrResultIsEmpty = terror.ClassExecutor.New(mysql.ErrResultIsEmpty, mysql.MySQLErrName[mysql.ErrResultIsEmpty]) + ErrBuildExecutor = terror.ClassExecutor.New(mysql.ErrBuildExecutor, mysql.MySQLErrName[mysql.ErrBuildExecutor]) + ErrBatchInsertFail = terror.ClassExecutor.New(mysql.ErrBatchInsertFail, mysql.MySQLErrName[mysql.ErrBatchInsertFail]) + + ErrCantCreateUserWithGrant = terror.ClassExecutor.New(mysql.ErrCantCreateUserWithGrant, mysql.MySQLErrName[mysql.ErrCantCreateUserWithGrant]) + ErrPasswordNoMatch = terror.ClassExecutor.New(mysql.ErrPasswordNoMatch, mysql.MySQLErrName[mysql.ErrPasswordNoMatch]) + ErrCannotUser = terror.ClassExecutor.New(mysql.ErrCannotUser, mysql.MySQLErrName[mysql.ErrCannotUser]) + ErrPasswordFormat = terror.ClassExecutor.New(mysql.ErrPasswordFormat, mysql.MySQLErrName[mysql.ErrPasswordFormat]) + ErrCantChangeTxCharacteristics = terror.ClassExecutor.New(mysql.ErrCantChangeTxCharacteristics, mysql.MySQLErrName[mysql.ErrCantChangeTxCharacteristics]) + ErrPsManyParam = terror.ClassExecutor.New(mysql.ErrPsManyParam, mysql.MySQLErrName[mysql.ErrPsManyParam]) + ErrAdminCheckTable = terror.ClassExecutor.New(mysql.ErrAdminCheckTable, mysql.MySQLErrName[mysql.ErrAdminCheckTable]) + ErrDBaccessDenied = terror.ClassExecutor.New(mysql.ErrDBaccessDenied, mysql.MySQLErrName[mysql.ErrDBaccessDenied]) + ErrTableaccessDenied = terror.ClassExecutor.New(mysql.ErrTableaccessDenied, mysql.MySQLErrName[mysql.ErrTableaccessDenied]) + ErrBadDB = terror.ClassExecutor.New(mysql.ErrBadDB, mysql.MySQLErrName[mysql.ErrBadDB]) + ErrWrongObject = terror.ClassExecutor.New(mysql.ErrWrongObject, mysql.MySQLErrName[mysql.ErrWrongObject]) + ErrRoleNotGranted = terror.ClassPrivilege.New(mysql.ErrRoleNotGranted, mysql.MySQLErrName[mysql.ErrRoleNotGranted]) + ErrQueryInterrupted = terror.ClassExecutor.New(mysql.ErrQueryInterrupted, mysql.MySQLErrName[mysql.ErrQueryInterrupted]) +) + +func init() { + // Map error codes to mysql error codes. 
+ tableMySQLErrCodes := map[terror.ErrCode]uint16{ + mysql.ErrGetStartTS: mysql.ErrGetStartTS, + mysql.ErrUnknownPlan: mysql.ErrUnknownPlan, + mysql.ErrPrepareMulti: mysql.ErrPrepareMulti, + mysql.ErrPrepareDDL: mysql.ErrPrepareDDL, + mysql.ErrResultIsEmpty: mysql.ErrResultIsEmpty, + mysql.ErrBuildExecutor: mysql.ErrBuildExecutor, + mysql.ErrBatchInsertFail: mysql.ErrBatchInsertFail, + + mysql.ErrCantCreateUserWithGrant: mysql.ErrCantCreateUserWithGrant, + mysql.ErrPasswordNoMatch: mysql.ErrPasswordNoMatch, + mysql.ErrCannotUser: mysql.ErrCannotUser, + mysql.ErrPasswordFormat: mysql.ErrPasswordFormat, + mysql.ErrCantChangeTxCharacteristics: mysql.ErrCantChangeTxCharacteristics, + mysql.ErrPsManyParam: mysql.ErrPsManyParam, + mysql.ErrAdminCheckTable: mysql.ErrAdminCheckTable, + mysql.ErrDBaccessDenied: mysql.ErrDBaccessDenied, + mysql.ErrTableaccessDenied: mysql.ErrTableaccessDenied, + mysql.ErrBadDB: mysql.ErrBadDB, + mysql.ErrWrongObject: mysql.ErrWrongObject, + mysql.ErrRoleNotGranted: mysql.ErrRoleNotGranted, + mysql.ErrQueryInterrupted: mysql.ErrQueryInterrupted, + mysql.ErrWrongValueCountOnRow: mysql.ErrWrongValueCountOnRow, + } + terror.ErrClassToMySQLCodes[terror.ClassExecutor] = tableMySQLErrCodes +} diff --git a/executor/executor.go b/executor/executor.go new file mode 100644 index 0000000..57b4f81 --- /dev/null +++ b/executor/executor.go @@ -0,0 +1,695 @@ +// Copyright 2015 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package executor + +import ( + "context" + "fmt" + "sync/atomic" + + "github.com/cznic/mathutil" + "github.com/pingcap/errors" + "github.com/pingcap/tidb/expression" + "github.com/pingcap/tidb/infoschema" + "github.com/pingcap/tidb/kv" + "github.com/pingcap/tidb/parser/ast" + "github.com/pingcap/tidb/parser/model" + "github.com/pingcap/tidb/sessionctx" + "github.com/pingcap/tidb/sessionctx/stmtctx" + "github.com/pingcap/tidb/table" + "github.com/pingcap/tidb/types" + "github.com/pingcap/tidb/util/admin" + "github.com/pingcap/tidb/util/chunk" +) + +var ( + _ Executor = &baseExecutor{} + _ Executor = &HashAggExec{} + _ Executor = &HashJoinExec{} + _ Executor = &IndexLookUpExecutor{} + _ Executor = &IndexReaderExecutor{} + _ Executor = &LimitExec{} + _ Executor = &MergeJoinExec{} + _ Executor = &ProjectionExec{} + _ Executor = &SelectionExec{} + _ Executor = &ShowDDLExec{} + _ Executor = &ShowDDLJobsExec{} + _ Executor = &SortExec{} + _ Executor = &TableDualExec{} + _ Executor = &TableReaderExecutor{} + _ Executor = &TableScanExec{} + _ Executor = &TopNExec{} +) + +type baseExecutor struct { + ctx sessionctx.Context + id fmt.Stringer + schema *expression.Schema + initCap int + maxChunkSize int + children []Executor + retFieldTypes []*types.FieldType +} + +// base returns the baseExecutor of an executor, don't override this method! +func (e *baseExecutor) base() *baseExecutor { + return e +} + +// Open initializes children recursively and "childrenResults" according to children's schemas. 
+func (e *baseExecutor) Open(ctx context.Context) error { + for _, child := range e.children { + err := child.Open(ctx) + if err != nil { + return err + } + } + return nil +} + +// Close closes all executors and release all resources. +func (e *baseExecutor) Close() error { + var firstErr error + for _, src := range e.children { + if err := src.Close(); err != nil && firstErr == nil { + firstErr = err + } + } + return firstErr +} + +// Schema returns the current baseExecutor's schema. If it is nil, then create and return a new one. +func (e *baseExecutor) Schema() *expression.Schema { + if e.schema == nil { + return expression.NewSchema() + } + return e.schema +} + +// newFirstChunk creates a new chunk to buffer current executor's result. +func newFirstChunk(e Executor) *chunk.Chunk { + base := e.base() + return chunk.New(base.retFieldTypes, base.initCap, base.maxChunkSize) +} + +// retTypes returns all output column types. +func retTypes(e Executor) []*types.FieldType { + base := e.base() + return base.retFieldTypes +} + +// Next fills multiple rows into a chunk. +func (e *baseExecutor) Next(ctx context.Context, req *chunk.Chunk) error { + return nil +} + +func newBaseExecutor(ctx sessionctx.Context, schema *expression.Schema, id fmt.Stringer, children ...Executor) baseExecutor { + e := baseExecutor{ + children: children, + ctx: ctx, + id: id, + schema: schema, + initCap: ctx.GetSessionVars().InitChunkSize, + maxChunkSize: ctx.GetSessionVars().MaxChunkSize, + } + if schema != nil { + cols := schema.Columns + e.retFieldTypes = make([]*types.FieldType, len(cols)) + for i := range cols { + e.retFieldTypes[i] = cols[i].RetType + } + } + return e +} + +// Executor is the physical implementation of a algebra operator. +// +// In TiDB, all algebra operators are implemented as iterators, i.e., they +// support a simple Open-Next-Close protocol. See this paper for more details: +// +// "Volcano-An Extensible and Parallel Query Evaluation System" +// +// Different from Volcano's execution model, a "Next" function call in TiDB will +// return a batch of rows, other than a single row in Volcano. +// NOTE: Executors must call "chk.Reset()" before appending their results to it. +type Executor interface { + base() *baseExecutor + Open(context.Context) error + Next(ctx context.Context, req *chunk.Chunk) error + Close() error + Schema() *expression.Schema +} + +// Next is a wrapper function on e.Next(), it handles some common codes. +func Next(ctx context.Context, e Executor, req *chunk.Chunk) error { + base := e.base() + sessVars := base.ctx.GetSessionVars() + if atomic.CompareAndSwapUint32(&sessVars.Killed, 1, 0) { + return ErrQueryInterrupted + } + return e.Next(ctx, req) +} + +// ShowDDLExec represents a show DDL executor. +type ShowDDLExec struct { + baseExecutor + + ddlOwnerID string + selfID string + ddlInfo *admin.DDLInfo + done bool +} + +// Next implements the Executor Next interface. +func (e *ShowDDLExec) Next(ctx context.Context, req *chunk.Chunk) error { + req.Reset() + if e.done { + return nil + } + + ddlJobs := "" + query := "" + l := len(e.ddlInfo.Jobs) + for i, job := range e.ddlInfo.Jobs { + ddlJobs += job.String() + query += job.Query + if i != l-1 { + ddlJobs += "\n" + query += "\n" + } + } + + req.AppendInt64(0, e.ddlInfo.SchemaVer) + req.AppendString(1, e.ddlOwnerID) + req.AppendString(2, "") + req.AppendString(3, ddlJobs) + req.AppendString(4, e.selfID) + req.AppendString(5, query) + + e.done = true + return nil +} + +// ShowDDLJobsExec represent a show DDL jobs executor. 
+type ShowDDLJobsExec struct { + baseExecutor + + jobNumber int64 + is infoschema.InfoSchema +} + +// LimitExec represents limit executor +// It ignores 'Offset' rows from src, then returns 'Count' rows at maximum. +type LimitExec struct { + baseExecutor + + begin uint64 + end uint64 + cursor uint64 + + // meetFirstBatch represents whether we have met the first valid Chunk from child. + meetFirstBatch bool + + childResult *chunk.Chunk +} + +// Next implements the Executor Next interface. +func (e *LimitExec) Next(ctx context.Context, req *chunk.Chunk) error { + req.Reset() + if e.cursor >= e.end { + return nil + } + for !e.meetFirstBatch { + // transfer req's requiredRows to childResult and then adjust it in childResult + e.childResult = e.childResult.SetRequiredRows(req.RequiredRows(), e.maxChunkSize) + err := Next(ctx, e.children[0], e.adjustRequiredRows(e.childResult)) + if err != nil { + return err + } + batchSize := uint64(e.childResult.NumRows()) + // no more data. + if batchSize == 0 { + return nil + } + if newCursor := e.cursor + batchSize; newCursor >= e.begin { + e.meetFirstBatch = true + begin, end := e.begin-e.cursor, batchSize + if newCursor > e.end { + end = e.end - e.cursor + } + e.cursor += end + if begin == end { + break + } + req.Append(e.childResult, int(begin), int(end)) + return nil + } + e.cursor += batchSize + } + e.adjustRequiredRows(req) + err := Next(ctx, e.children[0], req) + if err != nil { + return err + } + batchSize := uint64(req.NumRows()) + // no more data. + if batchSize == 0 { + return nil + } + if e.cursor+batchSize > e.end { + req.TruncateTo(int(e.end - e.cursor)) + batchSize = e.end - e.cursor + } + e.cursor += batchSize + return nil +} + +// Open implements the Executor Open interface. +func (e *LimitExec) Open(ctx context.Context) error { + if err := e.baseExecutor.Open(ctx); err != nil { + return err + } + e.childResult = newFirstChunk(e.children[0]) + e.cursor = 0 + e.meetFirstBatch = e.begin == 0 + return nil +} + +// Close implements the Executor Close interface. +func (e *LimitExec) Close() error { + e.childResult = nil + return e.baseExecutor.Close() +} + +func (e *LimitExec) adjustRequiredRows(chk *chunk.Chunk) *chunk.Chunk { + // the limit of maximum number of rows the LimitExec should read + limitTotal := int(e.end - e.cursor) + + var limitRequired int + if e.cursor < e.begin { + // if cursor is less than begin, it have to read (begin-cursor) rows to ignore + // and then read chk.RequiredRows() rows to return, + // so the limit is (begin-cursor)+chk.RequiredRows(). + limitRequired = int(e.begin) - int(e.cursor) + chk.RequiredRows() + } else { + // if cursor is equal or larger than begin, just read chk.RequiredRows() rows to return. + limitRequired = chk.RequiredRows() + } + + return chk.SetRequiredRows(mathutil.Min(limitTotal, limitRequired), e.maxChunkSize) +} + +// TableDualExec represents a dual table executor. +type TableDualExec struct { + baseExecutor + + // numDualRows can only be 0 or 1. + numDualRows int + numReturned int +} + +// Open implements the Executor Open interface. +func (e *TableDualExec) Open(ctx context.Context) error { + e.numReturned = 0 + return nil +} + +// Next implements the Executor Next interface. 
+func (e *TableDualExec) Next(ctx context.Context, req *chunk.Chunk) error {
+	req.Reset()
+	if e.numReturned >= e.numDualRows {
+		return nil
+	}
+	if e.Schema().Len() == 0 {
+		req.SetNumVirtualRows(1)
+	} else {
+		for i := range e.Schema().Columns {
+			req.AppendNull(i)
+		}
+	}
+	e.numReturned = e.numDualRows
+	return nil
+}
+
+// SelectionExec represents a filter executor.
+type SelectionExec struct {
+	baseExecutor
+
+	batched     bool
+	filters     []expression.Expression
+	selected    []bool
+	inputIter   *chunk.Iterator4Chunk
+	inputRow    chunk.Row
+	childResult *chunk.Chunk
+}
+
+// Open implements the Executor Open interface.
+func (e *SelectionExec) Open(ctx context.Context) error {
+	if err := e.baseExecutor.Open(ctx); err != nil {
+		return err
+	}
+	e.childResult = newFirstChunk(e.children[0])
+	e.batched = expression.Vectorizable(e.filters)
+	if e.batched {
+		e.selected = make([]bool, 0, chunk.InitialCapacity)
+	}
+	e.inputIter = chunk.NewIterator4Chunk(e.childResult)
+	e.inputRow = e.inputIter.End()
+	return nil
+}
+
+// Close implements the Executor Close interface.
+func (e *SelectionExec) Close() error {
+	e.childResult = nil
+	e.selected = nil
+	return e.baseExecutor.Close()
+}
+
+// Next implements the Executor Next interface.
+func (e *SelectionExec) Next(ctx context.Context, req *chunk.Chunk) error {
+	req.GrowAndReset(e.maxChunkSize)
+
+	if !e.batched {
+		return e.unBatchedNext(ctx, req)
+	}
+
+	/*
+		Exit the loop when:
+			1. the `req` chunk is full.
+			2. there are no further results from the child.
+			3. any error is met.
+	*/
+	for {
+		// Fill `req` until it is full or the `inputIter` is fully processed.
+		for ; e.inputRow != e.inputIter.End(); e.inputRow = e.inputIter.Next() {
+			if !e.selected[e.inputRow.Idx()] {
+				continue
+			}
+			if req.IsFull() {
+				return nil
+			}
+			req.AppendRow(e.inputRow)
+		}
+		err := Next(ctx, e.children[0], e.childResult)
+		if err != nil {
+			return err
+		}
+		// no more data.
+		if e.childResult.NumRows() == 0 {
+			return nil
+		}
+		// Filter the child result in a vectorized manner, as suggested above; e.selected[i]
+		// records whether the i-th row of childResult passes all the filters.
+		e.selected, err = expression.VectorizedFilter(e.ctx, e.filters, e.inputIter, e.selected)
+		if err != nil {
+			return err
+		}
+		e.inputRow = e.inputIter.Begin()
+	}
+}
+
+// unBatchedNext filters input rows one by one and returns once an input row is selected.
+// For SQL with "SETVAR" in the filter and "GETVAR" in the projection, for example: "SELECT @a FROM t WHERE (@a := 2) > 0",
+// we have to set the batch size to 1 to do the evaluation of the filter and projection.
+func (e *SelectionExec) unBatchedNext(ctx context.Context, chk *chunk.Chunk) error {
+	for {
+		for ; e.inputRow != e.inputIter.End(); e.inputRow = e.inputIter.Next() {
+			selected, _, err := expression.EvalBool(e.ctx, e.filters, e.inputRow)
+			if err != nil {
+				return err
+			}
+			if selected {
+				chk.AppendRow(e.inputRow)
+				e.inputRow = e.inputIter.Next()
+				return nil
+			}
+		}
+		err := Next(ctx, e.children[0], e.childResult)
+		if err != nil {
+			return err
+		}
+		e.inputRow = e.inputIter.Begin()
+		// no more data.
+		if e.childResult.NumRows() == 0 {
+			return nil
+		}
+	}
+}
+
+// TableScanExec is a table scan executor without result fields.
+type TableScanExec struct {
+	baseExecutor
+
+	t                     table.Table
+	seekHandle            int64
+	iter                  kv.Iterator
+	columns               []*model.ColumnInfo
+	isVirtualTable        bool
+	virtualTableChunkList *chunk.List
+	virtualTableChunkIdx  int
+}
+
+// Next implements the Executor Next interface.
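+// For a virtual table it materializes all rows on the first call via nextChunk4InfoSchema and
+// then returns them chunk by chunk; otherwise it seeks handles starting from seekHandle and
+// appends rows until req reaches its capacity.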
+func (e *TableScanExec) Next(ctx context.Context, req *chunk.Chunk) error { + req.GrowAndReset(e.maxChunkSize) + if e.isVirtualTable { + return e.nextChunk4InfoSchema(ctx, req) + } + handle, found, err := e.nextHandle() + if err != nil || !found { + return err + } + + mutableRow := chunk.MutRowFromTypes(retTypes(e)) + for req.NumRows() < req.Capacity() { + row, err := e.getRow(handle) + if err != nil { + return err + } + e.seekHandle = handle + 1 + mutableRow.SetDatums(row...) + req.AppendRow(mutableRow.ToRow()) + } + return nil +} + +func (e *TableScanExec) nextChunk4InfoSchema(ctx context.Context, chk *chunk.Chunk) error { + chk.GrowAndReset(e.maxChunkSize) + if e.virtualTableChunkList == nil { + e.virtualTableChunkList = chunk.NewList(retTypes(e), e.initCap, e.maxChunkSize) + columns := make([]*table.Column, e.schema.Len()) + for i, colInfo := range e.columns { + columns[i] = table.ToColumn(colInfo) + } + mutableRow := chunk.MutRowFromTypes(retTypes(e)) + err := e.t.IterRecords(e.ctx, nil, columns, func(h int64, rec []types.Datum, cols []*table.Column) (bool, error) { + mutableRow.SetDatums(rec...) + e.virtualTableChunkList.AppendRow(mutableRow.ToRow()) + return true, nil + }) + if err != nil { + return err + } + } + // no more data. + if e.virtualTableChunkIdx >= e.virtualTableChunkList.NumChunks() { + return nil + } + virtualTableChunk := e.virtualTableChunkList.GetChunk(e.virtualTableChunkIdx) + e.virtualTableChunkIdx++ + chk.SwapColumns(virtualTableChunk) + return nil +} + +// nextHandle gets the unique handle for next row. +func (e *TableScanExec) nextHandle() (handle int64, found bool, err error) { + for { + handle, found, err = e.t.Seek(e.ctx, e.seekHandle) + if err != nil || !found { + return 0, false, err + } + return handle, true, nil + } +} + +func (e *TableScanExec) getRow(handle int64) ([]types.Datum, error) { + columns := make([]*table.Column, e.schema.Len()) + for i, v := range e.columns { + columns[i] = table.ToColumn(v) + } + row, err := e.t.RowWithCols(e.ctx, handle, columns) + if err != nil { + return nil, err + } + + return row, nil +} + +// Open implements the Executor Open interface. +func (e *TableScanExec) Open(ctx context.Context) error { + e.iter = nil + e.virtualTableChunkList = nil + return nil +} + +func extractStmtHintsFromStmtNode(stmtNode ast.StmtNode) []*ast.TableOptimizerHint { + switch x := stmtNode.(type) { + case *ast.SelectStmt: + return x.TableHints + case *ast.DeleteStmt: + return nil + // TODO: support hint for InsertStmt + case *ast.ExplainStmt: + return extractStmtHintsFromStmtNode(x.Stmt) + default: + return nil + } +} + +func handleStmtHints(hints []*ast.TableOptimizerHint) (stmtHints stmtctx.StmtHints, warns []error) { + if len(hints) == 0 { + return + } + var memoryQuotaHint, useToJAHint *ast.TableOptimizerHint + var memoryQuotaHintCnt, useToJAHintCnt, readReplicaHintCnt int + for _, hint := range hints { + switch hint.HintName.L { + case "memory_quota": + memoryQuotaHint = hint + memoryQuotaHintCnt++ + case "use_toja": + useToJAHint = hint + useToJAHintCnt++ + case "read_consistent_replica": + readReplicaHintCnt++ + } + } + // Handle MEMORY_QUOTA + if memoryQuotaHintCnt != 0 { + if memoryQuotaHintCnt > 1 { + warn := errors.New("There are multiple MEMORY_QUOTA hints, only the last one will take effect") + warns = append(warns, warn) + } + // Executor use MemoryQuota <= 0 to indicate no memory limit, here use < 0 to handle hint syntax error. 
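+		// For example, MEMORY_QUOTA(0) is accepted but only triggers the "no memory limit"
+		// warning below, while a negative quota can only come from a malformed hint and is
+		// ignored with a warning.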
+ if memoryQuotaHint.MemoryQuota < 0 { + warn := errors.New("The use of MEMORY_QUOTA hint is invalid, valid usage: MEMORY_QUOTA(10 MB) or MEMORY_QUOTA(10 GB)") + warns = append(warns, warn) + } else { + stmtHints.HasMemQuotaHint = true + stmtHints.MemQuotaQuery = memoryQuotaHint.MemoryQuota + if memoryQuotaHint.MemoryQuota == 0 { + warn := errors.New("Setting the MEMORY_QUOTA to 0 means no memory limit") + warns = append(warns, warn) + } + } + } + // Handle USE_TOJA + if useToJAHintCnt != 0 { + if useToJAHintCnt > 1 { + warn := errors.New("There are multiple USE_TOJA hints, only the last one will take effect") + warns = append(warns, warn) + } + stmtHints.HasAllowInSubqToJoinAndAggHint = true + stmtHints.AllowInSubqToJoinAndAgg = useToJAHint.HintFlag + } + // Handle READ_CONSISTENT_REPLICA + if readReplicaHintCnt != 0 { + if readReplicaHintCnt > 1 { + warn := errors.New("There are multiple READ_CONSISTENT_REPLICA hints, only the last one will take effect") + warns = append(warns, warn) + } + stmtHints.HasReplicaReadHint = true + stmtHints.ReplicaRead = byte(kv.ReplicaReadFollower) + } + return +} + +// ResetContextOfStmt resets the StmtContext and session variables. +// Before every execution, we must clear statement context. +func ResetContextOfStmt(ctx sessionctx.Context, s ast.StmtNode) (err error) { + hints := extractStmtHintsFromStmtNode(s) + stmtHints, hintWarns := handleStmtHints(hints) + vars := ctx.GetSessionVars() + sc := &stmtctx.StatementContext{ + StmtHints: stmtHints, + TimeZone: vars.Location(), + } + if explainStmt, ok := s.(*ast.ExplainStmt); ok { + sc.InExplainStmt = true + sc.CastStrToIntStrict = true + s = explainStmt.Stmt + } + // TODO: Many same bool variables here. + // We should set only two variables ( + // IgnoreErr and StrictSQLMode) to avoid setting the same bool variables and + // pushing them down to TiKV as flags. + switch stmt := s.(type) { + case *ast.DeleteStmt: + sc.InDeleteStmt = true + sc.BadNullAsWarning = !vars.StrictSQLMode + sc.TruncateAsWarning = !vars.StrictSQLMode + sc.DividedByZeroAsWarning = !vars.StrictSQLMode + sc.AllowInvalidDate = vars.SQLMode.HasAllowInvalidDatesMode() + sc.IgnoreZeroInDate = !vars.StrictSQLMode || sc.AllowInvalidDate + case *ast.InsertStmt: + sc.InInsertStmt = true + // For insert statement (not for update statement), disabling the StrictSQLMode + // should make TruncateAsWarning and DividedByZeroAsWarning. + sc.TruncateAsWarning = !vars.StrictSQLMode + sc.DividedByZeroAsWarning = !vars.StrictSQLMode + sc.AllowInvalidDate = vars.SQLMode.HasAllowInvalidDatesMode() + sc.IgnoreZeroInDate = !vars.StrictSQLMode || sc.AllowInvalidDate + case *ast.CreateTableStmt, *ast.AlterTableStmt: + // Make sure the sql_mode is strict when checking column default value. + case *ast.SelectStmt: + sc.InSelectStmt = true + + // see https://dev.mysql.com/doc/refman/5.7/en/sql-mode.html#sql-mode-strict + // said "For statements such as SELECT that do not change data, invalid values + // generate a warning in strict mode, not an error." + // and https://dev.mysql.com/doc/refman/5.7/en/out-of-range-and-overflow.html + sc.OverflowAsWarning = true + + // Return warning for truncate error in selection. 
+ sc.TruncateAsWarning = true + sc.IgnoreZeroInDate = true + sc.AllowInvalidDate = vars.SQLMode.HasAllowInvalidDatesMode() + if opts := stmt.SelectStmtOpts; opts != nil { + sc.NotFillCache = !opts.SQLCache + } + sc.PadCharToFullLength = ctx.GetSessionVars().SQLMode.HasPadCharToFullLengthMode() + sc.CastStrToIntStrict = true + case *ast.ShowStmt: + sc.IgnoreTruncate = true + sc.IgnoreZeroInDate = true + sc.AllowInvalidDate = vars.SQLMode.HasAllowInvalidDatesMode() + if stmt.Tp == ast.ShowWarnings || stmt.Tp == ast.ShowErrors { + sc.InShowWarning = true + sc.SetWarnings(vars.StmtCtx.GetWarnings()) + } + default: + sc.IgnoreTruncate = true + sc.IgnoreZeroInDate = true + sc.AllowInvalidDate = vars.SQLMode.HasAllowInvalidDatesMode() + } + if vars.StmtCtx.LastInsertID > 0 { + sc.PrevLastInsertID = vars.StmtCtx.LastInsertID + } else { + sc.PrevLastInsertID = vars.StmtCtx.PrevLastInsertID + } + sc.PrevAffectedRows = 0 + if vars.StmtCtx.InDeleteStmt || vars.StmtCtx.InInsertStmt { + sc.PrevAffectedRows = int64(vars.StmtCtx.AffectedRows()) + } else if vars.StmtCtx.InSelectStmt { + sc.PrevAffectedRows = -1 + } + errCount, warnCount := vars.StmtCtx.NumErrorWarnings() + vars.SysErrorCount = errCount + vars.SysWarningCount = warnCount + vars.StmtCtx = sc + for _, warn := range hintWarns { + vars.StmtCtx.AppendWarning(warn) + } + return +} diff --git a/executor/executor_test.go b/executor/executor_test.go new file mode 100644 index 0000000..409baa6 --- /dev/null +++ b/executor/executor_test.go @@ -0,0 +1,1249 @@ +// Copyright 2015 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package executor_test + +import ( + "context" + "flag" + "fmt" + . 
"github.com/pingcap/check" + "github.com/pingcap/errors" + "github.com/pingcap/tidb/config" + "github.com/pingcap/tidb/domain" + "github.com/pingcap/tidb/executor" + "github.com/pingcap/tidb/kv" + "github.com/pingcap/tidb/meta/autoid" + "github.com/pingcap/tidb/parser" + "github.com/pingcap/tidb/parser/mysql" + "github.com/pingcap/tidb/parser/terror" + "github.com/pingcap/tidb/session" + "github.com/pingcap/tidb/store/mockstore" + "github.com/pingcap/tidb/store/mockstore/mocktikv" + "github.com/pingcap/tidb/util/logutil" + "github.com/pingcap/tidb/util/mock" + "github.com/pingcap/tidb/util/testkit" + "github.com/pingcap/tidb/util/testleak" + "math" + "os" + "strconv" + "testing" +) + +func TestT(t *testing.T) { + CustomVerboseFlag = true + *CustomParallelSuiteFlag = true + logLevel := os.Getenv("log_level") + logutil.InitLogger(logutil.NewLogConfig(logLevel, logutil.DefaultLogFormat, logutil.EmptyFileLogConfig, false)) + autoid.SetStep(5000) + + old := config.GetGlobalConfig() + new := *old + config.StoreGlobalConfig(&new) + + testleak.BeforeTest() + TestingT(t) + testleak.AfterTestT(t)() +} + +var _ = Suite(&testSuite{&baseTestSuite{}}) +var _ = Suite(&testSuiteP1{&baseTestSuite{}}) +var _ = Suite(&testSuiteP2{&baseTestSuite{}}) +var _ = Suite(&testSuite1{}) +var _ = Suite(&testSuite2{&baseTestSuite{}}) +var _ = Suite(&testSuite3{&baseTestSuite{}}) +var _ = Suite(&testSuite4{&baseTestSuite{}}) +var _ = Suite(&testSuite5{&baseTestSuite{}}) +var _ = Suite(&testSuiteJoin1{&baseTestSuite{}}) +var _ = Suite(&testSuiteJoin2{&baseTestSuite{}}) +var _ = Suite(&testSuiteJoin3{&baseTestSuite{}}) +var _ = Suite(&testSuiteAgg{baseTestSuite: &baseTestSuite{}}) +var _ = Suite(&testSuite6{&baseTestSuite{}}) +var _ = Suite(&testSuite7{&baseTestSuite{}}) +var _ = Suite(&testSuite8{&baseTestSuite{}}) +var _ = Suite(&testBypassSuite{}) + +type testSuite struct{ *baseTestSuite } +type testSuiteP1 struct{ *baseTestSuite } +type testSuiteP2 struct{ *baseTestSuite } + +type baseTestSuite struct { + cluster *mocktikv.Cluster + mvccStore mocktikv.MVCCStore + store kv.Storage + domain *domain.Domain + *parser.Parser + ctx *mock.Context +} + +var mockTikv = flag.Bool("mockTikv", true, "use mock tikv store in executor test") + +func (s *baseTestSuite) SetUpSuite(c *C) { + s.Parser = parser.New() + flag.Lookup("mockTikv") + useMockTikv := *mockTikv + if useMockTikv { + s.cluster = mocktikv.NewCluster() + mocktikv.BootstrapWithSingleStore(s.cluster) + s.mvccStore = mocktikv.MustNewMVCCStore() + store, err := mockstore.NewMockTikvStore( + mockstore.WithCluster(s.cluster), + mockstore.WithMVCCStore(s.mvccStore), + ) + c.Assert(err, IsNil) + s.store = store + session.SetSchemaLease(0) + session.DisableStats4Test() + } + d, err := session.BootstrapSession(s.store) + c.Assert(err, IsNil) + s.domain = d +} + +func (s *baseTestSuite) TearDownSuite(c *C) { + s.domain.Close() + s.store.Close() +} + +func (s *testSuite) TearDownTest(c *C) { + tk := testkit.NewTestKit(c, s.store) + tk.MustExec("use test") + r := tk.MustQuery("show tables") + for _, tb := range r.Rows() { + tableName := tb[0] + tk.MustExec(fmt.Sprintf("drop table %v", tableName)) + } +} + +func (s *testSuiteP1) TestChange(c *C) { + tk := testkit.NewTestKit(c, s.store) + tk.MustExec("use test") + tk.MustExec("drop table if exists t") + tk.MustExec("create table t(a int)") + tk.MustExec("alter table t change a b int") + tk.MustExec("alter table t change b c bigint") + c.Assert(tk.ExecToErr("alter table t change c d varchar(100)"), NotNil) +} + +func (s 
*baseTestSuite) fillData(tk *testkit.TestKit, table string) { + tk.MustExec("use test") + tk.MustExec(fmt.Sprintf("create table %s(id int not null default 1, name varchar(255), PRIMARY KEY(id));", table)) + + // insert data + tk.MustExec(fmt.Sprintf("insert INTO %s VALUES (1, \"hello\");", table)) + tk.CheckExecResult(1, 0) + tk.MustExec(fmt.Sprintf("insert into %s values (2, \"hello\");", table)) + tk.CheckExecResult(1, 0) +} + +func (s *testSuiteP1) TestSelectWithoutFrom(c *C) { + tk := testkit.NewTestKit(c, s.store) + tk.MustExec("use test") + + r := tk.MustQuery("select 1 + 2*3;") + r.Check(testkit.Rows("7")) + + r = tk.MustQuery(`select _utf8"string";`) + r.Check(testkit.Rows("string")) + + r = tk.MustQuery("select 1 order by 1;") + r.Check(testkit.Rows("1")) +} + +// TestSelectBackslashN Issue 3685. +func (s *testSuiteP1) TestSelectBackslashN(c *C) { + tk := testkit.NewTestKit(c, s.store) + + sql := `select \N;` + r := tk.MustQuery(sql) + r.Check(testkit.Rows("")) + rs, err := tk.Exec(sql) + c.Check(err, IsNil) + fields := rs.Fields() + c.Check(len(fields), Equals, 1) + c.Check(fields[0].Column.Name.O, Equals, "NULL") + + sql = `select "\N";` + r = tk.MustQuery(sql) + r.Check(testkit.Rows("N")) + rs, err = tk.Exec(sql) + c.Check(err, IsNil) + fields = rs.Fields() + c.Check(len(fields), Equals, 1) + c.Check(fields[0].Column.Name.O, Equals, `N`) + + tk.MustExec("use test;") + tk.MustExec("create table test (`\\N` int);") + tk.MustExec("insert into test values (1);") + tk.CheckExecResult(1, 0) + sql = "select * from test;" + r = tk.MustQuery(sql) + r.Check(testkit.Rows("1")) + rs, err = tk.Exec(sql) + c.Check(err, IsNil) + fields = rs.Fields() + c.Check(len(fields), Equals, 1) + c.Check(fields[0].Column.Name.O, Equals, `\N`) + + sql = `select \N from test;` + r = tk.MustQuery(sql) + r.Check(testkit.Rows("")) + rs, err = tk.Exec(sql) + c.Check(err, IsNil) + fields = rs.Fields() + c.Check(err, IsNil) + c.Check(len(fields), Equals, 1) + c.Check(fields[0].Column.Name.O, Equals, `NULL`) + + sql = `select (\N) from test;` + r = tk.MustQuery(sql) + r.Check(testkit.Rows("")) + rs, err = tk.Exec(sql) + c.Check(err, IsNil) + fields = rs.Fields() + c.Check(len(fields), Equals, 1) + c.Check(fields[0].Column.Name.O, Equals, `NULL`) + + sql = "select `\\N` from test;" + r = tk.MustQuery(sql) + r.Check(testkit.Rows("1")) + rs, err = tk.Exec(sql) + c.Check(err, IsNil) + fields = rs.Fields() + c.Check(len(fields), Equals, 1) + c.Check(fields[0].Column.Name.O, Equals, `\N`) + + sql = "select (`\\N`) from test;" + r = tk.MustQuery(sql) + r.Check(testkit.Rows("1")) + rs, err = tk.Exec(sql) + c.Check(err, IsNil) + fields = rs.Fields() + c.Check(len(fields), Equals, 1) + c.Check(fields[0].Column.Name.O, Equals, `\N`) + + sql = `select '\N' from test;` + r = tk.MustQuery(sql) + r.Check(testkit.Rows("N")) + rs, err = tk.Exec(sql) + c.Check(err, IsNil) + fields = rs.Fields() + c.Check(len(fields), Equals, 1) + c.Check(fields[0].Column.Name.O, Equals, `N`) + + sql = `select ('\N') from test;` + r = tk.MustQuery(sql) + r.Check(testkit.Rows("N")) + rs, err = tk.Exec(sql) + c.Check(err, IsNil) + fields = rs.Fields() + c.Check(len(fields), Equals, 1) + c.Check(fields[0].Column.Name.O, Equals, `N`) +} + +// TestSelectNull Issue #4053. 
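+// The checks below confirm that a bare NULL keyword (in any letter case) evaluates to
+// SQL NULL: testkit renders it as an empty cell and the projected field is named `NULL`.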
+func (s *testSuiteP1) TestSelectNull(c *C) { + tk := testkit.NewTestKit(c, s.store) + + sql := `select nUll;` + r := tk.MustQuery(sql) + r.Check(testkit.Rows("")) + rs, err := tk.Exec(sql) + c.Check(err, IsNil) + fields := rs.Fields() + c.Check(len(fields), Equals, 1) + c.Check(fields[0].Column.Name.O, Equals, `NULL`) + + sql = `select (null);` + r = tk.MustQuery(sql) + r.Check(testkit.Rows("")) + rs, err = tk.Exec(sql) + c.Check(err, IsNil) + fields = rs.Fields() + c.Check(len(fields), Equals, 1) + c.Check(fields[0].Column.Name.O, Equals, `NULL`) + + sql = `select null+NULL;` + r = tk.MustQuery(sql) + r.Check(testkit.Rows("")) + rs, err = tk.Exec(sql) + c.Check(err, IsNil) + fields = rs.Fields() + c.Check(err, IsNil) + c.Check(len(fields), Equals, 1) + c.Check(fields[0].Column.Name.O, Equals, `null+NULL`) +} + +// TestSelectStringLiteral Issue #3686. +func (s *testSuiteP1) TestSelectStringLiteral(c *C) { + tk := testkit.NewTestKit(c, s.store) + + sql := `select 'abc';` + r := tk.MustQuery(sql) + r.Check(testkit.Rows("abc")) + rs, err := tk.Exec(sql) + c.Check(err, IsNil) + fields := rs.Fields() + c.Check(len(fields), Equals, 1) + c.Check(fields[0].Column.Name.O, Equals, `abc`) + + sql = `select (('abc'));` + r = tk.MustQuery(sql) + r.Check(testkit.Rows("abc")) + rs, err = tk.Exec(sql) + c.Check(err, IsNil) + fields = rs.Fields() + c.Check(len(fields), Equals, 1) + c.Check(fields[0].Column.Name.O, Equals, `abc`) + + sql = `select 'abc'+'def';` + r = tk.MustQuery(sql) + r.Check(testkit.Rows("0")) + rs, err = tk.Exec(sql) + c.Check(err, IsNil) + fields = rs.Fields() + c.Check(len(fields), Equals, 1) + c.Check(fields[0].Column.Name.O, Equals, `'abc'+'def'`) + + // Below checks whether leading invalid chars are trimmed. + sql = "select '\n';" + r = tk.MustQuery(sql) + r.Check(testkit.Rows("\n")) + rs, err = tk.Exec(sql) + c.Check(err, IsNil) + fields = rs.Fields() + c.Check(len(fields), Equals, 1) + c.Check(fields[0].Column.Name.O, Equals, "") + + sql = "select '\t col';" // Lowercased letter is a valid char. + rs, err = tk.Exec(sql) + c.Check(err, IsNil) + fields = rs.Fields() + c.Check(len(fields), Equals, 1) + c.Check(fields[0].Column.Name.O, Equals, "col") + + sql = "select '\t Col';" // Uppercased letter is a valid char. + rs, err = tk.Exec(sql) + c.Check(err, IsNil) + fields = rs.Fields() + c.Check(len(fields), Equals, 1) + c.Check(fields[0].Column.Name.O, Equals, "Col") + + sql = "select '\n\t 中文 col';" // Chinese char is a valid char. + rs, err = tk.Exec(sql) + c.Check(err, IsNil) + fields = rs.Fields() + c.Check(len(fields), Equals, 1) + c.Check(fields[0].Column.Name.O, Equals, "中文 col") + + sql = "select ' \r\n .col';" // Punctuation is a valid char. + rs, err = tk.Exec(sql) + c.Check(err, IsNil) + fields = rs.Fields() + c.Check(len(fields), Equals, 1) + c.Check(fields[0].Column.Name.O, Equals, ".col") + + sql = "select ' 😆col';" // Emoji is a valid char. + rs, err = tk.Exec(sql) + c.Check(err, IsNil) + fields = rs.Fields() + c.Check(len(fields), Equals, 1) + c.Check(fields[0].Column.Name.O, Equals, "😆col") + + // Below checks whether trailing invalid chars are preserved. + sql = `select 'abc ';` + rs, err = tk.Exec(sql) + c.Check(err, IsNil) + fields = rs.Fields() + c.Check(len(fields), Equals, 1) + c.Check(fields[0].Column.Name.O, Equals, "abc ") + + sql = `select ' abc 123 ';` + rs, err = tk.Exec(sql) + c.Check(err, IsNil) + fields = rs.Fields() + c.Check(len(fields), Equals, 1) + c.Check(fields[0].Column.Name.O, Equals, "abc 123 ") + + // Issue #4239. 
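+	// The cases below show that adjacent string literals are concatenated into a single
+	// value, while the result field keeps the name of the first literal only.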
+ sql = `select 'a' ' ' 'string';` + r = tk.MustQuery(sql) + r.Check(testkit.Rows("a string")) + rs, err = tk.Exec(sql) + c.Check(err, IsNil) + fields = rs.Fields() + c.Check(len(fields), Equals, 1) + c.Check(fields[0].Column.Name.O, Equals, "a") + + sql = `select 'a' " " "string";` + r = tk.MustQuery(sql) + r.Check(testkit.Rows("a string")) + rs, err = tk.Exec(sql) + c.Check(err, IsNil) + fields = rs.Fields() + c.Check(len(fields), Equals, 1) + c.Check(fields[0].Column.Name.O, Equals, "a") + + sql = `select 'string' 'string';` + r = tk.MustQuery(sql) + r.Check(testkit.Rows("stringstring")) + rs, err = tk.Exec(sql) + c.Check(err, IsNil) + fields = rs.Fields() + c.Check(len(fields), Equals, 1) + c.Check(fields[0].Column.Name.O, Equals, "string") + + sql = `select "ss" "a";` + r = tk.MustQuery(sql) + r.Check(testkit.Rows("ssa")) + rs, err = tk.Exec(sql) + c.Check(err, IsNil) + fields = rs.Fields() + c.Check(len(fields), Equals, 1) + c.Check(fields[0].Column.Name.O, Equals, "ss") + + sql = `select "ss" "a" "b";` + r = tk.MustQuery(sql) + r.Check(testkit.Rows("ssab")) + rs, err = tk.Exec(sql) + c.Check(err, IsNil) + fields = rs.Fields() + c.Check(len(fields), Equals, 1) + c.Check(fields[0].Column.Name.O, Equals, "ss") + + sql = `select "ss" "a" ' ' "b";` + r = tk.MustQuery(sql) + r.Check(testkit.Rows("ssa b")) + rs, err = tk.Exec(sql) + c.Check(err, IsNil) + fields = rs.Fields() + c.Check(len(fields), Equals, 1) + c.Check(fields[0].Column.Name.O, Equals, "ss") + + sql = `select "ss" "a" ' ' "b" ' ' "d";` + r = tk.MustQuery(sql) + r.Check(testkit.Rows("ssa b d")) + rs, err = tk.Exec(sql) + c.Check(err, IsNil) + fields = rs.Fields() + c.Check(len(fields), Equals, 1) + c.Check(fields[0].Column.Name.O, Equals, "ss") +} + +func (s *testSuiteP1) TestSelectLimit(c *C) { + tk := testkit.NewTestKit(c, s.store) + tk.MustExec("use test") + s.fillData(tk, "select_limit") + + tk.MustExec("insert INTO select_limit VALUES (3, \"hello\");") + tk.CheckExecResult(1, 0) + tk.MustExec("insert INTO select_limit VALUES (4, \"hello\");") + tk.CheckExecResult(1, 0) + + r := tk.MustQuery("select * from select_limit limit 1;") + r.Check(testkit.Rows("1 hello")) + + r = tk.MustQuery("select id from (select * from select_limit limit 1) k where id != 1;") + r.Check(testkit.Rows()) + + r = tk.MustQuery("select * from select_limit limit 18446744073709551615 offset 0;") + r.Check(testkit.Rows("1 hello", "2 hello", "3 hello", "4 hello")) + + r = tk.MustQuery("select * from select_limit limit 18446744073709551615 offset 1;") + r.Check(testkit.Rows("2 hello", "3 hello", "4 hello")) + + r = tk.MustQuery("select * from select_limit limit 18446744073709551615 offset 3;") + r.Check(testkit.Rows("4 hello")) + + err := tk.ExecToErr("select * from select_limit limit 18446744073709551616 offset 3;") + c.Assert(err, NotNil) +} + +func (s *testSuiteP1) TestSelectOrderBy(c *C) { + tk := testkit.NewTestKit(c, s.store) + tk.MustExec("use test") + s.fillData(tk, "select_order_test") + + // Test star field + r := tk.MustQuery("select * from select_order_test where id = 1 order by id limit 1 offset 0;") + r.Check(testkit.Rows("1 hello")) + + r = tk.MustQuery("select id from select_order_test order by id desc limit 1 ") + r.Check(testkit.Rows("2")) + + r = tk.MustQuery("select id from select_order_test order by id + 1 desc limit 1 ") + r.Check(testkit.Rows("2")) + + // Test limit + r = tk.MustQuery("select * from select_order_test order by name, id limit 1 offset 0;") + r.Check(testkit.Rows("1 hello")) + + // Test limit + r = 
tk.MustQuery("select id as c1, name from select_order_test order by 2, id limit 1 offset 0;") + r.Check(testkit.Rows("1 hello")) + + // Test limit overflow + r = tk.MustQuery("select * from select_order_test order by name, id limit 100 offset 0;") + r.Check(testkit.Rows("1 hello", "2 hello")) + + // Test offset overflow + r = tk.MustQuery("select * from select_order_test order by name, id limit 1 offset 100;") + r.Check(testkit.Rows()) + + // Test limit exceeds int range. + r = tk.MustQuery("select id from select_order_test order by name, id limit 18446744073709551615;") + r.Check(testkit.Rows("1", "2")) + + // Test multiple field + r = tk.MustQuery("select id, name from select_order_test where id = 1 group by id, name limit 1 offset 0;") + r.Check(testkit.Rows("1 hello")) + + // Test limit + order by + for i := 3; i <= 10; i += 1 { + tk.MustExec(fmt.Sprintf("insert INTO select_order_test VALUES (%d, \"zz\");", i)) + } + tk.MustExec("insert INTO select_order_test VALUES (10086, \"hi\");") + for i := 11; i <= 20; i += 1 { + tk.MustExec(fmt.Sprintf("insert INTO select_order_test VALUES (%d, \"hh\");", i)) + } + for i := 21; i <= 30; i += 1 { + tk.MustExec(fmt.Sprintf("insert INTO select_order_test VALUES (%d, \"zz\");", i)) + } + tk.MustExec("insert INTO select_order_test VALUES (1501, \"aa\");") + r = tk.MustQuery("select * from select_order_test order by name, id limit 1 offset 3;") + r.Check(testkit.Rows("11 hh")) + tk.MustExec("drop table select_order_test") + tk.MustExec("drop table if exists t") + tk.MustExec("create table t (c int, d int)") + tk.MustExec("insert t values (1, 1)") + tk.MustExec("insert t values (1, 2)") + tk.MustExec("insert t values (1, 3)") + r = tk.MustQuery("select 1-d as d from t order by d;") + r.Check(testkit.Rows("-2", "-1", "0")) + r = tk.MustQuery("select 1-d as d from t order by d + 1;") + r.Check(testkit.Rows("0", "-1", "-2")) + r = tk.MustQuery("select t.d from t order by d;") + r.Check(testkit.Rows("1", "2", "3")) + + tk.MustExec("drop table if exists t") + tk.MustExec("create table t (a int, b int, c int)") + tk.MustExec("insert t values (1, 2, 3)") + r = tk.MustQuery("select b from (select a,b from t order by a,c) t") + r.Check(testkit.Rows("2")) + r = tk.MustQuery("select b from (select a,b from t order by a,c limit 1) t") + r.Check(testkit.Rows("2")) + tk.MustExec("drop table if exists t") + tk.MustExec("create table t(a int, b int, index idx(a))") + tk.MustExec("insert into t values(1, 1), (2, 2)") + tk.MustQuery("select * from t where 1 order by b").Check(testkit.Rows("1 1", "2 2")) + tk.MustQuery("select * from t where a between 1 and 2 order by a desc").Check(testkit.Rows("2 2", "1 1")) + + // Test double read and topN is pushed down to first read plannercore. + tk.MustExec("drop table if exists t") + tk.MustExec("create table t(a int primary key, b int, c int, index idx(b))") + tk.MustExec("insert into t values(1, 3, 1)") + tk.MustExec("insert into t values(2, 2, 2)") + tk.MustExec("insert into t values(3, 1, 3)") + tk.MustQuery("select * from t use index(idx) order by a desc limit 1").Check(testkit.Rows("3 1 3")) + + // Test double read which needs to keep order. 
+ tk.MustExec("drop table if exists t") + tk.MustExec("create table t(a int, b int, key b (b))") + tk.Se.GetSessionVars().IndexLookupSize = 3 + for i := 0; i < 10; i++ { + tk.MustExec(fmt.Sprintf("insert into t values(%d, %d)", i, 10-i)) + } + tk.MustQuery("select a from t use index(b) order by b").Check(testkit.Rows("9", "8", "7", "6", "5", "4", "3", "2", "1", "0")) +} + +func (s *testSuiteP1) TestOrderBy(c *C) { + tk := testkit.NewTestKitWithInit(c, s.store) + tk.MustExec("drop table if exists t") + tk.MustExec("create table t (c1 int, c2 int, c3 varchar(20))") + tk.MustExec("insert into t values (1, 2, 'abc'), (2, 1, 'bcd')") + + // Fix issue https://github.com/pingcap/tidb/issues/337 + tk.MustQuery("select c1 as a, c1 as b from t order by c1").Check(testkit.Rows("1 1", "2 2")) + + tk.MustQuery("select c1 as a, t.c1 as a from t order by a desc").Check(testkit.Rows("2 2", "1 1")) + tk.MustQuery("select c1 as c2 from t order by c2").Check(testkit.Rows("1", "2")) + tk.MustQuery("select sum(c1) from t order by sum(c1)").Check(testkit.Rows("3")) + tk.MustQuery("select c1 as c2 from t order by c2 + 1").Check(testkit.Rows("2", "1")) +} + +func (s *testSuiteP1) TestSelectErrorRow(c *C) { + tk := testkit.NewTestKit(c, s.store) + tk.MustExec("use test") + + err := tk.ExecToErr("select row(1, 1) from test") + c.Assert(err, NotNil) + + err = tk.ExecToErr("select * from test group by row(1, 1);") + c.Assert(err, NotNil) + + err = tk.ExecToErr("select * from test order by row(1, 1);") + c.Assert(err, NotNil) + + err = tk.ExecToErr("select * from test having row(1, 1);") + c.Assert(err, NotNil) + + err = tk.ExecToErr("select (select 1, 1) from test;") + c.Assert(err, NotNil) + + err = tk.ExecToErr("select * from test group by (select 1, 1);") + c.Assert(err, NotNil) + + err = tk.ExecToErr("select * from test order by (select 1, 1);") + c.Assert(err, NotNil) + + err = tk.ExecToErr("select * from test having (select 1, 1);") + c.Assert(err, NotNil) +} + +func (s *testSuiteP1) TestIssue5055(c *C) { + tk := testkit.NewTestKit(c, s.store) + tk.MustExec("use test") + tk.MustExec(`drop table if exists t1, t2`) + tk.MustExec(`create table t1 (a int);`) + tk.MustExec(`create table t2 (a int);`) + tk.MustExec(`insert into t1 values(1);`) + tk.MustExec(`insert into t2 values(1);`) + result := tk.MustQuery("select tbl1.* from (select t1.a, 1 from t1) tbl1 left join t2 tbl2 on tbl1.a = tbl2.a order by tbl1.a desc limit 1;") + result.Check(testkit.Rows("1 1")) +} + +func (s *testSuiteP1) TestIn(c *C) { + tk := testkit.NewTestKit(c, s.store) + tk.MustExec("use test") + tk.MustExec(`drop table if exists t`) + tk.MustExec(`create table t (c1 int primary key, c2 int, key c (c2));`) + for i := 0; i <= 200; i++ { + tk.MustExec(fmt.Sprintf("insert t values(%d, %d)", i, i)) + } + queryStr := `select c2 from t where c1 in ('7', '10', '112', '111', '98', '106', '100', '9', '18', '17') order by c2` + r := tk.MustQuery(queryStr) + r.Check(testkit.Rows("7", "9", "10", "17", "18", "98", "100", "106", "111", "112")) + + queryStr = `select c2 from t where c1 in ('7a')` + tk.MustQuery(queryStr).Check(testkit.Rows("7")) +} + +func (s *testSuiteP1) TestTablePKisHandleScan(c *C) { + tk := testkit.NewTestKit(c, s.store) + tk.MustExec("use test") + tk.MustExec("drop table if exists t") + tk.MustExec("create table t (a int PRIMARY KEY AUTO_INCREMENT)") + tk.MustExec("insert t values (),()") + tk.MustExec("insert t values (-100),(0)") + + tests := []struct { + sql string + result [][]interface{} + }{ + { + "select * from t", + 
testkit.Rows("-100", "1", "2", "3"), + }, + { + "select * from t where a = 1", + testkit.Rows("1"), + }, + { + "select * from t where a != 1", + testkit.Rows("-100", "2", "3"), + }, + { + "select * from t where a >= '1.1'", + testkit.Rows("2", "3"), + }, + { + "select * from t where a < '1.1'", + testkit.Rows("-100", "1"), + }, + { + "select * from t where a > '-100.1' and a < 2", + testkit.Rows("-100", "1"), + }, + { + "select * from t where a is null", + testkit.Rows(), + }, + { + "select * from t where a in (1, 2)", + testkit.Rows("1", "2"), + }, + { + "select * from t where a between 1 and 2", + testkit.Rows("1", "2"), + }, + } + + for _, tt := range tests { + result := tk.MustQuery(tt.sql) + result.Check(tt.result) + } +} + +func (s *testSuite8) TestIndexScan(c *C) { + tk := testkit.NewTestKit(c, s.store) + tk.MustExec("use test") + tk.MustExec("drop table if exists t") + tk.MustExec("create table t (a int unique)") + tk.MustExec("insert t values (-1), (2), (3), (5), (6), (7), (8), (9)") + result := tk.MustQuery("select a from t where a < 0 or (a >= 2.1 and a < 5.1) or ( a > 5.9 and a <= 7.9) or a > '8.1'") + result.Check(testkit.Rows("-1", "3", "5", "6", "7", "9")) + tk.MustExec("drop table if exists t") + tk.MustExec("create table t (a int unique)") + tk.MustExec("insert t values (0)") + result = tk.MustQuery("select NULL from t ") + result.Check(testkit.Rows("")) + // test for double read + tk.MustExec("drop table if exists t") + tk.MustExec("create table t (a int unique, b int)") + tk.MustExec("insert t values (5, 0)") + tk.MustExec("insert t values (4, 0)") + tk.MustExec("insert t values (3, 0)") + tk.MustExec("insert t values (2, 0)") + tk.MustExec("insert t values (1, 0)") + tk.MustExec("insert t values (0, 0)") + result = tk.MustQuery("select * from t order by a limit 3") + result.Check(testkit.Rows("0 0", "1 0", "2 0")) + tk.MustExec("drop table if exists t") + tk.MustExec("create table t (a int unique, b int)") + tk.MustExec("insert t values (0, 1)") + tk.MustExec("insert t values (1, 2)") + tk.MustExec("insert t values (2, 1)") + tk.MustExec("insert t values (3, 2)") + tk.MustExec("insert t values (4, 1)") + tk.MustExec("insert t values (5, 2)") + result = tk.MustQuery("select * from t where a < 5 and b = 1 limit 2") + result.Check(testkit.Rows("0 1", "2 1")) + tk.MustExec("drop table if exists tab1") + tk.MustExec("CREATE TABLE tab1(pk INTEGER PRIMARY KEY, col0 INTEGER, col1 FLOAT, col3 INTEGER, col4 FLOAT)") + tk.MustExec("CREATE INDEX idx_tab1_0 on tab1 (col0)") + tk.MustExec("CREATE INDEX idx_tab1_1 on tab1 (col1)") + tk.MustExec("CREATE INDEX idx_tab1_3 on tab1 (col3)") + tk.MustExec("CREATE INDEX idx_tab1_4 on tab1 (col4)") + tk.MustExec("INSERT INTO tab1 VALUES(1,37,20.85,30,10.69)") + result = tk.MustQuery("SELECT pk FROM tab1 WHERE ((col3 <= 6 OR col3 < 29 AND (col0 < 41)) OR col3 > 42) AND col1 >= 96.1 AND col3 = 30 AND col3 > 17 AND (col0 BETWEEN 36 AND 42)") + result.Check(testkit.Rows()) + tk.MustExec("drop table if exists tab1") + tk.MustExec("CREATE TABLE tab1(pk INTEGER PRIMARY KEY, a INTEGER, b INTEGER)") + tk.MustExec("CREATE INDEX idx_tab1_0 on tab1 (a)") + tk.MustExec("INSERT INTO tab1 VALUES(1,1,1)") + tk.MustExec("INSERT INTO tab1 VALUES(2,2,1)") + tk.MustExec("INSERT INTO tab1 VALUES(3,1,2)") + tk.MustExec("INSERT INTO tab1 VALUES(4,2,2)") + result = tk.MustQuery("SELECT * FROM tab1 WHERE pk <= 3 AND a = 1") + result.Check(testkit.Rows("1 1 1", "3 1 2")) + result = tk.MustQuery("SELECT * FROM tab1 WHERE pk <= 4 AND a = 1 AND b = 2") + 
result.Check(testkit.Rows("3 1 2")) + tk.MustExec("CREATE INDEX idx_tab1_1 on tab1 (b, a)") + result = tk.MustQuery("SELECT pk FROM tab1 WHERE b > 1") + result.Check(testkit.Rows("3", "4")) + + tk.MustExec("drop table if exists t") + tk.MustExec("CREATE TABLE t (a varchar(3), index(a))") + tk.MustExec("insert t values('aaa'), ('aab')") + result = tk.MustQuery("select * from t where a >= 'aaaa' and a < 'aabb'") + result.Check(testkit.Rows("aab")) + + tk.MustExec("drop table if exists t") + tk.MustExec("CREATE TABLE t (a int primary key, b int, c int, index(c))") + tk.MustExec("insert t values(1, 1, 1), (2, 2, 2), (4, 4, 4), (3, 3, 3), (5, 5, 5)") + // Test for double read and top n. + result = tk.MustQuery("select a from t where c >= 2 order by b desc limit 1") + result.Check(testkit.Rows("5")) + + tk.MustExec("drop table if exists t") + tk.MustExec("create table t(a varchar(50) primary key, b int, c int, index idx(b))") + tk.MustExec("insert into t values('aa', 1, 1)") + tk.MustQuery("select * from t use index(idx) where a > 'a'").Check(testkit.Rows("aa 1 1")) + + // fix issue9636 + tk.MustExec("drop table if exists t") + tk.MustExec("CREATE TABLE `t` (a int, KEY (a))") + result = tk.MustQuery(`SELECT * FROM (SELECT * FROM (SELECT a as d FROM t WHERE a IN ('100')) AS x WHERE x.d < "123" ) tmp_count`) + result.Check(testkit.Rows()) +} + +func (s *testSuiteP1) TestIndexReverseOrder(c *C) { + tk := testkit.NewTestKit(c, s.store) + tk.MustExec("use test") + tk.MustExec("drop table if exists t") + tk.MustExec("create table t (a int primary key auto_increment, b int, index idx (b))") + tk.MustExec("insert t (b) values (0), (1), (2), (3), (4), (5), (6), (7), (8), (9)") + result := tk.MustQuery("select b from t order by b desc") + result.Check(testkit.Rows("9", "8", "7", "6", "5", "4", "3", "2", "1", "0")) + result = tk.MustQuery("select b from t where b <3 or (b >=6 and b < 8) order by b desc") + result.Check(testkit.Rows("7", "6", "2", "1", "0")) + + tk.MustExec("drop table if exists t") + tk.MustExec("create table t (a int, b int, index idx (b, a))") + tk.MustExec("insert t values (0, 2), (1, 2), (2, 2), (0, 1), (1, 1), (2, 1), (0, 0), (1, 0), (2, 0)") + result = tk.MustQuery("select b, a from t order by b, a desc") + result.Check(testkit.Rows("0 2", "0 1", "0 0", "1 2", "1 1", "1 0", "2 2", "2 1", "2 0")) +} + +func (s *testSuiteP1) TestTableReverseOrder(c *C) { + tk := testkit.NewTestKit(c, s.store) + tk.MustExec("use test") + tk.MustExec("drop table if exists t") + tk.MustExec("create table t (a int primary key auto_increment, b int)") + tk.MustExec("insert t (b) values (1), (2), (3), (4), (5), (6), (7), (8), (9)") + result := tk.MustQuery("select b from t order by a desc") + result.Check(testkit.Rows("9", "8", "7", "6", "5", "4", "3", "2", "1")) + result = tk.MustQuery("select a from t where a <3 or (a >=6 and a < 8) order by a desc") + result.Check(testkit.Rows("7", "6", "2", "1")) +} + +func (s *testSuiteP1) TestUnsignedPKColumn(c *C) { + tk := testkit.NewTestKit(c, s.store) + tk.MustExec("use test") + tk.MustExec("drop table if exists t") + tk.MustExec("create table t (a int unsigned primary key, b int, c int, key idx_ba (b, c, a));") + tk.MustExec("insert t values (1, 1, 1)") + result := tk.MustQuery("select * from t;") + result.Check(testkit.Rows("1 1 1")) +} + +func (s *testSuiteP2) TestTableDual(c *C) { + tk := testkit.NewTestKit(c, s.store) + tk.MustExec("use test") + result := tk.MustQuery("Select 1") + result.Check(testkit.Rows("1")) + result = tk.MustQuery("Select 1 from dual") + 
result.Check(testkit.Rows("1")) + result = tk.MustQuery("Select count(*) from dual") + result.Check(testkit.Rows("1")) + result = tk.MustQuery("Select 1 from dual where 1") + result.Check(testkit.Rows("1")) + + tk.MustExec("drop table if exists t") + tk.MustExec("create table t(a int primary key)") + tk.MustQuery("select t1.* from t t1, t t2 where t1.a=t2.a and 1=0").Check(testkit.Rows()) +} + +func (s *testSuiteP2) TestAdapterStatement(c *C) { + se, err := session.CreateSession4Test(s.store) + c.Check(err, IsNil) + se.GetSessionVars().TxnCtx.InfoSchema = domain.GetDomain(se).InfoSchema() + compiler := &executor.Compiler{Ctx: se} + stmtNode, err := s.ParseOneStmt("select 1", "", "") + c.Check(err, IsNil) + stmt, err := compiler.Compile(context.TODO(), stmtNode) + c.Check(err, IsNil) + c.Check(stmt.OriginText(), Equals, "select 1") + + stmtNode, err = s.ParseOneStmt("create table test.t (a int)", "", "") + c.Check(err, IsNil) + stmt, err = compiler.Compile(context.TODO(), stmtNode) + c.Check(err, IsNil) + c.Check(stmt.OriginText(), Equals, "create table test.t (a int)") +} + +func (s *testSuiteP2) TestRow(c *C) { + tk := testkit.NewTestKit(c, s.store) + tk.MustExec("use test") + tk.MustExec("drop table if exists t") + tk.MustExec("create table t (c int, d int)") + tk.MustExec("insert t values (1, 1)") + tk.MustExec("insert t values (1, 3)") + tk.MustExec("insert t values (2, 1)") + tk.MustExec("insert t values (2, 3)") + result := tk.MustQuery("select * from t where (c, d) < (2,2)") + result.Check(testkit.Rows("1 1", "1 3", "2 1")) + result = tk.MustQuery("select * from t where (1,2,3) > (3,2,1)") + result.Check(testkit.Rows()) + result = tk.MustQuery("select * from t where row(1,2,3) > (3,2,1)") + result.Check(testkit.Rows()) + result = tk.MustQuery("select (1, 2, 3) < (2, 3, 4)") + result.Check(testkit.Rows("1")) + result = tk.MustQuery("select (2, 3, 4) <= (2, 3, 3)") + result.Check(testkit.Rows("0")) + result = tk.MustQuery("select (2, 3, 4) <= (2, 3, 4)") + result.Check(testkit.Rows("1")) + result = tk.MustQuery("select (2, 3, 4) <= (2, 1, 4)") + result.Check(testkit.Rows("0")) + result = tk.MustQuery("select (2, 3, 4) >= (2, 3, 4)") + result.Check(testkit.Rows("1")) + result = tk.MustQuery("select (2, 3, 4) = (2, 3, 4)") + result.Check(testkit.Rows("1")) + result = tk.MustQuery("select (2, 3, 4) != (2, 3, 4)") + result.Check(testkit.Rows("0")) + result = tk.MustQuery("select row(1, 1) in (row(1, 1))") + result.Check(testkit.Rows("1")) + result = tk.MustQuery("select row(1, 0) in (row(1, 1))") + result.Check(testkit.Rows("0")) + result = tk.MustQuery("select row(1, 1) > row(1, 0)") + result.Check(testkit.Rows("1")) +} + +func (s *testSuiteP2) TestColumnName(c *C) { + tk := testkit.NewTestKit(c, s.store) + tk.MustExec("use test") + tk.MustExec("drop table if exists t") + tk.MustExec("create table t (c int, d int)") + // disable only full group by + tk.MustExec("set sql_mode='STRICT_TRANS_TABLES'") + rs, err := tk.Exec("select 1 + c, count(*) from t") + c.Check(err, IsNil) + fields := rs.Fields() + c.Check(len(fields), Equals, 2) + c.Check(fields[0].Column.Name.L, Equals, "1 + c") + c.Check(fields[0].ColumnAsName.L, Equals, "1 + c") + c.Check(fields[1].Column.Name.L, Equals, "count(*)") + c.Check(fields[1].ColumnAsName.L, Equals, "count(*)") + rs.Close() + tk.MustExec("begin") + tk.MustExec("insert t values(1,1)") + rs, err = tk.Exec("select c d, d c from t") + c.Check(err, IsNil) + fields = rs.Fields() + c.Check(len(fields), Equals, 2) + c.Check(fields[0].Column.Name.L, Equals, "c") + 
c.Check(fields[0].ColumnAsName.L, Equals, "d") + c.Check(fields[1].Column.Name.L, Equals, "d") + c.Check(fields[1].ColumnAsName.L, Equals, "c") + rs.Close() + // Test case for query a column of a table. + // In this case, all attributes have values. + rs, err = tk.Exec("select c as a from t as t2") + c.Check(err, IsNil) + fields = rs.Fields() + c.Check(fields[0].Column.Name.L, Equals, "c") + c.Check(fields[0].ColumnAsName.L, Equals, "a") + c.Check(fields[0].Table.Name.L, Equals, "t") + c.Check(fields[0].TableAsName.L, Equals, "t2") + c.Check(fields[0].DBName.L, Equals, "test") + rs.Close() + // Test case for query a column wrapped with parentheses and unary plus. + // In this case, the column name should be its original name. + rs, err = tk.Exec("select (c), (+c), +(c), +(+(c)), ++c from t") + c.Check(err, IsNil) + fields = rs.Fields() + for i := 0; i < 5; i++ { + c.Check(fields[i].Column.Name.L, Equals, "c") + c.Check(fields[i].ColumnAsName.L, Equals, "c") + } + rs.Close() + + rs, err = tk.Exec("select if(1,c,c) from t;") + c.Check(err, IsNil) + fields = rs.Fields() + c.Assert(fields[0].Column.Name.L, Equals, "if(1,c,c)") + // It's a compatibility issue. Should be empty instead. + c.Assert(fields[0].ColumnAsName.L, Equals, "if(1,c,c)") +} + +func (s *testSuite) TestSimpleDAG(c *C) { + tk := testkit.NewTestKit(c, s.store) + tk.MustExec("use test") + tk.MustExec("drop table if exists t") + tk.MustExec("create table t(a int primary key, b int, c int)") + tk.MustExec("insert into t values (1, 1, 1), (2, 1, 1), (3, 1, 2), (4, 2, 3)") + tk.MustQuery("select a from t").Check(testkit.Rows("1", "2", "3", "4")) + tk.MustQuery("select * from t where a = 4").Check(testkit.Rows("4 2 3")) + tk.MustQuery("select a from t limit 1").Check(testkit.Rows("1")) + tk.MustQuery("select a from t order by a desc").Check(testkit.Rows("4", "3", "2", "1")) + tk.MustQuery("select a from t order by a desc limit 1").Check(testkit.Rows("4")) + tk.MustQuery("select a from t order by b desc limit 1").Check(testkit.Rows("4")) + tk.MustQuery("select a from t where a < 3").Check(testkit.Rows("1", "2")) + tk.MustQuery("select a from t where b > 1").Check(testkit.Rows("4")) + tk.MustQuery("select a from t where b > 1 and a < 3").Check(testkit.Rows()) + tk.MustQuery("select count(*) from t where b > 1 and a < 3").Check(testkit.Rows("0")) + tk.MustQuery("select count(*) from t").Check(testkit.Rows("4")) + tk.MustQuery("select count(*), c from t group by c order by c").Check(testkit.Rows("2 1", "1 2", "1 3")) + tk.MustQuery("select sum(c) as s from t group by b order by s").Check(testkit.Rows("3", "4")) + + tk.MustExec("create index i on t(c,b)") + tk.MustQuery("select a from t where c = 1").Check(testkit.Rows("1", "2")) + tk.MustQuery("select a from t where c = 1 and a < 2").Check(testkit.Rows("1")) + tk.MustQuery("select a from t where c = 1 order by a limit 1").Check(testkit.Rows("1")) + tk.MustQuery("select count(*) from t where c = 1 ").Check(testkit.Rows("2")) + tk.MustExec("create index i1 on t(b)") + tk.MustQuery("select c from t where b = 2").Check(testkit.Rows("3")) + tk.MustQuery("select * from t where b = 2").Check(testkit.Rows("4 2 3")) + tk.MustQuery("select count(*) from t where b = 1").Check(testkit.Rows("3")) + tk.MustQuery("select * from t where b = 1 and a > 1 limit 1").Check(testkit.Rows("2 1 1")) +} + +type testSuite1 struct { + store kv.Storage + dom *domain.Domain +} + +func (s *testSuite1) SetUpSuite(c *C) { + var err error + s.store, err = mockstore.NewMockTikvStore() + c.Assert(err, IsNil) + 
session.SetStatsLease(0) + s.dom, err = session.BootstrapSession(s.store) + c.Assert(err, IsNil) +} + +func (s *testSuite1) TearDownSuite(c *C) { + s.dom.Close() + s.store.Close() +} + +func (s *testSuite1) TearDownTest(c *C) { + tk := testkit.NewTestKit(c, s.store) + tk.MustExec("use test") + r := tk.MustQuery("show tables") + for _, tb := range r.Rows() { + tableName := tb[0] + tk.MustExec(fmt.Sprintf("drop table %v", tableName)) + } +} + +func (s *testSuite) TestHandleTransfer(c *C) { + tk := testkit.NewTestKit(c, s.store) + tk.MustExec("use test") + tk.MustExec("create table t(a int, index idx(a))") + tk.MustExec("insert into t values(1), (2), (3)") + tk.MustExec("begin") + // test table scan read whose result need handle. + tk.MustQuery("select * from t ignore index(idx)").Check(testkit.Rows("1", "2", "3")) + tk.MustExec("insert into t values(4)") + // test single read whose result need handle + tk.MustQuery("select * from t use index(idx)").Check(testkit.Rows("1", "2", "3", "4")) + tk.MustQuery("select * from t use index(idx) order by a desc").Check(testkit.Rows("4", "3", "2", "1")) + tk.MustQuery("select * from t use index(idx)").Check(testkit.Rows("1", "2", "3", "4")) + tk.MustExec("commit") + + tk.MustExec("drop table if exists t") + tk.MustExec("create table t(a int, b int, index idx(a))") + tk.MustExec("insert into t values(3, 3), (1, 1), (2, 2)") + // Second test double read. + tk.MustQuery("select * from t use index(idx) order by a").Check(testkit.Rows("1 1", "2 2", "3 3")) +} + +func (s *testSuite) TestEnhancedRangeAccess(c *C) { + tk := testkit.NewTestKitWithInit(c, s.store) + + tk.MustExec("drop table if exists t") + tk.MustExec("create table t (a int primary key, b int)") + tk.MustExec("insert into t values(1, 2), (2, 1)") + tk.MustQuery("select * from t where (a = 1 and b = 2) or (a = 2 and b = 1)").Check(testkit.Rows("1 2", "2 1")) + tk.MustQuery("select * from t where (a = 1 and b = 1) or (a = 2 and b = 2)").Check(nil) +} + +// TestMaxInt64Handle Issue #4810 +func (s *testSuite) TestMaxInt64Handle(c *C) { + tk := testkit.NewTestKitWithInit(c, s.store) + + tk.MustExec("drop table if exists t") + tk.MustExec("create table t(id bigint, PRIMARY KEY (id))") + tk.MustExec("insert into t values(9223372036854775807)") + tk.MustExec("select * from t where id = 9223372036854775807") + tk.MustQuery("select * from t where id = 9223372036854775807;").Check(testkit.Rows("9223372036854775807")) + tk.MustQuery("select * from t").Check(testkit.Rows("9223372036854775807")) + _, err := tk.Exec("insert into t values(9223372036854775807)") + c.Assert(err, NotNil) + tk.MustExec("delete from t where id = 9223372036854775807") + tk.MustQuery("select * from t").Check(nil) +} + +func (s *testSuite) TestTableScanWithPointRanges(c *C) { + tk := testkit.NewTestKitWithInit(c, s.store) + + tk.MustExec("drop table if exists t") + tk.MustExec("create table t(id int, PRIMARY KEY (id))") + tk.MustExec("insert into t values(1), (5), (10)") + tk.MustQuery("select * from t where id in(1, 2, 10)").Check(testkit.Rows("1", "10")) +} + +func (s *testSuite) TestUnsignedPk(c *C) { + tk := testkit.NewTestKitWithInit(c, s.store) + + tk.MustExec("drop table if exists t") + tk.MustExec("create table t(id bigint unsigned primary key)") + var num1, num2 uint64 = math.MaxInt64 + 1, math.MaxInt64 + 2 + tk.MustExec(fmt.Sprintf("insert into t values(%v), (%v), (1), (2)", num1, num2)) + num1Str := strconv.FormatUint(num1, 10) + num2Str := strconv.FormatUint(num2, 10) + tk.MustQuery("select * from t order by 
id").Check(testkit.Rows("1", "2", num1Str, num2Str)) + tk.MustQuery("select * from t where id not in (2)").Check(testkit.Rows(num1Str, num2Str, "1")) + tk.MustExec("drop table t") + tk.MustExec("create table t(a bigint unsigned primary key, b int, index idx(b))") + tk.MustExec("insert into t values(9223372036854775808, 1), (1, 1)") + tk.MustQuery("select * from t use index(idx) where b = 1 and a < 2").Check(testkit.Rows("1 1")) + tk.MustQuery("select * from t use index(idx) where b = 1 order by b, a").Check(testkit.Rows("1 1", "9223372036854775808 1")) +} + +func (s *testSuite) TestIssue5341(c *C) { + tk := testkit.NewTestKit(c, s.store) + tk.MustExec("drop table if exists test.t") + tk.MustExec("create table test.t(a char)") + tk.MustExec("insert into test.t value('a')") + tk.MustQuery("select * from test.t where a < 1 order by a limit 0;").Check(testkit.Rows()) +} + +func (s *testSuite) TestContainDotColumn(c *C) { + tk := testkit.NewTestKit(c, s.store) + + tk.MustExec("use test") + tk.MustExec("drop table if exists test.t1") + tk.MustExec("create table test.t1(t1.a char)") + tk.MustExec("drop table if exists t2") + tk.MustExec("create table t2(a char, t2.b int)") + + tk.MustExec("drop table if exists t3") + _, err := tk.Exec("create table t3(s.a char);") + terr := errors.Cause(err).(*terror.Error) + c.Assert(terr.Code(), Equals, terror.ErrCode(mysql.ErrWrongTableName)) +} + +func (s *testSuite) TestLimit(c *C) { + tk := testkit.NewTestKit(c, s.store) + tk.MustExec(`use test;`) + tk.MustExec(`drop table if exists t;`) + tk.MustExec(`create table t(a bigint, b bigint);`) + tk.MustExec(`insert into t values(1, 1), (2, 2), (3, 3), (4, 4), (5, 5), (6, 6);`) + tk.MustQuery(`select * from t order by a limit 1, 1;`).Check(testkit.Rows( + "2 2", + )) + tk.MustQuery(`select * from t order by a limit 1, 2;`).Check(testkit.Rows( + "2 2", + "3 3", + )) + tk.MustQuery(`select * from t order by a limit 1, 3;`).Check(testkit.Rows( + "2 2", + "3 3", + "4 4", + )) + tk.MustQuery(`select * from t order by a limit 1, 4;`).Check(testkit.Rows( + "2 2", + "3 3", + "4 4", + "5 5", + )) + tk.MustExec(`set @@tidb_init_chunk_size=2;`) + tk.MustQuery(`select * from t order by a limit 2, 1;`).Check(testkit.Rows( + "3 3", + )) + tk.MustQuery(`select * from t order by a limit 2, 2;`).Check(testkit.Rows( + "3 3", + "4 4", + )) + tk.MustQuery(`select * from t order by a limit 2, 3;`).Check(testkit.Rows( + "3 3", + "4 4", + "5 5", + )) + tk.MustQuery(`select * from t order by a limit 2, 4;`).Check(testkit.Rows( + "3 3", + "4 4", + "5 5", + "6 6", + )) +} + +type testSuite2 struct { + *baseTestSuite +} + +func (s *testSuite2) TearDownTest(c *C) { + tk := testkit.NewTestKit(c, s.store) + tk.MustExec("use test") + r := tk.MustQuery("show full tables") + for _, tb := range r.Rows() { + tableName := tb[0] + tk.MustExec(fmt.Sprintf("drop table %v", tableName)) + } +} + +type testSuite3 struct { + *baseTestSuite +} + +func (s *testSuite3) TearDownTest(c *C) { + tk := testkit.NewTestKit(c, s.store) + tk.MustExec("use test") + r := tk.MustQuery("show full tables") + for _, tb := range r.Rows() { + tableName := tb[0] + tk.MustExec(fmt.Sprintf("drop table %v", tableName)) + } +} + +type testSuite4 struct { + *baseTestSuite +} + +func (s *testSuite4) TearDownTest(c *C) { + tk := testkit.NewTestKit(c, s.store) + tk.MustExec("use test") + r := tk.MustQuery("show full tables") + for _, tb := range r.Rows() { + tableName := tb[0] + tk.MustExec(fmt.Sprintf("drop table %v", tableName)) + } +} + +type testSuite5 struct { + *baseTestSuite 
+} + +func (s *testSuite5) TearDownTest(c *C) { + tk := testkit.NewTestKit(c, s.store) + tk.MustExec("use test") + r := tk.MustQuery("show full tables") + for _, tb := range r.Rows() { + tableName := tb[0] + tk.MustExec(fmt.Sprintf("drop table %v", tableName)) + } +} + +type testSuite6 struct { + *baseTestSuite +} + +func (s *testSuite6) TearDownTest(c *C) { + tk := testkit.NewTestKit(c, s.store) + tk.MustExec("use test") + r := tk.MustQuery("show full tables") + for _, tb := range r.Rows() { + tableName := tb[0] + tk.MustExec(fmt.Sprintf("drop table %v", tableName)) + } +} + +type testSuite7 struct { + *baseTestSuite +} + +func (s *testSuite7) TearDownTest(c *C) { + tk := testkit.NewTestKit(c, s.store) + tk.MustExec("use test") + r := tk.MustQuery("show full tables") + for _, tb := range r.Rows() { + tableName := tb[0] + tk.MustExec(fmt.Sprintf("drop table %v", tableName)) + } +} + +type testSuite8 struct { + *baseTestSuite +} + +func (s *testSuite8) TearDownTest(c *C) { + tk := testkit.NewTestKit(c, s.store) + tk.MustExec("use test") + r := tk.MustQuery("show full tables") + for _, tb := range r.Rows() { + tableName := tb[0] + tk.MustExec(fmt.Sprintf("drop table %v", tableName)) + } +} diff --git a/executor/explain.go b/executor/explain.go new file mode 100644 index 0000000..75381ca --- /dev/null +++ b/executor/explain.go @@ -0,0 +1,74 @@ +// Copyright 2016 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package executor + +import ( + "context" + + "github.com/cznic/mathutil" + "github.com/pingcap/tidb/planner/core" + "github.com/pingcap/tidb/util/chunk" +) + +// ExplainExec represents an explain executor. +type ExplainExec struct { + baseExecutor + + explain *core.Explain + rows [][]string + cursor int +} + +// Open implements the Executor Open interface. +func (e *ExplainExec) Open(ctx context.Context) error { + return nil +} + +// Close implements the Executor Close interface. +func (e *ExplainExec) Close() error { + e.rows = nil + return nil +} + +// Next implements the Executor Next interface. +func (e *ExplainExec) Next(ctx context.Context, req *chunk.Chunk) error { + if e.rows == nil { + var err error + e.rows, err = e.generateExplainInfo(ctx) + if err != nil { + return err + } + } + + req.GrowAndReset(e.maxChunkSize) + if e.cursor >= len(e.rows) { + return nil + } + + numCurRows := mathutil.Min(req.Capacity(), len(e.rows)-e.cursor) + for i := e.cursor; i < e.cursor+numCurRows; i++ { + for j := range e.rows[i] { + req.AppendString(j, e.rows[i][j]) + } + } + e.cursor += numCurRows + return nil +} + +func (e *ExplainExec) generateExplainInfo(ctx context.Context) ([][]string, error) { + if err := e.explain.RenderResult(); err != nil { + return nil, err + } + return e.explain.Rows, nil +} diff --git a/executor/hash_table.go b/executor/hash_table.go new file mode 100644 index 0000000..c4a63e5 --- /dev/null +++ b/executor/hash_table.go @@ -0,0 +1,276 @@ +// Copyright 2019 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package executor + +import ( + "hash" + "hash/fnv" + + "github.com/pingcap/errors" + "github.com/pingcap/tidb/sessionctx" + "github.com/pingcap/tidb/sessionctx/stmtctx" + "github.com/pingcap/tidb/types" + "github.com/pingcap/tidb/util/chunk" + "github.com/pingcap/tidb/util/codec" +) + +const ( + // estCountMaxFactor defines the factor of estCountMax with maxChunkSize. + // estCountMax is maxChunkSize * estCountMaxFactor, the maximum threshold of estCount. + // if estCount is larger than estCountMax, set estCount to estCountMax. + // Set this threshold to prevent innerSideEstCount being too large and causing a performance and memory regression. + estCountMaxFactor = 10 * 1024 + + // estCountMinFactor defines the factor of estCountMin with maxChunkSize. + // estCountMin is maxChunkSize * estCountMinFactor, the minimum threshold of estCount. + // If estCount is smaller than estCountMin, set estCount to 0. + // Set this threshold to prevent innerSideEstCount being too small and causing a performance regression. + estCountMinFactor = 8 + + // estCountDivisor defines the divisor of innerSideEstCount. + // Set this divisor to prevent innerSideEstCount being too large and causing a performance regression. + estCountDivisor = 8 +) + +// hashContext keeps the needed hash context of a db table in hash join. +type hashContext struct { + allTypes []*types.FieldType + keyColIdx []int + buf []byte + hashVals []hash.Hash64 + hasNull []bool +} + +func (hc *hashContext) initHash(rows int) { + if hc.buf == nil { + hc.buf = make([]byte, 1) + } + + if len(hc.hashVals) < rows { + hc.hasNull = make([]bool, rows) + hc.hashVals = make([]hash.Hash64, rows) + for i := 0; i < rows; i++ { + hc.hashVals[i] = fnv.New64() + } + } else { + for i := 0; i < rows; i++ { + hc.hasNull[i] = false + hc.hashVals[i].Reset() + } + } +} + +// hashRowContainer handles the rows and the hash map of a table. +// TODO: support spilling out to disk when memory is limited. +type hashRowContainer struct { + records *chunk.List + hashTable *rowHashMap + + sc *stmtctx.StatementContext + hCtx *hashContext +} + +func newHashRowContainer(sctx sessionctx.Context, estCount int, hCtx *hashContext, initList *chunk.List) *hashRowContainer { + maxChunkSize := sctx.GetSessionVars().MaxChunkSize + // The estCount from cost model is not quite accurate and we need + // to avoid that it's too large to consume redundant memory. + // So I invent a rough protection, firstly divide it by estCountDivisor + // then set a maximum threshold and a minimum threshold. + estCount /= estCountDivisor + if estCount > maxChunkSize*estCountMaxFactor { + estCount = maxChunkSize * estCountMaxFactor + } + if estCount < maxChunkSize*estCountMinFactor { + estCount = 0 + } + c := &hashRowContainer{ + records: initList, + hashTable: newRowHashMap(estCount), + + sc: sctx.GetSessionVars().StmtCtx, + hCtx: hCtx, + } + return c +} + +// GetMatchedRows get matched rows from probeRow. It can be called +// in multiple goroutines while each goroutine should keep its own +// h and buf. 
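+// A rough probe-side sketch (illustrative only, not code added by this change): hash the
+// probe row's join-key columns with the probe goroutine's own hashContext, take Sum64()
+// as probeKey, then call GetMatchedRows(probeKey, probeRow, probeHCtx). The container is
+// only read here, so concurrent probes should be safe once PutChunk has finished building.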
+func (c *hashRowContainer) GetMatchedRows(probeKey uint64, probeRow chunk.Row, hCtx *hashContext) (matched []chunk.Row, err error) { + innerPtrs := c.hashTable.Get(probeKey) + if len(innerPtrs) == 0 { + return + } + matched = make([]chunk.Row, 0, len(innerPtrs)) + for _, ptr := range innerPtrs { + matchedRow := c.records.GetRow(ptr) + var ok bool + ok, err = c.matchJoinKey(matchedRow, probeRow, hCtx) + if err != nil { + return + } + if !ok { + continue + } + matched = append(matched, matchedRow) + } + /* TODO(fengliyuan): add test case in this case + if len(matched) == 0 { + // noop + } + */ + return +} + +// matchJoinKey checks if join keys of buildRow and probeRow are logically equal. +func (c *hashRowContainer) matchJoinKey(buildRow, probeRow chunk.Row, probeHCtx *hashContext) (ok bool, err error) { + return codec.EqualChunkRow(c.sc, + buildRow, c.hCtx.allTypes, c.hCtx.keyColIdx, + probeRow, probeHCtx.allTypes, probeHCtx.keyColIdx) +} + +// PutChunk puts a chunk into hashRowContainer and build hash map. It's not thread-safe. +// key of hash table: hash value of key columns +// value of hash table: RowPtr of the corresponded row +func (c *hashRowContainer) PutChunk(chk *chunk.Chunk) error { + chkIdx := uint32(c.records.NumChunks()) + numRows := chk.NumRows() + + c.records.Add(chk) + c.hCtx.initHash(numRows) + + hCtx := c.hCtx + for _, colIdx := range c.hCtx.keyColIdx { + err := codec.HashChunkColumns(c.sc, hCtx.hashVals, chk, hCtx.allTypes[colIdx], colIdx, hCtx.buf, hCtx.hasNull) + if err != nil { + return errors.Trace(err) + } + } + for i := 0; i < numRows; i++ { + if c.hCtx.hasNull[i] { + continue + } + key := c.hCtx.hashVals[i].Sum64() + rowPtr := chunk.RowPtr{ChkIdx: chkIdx, RowIdx: uint32(i)} + c.hashTable.Put(key, rowPtr) + } + return nil +} + +// Len returns the length of the records in hashRowContainer. +func (c hashRowContainer) Len() int { + return c.hashTable.Len() +} + +const ( + initialEntrySliceLen = 64 + maxEntrySliceLen = 8 * 1024 +) + +type entry struct { + ptr chunk.RowPtr + next entryAddr +} + +type entryStore struct { + slices [][]entry +} + +func (es *entryStore) init() { + es.slices = [][]entry{make([]entry, 0, initialEntrySliceLen)} + // Reserve the first empty entry, so entryAddr{} can represent nullEntryAddr. + reserved := es.put(entry{}) + if reserved != nullEntryAddr { + panic("entryStore: first entry is not nullEntryAddr") + } +} + +func (es *entryStore) put(e entry) entryAddr { + sliceIdx := uint32(len(es.slices) - 1) + slice := es.slices[sliceIdx] + if len(slice) == cap(slice) { + size := cap(slice) * 2 + if size >= maxEntrySliceLen { + size = maxEntrySliceLen + } + slice = make([]entry, 0, size) + es.slices = append(es.slices, slice) + sliceIdx++ + } + addr := entryAddr{sliceIdx: sliceIdx, offset: uint32(len(slice))} + es.slices[sliceIdx] = append(slice, e) + return addr +} + +func (es *entryStore) get(addr entryAddr) entry { + return es.slices[addr.sliceIdx][addr.offset] +} + +type entryAddr struct { + sliceIdx uint32 + offset uint32 +} + +var nullEntryAddr = entryAddr{} + +// rowHashMap stores multiple rowPtr of rows for a given key with minimum GC overhead. +// A given key can store multiple values. +// It is not thread-safe, should only be used in one goroutine. +// TODO(fengliyuan): add unit test for this. +type rowHashMap struct { + entryStore entryStore + hashTable map[uint64]entryAddr + length int +} + +// newRowHashMap creates a new rowHashMap. estCount means the estimated size of the hashMap. +// If unknown, set it to 0. 
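+// Minimal usage sketch (mirrors the unit test in hash_table_test.go below):
+//
+//	m := newRowHashMap(0)
+//	m.Put(1, chunk.RowPtr{ChkIdx: 1, RowIdx: 1})
+//	ptrs := m.Get(1) // []chunk.RowPtr{{ChkIdx: 1, RowIdx: 1}}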
+func newRowHashMap(estCount int) *rowHashMap { + m := new(rowHashMap) + m.hashTable = make(map[uint64]entryAddr, estCount) + m.entryStore.init() + return m +} + +// Put puts the key/rowPtr pairs to the rowHashMap, multiple rowPtrs are stored in a list. +func (m *rowHashMap) Put(hashKey uint64, rowPtr chunk.RowPtr) { + oldEntryAddr := m.hashTable[hashKey] + e := entry{ + ptr: rowPtr, + next: oldEntryAddr, + } + newEntryAddr := m.entryStore.put(e) + m.hashTable[hashKey] = newEntryAddr + m.length++ +} + +// Get gets the values of the "key" and appends them to "values". +func (m *rowHashMap) Get(hashKey uint64) (rowPtrs []chunk.RowPtr) { + entryAddr := m.hashTable[hashKey] + for entryAddr != nullEntryAddr { + e := m.entryStore.get(entryAddr) + entryAddr = e.next + rowPtrs = append(rowPtrs, e.ptr) + } + // Keep the order of input. + for i := 0; i < len(rowPtrs)/2; i++ { + j := len(rowPtrs) - 1 - i + rowPtrs[i], rowPtrs[j] = rowPtrs[j], rowPtrs[i] + } + return +} + +// Len returns the number of rowPtrs in the rowHashMap, the number of keys may be less than Len +// if the same key is put more than once. +func (m *rowHashMap) Len() int { return m.length } diff --git a/executor/hash_table_test.go b/executor/hash_table_test.go new file mode 100644 index 0000000..5478368 --- /dev/null +++ b/executor/hash_table_test.go @@ -0,0 +1,50 @@ +// Copyright 2019 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package executor + +import ( + . "github.com/pingcap/check" + "github.com/pingcap/tidb/util/chunk" +) + +func (s *pkgTestSuite) TestRowHashMap(c *C) { + m := newRowHashMap(0) + m.Put(1, chunk.RowPtr{ChkIdx: 1, RowIdx: 1}) + c.Check(m.Get(1), DeepEquals, []chunk.RowPtr{{ChkIdx: 1, RowIdx: 1}}) + + rawData := map[uint64][]chunk.RowPtr{} + for i := uint64(0); i < 10; i++ { + for j := uint64(0); j < initialEntrySliceLen*i; j++ { + rawData[i] = append(rawData[i], chunk.RowPtr{ChkIdx: uint32(i), RowIdx: uint32(j)}) + } + } + m = newRowHashMap(0) + // put all rawData into m vertically + for j := uint64(0); j < initialEntrySliceLen*9; j++ { + for i := 9; i >= 0; i-- { + i := uint64(i) + if !(j < initialEntrySliceLen*i) { + break + } + m.Put(i, rawData[i][j]) + } + } + // check + totalCount := 0 + for i := uint64(0); i < 10; i++ { + totalCount += len(rawData[i]) + c.Check(m.Get(i), DeepEquals, rawData[i]) + } + c.Check(m.Len(), Equals, totalCount) +} diff --git a/executor/insert.go b/executor/insert.go new file mode 100644 index 0000000..52847f5 --- /dev/null +++ b/executor/insert.go @@ -0,0 +1,76 @@ +// Copyright 2018 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package executor + +import ( + "context" + + "github.com/pingcap/tidb/kv" + "github.com/pingcap/tidb/parser/mysql" + "github.com/pingcap/tidb/types" + "github.com/pingcap/tidb/util/chunk" +) + +// InsertExec represents an insert executor. +type InsertExec struct { + *InsertValues + + Priority mysql.PriorityEnum +} + +func (e *InsertExec) exec(ctx context.Context, rows [][]types.Datum) error { + sessVars := e.ctx.GetSessionVars() + defer sessVars.CleanBuffers() + txn, err := e.ctx.Txn(true) + if err != nil { + return err + } + sessVars.GetWriteStmtBufs().BufStore = kv.NewBufferStore(txn, kv.TempTxnMemBufCap) + sessVars.StmtCtx.AddRecordRows(uint64(len(rows))) + for _, row := range rows { + if _, err := e.addRecord(ctx, row); err != nil { + return err + } + } + return nil +} + +// Next implements the Executor Next interface. +func (e *InsertExec) Next(ctx context.Context, req *chunk.Chunk) error { + req.Reset() + if len(e.children) > 0 && e.children[0] != nil { + return insertRowsFromSelect(ctx, e) + } + return insertRows(ctx, e) +} + +// Close implements the Executor Close interface. +func (e *InsertExec) Close() error { + e.ctx.GetSessionVars().CurrInsertValues = chunk.Row{} + if e.SelectExec != nil { + return e.SelectExec.Close() + } + return nil +} + +// Open implements the Executor Open interface. +func (e *InsertExec) Open(ctx context.Context) error { + if e.SelectExec != nil { + return e.SelectExec.Open(ctx) + } + if !e.allAssignmentsAreConstant { + e.initEvalBuffer() + } + return nil +} diff --git a/executor/insert_common.go b/executor/insert_common.go new file mode 100644 index 0000000..102922f --- /dev/null +++ b/executor/insert_common.go @@ -0,0 +1,677 @@ +// Copyright 2018 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package executor + +import ( + "context" + "math" + + "github.com/pingcap/errors" + "github.com/pingcap/tidb/expression" + "github.com/pingcap/tidb/kv" + "github.com/pingcap/tidb/parser/ast" + "github.com/pingcap/tidb/parser/model" + "github.com/pingcap/tidb/parser/mysql" + "github.com/pingcap/tidb/table" + "github.com/pingcap/tidb/types" + "github.com/pingcap/tidb/util/chunk" + "github.com/pingcap/tidb/util/logutil" + "go.uber.org/zap" +) + +// InsertValues is the data to insert. +type InsertValues struct { + baseExecutor + + rowCount uint64 + lastInsertID uint64 + hasRefCols bool + hasExtraHandle bool + + SelectExec Executor + + Table table.Table + Columns []*ast.ColumnName + Lists [][]expression.Expression + SetList []*expression.Assignment + + insertColumns []*table.Column + + allAssignmentsAreConstant bool + + // colDefaultVals is used to store casted default value. + // Because not every insert statement needs colDefaultVals, so we will init the buffer lazily. + colDefaultVals []defaultVal + evalBuffer chunk.MutRow + evalBufferTypes []*types.FieldType + + // Fill the autoID lazily to datum. This is used for being compatible with JDBC using getGeneratedKeys(). + // `insert|replace values` can guarantee consecutive autoID in a batch. 
+ // Other statements like `insert select from` don't guarantee consecutive autoID.
+ // https://dev.mysql.com/doc/refman/8.0/en/innodb-auto-increment-handling.html
+ lazyFillAutoID bool
+}
+
+type defaultVal struct {
+ val types.Datum
+ // valid indicates whether the val is evaluated. We evaluate the default value lazily.
+ valid bool
+}
+
+type insertCommon interface {
+ insertCommon() *InsertValues
+ exec(ctx context.Context, rows [][]types.Datum) error
+}
+
+func (e *InsertValues) insertCommon() *InsertValues {
+ return e
+}
+
+func (e *InsertValues) exec(ctx context.Context, rows [][]types.Datum) error {
+ panic("derived should overload exec function")
+}
+
+// initInsertColumns sets the explicitly specified columns of an insert statement.
+// There are three types of insert statements:
+// 1 insert ... values(...) --> name type column
+// 2 insert ... set x=y... --> set type column
+// 3 insert ... (select ..) --> name type column
+// See https://dev.mysql.com/doc/refman/5.7/en/insert.html
+func (e *InsertValues) initInsertColumns() error {
+ var cols []*table.Column
+ var err error
+
+ tableCols := e.Table.Cols()
+
+ if len(e.SetList) > 0 {
+ // Process `set` type column.
+ columns := make([]string, 0, len(e.SetList))
+ for _, v := range e.SetList {
+ columns = append(columns, v.ColName.O)
+ }
+ cols, err = table.FindCols(tableCols, columns, e.Table.Meta().PKIsHandle)
+ if err != nil {
+ return errors.Errorf("INSERT INTO %s: %s", e.Table.Meta().Name.O, err)
+ }
+ if len(cols) == 0 {
+ return errors.Errorf("INSERT INTO %s: empty column", e.Table.Meta().Name.O)
+ }
+ } else if len(e.Columns) > 0 {
+ // Process `name` type column.
+ columns := make([]string, 0, len(e.Columns))
+ for _, v := range e.Columns {
+ columns = append(columns, v.Name.O)
+ }
+ cols, err = table.FindCols(tableCols, columns, e.Table.Meta().PKIsHandle)
+ if err != nil {
+ return errors.Errorf("INSERT INTO %s: %s", e.Table.Meta().Name.O, err)
+ }
+ } else {
+ // If e.Columns are empty, use all columns instead.
+ cols = tableCols
+ }
+ for _, col := range cols {
+ e.insertColumns = append(e.insertColumns, col)
+ if col.Name.L == model.ExtraHandleName.L {
+ if !e.ctx.GetSessionVars().AllowWriteRowID {
+ return errors.Errorf("insert, update and replace statements for _tidb_rowid are not supported.")
+ }
+ e.hasExtraHandle = true
+ break
+ }
+ }
+
+ // Check that each column is specified only once.
+ err = table.CheckOnce(cols)
+ if err != nil {
+ return err
+ }
+ return nil
+}
+
+func (e *InsertValues) initEvalBuffer() {
+ numCols := len(e.Table.Cols())
+ if e.hasExtraHandle {
+ numCols++
+ }
+ e.evalBufferTypes = make([]*types.FieldType, numCols)
+ for i, col := range e.Table.Cols() {
+ e.evalBufferTypes[i] = &col.FieldType
+ }
+ if e.hasExtraHandle {
+ e.evalBufferTypes[len(e.evalBufferTypes)-1] = types.NewFieldType(mysql.TypeLonglong)
+ }
+ e.evalBuffer = chunk.MutRowFromTypes(e.evalBufferTypes)
+}
+
+func (e *InsertValues) lazilyInitColDefaultValBuf() (ok bool) {
+ if e.colDefaultVals != nil {
+ return true
+ }
+
+ // Storing casted default values in colDefaultVals is beneficial only when the insert
+ // statement contains more than one row of values.
+ if len(e.Lists) > 1 { + e.colDefaultVals = make([]defaultVal, len(e.Table.Cols())) + return true + } + + return false +} + +func (e *InsertValues) processSetList() error { + if len(e.SetList) > 0 { + if len(e.Lists) > 0 { + return errors.Errorf("INSERT INTO %s: set type should not use values", e.Table) + } + l := make([]expression.Expression, 0, len(e.SetList)) + for _, v := range e.SetList { + l = append(l, v.Expr) + } + e.Lists = append(e.Lists, l) + } + return nil +} + +// insertRows processes `insert|replace into values ()` or `insert|replace into set x=y` +func insertRows(ctx context.Context, base insertCommon) (err error) { + e := base.insertCommon() + // For `insert|replace into set x=y`, process the set list here. + if err = e.processSetList(); err != nil { + return err + } + + e.lazyFillAutoID = true + evalRowFunc := e.fastEvalRow + if !e.allAssignmentsAreConstant { + evalRowFunc = e.evalRow + } + + rows := make([][]types.Datum, 0, len(e.Lists)) + for i, list := range e.Lists { + e.rowCount++ + var row []types.Datum + row, err = evalRowFunc(ctx, list, i) + if err != nil { + return err + } + rows = append(rows, row) + } + // Fill the batch allocated autoIDs. + rows, err = e.lazyAdjustAutoIncrementDatum(ctx, rows) + if err != nil { + return err + } + return base.exec(ctx, rows) +} + +func (e *InsertValues) handleErr(col *table.Column, val *types.Datum, rowIdx int, err error) error { + if err == nil { + return nil + } + + // Convert the error with full messages. + var ( + colTp byte + colName string + ) + if col != nil { + colTp = col.Tp + colName = col.Name.String() + } + + if types.ErrDataTooLong.Equal(err) { + err = resetErrDataTooLong(colName, rowIdx+1, err) + } else if types.ErrOverflow.Equal(err) { + err = types.ErrWarnDataOutOfRange.GenWithStackByArgs(colName, rowIdx+1) + } else if types.ErrTruncated.Equal(err) || types.ErrTruncatedWrongVal.Equal(err) { + valStr, err1 := val.ToString() + if err1 != nil { + logutil.BgLogger().Warn("truncate value failed", zap.Error(err1)) + } + err = table.ErrTruncatedWrongValueForField.GenWithStackByArgs(types.TypeStr(colTp), valStr, colName, rowIdx+1) + } + + return err +} + +// evalRow evaluates a to-be-inserted row. The value of the column may base on another column, +// so we use setValueForRefColumn to fill the empty row some default values when needFillDefaultValues is true. +func (e *InsertValues) evalRow(ctx context.Context, list []expression.Expression, rowIdx int) ([]types.Datum, error) { + rowLen := len(e.Table.Cols()) + if e.hasExtraHandle { + rowLen++ + } + row := make([]types.Datum, rowLen) + hasValue := make([]bool, rowLen) + + // For statements like `insert into t set a = b + 1`. + if e.hasRefCols { + if err := e.setValueForRefColumn(row, hasValue); err != nil { + return nil, err + } + } + + e.evalBuffer.SetDatums(row...) + for i, expr := range list { + val, err := expr.Eval(e.evalBuffer.ToRow()) + if err = e.handleErr(e.insertColumns[i], &val, rowIdx, err); err != nil { + return nil, err + } + val1, err := table.CastValue(e.ctx, val, e.insertColumns[i].ToInfo()) + if err = e.handleErr(e.insertColumns[i], &val, rowIdx, err); err != nil { + return nil, err + } + + offset := e.insertColumns[i].Offset + row[offset], hasValue[offset] = *val1.Copy(), true + e.evalBuffer.SetDatum(offset, val1) + } + // Row may lack of generated column, autoIncrement column, empty column here. 
+ return e.fillRow(ctx, row, hasValue)
+}
+
+var emptyRow chunk.Row
+
+func (e *InsertValues) fastEvalRow(ctx context.Context, list []expression.Expression, rowIdx int) ([]types.Datum, error) {
+ rowLen := len(e.Table.Cols())
+ if e.hasExtraHandle {
+ rowLen++
+ }
+ row := make([]types.Datum, rowLen)
+ hasValue := make([]bool, rowLen)
+ for i, expr := range list {
+ con := expr.(*expression.Constant)
+ val, err := con.Eval(emptyRow)
+ if err = e.handleErr(e.insertColumns[i], &val, rowIdx, err); err != nil {
+ return nil, err
+ }
+ val1, err := table.CastValue(e.ctx, val, e.insertColumns[i].ToInfo())
+ if err = e.handleErr(e.insertColumns[i], &val, rowIdx, err); err != nil {
+ return nil, err
+ }
+ offset := e.insertColumns[i].Offset
+ row[offset], hasValue[offset] = val1, true
+ }
+ return e.fillRow(ctx, row, hasValue)
+}
+
+// setValueForRefColumn sets some default values for the row so that the row value can be evaluated
+// with other columns. It follows these rules:
+// 1. for a nullable column without a default value, use NULL.
+// 2. for a nullable column with a default value, use its default value.
+// 3. for a not null column, use the zero value even in strict mode.
+// 4. for an auto_increment column, use the zero value.
+// 5. for a generated column, use NULL.
+func (e *InsertValues) setValueForRefColumn(row []types.Datum, hasValue []bool) error {
+ for i, c := range e.Table.Cols() {
+ d, err := e.getColDefaultValue(i, c)
+ if err == nil {
+ row[i] = d
+ if !mysql.HasAutoIncrementFlag(c.Flag) {
+ // It is an interesting behavior in MySQL.
+ // If the value of auto ID is not explicit, MySQL uses the value 0 for auto ID when it is
+ // evaluated by another column, but it should be used once only.
+ // When we fill it as an auto ID column, it should be set as it used to be.
+ // So just keep `hasValue` false for auto ID, and set the others to true.
+ hasValue[c.Offset] = true
+ }
+ } else if table.ErrNoDefaultValue.Equal(err) {
+ row[i] = table.GetZeroValue(c.ToInfo())
+ hasValue[c.Offset] = false
+ } else if e.handleErr(c, &d, 0, err) != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func insertRowsFromSelect(ctx context.Context, base insertCommon) error {
+ // process `insert|replace into ... select ... from ...`
+ e := base.insertCommon()
+ selectExec := e.children[0]
+ fields := retTypes(selectExec)
+ chk := newFirstChunk(selectExec)
+ iter := chunk.NewIterator4Chunk(chk)
+ rows := make([][]types.Datum, 0, chk.Capacity())
+
+ sessVars := e.ctx.GetSessionVars()
+ if !sessVars.StrictSQLMode {
+ // If StrictSQLMode is disabled and this is an insert-select statement, it also handles BadNullAsWarning.
+ sessVars.StmtCtx.BadNullAsWarning = true
+ }
+
+ for {
+ err := Next(ctx, selectExec, chk)
+ if err != nil {
+ return err
+ }
+ if chk.NumRows() == 0 {
+ break
+ }
+
+ for innerChunkRow := iter.Begin(); innerChunkRow != iter.End(); innerChunkRow = iter.Next() {
+ innerRow := innerChunkRow.GetDatumRow(fields)
+ e.rowCount++
+ row, err := e.getRow(ctx, innerRow)
+ if err != nil {
+ return err
+ }
+ rows = append(rows, row)
+ }
+
+ err = base.exec(ctx, rows)
+ if err != nil {
+ return err
+ }
+ rows = rows[:0]
+ }
+ return nil
+}
+
+// getRow gets the row which comes from `insert into select from` or `load data`.
+// The input values from these two statements are datums instead of
+// expressions which are used in `insert into set x=y`.
+func (e *InsertValues) getRow(ctx context.Context, vals []types.Datum) ([]types.Datum, error) { + row := make([]types.Datum, len(e.Table.Cols())) + hasValue := make([]bool, len(e.Table.Cols())) + for i, v := range vals { + casted, err := table.CastValue(e.ctx, v, e.insertColumns[i].ToInfo()) + if e.handleErr(nil, &v, 0, err) != nil { + return nil, err + } + + offset := e.insertColumns[i].Offset + row[offset] = casted + hasValue[offset] = true + } + + return e.fillRow(ctx, row, hasValue) +} + +// getColDefaultValue gets the column default value. +func (e *InsertValues) getColDefaultValue(idx int, col *table.Column) (d types.Datum, err error) { + if e.colDefaultVals != nil && e.colDefaultVals[idx].valid { + return e.colDefaultVals[idx].val, nil + } + + defaultVal, err := table.GetColDefaultValue(e.ctx, col.ToInfo()) + if err != nil { + return types.Datum{}, err + } + if initialized := e.lazilyInitColDefaultValBuf(); initialized { + e.colDefaultVals[idx].val = defaultVal + e.colDefaultVals[idx].valid = true + } + + return defaultVal, nil +} + +// fillColValue fills the column value if it is not set in the insert statement. +func (e *InsertValues) fillColValue(ctx context.Context, datum types.Datum, idx int, column *table.Column, hasValue bool) (types.Datum, + error) { + if mysql.HasAutoIncrementFlag(column.Flag) { + if e.lazyFillAutoID { + // Handle hasValue info in autoIncrement column previously for lazy handle. + if !hasValue { + datum.SetNull() + } + // Store the plain datum of autoIncrement column directly for lazy handle. + return datum, nil + } + d, err := e.adjustAutoIncrementDatum(ctx, datum, hasValue, column) + if err != nil { + return types.Datum{}, err + } + return d, nil + } + if !hasValue { + d, err := e.getColDefaultValue(idx, column) + if e.handleErr(column, &datum, 0, err) != nil { + return types.Datum{}, err + } + return d, nil + } + return datum, nil +} + +// fillRow fills generated columns, auto_increment column and empty column. +// For NOT NULL column, it will return error or use zero value based on sql_mode. +// When lazyFillAutoID is true, fill row will lazily handle auto increment datum for lazy batch allocation. +// `insert|replace values` can guarantee consecutive autoID in a batch. +// Other statements like `insert select from` don't guarantee consecutive autoID. +// https://dev.mysql.com/doc/refman/8.0/en/innodb-auto-increment-handling.html +func (e *InsertValues) fillRow(ctx context.Context, row []types.Datum, hasValue []bool) ([]types.Datum, error) { + for i, c := range e.Table.Cols() { + var err error + // Get the default value for all no value columns, the auto increment column is different from the others. + if row[i], err = e.fillColValue(ctx, row[i], i, c, hasValue[i]); err != nil { + return nil, err + } + if !e.lazyFillAutoID || (e.lazyFillAutoID && !mysql.HasAutoIncrementFlag(c.Flag)) { + if row[i], err = c.HandleBadNull(row[i], e.ctx.GetSessionVars().StmtCtx); err != nil { + return nil, err + } + } + } + return row, nil +} + +// isAutoNull can help judge whether a datum is AutoIncrement Null quickly. +// This used to help lazyFillAutoIncrement to find consecutive N datum backwards for batch autoID alloc. +func (e *InsertValues) isAutoNull(ctx context.Context, d types.Datum, col *table.Column) bool { + var err error + var recordID int64 + if !d.IsNull() { + recordID, err = getAutoRecordID(d, &col.FieldType, true) + if err != nil { + return false + } + } + // Use the value if it's not null and not 0. 
+ if recordID != 0 { + return false + } + // Change NULL to auto id. + // Change value 0 to auto id, if NoAutoValueOnZero SQL mode is not set. + if d.IsNull() || e.ctx.GetSessionVars().SQLMode&mysql.ModeNoAutoValueOnZero == 0 { + return true + } + return false +} + +func (e *InsertValues) hasAutoIncrementColumn() (int, bool) { + colIdx := -1 + for i, c := range e.Table.Cols() { + if mysql.HasAutoIncrementFlag(c.Flag) { + colIdx = i + break + } + } + return colIdx, colIdx != -1 +} + +// lazyAdjustAutoIncrementDatum is quite similar to adjustAutoIncrementDatum +// except it will cache auto increment datum previously for lazy batch allocation of autoID. +func (e *InsertValues) lazyAdjustAutoIncrementDatum(ctx context.Context, rows [][]types.Datum) ([][]types.Datum, error) { + // Not in lazyFillAutoID mode means no need to fill. + if !e.lazyFillAutoID { + return rows, nil + } + // No autoIncrement column means no need to fill. + colIdx, ok := e.hasAutoIncrementColumn() + if !ok { + return rows, nil + } + // Get the autoIncrement column. + col := e.Table.Cols()[colIdx] + // Consider the colIdx of autoIncrement in row are the same. + length := len(rows) + for i := 0; i < length; i++ { + autoDatum := rows[i][colIdx] + + var err error + var recordID int64 + if !autoDatum.IsNull() { + recordID, err = getAutoRecordID(autoDatum, &col.FieldType, true) + if err != nil { + return nil, err + } + } + // Use the value if it's not null and not 0. + if recordID != 0 { + err = e.Table.RebaseAutoID(e.ctx, recordID, true) + if err != nil { + return nil, err + } + e.ctx.GetSessionVars().StmtCtx.InsertID = uint64(recordID) + rows[i][colIdx] = autoDatum + continue + } + + // Change NULL to auto id. + // Change value 0 to auto id, if NoAutoValueOnZero SQL mode is not set. + if autoDatum.IsNull() || e.ctx.GetSessionVars().SQLMode&mysql.ModeNoAutoValueOnZero == 0 { + // Find consecutive num. + start := i + cnt := 1 + for i+1 < length && e.isAutoNull(ctx, rows[i+1][colIdx], col) { + i++ + cnt++ + } + // Alloc batch N consecutive (min, max] autoIDs. + // max value can be derived from adding one for cnt times. + min, _, err := table.AllocBatchAutoIncrementValue(ctx, e.Table, e.ctx, cnt) + if e.handleErr(col, &autoDatum, cnt, err) != nil { + return nil, err + } + // It's compatible with mysql setting the first allocated autoID to lastInsertID. + // Cause autoID may be specified by user, judge only the first row is not suitable. + if e.lastInsertID == 0 { + e.lastInsertID = uint64(min) + 1 + } + // Assign autoIDs to rows. + for j := 0; j < cnt; j++ { + offset := j + start + d := rows[offset][colIdx] + + id := int64(uint64(min) + uint64(j) + 1) + d.SetAutoID(id, col.Flag) + + // The value of d is adjusted by auto ID, so we need to cast it again. + d, err := table.CastValue(e.ctx, d, col.ToInfo()) + if err != nil { + return nil, err + } + rows[offset][colIdx] = d + } + continue + } + + autoDatum.SetAutoID(recordID, col.Flag) + + // the value of d is adjusted by auto ID, so we need to cast it again. 
+ autoDatum, err = table.CastValue(e.ctx, autoDatum, col.ToInfo()) + if err != nil { + return nil, err + } + rows[i][colIdx] = autoDatum + } + return rows, nil +} + +func (e *InsertValues) adjustAutoIncrementDatum(ctx context.Context, d types.Datum, hasValue bool, c *table.Column) (types.Datum, error) { + var err error + var recordID int64 + if !hasValue { + d.SetNull() + } + if !d.IsNull() { + recordID, err = getAutoRecordID(d, &c.FieldType, true) + if err != nil { + return types.Datum{}, err + } + } + // Use the value if it's not null and not 0. + if recordID != 0 { + err = e.Table.RebaseAutoID(e.ctx, recordID, true) + if err != nil { + return types.Datum{}, err + } + e.ctx.GetSessionVars().StmtCtx.InsertID = uint64(recordID) + return d, nil + } + + // Change NULL to auto id. + // Change value 0 to auto id, if NoAutoValueOnZero SQL mode is not set. + if d.IsNull() || e.ctx.GetSessionVars().SQLMode&mysql.ModeNoAutoValueOnZero == 0 { + recordID, err = table.AllocAutoIncrementValue(ctx, e.Table, e.ctx) + if e.handleErr(c, &d, 0, err) != nil { + return types.Datum{}, err + } + // It's compatible with mysql setting the first allocated autoID to lastInsertID. + // Cause autoID may be specified by user, judge only the first row is not suitable. + if e.lastInsertID == 0 { + e.lastInsertID = uint64(recordID) + } + } + + d.SetAutoID(recordID, c.Flag) + + // the value of d is adjusted by auto ID, so we need to cast it again. + casted, err := table.CastValue(e.ctx, d, c.ToInfo()) + if err != nil { + return types.Datum{}, err + } + return casted, nil +} + +func getAutoRecordID(d types.Datum, target *types.FieldType, isInsert bool) (int64, error) { + var recordID int64 + + switch target.Tp { + case mysql.TypeFloat, mysql.TypeDouble: + f := d.GetFloat64() + if isInsert { + recordID = int64(math.Round(f)) + } else { + recordID = int64(f) + } + case mysql.TypeTiny, mysql.TypeShort, mysql.TypeInt24, mysql.TypeLong, mysql.TypeLonglong: + recordID = d.GetInt64() + default: + return 0, errors.Errorf("unexpected field type [%v]", target.Tp) + } + + return recordID, nil +} + +func (e *InsertValues) addRecord(ctx context.Context, row []types.Datum) (int64, error) { + txn, err := e.ctx.Txn(true) + if err != nil { + return 0, err + } + if !e.ctx.GetSessionVars().ConstraintCheckInPlace { + txn.SetOption(kv.PresumeKeyNotExists, nil) + } + h, err := e.Table.AddRecord(e.ctx, row, table.WithCtx(ctx)) + txn.DelOption(kv.PresumeKeyNotExists) + if err != nil { + return 0, err + } + if e.lastInsertID != 0 { + e.ctx.GetSessionVars().SetLastInsertID(e.lastInsertID) + } + return h, nil +} diff --git a/executor/insert_test.go b/executor/insert_test.go new file mode 100644 index 0000000..9e1e7ec --- /dev/null +++ b/executor/insert_test.go @@ -0,0 +1,71 @@ +// Copyright 2018 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package executor_test + +import ( + "fmt" + "strconv" + "sync" + + . 
"github.com/pingcap/check" + "github.com/pingcap/tidb/parser/terror" + "github.com/pingcap/tidb/table" + "github.com/pingcap/tidb/util/testkit" +) + +func (s *testSuite3) TestInsertWrongValueForField(c *C) { + tk := testkit.NewTestKit(c, s.store) + tk.MustExec("use test") + tk.MustExec(`drop table if exists t1;`) + tk.MustExec(`create table t1(a bigint);`) + _, err := tk.Exec(`insert into t1 values("asfasdfsajhlkhlksdaf");`) + c.Assert(terror.ErrorEqual(err, table.ErrTruncatedWrongValueForField), IsTrue) +} + +func (s *testSuite3) TestAllocateContinuousRowID(c *C) { + tk := testkit.NewTestKit(c, s.store) + tk.MustExec(`use test`) + tk.MustExec(`create table t1 (a int,b int, key I_a(a));`) + wg := sync.WaitGroup{} + for i := 0; i < 5; i++ { + wg.Add(1) + go func(idx int) { + defer wg.Done() + tk := testkit.NewTestKitWithInit(c, s.store) + for j := 0; j < 10; j++ { + k := strconv.Itoa(idx*100 + j) + sql := "insert into t1(a,b) values (" + k + ", 2)" + for t := 0; t < 20; t++ { + sql += ",(" + k + ",2)" + } + tk.MustExec(sql) + q := "select _tidb_rowid from t1 where a=" + k + fmt.Printf("query: %v\n", q) + rows := tk.MustQuery(q).Rows() + c.Assert(len(rows), Equals, 21) + last := 0 + for _, r := range rows { + c.Assert(len(r), Equals, 1) + v, err := strconv.Atoi(r[0].(string)) + c.Assert(err, Equals, nil) + if last > 0 { + c.Assert(last+1, Equals, v) + } + last = v + } + } + }(i) + } + wg.Wait() +} diff --git a/executor/join.go b/executor/join.go new file mode 100644 index 0000000..ec2f54d --- /dev/null +++ b/executor/join.go @@ -0,0 +1,362 @@ +// Copyright 2016 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package executor + +import ( + "context" + "sync" + + "github.com/pingcap/errors" + "github.com/pingcap/tidb/expression" + plannercore "github.com/pingcap/tidb/planner/core" + "github.com/pingcap/tidb/util" + "github.com/pingcap/tidb/util/chunk" + "github.com/pingcap/tidb/util/codec" +) + +var _ Executor = &HashJoinExec{} + +// HashJoinExec implements the hash join algorithm. +type HashJoinExec struct { + baseExecutor + + outerSideExec Executor + innerSideExec Executor + innerSideEstCount float64 + outerSideFilter expression.CNFExprs + outerKeys []*expression.Column + innerKeys []*expression.Column + + // concurrency is the number of partition, build and join workers. + concurrency uint + rowContainer *hashRowContainer + // joinWorkerWaitGroup is for sync multiple join workers. + joinWorkerWaitGroup sync.WaitGroup + // closeCh add a lock for closing executor. + closeCh chan struct{} + joinType plannercore.JoinType + + // We build individual joiner for each join worker when use chunk-based + // execution, to avoid the concurrency of joiner.chk and joiner.selected. 
+ joiners []joiner + + outerChkResourceCh chan *outerChkResource + outerResultChs []chan *chunk.Chunk + joinChkResourceCh []chan *chunk.Chunk + joinResultCh chan *hashjoinWorkerResult + + prepared bool +} + +// outerChkResource stores the result of the join outer side fetch worker, +// `dest` is for Chunk reuse: after join workers process the outer side chunk which is read from `dest`, +// they'll store the used chunk as `chk`, and then the outer side fetch worker will put new data into `chk` and write `chk` into dest. +type outerChkResource struct { + chk *chunk.Chunk + dest chan<- *chunk.Chunk +} + +// hashjoinWorkerResult stores the result of join workers, +// `src` is for Chunk reuse: the main goroutine will get the join result chunk `chk`, +// and push `chk` into `src` after processing, join worker goroutines get the empty chunk from `src` +// and push new data into this chunk. +type hashjoinWorkerResult struct { + chk *chunk.Chunk + err error + src chan<- *chunk.Chunk +} + +// Close implements the Executor Close interface. +func (e *HashJoinExec) Close() error { + close(e.closeCh) + if e.prepared { + if e.joinResultCh != nil { + for range e.joinResultCh { + } + } + if e.outerChkResourceCh != nil { + close(e.outerChkResourceCh) + for range e.outerChkResourceCh { + } + } + for i := range e.outerResultChs { + for range e.outerResultChs[i] { + } + } + for i := range e.joinChkResourceCh { + close(e.joinChkResourceCh[i]) + for range e.joinChkResourceCh[i] { + } + } + e.outerChkResourceCh = nil + e.joinChkResourceCh = nil + } + err := e.baseExecutor.Close() + return err +} + +// Open implements the Executor Open interface. +func (e *HashJoinExec) Open(ctx context.Context) error { + if err := e.baseExecutor.Open(ctx); err != nil { + return err + } + + e.prepared = false + e.closeCh = make(chan struct{}) + e.joinWorkerWaitGroup = sync.WaitGroup{} + return nil +} + +// Next implements the Executor Next interface. +// hash join constructs the result following these steps: +// step 1. fetch data from build side child and build a hash table; +// step 2. fetch data from outer child in a background goroutine and outer the hash table in multiple join workers. +func (e *HashJoinExec) Next(ctx context.Context, req *chunk.Chunk) (err error) { + if !e.prepared { + err := e.fetchAndBuildHashTable(ctx) + if err != nil { + return err + } + e.fetchAndProbeHashTable(ctx) + e.prepared = true + } + req.Reset() + + result, ok := <-e.joinResultCh + if !ok { + return nil + } + if result.err != nil { + return result.err + } + req.SwapColumns(result.chk) + result.src <- result.chk + return nil +} + +func (e *HashJoinExec) fetchAndBuildHashTable(ctx context.Context) error { + // TODO: Implementing the building hash table stage. + + // In this stage, you'll read the data from the inner side executor of the join operator and + // then use its data to build hash table. + + // You'll need to store the hash table in `e.rowContainer` + // and you can call `newHashRowContainer` in `executor/hash_table.go` to build it. + // In this stage you can only assign value for `e.rowContainer` without changing any value of the `HashJoinExec`. + return nil +} + +func (e *HashJoinExec) initializeForOuter() { + // e.outerResultChs is for transmitting the chunks which store the data of + // outerSideExec, it'll be written by outer side worker goroutine, and read by join + // workers. 
+ e.outerResultChs = make([]chan *chunk.Chunk, e.concurrency)
+ for i := uint(0); i < e.concurrency; i++ {
+ e.outerResultChs[i] = make(chan *chunk.Chunk, 1)
+ }
+
+ // e.outerChkResourceCh is for transmitting the used outerSideExec chunks from
+ // the join workers to the outerSideExec worker.
+ e.outerChkResourceCh = make(chan *outerChkResource, e.concurrency)
+ for i := uint(0); i < e.concurrency; i++ {
+ e.outerChkResourceCh <- &outerChkResource{
+ chk: newFirstChunk(e.outerSideExec),
+ dest: e.outerResultChs[i],
+ }
+ }
+
+ // e.joinChkResourceCh is for transmitting the reused join result chunks
+ // from the main thread to join worker goroutines.
+ e.joinChkResourceCh = make([]chan *chunk.Chunk, e.concurrency)
+ for i := uint(0); i < e.concurrency; i++ {
+ e.joinChkResourceCh[i] = make(chan *chunk.Chunk, 1)
+ e.joinChkResourceCh[i] <- newFirstChunk(e)
+ }
+
+ // e.joinResultCh is for transmitting the join result chunks to the main
+ // thread.
+ e.joinResultCh = make(chan *hashjoinWorkerResult, e.concurrency+1)
+}
+
+// fetchOuterSideChunks fetches chunks from the outer side (the big table) in a background goroutine
+// and sends the chunks to multiple channels which will be read by multiple join workers.
+func (e *HashJoinExec) fetchOuterSideChunks(ctx context.Context) {
+ for {
+ var outerSideResource *outerChkResource
+ var ok bool
+ select {
+ case <-e.closeCh:
+ return
+ case outerSideResource, ok = <-e.outerChkResourceCh:
+ if !ok {
+ return
+ }
+ }
+ outerSideResult := outerSideResource.chk
+ err := Next(ctx, e.outerSideExec, outerSideResult)
+ if err != nil {
+ e.joinResultCh <- &hashjoinWorkerResult{
+ err: err,
+ }
+ return
+ }
+
+ if outerSideResult.NumRows() == 0 {
+ return
+ }
+
+ outerSideResource.dest <- outerSideResult
+ }
+}
+
+func (e *HashJoinExec) fetchAndProbeHashTable(ctx context.Context) {
+ e.initializeForOuter()
+ e.joinWorkerWaitGroup.Add(1)
+ go util.WithRecovery(func() { e.fetchOuterSideChunks(ctx) }, e.handleOuterSideFetcherPanic)
+
+ outerKeyColIdx := make([]int, len(e.outerKeys))
+ for i := range e.outerKeys {
+ outerKeyColIdx[i] = e.outerKeys[i].Index
+ }
+
+ // Start e.concurrency join workers to probe the hash table and join the build side and
+ // outer side rows.
+ for i := uint(0); i < e.concurrency; i++ {
+ e.joinWorkerWaitGroup.Add(1)
+ workID := i
+ go util.WithRecovery(func() { e.runJoinWorker(workID, outerKeyColIdx) }, e.handleJoinWorkerPanic)
+ }
+ go util.WithRecovery(e.waitJoinWorkersAndCloseResultChan, nil)
+}
+
+func (e *HashJoinExec) runJoinWorker(workerID uint, outerKeyColIdx []int) {
+ // TODO: Implement the worker of the probing stage.
+
+ // In this method, you read the data from the channel e.outerResultChs[workerID].
+ // Then use the `e.join2Chunk` method to get the joined result `joinResult`,
+ // and put the `joinResult` into the channel `e.joinResultCh`.
+
+ // You may pay attention to:
+ //
+ // - e.closeCh, a channel which signals that the join can be terminated as soon as possible.
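+
+ // Below is a minimal commented sketch of one possible probe loop. It is illustrative
+ // only, not the reference solution. It assumes the helpers defined in this file
+ // (getNewJoinResult, join2Chunk) and a hashContext exposing the allTypes/keyColIdx
+ // fields used elsewhere in this file, built from retTypes(e.outerSideExec) and
+ // outerKeyColIdx:
+ //
+ //   ok, joinResult := e.getNewJoinResult(workerID)
+ //   if !ok {
+ //       return
+ //   }
+ //   hCtx := &hashContext{
+ //       allTypes:  retTypes(e.outerSideExec),
+ //       keyColIdx: outerKeyColIdx,
+ //   }
+ //   selected := make([]bool, 0, chunk.InitialCapacity)
+ //   var outerSideResult *chunk.Chunk
+ //   for ok := true; ok; {
+ //       // Stop as soon as the executor is closed; otherwise wait for an outer side chunk.
+ //       select {
+ //       case <-e.closeCh:
+ //           return
+ //       case outerSideResult, ok = <-e.outerResultChs[workerID]:
+ //       }
+ //       if !ok {
+ //           break
+ //       }
+ //       ok, joinResult = e.join2Chunk(workerID, outerSideResult, hCtx, joinResult, selected)
+ //       if !ok {
+ //           break
+ //       }
+ //       // Return the consumed chunk to the outer side fetcher for reuse.
+ //       outerSideResult.Reset()
+ //       e.outerChkResourceCh <- &outerChkResource{chk: outerSideResult, dest: e.outerResultChs[workerID]}
+ //   }
+ //   if joinResult == nil {
+ //       return
+ //   }
+ //   if joinResult.err != nil || (joinResult.chk != nil && joinResult.chk.NumRows() > 0) {
+ //       e.joinResultCh <- joinResult
+ //   }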
+} + +func (e *HashJoinExec) getNewJoinResult(workerID uint) (bool, *hashjoinWorkerResult) { + joinResult := &hashjoinWorkerResult{ + src: e.joinChkResourceCh[workerID], + } + ok := true + select { + case <-e.closeCh: + ok = false + case joinResult.chk, ok = <-e.joinChkResourceCh[workerID]: + } + return ok, joinResult +} + +func (e *HashJoinExec) waitJoinWorkersAndCloseResultChan() { + e.joinWorkerWaitGroup.Wait() + close(e.joinResultCh) +} + +func (e *HashJoinExec) handleOuterSideFetcherPanic(r interface{}) { + for i := range e.outerResultChs { + close(e.outerResultChs[i]) + } + if r != nil { + e.joinResultCh <- &hashjoinWorkerResult{err: errors.Errorf("%v", r)} + } + e.joinWorkerWaitGroup.Done() +} + +func (e *HashJoinExec) handleJoinWorkerPanic(r interface{}) { + if r != nil { + e.joinResultCh <- &hashjoinWorkerResult{err: errors.Errorf("%v", r)} + } + e.joinWorkerWaitGroup.Done() +} + +func (e *HashJoinExec) joinMatchedOuterSideRow2Chunk(workerID uint, outerKey uint64, outerSideRow chunk.Row, hCtx *hashContext, + joinResult *hashjoinWorkerResult) (bool, *hashjoinWorkerResult) { + buildSideRows, err := e.rowContainer.GetMatchedRows(outerKey, outerSideRow, hCtx) + if err != nil { + joinResult.err = err + return false, joinResult + } + if len(buildSideRows) == 0 { + e.joiners[workerID].onMissMatch(outerSideRow, joinResult.chk) + return true, joinResult + } + iter := chunk.NewIterator4Slice(buildSideRows) + hasMatch := false + for iter.Begin(); iter.Current() != iter.End(); { + matched, _, err := e.joiners[workerID].tryToMatchInners(outerSideRow, iter, joinResult.chk) + if err != nil { + joinResult.err = err + return false, joinResult + } + hasMatch = hasMatch || matched + + if joinResult.chk.IsFull() { + e.joinResultCh <- joinResult + ok, joinResult := e.getNewJoinResult(workerID) + if !ok { + return false, joinResult + } + } + } + if !hasMatch { + e.joiners[workerID].onMissMatch(outerSideRow, joinResult.chk) + } + return true, joinResult +} + +func (e *HashJoinExec) join2Chunk(workerID uint, outerSideChk *chunk.Chunk, hCtx *hashContext, joinResult *hashjoinWorkerResult, + selected []bool) (ok bool, _ *hashjoinWorkerResult) { + var err error + selected, err = expression.VectorizedFilter(e.ctx, e.outerSideFilter, chunk.NewIterator4Chunk(outerSideChk), selected) + if err != nil { + joinResult.err = err + return false, joinResult + } + + hCtx.initHash(outerSideChk.NumRows()) + for _, i := range hCtx.keyColIdx { + err = codec.HashChunkSelected(e.rowContainer.sc, hCtx.hashVals, outerSideChk, hCtx.allTypes[i], i, hCtx.buf, hCtx.hasNull, selected) + if err != nil { + joinResult.err = err + return false, joinResult + } + } + + for i := range selected { + if !selected[i] || hCtx.hasNull[i] { // process unmatched outer side rows + e.joiners[workerID].onMissMatch(outerSideChk.GetRow(i), joinResult.chk) + } else { // process matched outer side rows + outerKey, outerRow := hCtx.hashVals[i].Sum64(), outerSideChk.GetRow(i) + ok, joinResult = e.joinMatchedOuterSideRow2Chunk(workerID, outerKey, outerRow, hCtx, joinResult) + if !ok { + return false, joinResult + } + } + if joinResult.chk.IsFull() { + e.joinResultCh <- joinResult + ok, joinResult = e.getNewJoinResult(workerID) + if !ok { + return false, joinResult + } + } + } + return true, joinResult +} diff --git a/executor/join_test.go b/executor/join_test.go new file mode 100644 index 0000000..5da3013 --- /dev/null +++ b/executor/join_test.go @@ -0,0 +1,414 @@ +// Copyright 2016 PingCAP, Inc. 
+// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package executor_test + +import ( + "context" + "time" + + . "github.com/pingcap/check" + "github.com/pingcap/tidb/util/testkit" +) + +type testSuiteJoin1 struct { + *baseTestSuite +} + +type testSuiteJoin2 struct { + *baseTestSuite +} + +type testSuiteJoin3 struct { + *baseTestSuite +} + +func (s *testSuite2) TestJoin(c *C) { + tk := testkit.NewTestKit(c, s.store) + + tk.MustExec("set @@tidb_index_lookup_join_concurrency = 200") + c.Assert(tk.Se.GetSessionVars().IndexLookupJoinConcurrency, Equals, 200) + + tk.MustExec("set @@tidb_index_lookup_join_concurrency = 4") + c.Assert(tk.Se.GetSessionVars().IndexLookupJoinConcurrency, Equals, 4) + + tk.MustExec("set @@tidb_index_lookup_size = 2") + tk.MustExec("use test") + tk.MustExec("drop table if exists t") + tk.MustExec("create table t (c int)") + tk.MustExec("insert t values (1)") + tests := []struct { + sql string + result [][]interface{} + }{ + { + "select 1 from t as a left join t as b on 0", + testkit.Rows("1"), + }, + { + "select 1 from t as a join t as b on 1", + testkit.Rows("1"), + }, + } + for _, tt := range tests { + result := tk.MustQuery(tt.sql) + result.Check(tt.result) + } + + tk.MustExec("drop table if exists t") + tk.MustExec("drop table if exists t1") + tk.MustExec("create table t(c1 int, c2 int)") + tk.MustExec("create table t1(c1 int, c2 int)") + tk.MustExec("insert into t values(1,1),(2,2)") + tk.MustExec("insert into t1 values(2,3),(4,4)") + result := tk.MustQuery("select * from t left outer join t1 on t.c1 = t1.c1 where t.c1 = 1 or t1.c2 > 20") + result.Check(testkit.Rows("1 1 ")) + result = tk.MustQuery("select * from t1 right outer join t on t.c1 = t1.c1 where t.c1 = 1 or t1.c2 > 20") + result.Check(testkit.Rows(" 1 1")) + result = tk.MustQuery("select * from t right outer join t1 on t.c1 = t1.c1 where t.c1 = 1 or t1.c2 > 20") + result.Check(testkit.Rows()) + result = tk.MustQuery("select * from t left outer join t1 on t.c1 = t1.c1 where t1.c1 = 3 or false") + result.Check(testkit.Rows()) + result = tk.MustQuery("select * from t left outer join t1 on t.c1 = t1.c1 and t.c1 != 1 order by t1.c1") + result.Check(testkit.Rows("1 1 ", "2 2 2 3")) + + tk.MustExec("drop table if exists t1") + tk.MustExec("drop table if exists t2") + tk.MustExec("drop table if exists t3") + + tk.MustExec("create table t1 (c1 int, c2 int)") + tk.MustExec("create table t2 (c1 int, c2 int)") + tk.MustExec("create table t3 (c1 int, c2 int)") + + tk.MustExec("insert into t1 values (1,1), (2,2), (3,3)") + tk.MustExec("insert into t2 values (1,1), (3,3), (5,5)") + tk.MustExec("insert into t3 values (1,1), (5,5), (9,9)") + + result = tk.MustQuery("select * from t1 left join t2 on t1.c1 = t2.c1 right join t3 on t2.c1 = t3.c1 order by t1.c1, t1.c2, t2.c1, t2.c2, t3.c1, t3.c2;") + result.Check(testkit.Rows(" 5 5", " 9 9", "1 1 1 1 1 1")) + + tk.MustExec("drop table if exists t1") + tk.MustExec("create table t1 (c1 int)") + tk.MustExec("insert into t1 values (1), (1), (1)") + result = tk.MustQuery("select * from t1 a join t1 b on a.c1 = b.c1;") + 
result.Check(testkit.Rows("1 1", "1 1", "1 1", "1 1", "1 1", "1 1", "1 1", "1 1", "1 1")) + + tk.MustExec("drop table if exists t") + tk.MustExec("drop table if exists t1") + tk.MustExec("create table t(c1 int, index k(c1))") + tk.MustExec("create table t1(c1 int)") + tk.MustExec("insert into t values (1),(2),(3),(4),(5),(6),(7)") + tk.MustExec("insert into t1 values (1),(2),(3),(4),(5),(6),(7)") + result = tk.MustQuery("select a.c1 from t a , t1 b where a.c1 = b.c1 order by a.c1;") + result.Check(testkit.Rows("1", "2", "3", "4", "5", "6", "7")) + // Test race. + result = tk.MustQuery("select a.c1 from t a , t1 b where a.c1 = b.c1 and a.c1 + b.c1 > 5 order by b.c1") + result.Check(testkit.Rows("3", "4", "5", "6", "7")) + + tk.MustExec("drop table if exists t,t2,t1") + tk.MustExec("create table t(c1 int)") + tk.MustExec("create table t1(c1 int, c2 int)") + tk.MustExec("create table t2(c1 int, c2 int)") + tk.MustExec("insert into t1 values(1,2),(2,3),(3,4)") + tk.MustExec("insert into t2 values(1,0),(2,0),(3,0)") + tk.MustExec("insert into t values(1),(2),(3)") + result = tk.MustQuery("select * from t1 , t2 where t2.c1 = t1.c1 and t2.c2 = 0 and t1.c1 = 1 order by t1.c2 limit 1") + result.Sort().Check(testkit.Rows("1 2 1 0")) + tk.MustExec("drop table if exists t, t1") + tk.MustExec("create table t(a int primary key, b int)") + tk.MustExec("create table t1(a int, b int, key s(b))") + tk.MustExec("insert into t values(1, 1), (2, 2), (3, 3)") + tk.MustExec("insert into t1 values(1, 2), (1, 3), (1, 4), (3, 4), (4, 5)") + + // The physical plans of the two sql are tested at physical_plan_test.go + tk.MustQuery("select /*+ INL_JOIN(t, t1) */ * from t join t1 on t.a=t1.a").Check(testkit.Rows("1 1 1 2", "1 1 1 3", "1 1 1 4", "3 3 3 4")) + tk.MustQuery("select /*+ INL_HASH_JOIN(t, t1) */ * from t join t1 on t.a=t1.a").Check(testkit.Rows("1 1 1 2", "1 1 1 3", "1 1 1 4", "3 3 3 4")) + tk.MustQuery("select /*+ INL_MERGE_JOIN(t, t1) */ * from t join t1 on t.a=t1.a").Check(testkit.Rows("1 1 1 2", "1 1 1 3", "1 1 1 4", "3 3 3 4")) + tk.MustQuery("select /*+ INL_JOIN(t) */ * from t1 join t on t.a=t1.a and t.a < t1.b").Check(testkit.Rows("1 2 1 1", "1 3 1 1", "1 4 1 1", "3 4 3 3")) + tk.MustQuery("select /*+ INL_HASH_JOIN(t) */ * from t1 join t on t.a=t1.a and t.a < t1.b").Check(testkit.Rows("1 2 1 1", "1 3 1 1", "1 4 1 1", "3 4 3 3")) + tk.MustQuery("select /*+ INL_MERGE_JOIN(t) */ * from t1 join t on t.a=t1.a and t.a < t1.b").Check(testkit.Rows("1 2 1 1", "1 3 1 1", "1 4 1 1", "3 4 3 3")) + // Test single index reader. 
+ tk.MustQuery("select /*+ INL_JOIN(t, t1) */ t1.b from t1 join t on t.b=t1.b").Check(testkit.Rows("2", "3")) + tk.MustQuery("select /*+ INL_HASH_JOIN(t, t1) */ t1.b from t1 join t on t.b=t1.b").Check(testkit.Rows("2", "3")) + tk.MustQuery("select /*+ INL_MERGE_JOIN(t, t1) */ t1.b from t1 join t on t.b=t1.b").Check(testkit.Rows("2", "3")) + tk.MustQuery("select /*+ INL_JOIN(t1) */ * from t right outer join t1 on t.a=t1.a").Check(testkit.Rows("1 1 1 2", "1 1 1 3", "1 1 1 4", "3 3 3 4", " 4 5")) + tk.MustQuery("select /*+ INL_HASH_JOIN(t1) */ * from t right outer join t1 on t.a=t1.a").Check(testkit.Rows("1 1 1 2", "1 1 1 3", "1 1 1 4", "3 3 3 4", " 4 5")) + tk.MustQuery("select /*+ INL_MERGE_JOIN(t1) */ * from t right outer join t1 on t.a=t1.a").Check(testkit.Rows("1 1 1 2", "1 1 1 3", "1 1 1 4", "3 3 3 4", " 4 5")) + tk.MustExec("drop table if exists t, t1") + tk.MustExec("create table t(a int primary key, b int, key s(b))") + tk.MustExec("create table t1(a int, b int)") + tk.MustExec("insert into t values(1, 3), (2, 2), (3, 1)") + tk.MustExec("insert into t1 values(0, 0), (1, 2), (1, 3), (3, 4)") + tk.MustQuery("select /*+ INL_JOIN(t1) */ * from t join t1 on t.a=t1.a order by t.b").Sort().Check(testkit.Rows("1 3 1 2", "1 3 1 3", "3 1 3 4")) + tk.MustQuery("select /*+ INL_HASH_JOIN(t1) */ * from t join t1 on t.a=t1.a order by t.b").Sort().Check(testkit.Rows("1 3 1 2", "1 3 1 3", "3 1 3 4")) + tk.MustQuery("select /*+ INL_MERGE_JOIN(t1) */ * from t join t1 on t.a=t1.a order by t.b").Sort().Check(testkit.Rows("1 3 1 2", "1 3 1 3", "3 1 3 4")) + tk.MustQuery("select /*+ INL_JOIN(t) */ t.a, t.b from t join t1 on t.a=t1.a where t1.b = 4 limit 1").Check(testkit.Rows("3 1")) + tk.MustQuery("select /*+ INL_HASH_JOIN(t) */ t.a, t.b from t join t1 on t.a=t1.a where t1.b = 4 limit 1").Check(testkit.Rows("3 1")) + tk.MustQuery("select /*+ INL_MERGE_JOIN(t) */ t.a, t.b from t join t1 on t.a=t1.a where t1.b = 4 limit 1").Check(testkit.Rows("3 1")) + tk.MustQuery("select /*+ INL_JOIN(t, t1) */ * from t right join t1 on t.a=t1.a order by t.b").Sort().Check(testkit.Rows("1 3 1 2", "1 3 1 3", "3 1 3 4", " 0 0")) + tk.MustQuery("select /*+ INL_HASH_JOIN(t, t1) */ * from t right join t1 on t.a=t1.a order by t.b").Sort().Check(testkit.Rows("1 3 1 2", "1 3 1 3", "3 1 3 4", " 0 0")) + tk.MustQuery("select /*+ INL_MERGE_JOIN(t, t1) */ * from t right join t1 on t.a=t1.a order by t.b").Sort().Check(testkit.Rows("1 3 1 2", "1 3 1 3", "3 1 3 4", " 0 0")) + + // test index join bug + tk.MustExec("drop table if exists t, t1") + tk.MustExec("create table t(a int, b int, key s1(a,b), key s2(b))") + tk.MustExec("create table t1(a int)") + tk.MustExec("insert into t values(1,2), (5,3), (6,4)") + tk.MustExec("insert into t1 values(1), (2), (3)") + tk.MustQuery("select /*+ INL_JOIN(t) */ t1.a from t1, t where t.a = 5 and t.b = t1.a").Check(testkit.Rows("3")) + tk.MustQuery("select /*+ INL_HASH_JOIN(t) */ t1.a from t1, t where t.a = 5 and t.b = t1.a").Check(testkit.Rows("3")) + tk.MustQuery("select /*+ INL_MERGE_JOIN(t) */ t1.a from t1, t where t.a = 5 and t.b = t1.a").Check(testkit.Rows("3")) + + // This case is for testing: + // when the main thread calls Executor.Close() while the out data fetch worker and join workers are still working, + // we need to stop the goroutines as soon as possible to avoid unexpected error. 
+ tk.MustExec("set @@tidb_hash_join_concurrency=5") + tk.MustExec("drop table if exists t;") + tk.MustExec("create table t(a int)") + for i := 0; i < 100; i++ { + tk.MustExec("insert into t value(1)") + } + result = tk.MustQuery("select /*+ TIDB_HJ(s, r) */ * from t as s join t as r on s.a = r.a limit 1;") + result.Check(testkit.Rows("1 1")) + + tk.MustExec("drop table if exists user, aa, bb") + tk.MustExec("create table aa(id int)") + tk.MustExec("insert into aa values(1)") + tk.MustExec("create table bb(id int)") + tk.MustExec("insert into bb values(1)") + tk.MustExec("create table user(id int, name varchar(20))") + tk.MustExec("insert into user values(1, 'a'), (2, 'b')") + tk.MustQuery("select user.id,user.name from user left join aa on aa.id = user.id left join bb on aa.id = bb.id where bb.id < 10;").Check(testkit.Rows("1 a")) + + tk.MustExec("drop table if exists t1, t2, t3, t4") + tk.MustExec("create table t1(a int, b int)") + tk.MustExec("create table t2(a int, b int)") + tk.MustExec("create table t3(a int, b int)") + tk.MustExec("create table t4(a int, b int)") + tk.MustExec("insert into t1 values(1, 1)") + tk.MustExec("insert into t2 values(1, 1)") + tk.MustExec("insert into t3 values(1, 1)") + tk.MustExec("insert into t4 values(1, 1)") + tk.MustQuery("select min(t2.b) from t1 right join t2 on t2.a=t1.a right join t3 on t2.a=t3.a left join t4 on t3.a=t4.a").Check(testkit.Rows("1")) +} + +func (s *testSuiteJoin3) TestMultiJoin(c *C) { + tk := testkit.NewTestKit(c, s.store) + tk.MustExec("use test") + tk.MustExec("create table t35(a35 int primary key, b35 int, x35 int)") + tk.MustExec("create table t40(a40 int primary key, b40 int, x40 int)") + tk.MustExec("create table t14(a14 int primary key, b14 int, x14 int)") + tk.MustExec("create table t42(a42 int primary key, b42 int, x42 int)") + tk.MustExec("create table t15(a15 int primary key, b15 int, x15 int)") + tk.MustExec("create table t7(a7 int primary key, b7 int, x7 int)") + tk.MustExec("create table t64(a64 int primary key, b64 int, x64 int)") + tk.MustExec("create table t19(a19 int primary key, b19 int, x19 int)") + tk.MustExec("create table t9(a9 int primary key, b9 int, x9 int)") + tk.MustExec("create table t8(a8 int primary key, b8 int, x8 int)") + tk.MustExec("create table t57(a57 int primary key, b57 int, x57 int)") + tk.MustExec("create table t37(a37 int primary key, b37 int, x37 int)") + tk.MustExec("create table t44(a44 int primary key, b44 int, x44 int)") + tk.MustExec("create table t38(a38 int primary key, b38 int, x38 int)") + tk.MustExec("create table t18(a18 int primary key, b18 int, x18 int)") + tk.MustExec("create table t62(a62 int primary key, b62 int, x62 int)") + tk.MustExec("create table t4(a4 int primary key, b4 int, x4 int)") + tk.MustExec("create table t48(a48 int primary key, b48 int, x48 int)") + tk.MustExec("create table t31(a31 int primary key, b31 int, x31 int)") + tk.MustExec("create table t16(a16 int primary key, b16 int, x16 int)") + tk.MustExec("create table t12(a12 int primary key, b12 int, x12 int)") + tk.MustExec("insert into t35 values(1,1,1)") + tk.MustExec("insert into t40 values(1,1,1)") + tk.MustExec("insert into t14 values(1,1,1)") + tk.MustExec("insert into t42 values(1,1,1)") + tk.MustExec("insert into t15 values(1,1,1)") + tk.MustExec("insert into t7 values(1,1,1)") + tk.MustExec("insert into t64 values(1,1,1)") + tk.MustExec("insert into t19 values(1,1,1)") + tk.MustExec("insert into t9 values(1,1,1)") + tk.MustExec("insert into t8 values(1,1,1)") + tk.MustExec("insert into t57 
values(1,1,1)") + tk.MustExec("insert into t37 values(1,1,1)") + tk.MustExec("insert into t44 values(1,1,1)") + tk.MustExec("insert into t38 values(1,1,1)") + tk.MustExec("insert into t18 values(1,1,1)") + tk.MustExec("insert into t62 values(1,1,1)") + tk.MustExec("insert into t4 values(1,1,1)") + tk.MustExec("insert into t48 values(1,1,1)") + tk.MustExec("insert into t31 values(1,1,1)") + tk.MustExec("insert into t16 values(1,1,1)") + tk.MustExec("insert into t12 values(1,1,1)") + tk.MustExec("insert into t35 values(7,7,7)") + tk.MustExec("insert into t40 values(7,7,7)") + tk.MustExec("insert into t14 values(7,7,7)") + tk.MustExec("insert into t42 values(7,7,7)") + tk.MustExec("insert into t15 values(7,7,7)") + tk.MustExec("insert into t7 values(7,7,7)") + tk.MustExec("insert into t64 values(7,7,7)") + tk.MustExec("insert into t19 values(7,7,7)") + tk.MustExec("insert into t9 values(7,7,7)") + tk.MustExec("insert into t8 values(7,7,7)") + tk.MustExec("insert into t57 values(7,7,7)") + tk.MustExec("insert into t37 values(7,7,7)") + tk.MustExec("insert into t44 values(7,7,7)") + tk.MustExec("insert into t38 values(7,7,7)") + tk.MustExec("insert into t18 values(7,7,7)") + tk.MustExec("insert into t62 values(7,7,7)") + tk.MustExec("insert into t4 values(7,7,7)") + tk.MustExec("insert into t48 values(7,7,7)") + tk.MustExec("insert into t31 values(7,7,7)") + tk.MustExec("insert into t16 values(7,7,7)") + tk.MustExec("insert into t12 values(7,7,7)") + result := tk.MustQuery(`SELECT x4,x8,x38,x44,x31,x9,x57,x48,x19,x40,x14,x12,x7,x64,x37,x18,x62,x35,x42,x15,x16 FROM +t35,t40,t14,t42,t15,t7,t64,t19,t9,t8,t57,t37,t44,t38,t18,t62,t4,t48,t31,t16,t12 +WHERE b48=a57 +AND a4=b19 +AND a14=b16 +AND b37=a48 +AND a40=b42 +AND a31=7 +AND a15=b40 +AND a38=b8 +AND b15=a31 +AND b64=a18 +AND b12=a44 +AND b7=a8 +AND b35=a16 +AND a12=b14 +AND a64=b57 +AND b62=a7 +AND a35=b38 +AND b9=a19 +AND a62=b18 +AND b4=a37 +AND b44=a42`) + result.Check(testkit.Rows("7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7")) +} + +func (s *testSuiteJoin1) TestJoinLeak(c *C) { + tk := testkit.NewTestKit(c, s.store) + tk.MustExec("set @@tidb_hash_join_concurrency=1") + tk.MustExec("use test") + tk.MustExec("drop table if exists t") + tk.MustExec("create table t (d int)") + tk.MustExec("begin") + for i := 0; i < 1002; i++ { + tk.MustExec("insert t values (1)") + } + tk.MustExec("commit") + result, err := tk.Exec("select * from t t1 left join (select 1) t2 on 1") + c.Assert(err, IsNil) + req := result.NewChunk() + err = result.Next(context.Background(), req) + c.Assert(err, IsNil) + time.Sleep(time.Millisecond) + result.Close() + + tk.MustExec("set @@tidb_hash_join_concurrency=5") +} + +func (s *testSuiteJoin1) TestHashJoinExecEncodeDecodeRow(c *C) { + tk := testkit.NewTestKit(c, s.store) + tk.MustExec("use test") + tk.MustExec("drop table if exists t1") + tk.MustExec("drop table if exists t2") + tk.MustExec("create table t1 (id int)") + tk.MustExec("create table t2 (id int, name varchar(255), ts varchar(60))") + tk.MustExec("insert into t1 values (1)") + tk.MustExec("insert into t2 values (1, 'xxx', '2003-06-09 10:51:26')") + result := tk.MustQuery("select ts from t1 inner join t2 where t2.name = 'xxx'") + result.Check(testkit.Rows("2003-06-09 10:51:26")) +} + +func (s *testSuiteJoin1) TestIssue5255(c *C) { + tk := testkit.NewTestKit(c, s.store) + tk.MustExec("use test") + tk.MustExec("drop table if exists t1, t2") + tk.MustExec("create table t1(a int, b varchar(64), c float, primary key(a, b))") + tk.MustExec("create table t2(a int primary 
key)") + tk.MustExec("insert into t1 values(1, '2017-11-29', 2.2)") + tk.MustExec("insert into t2 values(1)") + tk.MustQuery("select /*+ INL_JOIN(t1) */ * from t1 join t2 on t1.a=t2.a").Check(testkit.Rows("1 2017-11-29 2.2 1")) + tk.MustQuery("select /*+ INL_HASH_JOIN(t1) */ * from t1 join t2 on t1.a=t2.a").Check(testkit.Rows("1 2017-11-29 2.2 1")) + tk.MustQuery("select /*+ INL_MERGE_JOIN(t1) */ * from t1 join t2 on t1.a=t2.a").Check(testkit.Rows("1 2017-11-29 2.2 1")) +} + +func (s *testSuiteJoin1) TestIssue5278(c *C) { + tk := testkit.NewTestKit(c, s.store) + tk.MustExec("use test") + tk.MustExec("drop table if exists t, tt") + tk.MustExec("create table t(a int, b int)") + tk.MustExec("create table tt(a varchar(10), b int)") + tk.MustExec("insert into t values(1, 1)") + tk.MustQuery("select * from t left join tt on t.a=tt.a left join t ttt on t.a=ttt.a").Check(testkit.Rows("1 1 1 1")) +} + +func (s *testSuiteJoin3) TestMergejoinOrder(c *C) { + tk := testkit.NewTestKit(c, s.store) + tk.MustExec("use test") + tk.MustExec("drop table if exists t1, t2;") + tk.MustExec("create table t1(a bigint primary key, b bigint);") + tk.MustExec("create table t2(a bigint primary key, b bigint);") + tk.MustExec("insert into t1 values(1, 100), (2, 100), (3, 100), (4, 100), (5, 100);") + tk.MustExec("insert into t2 select a*100, b*100 from t1;") + + tk.MustQuery("explain select /*+ TIDB_SMJ(t2) */ * from t1 left outer join t2 on t1.a=t2.a and t1.a!=3 order by t1.a;").Check(testkit.Rows( + "MergeJoin_20 10000.00 root left outer join, left key:test.t1.a, right key:test.t2.a, left cond:[ne(test.t1.a, 3)]", + "├─TableReader_12 10000.00 root data:TableScan_11", + "│ └─TableScan_11 10000.00 cop table:t1, range:[-inf,+inf], keep order:true, stats:pseudo", + "└─TableReader_14 6666.67 root data:TableScan_13", + " └─TableScan_13 6666.67 cop table:t2, range:[-inf,3), (3,+inf], keep order:true, stats:pseudo", + )) + + tk.MustExec("set @@tidb_init_chunk_size=1") + tk.MustQuery("select /*+ TIDB_SMJ(t2) */ * from t1 left outer join t2 on t1.a=t2.a and t1.a!=3 order by t1.a;").Check(testkit.Rows( + "1 100 ", + "2 100 ", + "3 100 ", + "4 100 ", + "5 100 ", + )) + + tk.MustExec(`drop table if exists t;`) + tk.MustExec(`create table t(a bigint, b bigint, index idx_1(a,b));`) + tk.MustExec(`insert into t values(1, 1), (1, 2), (2, 1), (2, 2);`) + tk.MustQuery(`select /*+ TIDB_SMJ(t1, t2) */ * from t t1 join t t2 on t1.b = t2.b and t1.a=t2.a;`).Check(testkit.Rows( + `1 1 1 1`, + `1 2 1 2`, + `2 1 2 1`, + `2 2 2 2`, + )) +} + +func (s *testSuiteJoin1) TestEmbeddedOuterJoin(c *C) { + tk := testkit.NewTestKit(c, s.store) + tk.MustExec("use test") + tk.MustExec("drop table if exists t1, t2") + tk.MustExec("create table t1(a int, b int)") + tk.MustExec("create table t2(a int, b int)") + tk.MustExec("insert into t1 values(1, 1)") + tk.MustQuery("select * from (t1 left join t2 on t1.a = t2.a) left join (t2 t3 left join t2 t4 on t3.a = t4.a) on t2.b = 1"). 
+ Check(testkit.Rows("1 1 ")) +} + +func (s *testSuiteJoin1) TestInjectProjOnTopN(c *C) { + tk := testkit.NewTestKit(c, s.store) + tk.MustExec("use test") + tk.MustExec("drop table if exists t1") + tk.MustExec("drop table if exists t2") + tk.MustExec("create table t1(a bigint, b bigint)") + tk.MustExec("create table t2(a bigint, b bigint)") + tk.MustExec("insert into t1 values(1, 1)") + tk.MustQuery("select t1.a+t1.b as result from t1 left join t2 on 1 = 0 order by result limit 20;").Check(testkit.Rows( + "2", + )) +} diff --git a/executor/joiner.go b/executor/joiner.go new file mode 100644 index 0000000..f0250d5 --- /dev/null +++ b/executor/joiner.go @@ -0,0 +1,410 @@ +// Copyright 2017 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package executor + +import ( + "github.com/pingcap/tidb/expression" + plannercore "github.com/pingcap/tidb/planner/core" + "github.com/pingcap/tidb/sessionctx" + "github.com/pingcap/tidb/types" + "github.com/pingcap/tidb/util/chunk" +) + +var ( + _ joiner = &leftOuterJoiner{} + _ joiner = &rightOuterJoiner{} + _ joiner = &innerJoiner{} +) + +// joiner is used to generate join results according to the join type. +// A typical instruction flow is: +// +// hasMatch, hasNull := false, false +// for innerIter.Current() != innerIter.End() { +// matched, isNull, err := j.tryToMatchInners(outer, innerIter, chk) +// // handle err +// hasMatch = hasMatch || matched +// hasNull = hasNull || isNull +// } +// if !hasMatch { +// j.onMissMatch(hasNull, outer, chk) +// } +// +// NOTE: This interface is **not** thread-safe. +type joiner interface { + // tryToMatchInners tries to join an outer row with a batch of inner rows. When + // 'inners.Len != 0' but all the joined rows are filtered, the outer row is + // considered unmatched. Otherwise, the outer row is matched and some joined + // rows are appended to `chk`. The size of `chk` is limited to MaxChunkSize. + // Note that when the outer row is considered unmatched, we need to differentiate + // whether the join conditions return null or false, because that matters for + // AntiSemiJoin/LeftOuterSemiJoin/AntiLeftOuterSemiJoin, and the result is reflected + // by the second return value; for other join types, we always return false. + // + // NOTE: Callers need to call this function multiple times to consume all + // the inner rows for an outer row, and decide whether the outer row can be + // matched with at lease one inner row. + tryToMatchInners(outer chunk.Row, inners chunk.Iterator, chk *chunk.Chunk) (matched bool, isNull bool, err error) + + // tryToMatchOuters tries to join a batch of outer rows with one inner row. + // It's used when the join is an outer join and the hash table is built + // using the outer side. + tryToMatchOuters(outer chunk.Iterator, inner chunk.Row, chk *chunk.Chunk, outerRowStatus []outerRowStatusFlag) (_ []outerRowStatusFlag, err error) + + // onMissMatch operates on the unmatched outer row according to the join + // type. An outer row can be considered miss matched if: + // 1. it can not pass the filter on the outer table side. + // 2. 
there is no inner row with the same join key. + // 3. all the joined rows can not pass the filter on the join result. + // + // On these conditions, the caller calls this function to handle the + // unmatched outer rows according to the current join type: + // 1. 'LeftOuterJoin': concats the unmatched outer row with a row of NULLs + // and appends it to the result buffer. + // 2. 'RightOuterJoin': concats the unmatched outer row with a row of NULLs + // and appends it to the result buffer. + // 3. 'InnerJoin': ignores the unmatched outer row. + onMissMatch(outer chunk.Row, chk *chunk.Chunk) + + // Clone deep copies a joiner. + Clone() joiner +} + +func newJoiner(ctx sessionctx.Context, joinType plannercore.JoinType, + outerIsRight bool, defaultInner []types.Datum, filter []expression.Expression, + lhsColTypes, rhsColTypes []*types.FieldType) joiner { + base := baseJoiner{ + ctx: ctx, + conditions: filter, + outerIsRight: outerIsRight, + maxChunkSize: ctx.GetSessionVars().MaxChunkSize, + } + colTypes := make([]*types.FieldType, 0, len(lhsColTypes)+len(rhsColTypes)) + colTypes = append(colTypes, lhsColTypes...) + colTypes = append(colTypes, rhsColTypes...) + base.selected = make([]bool, 0, chunk.InitialCapacity) + base.isNull = make([]bool, 0, chunk.InitialCapacity) + if joinType == plannercore.LeftOuterJoin || joinType == plannercore.RightOuterJoin { + innerColTypes := lhsColTypes + if !outerIsRight { + innerColTypes = rhsColTypes + } + base.initDefaultInner(innerColTypes, defaultInner) + } + switch joinType { + case plannercore.LeftOuterJoin: + base.chk = chunk.NewChunkWithCapacity(colTypes, ctx.GetSessionVars().MaxChunkSize) + return &leftOuterJoiner{base} + case plannercore.RightOuterJoin: + base.chk = chunk.NewChunkWithCapacity(colTypes, ctx.GetSessionVars().MaxChunkSize) + return &rightOuterJoiner{base} + case plannercore.InnerJoin: + base.chk = chunk.NewChunkWithCapacity(colTypes, ctx.GetSessionVars().MaxChunkSize) + return &innerJoiner{base} + } + panic("unsupported join type in func newJoiner()") +} + +type outerRowStatusFlag byte + +const ( + outerRowUnmatched outerRowStatusFlag = iota + outerRowMatched + outerRowHasNull +) + +type baseJoiner struct { + ctx sessionctx.Context + conditions []expression.Expression + defaultInner chunk.Row + outerIsRight bool + chk *chunk.Chunk + shallowRow chunk.MutRow + selected []bool + isNull []bool + maxChunkSize int +} + +func (j *baseJoiner) initDefaultInner(innerTypes []*types.FieldType, defaultInner []types.Datum) { + mutableRow := chunk.MutRowFromTypes(innerTypes) + mutableRow.SetDatums(defaultInner[:len(innerTypes)]...) + j.defaultInner = mutableRow.ToRow() +} + +func (j *baseJoiner) makeJoinRowToChunk(chk *chunk.Chunk, lhs, rhs chunk.Row) { + // Call AppendRow() first to increment the virtual rows. + // Fix: https://github.com/pingcap/tidb/issues/5771 + chk.AppendRow(lhs) + chk.AppendPartialRow(lhs.Len(), rhs) +} + +// filter is used to filter the result constructed by tryToMatchInners, the result is +// built by one outer row and multiple inner rows. The returned bool value +// indicates whether the outer row matches any inner rows. +func (j *baseJoiner) filter(input, output *chunk.Chunk, outerColsLen int) (bool, error) { + var err error + j.selected, err = expression.VectorizedFilter(j.ctx, j.conditions, chunk.NewIterator4Chunk(input), j.selected) + if err != nil { + return false, err + } + // Batch copies selected rows to output chunk. 
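+	// For example (illustrative layout, not taken from a concrete query): if
+	// the outer table sits on the left with 2 columns and the inner table has
+	// 3 columns, every row of `input` is laid out as
+	//
+	//	outer.c0 | outer.c1 | inner.c0 | inner.c1 | inner.c2
+	//
+	// so with outerIsRight == false the code below yields
+	// innerColOffset = 2 (= outerColsLen) and outerColOffset = 0; when the
+	// outer table is on the right, the two offsets are simply mirrored.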
+ innerColOffset, outerColOffset := 0, input.NumCols()-outerColsLen + if !j.outerIsRight { + innerColOffset, outerColOffset = outerColsLen, 0 + } + return chunk.CopySelectedJoinRows(input, innerColOffset, outerColOffset, j.selected, output) +} + +// filterAndCheckOuterRowStatus is used to filter the result constructed by +// tryToMatchOuters, the result is built by multiple outer rows and one inner +// row. The returned outerRowStatusFlag slice value indicates the status of +// each outer row (matched/unmatched/hasNull). +func (j *baseJoiner) filterAndCheckOuterRowStatus(input, output *chunk.Chunk, innerColsLen int, outerRowStatus []outerRowStatusFlag) (_ []outerRowStatusFlag, _ error) { + var err error + j.selected, j.isNull, err = expression.VectorizedFilterConsiderNull(j.ctx, j.conditions, chunk.NewIterator4Chunk(input), j.selected, j.isNull) + if err != nil { + return nil, err + } + for i := 0; i < len(j.selected); i++ { + if j.isNull[i] { + outerRowStatus[i] = outerRowHasNull + } else if !j.selected[i] { + outerRowStatus[i] = outerRowUnmatched + } + } + + // Batch copies selected rows to output chunk. + innerColOffset, outerColOffset := 0, innerColsLen + if !j.outerIsRight { + innerColOffset, outerColOffset = input.NumCols()-innerColsLen, 0 + } + + _, err = chunk.CopySelectedJoinRows(input, innerColOffset, outerColOffset, j.selected, output) + return outerRowStatus, err +} + +func (j *baseJoiner) Clone() baseJoiner { + base := baseJoiner{ + ctx: j.ctx, + conditions: make([]expression.Expression, 0, len(j.conditions)), + outerIsRight: j.outerIsRight, + maxChunkSize: j.maxChunkSize, + selected: make([]bool, 0, len(j.selected)), + isNull: make([]bool, 0, len(j.isNull)), + } + for _, con := range j.conditions { + base.conditions = append(base.conditions, con.Clone()) + } + if j.chk != nil { + base.chk = j.chk.CopyConstruct() + } else { + base.shallowRow = chunk.MutRow(j.shallowRow.ToRow()) + } + if !j.defaultInner.IsEmpty() { + base.defaultInner = j.defaultInner.CopyConstruct() + } + return base +} + +type leftOuterJoiner struct { + baseJoiner +} + +// tryToMatchInners implements joiner interface. 
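+// As a rough, made-up example of the contract: for an outer row (1, "a") and
+// inner rows {(1, 10), (1, 20)}, a successful match appends (1, "a", 1, 10)
+// and (1, "a", 1, 20) to `chk`; if every joined row is rejected by the other
+// conditions, the outer row counts as unmatched and the caller later invokes
+// onMissMatch, which pads it with the defaultInner row (typically all NULLs).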
+func (j *leftOuterJoiner) tryToMatchInners(outer chunk.Row, inners chunk.Iterator, chk *chunk.Chunk) (matched bool, hasNull bool, err error) { + if inners.Len() == 0 { + return false, false, nil + } + j.chk.Reset() + chkForJoin := j.chk + if len(j.conditions) == 0 { + chkForJoin = chk + } + + numToAppend := chk.RequiredRows() - chk.NumRows() + for ; inners.Current() != inners.End() && numToAppend > 0; numToAppend-- { + j.makeJoinRowToChunk(chkForJoin, outer, inners.Current()) + inners.Next() + } + if len(j.conditions) == 0 { + return true, false, nil + } + + // reach here, chkForJoin is j.chk + matched, err = j.filter(chkForJoin, chk, outer.Len()) + if err != nil { + return false, false, err + } + return matched, false, nil +} + +func (j *leftOuterJoiner) tryToMatchOuters(outers chunk.Iterator, inner chunk.Row, chk *chunk.Chunk, outerRowStatus []outerRowStatusFlag) (_ []outerRowStatusFlag, err error) { + j.chk.Reset() + chkForJoin := j.chk + if len(j.conditions) == 0 { + chkForJoin = chk + } + + outer, numToAppend, cursor := outers.Current(), chk.RequiredRows()-chk.NumRows(), 0 + for ; outer != outers.End() && cursor < numToAppend; outer, cursor = outers.Next(), cursor+1 { + j.makeJoinRowToChunk(chkForJoin, outer, inner) + } + outerRowStatus = outerRowStatus[:0] + for i := 0; i < cursor; i++ { + outerRowStatus = append(outerRowStatus, outerRowMatched) + } + if len(j.conditions) == 0 { + return outerRowStatus, nil + } + // reach here, chkForJoin is j.chk + return j.filterAndCheckOuterRowStatus(chkForJoin, chk, inner.Len(), outerRowStatus) +} + +func (j *leftOuterJoiner) onMissMatch(outer chunk.Row, chk *chunk.Chunk) { + chk.AppendPartialRow(0, outer) + chk.AppendPartialRow(outer.Len(), j.defaultInner) +} + +func (j *leftOuterJoiner) Clone() joiner { + return &leftOuterJoiner{baseJoiner: j.baseJoiner.Clone()} +} + +type rightOuterJoiner struct { + baseJoiner +} + +// tryToMatchInners implements joiner interface. 
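+// Mirror image of leftOuterJoiner: here the inner (left-hand side) columns
+// come first, so makeJoinRowToChunk is called as (inner, outer) below and
+// onMissMatch prepends defaultInner, e.g. an unmatched outer row (1, "a")
+// becomes (NULL, NULL, 1, "a") (illustrative values).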
+func (j *rightOuterJoiner) tryToMatchInners(outer chunk.Row, inners chunk.Iterator, chk *chunk.Chunk) (matched bool, hasNull bool, err error) { + if inners.Len() == 0 { + return false, false, nil + } + + j.chk.Reset() + chkForJoin := j.chk + if len(j.conditions) == 0 { + chkForJoin = chk + } + + numToAppend := chk.RequiredRows() - chk.NumRows() + for ; inners.Current() != inners.End() && numToAppend > 0; numToAppend-- { + j.makeJoinRowToChunk(chkForJoin, inners.Current(), outer) + inners.Next() + } + if len(j.conditions) == 0 { + return true, false, nil + } + + matched, err = j.filter(chkForJoin, chk, outer.Len()) + if err != nil { + return false, false, err + } + return matched, false, nil +} + +func (j *rightOuterJoiner) tryToMatchOuters(outers chunk.Iterator, inner chunk.Row, chk *chunk.Chunk, outerRowStatus []outerRowStatusFlag) (_ []outerRowStatusFlag, err error) { + j.chk.Reset() + chkForJoin := j.chk + if len(j.conditions) == 0 { + chkForJoin = chk + } + + outer, numToAppend, cursor := outers.Current(), chk.RequiredRows()-chk.NumRows(), 0 + for ; outer != outers.End() && cursor < numToAppend; outer, cursor = outers.Next(), cursor+1 { + j.makeJoinRowToChunk(chkForJoin, inner, outer) + } + outerRowStatus = outerRowStatus[:0] + for i := 0; i < cursor; i++ { + outerRowStatus = append(outerRowStatus, outerRowMatched) + } + if len(j.conditions) == 0 { + return outerRowStatus, nil + } + // reach here, chkForJoin is j.chk + return j.filterAndCheckOuterRowStatus(chkForJoin, chk, inner.Len(), outerRowStatus) +} + +func (j *rightOuterJoiner) onMissMatch(outer chunk.Row, chk *chunk.Chunk) { + chk.AppendPartialRow(0, j.defaultInner) + chk.AppendPartialRow(j.defaultInner.Len(), outer) +} + +func (j *rightOuterJoiner) Clone() joiner { + return &rightOuterJoiner{baseJoiner: j.baseJoiner.Clone()} +} + +type innerJoiner struct { + baseJoiner +} + +// tryToMatchInners implements joiner interface. 
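+// For the inner join nothing is emitted for unmatched outer rows, which is
+// why innerJoiner.onMissMatch further down is intentionally empty; only rows
+// that pass the join conditions ever reach the result chunk.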
+func (j *innerJoiner) tryToMatchInners(outer chunk.Row, inners chunk.Iterator, chk *chunk.Chunk) (matched bool, hasNull bool, err error) { + if inners.Len() == 0 { + return false, false, nil + } + j.chk.Reset() + chkForJoin := j.chk + if len(j.conditions) == 0 { + chkForJoin = chk + } + inner, numToAppend := inners.Current(), chk.RequiredRows()-chk.NumRows() + for ; inner != inners.End() && numToAppend > 0; inner, numToAppend = inners.Next(), numToAppend-1 { + if j.outerIsRight { + j.makeJoinRowToChunk(chkForJoin, inner, outer) + } else { + j.makeJoinRowToChunk(chkForJoin, outer, inner) + } + } + if len(j.conditions) == 0 { + return true, false, nil + } + + // reach here, chkForJoin is j.chk + matched, err = j.filter(chkForJoin, chk, outer.Len()) + if err != nil { + return false, false, err + } + return matched, false, nil +} + +func (j *innerJoiner) tryToMatchOuters(outers chunk.Iterator, inner chunk.Row, chk *chunk.Chunk, outerRowStatus []outerRowStatusFlag) (_ []outerRowStatusFlag, err error) { + j.chk.Reset() + chkForJoin := j.chk + if len(j.conditions) == 0 { + chkForJoin = chk + } + outer, numToAppend, cursor := outers.Current(), chk.RequiredRows()-chk.NumRows(), 0 + for ; outer != outers.End() && cursor < numToAppend; outer, cursor = outers.Next(), cursor+1 { + if j.outerIsRight { + j.makeJoinRowToChunk(chkForJoin, inner, outer) + } else { + j.makeJoinRowToChunk(chkForJoin, outer, inner) + } + } + outerRowStatus = outerRowStatus[:0] + for i := 0; i < cursor; i++ { + outerRowStatus = append(outerRowStatus, outerRowMatched) + } + if len(j.conditions) == 0 { + return outerRowStatus, nil + } + // reach here, chkForJoin is j.chk + return j.filterAndCheckOuterRowStatus(chkForJoin, chk, inner.Len(), outerRowStatus) +} + +func (j *innerJoiner) onMissMatch(outer chunk.Row, chk *chunk.Chunk) { +} + +func (j *innerJoiner) Clone() joiner { + return &innerJoiner{baseJoiner: j.baseJoiner.Clone()} +} diff --git a/executor/mem_reader.go b/executor/mem_reader.go new file mode 100644 index 0000000..76a0d1e --- /dev/null +++ b/executor/mem_reader.go @@ -0,0 +1,398 @@ +// Copyright 2019 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package executor + +import ( + "github.com/pingcap/errors" + "github.com/pingcap/tidb/distsql" + "github.com/pingcap/tidb/expression" + "github.com/pingcap/tidb/kv" + "github.com/pingcap/tidb/parser/model" + "github.com/pingcap/tidb/parser/mysql" + "github.com/pingcap/tidb/sessionctx" + "github.com/pingcap/tidb/sessionctx/stmtctx" + "github.com/pingcap/tidb/table" + "github.com/pingcap/tidb/tablecodec" + "github.com/pingcap/tidb/types" + "github.com/pingcap/tidb/util/chunk" + "github.com/pingcap/tidb/util/rowcodec" +) + +type memIndexReader struct { + ctx sessionctx.Context + index *model.IndexInfo + table *model.TableInfo + kvRanges []kv.KeyRange + desc bool + conditions []expression.Expression + addedRows [][]types.Datum + addedRowsLen int + retFieldTypes []*types.FieldType + outputOffset []int + // belowHandleIndex is the handle's position of the below scan plan. 
+ belowHandleIndex int +} + +func buildMemIndexReader(us *UnionScanExec, idxReader *IndexReaderExecutor) *memIndexReader { + kvRanges := idxReader.kvRanges + outputOffset := make([]int, 0, len(us.columns)) + for _, col := range idxReader.outputColumns { + outputOffset = append(outputOffset, col.Index) + } + return &memIndexReader{ + ctx: us.ctx, + index: idxReader.index, + table: idxReader.table.Meta(), + kvRanges: kvRanges, + desc: us.desc, + conditions: us.conditions, + addedRows: make([][]types.Datum, 0, len(us.dirty.addedRows)), + retFieldTypes: retTypes(us), + outputOffset: outputOffset, + belowHandleIndex: us.belowHandleIndex, + } +} + +func (m *memIndexReader) getMemRows() ([][]types.Datum, error) { + tps := make([]*types.FieldType, 0, len(m.index.Columns)+1) + cols := m.table.Columns + for _, col := range m.index.Columns { + tps = append(tps, &cols[col.Offset].FieldType) + } + if m.table.PKIsHandle { + for _, col := range m.table.Columns { + if mysql.HasPriKeyFlag(col.Flag) { + tps = append(tps, &col.FieldType) + break + } + } + } else { + // ExtraHandle Column tp. + tps = append(tps, types.NewFieldType(mysql.TypeLonglong)) + } + + mutableRow := chunk.MutRowFromTypes(m.retFieldTypes) + err := iterTxnMemBuffer(m.ctx, m.kvRanges, func(key, value []byte) error { + data, err := m.decodeIndexKeyValue(key, value, tps) + if err != nil { + return err + } + + mutableRow.SetDatums(data...) + matched, _, err := expression.EvalBool(m.ctx, m.conditions, mutableRow.ToRow()) + if err != nil || !matched { + return err + } + m.addedRows = append(m.addedRows, data) + return nil + }) + + if err != nil { + return nil, err + } + // TODO: After refine `IterReverse`, remove below logic and use `IterReverse` when do reverse scan. + if m.desc { + reverseDatumSlice(m.addedRows) + } + return m.addedRows, nil +} + +func (m *memIndexReader) decodeIndexKeyValue(key, value []byte, tps []*types.FieldType) ([]types.Datum, error) { + pkStatus := tablecodec.PrimaryKeyIsSigned + if mysql.HasUnsignedFlag(tps[len(tps)-1].Flag) { + pkStatus = tablecodec.PrimaryKeyIsUnsigned + } + values, err := tablecodec.DecodeIndexKV(key, value, len(m.index.Columns), pkStatus) + if err != nil { + return nil, errors.Trace(err) + } + + ds := make([]types.Datum, 0, len(m.outputOffset)) + for _, offset := range m.outputOffset { + d, err := tablecodec.DecodeColumnValue(values[offset], tps[offset], m.ctx.GetSessionVars().TimeZone) + if err != nil { + return nil, err + } + ds = append(ds, d) + } + return ds, nil +} + +type memTableReader struct { + ctx sessionctx.Context + table *model.TableInfo + columns []*model.ColumnInfo + kvRanges []kv.KeyRange + desc bool + conditions []expression.Expression + addedRows [][]types.Datum + retFieldTypes []*types.FieldType + colIDs map[int64]int + buffer allocBuf +} + +type allocBuf struct { + // cache for decode handle. // cache for decode handle. 
+ handleBytes []byte + rd *rowcodec.BytesDecoder +} + +func buildMemTableReader(us *UnionScanExec, tblReader *TableReaderExecutor) *memTableReader { + colIDs := make(map[int64]int) + for i, col := range us.columns { + colIDs[col.ID] = i + } + + colInfo := make([]rowcodec.ColInfo, 0, len(us.columns)) + for i := range us.columns { + col := us.columns[i] + colInfo = append(colInfo, rowcodec.ColInfo{ + ID: col.ID, + Tp: int32(col.Tp), + Flag: int32(col.Flag), + IsPKHandle: us.table.Meta().PKIsHandle && mysql.HasPriKeyFlag(col.Flag), + }) + } + + rd := rowcodec.NewByteDecoder(colInfo, -1, nil, nil) + + return &memTableReader{ + ctx: us.ctx, + table: us.table.Meta(), + columns: us.columns, + kvRanges: tblReader.kvRanges, + desc: us.desc, + conditions: us.conditions, + addedRows: make([][]types.Datum, 0, len(us.dirty.addedRows)), + retFieldTypes: retTypes(us), + colIDs: colIDs, + buffer: allocBuf{ + handleBytes: make([]byte, 0, 16), + rd: rd, + }, + } +} + +// TODO: Try to make memXXXReader lazy, There is no need to decode many rows when parent operator only need 1 row. +func (m *memTableReader) getMemRows() ([][]types.Datum, error) { + mutableRow := chunk.MutRowFromTypes(m.retFieldTypes) + err := iterTxnMemBuffer(m.ctx, m.kvRanges, func(key, value []byte) error { + row, err := m.decodeRecordKeyValue(key, value) + if err != nil { + return err + } + + mutableRow.SetDatums(row...) + matched, _, err := expression.EvalBool(m.ctx, m.conditions, mutableRow.ToRow()) + if err != nil || !matched { + return err + } + m.addedRows = append(m.addedRows, row) + return nil + }) + if err != nil { + return nil, err + } + + // TODO: After refine `IterReverse`, remove below logic and use `IterReverse` when do reverse scan. + if m.desc { + reverseDatumSlice(m.addedRows) + } + return m.addedRows, nil +} + +func (m *memTableReader) decodeRecordKeyValue(key, value []byte) ([]types.Datum, error) { + handle, err := tablecodec.DecodeRowKey(key) + if err != nil { + return nil, errors.Trace(err) + } + return decodeRowData(m.ctx, m.table, m.columns, m.colIDs, handle, value, &m.buffer) +} + +// decodeRowData uses to decode row data value. +func decodeRowData(ctx sessionctx.Context, tb *model.TableInfo, columns []*model.ColumnInfo, colIDs map[int64]int, handle int64, value []byte, buffer *allocBuf) ([]types.Datum, error) { + values, err := getRowData(ctx.GetSessionVars().StmtCtx, tb, columns, colIDs, handle, value, buffer) + if err != nil { + return nil, err + } + ds := make([]types.Datum, 0, len(columns)) + for _, col := range columns { + offset := colIDs[col.ID] + d, err := tablecodec.DecodeColumnValue(values[offset], &col.FieldType, ctx.GetSessionVars().TimeZone) + if err != nil { + return nil, err + } + ds = append(ds, d) + } + return ds, nil +} + +// getRowData decodes raw byte slice to row data. 
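+// Putting the pieces together, the per-row decode path used by
+// memTableReader is roughly (error handling omitted, values illustrative):
+//
+//	handle, _ := tablecodec.DecodeRowKey(key)   // record key -> int64 handle
+//	raw, _ := buffer.rd.DecodeToBytes(colIDs, handle, value, buffer.handleBytes)
+//	d, _ := tablecodec.DecodeColumnValue(raw[colIDs[col.ID]], &col.FieldType, tz)
+//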
+func getRowData( + ctx *stmtctx.StatementContext, + tb *model.TableInfo, + columns []*model.ColumnInfo, + colIDs map[int64]int, + handle int64, + value []byte, + buffer *allocBuf, +) ([][]byte, error) { + return buffer.rd.DecodeToBytes(colIDs, handle, value, buffer.handleBytes) +} + +type processKVFunc func(key, value []byte) error + +func iterTxnMemBuffer(ctx sessionctx.Context, kvRanges []kv.KeyRange, fn processKVFunc) error { + txn, err := ctx.Txn(true) + if err != nil { + return err + } + for _, rg := range kvRanges { + iter, err := txn.GetMemBuffer().Iter(rg.StartKey, rg.EndKey) + if err != nil { + return err + } + for ; iter.Valid(); err = iter.Next() { + if err != nil { + return err + } + // check whether the key was been deleted. + if len(iter.Value()) == 0 { + continue + } + err = fn(iter.Key(), iter.Value()) + if err != nil { + return err + } + } + } + return nil +} + +func reverseDatumSlice(rows [][]types.Datum) { + for i, j := 0, len(rows)-1; i < j; i, j = i+1, j-1 { + rows[i], rows[j] = rows[j], rows[i] + } +} + +func (m *memIndexReader) getMemRowsHandle() ([]int64, error) { + pkTp := types.NewFieldType(mysql.TypeLonglong) + if m.table.PKIsHandle { + for _, col := range m.table.Columns { + if mysql.HasPriKeyFlag(col.Flag) { + pkTp = &col.FieldType + break + } + } + } + handles := make([]int64, 0, m.addedRowsLen) + err := iterTxnMemBuffer(m.ctx, m.kvRanges, func(key, value []byte) error { + handle, err := tablecodec.DecodeIndexHandle(key, value, len(m.index.Columns), pkTp) + if err != nil { + return err + } + handles = append(handles, handle) + return nil + }) + if err != nil { + return nil, err + } + + if m.desc { + for i, j := 0, len(handles)-1; i < j; i, j = i+1, j-1 { + handles[i], handles[j] = handles[j], handles[i] + } + } + return handles, nil +} + +type memIndexLookUpReader struct { + ctx sessionctx.Context + index *model.IndexInfo + columns []*model.ColumnInfo + table table.Table + desc bool + conditions []expression.Expression + retFieldTypes []*types.FieldType + + idxReader *memIndexReader +} + +func buildMemIndexLookUpReader(us *UnionScanExec, idxLookUpReader *IndexLookUpExecutor) *memIndexLookUpReader { + kvRanges := idxLookUpReader.kvRanges + outputOffset := []int{len(idxLookUpReader.index.Columns)} + memIdxReader := &memIndexReader{ + ctx: us.ctx, + index: idxLookUpReader.index, + table: idxLookUpReader.table.Meta(), + kvRanges: kvRanges, + desc: idxLookUpReader.desc, + addedRowsLen: len(us.dirty.addedRows), + retFieldTypes: retTypes(us), + outputOffset: outputOffset, + belowHandleIndex: us.belowHandleIndex, + } + + return &memIndexLookUpReader{ + ctx: us.ctx, + index: idxLookUpReader.index, + columns: idxLookUpReader.columns, + table: idxLookUpReader.table, + desc: idxLookUpReader.desc, + conditions: us.conditions, + retFieldTypes: retTypes(us), + idxReader: memIdxReader, + } +} + +func (m *memIndexLookUpReader) getMemRows() ([][]types.Datum, error) { + handles, err := m.idxReader.getMemRowsHandle() + if err != nil || len(handles) == 0 { + return nil, err + } + + tblKVRanges := distsql.TableHandlesToKVRanges(getPhysicalTableID(m.table), handles) + colIDs := make(map[int64]int, len(m.columns)) + for i, col := range m.columns { + colIDs[col.ID] = i + } + + colInfos := make([]rowcodec.ColInfo, 0, len(m.columns)) + for i := range m.columns { + col := m.columns[i] + colInfos = append(colInfos, rowcodec.ColInfo{ + ID: col.ID, + Tp: int32(col.Tp), + Flag: int32(col.Flag), + IsPKHandle: m.table.Meta().PKIsHandle && mysql.HasPriKeyFlag(col.Flag), + }) + } + rd := 
rowcodec.NewByteDecoder(colInfos, -1, nil, nil) + memTblReader := &memTableReader{ + ctx: m.ctx, + table: m.table.Meta(), + columns: m.columns, + kvRanges: tblKVRanges, + conditions: m.conditions, + addedRows: make([][]types.Datum, 0, len(handles)), + retFieldTypes: m.retFieldTypes, + colIDs: colIDs, + buffer: allocBuf{ + handleBytes: make([]byte, 0, 16), + rd: rd, + }, + } + + return memTblReader.getMemRows() +} diff --git a/executor/memory_test.go b/executor/memory_test.go new file mode 100644 index 0000000..56dd62f --- /dev/null +++ b/executor/memory_test.go @@ -0,0 +1,111 @@ +// Copyright 2019 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package executor_test + +import ( + "context" + "fmt" + "runtime" + + . "github.com/pingcap/check" + "github.com/pingcap/tidb/domain" + "github.com/pingcap/tidb/kv" + "github.com/pingcap/tidb/session" + "github.com/pingcap/tidb/store/mockstore" +) + +var _ = SerialSuites(&testMemoryLeak{}) + +type testMemoryLeak struct { + store kv.Storage + domain *domain.Domain +} + +func (s *testMemoryLeak) SetUpSuite(c *C) { + var err error + s.store, err = mockstore.NewMockTikvStore() + c.Assert(err, IsNil) + s.domain, err = session.BootstrapSession(s.store) + c.Assert(err, IsNil) +} + +func (s *testMemoryLeak) TestPBMemoryLeak(c *C) { + c.Skip("too slow") + + se, err := session.CreateSession4Test(s.store) + c.Assert(err, IsNil) + _, err = se.Execute(context.Background(), "create database test_mem") + c.Assert(err, IsNil) + _, err = se.Execute(context.Background(), "use test_mem") + c.Assert(err, IsNil) + + // prepare data + totalSize := uint64(256 << 20) // 256MB + blockSize := uint64(8 << 10) // 8KB + delta := totalSize / 5 + numRows := totalSize / blockSize + _, err = se.Execute(context.Background(), fmt.Sprintf("create table t (c varchar(%v))", blockSize)) + c.Assert(err, IsNil) + defer func() { + _, err = se.Execute(context.Background(), "drop table t") + c.Assert(err, IsNil) + }() + sql := fmt.Sprintf("insert into t values (space(%v))", blockSize) + for i := uint64(0); i < numRows; i++ { + _, err = se.Execute(context.Background(), sql) + c.Assert(err, IsNil) + } + + // read data + runtime.GC() + allocatedBegin, inUseBegin := s.readMem() + records, err := se.Execute(context.Background(), "select * from t") + c.Assert(err, IsNil) + record := records[0] + rowCnt := 0 + chk := record.NewChunk() + for { + c.Assert(record.Next(context.Background(), chk), IsNil) + rowCnt += chk.NumRows() + if chk.NumRows() == 0 { + break + } + } + c.Assert(rowCnt, Equals, int(numRows)) + + // check memory before close + runtime.GC() + allocatedAfter, inUseAfter := s.readMem() + c.Assert(allocatedAfter-allocatedBegin, GreaterEqual, totalSize) + c.Assert(s.memDiff(inUseAfter, inUseBegin), Less, delta) + + se.Close() + runtime.GC() + allocatedFinal, inUseFinal := s.readMem() + c.Assert(allocatedFinal-allocatedAfter, Less, delta) + c.Assert(s.memDiff(inUseFinal, inUseAfter), Less, delta) +} + +func (s *testMemoryLeak) readMem() (allocated, heapInUse uint64) { + var stat runtime.MemStats + runtime.ReadMemStats(&stat) + return 
stat.TotalAlloc, stat.HeapInuse +} + +func (s *testMemoryLeak) memDiff(m1, m2 uint64) uint64 { + if m1 > m2 { + return m1 - m2 + } + return m2 - m1 +} diff --git a/executor/merge_join.go b/executor/merge_join.go new file mode 100644 index 0000000..be2abe6 --- /dev/null +++ b/executor/merge_join.go @@ -0,0 +1,373 @@ +// Copyright 2017 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package executor + +import ( + "context" + "github.com/pingcap/errors" + "github.com/pingcap/tidb/expression" + "github.com/pingcap/tidb/sessionctx/stmtctx" + "github.com/pingcap/tidb/util/chunk" +) + +// MergeJoinExec implements the merge join algorithm. +// This operator assumes that two iterators of both sides +// will provide required order on join condition: +// 1. For equal-join, one of the join key from each side +// matches the order given. +// 2. For other cases its preferred not to use SMJ and operator +// will throw error. +type MergeJoinExec struct { + baseExecutor + + stmtCtx *stmtctx.StatementContext + compareFuncs []expression.CompareFunc + joiner joiner + isOuterJoin bool + + prepared bool + outerIdx int + + innerTable *mergeJoinInnerTable + outerTable *mergeJoinOuterTable + + innerRows []chunk.Row + innerIter4Row chunk.Iterator + + childrenResults []*chunk.Chunk +} + +type mergeJoinOuterTable struct { + reader Executor + filter []expression.Expression + keys []*expression.Column + + chk *chunk.Chunk + selected []bool + + iter *chunk.Iterator4Chunk + row chunk.Row + hasMatch bool +} + +// mergeJoinInnerTable represents the inner table of merge join. +// All the inner rows which have the same join key are returned when function +// "rowsWithSameKey()" being called. +type mergeJoinInnerTable struct { + reader Executor + joinKeys []*expression.Column + ctx context.Context + + // for chunk executions + sameKeyRows []chunk.Row + keyCmpFuncs []chunk.CompareFunc + firstRow4Key chunk.Row + curRow chunk.Row + curResult *chunk.Chunk + curIter *chunk.Iterator4Chunk + curResultInUse bool + resultQueue []*chunk.Chunk + resourceQueue []*chunk.Chunk +} + +func (t *mergeJoinInnerTable) init(ctx context.Context, chk4Reader *chunk.Chunk) (err error) { + if t.reader == nil || ctx == nil { + return errors.Errorf("Invalid arguments: Empty arguments detected.") + } + t.ctx = ctx + t.curResult = chk4Reader + t.curIter = chunk.NewIterator4Chunk(t.curResult) + t.curRow = t.curIter.End() + t.curResultInUse = false + t.resultQueue = append(t.resultQueue, chk4Reader) + t.firstRow4Key, err = t.nextRow() + t.keyCmpFuncs = make([]chunk.CompareFunc, 0, len(t.joinKeys)) + for i := range t.joinKeys { + t.keyCmpFuncs = append(t.keyCmpFuncs, chunk.GetCompareFunc(t.joinKeys[i].RetType)) + } + return err +} + +func (t *mergeJoinInnerTable) rowsWithSameKey() ([]chunk.Row, error) { + lastResultIdx := len(t.resultQueue) - 1 + t.resourceQueue = append(t.resourceQueue, t.resultQueue[0:lastResultIdx]...) + t.resultQueue = t.resultQueue[lastResultIdx:] + // no more data. 
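+	// That is, firstRow4Key having reached curIter.End() below means the
+	// inner side is exhausted. Illustrative example (made-up keys): with the
+	// inner rows sorted on the join key as [1, 1, 2, 3, 3], successive calls
+	// return [1, 1], then [2], then [3, 3], and finally nil.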
+ if t.firstRow4Key == t.curIter.End() { + return nil, nil + } + t.sameKeyRows = t.sameKeyRows[:0] + t.sameKeyRows = append(t.sameKeyRows, t.firstRow4Key) + for { + selectedRow, err := t.nextRow() + // error happens or no more data. + if err != nil || selectedRow == t.curIter.End() { + t.firstRow4Key = t.curIter.End() + return t.sameKeyRows, err + } + compareResult := compareChunkRow(t.keyCmpFuncs, selectedRow, t.firstRow4Key, t.joinKeys, t.joinKeys) + if compareResult == 0 { + t.sameKeyRows = append(t.sameKeyRows, selectedRow) + } else { + t.firstRow4Key = selectedRow + return t.sameKeyRows, nil + } + } +} + +func (t *mergeJoinInnerTable) nextRow() (chunk.Row, error) { + for { + if t.curRow == t.curIter.End() { + t.reallocReaderResult() + err := Next(t.ctx, t.reader, t.curResult) + // error happens or no more data. + if err != nil || t.curResult.NumRows() == 0 { + t.curRow = t.curIter.End() + return t.curRow, err + } + t.curRow = t.curIter.Begin() + } + + result := t.curRow + t.curResultInUse = true + t.curRow = t.curIter.Next() + + if !t.hasNullInJoinKey(result) { + return result, nil + } + } +} + +func (t *mergeJoinInnerTable) hasNullInJoinKey(row chunk.Row) bool { + for _, col := range t.joinKeys { + ordinal := col.Index + if row.IsNull(ordinal) { + return true + } + } + return false +} + +// reallocReaderResult resets "t.curResult" to an empty Chunk to buffer the result of "t.reader". +// It pops a Chunk from "t.resourceQueue" and push it into "t.resultQueue" immediately. +func (t *mergeJoinInnerTable) reallocReaderResult() { + if !t.curResultInUse { + // If "t.curResult" is not in use, we can just reuse it. + t.curResult.Reset() + return + } + + // Create a new Chunk and append it to "resourceQueue" if there is no more + // available chunk in "resourceQueue". + if len(t.resourceQueue) == 0 { + newChunk := newFirstChunk(t.reader) + t.resourceQueue = append(t.resourceQueue, newChunk) + } + + // NOTE: "t.curResult" is always the last element of "resultQueue". + t.curResult = t.resourceQueue[0] + t.curIter = chunk.NewIterator4Chunk(t.curResult) + t.resourceQueue = t.resourceQueue[1:] + t.resultQueue = append(t.resultQueue, t.curResult) + t.curResult.Reset() + t.curResultInUse = false +} + +// Close implements the Executor Close interface. +func (e *MergeJoinExec) Close() error { + e.childrenResults = nil + + return e.baseExecutor.Close() +} + +// Open implements the Executor Open interface. +func (e *MergeJoinExec) Open(ctx context.Context) error { + if err := e.baseExecutor.Open(ctx); err != nil { + return err + } + + e.prepared = false + + e.childrenResults = make([]*chunk.Chunk, 0, len(e.children)) + for _, child := range e.children { + e.childrenResults = append(e.childrenResults, newFirstChunk(child)) + } + + return nil +} + +func compareChunkRow(cmpFuncs []chunk.CompareFunc, lhsRow, rhsRow chunk.Row, lhsKey, rhsKey []*expression.Column) int { + for i := range lhsKey { + cmp := cmpFuncs[i](lhsRow, lhsKey[i].Index, rhsRow, rhsKey[i].Index) + if cmp != 0 { + return cmp + } + } + return 0 +} + +func (e *MergeJoinExec) prepare(ctx context.Context, requiredRows int) error { + err := e.innerTable.init(ctx, e.childrenResults[e.outerIdx^1]) + if err != nil { + return err + } + + err = e.fetchNextInnerRows() + if err != nil { + return err + } + + // init outer table. 
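+	// (The inner side was initialised above from childrenResults[e.outerIdx^1];
+	// with exactly two children the XOR flips 0 to 1 and 1 to 0, so it always
+	// selects the child opposite to the outer one.)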
+ e.outerTable.chk = e.childrenResults[e.outerIdx] + e.outerTable.iter = chunk.NewIterator4Chunk(e.outerTable.chk) + e.outerTable.selected = make([]bool, 0, e.maxChunkSize) + + err = e.fetchNextOuterRows(ctx, requiredRows) + if err != nil { + return err + } + + e.prepared = true + return nil +} + +// Next implements the Executor Next interface. +func (e *MergeJoinExec) Next(ctx context.Context, req *chunk.Chunk) error { + req.Reset() + if !e.prepared { + if err := e.prepare(ctx, req.RequiredRows()); err != nil { + return err + } + } + + for !req.IsFull() { + hasMore, err := e.joinToChunk(ctx, req) + if err != nil || !hasMore { + return err + } + } + return nil +} + +func (e *MergeJoinExec) joinToChunk(ctx context.Context, chk *chunk.Chunk) (hasMore bool, err error) { + for { + if e.outerTable.row == e.outerTable.iter.End() { + err = e.fetchNextOuterRows(ctx, chk.RequiredRows()-chk.NumRows()) + if err != nil || e.outerTable.chk.NumRows() == 0 { + return false, err + } + } + + cmpResult := -1 + if e.outerTable.selected[e.outerTable.row.Idx()] && len(e.innerRows) > 0 { + cmpResult, err = e.compare(e.outerTable.row, e.innerIter4Row.Current()) + if err != nil { + return false, err + } + } + + if cmpResult > 0 { + if err = e.fetchNextInnerRows(); err != nil { + return false, err + } + continue + } + + if cmpResult < 0 { + e.joiner.onMissMatch(e.outerTable.row, chk) + if err != nil { + return false, err + } + + e.outerTable.row = e.outerTable.iter.Next() + e.outerTable.hasMatch = false + + if chk.IsFull() { + return true, nil + } + continue + } + + matched, _, err := e.joiner.tryToMatchInners(e.outerTable.row, e.innerIter4Row, chk) + if err != nil { + return false, err + } + e.outerTable.hasMatch = e.outerTable.hasMatch || matched + + if e.innerIter4Row.Current() == e.innerIter4Row.End() { + if !e.outerTable.hasMatch { + e.joiner.onMissMatch(e.outerTable.row, chk) + } + e.outerTable.row = e.outerTable.iter.Next() + e.outerTable.hasMatch = false + e.innerIter4Row.Begin() + } + + if chk.IsFull() { + return true, err + } + } +} + +func (e *MergeJoinExec) compare(outerRow, innerRow chunk.Row) (int, error) { + outerJoinKeys := e.outerTable.keys + innerJoinKeys := e.innerTable.joinKeys + for i := range outerJoinKeys { + cmp, _, err := e.compareFuncs[i](e.ctx, outerJoinKeys[i], innerJoinKeys[i], outerRow, innerRow) + if err != nil { + return 0, err + } + + if cmp != 0 { + return int(cmp), nil + } + } + return 0, nil +} + +// fetchNextInnerRows fetches the next join group, within which all the rows +// have the same join key, from the inner table. +func (e *MergeJoinExec) fetchNextInnerRows() (err error) { + e.innerRows, err = e.innerTable.rowsWithSameKey() + if err != nil { + return err + } + e.innerIter4Row = chunk.NewIterator4Slice(e.innerRows) + e.innerIter4Row.Begin() + return nil +} + +// fetchNextOuterRows fetches the next Chunk of outer table. Rows in a Chunk +// may not all belong to the same join key, but are guaranteed to be sorted +// according to the join key. +func (e *MergeJoinExec) fetchNextOuterRows(ctx context.Context, requiredRows int) (err error) { + // It's hard to calculate selectivity if there is any filter or it's inner join, + // so we just push the requiredRows down when it's outer join and has no filter. 
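+	// For example, if the parent still requires 32 rows, an outer join with no
+	// filter produces at least one output row per outer row, so fetching at
+	// most 32 outer rows from the child is sufficient; with a filter (or for
+	// an inner join) the selectivity is unknown and a full chunk is read.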
+ if e.isOuterJoin && len(e.outerTable.filter) == 0 { + e.outerTable.chk.SetRequiredRows(requiredRows, e.maxChunkSize) + } + + err = Next(ctx, e.outerTable.reader, e.outerTable.chk) + if err != nil { + return err + } + + e.outerTable.iter.Begin() + e.outerTable.selected, err = expression.VectorizedFilter(e.ctx, e.outerTable.filter, e.outerTable.iter, e.outerTable.selected) + if err != nil { + return err + } + e.outerTable.row = e.outerTable.iter.Begin() + return nil +} diff --git a/executor/merge_join_test.go b/executor/merge_join_test.go new file mode 100644 index 0000000..99a1550 --- /dev/null +++ b/executor/merge_join_test.go @@ -0,0 +1,366 @@ +// Copyright 2017 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package executor_test + +import ( + "fmt" + "strings" + + . "github.com/pingcap/check" + "github.com/pingcap/tidb/util/testkit" +) + +const plan1 = `[[TableScan_12 { + "db": "test", + "table": "t1", + "desc": false, + "keep order": true, + "push down info": { + "limit": 0, + "access conditions": null, + "index filter conditions": null, + "table filter conditions": null + } +} MergeJoin_17] [TableScan_15 { + "db": "test", + "table": "t2", + "desc": false, + "keep order": true, + "push down info": { + "limit": 0, + "access conditions": null, + "index filter conditions": null, + "table filter conditions": null + } +} MergeJoin_17] [MergeJoin_17 { + "eqCond": [ + "eq(test.t1.c1, test.t2.c1)" + ], + "leftCond": null, + "rightCond": null, + "otherCond": [], + "leftPlan": "TableScan_12", + "rightPlan": "TableScan_15", + "desc": "false" +} MergeJoin_8] [TableScan_22 { + "db": "test", + "table": "t3", + "desc": false, + "keep order": true, + "push down info": { + "limit": 0, + "access conditions": null, + "index filter conditions": null, + "table filter conditions": null + } +} MergeJoin_8] [MergeJoin_8 { + "eqCond": [ + "eq(test.t2.c1, test.t3.c1)" + ], + "leftCond": null, + "rightCond": null, + "otherCond": [], + "leftPlan": "MergeJoin_17", + "rightPlan": "TableScan_22", + "desc": "false" +} Sort_23] [Sort_23 { + "exprs": [ + { + "Expr": "test.t1.c1", + "Desc": false + } + ], + "limit": null, + "child": "MergeJoin_8" +} ]]` + +const plan2 = `[[TableScan_12 { + "db": "test", + "table": "t1", + "desc": false, + "keep order": true, + "push down info": { + "limit": 0, + "access conditions": null, + "index filter conditions": null, + "table filter conditions": null + } +} MergeJoin_17] [TableScan_15 { + "db": "test", + "table": "t2", + "desc": false, + "keep order": true, + "push down info": { + "limit": 0, + "access conditions": null, + "index filter conditions": null, + "table filter conditions": null + } +} MergeJoin_17] [MergeJoin_17 { + "eqCond": [ + "eq(test.t1.c1, test.t2.c1)" + ], + "leftCond": null, + "rightCond": null, + "otherCond": [], + "leftPlan": "TableScan_12", + "rightPlan": "TableScan_15", + "desc": "false" +} MergeJoin_8] [TableScan_22 { + "db": "test", + "table": "t3", + "desc": false, + "keep order": true, + "push down info": { + "limit": 0, + "access conditions": null, + "index filter conditions": null, + "table filter 
conditions": null + } +} MergeJoin_8] [MergeJoin_8 { + "eqCond": [ + "eq(test.t2.c1, test.t3.c1)" + ], + "leftCond": null, + "rightCond": null, + "otherCond": [], + "leftPlan": "MergeJoin_17", + "rightPlan": "TableScan_22", + "desc": "false" +} Sort_23] [Sort_23 { + "exprs": [ + { + "Expr": "test.t1.c1", + "Desc": false + } + ], + "limit": null, + "child": "MergeJoin_8" +} ]]` + +const plan3 = `[[TableScan_12 { + "db": "test", + "table": "t1", + "desc": false, + "keep order": true, + "push down info": { + "limit": 0, + "access conditions": null, + "index filter conditions": null, + "table filter conditions": null + } +} MergeJoin_9] [TableScan_15 { + "db": "test", + "table": "t2", + "desc": false, + "keep order": true, + "push down info": { + "limit": 0, + "access conditions": null, + "index filter conditions": null, + "table filter conditions": null + } +} MergeJoin_9] [MergeJoin_9 { + "eqCond": [ + "eq(test.t1.c1, test.t2.c1)" + ], + "leftCond": null, + "rightCond": null, + "otherCond": [], + "leftPlan": "TableScan_12", + "rightPlan": "TableScan_15", + "desc": "false" +} Sort_16] [Sort_16 { + "exprs": [ + { + "Expr": "test.t1.c1", + "Desc": false + } + ], + "limit": null, + "child": "MergeJoin_9" +} MergeJoin_8] [TableScan_23 { + "db": "test", + "table": "t3", + "desc": false, + "keep order": true, + "push down info": { + "limit": 0, + "access conditions": null, + "index filter conditions": null, + "table filter conditions": null + } +} MergeJoin_8] [MergeJoin_8 { + "eqCond": [ + "eq(test.t1.c1, test.t3.c1)" + ], + "leftCond": null, + "rightCond": null, + "otherCond": [], + "leftPlan": "Sort_16", + "rightPlan": "TableScan_23", + "desc": "false" +} ]]` + +func checkMergeAndRun(tk *testkit.TestKit, c *C, sql string) *testkit.Result { + explainedSQL := "explain " + sql + result := tk.MustQuery(explainedSQL) + resultStr := fmt.Sprintf("%v", result.Rows()) + if !strings.ContainsAny(resultStr, "MergeJoin") { + c.Error("Expected MergeJoin in plan.") + } + return tk.MustQuery(sql) +} + +func checkPlanAndRun(tk *testkit.TestKit, c *C, plan string, sql string) *testkit.Result { + explainedSQL := "explain " + sql + tk.MustQuery(explainedSQL) + + // TODO: Reopen it after refactoring explain. + // resultStr := fmt.Sprintf("%v", result.Rows()) + // if plan != resultStr { + // c.Errorf("Plan not match. 
Obtained:\n %s\nExpected:\n %s\n", resultStr, plan) + // } + return tk.MustQuery(sql) +} + +func (s *testSuite2) TestMergeJoin(c *C) { + tk := testkit.NewTestKit(c, s.store) + tk.MustExec("use test") + + tk.MustExec("drop table if exists t") + tk.MustExec("drop table if exists t1") + tk.MustExec("create table t(c1 int, c2 int)") + tk.MustExec("create table t1(c1 int, c2 int)") + tk.MustExec("insert into t values(1,1),(2,2)") + tk.MustExec("insert into t1 values(2,3),(4,4)") + + result := checkMergeAndRun(tk, c, "select /*+ TIDB_SMJ(t) */ * from t left outer join t1 on t.c1 = t1.c1 where t.c1 = 1 or t1.c2 > 20") + result.Check(testkit.Rows("1 1 ")) + result = checkMergeAndRun(tk, c, "select /*+ TIDB_SMJ(t) */ * from t1 right outer join t on t.c1 = t1.c1 where t.c1 = 1 or t1.c2 > 20") + result.Check(testkit.Rows(" 1 1")) + result = checkMergeAndRun(tk, c, "select /*+ TIDB_SMJ(t) */ * from t right outer join t1 on t.c1 = t1.c1 where t.c1 = 1 or t1.c2 > 20") + result.Check(testkit.Rows()) + result = checkMergeAndRun(tk, c, "select /*+ TIDB_SMJ(t) */ * from t left outer join t1 on t.c1 = t1.c1 where t1.c1 = 3 or false") + result.Check(testkit.Rows()) + result = checkMergeAndRun(tk, c, "select /*+ TIDB_SMJ(t) */ * from t left outer join t1 on t.c1 = t1.c1 and t.c1 != 1 order by t1.c1") + result.Check(testkit.Rows("1 1 ", "2 2 2 3")) + + tk.MustExec("drop table if exists t1") + tk.MustExec("drop table if exists t2") + tk.MustExec("drop table if exists t3") + + tk.MustExec("create table t1 (c1 int, c2 int)") + tk.MustExec("create table t2 (c1 int, c2 int)") + tk.MustExec("create table t3 (c1 int, c2 int)") + + tk.MustExec("insert into t1 values (1,1), (2,2), (3,3)") + tk.MustExec("insert into t2 values (1,1), (3,3), (5,5)") + tk.MustExec("insert into t3 values (1,1), (5,5), (9,9)") + + result = tk.MustQuery("select /*+ TIDB_SMJ(t1,t2,t3) */ * from t1 left join t2 on t1.c1 = t2.c1 right join t3 on t2.c1 = t3.c1 order by t1.c1, t1.c2, t2.c1, t2.c2, t3.c1, t3.c2;") + result.Check(testkit.Rows(" 5 5", " 9 9", "1 1 1 1 1 1")) + + tk.MustExec("drop table if exists t1") + tk.MustExec("create table t1 (c1 int)") + tk.MustExec("insert into t1 values (1), (1), (1)") + result = tk.MustQuery("select/*+ TIDB_SMJ(t) */ * from t1 a join t1 b on a.c1 = b.c1;") + result.Check(testkit.Rows("1 1", "1 1", "1 1", "1 1", "1 1", "1 1", "1 1", "1 1", "1 1")) + + tk.MustExec("drop table if exists t") + tk.MustExec("drop table if exists t1") + tk.MustExec("create table t(c1 int, index k(c1))") + tk.MustExec("create table t1(c1 int)") + tk.MustExec("insert into t values (1),(2),(3),(4),(5),(6),(7)") + tk.MustExec("insert into t1 values (1),(2),(3),(4),(5),(6),(7)") + result = tk.MustQuery("select /*+ TIDB_SMJ(a,b) */ a.c1 from t a , t1 b where a.c1 = b.c1 order by a.c1;") + result.Check(testkit.Rows("1", "2", "3", "4", "5", "6", "7")) + result = tk.MustQuery("select /*+ TIDB_SMJ(a, b) */ a.c1 from t a , (select * from t1 limit 3) b where a.c1 = b.c1 order by b.c1;") + result.Check(testkit.Rows("1", "2", "3")) + // Test LogicalSelection under LogicalJoin. + result = tk.MustQuery("select /*+ TIDB_SMJ(a, b) */ a.c1 from t a , (select * from t1 limit 3) b where a.c1 = b.c1 and b.c1 is not null order by b.c1;") + result.Check(testkit.Rows("1", "2", "3")) + tk.MustExec("begin;") + // Test LogicalUnionScan under LogicalJoin. 
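+	// The insert below happens inside the still-open transaction, so the new
+	// t1 row (8) is only visible through the UnionScan on top of the reader;
+	// it has no matching row in t, so the expected result stays 1..7.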
+ tk.MustExec("insert into t1 values(8);") + result = tk.MustQuery("select /*+ TIDB_SMJ(a, b) */ a.c1 from t a , t1 b where a.c1 = b.c1;") + result.Check(testkit.Rows("1", "2", "3", "4", "5", "6", "7")) + tk.MustExec("rollback;") + + tk.MustExec("drop table if exists t") + tk.MustExec("drop table if exists t1") + tk.MustExec("create table t(c1 int)") + tk.MustExec("create table t1(c1 int unsigned)") + tk.MustExec("insert into t values (1)") + tk.MustExec("insert into t1 values (1)") + result = tk.MustQuery("select /*+ TIDB_SMJ(t,t1) */ t.c1 from t , t1 where t.c1 = t1.c1") + result.Check(testkit.Rows("1")) + + tk.MustExec("drop table if exists t") + tk.MustExec("create table t(a int, b int, index a(a), index b(b))") + tk.MustExec("insert into t values(1, 2)") + tk.MustQuery("select /*+ TIDB_SMJ(t, t1) */ t.a, t1.b from t right join t t1 on t.a = t1.b order by t.a").Check(testkit.Rows(" 2")) + + tk.MustExec("drop table if exists t") + tk.MustExec("drop table if exists s") + tk.MustExec("create table t(a int, b int, primary key(a, b))") + tk.MustExec("insert into t value(1,1),(1,2),(1,3),(1,4)") + tk.MustExec("create table s(a int, primary key(a))") + tk.MustExec("insert into s value(1)") + tk.MustQuery("select /*+ TIDB_SMJ(t, s) */ count(*) from t join s on t.a = s.a").Check(testkit.Rows("4")) + + // Test TIDB_SMJ for cartesian product. + tk.MustExec("drop table if exists t") + tk.MustExec("create table t(a int)") + tk.MustExec("insert into t value(1),(2)") + tk.MustQuery("explain select /*+ TIDB_SMJ(t1, t2) */ * from t t1 join t t2 order by t1.a, t2.a").Check(testkit.Rows( + "Sort_6 100000000.00 root test.t.a:asc, test.t.a:asc", + "└─MergeJoin_9 100000000.00 root inner join", + " ├─TableReader_11 10000.00 root data:TableScan_10", + " │ └─TableScan_10 10000.00 cop table:t1, range:[-inf,+inf], keep order:false, stats:pseudo", + " └─TableReader_13 10000.00 root data:TableScan_12", + " └─TableScan_12 10000.00 cop table:t2, range:[-inf,+inf], keep order:false, stats:pseudo", + )) + tk.MustQuery("select /*+ TIDB_SMJ(t1, t2) */ * from t t1 join t t2 order by t1.a, t2.a").Check(testkit.Rows( + "1 1", + "1 2", + "2 1", + "2 2", + )) +} + +func (s *testSuite2) Test3WaysMergeJoin(c *C) { + tk := testkit.NewTestKit(c, s.store) + tk.MustExec("use test") + + tk.MustExec("drop table if exists t1") + tk.MustExec("drop table if exists t2") + tk.MustExec("drop table if exists t3") + tk.MustExec("create table t1(c1 int, c2 int, PRIMARY KEY (c1))") + tk.MustExec("create table t2(c1 int, c2 int, PRIMARY KEY (c1))") + tk.MustExec("create table t3(c1 int, c2 int, PRIMARY KEY (c1))") + tk.MustExec("insert into t1 values(1,1),(2,2),(3,3)") + tk.MustExec("insert into t2 values(2,3),(3,4),(4,5)") + tk.MustExec("insert into t3 values(1,2),(2,4),(3,10)") + result := checkPlanAndRun(tk, c, plan1, "select /*+ TIDB_SMJ(t1,t2,t3) */ * from t1 join t2 on t1.c1 = t2.c1 join t3 on t2.c1 = t3.c1 order by 1") + result.Check(testkit.Rows("2 2 2 3 2 4", "3 3 3 4 3 10")) + + result = checkPlanAndRun(tk, c, plan2, "select /*+ TIDB_SMJ(t1,t2,t3) */ * from t1 right outer join t2 on t1.c1 = t2.c1 join t3 on t2.c1 = t3.c1 order by 1") + result.Check(testkit.Rows("2 2 2 3 2 4", "3 3 3 4 3 10")) + + // In below case, t1 side filled with null when no matched join, so that order is not kept and sort appended + // On the other hand, t1 order kept so no final sort appended + result = checkPlanAndRun(tk, c, plan3, "select /*+ TIDB_SMJ(t1,t2,t3) */ * from t1 right outer join t2 on t1.c1 = t2.c1 join t3 on t1.c1 = t3.c1 order by 1") + 
result.Check(testkit.Rows("2 2 2 3 2 4", "3 3 3 4 3 10")) +} diff --git a/executor/pkg_test.go b/executor/pkg_test.go new file mode 100644 index 0000000..e1205d5 --- /dev/null +++ b/executor/pkg_test.go @@ -0,0 +1,40 @@ +package executor + +import ( + . "github.com/pingcap/check" +) + +var _ = Suite(&pkgTestSuite{}) + +type pkgTestSuite struct { +} + +func (s *pkgTestSuite) TestMoveInfoSchemaToFront(c *C) { + dbss := [][]string{ + {}, + {"A", "B", "C", "a", "b", "c"}, + {"A", "B", "C", "INFORMATION_SCHEMA"}, + {"A", "B", "INFORMATION_SCHEMA", "a"}, + {"INFORMATION_SCHEMA"}, + {"A", "B", "C", "INFORMATION_SCHEMA", "a", "b"}, + } + wanted := [][]string{ + {}, + {"A", "B", "C", "a", "b", "c"}, + {"INFORMATION_SCHEMA", "A", "B", "C"}, + {"INFORMATION_SCHEMA", "A", "B", "a"}, + {"INFORMATION_SCHEMA"}, + {"INFORMATION_SCHEMA", "A", "B", "C", "a", "b"}, + } + + for _, dbs := range dbss { + moveInfoSchemaToFront(dbs) + } + + for i, dbs := range wanted { + c.Check(len(dbss[i]), Equals, len(dbs)) + for j, db := range dbs { + c.Check(dbss[i][j], Equals, db) + } + } +} diff --git a/executor/projection.go b/executor/projection.go new file mode 100644 index 0000000..67cf759 --- /dev/null +++ b/executor/projection.go @@ -0,0 +1,434 @@ +// Copyright 2018 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package executor + +import ( + "context" + "fmt" + "sync" + "sync/atomic" + + "github.com/pingcap/errors" + "github.com/pingcap/tidb/expression" + "github.com/pingcap/tidb/sessionctx" + "github.com/pingcap/tidb/util" + "github.com/pingcap/tidb/util/chunk" + "github.com/pingcap/tidb/util/logutil" + "go.uber.org/zap" +) + +// This file contains the implementation of the physical Projection Operator: +// https://en.wikipedia.org/wiki/Projection_(relational_algebra) +// +// NOTE: +// 1. The number of "projectionWorker" is controlled by the global session +// variable "tidb_projection_concurrency". +// 2. Unparallel version is used when one of the following situations occurs: +// a. "tidb_projection_concurrency" is set to 0. +// b. The estimated input size is smaller than "tidb_max_chunk_size". +// c. This projection can not be executed vectorially. + +type projectionInput struct { + chk *chunk.Chunk + targetWorker *projectionWorker +} + +type projectionOutput struct { + chk *chunk.Chunk + done chan error +} + +// ProjectionExec implements the physical Projection Operator: +// https://en.wikipedia.org/wiki/Projection_(relational_algebra) +type ProjectionExec struct { + baseExecutor + + evaluatorSuit *expression.EvaluatorSuite + + prepared bool + finishCh chan struct{} + outputCh chan *projectionOutput + fetcher projectionInputFetcher + numWorkers int64 + workers []*projectionWorker + childResult *chunk.Chunk + + wg sync.WaitGroup + + // parentReqRows indicates how many rows the parent executor is + // requiring. It is set when parallelExecute() is called and used by the + // concurrent projectionInputFetcher. + // + // NOTE: It should be protected by atomic operations. + parentReqRows int64 +} + +// Open implements the Executor Open interface. 
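+// Whether the parallel path is used is decided in open() below from
+// e.numWorkers, which comes from the session variable mentioned in the NOTE
+// above. An illustrative session-level setup (values are examples only):
+//
+//	set @@tidb_projection_concurrency = 4;  -- run 4 projectionWorker goroutines
+//	set @@tidb_projection_concurrency = 0;  -- fall back to the unparallel path
+//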
+func (e *ProjectionExec) Open(ctx context.Context) error { + if err := e.baseExecutor.Open(ctx); err != nil { + return err + } + return e.open(ctx) +} + +func (e *ProjectionExec) open(ctx context.Context) error { + e.prepared = false + e.parentReqRows = int64(e.maxChunkSize) + + // For now a Projection can not be executed vectorially only because it + // contains "SetVar" or "GetVar" functions, in this scenario this + // Projection can not be executed parallelly. + if e.numWorkers > 0 && !e.evaluatorSuit.Vectorizable() { + e.numWorkers = 0 + } + + if e.isUnparallelExec() { + e.childResult = newFirstChunk(e.children[0]) + } + + return nil +} + +// Next implements the Executor Next interface. +// +// Here we explain the execution flow of the parallel projection implementation. +// There are 3 main components: +// 1. "projectionInputFetcher": Fetch input "Chunk" from child. +// 2. "projectionWorker": Do the projection work. +// 3. "ProjectionExec.Next": Return result to parent. +// +// 1. "projectionInputFetcher" gets its input and output resources from its +// "inputCh" and "outputCh" channel, once the input and output resources are +// abtained, it fetches child's result into "input.chk" and: +// a. Dispatches this input to the worker specified in "input.targetWorker" +// b. Dispatches this output to the main thread: "ProjectionExec.Next" +// c. Dispatches this output to the worker specified in "input.targetWorker" +// It is finished and exited once: +// a. There is no more input from child. +// b. "ProjectionExec" close the "globalFinishCh" +// +// 2. "projectionWorker" gets its input and output resources from its +// "inputCh" and "outputCh" channel, once the input and output resources are +// abtained, it calculates the projection result use "input.chk" as the input +// and "output.chk" as the output, once the calculation is done, it: +// a. Sends "nil" or error to "output.done" to mark this input is finished. +// b. Returns the "input" resource to "projectionInputFetcher.inputCh" +// They are finished and exited once: +// a. "ProjectionExec" closes the "globalFinishCh" +// +// 3. "ProjectionExec.Next" gets its output resources from its "outputCh" channel. +// After receiving an output from "outputCh", it should wait to receive a "nil" +// or error from "output.done" channel. Once a "nil" or error is received: +// a. Returns this output to its parent +// b. Returns the "output" resource to "projectionInputFetcher.outputCh" +// +// +-----------+----------------------+--------------------------+ +// | | | | +// | +--------+---------+ +--------+---------+ +--------+---------+ +// | | projectionWorker | + projectionWorker | ... 
+ projectionWorker | +// | +------------------+ +------------------+ +------------------+ +// | ^ ^ ^ ^ ^ ^ +// | | | | | | | +// | inputCh outputCh inputCh outputCh inputCh outputCh +// | ^ ^ ^ ^ ^ ^ +// | | | | | | | +// | | | +// | | +----------------->outputCh +// | | | | +// | | | v +// | +-------+-------+--------+ +---------------------+ +// | | projectionInputFetcher | | ProjectionExec.Next | +// | +------------------------+ +---------+-----------+ +// | ^ ^ | +// | | | | +// | inputCh outputCh | +// | ^ ^ | +// | | | | +// +------------------------------+ +----------------------+ +// +func (e *ProjectionExec) Next(ctx context.Context, req *chunk.Chunk) error { + req.GrowAndReset(e.maxChunkSize) + if e.isUnparallelExec() { + return e.unParallelExecute(ctx, req) + } + return e.parallelExecute(ctx, req) + +} + +func (e *ProjectionExec) isUnparallelExec() bool { + return e.numWorkers <= 0 +} + +func (e *ProjectionExec) unParallelExecute(ctx context.Context, chk *chunk.Chunk) error { + // transmit the requiredRows + e.childResult.SetRequiredRows(chk.RequiredRows(), e.maxChunkSize) + err := Next(ctx, e.children[0], e.childResult) + if err != nil { + return err + } + if e.childResult.NumRows() == 0 { + return nil + } + err = e.evaluatorSuit.Run(e.ctx, e.childResult, chk) + return err +} + +func (e *ProjectionExec) parallelExecute(ctx context.Context, chk *chunk.Chunk) error { + atomic.StoreInt64(&e.parentReqRows, int64(chk.RequiredRows())) + if !e.prepared { + e.prepare(ctx) + e.prepared = true + } + + output, ok := <-e.outputCh + if !ok { + return nil + } + + err := <-output.done + if err != nil { + return err + } + + chk.SwapColumns(output.chk) + e.fetcher.outputCh <- output + return nil +} + +func (e *ProjectionExec) prepare(ctx context.Context) { + e.finishCh = make(chan struct{}) + e.outputCh = make(chan *projectionOutput, e.numWorkers) + + // Initialize projectionInputFetcher. + e.fetcher = projectionInputFetcher{ + proj: e, + child: e.children[0], + globalFinishCh: e.finishCh, + globalOutputCh: e.outputCh, + inputCh: make(chan *projectionInput, e.numWorkers), + outputCh: make(chan *projectionOutput, e.numWorkers), + } + + // Initialize projectionWorker. + e.workers = make([]*projectionWorker, 0, e.numWorkers) + for i := int64(0); i < e.numWorkers; i++ { + e.workers = append(e.workers, &projectionWorker{ + proj: e, + sctx: e.ctx, + evaluatorSuit: e.evaluatorSuit, + globalFinishCh: e.finishCh, + inputGiveBackCh: e.fetcher.inputCh, + inputCh: make(chan *projectionInput, 1), + outputCh: make(chan *projectionOutput, 1), + }) + + inputChk := newFirstChunk(e.children[0]) + e.fetcher.inputCh <- &projectionInput{ + chk: inputChk, + targetWorker: e.workers[i], + } + + outputChk := newFirstChunk(e) + e.fetcher.outputCh <- &projectionOutput{ + chk: outputChk, + done: make(chan error, 1), + } + } + + e.wg.Add(1) + go e.fetcher.run(ctx) + + for i := range e.workers { + e.wg.Add(1) + go e.workers[i].run(ctx) + } +} + +func (e *ProjectionExec) drainInputCh(ch chan *projectionInput) { + close(ch) + for range ch { + } +} + +func (e *ProjectionExec) drainOutputCh(ch chan *projectionOutput) { + close(ch) + for range ch { + } +} + +// Close implements the Executor Close interface. +func (e *ProjectionExec) Close() error { + if e.isUnparallelExec() { + e.childResult = nil + } + if e.prepared { + close(e.finishCh) + e.wg.Wait() // Wait for fetcher and workers to finish and exit. 
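+		// Only after wg.Wait returns is it safe to close and drain the
+		// channels: no goroutine can still be sending on them, so the drain
+		// loops below merely release whatever chunks were left in flight.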
+
+		// clear fetcher
+		e.drainInputCh(e.fetcher.inputCh)
+		e.drainOutputCh(e.fetcher.outputCh)
+
+		// clear workers
+		for _, w := range e.workers {
+			e.drainInputCh(w.inputCh)
+			e.drainOutputCh(w.outputCh)
+		}
+	}
+	return e.baseExecutor.Close()
+}
+
+type projectionInputFetcher struct {
+	proj           *ProjectionExec
+	child          Executor
+	globalFinishCh <-chan struct{}
+	globalOutputCh chan<- *projectionOutput
+
+	inputCh  chan *projectionInput
+	outputCh chan *projectionOutput
+}
+
+// run gets projectionInputFetcher's input and output resources from its
+// "inputCh" and "outputCh" channels. Once the input and output resources are
+// obtained, it fetches the child's result into "input.chk" and:
+//   a. Dispatches this input to the worker specified in "input.targetWorker"
+//   b. Dispatches this output to the main thread: "ProjectionExec.Next"
+//   c. Dispatches this output to the worker specified in "input.targetWorker"
+//
+// It is finished and exits once:
+//   a. There is no more input from the child.
+//   b. "ProjectionExec" closes the "globalFinishCh"
+func (f *projectionInputFetcher) run(ctx context.Context) {
+	var output *projectionOutput
+	defer func() {
+		if r := recover(); r != nil {
+			recoveryProjection(output, r)
+		}
+		close(f.globalOutputCh)
+		f.proj.wg.Done()
+	}()
+
+	for {
+		input := readProjectionInput(f.inputCh, f.globalFinishCh)
+		if input == nil {
+			return
+		}
+		targetWorker := input.targetWorker
+
+		output = readProjectionOutput(f.outputCh, f.globalFinishCh)
+		if output == nil {
+			return
+		}
+
+		f.globalOutputCh <- output
+
+		requiredRows := atomic.LoadInt64(&f.proj.parentReqRows)
+		input.chk.SetRequiredRows(int(requiredRows), f.proj.maxChunkSize)
+		err := Next(ctx, f.child, input.chk)
+		if err != nil || input.chk.NumRows() == 0 {
+			output.done <- err
+			return
+		}
+
+		targetWorker.inputCh <- input
+		targetWorker.outputCh <- output
+	}
+}
+
+type projectionWorker struct {
+	proj            *ProjectionExec
+	sctx            sessionctx.Context
+	evaluatorSuit   *expression.EvaluatorSuite
+	globalFinishCh  <-chan struct{}
+	inputGiveBackCh chan<- *projectionInput
+
+	// The "inputCh" and "outputCh" channels are:
+	// a. initialized by "ProjectionExec.prepare"
+	// b. written by "projectionInputFetcher.run"
+	// c. read by "projectionWorker.run"
+	inputCh  chan *projectionInput
+	outputCh chan *projectionOutput
+}
+
+// run gets projectionWorker's input and output resources from its
+// "inputCh" and "outputCh" channels. Once the input and output resources are
+// obtained, it calculates the projection result using "input.chk" as the input
+// and "output.chk" as the output. Once the calculation is done, it:
+//   a. Sends "nil" or an error to "output.done" to mark that this input is finished.
+//   b. Returns the "input" resource to "projectionInputFetcher.inputCh".
+//
+// It is finished and exits once:
+//   a. "ProjectionExec" closes the "globalFinishCh".
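The fetcher/worker hand-off described in the comments above is easier to see in isolation. The sketch below reproduces the same resource-recycling shape with plain channels: pre-allocated input buffers and output slots circulate between a fetcher, a worker, and a consumer, and results are published in submission order so the consumer sees them in the order they were fetched. All names here (task, result, pair) are illustrative stand-ins, not TiDB types.

package main

import "fmt"

type task struct{ rows []int }

type result struct {
	rows []int
	done chan error // written by the worker, read by the consumer
}

type pair struct {
	in  *task
	out *result
}

func main() {
	inputCh := make(chan *task, 2)          // recycled input buffers
	outputCh := make(chan *result, 2)       // recycled output slots
	globalOutputCh := make(chan *result, 2) // results, in submission order
	workerCh := make(chan pair, 2)

	// Pre-allocate the resources, mirroring what ProjectionExec.prepare does.
	for i := 0; i < 2; i++ {
		inputCh <- &task{}
		outputCh <- &result{done: make(chan error, 1)}
	}

	// Worker: compute, signal completion, give the input buffer back.
	go func() {
		for p := range workerCh {
			p.out.rows = append(p.out.rows[:0], p.in.rows...) // the "projection" step
			p.out.done <- nil
			inputCh <- p.in
		}
	}()

	// Fetcher: pair an input buffer with an output slot, publish the slot
	// to the consumer first, then dispatch both to the worker.
	go func() {
		for i := 0; i < 3; i++ {
			in, out := <-inputCh, <-outputCh
			in.rows = append(in.rows[:0], i) // "fetch a chunk from the child"
			globalOutputCh <- out
			workerCh <- pair{in, out}
		}
		close(workerCh)
		close(globalOutputCh)
	}()

	// Consumer (the Next role): wait on done, use the result, recycle the slot.
	for out := range globalOutputCh {
		if err := <-out.done; err == nil {
			fmt.Println(out.rows)
		}
		outputCh <- out
	}
}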
+func (w *projectionWorker) run(ctx context.Context) { + var output *projectionOutput + defer func() { + if r := recover(); r != nil { + recoveryProjection(output, r) + } + w.proj.wg.Done() + }() + for { + input := readProjectionInput(w.inputCh, w.globalFinishCh) + if input == nil { + return + } + + output = readProjectionOutput(w.outputCh, w.globalFinishCh) + if output == nil { + return + } + + // TODO: trace memory used by the evaluatorSuit including all temporal buffers it uses + err := w.evaluatorSuit.Run(w.sctx, input.chk, output.chk) + output.done <- err + + if err != nil { + return + } + + w.inputGiveBackCh <- input + } +} + +func recoveryProjection(output *projectionOutput, r interface{}) { + if output != nil { + output.done <- errors.Errorf("%v", r) + } + buf := util.GetStack() + logutil.BgLogger().Error("projection executor panicked", zap.String("error", fmt.Sprintf("%v", r)), zap.String("stack", string(buf))) +} + +func readProjectionInput(inputCh <-chan *projectionInput, finishCh <-chan struct{}) *projectionInput { + select { + case <-finishCh: + return nil + case input, ok := <-inputCh: + if !ok { + return nil + } + return input + } +} + +func readProjectionOutput(outputCh <-chan *projectionOutput, finishCh <-chan struct{}) *projectionOutput { + select { + case <-finishCh: + return nil + case output, ok := <-outputCh: + if !ok { + return nil + } + return output + } +} diff --git a/executor/replace.go b/executor/replace.go new file mode 100644 index 0000000..e1cd2dd --- /dev/null +++ b/executor/replace.go @@ -0,0 +1,205 @@ +// Copyright 2018 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package executor + +import ( + "context" + "github.com/pingcap/errors" + "github.com/pingcap/tidb/kv" + "github.com/pingcap/tidb/table/tables" + "github.com/pingcap/tidb/tablecodec" + "github.com/pingcap/tidb/types" + "github.com/pingcap/tidb/util/chunk" + "github.com/pingcap/tidb/util/logutil" + "go.uber.org/zap" +) + +// ReplaceExec represents a replace executor. +type ReplaceExec struct { + *InsertValues + Priority int +} + +// Close implements the Executor Close interface. +func (e *ReplaceExec) Close() error { + if e.SelectExec != nil { + return e.SelectExec.Close() + } + return nil +} + +// Open implements the Executor Open interface. +func (e *ReplaceExec) Open(ctx context.Context) error { + if e.SelectExec != nil { + return e.SelectExec.Open(ctx) + } + e.initEvalBuffer() + return nil +} + +// removeRow removes the duplicate row and cleanup its keys in the key-value map, +// but if the to-be-removed row equals to the to-be-added row, no remove or add things to do. +func (e *ReplaceExec) removeRow(ctx context.Context, txn kv.Transaction, handle int64, r toBeCheckedRow) (bool, error) { + newRow := r.row + oldRow, err := getOldRow(ctx, e.ctx, txn, r.t, handle) + if err != nil { + logutil.BgLogger().Error("get old row failed when replace", + zap.Int64("handle", handle), + zap.String("toBeInsertedRow", types.DatumsToStrNoErr(r.row))) + if kv.IsErrNotFound(err) { + err = errors.NotFoundf("can not be duplicated row, due to old row not found. 
handle %d", handle) + } + return false, err + } + + rowUnchanged, err := types.EqualDatums(e.ctx.GetSessionVars().StmtCtx, oldRow, newRow) + if err != nil { + return false, err + } + if rowUnchanged { + e.ctx.GetSessionVars().StmtCtx.AddAffectedRows(1) + return true, nil + } + + err = r.t.RemoveRecord(e.ctx, handle, oldRow) + if err != nil { + return false, err + } + e.ctx.GetSessionVars().StmtCtx.AddAffectedRows(1) + return false, nil +} + +// replaceRow removes all duplicate rows for one row, then inserts it. +func (e *ReplaceExec) replaceRow(ctx context.Context, r toBeCheckedRow) error { + txn, err := e.ctx.Txn(true) + if err != nil { + return err + } + + if r.handleKey != nil { + handle, err := tablecodec.DecodeRowKey(r.handleKey.newKV.key) + if err != nil { + return err + } + + if _, err := txn.Get(ctx, r.handleKey.newKV.key); err == nil { + rowUnchanged, err := e.removeRow(ctx, txn, handle, r) + if err != nil { + return err + } + if rowUnchanged { + return nil + } + } else { + if !kv.IsErrNotFound(err) { + return err + } + } + } + + // Keep on removing duplicated rows. + for { + rowUnchanged, foundDupKey, err := e.removeIndexRow(ctx, txn, r) + if err != nil { + return err + } + if rowUnchanged { + return nil + } + if foundDupKey { + continue + } + break + } + + // No duplicated rows now, insert the row. + _, err = e.addRecord(ctx, r.row) + if err != nil { + return err + } + return nil +} + +// removeIndexRow removes the row which has a duplicated key. +// the return values: +// 1. bool: true when the row is unchanged. This means no need to remove, and then add the row. +// 2. bool: true when found the duplicated key. This only means that duplicated key was found, +// and the row was removed. +// 3. error: the error. +func (e *ReplaceExec) removeIndexRow(ctx context.Context, txn kv.Transaction, r toBeCheckedRow) (bool, bool, error) { + for _, uk := range r.uniqueKeys { + val, err := txn.Get(ctx, uk.newKV.key) + if err != nil { + if kv.IsErrNotFound(err) { + continue + } + return false, false, err + } + + handle, err := tables.DecodeHandle(val) + if err != nil { + return false, true, err + } + rowUnchanged, err := e.removeRow(ctx, txn, handle, r) + if err != nil { + return false, true, err + } + return rowUnchanged, true, nil + } + return false, false, nil +} + +func (e *ReplaceExec) exec(ctx context.Context, newRows [][]types.Datum) error { + /* + * MySQL uses the following algorithm for REPLACE (and LOAD DATA ... REPLACE): + * 1. Try to insert the new row into the table + * 2. While the insertion fails because a duplicate-key error occurs for a primary key or unique index: + * 3. Delete from the table the conflicting row that has the duplicate key value + * 4. Try again to insert the new row into the table + * See http://dev.mysql.com/doc/refman/5.7/en/replace.html + * + * For REPLACE statements, the affected-rows value is 2 if the new row replaced an old row, + * because in this case, one row was inserted after the duplicate was deleted. + * See http://dev.mysql.com/doc/refman/5.7/en/mysql-affected-rows.html + */ + + // Get keys need to be checked. + toBeCheckedRows, err := getKeysNeedCheck(ctx, e.ctx, e.Table, newRows) + if err != nil { + return err + } + + //txn, err := e.ctx.Txn(true) + //if err != nil { + // return err + //} + + e.ctx.GetSessionVars().StmtCtx.AddRecordRows(uint64(len(newRows))) + for _, r := range toBeCheckedRows { + err = e.replaceRow(ctx, r) + if err != nil { + return err + } + } + return nil +} + +// Next implements the Executor Next interface. 
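The REPLACE algorithm spelled out in the comment above translates into concrete affected-row counts: 1 for a fresh key, 2 when an old row is deleted and the new one inserted, and 1 again when the incoming row equals the existing one, because removeRow short-circuits in that case. The sketch below shows those cases in the style of the patch's own testkit tests; the suite receiver, table name, and values are illustrative additions, not part of the patch.

func (s *testSuite5) TestReplaceAffectedRowsSketch(c *C) {
	tk := testkit.NewTestKit(c, s.store)
	tk.MustExec("use test")
	tk.MustExec("create table replace_demo (id int primary key, v int)")

	// Fresh key: behaves like a plain INSERT, affected rows = 1.
	tk.MustExec("replace into replace_demo values (1, 10)")

	// Same key, different value: the old row is deleted and the new one
	// inserted, so MySQL (and ReplaceExec above) reports affected rows = 2.
	tk.MustExec("replace into replace_demo values (1, 20)")

	// Same key, identical row: removeRow sees the rows are equal, counts the
	// statement as affecting 1 row, and skips the delete and the re-insert.
	tk.MustExec("replace into replace_demo values (1, 20)")

	tk.MustQuery("select * from replace_demo").Check(testkit.Rows("1 20"))
}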
+func (e *ReplaceExec) Next(ctx context.Context, req *chunk.Chunk) error { + req.Reset() + if len(e.children) > 0 && e.children[0] != nil { + return insertRowsFromSelect(ctx, e) + } + return insertRows(ctx, e) +} diff --git a/executor/seqtest/seq_executor_test.go b/executor/seqtest/seq_executor_test.go new file mode 100644 index 0000000..575b280 --- /dev/null +++ b/executor/seqtest/seq_executor_test.go @@ -0,0 +1,210 @@ +// Copyright 2015 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +// Note: All the tests in this file will be executed sequentially. + +package executor_test + +import ( + "bytes" + "context" + "flag" + "fmt" + "math" + "os" + "runtime/pprof" + "strings" + "sync/atomic" + "testing" + "time" + + . "github.com/pingcap/check" + "github.com/pingcap/failpoint" + "github.com/pingcap/tidb/domain" + "github.com/pingcap/tidb/executor" + "github.com/pingcap/tidb/kv" + "github.com/pingcap/tidb/parser" + "github.com/pingcap/tidb/parser/model" + "github.com/pingcap/tidb/parser/terror" + "github.com/pingcap/tidb/session" + "github.com/pingcap/tidb/sessionctx/variable" + "github.com/pingcap/tidb/store/mockstore" + "github.com/pingcap/tidb/store/mockstore/mocktikv" + "github.com/pingcap/tidb/store/tikv" + "github.com/pingcap/tidb/util/logutil" + "github.com/pingcap/tidb/util/testkit" +) + +func TestT(t *testing.T) { + CustomVerboseFlag = true + logLevel := os.Getenv("log_level") + logutil.InitLogger(logutil.NewLogConfig(logLevel, logutil.DefaultLogFormat, logutil.EmptyFileLogConfig, false)) + TestingT(t) +} + +var _ = Suite(&seqTestSuite{}) + +type seqTestSuite struct { + cluster *mocktikv.Cluster + mvccStore mocktikv.MVCCStore + store kv.Storage + domain *domain.Domain + *parser.Parser +} + +var mockTikv = flag.Bool("mockTikv", true, "use mock tikv store in executor test") + +func (s *seqTestSuite) SetUpSuite(c *C) { + s.Parser = parser.New() + flag.Lookup("mockTikv") + useMockTikv := *mockTikv + if useMockTikv { + s.cluster = mocktikv.NewCluster() + mocktikv.BootstrapWithSingleStore(s.cluster) + s.mvccStore = mocktikv.MustNewMVCCStore() + store, err := mockstore.NewMockTikvStore( + mockstore.WithCluster(s.cluster), + mockstore.WithMVCCStore(s.mvccStore), + ) + c.Assert(err, IsNil) + s.store = store + session.SetSchemaLease(0) + session.DisableStats4Test() + } + d, err := session.BootstrapSession(s.store) + c.Assert(err, IsNil) + s.domain = d +} + +func (s *seqTestSuite) TearDownSuite(c *C) { + s.domain.Close() + s.store.Close() +} + +func (s *seqTestSuite) TestEarlyClose(c *C) { + tk := testkit.NewTestKit(c, s.store) + tk.MustExec("use test") + tk.MustExec("create table earlyclose (id int primary key)") + + N := 100 + // Insert N rows. + var values []string + for i := 0; i < N; i++ { + values = append(values, fmt.Sprintf("(%d)", i)) + } + tk.MustExec("insert earlyclose values " + strings.Join(values, ",")) + + // Get table ID for split. 
+ dom := domain.GetDomain(tk.Se) + is := dom.InfoSchema() + tbl, err := is.TableByName(model.NewCIStr("test"), model.NewCIStr("earlyclose")) + c.Assert(err, IsNil) + tblID := tbl.Meta().ID + + // Split the table. + s.cluster.SplitTable(s.mvccStore, tblID, N/2) + + ctx := context.Background() + for i := 0; i < N/2; i++ { + rss, err1 := tk.Se.Execute(ctx, "select * from earlyclose order by id") + c.Assert(err1, IsNil) + rs := rss[0] + req := rs.NewChunk() + err = rs.Next(ctx, req) + c.Assert(err, IsNil) + rs.Close() + } + + // Goroutine should not leak when error happen. + c.Assert(failpoint.Enable("github.com/pingcap/tidb/store/tikv/handleTaskOnceError", `return(true)`), IsNil) + defer func() { + c.Assert(failpoint.Disable("github.com/pingcap/tidb/store/tikv/handleTaskOnceError"), IsNil) + }() + rss, err := tk.Se.Execute(ctx, "select * from earlyclose") + c.Assert(err, IsNil) + rs := rss[0] + req := rs.NewChunk() + err = rs.Next(ctx, req) + c.Assert(err, NotNil) + rs.Close() +} + +// TestIndexDoubleReadClose checks that when a index double read returns before reading all the rows, the goroutine doesn't +// leak. For testing distsql with multiple regions, we need to manually split a mock TiKV. +func (s *seqTestSuite) TestIndexDoubleReadClose(c *C) { + if _, ok := s.store.GetClient().(*tikv.CopClient); !ok { + // Make sure the store is tikv store. + return + } + originSize := atomic.LoadInt32(&executor.LookupTableTaskChannelSize) + atomic.StoreInt32(&executor.LookupTableTaskChannelSize, 1) + tk := testkit.NewTestKit(c, s.store) + tk.MustExec("set @@tidb_index_lookup_size = '10'") + tk.MustExec("use test") + tk.MustExec("create table dist (id int primary key, c_idx int, c_col int, index (c_idx))") + + // Insert 100 rows. + var values []string + for i := 0; i < 100; i++ { + values = append(values, fmt.Sprintf("(%d, %d, %d)", i, i, i)) + } + tk.MustExec("insert dist values " + strings.Join(values, ",")) + + rs, err := tk.Exec("select * from dist where c_idx between 0 and 100") + c.Assert(err, IsNil) + req := rs.NewChunk() + err = rs.Next(context.Background(), req) + c.Assert(err, IsNil) + c.Assert(err, IsNil) + keyword := "pickAndExecTask" + rs.Close() + time.Sleep(time.Millisecond * 10) + c.Check(checkGoroutineExists(keyword), IsFalse) + atomic.StoreInt32(&executor.LookupTableTaskChannelSize, originSize) +} + +func checkGoroutineExists(keyword string) bool { + buf := new(bytes.Buffer) + profile := pprof.Lookup("goroutine") + profile.WriteTo(buf, 1) + str := buf.String() + return strings.Contains(str, keyword) +} + +func (s *seqTestSuite) TestMaxDeltaSchemaCount(c *C) { + tk := testkit.NewTestKit(c, s.store) + tk.MustExec("use test") + c.Assert(variable.GetMaxDeltaSchemaCount(), Equals, int64(variable.DefTiDBMaxDeltaSchemaCount)) + gvc := domain.GetDomain(tk.Se).GetGlobalVarsCache() + gvc.Disable() + + tk.MustExec("set @@global.tidb_max_delta_schema_count= -1") + tk.MustQuery("show warnings;").Check(testkit.Rows("Warning 1292 Truncated incorrect tidb_max_delta_schema_count value: '-1'")) + // Make sure a new session will load global variables. 
+ tk.Se = nil + tk.MustExec("use test") + c.Assert(variable.GetMaxDeltaSchemaCount(), Equals, int64(100)) + tk.MustExec(fmt.Sprintf("set @@global.tidb_max_delta_schema_count= %v", uint64(math.MaxInt64))) + tk.MustQuery("show warnings;").Check(testkit.Rows(fmt.Sprintf("Warning 1292 Truncated incorrect tidb_max_delta_schema_count value: '%d'", uint64(math.MaxInt64)))) + tk.Se = nil + tk.MustExec("use test") + c.Assert(variable.GetMaxDeltaSchemaCount(), Equals, int64(16384)) + _, err := tk.Exec("set @@global.tidb_max_delta_schema_count= invalid_val") + c.Assert(terror.ErrorEqual(err, variable.ErrWrongTypeForVar), IsTrue, Commentf("err %v", err)) + + tk.MustExec("set @@global.tidb_max_delta_schema_count= 2048") + tk.Se = nil + tk.MustExec("use test") + c.Assert(variable.GetMaxDeltaSchemaCount(), Equals, int64(2048)) + tk.MustQuery("select @@global.tidb_max_delta_schema_count").Check(testkit.Rows("2048")) +} diff --git a/executor/set.go b/executor/set.go new file mode 100644 index 0000000..9a63b3a --- /dev/null +++ b/executor/set.go @@ -0,0 +1,173 @@ +// Copyright 2016 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package executor + +import ( + "context" + "fmt" + "strings" + + "github.com/pingcap/errors" + "github.com/pingcap/tidb/expression" + "github.com/pingcap/tidb/parser/terror" + "github.com/pingcap/tidb/sessionctx/variable" + "github.com/pingcap/tidb/types" + "github.com/pingcap/tidb/util/chunk" + "github.com/pingcap/tidb/util/logutil" + "go.uber.org/zap" +) + +// SetExecutor executes set statement. +type SetExecutor struct { + baseExecutor + + vars []*expression.VarAssignment + done bool +} + +// Next implements the Executor Next interface. +func (e *SetExecutor) Next(ctx context.Context, req *chunk.Chunk) error { + req.Reset() + if e.done { + return nil + } + e.done = true + sessionVars := e.ctx.GetSessionVars() + for _, v := range e.vars { + name := strings.ToLower(v.Name) + if !v.IsSystem { + // Set user variable. + value, err := v.Expr.Eval(chunk.Row{}) + if err != nil { + return err + } + + if value.IsNull() { + delete(sessionVars.Users, name) + } else { + svalue, err1 := value.ToString() + if err1 != nil { + return err1 + } + sessionVars.Users[name] = fmt.Sprintf("%v", svalue) + } + continue + } + + syns := e.getSynonyms(name) + // Set system variable + for _, n := range syns { + err := e.setSysVariable(n, v) + if err != nil { + return err + } + } + } + return nil +} + +func (e *SetExecutor) getSynonyms(varName string) []string { + synonyms, ok := variable.SynonymsSysVariables[varName] + if ok { + return synonyms + } + + synonyms = []string{varName} + return synonyms +} + +func (e *SetExecutor) setSysVariable(name string, v *expression.VarAssignment) error { + sessionVars := e.ctx.GetSessionVars() + sysVar := variable.GetSysVar(name) + if sysVar == nil { + return variable.ErrUnknownSystemVar.GenWithStackByArgs(name) + } + if sysVar.Scope == variable.ScopeNone { + return errors.Errorf("Variable '%s' is a read only variable", name) + } + var valStr string + if v.IsGlobal { + // Set global scope system variable. 
+ if sysVar.Scope&variable.ScopeGlobal == 0 { + return errors.Errorf("Variable '%s' is a SESSION variable and can't be used with SET GLOBAL", name) + } + value, err := e.getVarValue(v, sysVar) + if err != nil { + return err + } + if value.IsNull() { + value.SetString("") + } + valStr, err = value.ToString() + if err != nil { + return err + } + err = sessionVars.GlobalVarsAccessor.SetGlobalSysVar(name, valStr) + if err != nil { + return err + } + } else { + // Set session scope system variable. + if sysVar.Scope&variable.ScopeSession == 0 { + return errors.Errorf("Variable '%s' is a GLOBAL variable and should be set with SET GLOBAL", name) + } + value, err := e.getVarValue(v, nil) + if err != nil { + return err + } + if name == variable.TxnIsolationOneShot && sessionVars.InTxn() { + return errors.Trace(ErrCantChangeTxCharacteristics) + } + err = variable.SetSessionSystemVar(sessionVars, name, value) + if err != nil { + return err + } + if value.IsNull() { + valStr = "NULL" + } else { + var err error + valStr, err = value.ToString() + terror.Log(err) + } + if name != variable.AutoCommit { + logutil.BgLogger().Info("set session var", zap.Uint64("conn", sessionVars.ConnectionID), zap.String("name", name), zap.String("val", valStr)) + } else { + // Some applications will set `autocommit` variable before query. + // This will print too many unnecessary log info. + logutil.BgLogger().Debug("set session var", zap.Uint64("conn", sessionVars.ConnectionID), zap.String("name", name), zap.String("val", valStr)) + } + } + + return nil +} + +func (e *SetExecutor) getVarValue(v *expression.VarAssignment, sysVar *variable.SysVar) (value types.Datum, err error) { + if v.IsDefault { + // To set a SESSION variable to the GLOBAL value or a GLOBAL value + // to the compiled-in MySQL default value, use the DEFAULT keyword. + // See http://dev.mysql.com/doc/refman/5.7/en/set-statement.html + if sysVar != nil { + value = types.NewStringDatum(sysVar.Value) + } else { + s, err1 := variable.GetGlobalSystemVar(e.ctx.GetSessionVars(), v.Name) + if err1 != nil { + return value, err1 + } + value = types.NewStringDatum(s) + } + return + } + value, err = v.Expr.Eval(chunk.Row{}) + return value, err +} diff --git a/executor/set_test.go b/executor/set_test.go new file mode 100644 index 0000000..e293649 --- /dev/null +++ b/executor/set_test.go @@ -0,0 +1,41 @@ +// Copyright 2016 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package executor_test + +import ( + . 
"github.com/pingcap/check" + "github.com/pingcap/tidb/parser/terror" + "github.com/pingcap/tidb/sessionctx/variable" + "github.com/pingcap/tidb/util/testkit" +) + +func (s *testSuite5) TestSelectGlobalVar(c *C) { + tk := testkit.NewTestKit(c, s.store) + + tk.MustQuery("select @@global.max_connections;").Check(testkit.Rows("151")) + tk.MustQuery("select @@max_connections;").Check(testkit.Rows("151")) + + tk.MustExec("set @@global.max_connections=100;") + + tk.MustQuery("select @@global.max_connections;").Check(testkit.Rows("100")) + tk.MustQuery("select @@max_connections;").Check(testkit.Rows("100")) + + tk.MustExec("set @@global.max_connections=151;") + + // test for unknown variable. + err := tk.ExecToErr("select @@invalid") + c.Assert(terror.ErrorEqual(err, variable.ErrUnknownSystemVar), IsTrue, Commentf("err %v", err)) + err = tk.ExecToErr("select @@global.invalid") + c.Assert(terror.ErrorEqual(err, variable.ErrUnknownSystemVar), IsTrue, Commentf("err %v", err)) +} diff --git a/executor/show.go b/executor/show.go new file mode 100644 index 0000000..1769daf --- /dev/null +++ b/executor/show.go @@ -0,0 +1,483 @@ +// Copyright 2016 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package executor + +import ( + "bytes" + "context" + "fmt" + "sort" + "strconv" + "strings" + + "github.com/cznic/mathutil" + "github.com/pingcap/errors" + "github.com/pingcap/tidb/infoschema" + "github.com/pingcap/tidb/meta/autoid" + "github.com/pingcap/tidb/parser/ast" + "github.com/pingcap/tidb/parser/charset" + "github.com/pingcap/tidb/parser/model" + "github.com/pingcap/tidb/parser/mysql" + "github.com/pingcap/tidb/parser/terror" + "github.com/pingcap/tidb/sessionctx" + "github.com/pingcap/tidb/sessionctx/stmtctx" + "github.com/pingcap/tidb/sessionctx/variable" + "github.com/pingcap/tidb/table" + "github.com/pingcap/tidb/types" + "github.com/pingcap/tidb/util/chunk" + "github.com/pingcap/tidb/util/format" +) + +// ShowExec represents a show executor. +type ShowExec struct { + baseExecutor + + Tp ast.ShowStmtType // Databases/Tables/Columns/.... + DBName model.CIStr + Table *ast.TableName // Used for showing columns. + Column *ast.ColumnName // Used for `desc table column`. + IndexName model.CIStr // Used for show table regions. + Flag int // Some flag parsed from sql, such as FULL. + + is infoschema.InfoSchema + + result *chunk.Chunk + cursor int + + Full bool + IfNotExists bool // Used for `show create database if not exists` + GlobalScope bool // GlobalScope is used by show variables +} + +// Next implements the Executor Next interface. 
+func (e *ShowExec) Next(ctx context.Context, req *chunk.Chunk) error { + req.GrowAndReset(e.maxChunkSize) + if e.result == nil { + e.result = newFirstChunk(e) + err := e.fetchAll(ctx) + if err != nil { + return errors.Trace(err) + } + iter := chunk.NewIterator4Chunk(e.result) + for colIdx := 0; colIdx < e.Schema().Len(); colIdx++ { + retType := e.Schema().Columns[colIdx].RetType + if !types.IsTypeVarchar(retType.Tp) { + continue + } + for row := iter.Begin(); row != iter.End(); row = iter.Next() { + if valLen := len(row.GetString(colIdx)); retType.Flen < valLen { + retType.Flen = valLen + } + } + } + } + if e.cursor >= e.result.NumRows() { + return nil + } + numCurBatch := mathutil.Min(req.Capacity(), e.result.NumRows()-e.cursor) + req.Append(e.result, e.cursor, e.cursor+numCurBatch) + e.cursor += numCurBatch + return nil +} + +func (e *ShowExec) fetchAll(ctx context.Context) error { + switch e.Tp { + case ast.ShowCreateTable: + return e.fetchShowCreateTable() + case ast.ShowCreateDatabase: + return e.fetchShowCreateDatabase() + case ast.ShowDatabases: + return e.fetchShowDatabases() + case ast.ShowTables: + return e.fetchShowTables() + case ast.ShowVariables: + return e.fetchShowVariables() + case ast.ShowWarnings: + return e.fetchShowWarnings(false) + case ast.ShowErrors: + return e.fetchShowWarnings(true) + } + return nil +} + +// moveInfoSchemaToFront moves information_schema to the first, and the others are sorted in the origin ascending order. +func moveInfoSchemaToFront(dbs []string) { + if len(dbs) > 0 && strings.EqualFold(dbs[0], "INFORMATION_SCHEMA") { + return + } + + i := sort.SearchStrings(dbs, "INFORMATION_SCHEMA") + if i < len(dbs) && strings.EqualFold(dbs[i], "INFORMATION_SCHEMA") { + copy(dbs[1:i+1], dbs[0:i]) + dbs[0] = "INFORMATION_SCHEMA" + } +} + +func (e *ShowExec) fetchShowDatabases() error { + dbs := e.is.AllSchemaNames() + sort.Strings(dbs) + // let information_schema be the first database + moveInfoSchemaToFront(dbs) + for _, d := range dbs { + e.appendRow([]interface{}{ + d, + }) + } + return nil +} + +func (e *ShowExec) fetchShowTables() error { + if !e.is.SchemaExists(e.DBName) { + return ErrBadDB.GenWithStackByArgs(e.DBName) + } + // sort for tables + tableNames := make([]string, 0, len(e.is.SchemaTables(e.DBName))) + var tableTypes = make(map[string]string) + for _, v := range e.is.SchemaTables(e.DBName) { + tableNames = append(tableNames, v.Meta().Name.O) + tableTypes[v.Meta().Name.O] = "BASE TABLE" + } + sort.Strings(tableNames) + for _, v := range tableNames { + if e.Full { + e.appendRow([]interface{}{v, tableTypes[v]}) + } else { + e.appendRow([]interface{}{v}) + } + } + return nil +} + +func (e *ShowExec) fetchShowVariables() (err error) { + var ( + value string + ok bool + sessionVars = e.ctx.GetSessionVars() + unreachedVars = make([]string, 0, len(variable.SysVars)) + ) + for _, v := range variable.SysVars { + if !e.GlobalScope { + // For a session scope variable, + // 1. try to fetch value from SessionVars.Systems; + // 2. if this variable is session-only, fetch value from SysVars + // otherwise, fetch the value from table `mysql.Global_Variables`. + value, ok, err = variable.GetSessionOnlySysVars(sessionVars, v.Name) + } else { + // If the scope of a system variable is ScopeNone, + // it's a read-only variable, so we return the default value of it. + // Otherwise, we have to fetch the values from table `mysql.Global_Variables` for global variable names. 
+ value, ok, err = variable.GetScopeNoneSystemVar(v.Name) + } + if err != nil { + return errors.Trace(err) + } + if !ok { + unreachedVars = append(unreachedVars, v.Name) + continue + } + e.appendRow([]interface{}{v.Name, value}) + } + if len(unreachedVars) != 0 { + systemVars, err := sessionVars.GlobalVarsAccessor.GetAllSysVars() + if err != nil { + return errors.Trace(err) + } + for _, varName := range unreachedVars { + varValue, ok := systemVars[varName] + if !ok { + varValue = variable.SysVars[varName].Value + } + e.appendRow([]interface{}{varName, varValue}) + } + } + return nil +} + +func getDefaultCollate(charsetName string) string { + for _, c := range charset.GetSupportedCharsets() { + if strings.EqualFold(c.Name, charsetName) { + return c.DefaultCollation + } + } + return "" +} + +// escape the identifier for pretty-printing. +// For instance, the identifier "foo `bar`" will become "`foo ``bar```". +// The sqlMode controls whether to escape with backquotes (`) or double quotes +// (`"`) depending on whether mysql.ModeANSIQuotes is enabled. +func escape(cis model.CIStr, sqlMode mysql.SQLMode) string { + var quote string + if sqlMode&mysql.ModeANSIQuotes != 0 { + quote = `"` + } else { + quote = "`" + } + return quote + strings.Replace(cis.O, quote, quote+quote, -1) + quote +} + +// ConstructResultOfShowCreateTable constructs the result for show create table. +func ConstructResultOfShowCreateTable(ctx sessionctx.Context, tableInfo *model.TableInfo, allocator autoid.Allocator, buf *bytes.Buffer) (err error) { + tblCharset := tableInfo.Charset + if len(tblCharset) == 0 { + tblCharset = mysql.DefaultCharset + } + tblCollate := tableInfo.Collate + // Set default collate if collate is not specified. + if len(tblCollate) == 0 { + tblCollate = getDefaultCollate(tblCharset) + } + + sqlMode := ctx.GetSessionVars().SQLMode + fmt.Fprintf(buf, "CREATE TABLE %s (\n", escape(tableInfo.Name, sqlMode)) + var pkCol *model.ColumnInfo + var hasAutoIncID bool + for i, col := range tableInfo.Cols() { + fmt.Fprintf(buf, " %s %s", escape(col.Name, sqlMode), col.GetTypeDesc()) + if col.Charset != "binary" { + if col.Charset != tblCharset { + fmt.Fprintf(buf, " CHARACTER SET %s", col.Charset) + } + if col.Collate != tblCollate { + fmt.Fprintf(buf, " COLLATE %s", col.Collate) + } else { + defcol, err := charset.GetDefaultCollation(col.Charset) + if err == nil && defcol != col.Collate { + fmt.Fprintf(buf, " COLLATE %s", col.Collate) + } + } + } + if mysql.HasAutoIncrementFlag(col.Flag) { + hasAutoIncID = true + buf.WriteString(" NOT NULL AUTO_INCREMENT") + } else { + if mysql.HasNotNullFlag(col.Flag) { + buf.WriteString(" NOT NULL") + } + // default values are not shown for generated columns in MySQL + if !mysql.HasNoDefaultValueFlag(col.Flag) { + defaultValue := col.GetDefaultValue() + switch defaultValue { + case nil: + if !mysql.HasNotNullFlag(col.Flag) { + buf.WriteString(" DEFAULT NULL") + } + default: + defaultValStr := fmt.Sprintf("%v", defaultValue) + + fmt.Fprintf(buf, " DEFAULT '%s'", format.OutputFormat(defaultValStr)) + } + } + if mysql.HasOnUpdateNowFlag(col.Flag) { + buf.WriteString(" ON UPDATE CURRENT_TIMESTAMP") + buf.WriteString(table.OptionalFsp(&col.FieldType)) + } + } + if len(col.Comment) > 0 { + fmt.Fprintf(buf, " COMMENT '%s'", format.OutputFormat(col.Comment)) + } + if i != len(tableInfo.Cols())-1 { + buf.WriteString(",\n") + } + if tableInfo.PKIsHandle && mysql.HasPriKeyFlag(col.Flag) { + pkCol = col + } + } + + if pkCol != nil { + // If PKIsHanle, pk info is not in tb.Indices(). 
We should handle it here. + buf.WriteString(",\n") + fmt.Fprintf(buf, " PRIMARY KEY (%s)", escape(pkCol.Name, sqlMode)) + } + + publicIndices := make([]*model.IndexInfo, 0, len(tableInfo.Indices)) + for _, idx := range tableInfo.Indices { + if idx.State == model.StatePublic { + publicIndices = append(publicIndices, idx) + } + } + if len(publicIndices) > 0 { + buf.WriteString(",\n") + } + + for i, idxInfo := range publicIndices { + if idxInfo.Primary { + buf.WriteString(" PRIMARY KEY ") + } else if idxInfo.Unique { + fmt.Fprintf(buf, " UNIQUE KEY %s ", escape(idxInfo.Name, sqlMode)) + } else { + fmt.Fprintf(buf, " KEY %s ", escape(idxInfo.Name, sqlMode)) + } + + cols := make([]string, 0, len(idxInfo.Columns)) + for _, c := range idxInfo.Columns { + colInfo := escape(c.Name, sqlMode) + if c.Length != types.UnspecifiedLength { + colInfo = fmt.Sprintf("%s(%s)", colInfo, strconv.Itoa(c.Length)) + } + cols = append(cols, colInfo) + } + fmt.Fprintf(buf, "(%s)", strings.Join(cols, ",")) + if i != len(publicIndices)-1 { + buf.WriteString(",\n") + } + } + + buf.WriteString("\n") + + buf.WriteString(") ENGINE=InnoDB") + // Because we only support case sensitive utf8_bin collate, we need to explicitly set the default charset and collation + // to make it work on MySQL server which has default collate utf8_general_ci. + if len(tblCollate) == 0 { + // If we can not find default collate for the given charset, + // do not show the collate part. + fmt.Fprintf(buf, " DEFAULT CHARSET=%s", tblCharset) + } else { + fmt.Fprintf(buf, " DEFAULT CHARSET=%s COLLATE=%s", tblCharset, tblCollate) + } + + // Displayed if the compression typed is set. + if len(tableInfo.Compression) != 0 { + fmt.Fprintf(buf, " COMPRESSION='%s'", tableInfo.Compression) + } + + if hasAutoIncID { + autoIncID, err := allocator.NextGlobalAutoID(tableInfo.ID) + if err != nil { + return errors.Trace(err) + } + // It's compatible with MySQL. + if autoIncID > 1 { + fmt.Fprintf(buf, " AUTO_INCREMENT=%d", autoIncID) + } + } + + if tableInfo.ShardRowIDBits > 0 { + fmt.Fprintf(buf, "/*!90000 SHARD_ROW_ID_BITS=%d ", tableInfo.ShardRowIDBits) + if tableInfo.PreSplitRegions > 0 { + fmt.Fprintf(buf, "PRE_SPLIT_REGIONS=%d ", tableInfo.PreSplitRegions) + } + buf.WriteString("*/") + } + + if len(tableInfo.Comment) > 0 { + fmt.Fprintf(buf, " COMMENT='%s'", format.OutputFormat(tableInfo.Comment)) + } + return nil +} + +func (e *ShowExec) fetchShowCreateTable() error { + tb, err := e.getTable() + if err != nil { + return errors.Trace(err) + } + + allocator := tb.Allocator(e.ctx) + var buf bytes.Buffer + // TODO: let the result more like MySQL. + if err = ConstructResultOfShowCreateTable(e.ctx, tb.Meta(), allocator, &buf); err != nil { + return err + } + + e.appendRow([]interface{}{tb.Meta().Name.O, buf.String()}) + return nil +} + +// ConstructResultOfShowCreateDatabase constructs the result for show create database. +func ConstructResultOfShowCreateDatabase(ctx sessionctx.Context, dbInfo *model.DBInfo, ifNotExists bool, buf *bytes.Buffer) (err error) { + sqlMode := ctx.GetSessionVars().SQLMode + var ifNotExistsStr string + if ifNotExists { + ifNotExistsStr = "/*!32312 IF NOT EXISTS*/ " + } + fmt.Fprintf(buf, "CREATE DATABASE %s%s", ifNotExistsStr, escape(dbInfo.Name, sqlMode)) + if s := dbInfo.Charset; len(s) > 0 { + fmt.Fprintf(buf, " /*!40100 DEFAULT CHARACTER SET %s */", s) + } + return nil +} + +// fetchShowCreateDatabase composes show create database result. 
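The escape helper documented in show.go above, and exercised by the SHOW CREATE statements built here, follows one rule: wrap the identifier in the active quote character and double every embedded occurrence of it, with ANSI_QUOTES switching the quote from a backquote to a double quote. A standalone sketch of that rule, assuming nothing beyond the standard library; escapeIdent is an illustrative name, not the patch's function.

package main

import (
	"fmt"
	"strings"
)

// escapeIdent mirrors the rule described for show.go's escape helper:
// wrap the identifier in the active quote character and double any embedded
// occurrence of it. Under ANSI_QUOTES the quote is `"`, otherwise a backquote.
func escapeIdent(name string, ansiQuotes bool) string {
	quote := "`"
	if ansiQuotes {
		quote = `"`
	}
	return quote + strings.ReplaceAll(name, quote, quote+quote) + quote
}

func main() {
	fmt.Println(escapeIdent("foo `bar`", false)) // `foo ``bar```
	fmt.Println(escapeIdent(`t"abl"e`, true))    // "t""abl""e"
}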
+func (e *ShowExec) fetchShowCreateDatabase() error { + dbInfo, ok := e.is.SchemaByName(e.DBName) + if !ok { + return infoschema.ErrDatabaseNotExists.GenWithStackByArgs(e.DBName.O) + } + + var buf bytes.Buffer + err := ConstructResultOfShowCreateDatabase(e.ctx, dbInfo, e.IfNotExists, &buf) + if err != nil { + return err + } + e.appendRow([]interface{}{dbInfo.Name.O, buf.String()}) + return nil +} + +func (e *ShowExec) fetchShowWarnings(errOnly bool) error { + warns := e.ctx.GetSessionVars().StmtCtx.GetWarnings() + for _, w := range warns { + if errOnly && w.Level != stmtctx.WarnLevelError { + continue + } + warn := errors.Cause(w.Err) + switch x := warn.(type) { + case *terror.Error: + sqlErr := x.ToSQLError() + e.appendRow([]interface{}{w.Level, int64(sqlErr.Code), sqlErr.Message}) + default: + e.appendRow([]interface{}{w.Level, int64(mysql.ErrUnknown), warn.Error()}) + } + } + return nil +} + +func (e *ShowExec) getTable() (table.Table, error) { + if e.Table == nil { + return nil, errors.New("table not found") + } + tb, ok := e.is.TableByID(e.Table.TableInfo.ID) + if !ok { + return nil, errors.Errorf("table %s not found", e.Table.Name) + } + return tb, nil +} + +func (e *ShowExec) appendRow(row []interface{}) { + for i, col := range row { + if col == nil { + e.result.AppendNull(i) + continue + } + switch x := col.(type) { + case nil: + e.result.AppendNull(i) + case int: + e.result.AppendInt64(i, int64(x)) + case int64: + e.result.AppendInt64(i, x) + case uint64: + e.result.AppendUint64(i, x) + case float64: + e.result.AppendFloat64(i, x) + case float32: + e.result.AppendFloat32(i, x) + case string: + e.result.AppendString(i, x) + case []byte: + e.result.AppendBytes(i, x) + default: + e.result.AppendNull(i) + } + } +} diff --git a/executor/show_test.go b/executor/show_test.go new file mode 100644 index 0000000..b157daa --- /dev/null +++ b/executor/show_test.go @@ -0,0 +1,81 @@ +// Copyright 2016 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package executor_test + +import ( + . 
"github.com/pingcap/check" + "github.com/pingcap/tidb/util/testkit" + "github.com/pingcap/tidb/util/testutil" +) + +func (s *testSuite5) TestShowWarnings(c *C) { + tk := testkit.NewTestKit(c, s.store) + tk.MustExec("use test") + testSQL := `create table if not exists show_warnings (a int)` + tk.MustExec(testSQL) + tk.MustExec("set @@sql_mode=''") + tk.MustExec("insert show_warnings values ('a')") + c.Assert(tk.Se.GetSessionVars().StmtCtx.WarningCount(), Equals, uint16(1)) + tk.MustQuery("show warnings").Check(testutil.RowsWithSep("|", "Warning|1292|Truncated incorrect FLOAT value: 'a'")) + c.Assert(tk.Se.GetSessionVars().StmtCtx.WarningCount(), Equals, uint16(0)) + tk.MustQuery("show warnings").Check(testutil.RowsWithSep("|", "Warning|1292|Truncated incorrect FLOAT value: 'a'")) + c.Assert(tk.Se.GetSessionVars().StmtCtx.WarningCount(), Equals, uint16(0)) + + // Test Warning level 'Error' + testSQL = `create table show_warnings (a int)` + tk.Exec(testSQL) + c.Assert(tk.Se.GetSessionVars().StmtCtx.WarningCount(), Equals, uint16(1)) + tk.MustQuery("show warnings").Check(testutil.RowsWithSep("|", "Error|1050|Table 'test.show_warnings' already exists")) + tk.MustQuery("select @@error_count").Check(testutil.RowsWithSep("|", "1")) +} + +func (s *testSuite5) TestShowErrors(c *C) { + tk := testkit.NewTestKit(c, s.store) + tk.MustExec("use test") + testSQL := `create table if not exists show_errors (a int)` + tk.MustExec(testSQL) + testSQL = `create table show_errors (a int)` + tk.Exec(testSQL) + + tk.MustQuery("show errors").Check(testutil.RowsWithSep("|", "Error|1050|Table 'test.show_errors' already exists")) +} + +func (s *testSuite5) TestShowEscape(c *C) { + tk := testkit.NewTestKit(c, s.store) + + tk.MustExec("use test") + tk.MustExec("drop table if exists `t``abl\"e`") + tk.MustExec("create table `t``abl\"e`(`c``olum\"n` int(11) primary key)") + tk.MustQuery("show create table `t``abl\"e`").Check(testutil.RowsWithSep("|", + ""+ + "t`abl\"e CREATE TABLE `t``abl\"e` (\n"+ + " `c``olum\"n` int(11) NOT NULL,\n"+ + " PRIMARY KEY (`c``olum\"n`)\n"+ + ") ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin", + )) + + // ANSI_QUOTES will change the SHOW output + tk.MustExec("set @old_sql_mode=@@sql_mode") + tk.MustExec("set sql_mode=ansi_quotes") + tk.MustQuery("show create table \"t`abl\"\"e\"").Check(testutil.RowsWithSep("|", + ""+ + "t`abl\"e CREATE TABLE \"t`abl\"\"e\" (\n"+ + " \"c`olum\"\"n\" int(11) NOT NULL,\n"+ + " PRIMARY KEY (\"c`olum\"\"n\")\n"+ + ") ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin", + )) + + tk.MustExec("drop table \"t`abl\"\"e\"") + tk.MustExec("set sql_mode=@old_sql_mode") +} diff --git a/executor/simple.go b/executor/simple.go new file mode 100644 index 0000000..5f36f00 --- /dev/null +++ b/executor/simple.go @@ -0,0 +1,114 @@ +// Copyright 2016 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package executor + +import ( + "context" + + "github.com/pingcap/tidb/infoschema" + "github.com/pingcap/tidb/parser/ast" + "github.com/pingcap/tidb/parser/model" + "github.com/pingcap/tidb/parser/mysql" + "github.com/pingcap/tidb/parser/terror" + "github.com/pingcap/tidb/sessionctx/variable" + "github.com/pingcap/tidb/util/chunk" + "github.com/pingcap/tidb/util/logutil" + "go.uber.org/zap" +) + +// SimpleExec represents simple statement executor. +// For statements do simple execution. +// includes `UseStmt`,`BeginStmt`, `CommitStmt` and `RollbackStmt`. +type SimpleExec struct { + baseExecutor + + Statement ast.StmtNode + done bool + is infoschema.InfoSchema +} + +// Next implements the Executor Next interface. +func (e *SimpleExec) Next(ctx context.Context, req *chunk.Chunk) (err error) { + if e.done { + return nil + } + + switch x := e.Statement.(type) { + case *ast.UseStmt: + err = e.executeUse(x) + case *ast.BeginStmt: + err = e.executeBegin(ctx, x) + case *ast.CommitStmt: + e.executeCommit(x) + case *ast.RollbackStmt: + err = e.executeRollback(x) + } + e.done = true + return err +} + +func (e *SimpleExec) executeUse(s *ast.UseStmt) error { + dbname := model.NewCIStr(s.DBName) + + dbinfo, exists := e.is.SchemaByName(dbname) + if !exists { + return infoschema.ErrDatabaseNotExists.GenWithStackByArgs(dbname) + } + e.ctx.GetSessionVars().CurrentDB = dbname.O + // character_set_database is the character set used by the default database. + // The server sets this variable whenever the default database changes. + // See http://dev.mysql.com/doc/refman/5.7/en/server-system-variables.html#sysvar_character_set_database + sessionVars := e.ctx.GetSessionVars() + terror.Log(sessionVars.SetSystemVar(variable.CharsetDatabase, dbinfo.Charset)) + terror.Log(sessionVars.SetSystemVar(variable.CollationDatabase, dbinfo.Collate)) + return nil +} + +func (e *SimpleExec) executeBegin(ctx context.Context, s *ast.BeginStmt) error { + // If BEGIN is the first statement in TxnCtx, we can reuse the existing transaction, without the + // need to call NewTxn, which commits the existing transaction and begins a new one. + txnCtx := e.ctx.GetSessionVars().TxnCtx + if txnCtx.History != nil { + err := e.ctx.NewTxn(ctx) + if err != nil { + return err + } + } + // With START TRANSACTION, autocommit remains disabled until you end + // the transaction with COMMIT or ROLLBACK. The autocommit mode then + // reverts to its previous state. + e.ctx.GetSessionVars().SetStatusFlag(mysql.ServerStatusInTrans, true) + // Call ctx.Txn(true) to active pending txn. + _, err := e.ctx.Txn(true) + return err +} + +func (e *SimpleExec) executeCommit(s *ast.CommitStmt) { + e.ctx.GetSessionVars().SetStatusFlag(mysql.ServerStatusInTrans, false) +} + +func (e *SimpleExec) executeRollback(s *ast.RollbackStmt) error { + sessVars := e.ctx.GetSessionVars() + logutil.BgLogger().Debug("execute rollback statement", zap.Uint64("conn", sessVars.ConnectionID)) + sessVars.SetStatusFlag(mysql.ServerStatusInTrans, false) + txn, err := e.ctx.Txn(false) + if err != nil { + return err + } + if txn.Valid() { + sessVars.TxnCtx.ClearDelta() + return txn.Rollback() + } + return nil +} diff --git a/executor/simple_test.go b/executor/simple_test.go new file mode 100644 index 0000000..f8ef66b --- /dev/null +++ b/executor/simple_test.go @@ -0,0 +1,84 @@ +// Copyright 2016 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package executor_test + +import ( + . "github.com/pingcap/check" + "github.com/pingcap/tidb/parser/mysql" + "github.com/pingcap/tidb/parser/terror" + "github.com/pingcap/tidb/planner/core" + "github.com/pingcap/tidb/sessionctx" + "github.com/pingcap/tidb/util/testkit" +) + +func (s *testSuite3) TestCharsetDatabase(c *C) { + tk := testkit.NewTestKit(c, s.store) + testSQL := `create database if not exists cd_test_utf8 CHARACTER SET utf8 COLLATE utf8_bin;` + tk.MustExec(testSQL) + + testSQL = `create database if not exists cd_test_latin1 CHARACTER SET latin1 COLLATE latin1_swedish_ci;` + tk.MustExec(testSQL) + + testSQL = `use cd_test_utf8;` + tk.MustExec(testSQL) + tk.MustQuery(`select @@character_set_database;`).Check(testkit.Rows("utf8")) + tk.MustQuery(`select @@collation_database;`).Check(testkit.Rows("utf8_bin")) + + testSQL = `use cd_test_latin1;` + tk.MustExec(testSQL) + tk.MustQuery(`select @@character_set_database;`).Check(testkit.Rows("latin1")) + tk.MustQuery(`select @@collation_database;`).Check(testkit.Rows("latin1_swedish_ci")) +} + +func (s *testSuite3) TestTransaction(c *C) { + tk := testkit.NewTestKit(c, s.store) + tk.MustExec("begin") + ctx := tk.Se.(sessionctx.Context) + c.Assert(inTxn(ctx), IsTrue) + tk.MustExec("commit") + c.Assert(inTxn(ctx), IsFalse) + tk.MustExec("begin") + c.Assert(inTxn(ctx), IsTrue) + tk.MustExec("rollback") + c.Assert(inTxn(ctx), IsFalse) + + // Test that begin implicitly commits previous transaction. + tk.MustExec("use test") + tk.MustExec("create table txn (a int)") + tk.MustExec("begin") + tk.MustExec("insert txn values (1)") + tk.MustExec("begin") + tk.MustExec("rollback") + tk.MustQuery("select * from txn").Check(testkit.Rows("1")) + + // Test that DDL implicitly commits previous transaction. + tk.MustExec("begin") + tk.MustExec("insert txn values (2)") + tk.MustExec("create table txn2 (a int)") + tk.MustExec("rollback") + tk.MustQuery("select * from txn").Check(testkit.Rows("1", "2")) +} + +func inTxn(ctx sessionctx.Context) bool { + return (ctx.GetSessionVars().Status & mysql.ServerStatusInTrans) > 0 +} + +func (s *testSuite3) TestUseDB(c *C) { + tk := testkit.NewTestKit(c, s.store) + _, err := tk.Exec("USE test") + c.Check(err, IsNil) + + _, err = tk.Exec("USE ``") + c.Assert(terror.ErrorEqual(core.ErrNoDB, err), IsTrue, Commentf("err %v", err)) +} diff --git a/executor/sort.go b/executor/sort.go new file mode 100644 index 0000000..84cce5b --- /dev/null +++ b/executor/sort.go @@ -0,0 +1,318 @@ +// Copyright 2017 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package executor + +import ( + "container/heap" + "context" + "sort" + + "github.com/pingcap/tidb/expression" + plannercore "github.com/pingcap/tidb/planner/core" + "github.com/pingcap/tidb/util/chunk" +) + +// SortExec represents sorting executor. +type SortExec struct { + baseExecutor + + ByItems []*plannercore.ByItems + Idx int + fetched bool + schema *expression.Schema + + // keyColumns is the column index of the by items. + keyColumns []int + // keyCmpFuncs is used to compare each ByItem. + keyCmpFuncs []chunk.CompareFunc + // rowChunks is the chunks to store row values. + rowChunks *chunk.List + // rowPointer store the chunk index and row index for each row. + rowPtrs []chunk.RowPtr +} + +// Close implements the Executor Close interface. +func (e *SortExec) Close() error { + return e.children[0].Close() +} + +// Open implements the Executor Open interface. +func (e *SortExec) Open(ctx context.Context) error { + e.fetched = false + e.Idx = 0 + return e.children[0].Open(ctx) +} + +// Next implements the Executor Next interface. +func (e *SortExec) Next(ctx context.Context, req *chunk.Chunk) error { + req.Reset() + if !e.fetched { + err := e.fetchRowChunks(ctx) + if err != nil { + return err + } + e.initPointers() + e.initCompareFuncs() + e.buildKeyColumns() + sort.Slice(e.rowPtrs, e.keyColumnsLess) + e.fetched = true + } + for !req.IsFull() && e.Idx < len(e.rowPtrs) { + rowPtr := e.rowPtrs[e.Idx] + req.AppendRow(e.rowChunks.GetRow(rowPtr)) + e.Idx++ + } + return nil +} + +func (e *SortExec) fetchRowChunks(ctx context.Context) error { + fields := retTypes(e) + e.rowChunks = chunk.NewList(fields, e.initCap, e.maxChunkSize) + for { + chk := newFirstChunk(e.children[0]) + err := Next(ctx, e.children[0], chk) + if err != nil { + return err + } + rowCount := chk.NumRows() + if rowCount == 0 { + break + } + e.rowChunks.Add(chk) + } + return nil +} + +func (e *SortExec) initPointers() { + e.rowPtrs = make([]chunk.RowPtr, 0, e.rowChunks.Len()) + for chkIdx := 0; chkIdx < e.rowChunks.NumChunks(); chkIdx++ { + rowChk := e.rowChunks.GetChunk(chkIdx) + for rowIdx := 0; rowIdx < rowChk.NumRows(); rowIdx++ { + e.rowPtrs = append(e.rowPtrs, chunk.RowPtr{ChkIdx: uint32(chkIdx), RowIdx: uint32(rowIdx)}) + } + } +} + +func (e *SortExec) initCompareFuncs() { + e.keyCmpFuncs = make([]chunk.CompareFunc, len(e.ByItems)) + for i := range e.ByItems { + keyType := e.ByItems[i].Expr.GetType() + e.keyCmpFuncs[i] = chunk.GetCompareFunc(keyType) + } +} + +func (e *SortExec) buildKeyColumns() { + e.keyColumns = make([]int, 0, len(e.ByItems)) + for _, by := range e.ByItems { + col := by.Expr.(*expression.Column) + e.keyColumns = append(e.keyColumns, col.Index) + } +} + +func (e *SortExec) lessRow(rowI, rowJ chunk.Row) bool { + for i, colIdx := range e.keyColumns { + cmpFunc := e.keyCmpFuncs[i] + cmp := cmpFunc(rowI, colIdx, rowJ, colIdx) + if e.ByItems[i].Desc { + cmp = -cmp + } + if cmp < 0 { + return true + } else if cmp > 0 { + return false + } + } + return false +} + +// keyColumnsLess is the less function for key columns. +func (e *SortExec) keyColumnsLess(i, j int) bool { + rowI := e.rowChunks.GetRow(e.rowPtrs[i]) + rowJ := e.rowChunks.GetRow(e.rowPtrs[j]) + return e.lessRow(rowI, rowJ) +} + +// TopNExec implements a Top-N algorithm and it is built from a SELECT statement with ORDER BY and LIMIT. +// Instead of sorting all the rows fetched from the table, it keeps the Top-N elements only in a heap to reduce memory usage. 
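The Top-N strategy described above keeps at most N rows in a max-heap: while fewer than N rows have been seen, push; after that, a new row only replaces the current heap maximum when it sorts before it, which is exactly why topNChunkHeap inverts Less. A minimal container/heap sketch of the same idea over ints; topN and maxHeap are illustrative names, not part of the patch.

package main

import (
	"container/heap"
	"fmt"
	"sort"
)

// maxHeap keeps the current Top-N smallest values with the largest on top so
// it can be evicted cheaply, mirroring topNChunkHeap's inverted Less.
type maxHeap []int

func (h maxHeap) Len() int            { return len(h) }
func (h maxHeap) Less(i, j int) bool  { return h[i] > h[j] } // max-heap order
func (h maxHeap) Swap(i, j int)       { h[i], h[j] = h[j], h[i] }
func (h *maxHeap) Push(x interface{}) { *h = append(*h, x.(int)) }
func (h *maxHeap) Pop() interface{} {
	old := *h
	x := old[len(old)-1]
	*h = old[:len(old)-1]
	return x
}

// topN returns the n smallest values of the stream while holding at most n
// values in memory, which is the shape of TopNExec.executeTopN above.
func topN(stream []int, n int) []int {
	h := make(maxHeap, 0, n)
	for _, v := range stream {
		switch {
		case h.Len() < n:
			heap.Push(&h, v)
		case v < h[0]: // smaller than the current maximum: evict the maximum
			h[0] = v
			heap.Fix(&h, 0)
		}
	}
	out := append([]int(nil), h...)
	sort.Ints(out)
	return out
}

func main() {
	fmt.Println(topN([]int{9, 4, 7, 1, 8, 3, 6}, 3)) // [1 3 4]
}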
+type TopNExec struct { + SortExec + limit *plannercore.PhysicalLimit + totalLimit uint64 + + chkHeap *topNChunkHeap +} + +// topNChunkHeap implements heap.Interface. +type topNChunkHeap struct { + *TopNExec +} + +// Less implement heap.Interface, but since we mantains a max heap, +// this function returns true if row i is greater than row j. +func (h *topNChunkHeap) Less(i, j int) bool { + rowI := h.rowChunks.GetRow(h.rowPtrs[i]) + rowJ := h.rowChunks.GetRow(h.rowPtrs[j]) + return h.greaterRow(rowI, rowJ) +} + +func (h *topNChunkHeap) greaterRow(rowI, rowJ chunk.Row) bool { + for i, colIdx := range h.keyColumns { + cmpFunc := h.keyCmpFuncs[i] + cmp := cmpFunc(rowI, colIdx, rowJ, colIdx) + if h.ByItems[i].Desc { + cmp = -cmp + } + if cmp > 0 { + return true + } else if cmp < 0 { + return false + } + } + return false +} + +func (h *topNChunkHeap) Len() int { + return len(h.rowPtrs) +} + +func (h *topNChunkHeap) Push(x interface{}) { + // Should never be called. +} + +func (h *topNChunkHeap) Pop() interface{} { + h.rowPtrs = h.rowPtrs[:len(h.rowPtrs)-1] + // We don't need the popped value, return nil to avoid memory allocation. + return nil +} + +func (h *topNChunkHeap) Swap(i, j int) { + h.rowPtrs[i], h.rowPtrs[j] = h.rowPtrs[j], h.rowPtrs[i] +} + +// Open implements the Executor Open interface. +func (e *TopNExec) Open(ctx context.Context) error { + return e.SortExec.Open(ctx) +} + +// Next implements the Executor Next interface. +func (e *TopNExec) Next(ctx context.Context, req *chunk.Chunk) error { + req.Reset() + if !e.fetched { + e.totalLimit = e.limit.Offset + e.limit.Count + e.Idx = int(e.limit.Offset) + err := e.loadChunksUntilTotalLimit(ctx) + if err != nil { + return err + } + err = e.executeTopN(ctx) + if err != nil { + return err + } + e.fetched = true + } + if e.Idx >= len(e.rowPtrs) { + return nil + } + for !req.IsFull() && e.Idx < len(e.rowPtrs) { + row := e.rowChunks.GetRow(e.rowPtrs[e.Idx]) + req.AppendRow(row) + e.Idx++ + } + return nil +} + +func (e *TopNExec) loadChunksUntilTotalLimit(ctx context.Context) error { + e.chkHeap = &topNChunkHeap{e} + e.rowChunks = chunk.NewList(retTypes(e), e.initCap, e.maxChunkSize) + for uint64(e.rowChunks.Len()) < e.totalLimit { + srcChk := newFirstChunk(e.children[0]) + // adjust required rows by total limit + srcChk.SetRequiredRows(int(e.totalLimit-uint64(e.rowChunks.Len())), e.maxChunkSize) + err := Next(ctx, e.children[0], srcChk) + if err != nil { + return err + } + if srcChk.NumRows() == 0 { + break + } + e.rowChunks.Add(srcChk) + } + e.initPointers() + e.initCompareFuncs() + e.buildKeyColumns() + return nil +} + +const topNCompactionFactor = 4 + +func (e *TopNExec) executeTopN(ctx context.Context) error { + heap.Init(e.chkHeap) + for uint64(len(e.rowPtrs)) > e.totalLimit { + // The number of rows we loaded may exceeds total limit, remove greatest rows by Pop. 
+ heap.Pop(e.chkHeap) + } + childRowChk := newFirstChunk(e.children[0]) + for { + err := Next(ctx, e.children[0], childRowChk) + if err != nil { + return err + } + if childRowChk.NumRows() == 0 { + break + } + err = e.processChildChk(childRowChk) + if err != nil { + return err + } + if e.rowChunks.Len() > len(e.rowPtrs)*topNCompactionFactor { + err = e.doCompaction() + if err != nil { + return err + } + } + } + sort.Slice(e.rowPtrs, e.keyColumnsLess) + return nil +} + +func (e *TopNExec) processChildChk(childRowChk *chunk.Chunk) error { + for i := 0; i < childRowChk.NumRows(); i++ { + heapMaxPtr := e.rowPtrs[0] + var heapMax, next chunk.Row + heapMax = e.rowChunks.GetRow(heapMaxPtr) + next = childRowChk.GetRow(i) + if e.chkHeap.greaterRow(heapMax, next) { + // Evict heap max, keep the next row. + e.rowPtrs[0] = e.rowChunks.AppendRow(childRowChk.GetRow(i)) + heap.Fix(e.chkHeap, 0) + } + } + return nil +} + +// doCompaction rebuild the chunks and row pointers to release memory. +// If we don't do compaction, in a extreme case like the child data is already ascending sorted +// but we want descending top N, then we will keep all data in memory. +// But if data is distributed randomly, this function will be called log(n) times. +func (e *TopNExec) doCompaction() error { + newRowChunks := chunk.NewList(retTypes(e), e.initCap, e.maxChunkSize) + newRowPtrs := make([]chunk.RowPtr, 0, e.rowChunks.Len()) + for _, rowPtr := range e.rowPtrs { + newRowPtr := newRowChunks.AppendRow(e.rowChunks.GetRow(rowPtr)) + newRowPtrs = append(newRowPtrs, newRowPtr) + } + e.rowChunks = newRowChunks + e.rowPtrs = newRowPtrs + return nil +} diff --git a/executor/table_reader.go b/executor/table_reader.go new file mode 100644 index 0000000..12256c9 --- /dev/null +++ b/executor/table_reader.go @@ -0,0 +1,167 @@ +// Copyright 2018 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package executor + +import ( + "context" + "github.com/pingcap/tidb/distsql" + "github.com/pingcap/tidb/kv" + "github.com/pingcap/tidb/parser/model" + plannercore "github.com/pingcap/tidb/planner/core" + "github.com/pingcap/tidb/table" + "github.com/pingcap/tidb/util/chunk" + "github.com/pingcap/tidb/util/ranger" + "github.com/pingcap/tipb/go-tipb" +) + +// make sure `TableReaderExecutor` implements `Executor`. +var _ Executor = &TableReaderExecutor{} + +// TableReaderExecutor sends DAG request and reads table data from kv layer. +type TableReaderExecutor struct { + baseExecutor + + table table.Table + ranges []*ranger.Range + // kvRanges are only use for union scan. + kvRanges []kv.KeyRange + dagPB *tipb.DAGRequest + startTS uint64 + // columns are only required by union scan and virtual column. + columns []*model.ColumnInfo + + // resultHandler handles the order of the result. Since (MAXInt64, MAXUint64] stores before [0, MaxInt64] physically + // for unsigned int. + resultHandler *tableResultHandler + plans []plannercore.PhysicalPlan + + keepOrder bool + desc bool +} + +// Open initialzes necessary variables for using this executor. 
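The resultHandler comment above rests on one encoding fact: row handles are stored as signed int64, so unsigned handles above MaxInt64 reinterpret as negative values and sort first in the key space. A small self-contained sketch of that reinterpretation; the slice of handles is illustrative.

package main

import (
	"fmt"
	"math"
	"sort"
)

func main() {
	// Unsigned handles, listed in their logical (unsigned) order.
	handles := []uint64{0, 1, math.MaxInt64, math.MaxInt64 + 1, math.MaxUint64}

	// The row key encodes the handle as a signed int64, so values above
	// MaxInt64 wrap to negative numbers and sort first in the key space.
	asStored := append([]uint64(nil), handles...)
	sort.Slice(asStored, func(i, j int) bool {
		return int64(asStored[i]) < int64(asStored[j])
	})

	for _, h := range asStored {
		fmt.Printf("unsigned %20d  stored as int64 %20d\n", h, int64(h))
	}
	// Stored order: MaxInt64+1, MaxUint64, 0, 1, MaxInt64. This is why the
	// handler splits an unsigned-handle scan into two requests: an ascending
	// keep-order scan drains [0, MaxInt64] first and (MaxInt64, MaxUint64]
	// second, even though the latter range sits first physically.
}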
+func (e *TableReaderExecutor) Open(ctx context.Context) error {
+	e.resultHandler = &tableResultHandler{}
+	firstPartRanges, secondPartRanges := splitRanges(e.ranges, e.keepOrder, e.desc)
+	firstResult, err := e.buildResp(ctx, firstPartRanges)
+	if err != nil {
+		return err
+	}
+	if len(secondPartRanges) == 0 {
+		e.resultHandler.open(nil, firstResult)
+		return nil
+	}
+	var secondResult distsql.SelectResult
+	secondResult, err = e.buildResp(ctx, secondPartRanges)
+	if err != nil {
+		return err
+	}
+	e.resultHandler.open(firstResult, secondResult)
+	return nil
+}
+
+// Next fills data into the chunk passed by its caller.
+// The work is actually done by the tableResultHandler.
+func (e *TableReaderExecutor) Next(ctx context.Context, req *chunk.Chunk) error {
+	return e.resultHandler.nextChunk(ctx, req)
+}
+
+// Close implements the Executor Close interface.
+func (e *TableReaderExecutor) Close() error {
+	var err error
+	if e.resultHandler != nil {
+		err = e.resultHandler.Close()
+	}
+	return err
+}
+
+// buildResp first builds the request and sends it to tikv using distsql.Select. It uses the SelectResult
+// returned by the callee to fetch all results.
+func (e *TableReaderExecutor) buildResp(ctx context.Context, ranges []*ranger.Range) (distsql.SelectResult, error) {
+	var builder distsql.RequestBuilder
+	kvReq, err := builder.SetTableRanges(getPhysicalTableID(e.table), ranges).
+		SetDAGRequest(e.dagPB).
+		SetStartTS(e.startTS).
+		SetDesc(e.desc).
+		SetKeepOrder(e.keepOrder).
+		SetFromSessionVars(e.ctx.GetSessionVars()).
+		Build()
+	if err != nil {
+		return nil, err
+	}
+	e.kvRanges = append(e.kvRanges, kvReq.KeyRanges...)
+	return distsql.Select(ctx, e.ctx, kvReq, retTypes(e))
+}
+
+type tableResultHandler struct {
+	// If the pk is unsigned, KeepOrder is true and ascending order is wanted,
+	// `optionalResult` handles the requests whose ranges are within the signed int range, and
+	// `result` handles the requests whose ranges exceed the signed int range.
+	// If descending order is wanted, `optionalResult` handles the requests whose ranges exceed the
+	// signed int range, and `result` handles the requests whose ranges are within the signed int range.
+	// Otherwise, we just set `optionalFinished` to true and let `result` handle the whole range.
+ optionalResult distsql.SelectResult + result distsql.SelectResult + + optionalFinished bool +} + +func (tr *tableResultHandler) open(optionalResult, result distsql.SelectResult) { + if optionalResult == nil { + tr.optionalFinished = true + tr.result = result + return + } + tr.optionalResult = optionalResult + tr.result = result + tr.optionalFinished = false +} + +func (tr *tableResultHandler) nextChunk(ctx context.Context, chk *chunk.Chunk) error { + if !tr.optionalFinished { + err := tr.optionalResult.Next(ctx, chk) + if err != nil { + return err + } + if chk.NumRows() > 0 { + return nil + } + tr.optionalFinished = true + } + return tr.result.Next(ctx, chk) +} + +func (tr *tableResultHandler) nextRaw(ctx context.Context) (data []byte, err error) { + if !tr.optionalFinished { + data, err = tr.optionalResult.NextRaw(ctx) + if err != nil { + return nil, err + } + if data != nil { + return data, nil + } + tr.optionalFinished = true + } + data, err = tr.result.NextRaw(ctx) + if err != nil { + return nil, err + } + return data, nil +} + +func (tr *tableResultHandler) Close() error { + err := closeAll(tr.optionalResult, tr.result) + tr.optionalResult, tr.result = nil, nil + return err +} diff --git a/executor/testdata/agg_suite_in.json b/executor/testdata/agg_suite_in.json new file mode 100644 index 0000000..472e5aa --- /dev/null +++ b/executor/testdata/agg_suite_in.json @@ -0,0 +1,12 @@ +[ + { + "name": "TestInjectProjBelowTopN", + "cases": [ + "explain select * from t order by i + 1", + "select * from t order by i + 1", + "explain select * from t order by i + 1 limit 2", + "select * from t order by i + 1 limit 2", + "select i, i, i from t order by i + 1" + ] + } +] diff --git a/executor/testdata/agg_suite_out.json b/executor/testdata/agg_suite_out.json new file mode 100644 index 0000000..d234b50 --- /dev/null +++ b/executor/testdata/agg_suite_out.json @@ -0,0 +1,48 @@ +[ + { + "Name": "TestInjectProjBelowTopN", + "Cases": [ + [ + "Projection_8 10000.00 root test.t.i", + "└─Sort_4 10000.00 root Column#3:asc", + " └─Projection_9 10000.00 root test.t.i, plus(test.t.i, 1)->Column#3", + " └─TableReader_7 10000.00 root data:TableScan_6", + " └─TableScan_6 10000.00 cop table:t, range:[-inf,+inf], keep order:false, stats:pseudo" + ], + [ + "1", + "1", + "1", + "2", + "2", + "2", + "3", + "3", + "3" + ], + [ + "Projection_15 2.00 root test.t.i", + "└─TopN_7 2.00 root Column#3:asc, offset:0, count:2", + " └─Projection_16 2.00 root test.t.i, plus(test.t.i, 1)->Column#3", + " └─TableReader_12 2.00 root data:TopN_11", + " └─TopN_11 2.00 cop plus(test.t.i, 1):asc, offset:0, count:2", + " └─TableScan_10 10000.00 cop table:t, range:[-inf,+inf], keep order:false, stats:pseudo" + ], + [ + "1", + "1" + ], + [ + "1 1 1", + "1 1 1", + "1 1 1", + "2 2 2", + "2 2 2", + "2 2 2", + "3 3 3", + "3 3 3", + "3 3 3" + ] + ] + } +] diff --git a/executor/union_scan.go b/executor/union_scan.go new file mode 100644 index 0000000..d371471 --- /dev/null +++ b/executor/union_scan.go @@ -0,0 +1,300 @@ +// Copyright 2016 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. 
+
+package executor
+
+import (
+	"context"
+	"sync"
+
+	"github.com/pingcap/errors"
+	"github.com/pingcap/tidb/expression"
+	"github.com/pingcap/tidb/parser/model"
+	"github.com/pingcap/tidb/sessionctx"
+	"github.com/pingcap/tidb/table"
+	"github.com/pingcap/tidb/types"
+	"github.com/pingcap/tidb/util/chunk"
+)
+
+// DirtyDB stores uncommitted write operations for a transaction.
+// It is stored and retrieved by the context.Value and context.SetValue methods.
+type DirtyDB struct {
+	sync.Mutex
+
+	// tables is a map whose key is tableID.
+	tables map[int64]*DirtyTable
+}
+
+// GetDirtyTable gets the DirtyTable by id from the DirtyDB.
+func (udb *DirtyDB) GetDirtyTable(tid int64) *DirtyTable {
+	// The index join accesses the tables map in parallel,
+	// and concurrent map access would panic, so the map is locked here.
+	udb.Lock()
+	dt, ok := udb.tables[tid]
+	if !ok {
+		dt = &DirtyTable{
+			tid:         tid,
+			addedRows:   make(map[int64]struct{}),
+			deletedRows: make(map[int64]struct{}),
+		}
+		udb.tables[tid] = dt
+	}
+	udb.Unlock()
+	return dt
+}
+
+// DirtyTable stores uncommitted write operations for a transaction.
+type DirtyTable struct {
+	tid int64
+	// addedRows is the set of rows added in this transaction;
+	// the key is the row handle.
+	addedRows   map[int64]struct{}
+	deletedRows map[int64]struct{}
+}
+
+// AddRow adds a row to the DirtyDB.
+func (dt *DirtyTable) AddRow(handle int64) {
+	dt.addedRows[handle] = struct{}{}
+}
+
+// DeleteRow deletes a row from the DirtyDB.
+func (dt *DirtyTable) DeleteRow(handle int64) {
+	delete(dt.addedRows, handle)
+	dt.deletedRows[handle] = struct{}{}
+}
+
+// GetDirtyDB returns the DirtyDB bound to the context.
+func GetDirtyDB(ctx sessionctx.Context) *DirtyDB {
+	var udb *DirtyDB
+	x := ctx.GetSessionVars().TxnCtx.DirtyDB
+	if x == nil {
+		udb = &DirtyDB{tables: make(map[int64]*DirtyTable)}
+		ctx.GetSessionVars().TxnCtx.DirtyDB = udb
+	} else {
+		udb = x.(*DirtyDB)
+	}
+	return udb
+}
+
+// UnionScanExec merges the rows from the dirty table and the rows from the distsql request.
+type UnionScanExec struct {
+	baseExecutor
+
+	dirty *DirtyTable
+	// usedIndex is the column offsets of the index which the Src executor has used.
+	usedIndex  []int
+	desc       bool
+	conditions []expression.Expression
+	columns    []*model.ColumnInfo
+	table      table.Table
+	// belowHandleIndex is the handle's position of the below scan plan.
+	belowHandleIndex int
+
+	addedRows           [][]types.Datum
+	cursor4AddRows      int
+	sortErr             error
+	snapshotRows        [][]types.Datum
+	cursor4SnapshotRows int
+	snapshotChunkBuffer *chunk.Chunk
+	mutableRow          chunk.MutRow
+}
+
+// Open implements the Executor Open interface.
+func (us *UnionScanExec) Open(ctx context.Context) error {
+	if err := us.baseExecutor.Open(ctx); err != nil {
+		return err
+	}
+	return us.open(ctx)
+}
+
+func (us *UnionScanExec) open(ctx context.Context) error {
+	var err error
+	reader := us.children[0]
+	switch x := reader.(type) {
+	case *TableReaderExecutor:
+		us.addedRows, err = buildMemTableReader(us, x).getMemRows()
+	case *IndexReaderExecutor:
+		mIdxReader := buildMemIndexReader(us, x)
+		us.addedRows, err = mIdxReader.getMemRows()
+	case *IndexLookUpExecutor:
+		idxLookup := buildMemIndexLookUpReader(us, x)
+		us.addedRows, err = idxLookup.getMemRows()
+	}
+	if err != nil {
+		return err
+	}
+	us.snapshotChunkBuffer = newFirstChunk(us)
+	return nil
+}
+
+// Next implements the Executor Next interface.
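+// It merges the uncommitted rows recorded in the DirtyTable with the snapshot rows produced by the
+// child executor, emitting them in the child's order (by the used index columns and the handle).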
+func (us *UnionScanExec) Next(ctx context.Context, req *chunk.Chunk) error { + req.GrowAndReset(us.maxChunkSize) + mutableRow := chunk.MutRowFromTypes(retTypes(us)) + for i, batchSize := 0, req.Capacity(); i < batchSize; i++ { + row, err := us.getOneRow(ctx) + if err != nil { + return err + } + // no more data. + if row == nil { + return nil + } + mutableRow.SetDatums(row...) + req.AppendRow(mutableRow.ToRow()) + } + return nil +} + +// getOneRow gets one result row from dirty table or child. +func (us *UnionScanExec) getOneRow(ctx context.Context) ([]types.Datum, error) { + for { + snapshotRow, err := us.getSnapshotRow(ctx) + if err != nil { + return nil, err + } + addedRow := us.getAddedRow() + var row []types.Datum + var isSnapshotRow bool + if addedRow == nil { + row = snapshotRow + isSnapshotRow = true + } else if snapshotRow == nil { + row = addedRow + } else { + isSnapshotRow, err = us.shouldPickFirstRow(snapshotRow, addedRow) + if err != nil { + return nil, err + } + if isSnapshotRow { + row = snapshotRow + } else { + row = addedRow + } + } + if row == nil { + return nil, nil + } + + if isSnapshotRow { + us.cursor4SnapshotRows++ + } else { + us.cursor4AddRows++ + } + return row, nil + } +} + +func (us *UnionScanExec) getSnapshotRow(ctx context.Context) ([]types.Datum, error) { + if us.cursor4SnapshotRows < len(us.snapshotRows) { + return us.snapshotRows[us.cursor4SnapshotRows], nil + } + var err error + us.cursor4SnapshotRows = 0 + us.snapshotRows = us.snapshotRows[:0] + for len(us.snapshotRows) == 0 { + err = Next(ctx, us.children[0], us.snapshotChunkBuffer) + if err != nil || us.snapshotChunkBuffer.NumRows() == 0 { + return nil, err + } + iter := chunk.NewIterator4Chunk(us.snapshotChunkBuffer) + for row := iter.Begin(); row != iter.End(); row = iter.Next() { + snapshotHandle := row.GetInt64(us.belowHandleIndex) + if _, ok := us.dirty.deletedRows[snapshotHandle]; ok { + continue + } + if _, ok := us.dirty.addedRows[snapshotHandle]; ok { + // If src handle appears in added rows, it means there is conflict and the transaction will fail to + // commit, but for simplicity, we don't handle it here. + continue + } + us.snapshotRows = append(us.snapshotRows, row.GetDatumRow(retTypes(us.children[0]))) + } + } + return us.snapshotRows[0], nil +} + +func (us *UnionScanExec) getAddedRow() []types.Datum { + var addedRow []types.Datum + if us.cursor4AddRows < len(us.addedRows) { + addedRow = us.addedRows[us.cursor4AddRows] + } + return addedRow +} + +// shouldPickFirstRow picks the suitable row in order. +// The value returned is used to determine whether to pick the first input row. +func (us *UnionScanExec) shouldPickFirstRow(a, b []types.Datum) (bool, error) { + var isFirstRow bool + addedCmpSrc, err := us.compare(a, b) + if err != nil { + return isFirstRow, err + } + // Compare result will never be 0. 
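+	// (A snapshot row whose handle also appears in addedRows is skipped in getSnapshotRow, so the
+	// final handle comparison in compare always breaks the tie.)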
+ if us.desc { + if addedCmpSrc > 0 { + isFirstRow = true + } + } else { + if addedCmpSrc < 0 { + isFirstRow = true + } + } + return isFirstRow, nil +} + +func (us *UnionScanExec) compare(a, b []types.Datum) (int, error) { + sc := us.ctx.GetSessionVars().StmtCtx + for _, colOff := range us.usedIndex { + aColumn := a[colOff] + bColumn := b[colOff] + cmp, err := aColumn.CompareDatum(sc, &bColumn) + if err != nil { + return 0, err + } + if cmp != 0 { + return cmp, nil + } + } + aHandle := a[us.belowHandleIndex].GetInt64() + bHandle := b[us.belowHandleIndex].GetInt64() + var cmp int + if aHandle == bHandle { + cmp = 0 + } else if aHandle > bHandle { + cmp = 1 + } else { + cmp = -1 + } + return cmp, nil +} + +// Len implements sort.Interface interface. +func (us *UnionScanExec) Len() int { + return len(us.addedRows) +} + +// Less implements sort.Interface interface. +func (us *UnionScanExec) Less(i, j int) bool { + cmp, err := us.compare(us.addedRows[i], us.addedRows[j]) + if err != nil { + us.sortErr = errors.Trace(err) + return true + } + return cmp < 0 +} + +// Swap implements sort.Interface interface. +func (us *UnionScanExec) Swap(i, j int) { + us.addedRows[i], us.addedRows[j] = us.addedRows[j], us.addedRows[i] +} diff --git a/executor/union_scan_test.go b/executor/union_scan_test.go new file mode 100644 index 0000000..41fe217 --- /dev/null +++ b/executor/union_scan_test.go @@ -0,0 +1,100 @@ +// Copyright 2016 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package executor_test + +import ( + . "github.com/pingcap/check" + "github.com/pingcap/tidb/util/testkit" +) + +func (s *testSuite7) TestDirtyTransaction(c *C) { + tk := testkit.NewTestKit(c, s.store) + tk.MustExec("use test") + tk.MustExec("drop table if exists t") + tk.MustExec("create table t (a int primary key, b int, index idx_b (b));") + tk.MustExec("insert t value (2, 3), (4, 8), (6, 8)") + tk.MustExec("begin") + tk.MustQuery("select * from t").Check(testkit.Rows("2 3", "4 8", "6 8")) + tk.MustExec("insert t values (1, 5), (3, 4), (7, 6)") + tk.MustQuery("select * from information_schema.columns") + tk.MustQuery("select * from t").Check(testkit.Rows("1 5", "2 3", "3 4", "4 8", "6 8", "7 6")) + tk.MustQuery("select * from t where a = 1").Check(testkit.Rows("1 5")) + tk.MustQuery("select * from t order by a desc").Check(testkit.Rows("7 6", "6 8", "4 8", "3 4", "2 3", "1 5")) + tk.MustQuery("select * from t order by b, a").Check(testkit.Rows("2 3", "3 4", "1 5", "7 6", "4 8", "6 8")) + tk.MustQuery("select * from t order by b desc, a desc").Check(testkit.Rows("6 8", "4 8", "7 6", "1 5", "3 4", "2 3")) + tk.MustQuery("select b from t where b = 8 order by b desc").Check(testkit.Rows("8", "8")) + // Delete a snapshot row and a dirty row. 
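+	// Here a = 2 is a snapshot row committed before the transaction began, while a = 3 exists only
+	// in the transaction's uncommitted buffer.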
+ tk.MustExec("delete from t where a = 2 or a = 3") + tk.MustQuery("select * from t").Check(testkit.Rows("1 5", "4 8", "6 8", "7 6")) + tk.MustQuery("select * from t order by a desc").Check(testkit.Rows("7 6", "6 8", "4 8", "1 5")) + tk.MustQuery("select * from t order by b, a").Check(testkit.Rows("1 5", "7 6", "4 8", "6 8")) + tk.MustQuery("select * from t order by b desc, a desc").Check(testkit.Rows("6 8", "4 8", "7 6", "1 5")) + // Add deleted row back. + tk.MustExec("insert t values (2, 3), (3, 4)") + tk.MustQuery("select * from t").Check(testkit.Rows("1 5", "2 3", "3 4", "4 8", "6 8", "7 6")) + tk.MustQuery("select * from t order by a desc").Check(testkit.Rows("7 6", "6 8", "4 8", "3 4", "2 3", "1 5")) + tk.MustQuery("select * from t order by b, a").Check(testkit.Rows("2 3", "3 4", "1 5", "7 6", "4 8", "6 8")) + tk.MustQuery("select * from t order by b desc, a desc").Check(testkit.Rows("6 8", "4 8", "7 6", "1 5", "3 4", "2 3")) + + tk.MustExec("drop table if exists t") + tk.MustExec("create table t (a int, b int)") + tk.MustExec("insert t values (2, 3), (4, 5), (6, 7)") + tk.MustExec("begin") + tk.MustExec("insert t values (0, 1)") + tk.MustQuery("select * from t where b = 3").Check(testkit.Rows("2 3")) + tk.MustExec("commit") + + tk.MustExec(`drop table if exists t`) + tk.MustExec("create table t(a int, b int, c int, d int, index idx(c, d))") + tk.MustExec("begin") + tk.MustExec("insert into t values(1, 2, 3, 4)") + tk.MustQuery("select * from t use index(idx) where c > 1 and d = 4").Check(testkit.Rows("1 2 3 4")) + tk.MustExec("commit") +} + +func (s *testSuite7) TestUnionScanForMemBufferReader(c *C) { + tk := testkit.NewTestKit(c, s.store) + tk.MustExec("use test") + tk.MustExec("drop table if exists t") + tk.MustExec("create table t (a int,b int, index idx(b))") + tk.MustExec("insert t values (1,1),(2,2)") + + // Test for delete in union scan + tk.MustExec("begin") + tk.MustExec("delete from t") + tk.MustQuery("select * from t").Check(testkit.Rows()) + tk.MustExec("insert t values (1,1)") + tk.MustQuery("select a,b from t").Check(testkit.Rows("1 1")) + tk.MustQuery("select a,b from t use index(idx)").Check(testkit.Rows("1 1")) + tk.MustExec("commit") + + // Test index reader corner case. + tk.MustExec("drop table if exists t1") + tk.MustExec("create table t1 (a int,b int,primary key(a,b));") + tk.MustExec("begin;") + tk.MustExec("insert into t1 values(1, 1);") + tk.MustQuery("select * from t1 use index(primary) where a=1;").Check(testkit.Rows("1 1")) + tk.MustExec("commit") + + // Test index reader with pk handle. + tk.MustExec("drop table if exists t1") + tk.MustExec("create table t1 (a int unsigned key,b int,c varchar(10), index idx(b,a,c));") + tk.MustExec("begin;") + tk.MustExec("insert into t1 (a,b) values (0, 0), (1, 1);") + tk.MustQuery("select a,b from t1 use index(idx) where b>0;").Check(testkit.Rows("1 1")) + tk.MustQuery("select a,b,c from t1 ignore index(idx) where a>=1 order by a desc").Check(testkit.Rows("1 1 ")) + tk.MustExec("insert into t1 values (2, 2, null), (3, 3, 'a');") + tk.MustQuery("select a,b from t1 use index(idx) where b>1 and c is not null;").Check(testkit.Rows("3 3")) + tk.MustExec("commit") +} diff --git a/executor/write.go b/executor/write.go new file mode 100644 index 0000000..3b9f9e9 --- /dev/null +++ b/executor/write.go @@ -0,0 +1,35 @@ +// Copyright 2016 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package executor + +import ( + "github.com/pingcap/tidb/types" + "github.com/pingcap/tidb/util/logutil" + "go.uber.org/zap" +) + +var ( + _ Executor = &DeleteExec{} + _ Executor = &InsertExec{} + _ Executor = &ReplaceExec{} +) + +// resetErrDataTooLong reset ErrDataTooLong error msg. +// types.ErrDataTooLong is produced in types.ProduceStrWithSpecifiedTp, there is no column info in there, +// so we reset the error msg here, and wrap old err with errors.Wrap. +func resetErrDataTooLong(colName string, rowIdx int, err error) error { + newErr := types.ErrDataTooLong.GenWithStack("Data too long for column '%v' at row %v", colName, rowIdx) + logutil.BgLogger().Error("data too long for column", zap.String("colName", colName), zap.Int("rowIndex", rowIdx)) + return newErr +} diff --git a/executor/write_test.go b/executor/write_test.go new file mode 100644 index 0000000..e9a52e4 --- /dev/null +++ b/executor/write_test.go @@ -0,0 +1,521 @@ +// Copyright 2016 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package executor_test + +import ( + "context" + "errors" + "fmt" + + . 
"github.com/pingcap/check" + "github.com/pingcap/tidb/parser/model" + "github.com/pingcap/tidb/table/tables" + "github.com/pingcap/tidb/types" + "github.com/pingcap/tidb/util/mock" + "github.com/pingcap/tidb/util/testkit" +) + +type testBypassSuite struct{} + +func (s *testBypassSuite) SetUpSuite(c *C) { +} + +func (s *testSuite4) TestInsert(c *C) { + tk := testkit.NewTestKit(c, s.store) + tk.MustExec("use test") + testSQL := `drop table if exists insert_test;create table insert_test (id int PRIMARY KEY AUTO_INCREMENT, c1 int, c2 int, c3 int default 1);` + tk.MustExec(testSQL) + testSQL = `insert insert_test (c1) values (1),(2),(NULL);` + tk.MustExec(testSQL) + + errInsertSelectSQL := `insert insert_test (c1) values ();` + tk.MustExec("begin") + _, err := tk.Exec(errInsertSelectSQL) + c.Assert(err, NotNil) + tk.MustExec("rollback") + + errInsertSelectSQL = `insert insert_test (c1, c2) values (1,2),(1);` + tk.MustExec("begin") + _, err = tk.Exec(errInsertSelectSQL) + c.Assert(err, NotNil) + tk.MustExec("rollback") + + errInsertSelectSQL = `insert insert_test (xxx) values (3);` + tk.MustExec("begin") + _, err = tk.Exec(errInsertSelectSQL) + c.Assert(err, NotNil) + tk.MustExec("rollback") + + errInsertSelectSQL = `insert insert_test_xxx (c1) values ();` + tk.MustExec("begin") + _, err = tk.Exec(errInsertSelectSQL) + c.Assert(err, NotNil) + tk.MustExec("rollback") + + insertSetSQL := `insert insert_test set c1 = 3;` + tk.MustExec(insertSetSQL) + + errInsertSelectSQL = `insert insert_test set c1 = 4, c1 = 5;` + tk.MustExec("begin") + _, err = tk.Exec(errInsertSelectSQL) + c.Assert(err, NotNil) + tk.MustExec("rollback") + + errInsertSelectSQL = `insert insert_test set xxx = 6;` + tk.MustExec("begin") + _, err = tk.Exec(errInsertSelectSQL) + c.Assert(err, NotNil) + tk.MustExec("rollback") + + insertSelectSQL := `create table insert_test_1 (id int, c1 int);` + tk.MustExec(insertSelectSQL) + insertSelectSQL = `insert insert_test_1 select id, c1 from insert_test;` + tk.MustExec(insertSelectSQL) + + errInsertSelectSQL = `insert insert_test_1 select c1 from insert_test;` + tk.MustExec("begin") + _, err = tk.Exec(errInsertSelectSQL) + c.Assert(err, NotNil) + tk.MustExec("rollback") + + errInsertSelectSQL = `insert insert_test_1 values(default, default, default, default, default)` + tk.MustExec("begin") + _, err = tk.Exec(errInsertSelectSQL) + c.Assert(err, NotNil) + tk.MustExec("rollback") + + tk.MustExec("create table insert_err (id int, c1 varchar(8))") + _, err = tk.Exec("insert insert_err values (1, 'abcdabcdabcd')") + c.Assert(types.ErrDataTooLong.Equal(err), IsTrue) + _, err = tk.Exec("insert insert_err values (1, '你好,世界')") + c.Assert(err, IsNil) + + tk.MustExec("drop table if exists t") + tk.MustExec("create table t(c binary(255))") + _, err = tk.Exec("insert into t value(1)") + c.Assert(err, IsNil) + r := tk.MustQuery("select length(c) from t;") + r.Check(testkit.Rows("255")) + + tk.MustExec("drop table if exists t") + tk.MustExec("create table t(c varbinary(255))") + _, err = tk.Exec("insert into t value(1)") + c.Assert(err, IsNil) + r = tk.MustQuery("select length(c) from t;") + r.Check(testkit.Rows("1")) + + // issue 3832 + tk.MustExec("create table t1 (b char(0));") + _, err = tk.Exec(`insert into t1 values ("");`) + c.Assert(err, IsNil) + + // test auto_increment with unsigned. 
+ tk.MustExec("drop table if exists test") + tk.MustExec("CREATE TABLE test(id int(10) UNSIGNED NOT NULL AUTO_INCREMENT, p int(10) UNSIGNED NOT NULL, PRIMARY KEY(p), KEY(id))") + tk.MustExec("insert into test(p) value(1)") + tk.MustQuery("select * from test").Check(testkit.Rows("1 1")) + tk.MustQuery("select * from test use index (id) where id = 1").Check(testkit.Rows("1 1")) + tk.MustExec("insert into test values(NULL, 2)") + tk.MustQuery("select * from test use index (id) where id = 2").Check(testkit.Rows("2 2")) + tk.MustExec("insert into test values(2, 3)") + tk.MustQuery("select * from test use index (id) where id = 2").Check(testkit.Rows("2 2", "2 3")) + + tk.MustExec("drop table if exists t") + tk.MustExec("set @@sql_mode=''") + tk.MustExec("create table t(a float unsigned, b double unsigned)") + tk.MustExec("insert into t value(-1.1, -1.1), (-2.1, -2.1), (0, 0), (1.1, 1.1)") + tk.MustQuery("show warnings"). + Check(testkit.Rows("Warning 1690 constant -1.1 overflows float", "Warning 1690 constant -1.1 overflows double", + "Warning 1690 constant -2.1 overflows float", "Warning 1690 constant -2.1 overflows double")) + tk.MustQuery("select * from t").Check(testkit.Rows("0 0", "0 0", "0 0", "1.1 1.1")) + + // issue 7061 + tk.MustExec("drop table if exists t") + tk.MustExec("create table t(a int default 1, b int default 2)") + tk.MustExec("insert into t values(default, default)") + tk.MustQuery("select * from t").Check(testkit.Rows("1 2")) + tk.MustExec("delete from t") + tk.MustExec("insert into t values(default(b), default(a))") + tk.MustQuery("select * from t").Check(testkit.Rows("2 1")) + tk.MustExec("delete from t") + tk.MustExec("insert into t (b) values(default)") + tk.MustQuery("select * from t").Check(testkit.Rows("1 2")) + tk.MustExec("delete from t") + tk.MustExec("insert into t (b) values(default(a))") + tk.MustQuery("select * from t").Check(testkit.Rows("1 1")) +} + +func (s *testSuite4) TestInsertAutoInc(c *C) { + tk := testkit.NewTestKit(c, s.store) + tk.MustExec("use test") + createSQL := `drop table if exists insert_autoinc_test; create table insert_autoinc_test (id int primary key auto_increment, c1 int);` + tk.MustExec(createSQL) + + insertSQL := `insert into insert_autoinc_test(c1) values (1), (2)` + tk.MustExec(insertSQL) + tk.MustExec("begin") + r := tk.MustQuery("select * from insert_autoinc_test;") + rowStr1 := fmt.Sprintf("%v %v", "1", "1") + rowStr2 := fmt.Sprintf("%v %v", "2", "2") + r.Check(testkit.Rows(rowStr1, rowStr2)) + tk.MustExec("commit") + + tk.MustExec("begin") + insertSQL = `insert into insert_autoinc_test(id, c1) values (5,5)` + tk.MustExec(insertSQL) + insertSQL = `insert into insert_autoinc_test(c1) values (6)` + tk.MustExec(insertSQL) + tk.MustExec("commit") + tk.MustExec("begin") + r = tk.MustQuery("select * from insert_autoinc_test;") + rowStr3 := fmt.Sprintf("%v %v", "5", "5") + rowStr4 := fmt.Sprintf("%v %v", "6", "6") + r.Check(testkit.Rows(rowStr1, rowStr2, rowStr3, rowStr4)) + tk.MustExec("commit") + + tk.MustExec("begin") + insertSQL = `insert into insert_autoinc_test(id, c1) values (3,3)` + tk.MustExec(insertSQL) + tk.MustExec("commit") + tk.MustExec("begin") + r = tk.MustQuery("select * from insert_autoinc_test;") + rowStr5 := fmt.Sprintf("%v %v", "3", "3") + r.Check(testkit.Rows(rowStr1, rowStr2, rowStr5, rowStr3, rowStr4)) + tk.MustExec("commit") + + tk.MustExec("begin") + insertSQL = `insert into insert_autoinc_test(c1) values (7)` + tk.MustExec(insertSQL) + tk.MustExec("commit") + tk.MustExec("begin") + r = tk.MustQuery("select * 
from insert_autoinc_test;") + rowStr6 := fmt.Sprintf("%v %v", "7", "7") + r.Check(testkit.Rows(rowStr1, rowStr2, rowStr5, rowStr3, rowStr4, rowStr6)) + tk.MustExec("commit") + + // issue-962 + createSQL = `drop table if exists insert_autoinc_test; create table insert_autoinc_test (id int primary key auto_increment, c1 int);` + tk.MustExec(createSQL) + insertSQL = `insert into insert_autoinc_test(id, c1) values (0.3, 1)` + tk.MustExec(insertSQL) + r = tk.MustQuery("select * from insert_autoinc_test;") + rowStr1 = fmt.Sprintf("%v %v", "1", "1") + r.Check(testkit.Rows(rowStr1)) + insertSQL = `insert into insert_autoinc_test(id, c1) values (-0.3, 2)` + tk.MustExec(insertSQL) + r = tk.MustQuery("select * from insert_autoinc_test;") + rowStr2 = fmt.Sprintf("%v %v", "2", "2") + r.Check(testkit.Rows(rowStr1, rowStr2)) + insertSQL = `insert into insert_autoinc_test(id, c1) values (-3.3, 3)` + tk.MustExec(insertSQL) + r = tk.MustQuery("select * from insert_autoinc_test;") + rowStr3 = fmt.Sprintf("%v %v", "-3", "3") + r.Check(testkit.Rows(rowStr3, rowStr1, rowStr2)) + insertSQL = `insert into insert_autoinc_test(id, c1) values (4.3, 4)` + tk.MustExec(insertSQL) + r = tk.MustQuery("select * from insert_autoinc_test;") + rowStr4 = fmt.Sprintf("%v %v", "4", "4") + r.Check(testkit.Rows(rowStr3, rowStr1, rowStr2, rowStr4)) + insertSQL = `insert into insert_autoinc_test(c1) values (5)` + tk.MustExec(insertSQL) + r = tk.MustQuery("select * from insert_autoinc_test;") + rowStr5 = fmt.Sprintf("%v %v", "5", "5") + r.Check(testkit.Rows(rowStr3, rowStr1, rowStr2, rowStr4, rowStr5)) + insertSQL = `insert into insert_autoinc_test(id, c1) values (null, 6)` + tk.MustExec(insertSQL) + r = tk.MustQuery("select * from insert_autoinc_test;") + rowStr6 = fmt.Sprintf("%v %v", "6", "6") + r.Check(testkit.Rows(rowStr3, rowStr1, rowStr2, rowStr4, rowStr5, rowStr6)) + + // SQL_MODE=NO_AUTO_VALUE_ON_ZERO + createSQL = `drop table if exists insert_autoinc_test; create table insert_autoinc_test (id int primary key auto_increment, c1 int);` + tk.MustExec(createSQL) + insertSQL = `insert into insert_autoinc_test(id, c1) values (5, 1)` + tk.MustExec(insertSQL) + r = tk.MustQuery("select * from insert_autoinc_test;") + rowStr1 = fmt.Sprintf("%v %v", "5", "1") + r.Check(testkit.Rows(rowStr1)) + insertSQL = `insert into insert_autoinc_test(id, c1) values (0, 2)` + tk.MustExec(insertSQL) + r = tk.MustQuery("select * from insert_autoinc_test;") + rowStr2 = fmt.Sprintf("%v %v", "6", "2") + r.Check(testkit.Rows(rowStr1, rowStr2)) + insertSQL = `insert into insert_autoinc_test(id, c1) values (0, 3)` + tk.MustExec(insertSQL) + r = tk.MustQuery("select * from insert_autoinc_test;") + rowStr3 = fmt.Sprintf("%v %v", "7", "3") + r.Check(testkit.Rows(rowStr1, rowStr2, rowStr3)) + tk.MustExec("set SQL_MODE=NO_AUTO_VALUE_ON_ZERO") + insertSQL = `insert into insert_autoinc_test(id, c1) values (0, 4)` + tk.MustExec(insertSQL) + r = tk.MustQuery("select * from insert_autoinc_test;") + rowStr4 = fmt.Sprintf("%v %v", "0", "4") + r.Check(testkit.Rows(rowStr4, rowStr1, rowStr2, rowStr3)) + insertSQL = `insert into insert_autoinc_test(id, c1) values (0, 5)` + _, err := tk.Exec(insertSQL) + // ERROR 1062 (23000): Duplicate entry '0' for key 'PRIMARY' + c.Assert(err, NotNil) + insertSQL = `insert into insert_autoinc_test(c1) values (6)` + tk.MustExec(insertSQL) + r = tk.MustQuery("select * from insert_autoinc_test;") + rowStr5 = fmt.Sprintf("%v %v", "8", "6") + r.Check(testkit.Rows(rowStr4, rowStr1, rowStr2, rowStr3, rowStr5)) + insertSQL = `insert into 
insert_autoinc_test(id, c1) values (null, 7)` + tk.MustExec(insertSQL) + r = tk.MustQuery("select * from insert_autoinc_test;") + rowStr6 = fmt.Sprintf("%v %v", "9", "7") + r.Check(testkit.Rows(rowStr4, rowStr1, rowStr2, rowStr3, rowStr5, rowStr6)) + tk.MustExec("set SQL_MODE='';") + insertSQL = `insert into insert_autoinc_test(id, c1) values (0, 8)` + tk.MustExec(insertSQL) + r = tk.MustQuery("select * from insert_autoinc_test;") + rowStr7 := fmt.Sprintf("%v %v", "10", "8") + r.Check(testkit.Rows(rowStr4, rowStr1, rowStr2, rowStr3, rowStr5, rowStr6, rowStr7)) + insertSQL = `insert into insert_autoinc_test(id, c1) values (null, 9)` + tk.MustExec(insertSQL) + r = tk.MustQuery("select * from insert_autoinc_test;") + rowStr8 := fmt.Sprintf("%v %v", "11", "9") + r.Check(testkit.Rows(rowStr4, rowStr1, rowStr2, rowStr3, rowStr5, rowStr6, rowStr7, rowStr8)) +} + +func (s *testSuite4) TestInsertSetWithDefault(c *C) { + tk := testkit.NewTestKit(c, s.store) + tk.MustExec("use test") + // Assign `DEFAULT` in `INSERT ... SET ...` statement + tk.MustExec("drop table if exists t1, t2;") + tk.MustExec("create table t1 (a int default 10, b int default 20);") + tk.MustExec("insert into t1 set a=default;") + tk.MustQuery("select * from t1;").Check(testkit.Rows("10 20")) + tk.MustExec("delete from t1;") + tk.MustExec("insert into t1 set b=default;") + tk.MustQuery("select * from t1;").Check(testkit.Rows("10 20")) + tk.MustExec("delete from t1;") + tk.MustExec("insert into t1 set b=default, a=1;") + tk.MustQuery("select * from t1;").Check(testkit.Rows("1 20")) + tk.MustExec("delete from t1;") + tk.MustExec("insert into t1 set a=default(a);") + tk.MustQuery("select * from t1;").Check(testkit.Rows("10 20")) + tk.MustExec("delete from t1;") + tk.MustExec("insert into t1 set a=default(b), b=default(a)") + tk.MustQuery("select * from t1;").Check(testkit.Rows("20 10")) + tk.MustExec("delete from t1;") + tk.MustExec("insert into t1 set a=default(b)+default(a);") + tk.MustQuery("select * from t1;").Check(testkit.Rows("30 20")) +} + +func (s *testSuite4) TestReplace(c *C) { + tk := testkit.NewTestKit(c, s.store) + tk.MustExec("use test") + testSQL := `drop table if exists replace_test; + create table replace_test (id int PRIMARY KEY AUTO_INCREMENT, c1 int, c2 int, c3 int default 1);` + tk.MustExec(testSQL) + testSQL = `replace replace_test (c1) values (1),(2),(NULL);` + tk.MustExec(testSQL) + + errReplaceSQL := `replace replace_test (c1) values ();` + tk.MustExec("begin") + _, err := tk.Exec(errReplaceSQL) + c.Assert(err, NotNil) + tk.MustExec("rollback") + + errReplaceSQL = `replace replace_test (c1, c2) values (1,2),(1);` + tk.MustExec("begin") + _, err = tk.Exec(errReplaceSQL) + c.Assert(err, NotNil) + tk.MustExec("rollback") + + errReplaceSQL = `replace replace_test (xxx) values (3);` + tk.MustExec("begin") + _, err = tk.Exec(errReplaceSQL) + c.Assert(err, NotNil) + tk.MustExec("rollback") + + errReplaceSQL = `replace replace_test_xxx (c1) values ();` + tk.MustExec("begin") + _, err = tk.Exec(errReplaceSQL) + c.Assert(err, NotNil) + tk.MustExec("rollback") + + replaceSetSQL := `replace replace_test set c1 = 3;` + tk.MustExec(replaceSetSQL) + + errReplaceSetSQL := `replace replace_test set c1 = 4, c1 = 5;` + tk.MustExec("begin") + _, err = tk.Exec(errReplaceSetSQL) + c.Assert(err, NotNil) + tk.MustExec("rollback") + + errReplaceSetSQL = `replace replace_test set xxx = 6;` + tk.MustExec("begin") + _, err = tk.Exec(errReplaceSetSQL) + c.Assert(err, NotNil) + tk.MustExec("rollback") + + replaceSelectSQL := `create 
table replace_test_1 (id int, c1 int);` + tk.MustExec(replaceSelectSQL) + replaceSelectSQL = `replace replace_test_1 select id, c1 from replace_test;` + tk.MustExec(replaceSelectSQL) + + errReplaceSelectSQL := `replace replace_test_1 select c1 from replace_test;` + tk.MustExec("begin") + _, err = tk.Exec(errReplaceSelectSQL) + c.Assert(err, NotNil) + tk.MustExec("rollback") + + replaceUniqueIndexSQL := `create table replace_test_3 (c1 int, c2 int, UNIQUE INDEX (c2));` + tk.MustExec(replaceUniqueIndexSQL) + replaceUniqueIndexSQL = `replace into replace_test_3 set c2=1;` + tk.MustExec(replaceUniqueIndexSQL) + replaceUniqueIndexSQL = `replace into replace_test_3 set c2=1;` + tk.MustExec(replaceUniqueIndexSQL) + c.Assert(int64(tk.Se.AffectedRows()), Equals, int64(1)) + + replaceUniqueIndexSQL = `replace into replace_test_3 set c1=1, c2=1;` + tk.MustExec(replaceUniqueIndexSQL) + c.Assert(int64(tk.Se.AffectedRows()), Equals, int64(2)) + + replaceUniqueIndexSQL = `replace into replace_test_3 set c2=NULL;` + tk.MustExec(replaceUniqueIndexSQL) + replaceUniqueIndexSQL = `replace into replace_test_3 set c2=NULL;` + tk.MustExec(replaceUniqueIndexSQL) + c.Assert(int64(tk.Se.AffectedRows()), Equals, int64(1)) + + replaceUniqueIndexSQL = `create table replace_test_4 (c1 int, c2 int, c3 int, UNIQUE INDEX (c1, c2));` + tk.MustExec(replaceUniqueIndexSQL) + replaceUniqueIndexSQL = `replace into replace_test_4 set c2=NULL;` + tk.MustExec(replaceUniqueIndexSQL) + replaceUniqueIndexSQL = `replace into replace_test_4 set c2=NULL;` + tk.MustExec(replaceUniqueIndexSQL) + c.Assert(int64(tk.Se.AffectedRows()), Equals, int64(1)) + + replacePrimaryKeySQL := `create table replace_test_5 (c1 int, c2 int, c3 int, PRIMARY KEY (c1, c2));` + tk.MustExec(replacePrimaryKeySQL) + replacePrimaryKeySQL = `replace into replace_test_5 set c1=1, c2=2;` + tk.MustExec(replacePrimaryKeySQL) + replacePrimaryKeySQL = `replace into replace_test_5 set c1=1, c2=2;` + tk.MustExec(replacePrimaryKeySQL) + c.Assert(int64(tk.Se.AffectedRows()), Equals, int64(1)) + + // For Issue989 + issue989SQL := `CREATE TABLE tIssue989 (a int, b int, PRIMARY KEY(a), UNIQUE KEY(b));` + tk.MustExec(issue989SQL) + issue989SQL = `insert into tIssue989 (a, b) values (1, 2);` + tk.MustExec(issue989SQL) + issue989SQL = `replace into tIssue989(a, b) values (111, 2);` + tk.MustExec(issue989SQL) + r := tk.MustQuery("select * from tIssue989;") + r.Check(testkit.Rows("111 2")) + + // For Issue1012 + issue1012SQL := `CREATE TABLE tIssue1012 (a int, b int, PRIMARY KEY(a), UNIQUE KEY(b));` + tk.MustExec(issue1012SQL) + issue1012SQL = `insert into tIssue1012 (a, b) values (1, 2);` + tk.MustExec(issue1012SQL) + issue1012SQL = `insert into tIssue1012 (a, b) values (2, 1);` + tk.MustExec(issue1012SQL) + issue1012SQL = `replace into tIssue1012(a, b) values (1, 1);` + tk.MustExec(issue1012SQL) + c.Assert(int64(tk.Se.AffectedRows()), Equals, int64(3)) + + r = tk.MustQuery("select * from tIssue1012;") + r.Check(testkit.Rows("1 1")) + + // Test Replace with info message + tk.MustExec(`drop table if exists t1`) + tk.MustExec(`create table t1(a int primary key, b int);`) + tk.MustExec(`insert into t1 values(1,1),(2,2),(3,3),(4,4),(5,5);`) + tk.MustExec(`replace into t1 values(1,1);`) + c.Assert(int64(tk.Se.AffectedRows()), Equals, int64(1)) + + tk.MustExec(`replace into t1 values(1,1),(2,2);`) + c.Assert(int64(tk.Se.AffectedRows()), Equals, int64(2)) + tk.MustExec(`replace into t1 values(4,14),(5,15),(6,16),(7,17),(8,18)`) + c.Assert(int64(tk.Se.AffectedRows()), Equals, int64(7)) + 
tk.MustExec(`replace into t1 select * from (select 1, 2) as tmp;`) + c.Assert(int64(tk.Se.AffectedRows()), Equals, int64(2)) + + // Assign `DEFAULT` in `REPLACE` statement + tk.MustExec("drop table if exists t1, t2;") + tk.MustExec("create table t1 (a int primary key, b int default 20, c int default 30);") + tk.MustExec("insert into t1 value (1, 2, 3);") + tk.MustExec("replace t1 set a=1, b=default;") + tk.MustQuery("select * from t1;").Check(testkit.Rows("1 20 30")) + tk.MustExec("replace t1 set a=2, b=default, c=default") + tk.MustQuery("select * from t1;").Check(testkit.Rows("1 20 30", "2 20 30")) + tk.MustExec("replace t1 set a=2, b=default(c), c=default(b);") + tk.MustQuery("select * from t1;").Check(testkit.Rows("1 20 30", "2 30 20")) + tk.MustExec("replace t1 set a=default(b)+default(c)") + tk.MustQuery("select * from t1;").Check(testkit.Rows("1 20 30", "2 30 20", "50 20 30")) +} + +func (s *testSuite) TestDelete(c *C) { + tk := testkit.NewTestKit(c, s.store) + s.fillData(tk, "delete_test") + + tk.MustExec(`delete from delete_test where id = 2 limit 1;`) + tk.CheckExecResult(1, 0) + + // Test delete with false condition + tk.MustExec(`delete from delete_test where 0;`) + tk.CheckExecResult(0, 0) + + tk.MustExec("insert into delete_test values (2, 'abc')") + tk.MustExec(`delete from delete_test where delete_test.id = 2 limit 1`) + tk.CheckExecResult(1, 0) + + // Select data + tk.MustExec("begin") + rows := tk.MustQuery(`SELECT * from delete_test limit 2;`) + rows.Check(testkit.Rows("1 hello")) + tk.MustExec("commit") +} + +func (s *testSuite4) TestNotNullDefault(c *C) { + tk := testkit.NewTestKit(c, s.store) + tk.MustExec("use test; drop table if exists t1,t2;") + defer tk.MustExec("drop table t1,t2") + tk.MustExec("create table t1 (a int not null default null default 1);") + tk.MustExec("create table t2 (a int);") + tk.MustExec("alter table t2 change column a a int not null default null default 1;") +} + +func (s *testSuite7) TestReplaceLog(c *C) { + tk := testkit.NewTestKit(c, s.store) + tk.MustExec("use test") + tk.MustExec(`create table testLog (a int not null primary key, b int unique key);`) + + // Make some dangling index. + s.ctx = mock.NewContext() + s.ctx.Store = s.store + is := s.domain.InfoSchema() + dbName := model.NewCIStr("test") + tblName := model.NewCIStr("testLog") + tbl, err := is.TableByName(dbName, tblName) + c.Assert(err, IsNil) + tblInfo := tbl.Meta() + idxInfo := tblInfo.FindIndexByName("b") + indexOpr := tables.NewIndex(tblInfo.ID, tblInfo, idxInfo) + + txn, err := s.store.Begin() + c.Assert(err, IsNil) + _, err = indexOpr.Create(s.ctx, txn, types.MakeDatums(1), 1) + c.Assert(err, IsNil) + err = txn.Commit(context.Background()) + c.Assert(err, IsNil) + + _, err = tk.Exec(`replace into testLog values (0, 0), (1, 1);`) + c.Assert(err, NotNil) + expErr := errors.New(`can not be duplicated row, due to old row not found. handle 1 not found`) + c.Assert(expErr.Error() == err.Error(), IsTrue, Commentf("obtained error: (%s)\nexpected error: (%s)", err.Error(), expErr.Error())) +} diff --git a/expression/aggregation/agg_to_pb.go b/expression/aggregation/agg_to_pb.go new file mode 100644 index 0000000..2250ac6 --- /dev/null +++ b/expression/aggregation/agg_to_pb.go @@ -0,0 +1,92 @@ +// Copyright 2017 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package aggregation + +import ( + "github.com/pingcap/errors" + "github.com/pingcap/tidb/expression" + "github.com/pingcap/tidb/kv" + "github.com/pingcap/tidb/parser/ast" + "github.com/pingcap/tidb/sessionctx/stmtctx" + "github.com/pingcap/tidb/types" + "github.com/pingcap/tipb/go-tipb" +) + +// AggFuncToPBExpr converts aggregate function to pb. +func AggFuncToPBExpr(sc *stmtctx.StatementContext, client kv.Client, aggFunc *AggFuncDesc) *tipb.Expr { + pc := expression.NewPBConverter(client, sc) + var tp tipb.ExprType + switch aggFunc.Name { + case ast.AggFuncCount: + tp = tipb.ExprType_Count + case ast.AggFuncFirstRow: + tp = tipb.ExprType_First + case ast.AggFuncMax: + tp = tipb.ExprType_Max + case ast.AggFuncMin: + tp = tipb.ExprType_Min + case ast.AggFuncSum: + tp = tipb.ExprType_Sum + case ast.AggFuncAvg: + tp = tipb.ExprType_Avg + } + if !client.IsRequestTypeSupported(kv.ReqTypeSelect, int64(tp)) { + return nil + } + + children := make([]*tipb.Expr, 0, len(aggFunc.Args)) + for _, arg := range aggFunc.Args { + pbArg := pc.ExprToPB(arg) + if pbArg == nil { + return nil + } + children = append(children, pbArg) + } + return &tipb.Expr{Tp: tp, Children: children, FieldType: expression.ToPBFieldType(aggFunc.RetTp)} +} + +// PBExprToAggFuncDesc converts pb to aggregate function. +func PBExprToAggFuncDesc(sc *stmtctx.StatementContext, aggFunc *tipb.Expr, fieldTps []*types.FieldType) (*AggFuncDesc, error) { + var name string + switch aggFunc.Tp { + case tipb.ExprType_Count: + name = ast.AggFuncCount + case tipb.ExprType_First: + name = ast.AggFuncFirstRow + case tipb.ExprType_Max: + name = ast.AggFuncMax + case tipb.ExprType_Min: + name = ast.AggFuncMin + case tipb.ExprType_Sum: + name = ast.AggFuncSum + case tipb.ExprType_Avg: + name = ast.AggFuncAvg + default: + return nil, errors.Errorf("unknown aggregation function type: %v", aggFunc.Tp) + } + + args, err := expression.PBToExprs(aggFunc.Children, fieldTps, sc) + if err != nil { + return nil, err + } + base := baseFuncDesc{ + Name: name, + Args: args, + RetTp: expression.FieldTypeFromPB(aggFunc.FieldType), + } + return &AggFuncDesc{ + baseFuncDesc: base, + Mode: Partial1Mode, + }, nil +} diff --git a/expression/aggregation/aggregation.go b/expression/aggregation/aggregation.go new file mode 100644 index 0000000..b1bef98 --- /dev/null +++ b/expression/aggregation/aggregation.go @@ -0,0 +1,161 @@ +// Copyright 2016 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package aggregation + +import ( + "bytes" + + "github.com/pingcap/errors" + "github.com/pingcap/tidb/expression" + "github.com/pingcap/tidb/parser/ast" + "github.com/pingcap/tidb/sessionctx/stmtctx" + "github.com/pingcap/tidb/types" + "github.com/pingcap/tidb/util/chunk" + "github.com/pingcap/tipb/go-tipb" +) + +// Aggregation stands for aggregate functions. +type Aggregation interface { + // Update during executing. + Update(evalCtx *AggEvaluateContext, sc *stmtctx.StatementContext, row chunk.Row) error + + // GetPartialResult will called by coprocessor to get partial results. For avg function, partial results will return + // sum and count values at the same time. + GetPartialResult(evalCtx *AggEvaluateContext) []types.Datum + + // GetResult will be called when all data have been processed. + GetResult(evalCtx *AggEvaluateContext) types.Datum + + // CreateContext creates a new AggEvaluateContext for the aggregation function. + CreateContext(sc *stmtctx.StatementContext) *AggEvaluateContext + + // ResetContext resets the content of the evaluate context. + ResetContext(sc *stmtctx.StatementContext, evalCtx *AggEvaluateContext) +} + +// NewDistAggFunc creates new Aggregate function for mock tikv. +func NewDistAggFunc(expr *tipb.Expr, fieldTps []*types.FieldType, sc *stmtctx.StatementContext) (Aggregation, error) { + args := make([]expression.Expression, 0, len(expr.Children)) + for _, child := range expr.Children { + arg, err := expression.PBToExpr(child, fieldTps, sc) + if err != nil { + return nil, err + } + args = append(args, arg) + } + switch expr.Tp { + case tipb.ExprType_Sum: + return &sumFunction{aggFunction: newAggFunc(ast.AggFuncSum, args)}, nil + case tipb.ExprType_Count: + return &countFunction{aggFunction: newAggFunc(ast.AggFuncCount, args)}, nil + case tipb.ExprType_Avg: + return &avgFunction{aggFunction: newAggFunc(ast.AggFuncAvg, args)}, nil + case tipb.ExprType_Max: + return &maxMinFunction{aggFunction: newAggFunc(ast.AggFuncMax, args), isMax: true}, nil + case tipb.ExprType_Min: + return &maxMinFunction{aggFunction: newAggFunc(ast.AggFuncMin, args)}, nil + case tipb.ExprType_First: + return &firstRowFunction{aggFunction: newAggFunc(ast.AggFuncFirstRow, args)}, nil + } + return nil, errors.Errorf("Unknown aggregate function type %v", expr.Tp) +} + +// AggEvaluateContext is used to store intermediate result when calculating aggregate functions. +type AggEvaluateContext struct { + Count int64 + Value types.Datum + Buffer *bytes.Buffer // Buffer is used for group_concat. + GotFirstRow bool // It will check if the agg has met the first row key. +} + +// AggFunctionMode stands for the aggregation function's mode. +type AggFunctionMode int + +// |-----------------|--------------|--------------| +// | AggFunctionMode | input | output | +// |-----------------|--------------|--------------| +// | CompleteMode | origin data | final result | +// | FinalMode | partial data | final result | +// | Partial1Mode | origin data | partial data | +// | Partial2Mode | partial data | partial data | +// |-----------------|--------------|--------------| +const ( + CompleteMode AggFunctionMode = iota + FinalMode + Partial1Mode + Partial2Mode +) + +type aggFunction struct { + *AggFuncDesc +} + +func newAggFunc(funcName string, args []expression.Expression) aggFunction { + agg := &AggFuncDesc{} + agg.Name = funcName + agg.Args = args + return aggFunction{AggFuncDesc: agg} +} + +// CreateContext implements Aggregation interface. 
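+// The returned AggEvaluateContext holds the per-group intermediate state (Count, Value, Buffer)
+// that Update accumulates into and that GetResult/GetPartialResult read from.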
+func (af *aggFunction) CreateContext(sc *stmtctx.StatementContext) *AggEvaluateContext { + evalCtx := &AggEvaluateContext{} + return evalCtx +} + +func (af *aggFunction) ResetContext(sc *stmtctx.StatementContext, evalCtx *AggEvaluateContext) { + evalCtx.Value.SetNull() +} + +func (af *aggFunction) updateSum(sc *stmtctx.StatementContext, evalCtx *AggEvaluateContext, row chunk.Row) error { + a := af.Args[0] + value, err := a.Eval(row) + if err != nil { + return err + } + if value.IsNull() { + return nil + } + evalCtx.Value, err = calculateSum(sc, evalCtx.Value, value) + if err != nil { + return err + } + evalCtx.Count++ + return nil +} + +// NeedCount indicates whether the aggregate function should record count. +func NeedCount(name string) bool { + return name == ast.AggFuncCount || name == ast.AggFuncAvg +} + +// NeedValue indicates whether the aggregate function should record value. +func NeedValue(name string) bool { + switch name { + case ast.AggFuncSum, ast.AggFuncAvg, ast.AggFuncFirstRow, ast.AggFuncMax, ast.AggFuncMin: + return true + default: + return false + } +} + +// IsAllFirstRow checks whether functions in `aggFuncs` are all FirstRow. +func IsAllFirstRow(aggFuncs []*AggFuncDesc) bool { + for _, fun := range aggFuncs { + if fun.Name != ast.AggFuncFirstRow { + return false + } + } + return true +} diff --git a/expression/aggregation/aggregation_test.go b/expression/aggregation/aggregation_test.go new file mode 100644 index 0000000..6c494b9 --- /dev/null +++ b/expression/aggregation/aggregation_test.go @@ -0,0 +1,259 @@ +// Copyright 2018 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package aggregation + +import ( + "testing" + + . 
"github.com/pingcap/check" + "github.com/pingcap/tidb/expression" + "github.com/pingcap/tidb/parser/ast" + "github.com/pingcap/tidb/parser/mysql" + "github.com/pingcap/tidb/sessionctx" + "github.com/pingcap/tidb/sessionctx/variable" + "github.com/pingcap/tidb/types" + "github.com/pingcap/tidb/util/chunk" + "github.com/pingcap/tidb/util/mock" +) + +var _ = Suite(&testAggFuncSuit{}) + +func TestT(t *testing.T) { + CustomVerboseFlag = true + TestingT(t) +} + +type testAggFuncSuit struct { + ctx sessionctx.Context + rows []chunk.Row + nullRow chunk.Row +} + +func generateRowData() []chunk.Row { + rows := make([]chunk.Row, 0, 5050) + for i := 1; i <= 100; i++ { + for j := 0; j < i; j++ { + rows = append(rows, chunk.MutRowFromDatums(types.MakeDatums(i)).ToRow()) + } + } + return rows +} + +func (s *testAggFuncSuit) SetUpSuite(c *C) { + s.ctx = mock.NewContext() + s.ctx.GetSessionVars().GlobalVarsAccessor = variable.NewMockGlobalAccessor() + s.rows = generateRowData() + s.nullRow = chunk.MutRowFromDatums([]types.Datum{{}}).ToRow() +} + +func (s *testAggFuncSuit) TestAvg(c *C) { + col := &expression.Column{ + Index: 0, + RetType: types.NewFieldType(mysql.TypeLonglong), + } + ctx := mock.NewContext() + desc, err := NewAggFuncDesc(s.ctx, ast.AggFuncAvg, []expression.Expression{col}) + c.Assert(err, IsNil) + avgFunc := desc.GetAggFunc(ctx) + evalCtx := avgFunc.CreateContext(s.ctx.GetSessionVars().StmtCtx) + + result := avgFunc.GetResult(evalCtx) + c.Assert(result.IsNull(), IsTrue) + + for _, row := range s.rows { + err := avgFunc.Update(evalCtx, s.ctx.GetSessionVars().StmtCtx, row) + c.Assert(err, IsNil) + } + result = avgFunc.GetResult(evalCtx) + c.Assert(result.GetInt64(), Equals, int64(67)) + err = avgFunc.Update(evalCtx, s.ctx.GetSessionVars().StmtCtx, s.nullRow) + c.Assert(err, IsNil) + result = avgFunc.GetResult(evalCtx) + c.Assert(result.GetInt64(), Equals, int64(67)) +} + +func (s *testAggFuncSuit) TestAvgFinalMode(c *C) { + rows := make([][]types.Datum, 0, 100) + for i := 1; i <= 100; i++ { + rows = append(rows, types.MakeDatums(i, int64(i*i))) + } + ctx := mock.NewContext() + cntCol := &expression.Column{ + Index: 0, + RetType: types.NewFieldType(mysql.TypeLonglong), + } + sumCol := &expression.Column{ + Index: 1, + RetType: types.NewFieldType(mysql.TypeLonglong), + } + aggFunc, err := NewAggFuncDesc(s.ctx, ast.AggFuncAvg, []expression.Expression{cntCol, sumCol}) + c.Assert(err, IsNil) + aggFunc.Mode = FinalMode + avgFunc := aggFunc.GetAggFunc(ctx) + evalCtx := avgFunc.CreateContext(s.ctx.GetSessionVars().StmtCtx) + + for _, row := range rows { + err := avgFunc.Update(evalCtx, s.ctx.GetSessionVars().StmtCtx, chunk.MutRowFromDatums(row).ToRow()) + c.Assert(err, IsNil) + } + result := avgFunc.GetResult(evalCtx) + c.Assert(result.GetInt64(), Equals, int64(67)) +} + +func (s *testAggFuncSuit) TestSum(c *C) { + col := &expression.Column{ + Index: 0, + RetType: types.NewFieldType(mysql.TypeLonglong), + } + ctx := mock.NewContext() + desc, err := NewAggFuncDesc(s.ctx, ast.AggFuncSum, []expression.Expression{col}) + c.Assert(err, IsNil) + sumFunc := desc.GetAggFunc(ctx) + evalCtx := sumFunc.CreateContext(s.ctx.GetSessionVars().StmtCtx) + + result := sumFunc.GetResult(evalCtx) + c.Assert(result.IsNull(), IsTrue) + + for _, row := range s.rows { + err := sumFunc.Update(evalCtx, s.ctx.GetSessionVars().StmtCtx, row) + c.Assert(err, IsNil) + } + result = sumFunc.GetResult(evalCtx) + c.Assert(result.GetInt64(), Equals, int64(338350)) + err = sumFunc.Update(evalCtx, s.ctx.GetSessionVars().StmtCtx, 
s.nullRow) + c.Assert(err, IsNil) + result = sumFunc.GetResult(evalCtx) + c.Assert(result.GetInt64(), Equals, int64(338350)) + partialResult := sumFunc.GetPartialResult(evalCtx) + c.Assert(partialResult[0].GetInt64(), Equals, int64(338350)) +} + +func (s *testAggFuncSuit) TestCount(c *C) { + col := &expression.Column{ + Index: 0, + RetType: types.NewFieldType(mysql.TypeLonglong), + } + ctx := mock.NewContext() + desc, err := NewAggFuncDesc(s.ctx, ast.AggFuncCount, []expression.Expression{col}) + c.Assert(err, IsNil) + countFunc := desc.GetAggFunc(ctx) + evalCtx := countFunc.CreateContext(s.ctx.GetSessionVars().StmtCtx) + + result := countFunc.GetResult(evalCtx) + c.Assert(result.GetInt64(), Equals, int64(0)) + + for _, row := range s.rows { + err := countFunc.Update(evalCtx, s.ctx.GetSessionVars().StmtCtx, row) + c.Assert(err, IsNil) + } + result = countFunc.GetResult(evalCtx) + c.Assert(result.GetInt64(), Equals, int64(5050)) + err = countFunc.Update(evalCtx, s.ctx.GetSessionVars().StmtCtx, s.nullRow) + c.Assert(err, IsNil) + result = countFunc.GetResult(evalCtx) + c.Assert(result.GetInt64(), Equals, int64(5050)) + partialResult := countFunc.GetPartialResult(evalCtx) + c.Assert(partialResult[0].GetInt64(), Equals, int64(5050)) +} + +func (s *testAggFuncSuit) TestFirstRow(c *C) { + col := &expression.Column{ + Index: 0, + RetType: types.NewFieldType(mysql.TypeLonglong), + } + + ctx := mock.NewContext() + desc, err := NewAggFuncDesc(s.ctx, ast.AggFuncFirstRow, []expression.Expression{col}) + c.Assert(err, IsNil) + firstRowFunc := desc.GetAggFunc(ctx) + evalCtx := firstRowFunc.CreateContext(s.ctx.GetSessionVars().StmtCtx) + + row := chunk.MutRowFromDatums(types.MakeDatums(1)).ToRow() + err = firstRowFunc.Update(evalCtx, s.ctx.GetSessionVars().StmtCtx, row) + c.Assert(err, IsNil) + result := firstRowFunc.GetResult(evalCtx) + c.Assert(result.GetUint64(), Equals, uint64(1)) + + row = chunk.MutRowFromDatums(types.MakeDatums(2)).ToRow() + err = firstRowFunc.Update(evalCtx, s.ctx.GetSessionVars().StmtCtx, row) + c.Assert(err, IsNil) + result = firstRowFunc.GetResult(evalCtx) + c.Assert(result.GetUint64(), Equals, uint64(1)) + partialResult := firstRowFunc.GetPartialResult(evalCtx) + c.Assert(partialResult[0].GetUint64(), Equals, uint64(1)) +} + +func (s *testAggFuncSuit) TestMaxMin(c *C) { + col := &expression.Column{ + Index: 0, + RetType: types.NewFieldType(mysql.TypeLonglong), + } + + ctx := mock.NewContext() + desc, err := NewAggFuncDesc(s.ctx, ast.AggFuncMax, []expression.Expression{col}) + c.Assert(err, IsNil) + maxFunc := desc.GetAggFunc(ctx) + desc, err = NewAggFuncDesc(s.ctx, ast.AggFuncMin, []expression.Expression{col}) + c.Assert(err, IsNil) + minFunc := desc.GetAggFunc(ctx) + maxEvalCtx := maxFunc.CreateContext(s.ctx.GetSessionVars().StmtCtx) + minEvalCtx := minFunc.CreateContext(s.ctx.GetSessionVars().StmtCtx) + + result := maxFunc.GetResult(maxEvalCtx) + c.Assert(result.IsNull(), IsTrue) + result = minFunc.GetResult(minEvalCtx) + c.Assert(result.IsNull(), IsTrue) + + row := chunk.MutRowFromDatums(types.MakeDatums(2)) + err = maxFunc.Update(maxEvalCtx, s.ctx.GetSessionVars().StmtCtx, row.ToRow()) + c.Assert(err, IsNil) + result = maxFunc.GetResult(maxEvalCtx) + c.Assert(result.GetInt64(), Equals, int64(2)) + err = minFunc.Update(minEvalCtx, s.ctx.GetSessionVars().StmtCtx, row.ToRow()) + c.Assert(err, IsNil) + result = minFunc.GetResult(minEvalCtx) + c.Assert(result.GetInt64(), Equals, int64(2)) + + row.SetDatum(0, types.NewIntDatum(3)) + err = maxFunc.Update(maxEvalCtx, 
s.ctx.GetSessionVars().StmtCtx, row.ToRow()) + c.Assert(err, IsNil) + result = maxFunc.GetResult(maxEvalCtx) + c.Assert(result.GetInt64(), Equals, int64(3)) + err = minFunc.Update(minEvalCtx, s.ctx.GetSessionVars().StmtCtx, row.ToRow()) + c.Assert(err, IsNil) + result = minFunc.GetResult(minEvalCtx) + c.Assert(result.GetInt64(), Equals, int64(2)) + + row.SetDatum(0, types.NewIntDatum(1)) + err = maxFunc.Update(maxEvalCtx, s.ctx.GetSessionVars().StmtCtx, row.ToRow()) + c.Assert(err, IsNil) + result = maxFunc.GetResult(maxEvalCtx) + c.Assert(result.GetInt64(), Equals, int64(3)) + err = minFunc.Update(minEvalCtx, s.ctx.GetSessionVars().StmtCtx, row.ToRow()) + c.Assert(err, IsNil) + result = minFunc.GetResult(minEvalCtx) + c.Assert(result.GetInt64(), Equals, int64(1)) + + row.SetDatum(0, types.NewDatum(nil)) + err = maxFunc.Update(maxEvalCtx, s.ctx.GetSessionVars().StmtCtx, row.ToRow()) + c.Assert(err, IsNil) + result = maxFunc.GetResult(maxEvalCtx) + c.Assert(result.GetInt64(), Equals, int64(3)) + err = minFunc.Update(minEvalCtx, s.ctx.GetSessionVars().StmtCtx, row.ToRow()) + c.Assert(err, IsNil) + result = minFunc.GetResult(minEvalCtx) + c.Assert(result.GetInt64(), Equals, int64(1)) + partialResult := minFunc.GetPartialResult(minEvalCtx) + c.Assert(partialResult[0].GetInt64(), Equals, int64(1)) +} diff --git a/expression/aggregation/avg.go b/expression/aggregation/avg.go new file mode 100644 index 0000000..8f7e1f7 --- /dev/null +++ b/expression/aggregation/avg.go @@ -0,0 +1,81 @@ +// Copyright 2017 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package aggregation + +import ( + "github.com/pingcap/tidb/sessionctx/stmtctx" + "github.com/pingcap/tidb/types" + "github.com/pingcap/tidb/util/chunk" +) + +type avgFunction struct { + aggFunction +} + +func (af *avgFunction) updateAvg(sc *stmtctx.StatementContext, evalCtx *AggEvaluateContext, row chunk.Row) error { + a := af.Args[1] + value, err := a.Eval(row) + if err != nil { + return err + } + if value.IsNull() { + return nil + } + evalCtx.Value, err = calculateSum(sc, evalCtx.Value, value) + if err != nil { + return err + } + count, err := af.Args[0].Eval(row) + if err != nil { + return err + } + evalCtx.Count += count.GetInt64() + return nil +} + +func (af *avgFunction) ResetContext(sc *stmtctx.StatementContext, evalCtx *AggEvaluateContext) { + evalCtx.Value.SetNull() + evalCtx.Count = 0 +} + +// Update implements Aggregation interface. +func (af *avgFunction) Update(evalCtx *AggEvaluateContext, sc *stmtctx.StatementContext, row chunk.Row) (err error) { + switch af.Mode { + case Partial1Mode, CompleteMode: + err = af.updateSum(sc, evalCtx, row) + case Partial2Mode, FinalMode: + err = af.updateAvg(sc, evalCtx, row) + } + return err +} + +// GetResult implements Aggregation interface. 
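+// The evaluation context of avg keeps two pieces of state: evalCtx.Count (the number of
+// accumulated values) and evalCtx.Value (their running sum), so the result below is simply
+// Value divided by Count. A small illustrative sketch of that arithmetic (integer avg
+// truncates because it uses int64 division):
+//
+//	// after accumulating the int64 values 1, 2 and 4:
+//	// evalCtx.Count == 3, evalCtx.Value is an int64 datum holding 7
+//	// GetResult therefore returns 7 / 3 == 2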
+func (af *avgFunction) GetResult(evalCtx *AggEvaluateContext) (d types.Datum) { + switch evalCtx.Value.Kind() { + case types.KindFloat64: + sum := evalCtx.Value.GetFloat64() + d.SetFloat64(sum / float64(evalCtx.Count)) + return + case types.KindInt64: + sum := evalCtx.Value.GetInt64() + d.SetInt64(sum / evalCtx.Count) + return + } + return +} + +// GetPartialResult implements Aggregation interface. +func (af *avgFunction) GetPartialResult(evalCtx *AggEvaluateContext) []types.Datum { + return []types.Datum{types.NewIntDatum(evalCtx.Count), evalCtx.Value} +} diff --git a/expression/aggregation/base_func.go b/expression/aggregation/base_func.go new file mode 100644 index 0000000..891e9b8 --- /dev/null +++ b/expression/aggregation/base_func.go @@ -0,0 +1,194 @@ +// Copyright 2018 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package aggregation + +import ( + "bytes" + "fmt" + log "github.com/sirupsen/logrus" + "strings" + + "github.com/pingcap/errors" + "github.com/pingcap/tidb/expression" + "github.com/pingcap/tidb/parser/ast" + "github.com/pingcap/tidb/parser/mysql" + "github.com/pingcap/tidb/sessionctx" + "github.com/pingcap/tidb/types" +) + +// baseFuncDesc describes an function signature, only used in planner. +type baseFuncDesc struct { + // Name represents the function name. + Name string + // Args represents the arguments of the function. + Args []expression.Expression + // RetTp represents the return type of the function. + RetTp *types.FieldType +} + +func newBaseFuncDesc(ctx sessionctx.Context, name string, args []expression.Expression) (baseFuncDesc, error) { + b := baseFuncDesc{Name: strings.ToLower(name), Args: args} + err := b.typeInfer(ctx) + if err != nil { + return b, err + } + if _, ok := noNeedCastAggFuncs[name]; ok { + return b, nil + } + for _, arg := range args { + if arg.GetType().EvalType() != b.RetTp.EvalType() { + log.Warn(fmt.Sprintf("unmatched arg tp %v with return tp %v", arg.GetType().EvalType(), b.RetTp.EvalType())) + } + } + return b, nil +} + +func (a *baseFuncDesc) equal(ctx sessionctx.Context, other *baseFuncDesc) bool { + if a.Name != other.Name || len(a.Args) != len(other.Args) { + return false + } + for i := range a.Args { + if !a.Args[i].Equal(ctx, other.Args[i]) { + return false + } + } + return true +} + +func (a *baseFuncDesc) clone() *baseFuncDesc { + clone := *a + newTp := *a.RetTp + clone.RetTp = &newTp + clone.Args = make([]expression.Expression, len(a.Args)) + for i := range a.Args { + clone.Args[i] = a.Args[i].Clone() + } + return &clone +} + +// String implements the fmt.Stringer interface. +func (a *baseFuncDesc) String() string { + buffer := bytes.NewBufferString(a.Name) + buffer.WriteString("(") + for i, arg := range a.Args { + buffer.WriteString(arg.String()) + if i+1 != len(a.Args) { + buffer.WriteString(", ") + } + } + buffer.WriteString(")") + return buffer.String() +} + +// typeInfer infers the arguments and return types of an function. 
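+// As a quick reference, the rules implemented by the typeInfer4* helpers below boil down to:
+//
+//	count(x)                  -> BIGINT (Flen 21)
+//	sum(int) / avg(int)       -> BIGINT
+//	sum(real) / avg(real)     -> DOUBLE
+//	sum/avg of anything else  -> DOUBLE with unspecified decimal
+//	max/min/first_row(x)      -> the type of x (NOT NULL cleared for max/min; enum/set become string)
+//	any other function name   -> "unsupported agg function" error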
+func (a *baseFuncDesc) typeInfer(ctx sessionctx.Context) error { + switch a.Name { + case ast.AggFuncCount: + a.typeInfer4Count(ctx) + case ast.AggFuncSum: + a.typeInfer4Sum(ctx) + case ast.AggFuncAvg: + a.typeInfer4Avg(ctx) + case ast.AggFuncMax, ast.AggFuncMin, ast.AggFuncFirstRow: + a.typeInfer4MaxMin(ctx) + default: + return errors.Errorf("unsupported agg function: %s", a.Name) + } + return nil +} + +func (a *baseFuncDesc) typeInfer4Count(ctx sessionctx.Context) { + a.RetTp = types.NewFieldType(mysql.TypeLonglong) + a.RetTp.Flen = 21 + types.SetBinChsClnFlag(a.RetTp) +} + +// typeInfer4Sum should returns a "decimal", otherwise it returns a "double". +// Because child returns integer or decimal type. +func (a *baseFuncDesc) typeInfer4Sum(ctx sessionctx.Context) { + switch a.Args[0].GetType().Tp { + case mysql.TypeTiny, mysql.TypeShort, mysql.TypeInt24, mysql.TypeLong, mysql.TypeLonglong: + a.RetTp = types.NewFieldType(mysql.TypeLonglong) + case mysql.TypeDouble, mysql.TypeFloat: + a.RetTp = types.NewFieldType(mysql.TypeDouble) + a.RetTp.Flen, a.RetTp.Decimal = mysql.MaxRealWidth, a.Args[0].GetType().Decimal + default: + a.RetTp = types.NewFieldType(mysql.TypeDouble) + a.RetTp.Flen, a.RetTp.Decimal = mysql.MaxRealWidth, types.UnspecifiedLength + } + types.SetBinChsClnFlag(a.RetTp) +} + +// typeInfer4Avg should returns a "decimal", otherwise it returns a "double". +// Because child returns integer or decimal type. +func (a *baseFuncDesc) typeInfer4Avg(ctx sessionctx.Context) { + switch a.Args[0].GetType().Tp { + case mysql.TypeTiny, mysql.TypeShort, mysql.TypeInt24, mysql.TypeLong, mysql.TypeLonglong: + a.RetTp = types.NewFieldType(mysql.TypeLonglong) + case mysql.TypeDouble, mysql.TypeFloat: + a.RetTp = types.NewFieldType(mysql.TypeDouble) + a.RetTp.Flen, a.RetTp.Decimal = mysql.MaxRealWidth, a.Args[0].GetType().Decimal + default: + a.RetTp = types.NewFieldType(mysql.TypeDouble) + a.RetTp.Flen, a.RetTp.Decimal = mysql.MaxRealWidth, types.UnspecifiedLength + } + types.SetBinChsClnFlag(a.RetTp) +} + +func (a *baseFuncDesc) typeInfer4MaxMin(ctx sessionctx.Context) { + a.RetTp = a.Args[0].GetType() + if (a.Name == ast.AggFuncMax || a.Name == ast.AggFuncMin) && a.RetTp.Tp != mysql.TypeBit { + a.RetTp = a.Args[0].GetType().Clone() + a.RetTp.Flag &^= mysql.NotNullFlag + } + if a.RetTp.Tp == mysql.TypeEnum || a.RetTp.Tp == mysql.TypeSet { + a.RetTp = &types.FieldType{Tp: mysql.TypeString, Flen: mysql.MaxFieldCharLength} + } +} + +// GetDefaultValue gets the default value when the function's input is null. +// According to MySQL, default values of the function are listed as follows: +// e.g. 
+// Table t which is empty: +// +-------+---------+---------+ +// | Table | Field | Type | +// +-------+---------+---------+ +// | t | a | int(11) | +// +-------+---------+---------+ +// +// Query: `select a, avg(a), sum(a), count(a), bit_xor(a), bit_or(a), bit_and(a), max(a), min(a), group_concat(a) from t;` +// +------+--------+--------+----------+------------+-----------+----------------------+--------+--------+-----------------+ +// | a | avg(a) | sum(a) | count(a) | bit_xor(a) | bit_or(a) | bit_and(a) | max(a) | min(a) | group_concat(a) | +// +------+--------+--------+----------+------------+-----------+----------------------+--------+--------+-----------------+ +// | NULL | NULL | NULL | 0 | 0 | 0 | 18446744073709551615 | NULL | NULL | NULL | +// +------+--------+--------+----------+------------+-----------+----------------------+--------+--------+-----------------+ +func (a *baseFuncDesc) GetDefaultValue() (v types.Datum) { + switch a.Name { + case ast.AggFuncCount: + v = types.NewIntDatum(0) + case ast.AggFuncFirstRow, ast.AggFuncAvg, ast.AggFuncSum, ast.AggFuncMax, + ast.AggFuncMin: + v = types.Datum{} + } + return +} + +// We do not need to wrap cast upon these functions, +// since the EvalXXX method called by the arg is determined by the corresponding arg type. +var noNeedCastAggFuncs = map[string]struct{}{ + ast.AggFuncCount: {}, + ast.AggFuncMax: {}, + ast.AggFuncMin: {}, + ast.AggFuncFirstRow: {}, +} diff --git a/expression/aggregation/base_func_test.go b/expression/aggregation/base_func_test.go new file mode 100644 index 0000000..a88d1a5 --- /dev/null +++ b/expression/aggregation/base_func_test.go @@ -0,0 +1,52 @@ +package aggregation + +import ( + "github.com/pingcap/check" + "github.com/pingcap/tidb/expression" + "github.com/pingcap/tidb/parser/ast" + "github.com/pingcap/tidb/parser/mysql" + "github.com/pingcap/tidb/sessionctx" + "github.com/pingcap/tidb/types" + "github.com/pingcap/tidb/util/mock" +) + +var _ = check.Suite(&testBaseFuncSuite{}) + +type testBaseFuncSuite struct { + ctx sessionctx.Context +} + +func (s *testBaseFuncSuite) SetUpSuite(c *check.C) { + s.ctx = mock.NewContext() +} + +func (s *testBaseFuncSuite) TestClone(c *check.C) { + col := &expression.Column{ + UniqueID: 0, + RetType: types.NewFieldType(mysql.TypeLonglong), + } + desc, err := newBaseFuncDesc(s.ctx, ast.AggFuncFirstRow, []expression.Expression{col}) + c.Assert(err, check.IsNil) + cloned := desc.clone() + c.Assert(desc.equal(s.ctx, cloned), check.IsTrue) + + col1 := &expression.Column{ + UniqueID: 1, + RetType: types.NewFieldType(mysql.TypeVarchar), + } + cloned.Args[0] = col1 + + c.Assert(desc.Args[0], check.Equals, col) + c.Assert(desc.equal(s.ctx, cloned), check.IsFalse) +} + +func (s *testBaseFuncSuite) TestMaxMin(c *check.C) { + col := &expression.Column{ + UniqueID: 0, + RetType: types.NewFieldType(mysql.TypeLonglong), + } + col.RetType.Flag |= mysql.NotNullFlag + desc, err := newBaseFuncDesc(s.ctx, ast.AggFuncMax, []expression.Expression{col}) + c.Assert(err, check.IsNil) + c.Assert(mysql.HasNotNullFlag(desc.RetTp.Flag), check.IsFalse) +} diff --git a/expression/aggregation/bench_test.go b/expression/aggregation/bench_test.go new file mode 100644 index 0000000..072219d --- /dev/null +++ b/expression/aggregation/bench_test.go @@ -0,0 +1,61 @@ +// Copyright 2018 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package aggregation + +import ( + "testing" + + "github.com/pingcap/tidb/expression" + "github.com/pingcap/tidb/parser/ast" + "github.com/pingcap/tidb/parser/mysql" + "github.com/pingcap/tidb/types" + "github.com/pingcap/tidb/util/mock" +) + +func BenchmarkCreateContext(b *testing.B) { + col := &expression.Column{ + Index: 0, + RetType: types.NewFieldType(mysql.TypeLonglong), + } + ctx := mock.NewContext() + desc, err := NewAggFuncDesc(ctx, ast.AggFuncAvg, []expression.Expression{col}) + if err != nil { + b.Fatal(err) + } + fun := desc.GetAggFunc(ctx) + b.StartTimer() + for i := 0; i < b.N; i++ { + fun.CreateContext(ctx.GetSessionVars().StmtCtx) + } + b.ReportAllocs() +} + +func BenchmarkResetContext(b *testing.B) { + col := &expression.Column{ + Index: 0, + RetType: types.NewFieldType(mysql.TypeLonglong), + } + ctx := mock.NewContext() + desc, err := NewAggFuncDesc(ctx, ast.AggFuncAvg, []expression.Expression{col}) + if err != nil { + b.Fatal(err) + } + fun := desc.GetAggFunc(ctx) + evalCtx := fun.CreateContext(ctx.GetSessionVars().StmtCtx) + b.StartTimer() + for i := 0; i < b.N; i++ { + fun.ResetContext(ctx.GetSessionVars().StmtCtx, evalCtx) + } + b.ReportAllocs() +} diff --git a/expression/aggregation/count.go b/expression/aggregation/count.go new file mode 100644 index 0000000..4fb07b1 --- /dev/null +++ b/expression/aggregation/count.go @@ -0,0 +1,59 @@ +// Copyright 2017 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package aggregation + +import ( + "github.com/pingcap/tidb/sessionctx/stmtctx" + "github.com/pingcap/tidb/types" + "github.com/pingcap/tidb/util/chunk" +) + +type countFunction struct { + aggFunction +} + +// Update implements Aggregation interface. +func (cf *countFunction) Update(evalCtx *AggEvaluateContext, sc *stmtctx.StatementContext, row chunk.Row) error { + for _, a := range cf.Args { + value, err := a.Eval(row) + if err != nil { + return err + } + if value.IsNull() { + return nil + } + if cf.Mode == FinalMode || cf.Mode == Partial2Mode { + evalCtx.Count += value.GetInt64() + } + } + if cf.Mode == CompleteMode || cf.Mode == Partial1Mode { + evalCtx.Count++ + } + return nil +} + +func (cf *countFunction) ResetContext(sc *stmtctx.StatementContext, evalCtx *AggEvaluateContext) { + evalCtx.Count = 0 +} + +// GetResult implements Aggregation interface. +func (cf *countFunction) GetResult(evalCtx *AggEvaluateContext) (d types.Datum) { + d.SetInt64(evalCtx.Count) + return d +} + +// GetPartialResult implements Aggregation interface. 
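+// The partial result of count is a single BIGINT datum holding the rows counted so far; when
+// a final phase runs Update in Partial2Mode/FinalMode (see above), these partial counts are
+// added together instead of counting rows again. For illustration:
+//
+//	// partial worker A counted 3 rows, partial worker B counted 4 rows;
+//	// feeding both partial datums through a FinalMode countFunction gives GetResult() == 7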
+func (cf *countFunction) GetPartialResult(evalCtx *AggEvaluateContext) []types.Datum { + return []types.Datum{cf.GetResult(evalCtx)} +} diff --git a/expression/aggregation/descriptor.go b/expression/aggregation/descriptor.go new file mode 100644 index 0000000..6c852ea --- /dev/null +++ b/expression/aggregation/descriptor.go @@ -0,0 +1,179 @@ +// Copyright 2018 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package aggregation + +import ( + "github.com/pingcap/tidb/expression" + "github.com/pingcap/tidb/parser/ast" + "github.com/pingcap/tidb/parser/mysql" + "github.com/pingcap/tidb/sessionctx" + "github.com/pingcap/tidb/types" +) + +// AggFuncDesc describes an aggregation function signature, only used in planner. +type AggFuncDesc struct { + baseFuncDesc + // Mode represents the execution mode of the aggregation function. + Mode AggFunctionMode +} + +// NewAggFuncDesc creates an aggregation function signature descriptor. +func NewAggFuncDesc(ctx sessionctx.Context, name string, args []expression.Expression) (*AggFuncDesc, error) { + b, err := newBaseFuncDesc(ctx, name, args) + if err != nil { + return nil, err + } + return &AggFuncDesc{baseFuncDesc: b}, nil +} + +// Equal checks whether two aggregation function signatures are equal. +func (a *AggFuncDesc) Equal(ctx sessionctx.Context, other *AggFuncDesc) bool { + return a.baseFuncDesc.equal(ctx, &other.baseFuncDesc) +} + +// Clone copies an aggregation function signature totally. +func (a *AggFuncDesc) Clone() *AggFuncDesc { + clone := *a + clone.baseFuncDesc = *a.baseFuncDesc.clone() + return &clone +} + +// Split splits `a` into two aggregate descriptors for partial phase and +// final phase individually. +// This function is only used when executing aggregate function parallelly. +// ordinal indicates the column ordinal of the intermediate result. +func (a *AggFuncDesc) Split(ordinal []int) (partialAggDesc, finalAggDesc *AggFuncDesc) { + partialAggDesc = a.Clone() + if a.Mode == CompleteMode { + partialAggDesc.Mode = Partial1Mode + } else if a.Mode == FinalMode { + partialAggDesc.Mode = Partial2Mode + } else { + panic("Error happened during AggFuncDesc.Split, the AggFunctionMode is not CompleteMode or FinalMode.") + } + finalAggDesc = &AggFuncDesc{ + Mode: FinalMode, // We only support FinalMode now in final phase. + } + finalAggDesc.Name = a.Name + finalAggDesc.RetTp = a.RetTp + switch a.Name { + case ast.AggFuncAvg: + args := make([]expression.Expression, 0, 2) + args = append(args, &expression.Column{ + Index: ordinal[0], + RetType: types.NewFieldType(mysql.TypeLonglong), + }) + args = append(args, &expression.Column{ + Index: ordinal[1], + RetType: a.RetTp, + }) + finalAggDesc.Args = args + default: + args := make([]expression.Expression, 0, 1) + args = append(args, &expression.Column{ + Index: ordinal[0], + RetType: a.RetTp, + }) + finalAggDesc.Args = args + } + return +} + +// EvalNullValueInOuterJoin gets the null value when the aggregation is upon an outer join, +// and the aggregation function's input is null. 
+// If there is no matching row for the inner table of an outer join, +// an aggregation function only involves constant and/or columns belongs to the inner table +// will be set to the null value. +// The input stands for the schema of Aggregation's child. If the function can't produce a null value, the second +// return value will be false. +// e.g. +// Table t with only one row: +// +-------+---------+---------+ +// | Table | Field | Type | +// +-------+---------+---------+ +// | t | a | int(11) | +// +-------+---------+---------+ +// +------+ +// | a | +// +------+ +// | 1 | +// +------+ +// +// Table s which is empty: +// +-------+---------+---------+ +// | Table | Field | Type | +// +-------+---------+---------+ +// | s | a | int(11) | +// +-------+---------+---------+ +// +// Query: `select t.a as `t.a`, count(95), sum(95), avg(95), bit_or(95), bit_and(95), bit_or(95), max(95), min(95), s.a as `s.a`, avg(95) from t left join s on t.a = s.a;` +// +------+-----------+---------+---------+------------+-------------+------------+---------+---------+------+----------+ +// | t.a | count(95) | sum(95) | avg(95) | bit_or(95) | bit_and(95) | bit_or(95) | max(95) | min(95) | s.a | avg(s.a) | +// +------+-----------+---------+---------+------------+-------------+------------+---------+---------+------+----------+ +// | 1 | 1 | 95 | 95.0000 | 95 | 95 | 95 | 95 | 95 | NULL | NULL | +// +------+-----------+---------+---------+------------+-------------+------------+---------+---------+------+----------+ +func (a *AggFuncDesc) EvalNullValueInOuterJoin(ctx sessionctx.Context, schema *expression.Schema) (types.Datum, bool) { + switch a.Name { + case ast.AggFuncCount: + return a.evalNullValueInOuterJoin4Count(ctx, schema) + case ast.AggFuncSum, ast.AggFuncMax, ast.AggFuncMin, + ast.AggFuncFirstRow: + return a.evalNullValueInOuterJoin4Sum(ctx, schema) + case ast.AggFuncAvg: + return types.Datum{}, false + default: + panic("unsupported agg function") + } +} + +// GetAggFunc gets an evaluator according to the aggregation function signature. 
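+// A typical lifecycle of the returned evaluator, mirroring the unit tests in this package
+// (an illustrative sketch only, assuming ctx is a mock session context, col is a BIGINT
+// column reference at chunk offset 0, and rows is a hypothetical []chunk.Row):
+//
+//	desc, err := NewAggFuncDesc(ctx, ast.AggFuncSum, []expression.Expression{col})
+//	// handle err ...
+//	sumFunc := desc.GetAggFunc(ctx)
+//	evalCtx := sumFunc.CreateContext(ctx.GetSessionVars().StmtCtx)
+//	for _, row := range rows {
+//		_ = sumFunc.Update(evalCtx, ctx.GetSessionVars().StmtCtx, row)
+//	}
+//	total := sumFunc.GetResult(evalCtx)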
+func (a *AggFuncDesc) GetAggFunc(ctx sessionctx.Context) Aggregation { + aggFunc := aggFunction{AggFuncDesc: a} + switch a.Name { + case ast.AggFuncSum: + return &sumFunction{aggFunction: aggFunc} + case ast.AggFuncCount: + return &countFunction{aggFunction: aggFunc} + case ast.AggFuncAvg: + return &avgFunction{aggFunction: aggFunc} + case ast.AggFuncMax: + return &maxMinFunction{aggFunction: aggFunc, isMax: true} + case ast.AggFuncMin: + return &maxMinFunction{aggFunction: aggFunc, isMax: false} + case ast.AggFuncFirstRow: + return &firstRowFunction{aggFunction: aggFunc} + default: + panic("unsupported agg function") + } +} + +func (a *AggFuncDesc) evalNullValueInOuterJoin4Count(ctx sessionctx.Context, schema *expression.Schema) (types.Datum, bool) { + for _, arg := range a.Args { + result := expression.EvaluateExprWithNull(ctx, schema, arg) + con, ok := result.(*expression.Constant) + if !ok || con.Value.IsNull() { + return types.Datum{}, ok + } + } + return types.NewDatum(1), true +} + +func (a *AggFuncDesc) evalNullValueInOuterJoin4Sum(ctx sessionctx.Context, schema *expression.Schema) (types.Datum, bool) { + result := expression.EvaluateExprWithNull(ctx, schema, a.Args[0]) + con, ok := result.(*expression.Constant) + if !ok || con.Value.IsNull() { + return types.Datum{}, ok + } + return con.Value, true +} diff --git a/expression/aggregation/explain.go b/expression/aggregation/explain.go new file mode 100644 index 0000000..a3c7534 --- /dev/null +++ b/expression/aggregation/explain.go @@ -0,0 +1,33 @@ +// Copyright 2017 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package aggregation + +import ( + "bytes" + "fmt" +) + +// ExplainAggFunc generates explain information for a aggregation function. +func ExplainAggFunc(agg *AggFuncDesc) string { + var buffer bytes.Buffer + fmt.Fprintf(&buffer, "%s(", agg.Name) + for i, arg := range agg.Args { + buffer.WriteString(arg.ExplainInfo()) + if i+1 < len(agg.Args) { + buffer.WriteString(", ") + } + } + buffer.WriteString(")") + return buffer.String() +} diff --git a/expression/aggregation/first_row.go b/expression/aggregation/first_row.go new file mode 100644 index 0000000..947cb0f --- /dev/null +++ b/expression/aggregation/first_row.go @@ -0,0 +1,56 @@ +// Copyright 2017 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package aggregation + +import ( + "github.com/pingcap/errors" + "github.com/pingcap/tidb/sessionctx/stmtctx" + "github.com/pingcap/tidb/types" + "github.com/pingcap/tidb/util/chunk" +) + +type firstRowFunction struct { + aggFunction +} + +// Update implements Aggregation interface. 
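+// first_row only remembers the value seen by the first call to Update: GotFirstRow guards the
+// state and Value stores a cloned copy of that first datum, so later rows are ignored. An
+// illustrative sketch (rowWith1 and rowWith2 are hypothetical rows holding 1 and 2):
+//
+//	_ = ff.Update(evalCtx, sc, rowWith1) // Value = 1, GotFirstRow = true
+//	_ = ff.Update(evalCtx, sc, rowWith2) // ignored
+//	ff.GetResult(evalCtx)                // still 1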
+func (ff *firstRowFunction) Update(evalCtx *AggEvaluateContext, sc *stmtctx.StatementContext, row chunk.Row) error { + if evalCtx.GotFirstRow { + return nil + } + if len(ff.Args) != 1 { + return errors.New("Wrong number of args for AggFuncFirstRow") + } + value, err := ff.Args[0].Eval(row) + if err != nil { + return err + } + evalCtx.Value = types.CloneDatum(value) + evalCtx.GotFirstRow = true + return nil +} + +// GetResult implements Aggregation interface. +func (ff *firstRowFunction) GetResult(evalCtx *AggEvaluateContext) types.Datum { + return evalCtx.Value +} + +func (ff *firstRowFunction) ResetContext(_ *stmtctx.StatementContext, evalCtx *AggEvaluateContext) { + evalCtx.GotFirstRow = false +} + +// GetPartialResult implements Aggregation interface. +func (ff *firstRowFunction) GetPartialResult(evalCtx *AggEvaluateContext) []types.Datum { + return []types.Datum{ff.GetResult(evalCtx)} +} diff --git a/expression/aggregation/max_min.go b/expression/aggregation/max_min.go new file mode 100644 index 0000000..458bdcf --- /dev/null +++ b/expression/aggregation/max_min.go @@ -0,0 +1,59 @@ +// Copyright 2017 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package aggregation + +import ( + "github.com/pingcap/tidb/sessionctx/stmtctx" + "github.com/pingcap/tidb/types" + "github.com/pingcap/tidb/util/chunk" +) + +type maxMinFunction struct { + aggFunction + isMax bool +} + +// GetResult implements Aggregation interface. +func (mmf *maxMinFunction) GetResult(evalCtx *AggEvaluateContext) (d types.Datum) { + return evalCtx.Value +} + +// GetPartialResult implements Aggregation interface. +func (mmf *maxMinFunction) GetPartialResult(evalCtx *AggEvaluateContext) []types.Datum { + return []types.Datum{mmf.GetResult(evalCtx)} +} + +// Update implements Aggregation interface. +func (mmf *maxMinFunction) Update(evalCtx *AggEvaluateContext, sc *stmtctx.StatementContext, row chunk.Row) error { + a := mmf.Args[0] + value, err := a.Eval(row) + if err != nil { + return err + } + if evalCtx.Value.IsNull() { + evalCtx.Value = *(&value).Copy() + } + if value.IsNull() { + return nil + } + var c int + c, err = evalCtx.Value.CompareDatum(sc, &value) + if err != nil { + return err + } + if (mmf.isMax && c == -1) || (!mmf.isMax && c == 1) { + evalCtx.Value = *(&value).Copy() + } + return nil +} diff --git a/expression/aggregation/sum.go b/expression/aggregation/sum.go new file mode 100644 index 0000000..aeae368 --- /dev/null +++ b/expression/aggregation/sum.go @@ -0,0 +1,39 @@ +// Copyright 2017 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package aggregation + +import ( + "github.com/pingcap/tidb/sessionctx/stmtctx" + "github.com/pingcap/tidb/types" + "github.com/pingcap/tidb/util/chunk" +) + +type sumFunction struct { + aggFunction +} + +// Update implements Aggregation interface. +func (sf *sumFunction) Update(evalCtx *AggEvaluateContext, sc *stmtctx.StatementContext, row chunk.Row) error { + return sf.updateSum(sc, evalCtx, row) +} + +// GetResult implements Aggregation interface. +func (sf *sumFunction) GetResult(evalCtx *AggEvaluateContext) (d types.Datum) { + return evalCtx.Value +} + +// GetPartialResult implements Aggregation interface. +func (sf *sumFunction) GetPartialResult(evalCtx *AggEvaluateContext) []types.Datum { + return []types.Datum{sf.GetResult(evalCtx)} +} diff --git a/expression/aggregation/util.go b/expression/aggregation/util.go new file mode 100644 index 0000000..585b594 --- /dev/null +++ b/expression/aggregation/util.go @@ -0,0 +1,91 @@ +// Copyright 2018 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package aggregation + +import ( + "github.com/pingcap/errors" + "github.com/pingcap/tidb/sessionctx/stmtctx" + "github.com/pingcap/tidb/types" + "github.com/pingcap/tidb/util/codec" + "github.com/pingcap/tidb/util/mvmap" +) + +// distinctChecker stores existing keys and checks if given data is distinct. +type distinctChecker struct { + existingKeys *mvmap.MVMap + key []byte + vals [][]byte + sc *stmtctx.StatementContext +} + +// createDistinctChecker creates a new distinct checker. +func createDistinctChecker(sc *stmtctx.StatementContext) *distinctChecker { + return &distinctChecker{ + existingKeys: mvmap.NewMVMap(), + sc: sc, + } +} + +// Check checks if values is distinct. +func (d *distinctChecker) Check(values []types.Datum) (bool, error) { + d.key = d.key[:0] + var err error + d.key, err = codec.EncodeValue(d.sc, d.key, values...) + if err != nil { + return false, err + } + d.vals = d.existingKeys.Get(d.key, d.vals[:0]) + if len(d.vals) > 0 { + return false, nil + } + d.existingKeys.Put(d.key, []byte{}) + return true, nil +} + +// calculateSum adds v to sum. 
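+// The accumulation is kind-based: integer inputs keep an int64 running sum, other non-NULL
+// inputs are converted to float64, and a NULL input leaves the current sum untouched. A short
+// illustrative sketch (sc is assumed to be a *stmtctx.StatementContext):
+//
+//	sum := types.Datum{}                                 // starts as NULL
+//	sum, _ = calculateSum(sc, sum, types.NewIntDatum(3)) // sum == 3
+//	sum, _ = calculateSum(sc, sum, types.Datum{})        // NULL is ignored, sum == 3
+//	sum, _ = calculateSum(sc, sum, types.NewIntDatum(4)) // sum == 7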
+func calculateSum(sc *stmtctx.StatementContext, sum, v types.Datum) (data types.Datum, err error) { + // for avg and sum calculation + // avg and sum use decimal for integer and decimal type, use float for others + // see https://dev.mysql.com/doc/refman/5.7/en/group-by-functions.html + + switch v.Kind() { + case types.KindNull: + case types.KindInt64, types.KindUint64: + d, err := v.ToInt64(sc) + if err == nil { + data = types.NewIntDatum(d) + } + default: + var f float64 + f, err = v.ToFloat64(sc) + if err == nil { + data = types.NewFloat64Datum(f) + } + } + + if err != nil { + return data, err + } + if data.IsNull() { + return sum, nil + } + switch sum.Kind() { + case types.KindNull: + return data, nil + case types.KindFloat64, types.KindInt64: + return types.ComputePlus(sum, data) + default: + return data, errors.Errorf("invalid value %v for aggregate", sum.Kind()) + } +} diff --git a/expression/aggregation/util_test.go b/expression/aggregation/util_test.go new file mode 100644 index 0000000..00a2f77 --- /dev/null +++ b/expression/aggregation/util_test.go @@ -0,0 +1,35 @@ +package aggregation + +import ( + "time" + + "github.com/pingcap/check" + "github.com/pingcap/tidb/sessionctx/stmtctx" + "github.com/pingcap/tidb/types" +) + +var _ = check.Suite(&testUtilSuite{}) + +type testUtilSuite struct { +} + +func (s *testUtilSuite) TestDistinct(c *check.C) { + sc := &stmtctx.StatementContext{TimeZone: time.Local} + dc := createDistinctChecker(sc) + tests := []struct { + vals []interface{} + expect bool + }{ + {[]interface{}{1, 1}, true}, + {[]interface{}{1, 1}, false}, + {[]interface{}{1, 2}, true}, + {[]interface{}{1, 2}, false}, + {[]interface{}{1, nil}, true}, + {[]interface{}{1, nil}, false}, + } + for _, tt := range tests { + d, err := dc.Check(types.MakeDatums(tt.vals...)) + c.Assert(err, check.IsNil) + c.Assert(d, check.Equals, tt.expect) + } +} diff --git a/expression/bench_test.go b/expression/bench_test.go new file mode 100644 index 0000000..12e269e --- /dev/null +++ b/expression/bench_test.go @@ -0,0 +1,876 @@ +// Copyright 2018 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package expression + +// This file contains benchmarks of our expression evaluation. + +import ( + "flag" + "fmt" + "math/rand" + "reflect" + "strings" + "testing" + "time" + + . 
"github.com/pingcap/check" + "github.com/pingcap/tidb/parser/ast" + "github.com/pingcap/tidb/parser/mysql" + "github.com/pingcap/tidb/parser/terror" + "github.com/pingcap/tidb/sessionctx" + "github.com/pingcap/tidb/sessionctx/variable" + "github.com/pingcap/tidb/types" + "github.com/pingcap/tidb/util/chunk" + "github.com/pingcap/tidb/util/mock" +) + +func BenchmarkScalarFunctionClone(b *testing.B) { + col := &Column{RetType: types.NewFieldType(mysql.TypeLonglong)} + con1 := One.Clone() + con2 := Zero.Clone() + add := NewFunctionInternal(mock.NewContext(), ast.Plus, types.NewFieldType(mysql.TypeLonglong), col, con1) + sub := NewFunctionInternal(mock.NewContext(), ast.Plus, types.NewFieldType(mysql.TypeLonglong), add, con2) + b.ResetTimer() + for i := 0; i < b.N; i++ { + sub.Clone() + } + b.ReportAllocs() +} + +// dataGenerator is used to generate data for test. +type dataGenerator interface { + gen() interface{} +} + +type defaultGener struct { + nullRation float64 + eType types.EvalType +} + +func (g *defaultGener) gen() interface{} { + if rand.Float64() < g.nullRation { + return nil + } + switch g.eType { + case types.ETInt: + if rand.Float64() < 0.5 { + return -rand.Int63() + } + return rand.Int63() + case types.ETReal: + if rand.Float64() < 0.5 { + return -rand.Float64() * 1000000 + } + return rand.Float64() * 1000000 + case types.ETString: + return randString() + } + return nil +} + +// selectStringGener select one string randomly from the candidates array +type selectStringGener struct { + candidates []string +} + +func (g *selectStringGener) gen() interface{} { + if len(g.candidates) == 0 { + return nil + } + return g.candidates[rand.Intn(len(g.candidates))] +} + +// rangeRealGener is used to generate float64 items in [begin, end]. +type rangeRealGener struct { + begin float64 + end float64 + + nullRation float64 +} + +func (g *rangeRealGener) gen() interface{} { + if rand.Float64() < g.nullRation { + return nil + } + if g.end < g.begin { + g.begin = -100 + g.end = 100 + } + return rand.Float64()*(g.end-g.begin) + g.begin +} + +// rangeInt64Gener is used to generate int64 items in [begin, end). +type rangeInt64Gener struct { + begin int + end int +} + +func (rig *rangeInt64Gener) gen() interface{} { + return int64(rand.Intn(rig.end-rig.begin) + rig.begin) +} + +// numStrGener is used to generate number strings. +type numStrGener struct { + rangeInt64Gener +} + +func (g *numStrGener) gen() interface{} { + return fmt.Sprintf("%v", g.rangeInt64Gener.gen()) +} + +// randLenStrGener is used to generate strings whose lengths are in [lenBegin, lenEnd). +type randLenStrGener struct { + lenBegin int + lenEnd int +} + +func (g *randLenStrGener) gen() interface{} { + n := rand.Intn(g.lenEnd-g.lenBegin) + g.lenBegin + buf := make([]byte, n) + for i := range buf { + x := rand.Intn(62) + if x < 10 { + buf[i] = byte('0' + x) + } else if x-10 < 26 { + buf[i] = byte('a' + x - 10) + } else { + buf[i] = byte('A' + x - 10 - 26) + } + } + return string(buf) +} + +type vecExprBenchCase struct { + // retEvalType is the EvalType of the expression result. + // This field is required. + retEvalType types.EvalType + // childrenTypes is the EvalTypes of the expression children(arguments). + // This field is required. + childrenTypes []types.EvalType + // childrenFieldTypes is the field types of the expression children(arguments). + // If childrenFieldTypes is not set, it will be converted from childrenTypes. + // This field is optional. 
+ childrenFieldTypes []*types.FieldType + // geners are used to generate data for children and geners[i] generates data for children[i]. + // If geners[i] is nil, the default dataGenerator will be used for its corresponding child. + // The geners slice can be shorter than the children slice, if it has 3 children, then + // geners[gen1, gen2] will be regarded as geners[gen1, gen2, nil]. + // This field is optional. + geners []dataGenerator + // aesModeAttr information, needed by encryption functions + aesModes string + // constants are used to generate constant data for children[i]. + constants []*Constant + // chunkSize is used to specify the chunk size of children, the maximum is 1024. + // This field is optional, 1024 by default. + chunkSize int +} + +type vecExprBenchCases map[string][]vecExprBenchCase + +func fillColumn(eType types.EvalType, chk *chunk.Chunk, colIdx int, testCase vecExprBenchCase) { + var gen dataGenerator + if len(testCase.geners) > colIdx && testCase.geners[colIdx] != nil { + gen = testCase.geners[colIdx] + } + fillColumnWithGener(eType, chk, colIdx, gen) +} + +func fillColumnWithGener(eType types.EvalType, chk *chunk.Chunk, colIdx int, gen dataGenerator) { + batchSize := chk.Capacity() + if gen == nil { + gen = &defaultGener{0.2, eType} + } + + col := chk.Column(colIdx) + col.Reset(eType) + for i := 0; i < batchSize; i++ { + v := gen.gen() + if v == nil { + col.AppendNull() + continue + } + switch eType { + case types.ETInt: + col.AppendInt64(v.(int64)) + case types.ETReal: + col.AppendFloat64(v.(float64)) + case types.ETString: + col.AppendString(v.(string)) + } + } +} + +func randString() string { + n := 10 + rand.Intn(10) + buf := make([]byte, n) + for i := range buf { + x := rand.Intn(62) + if x < 10 { + buf[i] = byte('0' + x) + } else if x-10 < 26 { + buf[i] = byte('a' + x - 10) + } else { + buf[i] = byte('A' + x - 10 - 26) + } + } + return string(buf) +} + +func eType2FieldType(eType types.EvalType) *types.FieldType { + switch eType { + case types.ETInt: + return types.NewFieldType(mysql.TypeLonglong) + case types.ETReal: + return types.NewFieldType(mysql.TypeDouble) + case types.ETString: + return types.NewFieldType(mysql.TypeVarString) + default: + panic(fmt.Sprintf("EvalType=%v is not supported.", eType)) + } +} + +func genVecExprBenchCase(ctx sessionctx.Context, funcName string, testCase vecExprBenchCase) (expr Expression, fts []*types.FieldType, input *chunk.Chunk, output *chunk.Chunk) { + fts = make([]*types.FieldType, len(testCase.childrenTypes)) + for i := range fts { + if i < len(testCase.childrenFieldTypes) && testCase.childrenFieldTypes[i] != nil { + fts[i] = testCase.childrenFieldTypes[i] + } else { + fts[i] = eType2FieldType(testCase.childrenTypes[i]) + } + } + if testCase.chunkSize <= 0 || testCase.chunkSize > 1024 { + testCase.chunkSize = 1024 + } + cols := make([]Expression, len(testCase.childrenTypes)) + input = chunk.New(fts, testCase.chunkSize, testCase.chunkSize) + input.NumRows() + for i, eType := range testCase.childrenTypes { + fillColumn(eType, input, i, testCase) + if i < len(testCase.constants) && testCase.constants[i] != nil { + cols[i] = testCase.constants[i] + } else { + cols[i] = &Column{Index: i, RetType: fts[i]} + } + } + + expr, err := NewFunction(ctx, funcName, eType2FieldType(testCase.retEvalType), cols...) 
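+	// At this point `input` holds testCase.chunkSize rows of generated data (one column per
+	// child) and `cols` mixes Column references with any constants supplied by the test case;
+	// NewFunction above assembles them into the expression under test.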
+ if err != nil { + panic(err) + } + + output = chunk.New([]*types.FieldType{eType2FieldType(expr.GetType().EvalType())}, testCase.chunkSize, testCase.chunkSize) + return expr, fts, input, output +} + +// testVectorizedEvalOneVec is used to verify that the vectorized +// expression is evaluated correctly during projection +func testVectorizedEvalOneVec(c *C, vecExprCases vecExprBenchCases) { + ctx := mock.NewContext() + for funcName, testCases := range vecExprCases { + for _, testCase := range testCases { + expr, fts, input, output := genVecExprBenchCase(ctx, funcName, testCase) + commentf := func(row int) CommentInterface { + return Commentf("func: %v, case %+v, row: %v, rowData: %v", funcName, testCase, row, input.GetRow(row).GetDatumRow(fts)) + } + output2 := output.CopyConstruct() + c.Assert(evalOneVec(ctx, expr, input, output, 0), IsNil, Commentf("func: %v, case: %+v", funcName, testCase)) + it := chunk.NewIterator4Chunk(input) + c.Assert(evalOneColumn(ctx, expr, it, output2, 0), IsNil, Commentf("func: %v, case: %+v", funcName, testCase)) + + c1, c2 := output.Column(0), output2.Column(0) + switch expr.GetType().EvalType() { + case types.ETInt: + for i := 0; i < input.NumRows(); i++ { + c.Assert(c1.IsNull(i), Equals, c2.IsNull(i), commentf(i)) + if !c1.IsNull(i) { + c.Assert(c1.GetInt64(i), Equals, c2.GetInt64(i), commentf(i)) + } + } + case types.ETReal: + for i := 0; i < input.NumRows(); i++ { + c.Assert(c1.IsNull(i), Equals, c2.IsNull(i), commentf(i)) + if !c1.IsNull(i) { + c.Assert(c1.GetFloat64(i), Equals, c2.GetFloat64(i), commentf(i)) + } + } + case types.ETString: + for i := 0; i < input.NumRows(); i++ { + c.Assert(c1.IsNull(i), Equals, c2.IsNull(i), commentf(i)) + if !c1.IsNull(i) { + c.Assert(c1.GetString(i), Equals, c2.GetString(i), commentf(i)) + } + } + } + } + } +} + +// benchmarkVectorizedEvalOneVec is used to get the effect of +// using the vectorized expression evaluations during projection +func benchmarkVectorizedEvalOneVec(b *testing.B, vecExprCases vecExprBenchCases) { + ctx := mock.NewContext() + for funcName, testCases := range vecExprCases { + for _, testCase := range testCases { + expr, _, input, output := genVecExprBenchCase(ctx, funcName, testCase) + exprName := expr.String() + if sf, ok := expr.(*ScalarFunction); ok { + exprName = fmt.Sprintf("%v", reflect.TypeOf(sf.Function)) + tmp := strings.Split(exprName, ".") + exprName = tmp[len(tmp)-1] + } + + b.Run(exprName+"-EvalOneVec", func(b *testing.B) { + b.ResetTimer() + for i := 0; i < b.N; i++ { + if err := evalOneVec(ctx, expr, input, output, 0); err != nil { + b.Fatal(err) + } + } + }) + b.Run(exprName+"-EvalOneCol", func(b *testing.B) { + b.ResetTimer() + for i := 0; i < b.N; i++ { + it := chunk.NewIterator4Chunk(input) + if err := evalOneColumn(ctx, expr, it, output, 0); err != nil { + b.Fatal(err) + } + } + }) + } + } +} + +func genVecBuiltinFuncBenchCase(ctx sessionctx.Context, funcName string, testCase vecExprBenchCase) (baseFunc builtinFunc, fts []*types.FieldType, input *chunk.Chunk, result *chunk.Column) { + childrenNumber := len(testCase.childrenTypes) + fts = make([]*types.FieldType, childrenNumber) + for i := range fts { + if i < len(testCase.childrenFieldTypes) && testCase.childrenFieldTypes[i] != nil { + fts[i] = testCase.childrenFieldTypes[i] + } else { + fts[i] = eType2FieldType(testCase.childrenTypes[i]) + } + } + cols := make([]Expression, childrenNumber) + if testCase.chunkSize <= 0 || testCase.chunkSize > 1024 { + testCase.chunkSize = 1024 + } + input = chunk.New(fts, testCase.chunkSize, 
testCase.chunkSize) + for i, eType := range testCase.childrenTypes { + fillColumn(eType, input, i, testCase) + if i < len(testCase.constants) && testCase.constants[i] != nil { + cols[i] = testCase.constants[i] + } else { + cols[i] = &Column{Index: i, RetType: fts[i]} + } + } + if len(cols) == 0 { + input.SetNumVirtualRows(testCase.chunkSize) + } + + baseFunc, err := funcs[funcName].getFunction(ctx, cols) + if err != nil { + panic(err) + } + result = chunk.NewColumn(eType2FieldType(testCase.retEvalType), testCase.chunkSize) + // Mess up the output to make sure vecEvalXXX to call ResizeXXX/ReserveXXX itself. + result.AppendNull() + return baseFunc, fts, input, result +} + +// a hack way to calculate length of a chunk.Column. +func getColumnLen(col *chunk.Column, eType types.EvalType) int { + chk := chunk.New([]*types.FieldType{eType2FieldType(eType)}, 1024, 1024) + chk.SetCol(0, col) + return chk.NumRows() +} + +// removeTestOptions removes all not needed options like '-test.timeout=' from argument list +func removeTestOptions(args []string) []string { + argList := args[:0] + + // args contains '-test.timeout=' option for example + // excluding it to be able to run all tests + for _, arg := range args { + if strings.HasPrefix(arg, "builtin") || IsFunctionSupported(arg) { + argList = append(argList, arg) + } + } + return argList +} + +// testVectorizedBuiltinFunc is used to verify that the vectorized +// expression is evaluated correctly +func testVectorizedBuiltinFunc(c *C, vecExprCases vecExprBenchCases) { + testFunc := make(map[string]bool) + argList := removeTestOptions(flag.Args()) + testAll := len(argList) == 0 + for _, arg := range argList { + testFunc[arg] = true + } + for funcName, testCases := range vecExprCases { + for _, testCase := range testCases { + ctx := mock.NewContext() + err := ctx.GetSessionVars().SetSystemVar(variable.BlockEncryptionMode, testCase.aesModes) + c.Assert(err, IsNil) + baseFunc, fts, input, output := genVecBuiltinFuncBenchCase(ctx, funcName, testCase) + baseFuncName := fmt.Sprintf("%v", reflect.TypeOf(baseFunc)) + tmp := strings.Split(baseFuncName, ".") + baseFuncName = tmp[len(tmp)-1] + + if !testAll && (testFunc[baseFuncName] != true && testFunc[funcName] != true) { + continue + } + // do not forget to implement the vectorized method. 
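+			// The switch below cross-checks the vectorized path against the row-based one:
+			// vecEvalXXX fills `output` for the whole chunk, then every row is re-evaluated with
+			// evalXXX so that values, NULL flags and the number of emitted warnings must agree.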
+ c.Assert(baseFunc.vectorized(), IsTrue, Commentf("func: %v, case: %+v", baseFuncName, testCase)) + commentf := func(row int) CommentInterface { + return Commentf("func: %v, case %+v, row: %v, rowData: %v", baseFuncName, testCase, row, input.GetRow(row).GetDatumRow(fts)) + } + it := chunk.NewIterator4Chunk(input) + i := 0 + var vecWarnCnt uint16 + switch testCase.retEvalType { + case types.ETInt: + err := baseFunc.vecEvalInt(input, output) + c.Assert(err, IsNil, Commentf("func: %v, case: %+v", baseFuncName, testCase)) + // do not forget to call ResizeXXX/ReserveXXX + c.Assert(getColumnLen(output, testCase.retEvalType), Equals, input.NumRows()) + vecWarnCnt = ctx.GetSessionVars().StmtCtx.WarningCount() + i64s := output.Int64s() + for row := it.Begin(); row != it.End(); row = it.Next() { + val, isNull, err := baseFunc.evalInt(row) + c.Assert(err, IsNil, commentf(i)) + c.Assert(isNull, Equals, output.IsNull(i), commentf(i)) + if !isNull { + c.Assert(val, Equals, i64s[i], commentf(i)) + } + i++ + } + case types.ETReal: + err := baseFunc.vecEvalReal(input, output) + c.Assert(err, IsNil, Commentf("func: %v, case: %+v", baseFuncName, testCase)) + // do not forget to call ResizeXXX/ReserveXXX + c.Assert(getColumnLen(output, testCase.retEvalType), Equals, input.NumRows()) + vecWarnCnt = ctx.GetSessionVars().StmtCtx.WarningCount() + f64s := output.Float64s() + for row := it.Begin(); row != it.End(); row = it.Next() { + val, isNull, err := baseFunc.evalReal(row) + c.Assert(err, IsNil, commentf(i)) + c.Assert(isNull, Equals, output.IsNull(i), commentf(i)) + if !isNull { + c.Assert(val, Equals, f64s[i], commentf(i)) + } + i++ + } + case types.ETString: + err := baseFunc.vecEvalString(input, output) + c.Assert(err, IsNil, Commentf("func: %v, case: %+v", baseFuncName, testCase)) + // do not forget to call ResizeXXX/ReserveXXX + c.Assert(getColumnLen(output, testCase.retEvalType), Equals, input.NumRows()) + vecWarnCnt = ctx.GetSessionVars().StmtCtx.WarningCount() + for row := it.Begin(); row != it.End(); row = it.Next() { + val, isNull, err := baseFunc.evalString(row) + c.Assert(err, IsNil, commentf(i)) + c.Assert(isNull, Equals, output.IsNull(i), commentf(i)) + if !isNull { + c.Assert(val, Equals, output.GetString(i), commentf(i)) + } + i++ + } + default: + c.Fatal(fmt.Sprintf("evalType=%v is not supported", testCase.retEvalType)) + } + + // check warnings + totalWarns := ctx.GetSessionVars().StmtCtx.WarningCount() + c.Assert(2*vecWarnCnt, Equals, totalWarns) + warns := ctx.GetSessionVars().StmtCtx.GetWarnings() + for i := 0; i < int(vecWarnCnt); i++ { + c.Assert(terror.ErrorEqual(warns[i].Err, warns[i+int(vecWarnCnt)].Err), IsTrue) + } + } + } +} + +// benchmarkVectorizedBuiltinFunc is used to get the effect of +// using the vectorized expression evaluations +func benchmarkVectorizedBuiltinFunc(b *testing.B, vecExprCases vecExprBenchCases) { + ctx := mock.NewContext() + testFunc := make(map[string]bool) + argList := removeTestOptions(flag.Args()) + testAll := len(argList) == 0 + for _, arg := range argList { + testFunc[arg] = true + } + for funcName, testCases := range vecExprCases { + for _, testCase := range testCases { + err := ctx.GetSessionVars().SetSystemVar(variable.BlockEncryptionMode, testCase.aesModes) + if err != nil { + panic(err) + } + baseFunc, _, input, output := genVecBuiltinFuncBenchCase(ctx, funcName, testCase) + baseFuncName := fmt.Sprintf("%v", reflect.TypeOf(baseFunc)) + tmp := strings.Split(baseFuncName, ".") + baseFuncName = tmp[len(tmp)-1] + + if !testAll && 
testFunc[baseFuncName] != true && testFunc[funcName] != true { + continue + } + + b.Run(baseFuncName+"-VecBuiltinFunc", func(b *testing.B) { + b.ResetTimer() + switch testCase.retEvalType { + case types.ETInt: + for i := 0; i < b.N; i++ { + if err := baseFunc.vecEvalInt(input, output); err != nil { + b.Fatal(err) + } + } + case types.ETReal: + for i := 0; i < b.N; i++ { + if err := baseFunc.vecEvalReal(input, output); err != nil { + b.Fatal(err) + } + } + case types.ETString: + for i := 0; i < b.N; i++ { + if err := baseFunc.vecEvalString(input, output); err != nil { + b.Fatal(err) + } + } + default: + b.Fatal(fmt.Sprintf("evalType=%v is not supported", testCase.retEvalType)) + } + }) + b.Run(baseFuncName+"-NonVecBuiltinFunc", func(b *testing.B) { + b.ResetTimer() + it := chunk.NewIterator4Chunk(input) + switch testCase.retEvalType { + case types.ETInt: + for i := 0; i < b.N; i++ { + output.Reset(testCase.retEvalType) + for row := it.Begin(); row != it.End(); row = it.Next() { + v, isNull, err := baseFunc.evalInt(row) + if err != nil { + b.Fatal(err) + } + if isNull { + output.AppendNull() + } else { + output.AppendInt64(v) + } + } + } + case types.ETReal: + for i := 0; i < b.N; i++ { + output.Reset(testCase.retEvalType) + for row := it.Begin(); row != it.End(); row = it.Next() { + v, isNull, err := baseFunc.evalReal(row) + if err != nil { + b.Fatal(err) + } + if isNull { + output.AppendNull() + } else { + output.AppendFloat64(v) + } + } + } + case types.ETString: + for i := 0; i < b.N; i++ { + output.Reset(testCase.retEvalType) + for row := it.Begin(); row != it.End(); row = it.Next() { + v, isNull, err := baseFunc.evalString(row) + if err != nil { + b.Fatal(err) + } + if isNull { + output.AppendNull() + } else { + output.AppendString(v) + } + } + } + default: + b.Fatal(fmt.Sprintf("evalType=%v is not supported", testCase.retEvalType)) + } + }) + } + } +} + +func genVecEvalBool(numCols int, colTypes, eTypes []types.EvalType) (CNFExprs, *chunk.Chunk) { + gens := make([]dataGenerator, 0, len(eTypes)) + for _, eType := range eTypes { + if eType == types.ETString { + gens = append(gens, &numStrGener{rangeInt64Gener{0, 10}}) + } else { + gens = append(gens, &defaultGener{nullRation: 0.05, eType: eType}) + } + } + + ts := make([]types.EvalType, 0, numCols) + gs := make([]dataGenerator, 0, numCols) + fts := make([]*types.FieldType, 0, numCols) + for i := 0; i < numCols; i++ { + idx := rand.Intn(len(eTypes)) + if colTypes != nil { + for j := range eTypes { + if colTypes[i] == eTypes[j] { + idx = j + break + } + } + } + ts = append(ts, eTypes[idx]) + gs = append(gs, gens[idx]) + fts = append(fts, eType2FieldType(eTypes[idx])) + } + + input := chunk.New(fts, 1024, 1024) + exprs := make(CNFExprs, 0, numCols) + for i := 0; i < numCols; i++ { + fillColumn(ts[i], input, i, vecExprBenchCase{geners: gs}) + exprs = append(exprs, &Column{Index: i, RetType: fts[i]}) + } + return exprs, input +} + +func generateRandomSel() []int { + rand.Seed(int64(time.Now().UnixNano())) + var sel []int + count := 0 + // Use constant 256 to make it faster to generate randomly arranged sel slices + num := rand.Intn(256) + 1 + existed := make([]bool, 1024) + for i := 0; i < 1024; i++ { + existed[i] = false + } + for count < num { + val := rand.Intn(1024) + if !existed[val] { + existed[val] = true + count++ + } + } + for i := 0; i < 1024; i++ { + if existed[i] { + sel = append(sel, i) + } + } + return sel +} + +func (s *testEvaluatorSuite) TestVecEvalBool(c *C) { + ctx := mock.NewContext() + eTypes := 
[]types.EvalType{types.ETReal, types.ETString} + for numCols := 1; numCols <= 5; numCols++ { + for round := 0; round < 16; round++ { + exprs, input := genVecEvalBool(numCols, nil, eTypes) + selected, nulls, err := VecEvalBool(ctx, exprs, input, nil, nil) + c.Assert(err, IsNil) + it := chunk.NewIterator4Chunk(input) + i := 0 + for row := it.Begin(); row != it.End(); row = it.Next() { + ok, null, err := EvalBool(mock.NewContext(), exprs, row) + c.Assert(err, IsNil) + c.Assert(null, Equals, nulls[i]) + c.Assert(ok, Equals, selected[i]) + i++ + } + } + } +} + +func BenchmarkVecEvalBool(b *testing.B) { + ctx := mock.NewContext() + selected := make([]bool, 0, 1024) + nulls := make([]bool, 0, 1024) + eTypes := []types.EvalType{types.ETInt, types.ETReal, types.ETString} + tNames := []string{"int", "real", "decimal", "string", "timestamp", "datetime", "duration"} + for numCols := 1; numCols <= 2; numCols++ { + typeCombination := make([]types.EvalType, numCols) + var combFunc func(nCols int) + combFunc = func(nCols int) { + if nCols == 0 { + name := "" + for _, t := range typeCombination { + for i := range eTypes { + if t == eTypes[i] { + name += tNames[t] + "/" + } + } + } + exprs, input := genVecEvalBool(numCols, typeCombination, eTypes) + b.Run("Vec-"+name, func(b *testing.B) { + b.ResetTimer() + for i := 0; i < b.N; i++ { + _, _, err := VecEvalBool(ctx, exprs, input, selected, nulls) + if err != nil { + b.Fatal(err) + } + } + }) + b.Run("Row-"+name, func(b *testing.B) { + b.ResetTimer() + for i := 0; i < b.N; i++ { + it := chunk.NewIterator4Chunk(input) + for row := it.Begin(); row != it.End(); row = it.Next() { + _, _, err := EvalBool(ctx, exprs, row) + if err != nil { + b.Fatal(err) + } + } + } + }) + return + } + for _, eType := range eTypes { + typeCombination[nCols-1] = eType + combFunc(nCols - 1) + } + } + + combFunc(numCols) + } +} + +func (s *testEvaluatorSuite) TestRowBasedFilterAndVectorizedFilter(c *C) { + ctx := mock.NewContext() + eTypes := []types.EvalType{types.ETInt, types.ETReal, types.ETString} + for numCols := 1; numCols <= 5; numCols++ { + for round := 0; round < 16; round++ { + exprs, input := genVecEvalBool(numCols, nil, eTypes) + it := chunk.NewIterator4Chunk(input) + isNull := make([]bool, it.Len()) + selected, nulls, err := rowBasedFilter(ctx, exprs, it, nil, isNull) + c.Assert(err, IsNil) + selected2, nulls2, err2 := vectorizedFilter(ctx, exprs, it, nil, isNull) + c.Assert(err2, IsNil) + length := it.Len() + for i := 0; i < length; i++ { + c.Assert(nulls2[i], Equals, nulls[i]) + c.Assert(selected2[i], Equals, selected[i]) + } + } + } +} + +func BenchmarkRowBasedFilterAndVectorizedFilter(b *testing.B) { + ctx := mock.NewContext() + selected := make([]bool, 0, 1024) + nulls := make([]bool, 0, 1024) + eTypes := []types.EvalType{types.ETInt, types.ETReal, types.ETString} + tNames := []string{"int", "real", "string"} + for numCols := 1; numCols <= 2; numCols++ { + typeCombination := make([]types.EvalType, numCols) + var combFunc func(nCols int) + combFunc = func(nCols int) { + if nCols == 0 { + name := "" + for _, t := range typeCombination { + for i := range eTypes { + if t == eTypes[i] { + name += tNames[t] + "/" + } + } + } + exprs, input := genVecEvalBool(numCols, typeCombination, eTypes) + it := chunk.NewIterator4Chunk(input) + b.Run("Vec-"+name, func(b *testing.B) { + b.ResetTimer() + for i := 0; i < b.N; i++ { + _, _, err := vectorizedFilter(ctx, exprs, it, selected, nulls) + if err != nil { + b.Fatal(err) + } + } + }) + b.Run("Row-"+name, func(b *testing.B) { + 
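+					// Row-based baseline: filter the same conjuncts one row at a time with
+					// rowBasedFilter so its timing can be compared with the vectorizedFilter run above.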
b.ResetTimer() + for i := 0; i < b.N; i++ { + _, _, err := rowBasedFilter(ctx, exprs, it, selected, nulls) + if err != nil { + b.Fatal(err) + } + } + }) + return + } + for _, eType := range eTypes { + typeCombination[nCols-1] = eType + combFunc(nCols - 1) + } + } + combFunc(numCols) + } +} + +func (s *testEvaluatorSuite) TestVectorizedFilterConsiderNull(c *C) { + ctx := mock.NewContext() + dafaultEnableVectorizedExpressionVar := ctx.GetSessionVars().EnableVectorizedExpression + eTypes := []types.EvalType{types.ETInt, types.ETReal, types.ETString} + for numCols := 1; numCols <= 5; numCols++ { + for round := 0; round < 16; round++ { + exprs, input := genVecEvalBool(numCols, nil, eTypes) + it := chunk.NewIterator4Chunk(input) + isNull := make([]bool, it.Len()) + ctx.GetSessionVars().EnableVectorizedExpression = false + selected, nulls, err := VectorizedFilterConsiderNull(ctx, exprs, it, nil, isNull) + c.Assert(err, IsNil) + ctx.GetSessionVars().EnableVectorizedExpression = true + selected2, nulls2, err2 := VectorizedFilterConsiderNull(ctx, exprs, it, nil, isNull) + c.Assert(err2, IsNil) + length := it.Len() + for i := 0; i < length; i++ { + c.Assert(nulls2[i], Equals, nulls[i]) + c.Assert(selected2[i], Equals, selected[i]) + } + + // add test which sel is not nil + randomSel := generateRandomSel() + input.SetSel(randomSel) + it2 := chunk.NewIterator4Chunk(input) + isNull = isNull[:0] + ctx.GetSessionVars().EnableVectorizedExpression = false + selected3, nulls, err := VectorizedFilterConsiderNull(ctx, exprs, it2, nil, isNull) + c.Assert(err, IsNil) + ctx.GetSessionVars().EnableVectorizedExpression = true + selected4, nulls2, err2 := VectorizedFilterConsiderNull(ctx, exprs, it2, nil, isNull) + c.Assert(err2, IsNil) + for i := 0; i < length; i++ { + c.Assert(nulls2[i], Equals, nulls[i]) + c.Assert(selected4[i], Equals, selected3[i]) + } + + unselected := make([]bool, length) + // unselected[i] == false means that the i-th row is selected + for i := 0; i < length; i++ { + unselected[i] = true + } + for _, idx := range randomSel { + unselected[idx] = false + } + for i := range selected2 { + if selected2[i] && unselected[i] { + selected2[i] = false + } + } + for i := 0; i < length; i++ { + c.Assert(selected2[i], Equals, selected4[i]) + } + } + } + ctx.GetSessionVars().EnableVectorizedExpression = dafaultEnableVectorizedExpressionVar +} diff --git a/expression/builtin.go b/expression/builtin.go new file mode 100644 index 0000000..6d05f7d --- /dev/null +++ b/expression/builtin.go @@ -0,0 +1,396 @@ +// Copyright 2013 The ql Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSES/QL-LICENSE file. + +// Copyright 2015 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. 
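// Editor's note (illustrative sketch, not part of this patch): the
// TestVectorizedFilterConsiderNull test above (expression/bench_test.go) relies on
// chunk sel vectors -- after input.SetSel(sel), iterators and the vectorized filter
// only see the selected physical rows. The sketch below shows that assumed behaviour
// using chunk.New / SetSel / NewIterator4Chunk as they appear in this diff, plus
// chunk.Chunk.AppendInt64 and chunk.Row.GetInt64, which are assumed to match upstream
// TiDB. It presumes the expression package's existing imports (chunk, types, mysql).

func sketchSelVector() int {
	fts := []*types.FieldType{types.NewFieldType(mysql.TypeLonglong)}
	chk := chunk.New(fts, 8, 8)
	for i := 0; i < 8; i++ {
		chk.AppendInt64(0, int64(i)) // physical rows 0..7
	}
	chk.SetSel([]int{1, 3, 5}) // logically keep only physical rows 1, 3 and 5

	it := chunk.NewIterator4Chunk(chk)
	visited := 0
	for row := it.Begin(); row != it.End(); row = it.Next() {
		_ = row.GetInt64(0) // yields 1, 3 and 5, in that order
		visited++
	}
	// visited == 3 == it.Len(): filters such as VectorizedFilterConsiderNull therefore
	// return selected/nulls slices of this logical length for a chunk with a sel vector.
	return visited
}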
+ +//go:generate go run generator/compare_vec.go +//go:generate go run generator/control_vec.go +//go:generate go run generator/other_vec.go + +package expression + +import ( + "fmt" + "github.com/pingcap/log" + "sort" + "strings" + "sync" + + "github.com/gogo/protobuf/proto" + "github.com/pingcap/errors" + "github.com/pingcap/tidb/parser/ast" + "github.com/pingcap/tidb/parser/charset" + "github.com/pingcap/tidb/parser/mysql" + "github.com/pingcap/tidb/parser/opcode" + "github.com/pingcap/tidb/sessionctx" + "github.com/pingcap/tidb/types" + "github.com/pingcap/tidb/util/chunk" + "github.com/pingcap/tipb/go-tipb" +) + +// baseBuiltinFunc will be contained in every struct that implement builtinFunc interface. +type baseBuiltinFunc struct { + bufAllocator columnBufferAllocator + args []Expression + ctx sessionctx.Context + tp *types.FieldType + pbCode tipb.ScalarFuncSig + + childrenVectorizedOnce *sync.Once + childrenVectorized bool +} + +func (b *baseBuiltinFunc) PbCode() tipb.ScalarFuncSig { + return b.pbCode +} + +// metadata returns the metadata of a function. +// metadata means some functions contain extra inner fields which will not +// contain in `tipb.Expr.children` but must be pushed down to coprocessor +func (b *baseBuiltinFunc) metadata() proto.Message { + // We will not use a field to store them because of only + // a few functions contain implicit parameters + return nil +} + +func (b *baseBuiltinFunc) setPbCode(c tipb.ScalarFuncSig) { + b.pbCode = c +} + +func newBaseBuiltinFunc(ctx sessionctx.Context, args []Expression) baseBuiltinFunc { + if ctx == nil { + panic("ctx should not be nil") + } + return baseBuiltinFunc{ + bufAllocator: newLocalSliceBuffer(len(args)), + childrenVectorizedOnce: new(sync.Once), + + args: args, + ctx: ctx, + tp: types.NewFieldType(mysql.TypeUnspecified), + } +} + +// newBaseBuiltinFuncWithTp creates a built-in function signature with specified types of arguments and the return type of the function. +// argTps indicates the types of the args, retType indicates the return type of the built-in function. +// Every built-in function needs determined argTps and retType when we create it. 
+func newBaseBuiltinFuncWithTp(ctx sessionctx.Context, args []Expression, retType types.EvalType, argTps ...types.EvalType) (bf baseBuiltinFunc) { + if len(args) != len(argTps) { + panic("unexpected length of args and argTps") + } + if ctx == nil { + panic("ctx should not be nil") + } + for i := range args { + if argTps[i] != args[i].GetType().EvalType() { + log.Warn(fmt.Sprintf("unmatched arg type %v with %v", argTps[i], args[i].GetType().EvalType())) + } + } + var fieldType *types.FieldType + switch retType { + case types.ETInt: + fieldType = &types.FieldType{ + Tp: mysql.TypeLonglong, + Flen: mysql.MaxIntWidth, + Decimal: 0, + Flag: mysql.BinaryFlag, + } + case types.ETReal: + fieldType = &types.FieldType{ + Tp: mysql.TypeDouble, + Flen: mysql.MaxRealWidth, + Decimal: types.UnspecifiedLength, + Flag: mysql.BinaryFlag, + } + case types.ETString: + fieldType = &types.FieldType{ + Tp: mysql.TypeVarString, + Flen: 0, + Decimal: types.UnspecifiedLength, + } + } + if mysql.HasBinaryFlag(fieldType.Flag) { + fieldType.Charset, fieldType.Collate = charset.CharsetBin, charset.CollationBin + } else { + fieldType.Charset, fieldType.Collate = charset.GetDefaultCharsetAndCollate() + } + return baseBuiltinFunc{ + bufAllocator: newLocalSliceBuffer(len(args)), + childrenVectorizedOnce: new(sync.Once), + + args: args, + ctx: ctx, + tp: fieldType, + } +} + +func (b *baseBuiltinFunc) getArgs() []Expression { + return b.args +} + +func (b *baseBuiltinFunc) vecEvalInt(input *chunk.Chunk, result *chunk.Column) error { + return errors.Errorf("baseBuiltinFunc.vecEvalInt() should never be called, please contact the TiDB team for help") +} + +func (b *baseBuiltinFunc) vecEvalReal(input *chunk.Chunk, result *chunk.Column) error { + return errors.Errorf("baseBuiltinFunc.vecEvalReal() should never be called, please contact the TiDB team for help") +} + +func (b *baseBuiltinFunc) vecEvalString(input *chunk.Chunk, result *chunk.Column) error { + return errors.Errorf("baseBuiltinFunc.vecEvalString() should never be called, please contact the TiDB team for help") +} + +func (b *baseBuiltinFunc) vecEvalDecimal(input *chunk.Chunk, result *chunk.Column) error { + return errors.Errorf("baseBuiltinFunc.vecEvalDecimal() should never be called, please contact the TiDB team for help") +} + +func (b *baseBuiltinFunc) vecEvalTime(input *chunk.Chunk, result *chunk.Column) error { + return errors.Errorf("baseBuiltinFunc.vecEvalTime() should never be called, please contact the TiDB team for help") +} + +func (b *baseBuiltinFunc) vecEvalDuration(input *chunk.Chunk, result *chunk.Column) error { + return errors.Errorf("baseBuiltinFunc.vecEvalDuration() should never be called, please contact the TiDB team for help") +} + +func (b *baseBuiltinFunc) vecEvalJSON(input *chunk.Chunk, result *chunk.Column) error { + return errors.Errorf("baseBuiltinFunc.vecEvalJSON() should never be called, please contact the TiDB team for help") +} + +func (b *baseBuiltinFunc) evalInt(row chunk.Row) (int64, bool, error) { + return 0, false, errors.Errorf("baseBuiltinFunc.evalInt() should never be called, please contact the TiDB team for help") +} + +func (b *baseBuiltinFunc) evalReal(row chunk.Row) (float64, bool, error) { + return 0, false, errors.Errorf("baseBuiltinFunc.evalReal() should never be called, please contact the TiDB team for help") +} + +func (b *baseBuiltinFunc) evalString(row chunk.Row) (string, bool, error) { + return "", false, errors.Errorf("baseBuiltinFunc.evalString() should never be called, please contact the TiDB team for help") +} + 
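// Editor's note (illustrative sketch, not part of this patch): the error-returning
// defaults above are only reached when a concrete signature forgets to override the
// method that matches its declared return type. Below is a hypothetical one-argument
// signature showing the usual pattern -- embed baseBuiltinFunc, implement the row-based
// evalInt, and opt in to vectorization via vectorized() plus vecEvalInt -- using only
// calls that appear elsewhere in this diff (EvalInt, VecEvalInt, Int64s, IsNull,
// NumRows). The builtinPlusOneSig type is invented for illustration; overflow handling
// is omitted for brevity.

type builtinPlusOneSig struct {
	baseBuiltinFunc
}

// evalInt is the row-based path: evaluate the child for one row, then add 1.
func (b *builtinPlusOneSig) evalInt(row chunk.Row) (int64, bool, error) {
	v, isNull, err := b.args[0].EvalInt(b.ctx, row)
	if isNull || err != nil {
		return 0, isNull, err
	}
	return v + 1, false, nil
}

// vectorized reports that this signature provides its own vecEvalInt.
func (b *builtinPlusOneSig) vectorized() bool { return true }

// vecEvalInt is the vectorized path: evaluate the child for the whole chunk directly
// into result, then patch the values in place, skipping NULL rows.
func (b *builtinPlusOneSig) vecEvalInt(input *chunk.Chunk, result *chunk.Column) error {
	if err := b.args[0].VecEvalInt(b.ctx, input, result); err != nil {
		return err
	}
	i64s := result.Int64s()
	for i := 0; i < input.NumRows(); i++ {
		if result.IsNull(i) {
			continue
		}
		i64s[i]++
	}
	return nil
}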
+func (b *baseBuiltinFunc) vectorized() bool { + return false +} + +func (b *baseBuiltinFunc) isChildrenVectorized() bool { + b.childrenVectorizedOnce.Do(func() { + b.childrenVectorized = true + for _, arg := range b.args { + if !arg.Vectorized() { + b.childrenVectorized = false + break + } + } + }) + return b.childrenVectorized +} + +func (b *baseBuiltinFunc) getRetTp() *types.FieldType { + switch b.tp.EvalType() { + case types.ETString: + if b.tp.Flen >= mysql.MaxBlobWidth { + b.tp.Tp = mysql.TypeLongBlob + } else if b.tp.Flen >= 65536 { + b.tp.Tp = mysql.TypeMediumBlob + } + if len(b.tp.Charset) <= 0 { + b.tp.Charset, b.tp.Collate = charset.GetDefaultCharsetAndCollate() + } + } + return b.tp +} + +func (b *baseBuiltinFunc) equal(fun builtinFunc) bool { + funArgs := fun.getArgs() + if len(funArgs) != len(b.args) { + return false + } + for i := range b.args { + if !b.args[i].Equal(b.ctx, funArgs[i]) { + return false + } + } + return true +} + +func (b *baseBuiltinFunc) getCtx() sessionctx.Context { + return b.ctx +} + +func (b *baseBuiltinFunc) cloneFrom(from *baseBuiltinFunc) { + b.args = make([]Expression, 0, len(b.args)) + for _, arg := range from.args { + b.args = append(b.args, arg.Clone()) + } + b.ctx = from.ctx + b.tp = from.tp + b.pbCode = from.pbCode + b.bufAllocator = newLocalSliceBuffer(len(b.args)) + b.childrenVectorizedOnce = new(sync.Once) +} + +func (b *baseBuiltinFunc) Clone() builtinFunc { + panic("you should not call this method.") +} + +// vecBuiltinFunc contains all vectorized methods for a builtin function. +type vecBuiltinFunc interface { + // vectorized returns if this builtin function itself supports vectorized evaluation. + vectorized() bool + + // isChildrenVectorized returns if its all children support vectorized evaluation. + isChildrenVectorized() bool + + // vecEvalInt evaluates this builtin function in a vectorized manner. + vecEvalInt(input *chunk.Chunk, result *chunk.Column) error + + // vecEvalReal evaluates this builtin function in a vectorized manner. + vecEvalReal(input *chunk.Chunk, result *chunk.Column) error + + // vecEvalString evaluates this builtin function in a vectorized manner. + vecEvalString(input *chunk.Chunk, result *chunk.Column) error + + // vecEvalDecimal evaluates this builtin function in a vectorized manner. + vecEvalDecimal(input *chunk.Chunk, result *chunk.Column) error + + // vecEvalTime evaluates this builtin function in a vectorized manner. + vecEvalTime(input *chunk.Chunk, result *chunk.Column) error + + // vecEvalDuration evaluates this builtin function in a vectorized manner. + vecEvalDuration(input *chunk.Chunk, result *chunk.Column) error + + // vecEvalJSON evaluates this builtin function in a vectorized manner. + vecEvalJSON(input *chunk.Chunk, result *chunk.Column) error +} + +// builtinFunc stands for a particular function signature. +type builtinFunc interface { + vecBuiltinFunc + + // evalInt evaluates int result of builtinFunc by given row. + evalInt(row chunk.Row) (val int64, isNull bool, err error) + // evalReal evaluates real representation of builtinFunc by given row. + evalReal(row chunk.Row) (val float64, isNull bool, err error) + // evalString evaluates string representation of builtinFunc by given row. + evalString(row chunk.Row) (val string, isNull bool, err error) + // getArgs returns the arguments expressions. + getArgs() []Expression + // equal check if this function equals to another function. + equal(builtinFunc) bool + // getCtx returns this function's context. 
+ getCtx() sessionctx.Context + // getRetTp returns the return type of the built-in function. + getRetTp() *types.FieldType + // setPbCode sets pbCode for signature. + setPbCode(tipb.ScalarFuncSig) + // PbCode returns PbCode of this signature. + PbCode() tipb.ScalarFuncSig + // metadata returns the metadata of a function. + // metadata means some functions contain extra inner fields which will not + // contain in `tipb.Expr.children` but must be pushed down to coprocessor + metadata() proto.Message + // Clone returns a copy of itself. + Clone() builtinFunc +} + +// baseFunctionClass will be contained in every struct that implement functionClass interface. +type baseFunctionClass struct { + funcName string + minArgs int + maxArgs int +} + +func (b *baseFunctionClass) verifyArgs(args []Expression) error { + l := len(args) + if l < b.minArgs || (b.maxArgs != -1 && l > b.maxArgs) { + return ErrIncorrectParameterCount.GenWithStackByArgs(b.funcName) + } + return nil +} + +// functionClass is the interface for a function which may contains multiple functions. +type functionClass interface { + // getFunction gets a function signature by the types and the counts of given arguments. + getFunction(ctx sessionctx.Context, args []Expression) (builtinFunc, error) +} + +// funcs holds all registered builtin functions. When new function is added, +// check expression/function_traits.go to see if it should be appended to +// any set there. +var funcs = map[string]functionClass{ + // common functions + ast.IsNull: &isNullFunctionClass{baseFunctionClass{ast.IsNull, 1, 1}}, + + // string functions + ast.Length: &lengthFunctionClass{baseFunctionClass{ast.Length, 1, 1}}, + ast.OctetLength: &lengthFunctionClass{baseFunctionClass{ast.OctetLength, 1, 1}}, + ast.Strcmp: &strcmpFunctionClass{baseFunctionClass{ast.Strcmp, 2, 2}}, + + // control functions + ast.If: &ifFunctionClass{baseFunctionClass{ast.If, 3, 3}}, + ast.Ifnull: &ifNullFunctionClass{baseFunctionClass{ast.Ifnull, 2, 2}}, + + ast.LogicAnd: &logicAndFunctionClass{baseFunctionClass{ast.LogicAnd, 2, 2}}, + ast.LogicOr: &logicOrFunctionClass{baseFunctionClass{ast.LogicOr, 2, 2}}, + ast.GE: &compareFunctionClass{baseFunctionClass{ast.GE, 2, 2}, opcode.GE}, + ast.LE: &compareFunctionClass{baseFunctionClass{ast.LE, 2, 2}, opcode.LE}, + ast.EQ: &compareFunctionClass{baseFunctionClass{ast.EQ, 2, 2}, opcode.EQ}, + ast.NE: &compareFunctionClass{baseFunctionClass{ast.NE, 2, 2}, opcode.NE}, + ast.LT: &compareFunctionClass{baseFunctionClass{ast.LT, 2, 2}, opcode.LT}, + ast.GT: &compareFunctionClass{baseFunctionClass{ast.GT, 2, 2}, opcode.GT}, + ast.Plus: &arithmeticPlusFunctionClass{baseFunctionClass{ast.Plus, 2, 2}}, + ast.Minus: &arithmeticMinusFunctionClass{baseFunctionClass{ast.Minus, 2, 2}}, + ast.Div: &arithmeticDivideFunctionClass{baseFunctionClass{ast.Div, 2, 2}}, + ast.Mul: &arithmeticMultiplyFunctionClass{baseFunctionClass{ast.Mul, 2, 2}}, + ast.UnaryNot: &unaryNotFunctionClass{baseFunctionClass{ast.UnaryNot, 1, 1}}, + ast.UnaryMinus: &unaryMinusFunctionClass{baseFunctionClass{ast.UnaryMinus, 1, 1}}, + ast.In: &inFunctionClass{baseFunctionClass{ast.In, 2, -1}}, + ast.RowFunc: &rowFunctionClass{baseFunctionClass{ast.RowFunc, 2, -1}}, + ast.SetVar: &setVarFunctionClass{baseFunctionClass{ast.SetVar, 2, 2}}, + ast.GetVar: &getVarFunctionClass{baseFunctionClass{ast.GetVar, 1, 1}}, +} + +// IsFunctionSupported check if given function name is a builtin sql function. 
+func IsFunctionSupported(name string) bool { + _, ok := funcs[name] + return ok +} + +// GetBuiltinList returns a list of builtin functions +func GetBuiltinList() []string { + res := make([]string, 0, len(funcs)) + notImplementedFunctions := []string{ast.RowFunc} + for funcName := range funcs { + skipFunc := false + // Skip not implemented functions + for _, notImplFunc := range notImplementedFunctions { + if funcName == notImplFunc { + skipFunc = true + } + } + // Skip literal functions + // (their names are not readable: 'tidb`.(dateliteral, for example) + // See: https://github.com/pingcap/tidb/parser/pull/591 + if strings.HasPrefix(funcName, "'tidb`.(") { + skipFunc = true + } + if skipFunc { + continue + } + res = append(res, funcName) + } + sort.Strings(res) + return res +} diff --git a/expression/builtin_arithmetic.go b/expression/builtin_arithmetic.go new file mode 100644 index 0000000..ff883d9 --- /dev/null +++ b/expression/builtin_arithmetic.go @@ -0,0 +1,471 @@ +// Copyright 2017 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package expression + +import ( + "fmt" + "math" + + "github.com/cznic/mathutil" + "github.com/pingcap/tidb/parser/mysql" + "github.com/pingcap/tidb/sessionctx" + "github.com/pingcap/tidb/types" + "github.com/pingcap/tidb/util/chunk" + "github.com/pingcap/tipb/go-tipb" +) + +var ( + _ functionClass = &arithmeticPlusFunctionClass{} + _ functionClass = &arithmeticMinusFunctionClass{} + _ functionClass = &arithmeticDivideFunctionClass{} + _ functionClass = &arithmeticMultiplyFunctionClass{} +) + +var ( + _ builtinFunc = &builtinArithmeticPlusRealSig{} + _ builtinFunc = &builtinArithmeticPlusIntSig{} + _ builtinFunc = &builtinArithmeticMinusRealSig{} + _ builtinFunc = &builtinArithmeticMinusIntSig{} + _ builtinFunc = &builtinArithmeticDivideRealSig{} + _ builtinFunc = &builtinArithmeticMultiplyRealSig{} + _ builtinFunc = &builtinArithmeticMultiplyIntUnsignedSig{} + _ builtinFunc = &builtinArithmeticMultiplyIntSig{} +) + +// numericContextResultType returns types.EvalType for numeric function's parameters. +// the returned types.EvalType should be one of: types.ETInt, types.ETDecimal, types.ETReal +func numericContextResultType(ft *types.FieldType) types.EvalType { + if types.IsBinaryStr(ft) { + return types.ETInt + } + evalTp4Ft := types.ETReal + if !ft.Hybrid() { + evalTp4Ft = ft.EvalType() + if evalTp4Ft != types.ETInt { + evalTp4Ft = types.ETReal + } + } + return evalTp4Ft +} + +// setFlenDecimal4Int is called to set proper `Flen` and `Decimal` of return +// type according to the two input parameter's types. +func setFlenDecimal4Int(retTp, a, b *types.FieldType) { + retTp.Decimal = 0 + retTp.Flen = mysql.MaxIntWidth +} + +// setFlenDecimal4RealOrDecimal is called to set proper `Flen` and `Decimal` of return +// type according to the two input parameter's types. 
+func setFlenDecimal4RealOrDecimal(retTp, a, b *types.FieldType, isReal bool, isMultiply bool) { + if a.Decimal != types.UnspecifiedLength && b.Decimal != types.UnspecifiedLength { + retTp.Decimal = a.Decimal + b.Decimal + if !isMultiply { + retTp.Decimal = mathutil.Max(a.Decimal, b.Decimal) + } + if !isReal && retTp.Decimal > mysql.MaxDecimalScale { + retTp.Decimal = mysql.MaxDecimalScale + } + if a.Flen == types.UnspecifiedLength || b.Flen == types.UnspecifiedLength { + retTp.Flen = types.UnspecifiedLength + return + } + digitsInt := mathutil.Max(a.Flen-a.Decimal, b.Flen-b.Decimal) + if isMultiply { + digitsInt = a.Flen - a.Decimal + b.Flen - b.Decimal + } + retTp.Flen = digitsInt + retTp.Decimal + 3 + if isReal { + retTp.Flen = mathutil.Min(retTp.Flen, mysql.MaxRealWidth) + return + } + retTp.Flen = mathutil.Min(retTp.Flen, mysql.MaxDecimalWidth) + return + } + if isReal { + retTp.Flen, retTp.Decimal = types.UnspecifiedLength, types.UnspecifiedLength + } else { + retTp.Flen, retTp.Decimal = mysql.MaxDecimalWidth, mysql.MaxDecimalScale + } +} + +func (c *arithmeticDivideFunctionClass) setType4DivReal(retTp *types.FieldType) { + retTp.Decimal = types.UnspecifiedLength + retTp.Flen = mysql.MaxRealWidth +} + +type arithmeticPlusFunctionClass struct { + baseFunctionClass +} + +func (c *arithmeticPlusFunctionClass) getFunction(ctx sessionctx.Context, args []Expression) (builtinFunc, error) { + if err := c.verifyArgs(args); err != nil { + return nil, err + } + lhsTp, rhsTp := args[0].GetType(), args[1].GetType() + lhsEvalTp, rhsEvalTp := numericContextResultType(lhsTp), numericContextResultType(rhsTp) + if lhsEvalTp == types.ETReal || rhsEvalTp == types.ETReal { + bf := newBaseBuiltinFuncWithTp(ctx, args, types.ETReal, types.ETReal, types.ETReal) + setFlenDecimal4RealOrDecimal(bf.tp, args[0].GetType(), args[1].GetType(), true, false) + sig := &builtinArithmeticPlusRealSig{bf} + sig.setPbCode(tipb.ScalarFuncSig_PlusReal) + return sig, nil + } + bf := newBaseBuiltinFuncWithTp(ctx, args, types.ETInt, types.ETInt, types.ETInt) + if mysql.HasUnsignedFlag(args[0].GetType().Flag) || mysql.HasUnsignedFlag(args[1].GetType().Flag) { + bf.tp.Flag |= mysql.UnsignedFlag + } + setFlenDecimal4Int(bf.tp, args[0].GetType(), args[1].GetType()) + sig := &builtinArithmeticPlusIntSig{bf} + sig.setPbCode(tipb.ScalarFuncSig_PlusInt) + return sig, nil +} + +type builtinArithmeticPlusIntSig struct { + baseBuiltinFunc +} + +func (s *builtinArithmeticPlusIntSig) Clone() builtinFunc { + newSig := &builtinArithmeticPlusIntSig{} + newSig.cloneFrom(&s.baseBuiltinFunc) + return newSig +} + +func (s *builtinArithmeticPlusIntSig) evalInt(row chunk.Row) (val int64, isNull bool, err error) { + a, isNull, err := s.args[0].EvalInt(s.ctx, row) + if isNull || err != nil { + return 0, isNull, err + } + + b, isNull, err := s.args[1].EvalInt(s.ctx, row) + if isNull || err != nil { + return 0, isNull, err + } + + isLHSUnsigned := mysql.HasUnsignedFlag(s.args[0].GetType().Flag) + isRHSUnsigned := mysql.HasUnsignedFlag(s.args[1].GetType().Flag) + + switch { + case isLHSUnsigned && isRHSUnsigned: + if uint64(a) > math.MaxUint64-uint64(b) { + return 0, true, types.ErrOverflow.GenWithStackByArgs("BIGINT UNSIGNED", fmt.Sprintf("(%s + %s)", s.args[0].String(), s.args[1].String())) + } + case isLHSUnsigned && !isRHSUnsigned: + if b < 0 && uint64(-b) > uint64(a) { + return 0, true, types.ErrOverflow.GenWithStackByArgs("BIGINT UNSIGNED", fmt.Sprintf("(%s + %s)", s.args[0].String(), s.args[1].String())) + } + if b > 0 && uint64(a) > 
math.MaxUint64-uint64(b) { + return 0, true, types.ErrOverflow.GenWithStackByArgs("BIGINT UNSIGNED", fmt.Sprintf("(%s + %s)", s.args[0].String(), s.args[1].String())) + } + case !isLHSUnsigned && isRHSUnsigned: + if a < 0 && uint64(-a) > uint64(b) { + return 0, true, types.ErrOverflow.GenWithStackByArgs("BIGINT UNSIGNED", fmt.Sprintf("(%s + %s)", s.args[0].String(), s.args[1].String())) + } + if a > 0 && uint64(b) > math.MaxUint64-uint64(a) { + return 0, true, types.ErrOverflow.GenWithStackByArgs("BIGINT UNSIGNED", fmt.Sprintf("(%s + %s)", s.args[0].String(), s.args[1].String())) + } + case !isLHSUnsigned && !isRHSUnsigned: + if (a > 0 && b > math.MaxInt64-a) || (a < 0 && b < math.MinInt64-a) { + return 0, true, types.ErrOverflow.GenWithStackByArgs("BIGINT", fmt.Sprintf("(%s + %s)", s.args[0].String(), s.args[1].String())) + } + } + + return a + b, false, nil +} + +type builtinArithmeticPlusRealSig struct { + baseBuiltinFunc +} + +func (s *builtinArithmeticPlusRealSig) Clone() builtinFunc { + newSig := &builtinArithmeticPlusRealSig{} + newSig.cloneFrom(&s.baseBuiltinFunc) + return newSig +} + +func (s *builtinArithmeticPlusRealSig) evalReal(row chunk.Row) (float64, bool, error) { + a, isNull, err := s.args[0].EvalReal(s.ctx, row) + if isNull || err != nil { + return 0, isNull, err + } + b, isNull, err := s.args[1].EvalReal(s.ctx, row) + if isNull || err != nil { + return 0, isNull, err + } + if (a > 0 && b > math.MaxFloat64-a) || (a < 0 && b < -math.MaxFloat64-a) { + return 0, true, types.ErrOverflow.GenWithStackByArgs("DOUBLE", fmt.Sprintf("(%s + %s)", s.args[0].String(), s.args[1].String())) + } + return a + b, false, nil +} + +type arithmeticMinusFunctionClass struct { + baseFunctionClass +} + +func (c *arithmeticMinusFunctionClass) getFunction(ctx sessionctx.Context, args []Expression) (builtinFunc, error) { + if err := c.verifyArgs(args); err != nil { + return nil, err + } + lhsTp, rhsTp := args[0].GetType(), args[1].GetType() + lhsEvalTp, rhsEvalTp := numericContextResultType(lhsTp), numericContextResultType(rhsTp) + if lhsEvalTp == types.ETReal || rhsEvalTp == types.ETReal { + bf := newBaseBuiltinFuncWithTp(ctx, args, types.ETReal, types.ETReal, types.ETReal) + setFlenDecimal4RealOrDecimal(bf.tp, args[0].GetType(), args[1].GetType(), true, false) + sig := &builtinArithmeticMinusRealSig{bf} + sig.setPbCode(tipb.ScalarFuncSig_MinusReal) + return sig, nil + } + bf := newBaseBuiltinFuncWithTp(ctx, args, types.ETInt, types.ETInt, types.ETInt) + setFlenDecimal4Int(bf.tp, args[0].GetType(), args[1].GetType()) + if (mysql.HasUnsignedFlag(args[0].GetType().Flag) || mysql.HasUnsignedFlag(args[1].GetType().Flag)) && !ctx.GetSessionVars().SQLMode.HasNoUnsignedSubtractionMode() { + bf.tp.Flag |= mysql.UnsignedFlag + } + sig := &builtinArithmeticMinusIntSig{baseBuiltinFunc: bf} + sig.setPbCode(tipb.ScalarFuncSig_MinusInt) + return sig, nil +} + +type builtinArithmeticMinusRealSig struct { + baseBuiltinFunc +} + +func (s *builtinArithmeticMinusRealSig) Clone() builtinFunc { + newSig := &builtinArithmeticMinusRealSig{} + newSig.cloneFrom(&s.baseBuiltinFunc) + return newSig +} + +func (s *builtinArithmeticMinusRealSig) evalReal(row chunk.Row) (float64, bool, error) { + a, isNull, err := s.args[0].EvalReal(s.ctx, row) + if isNull || err != nil { + return 0, isNull, err + } + b, isNull, err := s.args[1].EvalReal(s.ctx, row) + if isNull || err != nil { + return 0, isNull, err + } + if (a > 0 && -b > math.MaxFloat64-a) || (a < 0 && -b < -math.MaxFloat64-a) { + return 0, true, 
types.ErrOverflow.GenWithStackByArgs("DOUBLE", fmt.Sprintf("(%s - %s)", s.args[0].String(), s.args[1].String())) + } + return a - b, false, nil +} + +type builtinArithmeticMinusIntSig struct { + baseBuiltinFunc +} + +func (s *builtinArithmeticMinusIntSig) Clone() builtinFunc { + newSig := &builtinArithmeticMinusIntSig{} + newSig.cloneFrom(&s.baseBuiltinFunc) + return newSig +} + +func (s *builtinArithmeticMinusIntSig) evalInt(row chunk.Row) (val int64, isNull bool, err error) { + a, isNull, err := s.args[0].EvalInt(s.ctx, row) + if isNull || err != nil { + return 0, isNull, err + } + + b, isNull, err := s.args[1].EvalInt(s.ctx, row) + if isNull || err != nil { + return 0, isNull, err + } + forceToSigned := s.ctx.GetSessionVars().SQLMode.HasNoUnsignedSubtractionMode() + isLHSUnsigned := !forceToSigned && mysql.HasUnsignedFlag(s.args[0].GetType().Flag) + isRHSUnsigned := !forceToSigned && mysql.HasUnsignedFlag(s.args[1].GetType().Flag) + + if forceToSigned && mysql.HasUnsignedFlag(s.args[0].GetType().Flag) { + if a < 0 || (a > math.MaxInt64) { + return 0, true, types.ErrOverflow.GenWithStackByArgs("BIGINT UNSIGNED", fmt.Sprintf("(%s - %s)", s.args[0].String(), s.args[1].String())) + } + } + if forceToSigned && mysql.HasUnsignedFlag(s.args[1].GetType().Flag) { + if b < 0 || (b > math.MaxInt64) { + return 0, true, types.ErrOverflow.GenWithStackByArgs("BIGINT UNSIGNED", fmt.Sprintf("(%s - %s)", s.args[0].String(), s.args[1].String())) + } + } + + switch { + case isLHSUnsigned && isRHSUnsigned: + if uint64(a) < uint64(b) { + return 0, true, types.ErrOverflow.GenWithStackByArgs("BIGINT UNSIGNED", fmt.Sprintf("(%s - %s)", s.args[0].String(), s.args[1].String())) + } + case isLHSUnsigned && !isRHSUnsigned: + if b >= 0 && uint64(a) < uint64(b) { + return 0, true, types.ErrOverflow.GenWithStackByArgs("BIGINT UNSIGNED", fmt.Sprintf("(%s - %s)", s.args[0].String(), s.args[1].String())) + } + if b < 0 && uint64(a) > math.MaxUint64-uint64(-b) { + return 0, true, types.ErrOverflow.GenWithStackByArgs("BIGINT UNSIGNED", fmt.Sprintf("(%s - %s)", s.args[0].String(), s.args[1].String())) + } + case !isLHSUnsigned && isRHSUnsigned: + if a < 0 || uint64(a) < uint64(b) { + return 0, true, types.ErrOverflow.GenWithStackByArgs("BIGINT UNSIGNED", fmt.Sprintf("(%s - %s)", s.args[0].String(), s.args[1].String())) + } + case !isLHSUnsigned && !isRHSUnsigned: + // We need `(a >= 0 && b == math.MinInt64)` due to `-(math.MinInt64) == math.MinInt64`. + // If `a<0 && b<=0`: `a-b` will not overflow even though b==math.MinInt64. 
+ // If `a<0 && b>0`: `a-b` will not overflow only if `math.MinInt64<=a-b` satisfied + if (a >= 0 && b == math.MinInt64) || (a > 0 && -b > math.MaxInt64-a) || (a < 0 && -b < math.MinInt64-a) { + return 0, true, types.ErrOverflow.GenWithStackByArgs("BIGINT", fmt.Sprintf("(%s - %s)", s.args[0].String(), s.args[1].String())) + } + } + return a - b, false, nil +} + +type arithmeticMultiplyFunctionClass struct { + baseFunctionClass +} + +func (c *arithmeticMultiplyFunctionClass) getFunction(ctx sessionctx.Context, args []Expression) (builtinFunc, error) { + if err := c.verifyArgs(args); err != nil { + return nil, err + } + lhsTp, rhsTp := args[0].GetType(), args[1].GetType() + lhsEvalTp, rhsEvalTp := numericContextResultType(lhsTp), numericContextResultType(rhsTp) + if lhsEvalTp == types.ETReal || rhsEvalTp == types.ETReal { + bf := newBaseBuiltinFuncWithTp(ctx, args, types.ETReal, types.ETReal, types.ETReal) + setFlenDecimal4RealOrDecimal(bf.tp, args[0].GetType(), args[1].GetType(), true, true) + sig := &builtinArithmeticMultiplyRealSig{bf} + sig.setPbCode(tipb.ScalarFuncSig_MultiplyReal) + return sig, nil + } + bf := newBaseBuiltinFuncWithTp(ctx, args, types.ETInt, types.ETInt, types.ETInt) + if mysql.HasUnsignedFlag(lhsTp.Flag) || mysql.HasUnsignedFlag(rhsTp.Flag) { + bf.tp.Flag |= mysql.UnsignedFlag + setFlenDecimal4Int(bf.tp, args[0].GetType(), args[1].GetType()) + sig := &builtinArithmeticMultiplyIntUnsignedSig{bf} + sig.setPbCode(tipb.ScalarFuncSig_MultiplyIntUnsigned) + return sig, nil + } + setFlenDecimal4Int(bf.tp, args[0].GetType(), args[1].GetType()) + sig := &builtinArithmeticMultiplyIntSig{bf} + sig.setPbCode(tipb.ScalarFuncSig_MultiplyInt) + return sig, nil +} + +type builtinArithmeticMultiplyRealSig struct{ baseBuiltinFunc } + +func (s *builtinArithmeticMultiplyRealSig) Clone() builtinFunc { + newSig := &builtinArithmeticMultiplyRealSig{} + newSig.cloneFrom(&s.baseBuiltinFunc) + return newSig +} + +type builtinArithmeticMultiplyIntUnsignedSig struct{ baseBuiltinFunc } + +func (s *builtinArithmeticMultiplyIntUnsignedSig) Clone() builtinFunc { + newSig := &builtinArithmeticMultiplyIntUnsignedSig{} + newSig.cloneFrom(&s.baseBuiltinFunc) + return newSig +} + +type builtinArithmeticMultiplyIntSig struct{ baseBuiltinFunc } + +func (s *builtinArithmeticMultiplyIntSig) Clone() builtinFunc { + newSig := &builtinArithmeticMultiplyIntSig{} + newSig.cloneFrom(&s.baseBuiltinFunc) + return newSig +} + +func (s *builtinArithmeticMultiplyRealSig) evalReal(row chunk.Row) (float64, bool, error) { + a, isNull, err := s.args[0].EvalReal(s.ctx, row) + if isNull || err != nil { + return 0, isNull, err + } + b, isNull, err := s.args[1].EvalReal(s.ctx, row) + if isNull || err != nil { + return 0, isNull, err + } + result := a * b + if math.IsInf(result, 0) { + return 0, true, types.ErrOverflow.GenWithStackByArgs("DOUBLE", fmt.Sprintf("(%s * %s)", s.args[0].String(), s.args[1].String())) + } + return result, false, nil +} + +func (s *builtinArithmeticMultiplyIntUnsignedSig) evalInt(row chunk.Row) (val int64, isNull bool, err error) { + a, isNull, err := s.args[0].EvalInt(s.ctx, row) + if isNull || err != nil { + return 0, isNull, err + } + unsignedA := uint64(a) + b, isNull, err := s.args[1].EvalInt(s.ctx, row) + if isNull || err != nil { + return 0, isNull, err + } + unsignedB := uint64(b) + result := unsignedA * unsignedB + if unsignedA != 0 && result/unsignedA != unsignedB { + return 0, true, types.ErrOverflow.GenWithStackByArgs("BIGINT UNSIGNED", fmt.Sprintf("(%s * %s)", s.args[0].String(), 
s.args[1].String())) + } + return int64(result), false, nil +} + +func (s *builtinArithmeticMultiplyIntSig) evalInt(row chunk.Row) (val int64, isNull bool, err error) { + a, isNull, err := s.args[0].EvalInt(s.ctx, row) + if isNull || err != nil { + return 0, isNull, err + } + b, isNull, err := s.args[1].EvalInt(s.ctx, row) + if isNull || err != nil { + return 0, isNull, err + } + result := a * b + if a != 0 && result/a != b { + return 0, true, types.ErrOverflow.GenWithStackByArgs("BIGINT", fmt.Sprintf("(%s * %s)", s.args[0].String(), s.args[1].String())) + } + return result, false, nil +} + +type arithmeticDivideFunctionClass struct { + baseFunctionClass +} + +func (c *arithmeticDivideFunctionClass) getFunction(ctx sessionctx.Context, args []Expression) (builtinFunc, error) { + if err := c.verifyArgs(args); err != nil { + return nil, err + } + bf := newBaseBuiltinFuncWithTp(ctx, args, types.ETReal, types.ETReal, types.ETReal) + c.setType4DivReal(bf.tp) + sig := &builtinArithmeticDivideRealSig{bf} + sig.setPbCode(tipb.ScalarFuncSig_DivideReal) + return sig, nil +} + +type builtinArithmeticDivideRealSig struct{ baseBuiltinFunc } + +func (s *builtinArithmeticDivideRealSig) Clone() builtinFunc { + newSig := &builtinArithmeticDivideRealSig{} + newSig.cloneFrom(&s.baseBuiltinFunc) + return newSig +} + +func (s *builtinArithmeticDivideRealSig) evalReal(row chunk.Row) (float64, bool, error) { + a, isNull, err := s.args[0].EvalReal(s.ctx, row) + if isNull || err != nil { + return 0, isNull, err + } + b, isNull, err := s.args[1].EvalReal(s.ctx, row) + if isNull || err != nil { + return 0, isNull, err + } + if b == 0 { + return 0, true, handleDivisionByZeroError(s.ctx) + } + result := a / b + if math.IsInf(result, 0) { + return 0, true, types.ErrOverflow.GenWithStackByArgs("DOUBLE", fmt.Sprintf("(%s / %s)", s.args[0].String(), s.args[1].String())) + } + return result, false, nil +} diff --git a/expression/builtin_arithmetic_test.go b/expression/builtin_arithmetic_test.go new file mode 100644 index 0000000..569adcc --- /dev/null +++ b/expression/builtin_arithmetic_test.go @@ -0,0 +1,293 @@ +// Copyright 2017 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package expression + +import ( + . 
"github.com/pingcap/check" + "github.com/pingcap/tidb/parser/ast" + "github.com/pingcap/tidb/parser/mysql" + "github.com/pingcap/tidb/types" + "github.com/pingcap/tidb/util/chunk" + "github.com/pingcap/tidb/util/testutil" +) + +func (s *testEvaluatorSuite) TestSetFlenDecimal4RealOrDecimal(c *C) { + ret := &types.FieldType{} + a := &types.FieldType{ + Decimal: 1, + Flen: 3, + } + b := &types.FieldType{ + Decimal: 0, + Flen: 2, + } + setFlenDecimal4RealOrDecimal(ret, a, b, true, false) + c.Assert(ret.Decimal, Equals, 1) + c.Assert(ret.Flen, Equals, 6) + + b.Flen = 65 + setFlenDecimal4RealOrDecimal(ret, a, b, true, false) + c.Assert(ret.Decimal, Equals, 1) + c.Assert(ret.Flen, Equals, mysql.MaxRealWidth) + setFlenDecimal4RealOrDecimal(ret, a, b, false, false) + c.Assert(ret.Decimal, Equals, 1) + c.Assert(ret.Flen, Equals, mysql.MaxDecimalWidth) + + b.Flen = types.UnspecifiedLength + setFlenDecimal4RealOrDecimal(ret, a, b, true, false) + c.Assert(ret.Decimal, Equals, 1) + c.Assert(ret.Flen, Equals, types.UnspecifiedLength) + + b.Decimal = types.UnspecifiedLength + setFlenDecimal4RealOrDecimal(ret, a, b, true, false) + c.Assert(ret.Decimal, Equals, types.UnspecifiedLength) + c.Assert(ret.Flen, Equals, types.UnspecifiedLength) + + ret = &types.FieldType{} + a = &types.FieldType{ + Decimal: 1, + Flen: 3, + } + b = &types.FieldType{ + Decimal: 0, + Flen: 2, + } + setFlenDecimal4RealOrDecimal(ret, a, b, true, true) + c.Assert(ret.Decimal, Equals, 1) + c.Assert(ret.Flen, Equals, 8) + + b.Flen = 65 + setFlenDecimal4RealOrDecimal(ret, a, b, true, true) + c.Assert(ret.Decimal, Equals, 1) + c.Assert(ret.Flen, Equals, mysql.MaxRealWidth) + setFlenDecimal4RealOrDecimal(ret, a, b, false, true) + c.Assert(ret.Decimal, Equals, 1) + c.Assert(ret.Flen, Equals, mysql.MaxDecimalWidth) + + b.Flen = types.UnspecifiedLength + setFlenDecimal4RealOrDecimal(ret, a, b, true, true) + c.Assert(ret.Decimal, Equals, 1) + c.Assert(ret.Flen, Equals, types.UnspecifiedLength) + + b.Decimal = types.UnspecifiedLength + setFlenDecimal4RealOrDecimal(ret, a, b, true, true) + c.Assert(ret.Decimal, Equals, types.UnspecifiedLength) + c.Assert(ret.Flen, Equals, types.UnspecifiedLength) +} + +func (s *testEvaluatorSuite) TestSetFlenDecimal4Int(c *C) { + ret := &types.FieldType{} + a := &types.FieldType{ + Decimal: 1, + Flen: 3, + } + b := &types.FieldType{ + Decimal: 0, + Flen: 2, + } + setFlenDecimal4Int(ret, a, b) + c.Assert(ret.Decimal, Equals, 0) + c.Assert(ret.Flen, Equals, mysql.MaxIntWidth) + + b.Flen = mysql.MaxIntWidth + 1 + setFlenDecimal4Int(ret, a, b) + c.Assert(ret.Decimal, Equals, 0) + c.Assert(ret.Flen, Equals, mysql.MaxIntWidth) + + b.Flen = types.UnspecifiedLength + setFlenDecimal4Int(ret, a, b) + c.Assert(ret.Decimal, Equals, 0) + c.Assert(ret.Flen, Equals, mysql.MaxIntWidth) +} + +func (s *testEvaluatorSuite) TestArithmeticPlus(c *C) { + // case: 1 + args := []interface{}{int64(12), int64(1)} + + bf, err := funcs[ast.Plus].getFunction(s.ctx, s.datumsToConstants(types.MakeDatums(args...))) + c.Assert(err, IsNil) + c.Assert(bf, NotNil) + intSig, ok := bf.(*builtinArithmeticPlusIntSig) + c.Assert(ok, IsTrue) + c.Assert(intSig, NotNil) + + intResult, isNull, err := intSig.evalInt(chunk.Row{}) + c.Assert(err, IsNil) + c.Assert(isNull, IsFalse) + c.Assert(intResult, Equals, int64(13)) + + // case 2 + args = []interface{}{float64(1.01001), float64(-0.01)} + + bf, err = funcs[ast.Plus].getFunction(s.ctx, s.datumsToConstants(types.MakeDatums(args...))) + c.Assert(err, IsNil) + c.Assert(bf, NotNil) + realSig, ok := 
bf.(*builtinArithmeticPlusRealSig) + c.Assert(ok, IsTrue) + c.Assert(realSig, NotNil) + + realResult, isNull, err := realSig.evalReal(chunk.Row{}) + c.Assert(err, IsNil) + c.Assert(isNull, IsFalse) + c.Assert(realResult, Equals, float64(1.00001)) + + // case 3 + args = []interface{}{nil, float64(-0.11101)} + + bf, err = funcs[ast.Plus].getFunction(s.ctx, s.datumsToConstants(types.MakeDatums(args...))) + c.Assert(err, IsNil) + c.Assert(bf, NotNil) + realSig, ok = bf.(*builtinArithmeticPlusRealSig) + c.Assert(ok, IsTrue) + c.Assert(realSig, NotNil) + + realResult, isNull, err = realSig.evalReal(chunk.Row{}) + c.Assert(err, IsNil) + c.Assert(isNull, IsTrue) + c.Assert(realResult, Equals, float64(0)) + + // case 4 + args = []interface{}{nil, nil} + + bf, err = funcs[ast.Plus].getFunction(s.ctx, s.datumsToConstants(types.MakeDatums(args...))) + c.Assert(err, IsNil) + c.Assert(bf, NotNil) + realSig, ok = bf.(*builtinArithmeticPlusRealSig) + c.Assert(ok, IsTrue) + c.Assert(realSig, NotNil) + + realResult, isNull, err = realSig.evalReal(chunk.Row{}) + c.Assert(err, IsNil) + c.Assert(isNull, IsTrue) + c.Assert(realResult, Equals, float64(0)) +} + +func (s *testEvaluatorSuite) TestArithmeticMinus(c *C) { + // case: 1 + args := []interface{}{int64(12), int64(1)} + + bf, err := funcs[ast.Minus].getFunction(s.ctx, s.datumsToConstants(types.MakeDatums(args...))) + c.Assert(err, IsNil) + c.Assert(bf, NotNil) + intSig, ok := bf.(*builtinArithmeticMinusIntSig) + c.Assert(ok, IsTrue) + c.Assert(intSig, NotNil) + + intResult, isNull, err := intSig.evalInt(chunk.Row{}) + c.Assert(err, IsNil) + c.Assert(isNull, IsFalse) + c.Assert(intResult, Equals, int64(11)) + + // case 2 + args = []interface{}{float64(1.01001), float64(-0.01)} + + bf, err = funcs[ast.Minus].getFunction(s.ctx, s.datumsToConstants(types.MakeDatums(args...))) + c.Assert(err, IsNil) + c.Assert(bf, NotNil) + realSig, ok := bf.(*builtinArithmeticMinusRealSig) + c.Assert(ok, IsTrue) + c.Assert(realSig, NotNil) + + realResult, isNull, err := realSig.evalReal(chunk.Row{}) + c.Assert(err, IsNil) + c.Assert(isNull, IsFalse) + c.Assert(realResult, Equals, float64(1.02001)) + + // case 3 + args = []interface{}{nil, float64(-0.11101)} + + bf, err = funcs[ast.Minus].getFunction(s.ctx, s.datumsToConstants(types.MakeDatums(args...))) + c.Assert(err, IsNil) + c.Assert(bf, NotNil) + realSig, ok = bf.(*builtinArithmeticMinusRealSig) + c.Assert(ok, IsTrue) + c.Assert(realSig, NotNil) + + realResult, isNull, err = realSig.evalReal(chunk.Row{}) + c.Assert(err, IsNil) + c.Assert(isNull, IsTrue) + c.Assert(realResult, Equals, float64(0)) + + // case 4 + args = []interface{}{float64(1.01), nil} + + bf, err = funcs[ast.Minus].getFunction(s.ctx, s.datumsToConstants(types.MakeDatums(args...))) + c.Assert(err, IsNil) + c.Assert(bf, NotNil) + realSig, ok = bf.(*builtinArithmeticMinusRealSig) + c.Assert(ok, IsTrue) + c.Assert(realSig, NotNil) + + realResult, isNull, err = realSig.evalReal(chunk.Row{}) + c.Assert(err, IsNil) + c.Assert(isNull, IsTrue) + c.Assert(realResult, Equals, float64(0)) + + // case 5 + args = []interface{}{nil, nil} + + bf, err = funcs[ast.Minus].getFunction(s.ctx, s.datumsToConstants(types.MakeDatums(args...))) + c.Assert(err, IsNil) + c.Assert(bf, NotNil) + realSig, ok = bf.(*builtinArithmeticMinusRealSig) + c.Assert(ok, IsTrue) + c.Assert(realSig, NotNil) + + realResult, isNull, err = realSig.evalReal(chunk.Row{}) + c.Assert(err, IsNil) + c.Assert(isNull, IsTrue) + c.Assert(realResult, Equals, float64(0)) +} + +func (s *testEvaluatorSuite) 
TestArithmeticMultiply(c *C) { + testCases := []struct { + args []interface{} + expect interface{} + err error + }{ + { + args: []interface{}{int64(11), int64(11)}, + expect: int64(121), + }, + { + args: []interface{}{uint64(11), uint64(11)}, + expect: int64(121), + }, + { + args: []interface{}{float64(11), float64(11)}, + expect: float64(121), + }, + { + args: []interface{}{nil, float64(-0.11101)}, + expect: nil, + }, + { + args: []interface{}{float64(1.01), nil}, + expect: nil, + }, + { + args: []interface{}{nil, nil}, + expect: nil, + }, + } + + for _, tc := range testCases { + sig, err := funcs[ast.Mul].getFunction(s.ctx, s.datumsToConstants(types.MakeDatums(tc.args...))) + c.Assert(err, IsNil) + c.Assert(sig, NotNil) + val, err := evalBuiltinFunc(sig, chunk.Row{}) + c.Assert(err, IsNil) + c.Assert(val, testutil.DatumEquals, types.NewDatum(tc.expect)) + } +} diff --git a/expression/builtin_arithmetic_vec.go b/expression/builtin_arithmetic_vec.go new file mode 100644 index 0000000..580b3e2 --- /dev/null +++ b/expression/builtin_arithmetic_vec.go @@ -0,0 +1,532 @@ +// Copyright 2019 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package expression + +import ( + "fmt" + "math" + + "github.com/pingcap/tidb/parser/mysql" + "github.com/pingcap/tidb/types" + "github.com/pingcap/tidb/util/chunk" +) + +func (b *builtinArithmeticMultiplyRealSig) vectorized() bool { + return true +} + +func (b *builtinArithmeticMultiplyRealSig) vecEvalReal(input *chunk.Chunk, result *chunk.Column) error { + if err := b.args[0].VecEvalReal(b.ctx, input, result); err != nil { + return err + } + n := input.NumRows() + buf, err := b.bufAllocator.get(types.ETReal, n) + if err != nil { + return err + } + defer b.bufAllocator.put(buf) + if err := b.args[1].VecEvalReal(b.ctx, input, buf); err != nil { + return err + } + + result.MergeNulls(buf) + x := result.Float64s() + y := buf.Float64s() + for i := 0; i < n; i++ { + if result.IsNull(i) { + continue + } + x[i] = x[i] * y[i] + if math.IsInf(x[i], 0) { + return types.ErrOverflow.GenWithStackByArgs("DOUBLE", fmt.Sprintf("(%s * %s)", b.args[0].String(), b.args[1].String())) + } + } + return nil +} + +func (b *builtinArithmeticMinusRealSig) vectorized() bool { + return true +} + +func (b *builtinArithmeticMinusRealSig) vecEvalReal(input *chunk.Chunk, result *chunk.Column) error { + if err := b.args[0].VecEvalReal(b.ctx, input, result); err != nil { + return err + } + n := input.NumRows() + buf, err := b.bufAllocator.get(types.ETReal, n) + if err != nil { + return err + } + defer b.bufAllocator.put(buf) + if err := b.args[1].VecEvalReal(b.ctx, input, buf); err != nil { + return err + } + + result.MergeNulls(buf) + x := result.Float64s() + y := buf.Float64s() + for i := 0; i < n; i++ { + if result.IsNull(i) { + continue + } + if (x[i] > 0 && -y[i] > math.MaxFloat64-x[i]) || (x[i] < 0 && -y[i] < -math.MaxFloat64-x[i]) { + return types.ErrOverflow.GenWithStackByArgs("DOUBLE", fmt.Sprintf("(%s - %s)", b.args[0].String(), b.args[1].String())) + } + x[i] = x[i] - y[i] + } + return nil +} + +func (b *builtinArithmeticMinusIntSig) 
vectorized() bool { + return true +} + +func (b *builtinArithmeticMinusIntSig) vecEvalInt(input *chunk.Chunk, result *chunk.Column) error { + n := input.NumRows() + lh, err := b.bufAllocator.get(types.ETInt, n) + if err != nil { + return err + } + defer b.bufAllocator.put(lh) + + if err := b.args[0].VecEvalInt(b.ctx, input, lh); err != nil { + return err + } + + if err := b.args[1].VecEvalInt(b.ctx, input, result); err != nil { + return err + } + + result.MergeNulls(lh) + + rh := result + lhi64s := lh.Int64s() + rhi64s := rh.Int64s() + resulti64s := result.Int64s() + + forceToSigned := b.ctx.GetSessionVars().SQLMode.HasNoUnsignedSubtractionMode() + isLHSUnsigned := mysql.HasUnsignedFlag(b.args[0].GetType().Flag) + isRHSUnsigned := mysql.HasUnsignedFlag(b.args[1].GetType().Flag) + + switch { + case forceToSigned && isLHSUnsigned && isRHSUnsigned: + err = b.minusFUU(result, lhi64s, rhi64s, resulti64s) + case forceToSigned && isLHSUnsigned && !isRHSUnsigned: + err = b.minusFUS(result, lhi64s, rhi64s, resulti64s) + case forceToSigned && !isLHSUnsigned && isRHSUnsigned: + err = b.minusFSU(result, lhi64s, rhi64s, resulti64s) + case forceToSigned && !isLHSUnsigned && !isRHSUnsigned: + err = b.minusSS(result, lhi64s, rhi64s, resulti64s) + case !forceToSigned && isLHSUnsigned && isRHSUnsigned: + err = b.minusUU(result, lhi64s, rhi64s, resulti64s) + case !forceToSigned && isLHSUnsigned && !isRHSUnsigned: + err = b.minusUS(result, lhi64s, rhi64s, resulti64s) + case !forceToSigned && !isLHSUnsigned && isRHSUnsigned: + err = b.minusSU(result, lhi64s, rhi64s, resulti64s) + case !forceToSigned && !isLHSUnsigned && !isRHSUnsigned: + err = b.minusSS(result, lhi64s, rhi64s, resulti64s) + } + return err +} +func (b *builtinArithmeticMinusIntSig) minusFUU(result *chunk.Column, lhi64s, rhi64s, resulti64s []int64) error { + for i := 0; i < len(lhi64s); i++ { + if result.IsNull(i) { + continue + } + lh, rh := lhi64s[i], rhi64s[i] + + if lh < 0 || (lh > math.MaxInt64) { + return types.ErrOverflow.GenWithStackByArgs("BIGINT UNSIGNED", fmt.Sprintf("(%s - %s)", b.args[0].String(), b.args[1].String())) + } + + if rh < 0 || (rh > math.MaxInt64) { + return types.ErrOverflow.GenWithStackByArgs("BIGINT UNSIGNED", fmt.Sprintf("(%s - %s)", b.args[0].String(), b.args[1].String())) + } + + if (lh > 0 && -rh > math.MaxInt64-lh) || (lh < 0 && -rh < math.MinInt64-lh) { + return types.ErrOverflow.GenWithStackByArgs("BIGINT", fmt.Sprintf("(%s + %s)", b.args[0].String(), b.args[1].String())) + } + + resulti64s[i] = lh - rh + } + return nil +} + +func (b *builtinArithmeticMinusIntSig) minusFUS(result *chunk.Column, lhi64s, rhi64s, resulti64s []int64) error { + for i := 0; i < len(lhi64s); i++ { + if result.IsNull(i) { + continue + } + lh, rh := lhi64s[i], rhi64s[i] + + if lh < 0 || (lh > math.MaxInt64) { + return types.ErrOverflow.GenWithStackByArgs("BIGINT UNSIGNED", fmt.Sprintf("(%s - %s)", b.args[0].String(), b.args[1].String())) + } + + if (lh > 0 && -rh > math.MaxInt64-lh) || (lh < 0 && -rh < math.MinInt64-lh) { + return types.ErrOverflow.GenWithStackByArgs("BIGINT", fmt.Sprintf("(%s + %s)", b.args[0].String(), b.args[1].String())) + } + + resulti64s[i] = lh - rh + } + return nil +} + +func (b *builtinArithmeticMinusIntSig) minusFSU(result *chunk.Column, lhi64s, rhi64s, resulti64s []int64) error { + for i := 0; i < len(lhi64s); i++ { + if result.IsNull(i) { + continue + } + lh, rh := lhi64s[i], rhi64s[i] + + if rh < 0 || (rh > math.MaxInt64) { + return types.ErrOverflow.GenWithStackByArgs("BIGINT UNSIGNED", fmt.Sprintf("(%s - 
%s)", b.args[0].String(), b.args[1].String())) + } + + if (lh > 0 && -rh > math.MaxInt64-lh) || (lh < 0 && -rh < math.MinInt64-lh) { + return types.ErrOverflow.GenWithStackByArgs("BIGINT", fmt.Sprintf("(%s + %s)", b.args[0].String(), b.args[1].String())) + } + + resulti64s[i] = lh - rh + } + return nil +} +func (b *builtinArithmeticMinusIntSig) minusUU(result *chunk.Column, lhi64s, rhi64s, resulti64s []int64) error { + for i := 0; i < len(lhi64s); i++ { + if result.IsNull(i) { + continue + } + lh, rh := lhi64s[i], rhi64s[i] + + if uint64(lh) < uint64(rh) { + return types.ErrOverflow.GenWithStackByArgs("BIGINT UNSIGNED", fmt.Sprintf("(%s - %s)", b.args[0].String(), b.args[1].String())) + } + + resulti64s[i] = lh - rh + } + return nil +} + +func (b *builtinArithmeticMinusIntSig) minusUS(result *chunk.Column, lhi64s, rhi64s, resulti64s []int64) error { + for i := 0; i < len(lhi64s); i++ { + if result.IsNull(i) { + continue + } + lh, rh := lhi64s[i], rhi64s[i] + + if rh >= 0 && uint64(lh) < uint64(rh) { + return types.ErrOverflow.GenWithStackByArgs("BIGINT UNSIGNED", fmt.Sprintf("(%s - %s)", b.args[0].String(), b.args[1].String())) + } + if rh < 0 && uint64(lh) > math.MaxUint64-uint64(-rh) { + return types.ErrOverflow.GenWithStackByArgs("BIGINT UNSIGNED", fmt.Sprintf("(%s - %s)", b.args[0].String(), b.args[1].String())) + } + + resulti64s[i] = lh - rh + } + return nil +} + +func (b *builtinArithmeticMinusIntSig) minusSU(result *chunk.Column, lhi64s, rhi64s, resulti64s []int64) error { + for i := 0; i < len(lhi64s); i++ { + if result.IsNull(i) { + continue + } + lh, rh := lhi64s[i], rhi64s[i] + + if uint64(lh-math.MinInt64) < uint64(rh) { + return types.ErrOverflow.GenWithStackByArgs("BIGINT UNSIGNED", fmt.Sprintf("(%s - %s)", b.args[0].String(), b.args[1].String())) + } + + resulti64s[i] = lh - rh + } + return nil +} +func (b *builtinArithmeticMinusIntSig) minusSS(result *chunk.Column, lhi64s, rhi64s, resulti64s []int64) error { + for i := 0; i < len(lhi64s); i++ { + if result.IsNull(i) { + continue + } + lh, rh := lhi64s[i], rhi64s[i] + + if (lh > 0 && -rh > math.MaxInt64-lh) || (lh < 0 && -rh < math.MinInt64-lh) { + return types.ErrOverflow.GenWithStackByArgs("BIGINT", fmt.Sprintf("(%s - %s)", b.args[0].String(), b.args[1].String())) + } + + resulti64s[i] = lh - rh + } + return nil +} + +func (b *builtinArithmeticPlusRealSig) vectorized() bool { + return true +} + +func (b *builtinArithmeticPlusRealSig) vecEvalReal(input *chunk.Chunk, result *chunk.Column) error { + if err := b.args[0].VecEvalReal(b.ctx, input, result); err != nil { + return err + } + n := input.NumRows() + buf, err := b.bufAllocator.get(types.ETReal, n) + if err != nil { + return err + } + defer b.bufAllocator.put(buf) + if err := b.args[1].VecEvalReal(b.ctx, input, buf); err != nil { + return err + } + + result.MergeNulls(buf) + x := result.Float64s() + y := buf.Float64s() + for i := 0; i < n; i++ { + if result.IsNull(i) { + continue + } + if (x[i] > 0 && y[i] > math.MaxFloat64-x[i]) || (x[i] < 0 && y[i] < -math.MaxFloat64-x[i]) { + return types.ErrOverflow.GenWithStackByArgs("DOUBLE", fmt.Sprintf("(%s + %s)", b.args[0].String(), b.args[1].String())) + } + x[i] = x[i] + y[i] + } + return nil +} + +func (b *builtinArithmeticMultiplyIntSig) vectorized() bool { + return true +} + +func (b *builtinArithmeticMultiplyIntSig) vecEvalInt(input *chunk.Chunk, result *chunk.Column) error { + if err := b.args[0].VecEvalInt(b.ctx, input, result); err != nil { + return err + } + n := input.NumRows() + buf, err := 
b.bufAllocator.get(types.ETInt, n) + if err != nil { + return err + } + defer b.bufAllocator.put(buf) + + if err := b.args[1].VecEvalInt(b.ctx, input, buf); err != nil { + return err + } + + x := result.Int64s() + y := buf.Int64s() + result.MergeNulls(buf) + var tmp int64 + for i := 0; i < n; i++ { + if result.IsNull(i) { + continue + } + + tmp = x[i] * y[i] + if x[i] != 0 && tmp/x[i] != y[i] { + result.SetNull(i, true) + return types.ErrOverflow.GenWithStackByArgs("BIGINT", fmt.Sprintf("(%s * %s)", b.args[0].String(), b.args[1].String())) + } + + x[i] = tmp + } + + return nil +} + +func (b *builtinArithmeticDivideRealSig) vectorized() bool { + return true +} + +func (b *builtinArithmeticDivideRealSig) vecEvalReal(input *chunk.Chunk, result *chunk.Column) error { + if err := b.args[0].VecEvalReal(b.ctx, input, result); err != nil { + return err + } + n := input.NumRows() + buf, err := b.bufAllocator.get(types.ETReal, n) + if err != nil { + return err + } + defer b.bufAllocator.put(buf) + if err := b.args[1].VecEvalReal(b.ctx, input, buf); err != nil { + return err + } + + result.MergeNulls(buf) + x := result.Float64s() + y := buf.Float64s() + for i := 0; i < n; i++ { + if result.IsNull(i) { + continue + } + if y[i] == 0 { + if err := handleDivisionByZeroError(b.ctx); err != nil { + return err + } + result.SetNull(i, true) + continue + } + + x[i] = x[i] / y[i] + if math.IsInf(x[i], 0) { + return types.ErrOverflow.GenWithStackByArgs("DOUBLE", fmt.Sprintf("(%s / %s)", b.args[0].String(), b.args[1].String())) + } + } + return nil +} + +func (b *builtinArithmeticPlusIntSig) vectorized() bool { + return true +} + +func (b *builtinArithmeticPlusIntSig) vecEvalInt(input *chunk.Chunk, result *chunk.Column) error { + n := input.NumRows() + lh, err := b.bufAllocator.get(types.ETInt, n) + if err != nil { + return err + } + defer b.bufAllocator.put(lh) + + if err := b.args[0].VecEvalInt(b.ctx, input, lh); err != nil { + return err + } + + // reuse result as rh to avoid buf allocate + if err := b.args[1].VecEvalInt(b.ctx, input, result); err != nil { + return err + } + + result.MergeNulls(lh) + + rh := result + lhi64s := lh.Int64s() + rhi64s := rh.Int64s() + resulti64s := result.Int64s() + + isLHSUnsigned := mysql.HasUnsignedFlag(b.args[0].GetType().Flag) + isRHSUnsigned := mysql.HasUnsignedFlag(b.args[1].GetType().Flag) + + switch { + case isLHSUnsigned && isRHSUnsigned: + err = b.plusUU(result, lhi64s, rhi64s, resulti64s) + case isLHSUnsigned && !isRHSUnsigned: + err = b.plusUS(result, lhi64s, rhi64s, resulti64s) + case !isLHSUnsigned && isRHSUnsigned: + err = b.plusSU(result, lhi64s, rhi64s, resulti64s) + case !isLHSUnsigned && !isRHSUnsigned: + err = b.plusSS(result, lhi64s, rhi64s, resulti64s) + } + return err +} +func (b *builtinArithmeticPlusIntSig) plusUU(result *chunk.Column, lhi64s, rhi64s, resulti64s []int64) error { + for i := 0; i < len(lhi64s); i++ { + if result.IsNull(i) { + continue + } + lh, rh := lhi64s[i], rhi64s[i] + + if uint64(lh) > math.MaxUint64-uint64(rh) { + return types.ErrOverflow.GenWithStackByArgs("BIGINT UNSIGNED", fmt.Sprintf("(%s + %s)", b.args[0].String(), b.args[1].String())) + } + + resulti64s[i] = lh + rh + } + return nil +} + +func (b *builtinArithmeticPlusIntSig) plusUS(result *chunk.Column, lhi64s, rhi64s, resulti64s []int64) error { + for i := 0; i < len(lhi64s); i++ { + if result.IsNull(i) { + continue + } + lh, rh := lhi64s[i], rhi64s[i] + + if rh < 0 && uint64(-rh) > uint64(lh) { + return types.ErrOverflow.GenWithStackByArgs("BIGINT UNSIGNED", fmt.Sprintf("(%s 
+ %s)", b.args[0].String(), b.args[1].String())) + } + if rh > 0 && uint64(lh) > math.MaxUint64-uint64(lh) { + return types.ErrOverflow.GenWithStackByArgs("BIGINT UNSIGNED", fmt.Sprintf("(%s + %s)", b.args[0].String(), b.args[1].String())) + } + + resulti64s[i] = lh + rh + } + return nil +} + +func (b *builtinArithmeticPlusIntSig) plusSU(result *chunk.Column, lhi64s, rhi64s, resulti64s []int64) error { + for i := 0; i < len(lhi64s); i++ { + if result.IsNull(i) { + continue + } + lh, rh := lhi64s[i], rhi64s[i] + + if lh < 0 && uint64(-lh) > uint64(rh) { + return types.ErrOverflow.GenWithStackByArgs("BIGINT UNSIGNED", fmt.Sprintf("(%s + %s)", b.args[0].String(), b.args[1].String())) + } + if lh > 0 && uint64(rh) > math.MaxUint64-uint64(lh) { + return types.ErrOverflow.GenWithStackByArgs("BIGINT UNSIGNED", fmt.Sprintf("(%s + %s)", b.args[0].String(), b.args[1].String())) + } + + resulti64s[i] = lh + rh + } + return nil +} +func (b *builtinArithmeticPlusIntSig) plusSS(result *chunk.Column, lhi64s, rhi64s, resulti64s []int64) error { + for i := 0; i < len(lhi64s); i++ { + if result.IsNull(i) { + continue + } + lh, rh := lhi64s[i], rhi64s[i] + + if (lh > 0 && rh > math.MaxInt64-lh) || (lh < 0 && rh < math.MinInt64-lh) { + return types.ErrOverflow.GenWithStackByArgs("BIGINT", fmt.Sprintf("(%s + %s)", b.args[0].String(), b.args[1].String())) + } + + resulti64s[i] = lh + rh + } + return nil +} + +func (b *builtinArithmeticMultiplyIntUnsignedSig) vectorized() bool { + return true +} + +func (b *builtinArithmeticMultiplyIntUnsignedSig) vecEvalInt(input *chunk.Chunk, result *chunk.Column) error { + if err := b.args[0].VecEvalInt(b.ctx, input, result); err != nil { + return err + } + n := input.NumRows() + buf, err := b.bufAllocator.get(types.ETInt, n) + if err != nil { + return err + } + defer b.bufAllocator.put(buf) + + if err := b.args[1].VecEvalInt(b.ctx, input, buf); err != nil { + return err + } + + x := result.Uint64s() + y := buf.Uint64s() + result.MergeNulls(buf) + var res uint64 + for i := 0; i < n; i++ { + if result.IsNull(i) { + continue + } + + res = x[i] * y[i] + if x[i] != 0 && res/x[i] != y[i] { + return types.ErrOverflow.GenWithStackByArgs("BIGINT UNSIGNED", fmt.Sprintf("(%s * %s)", b.args[0].String(), b.args[1].String())) + } + x[i] = res + } + return nil +} diff --git a/expression/builtin_arithmetic_vec_test.go b/expression/builtin_arithmetic_vec_test.go new file mode 100644 index 0000000..e86a5d2 --- /dev/null +++ b/expression/builtin_arithmetic_vec_test.go @@ -0,0 +1,86 @@ +// Copyright 2019 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package expression + +import ( + "math" + "testing" + + . 
"github.com/pingcap/check" + "github.com/pingcap/tidb/parser/ast" + "github.com/pingcap/tidb/parser/mysql" + "github.com/pingcap/tidb/types" +) + +var vecBuiltinArithmeticCases = map[string][]vecExprBenchCase{ + ast.Minus: { + {retEvalType: types.ETReal, childrenTypes: []types.EvalType{types.ETReal, types.ETReal}}, + {retEvalType: types.ETInt, childrenTypes: []types.EvalType{types.ETInt, types.ETInt}, geners: []dataGenerator{&rangeInt64Gener{-100000, 100000}, &rangeInt64Gener{-100000, 100000}}}, + }, + ast.Div: { + {retEvalType: types.ETReal, childrenTypes: []types.EvalType{types.ETReal, types.ETReal}}, + {retEvalType: types.ETReal, childrenTypes: []types.EvalType{types.ETReal, types.ETReal}, geners: []dataGenerator{nil, &rangeRealGener{0, 0, 0}}}, + }, + ast.Mul: { + {retEvalType: types.ETReal, childrenTypes: []types.EvalType{types.ETReal, types.ETReal}}, + {retEvalType: types.ETInt, childrenTypes: []types.EvalType{types.ETInt, types.ETInt}, geners: []dataGenerator{&rangeInt64Gener{-10000, 10000}, &rangeInt64Gener{-10000, 10000}}}, + {retEvalType: types.ETInt, childrenTypes: []types.EvalType{types.ETInt, types.ETInt}, childrenFieldTypes: []*types.FieldType{{Tp: mysql.TypeInt24, Flag: mysql.UnsignedFlag}, {Tp: mysql.TypeLonglong, Flag: mysql.UnsignedFlag}}, + geners: []dataGenerator{ + &rangeInt64Gener{begin: 0, end: 10000}, + &rangeInt64Gener{begin: 0, end: 10000}, + }, + }, + }, + ast.Plus: { + {retEvalType: types.ETReal, childrenTypes: []types.EvalType{types.ETReal, types.ETReal}}, + {retEvalType: types.ETInt, childrenTypes: []types.EvalType{types.ETInt, types.ETInt}, + geners: []dataGenerator{ + &rangeInt64Gener{begin: math.MinInt64 / 2, end: math.MaxInt64 / 2}, + &rangeInt64Gener{begin: math.MinInt64 / 2, end: math.MaxInt64 / 2}, + }, + }, + {retEvalType: types.ETInt, childrenTypes: []types.EvalType{types.ETInt, types.ETInt}, + childrenFieldTypes: []*types.FieldType{{Tp: mysql.TypeLonglong, Flag: mysql.UnsignedFlag}, + {Tp: mysql.TypeLonglong, Flag: mysql.UnsignedFlag}}, + geners: []dataGenerator{ + &rangeInt64Gener{begin: 0, end: math.MaxInt64}, + &rangeInt64Gener{begin: 0, end: math.MaxInt64}, + }, + }, + {retEvalType: types.ETInt, childrenTypes: []types.EvalType{types.ETInt, types.ETInt}, + childrenFieldTypes: []*types.FieldType{{Tp: mysql.TypeLonglong}, + {Tp: mysql.TypeLonglong, Flag: mysql.UnsignedFlag}}, + geners: []dataGenerator{ + &rangeInt64Gener{begin: 0, end: math.MaxInt64}, + &rangeInt64Gener{begin: 0, end: math.MaxInt64}, + }, + }, + {retEvalType: types.ETInt, childrenTypes: []types.EvalType{types.ETInt, types.ETInt}, + childrenFieldTypes: []*types.FieldType{{Tp: mysql.TypeLonglong, Flag: mysql.UnsignedFlag}, + {Tp: mysql.TypeLonglong}}, + geners: []dataGenerator{ + &rangeInt64Gener{begin: 0, end: math.MaxInt64}, + &rangeInt64Gener{begin: 0, end: math.MaxInt64}, + }, + }, + }, +} + +func (s *testEvaluatorSuite) TestVectorizedBuiltinArithmeticFunc(c *C) { + testVectorizedBuiltinFunc(c, vecBuiltinArithmeticCases) +} + +func BenchmarkVectorizedBuiltinArithmeticFunc(b *testing.B) { + benchmarkVectorizedBuiltinFunc(b, vecBuiltinArithmeticCases) +} diff --git a/expression/builtin_compare.go b/expression/builtin_compare.go new file mode 100644 index 0000000..749a7af --- /dev/null +++ b/expression/builtin_compare.go @@ -0,0 +1,598 @@ +// Copyright 2017 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package expression + +import ( + "math" + + "github.com/pingcap/tidb/parser/mysql" + "github.com/pingcap/tidb/parser/opcode" + "github.com/pingcap/tidb/sessionctx" + "github.com/pingcap/tidb/types" + "github.com/pingcap/tidb/util/chunk" + "github.com/pingcap/tipb/go-tipb" +) + +var ( + _ functionClass = &compareFunctionClass{} +) + +var ( + _ builtinFunc = &builtinLTIntSig{} + _ builtinFunc = &builtinLTRealSig{} + _ builtinFunc = &builtinLTStringSig{} + + _ builtinFunc = &builtinLEIntSig{} + _ builtinFunc = &builtinLERealSig{} + _ builtinFunc = &builtinLEStringSig{} + + _ builtinFunc = &builtinGTIntSig{} + _ builtinFunc = &builtinGTRealSig{} + _ builtinFunc = &builtinGTStringSig{} + + _ builtinFunc = &builtinGEIntSig{} + _ builtinFunc = &builtinGERealSig{} + _ builtinFunc = &builtinGEStringSig{} + + _ builtinFunc = &builtinNEIntSig{} + _ builtinFunc = &builtinNERealSig{} + _ builtinFunc = &builtinNEStringSig{} +) + +type compareFunctionClass struct { + baseFunctionClass + + op opcode.Op +} + +// getBaseCmpType gets the EvalType that the two args will be treated as when comparing. +func getBaseCmpType(lhs, rhs types.EvalType, lft, rft *types.FieldType) types.EvalType { + if lft.Tp == mysql.TypeUnspecified || rft.Tp == mysql.TypeUnspecified { + if lft.Tp == rft.Tp { + return types.ETString + } + if lft.Tp == mysql.TypeUnspecified { + lhs = rhs + } else { + rhs = lhs + } + } + if lhs.IsStringKind() && rhs.IsStringKind() { + return types.ETString + } else if (lhs == types.ETInt || lft.Hybrid()) && (rhs == types.ETInt || rft.Hybrid()) { + return types.ETInt + } + return types.ETReal +} + +// GetAccurateCmpType uses a more complex logic to decide the EvalType of the two args when compare with each other than +// getBaseCmpType does. +func GetAccurateCmpType(lhs, rhs Expression) types.EvalType { + lhsFieldType, rhsFieldType := lhs.GetType(), rhs.GetType() + lhsEvalType, rhsEvalType := lhsFieldType.EvalType(), rhsFieldType.EvalType() + cmpType := getBaseCmpType(lhsEvalType, rhsEvalType, lhsFieldType, rhsFieldType) + return cmpType +} + +// GetCmpFunction get the compare function according to two arguments. +func GetCmpFunction(lhs, rhs Expression) CompareFunc { + switch GetAccurateCmpType(lhs, rhs) { + case types.ETInt: + return CompareInt + case types.ETReal: + return CompareReal + case types.ETString: + return CompareString + } + return nil +} + +// getFunction sets compare built-in function signatures for various types. +func (c *compareFunctionClass) getFunction(ctx sessionctx.Context, rawArgs []Expression) (sig builtinFunc, err error) { + if err = c.verifyArgs(rawArgs); err != nil { + return nil, err + } + cmpType := GetAccurateCmpType(rawArgs[0], rawArgs[1]) + sig, err = c.generateCmpSigs(ctx, rawArgs, cmpType) + return sig, err +} + +// generateCmpSigs generates compare function signatures. 
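+// The returned signature always yields an integer with Flen 1, i.e. a boolean-style 0/1 result.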
+func (c *compareFunctionClass) generateCmpSigs(ctx sessionctx.Context, args []Expression, tp types.EvalType) (sig builtinFunc, err error) { + bf := newBaseBuiltinFuncWithTp(ctx, args, types.ETInt, tp, tp) + bf.tp.Flen = 1 + switch tp { + case types.ETInt: + switch c.op { + case opcode.LT: + sig = &builtinLTIntSig{bf} + sig.setPbCode(tipb.ScalarFuncSig_LTInt) + case opcode.LE: + sig = &builtinLEIntSig{bf} + sig.setPbCode(tipb.ScalarFuncSig_LEInt) + case opcode.GT: + sig = &builtinGTIntSig{bf} + sig.setPbCode(tipb.ScalarFuncSig_GTInt) + case opcode.EQ: + sig = &builtinEQIntSig{bf} + sig.setPbCode(tipb.ScalarFuncSig_EQInt) + case opcode.GE: + sig = &builtinGEIntSig{bf} + sig.setPbCode(tipb.ScalarFuncSig_GEInt) + case opcode.NE: + sig = &builtinNEIntSig{bf} + sig.setPbCode(tipb.ScalarFuncSig_NEInt) + } + case types.ETReal: + switch c.op { + case opcode.LT: + sig = &builtinLTRealSig{bf} + sig.setPbCode(tipb.ScalarFuncSig_LTReal) + case opcode.LE: + sig = &builtinLERealSig{bf} + sig.setPbCode(tipb.ScalarFuncSig_LEReal) + case opcode.GT: + sig = &builtinGTRealSig{bf} + sig.setPbCode(tipb.ScalarFuncSig_GTReal) + case opcode.GE: + sig = &builtinGERealSig{bf} + sig.setPbCode(tipb.ScalarFuncSig_GEReal) + case opcode.EQ: + sig = &builtinEQRealSig{bf} + sig.setPbCode(tipb.ScalarFuncSig_EQReal) + case opcode.NE: + sig = &builtinNERealSig{bf} + sig.setPbCode(tipb.ScalarFuncSig_NEReal) + } + case types.ETString: + switch c.op { + case opcode.LT: + sig = &builtinLTStringSig{bf} + sig.setPbCode(tipb.ScalarFuncSig_LTString) + case opcode.LE: + sig = &builtinLEStringSig{bf} + sig.setPbCode(tipb.ScalarFuncSig_LEString) + case opcode.GT: + sig = &builtinGTStringSig{bf} + sig.setPbCode(tipb.ScalarFuncSig_GTString) + case opcode.GE: + sig = &builtinGEStringSig{bf} + sig.setPbCode(tipb.ScalarFuncSig_GEString) + case opcode.EQ: + sig = &builtinEQStringSig{bf} + sig.setPbCode(tipb.ScalarFuncSig_EQString) + case opcode.NE: + sig = &builtinNEStringSig{bf} + sig.setPbCode(tipb.ScalarFuncSig_NEString) + } + } + return +} + +type builtinLTIntSig struct { + baseBuiltinFunc +} + +func (b *builtinLTIntSig) Clone() builtinFunc { + newSig := &builtinLTIntSig{} + newSig.cloneFrom(&b.baseBuiltinFunc) + return newSig +} + +func (b *builtinLTIntSig) evalInt(row chunk.Row) (val int64, isNull bool, err error) { + return resOfLT(CompareInt(b.ctx, b.args[0], b.args[1], row, row)) +} + +type builtinLTRealSig struct { + baseBuiltinFunc +} + +func (b *builtinLTRealSig) Clone() builtinFunc { + newSig := &builtinLTRealSig{} + newSig.cloneFrom(&b.baseBuiltinFunc) + return newSig +} + +func (b *builtinLTRealSig) evalInt(row chunk.Row) (val int64, isNull bool, err error) { + return resOfLT(CompareReal(b.ctx, b.args[0], b.args[1], row, row)) +} + +type builtinLTStringSig struct { + baseBuiltinFunc +} + +func (b *builtinLTStringSig) Clone() builtinFunc { + newSig := &builtinLTStringSig{} + newSig.cloneFrom(&b.baseBuiltinFunc) + return newSig +} + +func (b *builtinLTStringSig) evalInt(row chunk.Row) (val int64, isNull bool, err error) { + return resOfLT(CompareString(b.ctx, b.args[0], b.args[1], row, row)) +} + +type builtinLEIntSig struct { + baseBuiltinFunc +} + +func (b *builtinLEIntSig) Clone() builtinFunc { + newSig := &builtinLEIntSig{} + newSig.cloneFrom(&b.baseBuiltinFunc) + return newSig +} + +func (b *builtinLEIntSig) evalInt(row chunk.Row) (val int64, isNull bool, err error) { + return resOfLE(CompareInt(b.ctx, b.args[0], b.args[1], row, row)) +} + +type builtinLERealSig struct { + baseBuiltinFunc +} + +func (b *builtinLERealSig) 
Clone() builtinFunc { + newSig := &builtinLERealSig{} + newSig.cloneFrom(&b.baseBuiltinFunc) + return newSig +} + +func (b *builtinLERealSig) evalInt(row chunk.Row) (val int64, isNull bool, err error) { + return resOfLE(CompareReal(b.ctx, b.args[0], b.args[1], row, row)) +} + +type builtinLEStringSig struct { + baseBuiltinFunc +} + +func (b *builtinLEStringSig) Clone() builtinFunc { + newSig := &builtinLEStringSig{} + newSig.cloneFrom(&b.baseBuiltinFunc) + return newSig +} + +func (b *builtinLEStringSig) evalInt(row chunk.Row) (val int64, isNull bool, err error) { + return resOfLE(CompareString(b.ctx, b.args[0], b.args[1], row, row)) +} + +type builtinGTIntSig struct { + baseBuiltinFunc +} + +func (b *builtinGTIntSig) Clone() builtinFunc { + newSig := &builtinGTIntSig{} + newSig.cloneFrom(&b.baseBuiltinFunc) + return newSig +} + +func (b *builtinGTIntSig) evalInt(row chunk.Row) (val int64, isNull bool, err error) { + return resOfGT(CompareInt(b.ctx, b.args[0], b.args[1], row, row)) +} + +type builtinGTRealSig struct { + baseBuiltinFunc +} + +func (b *builtinGTRealSig) Clone() builtinFunc { + newSig := &builtinGTRealSig{} + newSig.cloneFrom(&b.baseBuiltinFunc) + return newSig +} + +func (b *builtinGTRealSig) evalInt(row chunk.Row) (val int64, isNull bool, err error) { + return resOfGT(CompareReal(b.ctx, b.args[0], b.args[1], row, row)) +} + +type builtinGTStringSig struct { + baseBuiltinFunc +} + +func (b *builtinGTStringSig) Clone() builtinFunc { + newSig := &builtinGTStringSig{} + newSig.cloneFrom(&b.baseBuiltinFunc) + return newSig +} + +func (b *builtinGTStringSig) evalInt(row chunk.Row) (val int64, isNull bool, err error) { + return resOfGT(CompareString(b.ctx, b.args[0], b.args[1], row, row)) +} + +type builtinGEIntSig struct { + baseBuiltinFunc +} + +func (b *builtinGEIntSig) Clone() builtinFunc { + newSig := &builtinGEIntSig{} + newSig.cloneFrom(&b.baseBuiltinFunc) + return newSig +} + +func (b *builtinGEIntSig) evalInt(row chunk.Row) (val int64, isNull bool, err error) { + return resOfGE(CompareInt(b.ctx, b.args[0], b.args[1], row, row)) +} + +type builtinGERealSig struct { + baseBuiltinFunc +} + +func (b *builtinGERealSig) Clone() builtinFunc { + newSig := &builtinGERealSig{} + newSig.cloneFrom(&b.baseBuiltinFunc) + return newSig +} + +func (b *builtinGERealSig) evalInt(row chunk.Row) (val int64, isNull bool, err error) { + return resOfGE(CompareReal(b.ctx, b.args[0], b.args[1], row, row)) +} + +type builtinGEStringSig struct { + baseBuiltinFunc +} + +func (b *builtinGEStringSig) Clone() builtinFunc { + newSig := &builtinGEStringSig{} + newSig.cloneFrom(&b.baseBuiltinFunc) + return newSig +} + +func (b *builtinGEStringSig) evalInt(row chunk.Row) (val int64, isNull bool, err error) { + return resOfGE(CompareString(b.ctx, b.args[0], b.args[1], row, row)) +} + +type builtinEQIntSig struct { + baseBuiltinFunc +} + +func (b *builtinEQIntSig) Clone() builtinFunc { + newSig := &builtinEQIntSig{} + newSig.cloneFrom(&b.baseBuiltinFunc) + return newSig +} + +func (b *builtinEQIntSig) evalInt(row chunk.Row) (val int64, isNull bool, err error) { + return resOfEQ(CompareInt(b.ctx, b.args[0], b.args[1], row, row)) +} + +type builtinEQRealSig struct { + baseBuiltinFunc +} + +func (b *builtinEQRealSig) Clone() builtinFunc { + newSig := &builtinEQRealSig{} + newSig.cloneFrom(&b.baseBuiltinFunc) + return newSig +} + +func (b *builtinEQRealSig) evalInt(row chunk.Row) (val int64, isNull bool, err error) { + return resOfEQ(CompareReal(b.ctx, b.args[0], b.args[1], row, row)) +} + +type 
builtinEQStringSig struct { + baseBuiltinFunc +} + +func (b *builtinEQStringSig) Clone() builtinFunc { + newSig := &builtinEQStringSig{} + newSig.cloneFrom(&b.baseBuiltinFunc) + return newSig +} + +func (b *builtinEQStringSig) evalInt(row chunk.Row) (val int64, isNull bool, err error) { + return resOfEQ(CompareString(b.ctx, b.args[0], b.args[1], row, row)) +} + +type builtinNEIntSig struct { + baseBuiltinFunc +} + +func (b *builtinNEIntSig) Clone() builtinFunc { + newSig := &builtinNEIntSig{} + newSig.cloneFrom(&b.baseBuiltinFunc) + return newSig +} + +func (b *builtinNEIntSig) evalInt(row chunk.Row) (val int64, isNull bool, err error) { + return resOfNE(CompareInt(b.ctx, b.args[0], b.args[1], row, row)) +} + +type builtinNERealSig struct { + baseBuiltinFunc +} + +func (b *builtinNERealSig) Clone() builtinFunc { + newSig := &builtinNERealSig{} + newSig.cloneFrom(&b.baseBuiltinFunc) + return newSig +} + +func (b *builtinNERealSig) evalInt(row chunk.Row) (val int64, isNull bool, err error) { + return resOfNE(CompareReal(b.ctx, b.args[0], b.args[1], row, row)) +} + +type builtinNEStringSig struct { + baseBuiltinFunc +} + +func (b *builtinNEStringSig) Clone() builtinFunc { + newSig := &builtinNEStringSig{} + newSig.cloneFrom(&b.baseBuiltinFunc) + return newSig +} + +func (b *builtinNEStringSig) evalInt(row chunk.Row) (val int64, isNull bool, err error) { + return resOfNE(CompareString(b.ctx, b.args[0], b.args[1], row, row)) +} + +func resOfLT(val int64, isNull bool, err error) (int64, bool, error) { + if isNull || err != nil { + return 0, isNull, err + } + if val < 0 { + val = 1 + } else { + val = 0 + } + return val, false, nil +} + +func resOfLE(val int64, isNull bool, err error) (int64, bool, error) { + if isNull || err != nil { + return 0, isNull, err + } + if val <= 0 { + val = 1 + } else { + val = 0 + } + return val, false, nil +} + +func resOfGT(val int64, isNull bool, err error) (int64, bool, error) { + if isNull || err != nil { + return 0, isNull, err + } + if val > 0 { + val = 1 + } else { + val = 0 + } + return val, false, nil +} + +func resOfGE(val int64, isNull bool, err error) (int64, bool, error) { + if isNull || err != nil { + return 0, isNull, err + } + if val >= 0 { + val = 1 + } else { + val = 0 + } + return val, false, nil +} + +func resOfEQ(val int64, isNull bool, err error) (int64, bool, error) { + if isNull || err != nil { + return 0, isNull, err + } + if val == 0 { + val = 1 + } else { + val = 0 + } + return val, false, nil +} + +func resOfNE(val int64, isNull bool, err error) (int64, bool, error) { + if isNull || err != nil { + return 0, isNull, err + } + if val != 0 { + val = 1 + } else { + val = 0 + } + return val, false, nil +} + +// compareNull compares null values based on the following rules. +// 1. NULL is considered to be equal to NULL +// 2. NULL is considered to be smaller than a non-NULL value. +// NOTE: (lhsIsNull == true) or (rhsIsNull == true) is required. +func compareNull(lhsIsNull, rhsIsNull bool) int64 { + if lhsIsNull && rhsIsNull { + return 0 + } + if lhsIsNull { + return -1 + } + return 1 +} + +// CompareFunc defines the compare function prototype. +type CompareFunc = func(sctx sessionctx.Context, lhsArg, rhsArg Expression, lhsRow, rhsRow chunk.Row) (int64, bool, error) + +// CompareInt compares two integers. 
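+// The signedness of both arguments is taken into account, and a NULL argument compares as smaller than any non-NULL value.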
+func CompareInt(sctx sessionctx.Context, lhsArg, rhsArg Expression, lhsRow, rhsRow chunk.Row) (int64, bool, error) { + arg0, isNull0, err := lhsArg.EvalInt(sctx, lhsRow) + if err != nil { + return 0, true, err + } + + arg1, isNull1, err := rhsArg.EvalInt(sctx, rhsRow) + if err != nil { + return 0, true, err + } + + // compare null values. + if isNull0 || isNull1 { + return compareNull(isNull0, isNull1), true, nil + } + + isUnsigned0, isUnsigned1 := mysql.HasUnsignedFlag(lhsArg.GetType().Flag), mysql.HasUnsignedFlag(rhsArg.GetType().Flag) + var res int + switch { + case isUnsigned0 && isUnsigned1: + res = types.CompareUint64(uint64(arg0), uint64(arg1)) + case isUnsigned0 && !isUnsigned1: + if arg1 < 0 || uint64(arg0) > math.MaxInt64 { + res = 1 + } else { + res = types.CompareInt64(arg0, arg1) + } + case !isUnsigned0 && isUnsigned1: + if arg0 < 0 || uint64(arg1) > math.MaxInt64 { + res = -1 + } else { + res = types.CompareInt64(arg0, arg1) + } + case !isUnsigned0 && !isUnsigned1: + res = types.CompareInt64(arg0, arg1) + } + return int64(res), false, nil +} + +// CompareString compares two strings. +func CompareString(sctx sessionctx.Context, lhsArg, rhsArg Expression, lhsRow, rhsRow chunk.Row) (int64, bool, error) { + arg0, isNull0, err := lhsArg.EvalString(sctx, lhsRow) + if err != nil { + return 0, true, err + } + + arg1, isNull1, err := rhsArg.EvalString(sctx, rhsRow) + if err != nil { + return 0, true, err + } + + if isNull0 || isNull1 { + return compareNull(isNull0, isNull1), true, nil + } + return int64(types.CompareString(arg0, arg1)), false, nil +} + +// CompareReal compares two float-point values. +func CompareReal(sctx sessionctx.Context, lhsArg, rhsArg Expression, lhsRow, rhsRow chunk.Row) (int64, bool, error) { + arg0, isNull0, err := lhsArg.EvalReal(sctx, lhsRow) + if err != nil { + return 0, true, err + } + + arg1, isNull1, err := rhsArg.EvalReal(sctx, rhsRow) + if err != nil { + return 0, true, err + } + + if isNull0 || isNull1 { + return compareNull(isNull0, isNull1), true, nil + } + return int64(types.CompareFloat64(arg0, arg1)), false, nil +} diff --git a/expression/builtin_compare_test.go b/expression/builtin_compare_test.go new file mode 100644 index 0000000..26b8c10 --- /dev/null +++ b/expression/builtin_compare_test.go @@ -0,0 +1,50 @@ +// Copyright 2017 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package expression + +import ( + . "github.com/pingcap/check" + "github.com/pingcap/tidb/parser/ast" + "github.com/pingcap/tidb/parser/mysql" + "github.com/pingcap/tidb/util/chunk" +) + +func (s *testEvaluatorSuite) TestCompare(c *C) { + intVal, uintVal, realVal, stringVal := 1, uint64(1), 1.1, "123" + // test cases for generating function signatures. 
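+ // Each case asserts both the inferred argument field type and the evaluated comparison result.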
+ tests := []struct { + arg0 interface{} + arg1 interface{} + funcName string + tp byte + expected int64 + }{ + {intVal, intVal, ast.LT, mysql.TypeLonglong, 0}, + {stringVal, stringVal, ast.LT, mysql.TypeVarString, 0}, + {realVal, realVal, ast.LT, mysql.TypeDouble, 0}, + {uintVal, uintVal, ast.EQ, mysql.TypeLonglong, 1}, + } + + for _, t := range tests { + bf, err := funcs[t.funcName].getFunction(s.ctx, s.primitiveValsToConstants([]interface{}{t.arg0, t.arg1})) + c.Assert(err, IsNil) + args := bf.getArgs() + c.Assert(args[0].GetType().Tp, Equals, t.tp) + c.Assert(args[1].GetType().Tp, Equals, t.tp) + res, isNil, err := bf.evalInt(chunk.Row{}) + c.Assert(err, IsNil) + c.Assert(isNil, IsFalse) + c.Assert(res, Equals, t.expected) + } +} diff --git a/expression/builtin_compare_vec.go b/expression/builtin_compare_vec.go new file mode 100644 index 0000000..50e8c9e --- /dev/null +++ b/expression/builtin_compare_vec.go @@ -0,0 +1,292 @@ +// Copyright 2019 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package expression + +import ( + "github.com/pingcap/tidb/parser/mysql" + "github.com/pingcap/tidb/types" + "github.com/pingcap/tidb/util/chunk" +) + +func (b *builtinGEIntSig) vectorized() bool { + return true +} + +func (b *builtinGEIntSig) vecEvalInt(input *chunk.Chunk, result *chunk.Column) error { + n := input.NumRows() + var err error + var buf0, buf1 *chunk.Column + buf0, err = b.bufAllocator.get(types.ETInt, n) + if err != nil { + return err + } + defer b.bufAllocator.put(buf0) + if err = b.args[0].VecEvalInt(b.ctx, input, buf0); err != nil { + return err + } + buf1, err = b.bufAllocator.get(types.ETInt, n) + if err != nil { + return err + } + defer b.bufAllocator.put(buf1) + if err = b.args[1].VecEvalInt(b.ctx, input, buf1); err != nil { + return err + } + + result.ResizeInt64(n, false) + vecCompareInt(mysql.HasUnsignedFlag(b.args[0].GetType().Flag), mysql.HasUnsignedFlag(b.args[1].GetType().Flag), buf0, buf1, result) + result.MergeNulls(buf0, buf1) + vecResOfGE(result.Int64s()) + return nil +} + +func (b *builtinEQIntSig) vectorized() bool { + return true +} + +func (b *builtinEQIntSig) vecEvalInt(input *chunk.Chunk, result *chunk.Column) error { + n := input.NumRows() + var err error + var buf0, buf1 *chunk.Column + buf0, err = b.bufAllocator.get(types.ETInt, n) + if err != nil { + return err + } + defer b.bufAllocator.put(buf0) + if err := b.args[0].VecEvalInt(b.ctx, input, buf0); err != nil { + return err + } + buf1, err = b.bufAllocator.get(types.ETInt, n) + if err != nil { + return err + } + defer b.bufAllocator.put(buf1) + if err := b.args[1].VecEvalInt(b.ctx, input, buf1); err != nil { + return err + } + + result.ResizeInt64(n, false) + vecCompareInt(mysql.HasUnsignedFlag(b.args[0].GetType().Flag), mysql.HasUnsignedFlag(b.args[1].GetType().Flag), buf0, buf1, result) + result.MergeNulls(buf0, buf1) + vecResOfEQ(result.Int64s()) + return nil +} + +func (b *builtinNEIntSig) vectorized() bool { + return true +} + +func (b *builtinNEIntSig) vecEvalInt(input *chunk.Chunk, result *chunk.Column) error { + n := input.NumRows() + var err error 
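+ // Evaluate both integer arguments into scratch buffers, then compare them element-wise into result below.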
+ var buf0, buf1 *chunk.Column + buf0, err = b.bufAllocator.get(types.ETInt, n) + if err != nil { + return err + } + defer b.bufAllocator.put(buf0) + if err := b.args[0].VecEvalInt(b.ctx, input, buf0); err != nil { + return err + } + buf1, err = b.bufAllocator.get(types.ETInt, n) + if err != nil { + return err + } + defer b.bufAllocator.put(buf1) + if err := b.args[1].VecEvalInt(b.ctx, input, buf1); err != nil { + return err + } + + result.ResizeInt64(n, false) + vecCompareInt(mysql.HasUnsignedFlag(b.args[0].GetType().Flag), mysql.HasUnsignedFlag(b.args[1].GetType().Flag), buf0, buf1, result) + result.MergeNulls(buf0, buf1) + vecResOfNE(result.Int64s()) + return nil +} + +func (b *builtinGTIntSig) vectorized() bool { + return true +} + +func (b *builtinGTIntSig) vecEvalInt(input *chunk.Chunk, result *chunk.Column) error { + n := input.NumRows() + var err error + var buf0, buf1 *chunk.Column + buf0, err = b.bufAllocator.get(types.ETInt, n) + if err != nil { + return err + } + defer b.bufAllocator.put(buf0) + if err := b.args[0].VecEvalInt(b.ctx, input, buf0); err != nil { + return err + } + buf1, err = b.bufAllocator.get(types.ETInt, n) + if err != nil { + return err + } + defer b.bufAllocator.put(buf1) + if err := b.args[1].VecEvalInt(b.ctx, input, buf1); err != nil { + return err + } + + result.ResizeInt64(n, false) + vecCompareInt(mysql.HasUnsignedFlag(b.args[0].GetType().Flag), mysql.HasUnsignedFlag(b.args[1].GetType().Flag), buf0, buf1, result) + result.MergeNulls(buf0, buf1) + vecResOfGT(result.Int64s()) + return nil +} + +func (b *builtinLEIntSig) vectorized() bool { + return true +} + +func (b *builtinLEIntSig) vecEvalInt(input *chunk.Chunk, result *chunk.Column) error { + n := input.NumRows() + var err error + var buf0, buf1 *chunk.Column + buf0, err = b.bufAllocator.get(types.ETInt, n) + if err != nil { + return err + } + defer b.bufAllocator.put(buf0) + if err := b.args[0].VecEvalInt(b.ctx, input, buf0); err != nil { + return err + } + buf1, err = b.bufAllocator.get(types.ETInt, n) + if err != nil { + return err + } + defer b.bufAllocator.put(buf1) + if err := b.args[1].VecEvalInt(b.ctx, input, buf1); err != nil { + return err + } + + result.ResizeInt64(n, false) + vecCompareInt(mysql.HasUnsignedFlag(b.args[0].GetType().Flag), mysql.HasUnsignedFlag(b.args[1].GetType().Flag), buf0, buf1, result) + result.MergeNulls(buf0, buf1) + vecResOfLE(result.Int64s()) + return nil +} + +func (b *builtinLTIntSig) vectorized() bool { + return true +} + +func (b *builtinLTIntSig) vecEvalInt(input *chunk.Chunk, result *chunk.Column) error { + n := input.NumRows() + var err error + var buf0, buf1 *chunk.Column + buf0, err = b.bufAllocator.get(types.ETInt, n) + if err != nil { + return err + } + defer b.bufAllocator.put(buf0) + if err := b.args[0].VecEvalInt(b.ctx, input, buf0); err != nil { + return err + } + buf1, err = b.bufAllocator.get(types.ETInt, n) + if err != nil { + return err + } + defer b.bufAllocator.put(buf1) + if err := b.args[1].VecEvalInt(b.ctx, input, buf1); err != nil { + return err + } + + result.ResizeInt64(n, false) + vecCompareInt(mysql.HasUnsignedFlag(b.args[0].GetType().Flag), mysql.HasUnsignedFlag(b.args[1].GetType().Flag), buf0, buf1, result) + result.MergeNulls(buf0, buf1) + vecResOfLT(result.Int64s()) + return nil +} + +func vecResOfLT(res []int64) { + n := len(res) + for i := 0; i < n; i++ { + if res[i] < 0 { + res[i] = 1 + } else { + res[i] = 0 + } + } +} + +func vecResOfNE(res []int64) { + n := len(res) + for i := 0; i < n; i++ { + if res[i] != 0 { + res[i] = 1 + } 
else { + res[i] = 0 + } + } +} + +func vecResOfEQ(res []int64) { + n := len(res) + for i := 0; i < n; i++ { + if res[i] == 0 { + res[i] = 1 + } else { + res[i] = 0 + } + } +} + +func vecResOfLE(res []int64) { + n := len(res) + for i := 0; i < n; i++ { + if res[i] <= 0 { + res[i] = 1 + } else { + res[i] = 0 + } + } +} + +func vecResOfGT(res []int64) { + n := len(res) + for i := 0; i < n; i++ { + if res[i] > 0 { + res[i] = 1 + } else { + res[i] = 0 + } + } +} + +func vecResOfGE(res []int64) { + n := len(res) + for i := 0; i < n; i++ { + if res[i] >= 0 { + res[i] = 1 + } else { + res[i] = 0 + } + } +} + +//vecCompareInt is vectorized CompareInt() +func vecCompareInt(isUnsigned0, isUnsigned1 bool, largs, rargs, result *chunk.Column) { + switch { + case isUnsigned0 && isUnsigned1: + types.VecCompareUU(largs.Uint64s(), rargs.Uint64s(), result.Int64s()) + case isUnsigned0 && !isUnsigned1: + types.VecCompareUI(largs.Uint64s(), rargs.Int64s(), result.Int64s()) + case !isUnsigned0 && isUnsigned1: + types.VecCompareIU(largs.Int64s(), rargs.Uint64s(), result.Int64s()) + case !isUnsigned0 && !isUnsigned1: + types.VecCompareII(largs.Int64s(), rargs.Int64s(), result.Int64s()) + } +} diff --git a/expression/builtin_compare_vec_generated.go b/expression/builtin_compare_vec_generated.go new file mode 100644 index 0000000..96746ef --- /dev/null +++ b/expression/builtin_compare_vec_generated.go @@ -0,0 +1,513 @@ +// Copyright 2019 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by go generate in expression/generator; DO NOT EDIT. 
+ +package expression + +import ( + "github.com/pingcap/tidb/types" + "github.com/pingcap/tidb/util/chunk" +) + +func (b *builtinLTRealSig) vecEvalInt(input *chunk.Chunk, result *chunk.Column) error { + n := input.NumRows() + buf0, err := b.bufAllocator.get(types.ETReal, n) + if err != nil { + return err + } + defer b.bufAllocator.put(buf0) + if err := b.args[0].VecEvalReal(b.ctx, input, buf0); err != nil { + return err + } + buf1, err := b.bufAllocator.get(types.ETReal, n) + if err != nil { + return err + } + defer b.bufAllocator.put(buf1) + if err := b.args[1].VecEvalReal(b.ctx, input, buf1); err != nil { + return err + } + + arg0 := buf0.Float64s() + arg1 := buf1.Float64s() + result.ResizeInt64(n, false) + result.MergeNulls(buf0, buf1) + i64s := result.Int64s() + for i := 0; i < n; i++ { + if result.IsNull(i) { + continue + } + val := types.CompareFloat64(arg0[i], arg1[i]) + if val < 0 { + i64s[i] = 1 + } else { + i64s[i] = 0 + } + } + return nil +} + +func (b *builtinLTRealSig) vectorized() bool { + return true +} + +func (b *builtinLTStringSig) vecEvalInt(input *chunk.Chunk, result *chunk.Column) error { + n := input.NumRows() + buf0, err := b.bufAllocator.get(types.ETString, n) + if err != nil { + return err + } + defer b.bufAllocator.put(buf0) + if err := b.args[0].VecEvalString(b.ctx, input, buf0); err != nil { + return err + } + buf1, err := b.bufAllocator.get(types.ETString, n) + if err != nil { + return err + } + defer b.bufAllocator.put(buf1) + if err := b.args[1].VecEvalString(b.ctx, input, buf1); err != nil { + return err + } + + result.ResizeInt64(n, false) + result.MergeNulls(buf0, buf1) + i64s := result.Int64s() + for i := 0; i < n; i++ { + if result.IsNull(i) { + continue + } + val := types.CompareString(buf0.GetString(i), buf1.GetString(i)) + if val < 0 { + i64s[i] = 1 + } else { + i64s[i] = 0 + } + } + return nil +} + +func (b *builtinLTStringSig) vectorized() bool { + return true +} + +func (b *builtinLERealSig) vecEvalInt(input *chunk.Chunk, result *chunk.Column) error { + n := input.NumRows() + buf0, err := b.bufAllocator.get(types.ETReal, n) + if err != nil { + return err + } + defer b.bufAllocator.put(buf0) + if err := b.args[0].VecEvalReal(b.ctx, input, buf0); err != nil { + return err + } + buf1, err := b.bufAllocator.get(types.ETReal, n) + if err != nil { + return err + } + defer b.bufAllocator.put(buf1) + if err := b.args[1].VecEvalReal(b.ctx, input, buf1); err != nil { + return err + } + + arg0 := buf0.Float64s() + arg1 := buf1.Float64s() + result.ResizeInt64(n, false) + result.MergeNulls(buf0, buf1) + i64s := result.Int64s() + for i := 0; i < n; i++ { + if result.IsNull(i) { + continue + } + val := types.CompareFloat64(arg0[i], arg1[i]) + if val <= 0 { + i64s[i] = 1 + } else { + i64s[i] = 0 + } + } + return nil +} + +func (b *builtinLERealSig) vectorized() bool { + return true +} + +func (b *builtinLEStringSig) vecEvalInt(input *chunk.Chunk, result *chunk.Column) error { + n := input.NumRows() + buf0, err := b.bufAllocator.get(types.ETString, n) + if err != nil { + return err + } + defer b.bufAllocator.put(buf0) + if err := b.args[0].VecEvalString(b.ctx, input, buf0); err != nil { + return err + } + buf1, err := b.bufAllocator.get(types.ETString, n) + if err != nil { + return err + } + defer b.bufAllocator.put(buf1) + if err := b.args[1].VecEvalString(b.ctx, input, buf1); err != nil { + return err + } + + result.ResizeInt64(n, false) + result.MergeNulls(buf0, buf1) + i64s := result.Int64s() + for i := 0; i < n; i++ { + if result.IsNull(i) { + continue + } + 
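+ // CompareString returns -1, 0, or 1; encode "less than or equal" as 1 and everything else as 0.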
val := types.CompareString(buf0.GetString(i), buf1.GetString(i)) + if val <= 0 { + i64s[i] = 1 + } else { + i64s[i] = 0 + } + } + return nil +} + +func (b *builtinLEStringSig) vectorized() bool { + return true +} + +func (b *builtinGTRealSig) vecEvalInt(input *chunk.Chunk, result *chunk.Column) error { + n := input.NumRows() + buf0, err := b.bufAllocator.get(types.ETReal, n) + if err != nil { + return err + } + defer b.bufAllocator.put(buf0) + if err := b.args[0].VecEvalReal(b.ctx, input, buf0); err != nil { + return err + } + buf1, err := b.bufAllocator.get(types.ETReal, n) + if err != nil { + return err + } + defer b.bufAllocator.put(buf1) + if err := b.args[1].VecEvalReal(b.ctx, input, buf1); err != nil { + return err + } + + arg0 := buf0.Float64s() + arg1 := buf1.Float64s() + result.ResizeInt64(n, false) + result.MergeNulls(buf0, buf1) + i64s := result.Int64s() + for i := 0; i < n; i++ { + if result.IsNull(i) { + continue + } + val := types.CompareFloat64(arg0[i], arg1[i]) + if val > 0 { + i64s[i] = 1 + } else { + i64s[i] = 0 + } + } + return nil +} + +func (b *builtinGTRealSig) vectorized() bool { + return true +} + +func (b *builtinGTStringSig) vecEvalInt(input *chunk.Chunk, result *chunk.Column) error { + n := input.NumRows() + buf0, err := b.bufAllocator.get(types.ETString, n) + if err != nil { + return err + } + defer b.bufAllocator.put(buf0) + if err := b.args[0].VecEvalString(b.ctx, input, buf0); err != nil { + return err + } + buf1, err := b.bufAllocator.get(types.ETString, n) + if err != nil { + return err + } + defer b.bufAllocator.put(buf1) + if err := b.args[1].VecEvalString(b.ctx, input, buf1); err != nil { + return err + } + + result.ResizeInt64(n, false) + result.MergeNulls(buf0, buf1) + i64s := result.Int64s() + for i := 0; i < n; i++ { + if result.IsNull(i) { + continue + } + val := types.CompareString(buf0.GetString(i), buf1.GetString(i)) + if val > 0 { + i64s[i] = 1 + } else { + i64s[i] = 0 + } + } + return nil +} + +func (b *builtinGTStringSig) vectorized() bool { + return true +} + +func (b *builtinGERealSig) vecEvalInt(input *chunk.Chunk, result *chunk.Column) error { + n := input.NumRows() + buf0, err := b.bufAllocator.get(types.ETReal, n) + if err != nil { + return err + } + defer b.bufAllocator.put(buf0) + if err := b.args[0].VecEvalReal(b.ctx, input, buf0); err != nil { + return err + } + buf1, err := b.bufAllocator.get(types.ETReal, n) + if err != nil { + return err + } + defer b.bufAllocator.put(buf1) + if err := b.args[1].VecEvalReal(b.ctx, input, buf1); err != nil { + return err + } + + arg0 := buf0.Float64s() + arg1 := buf1.Float64s() + result.ResizeInt64(n, false) + result.MergeNulls(buf0, buf1) + i64s := result.Int64s() + for i := 0; i < n; i++ { + if result.IsNull(i) { + continue + } + val := types.CompareFloat64(arg0[i], arg1[i]) + if val >= 0 { + i64s[i] = 1 + } else { + i64s[i] = 0 + } + } + return nil +} + +func (b *builtinGERealSig) vectorized() bool { + return true +} + +func (b *builtinGEStringSig) vecEvalInt(input *chunk.Chunk, result *chunk.Column) error { + n := input.NumRows() + buf0, err := b.bufAllocator.get(types.ETString, n) + if err != nil { + return err + } + defer b.bufAllocator.put(buf0) + if err := b.args[0].VecEvalString(b.ctx, input, buf0); err != nil { + return err + } + buf1, err := b.bufAllocator.get(types.ETString, n) + if err != nil { + return err + } + defer b.bufAllocator.put(buf1) + if err := b.args[1].VecEvalString(b.ctx, input, buf1); err != nil { + return err + } + + result.ResizeInt64(n, false) + 
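+ // A row is NULL if either argument is NULL; such rows are skipped in the loop below.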
result.MergeNulls(buf0, buf1) + i64s := result.Int64s() + for i := 0; i < n; i++ { + if result.IsNull(i) { + continue + } + val := types.CompareString(buf0.GetString(i), buf1.GetString(i)) + if val >= 0 { + i64s[i] = 1 + } else { + i64s[i] = 0 + } + } + return nil +} + +func (b *builtinGEStringSig) vectorized() bool { + return true +} + +func (b *builtinEQRealSig) vecEvalInt(input *chunk.Chunk, result *chunk.Column) error { + n := input.NumRows() + buf0, err := b.bufAllocator.get(types.ETReal, n) + if err != nil { + return err + } + defer b.bufAllocator.put(buf0) + if err := b.args[0].VecEvalReal(b.ctx, input, buf0); err != nil { + return err + } + buf1, err := b.bufAllocator.get(types.ETReal, n) + if err != nil { + return err + } + defer b.bufAllocator.put(buf1) + if err := b.args[1].VecEvalReal(b.ctx, input, buf1); err != nil { + return err + } + + arg0 := buf0.Float64s() + arg1 := buf1.Float64s() + result.ResizeInt64(n, false) + result.MergeNulls(buf0, buf1) + i64s := result.Int64s() + for i := 0; i < n; i++ { + if result.IsNull(i) { + continue + } + val := types.CompareFloat64(arg0[i], arg1[i]) + if val == 0 { + i64s[i] = 1 + } else { + i64s[i] = 0 + } + } + return nil +} + +func (b *builtinEQRealSig) vectorized() bool { + return true +} + +func (b *builtinEQStringSig) vecEvalInt(input *chunk.Chunk, result *chunk.Column) error { + n := input.NumRows() + buf0, err := b.bufAllocator.get(types.ETString, n) + if err != nil { + return err + } + defer b.bufAllocator.put(buf0) + if err := b.args[0].VecEvalString(b.ctx, input, buf0); err != nil { + return err + } + buf1, err := b.bufAllocator.get(types.ETString, n) + if err != nil { + return err + } + defer b.bufAllocator.put(buf1) + if err := b.args[1].VecEvalString(b.ctx, input, buf1); err != nil { + return err + } + + result.ResizeInt64(n, false) + result.MergeNulls(buf0, buf1) + i64s := result.Int64s() + for i := 0; i < n; i++ { + if result.IsNull(i) { + continue + } + val := types.CompareString(buf0.GetString(i), buf1.GetString(i)) + if val == 0 { + i64s[i] = 1 + } else { + i64s[i] = 0 + } + } + return nil +} + +func (b *builtinEQStringSig) vectorized() bool { + return true +} + +func (b *builtinNERealSig) vecEvalInt(input *chunk.Chunk, result *chunk.Column) error { + n := input.NumRows() + buf0, err := b.bufAllocator.get(types.ETReal, n) + if err != nil { + return err + } + defer b.bufAllocator.put(buf0) + if err := b.args[0].VecEvalReal(b.ctx, input, buf0); err != nil { + return err + } + buf1, err := b.bufAllocator.get(types.ETReal, n) + if err != nil { + return err + } + defer b.bufAllocator.put(buf1) + if err := b.args[1].VecEvalReal(b.ctx, input, buf1); err != nil { + return err + } + + arg0 := buf0.Float64s() + arg1 := buf1.Float64s() + result.ResizeInt64(n, false) + result.MergeNulls(buf0, buf1) + i64s := result.Int64s() + for i := 0; i < n; i++ { + if result.IsNull(i) { + continue + } + val := types.CompareFloat64(arg0[i], arg1[i]) + if val != 0 { + i64s[i] = 1 + } else { + i64s[i] = 0 + } + } + return nil +} + +func (b *builtinNERealSig) vectorized() bool { + return true +} + +func (b *builtinNEStringSig) vecEvalInt(input *chunk.Chunk, result *chunk.Column) error { + n := input.NumRows() + buf0, err := b.bufAllocator.get(types.ETString, n) + if err != nil { + return err + } + defer b.bufAllocator.put(buf0) + if err := b.args[0].VecEvalString(b.ctx, input, buf0); err != nil { + return err + } + buf1, err := b.bufAllocator.get(types.ETString, n) + if err != nil { + return err + } + defer b.bufAllocator.put(buf1) + if err := 
b.args[1].VecEvalString(b.ctx, input, buf1); err != nil { + return err + } + + result.ResizeInt64(n, false) + result.MergeNulls(buf0, buf1) + i64s := result.Int64s() + for i := 0; i < n; i++ { + if result.IsNull(i) { + continue + } + val := types.CompareString(buf0.GetString(i), buf1.GetString(i)) + if val != 0 { + i64s[i] = 1 + } else { + i64s[i] = 0 + } + } + return nil +} + +func (b *builtinNEStringSig) vectorized() bool { + return true +} diff --git a/expression/builtin_compare_vec_generated_test.go b/expression/builtin_compare_vec_generated_test.go new file mode 100644 index 0000000..fa01b73 --- /dev/null +++ b/expression/builtin_compare_vec_generated_test.go @@ -0,0 +1,67 @@ +// Copyright 2019 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by go generate in expression/generator; DO NOT EDIT. + +package expression + +import ( + "testing" + + . "github.com/pingcap/check" + "github.com/pingcap/tidb/parser/ast" + "github.com/pingcap/tidb/types" +) + +var vecGeneratedBuiltinCompareCases = map[string][]vecExprBenchCase{ + ast.LT: { + {retEvalType: types.ETInt, childrenTypes: []types.EvalType{types.ETReal, types.ETReal}}, + {retEvalType: types.ETInt, childrenTypes: []types.EvalType{types.ETString, types.ETString}}, + }, + ast.LE: { + {retEvalType: types.ETInt, childrenTypes: []types.EvalType{types.ETReal, types.ETReal}}, + {retEvalType: types.ETInt, childrenTypes: []types.EvalType{types.ETString, types.ETString}}, + }, + ast.GT: { + {retEvalType: types.ETInt, childrenTypes: []types.EvalType{types.ETReal, types.ETReal}}, + {retEvalType: types.ETInt, childrenTypes: []types.EvalType{types.ETString, types.ETString}}, + }, + ast.GE: { + {retEvalType: types.ETInt, childrenTypes: []types.EvalType{types.ETReal, types.ETReal}}, + {retEvalType: types.ETInt, childrenTypes: []types.EvalType{types.ETString, types.ETString}}, + }, + ast.EQ: { + {retEvalType: types.ETInt, childrenTypes: []types.EvalType{types.ETReal, types.ETReal}}, + {retEvalType: types.ETInt, childrenTypes: []types.EvalType{types.ETString, types.ETString}}, + }, + ast.NE: { + {retEvalType: types.ETInt, childrenTypes: []types.EvalType{types.ETReal, types.ETReal}}, + {retEvalType: types.ETInt, childrenTypes: []types.EvalType{types.ETString, types.ETString}}, + }, +} + +func (s *testEvaluatorSuite) TestVectorizedGeneratedBuiltinCompareEvalOneVec(c *C) { + testVectorizedEvalOneVec(c, vecGeneratedBuiltinCompareCases) +} + +func (s *testEvaluatorSuite) TestVectorizedGeneratedBuiltinCompareFunc(c *C) { + testVectorizedBuiltinFunc(c, vecGeneratedBuiltinCompareCases) +} + +func BenchmarkVectorizedGeneratedBuiltinCompareEvalOneVec(b *testing.B) { + benchmarkVectorizedEvalOneVec(b, vecGeneratedBuiltinCompareCases) +} + +func BenchmarkVectorizedGeneratedBuiltinCompareFunc(b *testing.B) { + benchmarkVectorizedBuiltinFunc(b, vecGeneratedBuiltinCompareCases) +} diff --git a/expression/builtin_compare_vec_test.go b/expression/builtin_compare_vec_test.go new file mode 100644 index 0000000..3033022 --- /dev/null +++ b/expression/builtin_compare_vec_test.go @@ -0,0 +1,134 @@ +// 
Copyright 2019 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package expression + +import ( + "testing" + + . "github.com/pingcap/check" + "github.com/pingcap/tidb/parser/ast" + "github.com/pingcap/tidb/parser/mysql" + "github.com/pingcap/tidb/types" +) + +var vecBuiltinCompareCases = map[string][]vecExprBenchCase{ + ast.NE: { + {retEvalType: types.ETInt, childrenTypes: []types.EvalType{types.ETInt, types.ETInt}}, + {retEvalType: types.ETInt, childrenTypes: []types.EvalType{types.ETInt, types.ETInt}, + childrenFieldTypes: []*types.FieldType{{Tp: mysql.TypeLonglong, Flag: mysql.UnsignedFlag}, + {Tp: mysql.TypeLonglong, Flag: mysql.UnsignedFlag}, + }, + }, + {retEvalType: types.ETInt, childrenTypes: []types.EvalType{types.ETInt, types.ETInt}, + childrenFieldTypes: []*types.FieldType{{Tp: mysql.TypeLonglong}, + {Tp: mysql.TypeLonglong, Flag: mysql.UnsignedFlag}, + }, + }, + {retEvalType: types.ETInt, childrenTypes: []types.EvalType{types.ETInt, types.ETInt}, + childrenFieldTypes: []*types.FieldType{{Tp: mysql.TypeLonglong, Flag: mysql.UnsignedFlag}, + {Tp: mysql.TypeLonglong}, + }, + }, + }, + ast.IsNull: {}, + ast.LE: { + {retEvalType: types.ETInt, childrenTypes: []types.EvalType{types.ETInt, types.ETInt}}, + {retEvalType: types.ETInt, childrenTypes: []types.EvalType{types.ETInt, types.ETInt}, + childrenFieldTypes: []*types.FieldType{{Tp: mysql.TypeLonglong, Flag: mysql.UnsignedFlag}, + {Tp: mysql.TypeLonglong, Flag: mysql.UnsignedFlag}, + }, + }, + {retEvalType: types.ETInt, childrenTypes: []types.EvalType{types.ETInt, types.ETInt}, + childrenFieldTypes: []*types.FieldType{{Tp: mysql.TypeLonglong}, + {Tp: mysql.TypeLonglong, Flag: mysql.UnsignedFlag}, + }, + }, + {retEvalType: types.ETInt, childrenTypes: []types.EvalType{types.ETInt, types.ETInt}, + childrenFieldTypes: []*types.FieldType{{Tp: mysql.TypeLonglong, Flag: mysql.UnsignedFlag}, + {Tp: mysql.TypeLonglong}, + }, + }, + }, + ast.LT: { + {retEvalType: types.ETInt, childrenTypes: []types.EvalType{types.ETInt, types.ETInt}}, + {retEvalType: types.ETInt, childrenTypes: []types.EvalType{types.ETInt, types.ETInt}, + childrenFieldTypes: []*types.FieldType{{Tp: mysql.TypeLonglong, Flag: mysql.UnsignedFlag}, + {Tp: mysql.TypeLonglong, Flag: mysql.UnsignedFlag}, + }, + }, + {retEvalType: types.ETInt, childrenTypes: []types.EvalType{types.ETInt, types.ETInt}, + childrenFieldTypes: []*types.FieldType{{Tp: mysql.TypeLonglong}, + {Tp: mysql.TypeLonglong, Flag: mysql.UnsignedFlag}, + }, + }, + {retEvalType: types.ETInt, childrenTypes: []types.EvalType{types.ETInt, types.ETInt}, + childrenFieldTypes: []*types.FieldType{{Tp: mysql.TypeLonglong, Flag: mysql.UnsignedFlag}, + {Tp: mysql.TypeLonglong}, + }, + }, + }, + ast.GT: { + {retEvalType: types.ETInt, childrenTypes: []types.EvalType{types.ETInt, types.ETInt}}, + {retEvalType: types.ETInt, childrenTypes: []types.EvalType{types.ETInt, types.ETInt}, + childrenFieldTypes: []*types.FieldType{{Tp: mysql.TypeLonglong, Flag: mysql.UnsignedFlag}, + {Tp: mysql.TypeLonglong, Flag: mysql.UnsignedFlag}, + }, + }, + {retEvalType: types.ETInt, childrenTypes: 
[]types.EvalType{types.ETInt, types.ETInt}, + childrenFieldTypes: []*types.FieldType{{Tp: mysql.TypeLonglong}, + {Tp: mysql.TypeLonglong, Flag: mysql.UnsignedFlag}, + }, + }, + {retEvalType: types.ETInt, childrenTypes: []types.EvalType{types.ETInt, types.ETInt}, + childrenFieldTypes: []*types.FieldType{{Tp: mysql.TypeLonglong, Flag: mysql.UnsignedFlag}, + {Tp: mysql.TypeLonglong}, + }, + }, + }, + ast.EQ: {}, + ast.GE: { + {retEvalType: types.ETInt, childrenTypes: []types.EvalType{types.ETInt, types.ETInt}}, + {retEvalType: types.ETInt, childrenTypes: []types.EvalType{types.ETInt, types.ETInt}, + childrenFieldTypes: []*types.FieldType{{Tp: mysql.TypeLonglong, Flag: mysql.UnsignedFlag}, + {Tp: mysql.TypeLonglong, Flag: mysql.UnsignedFlag}, + }, + }, + {retEvalType: types.ETInt, childrenTypes: []types.EvalType{types.ETInt, types.ETInt}, + childrenFieldTypes: []*types.FieldType{{Tp: mysql.TypeLonglong}, + {Tp: mysql.TypeLonglong, Flag: mysql.UnsignedFlag}, + }, + }, + {retEvalType: types.ETInt, childrenTypes: []types.EvalType{types.ETInt, types.ETInt}, + childrenFieldTypes: []*types.FieldType{{Tp: mysql.TypeLonglong, Flag: mysql.UnsignedFlag}, + {Tp: mysql.TypeLonglong}, + }, + }, + }, +} + +func (s *testEvaluatorSuite) TestVectorizedBuiltinCompareEvalOneVec(c *C) { + testVectorizedEvalOneVec(c, vecBuiltinCompareCases) +} + +func (s *testEvaluatorSuite) TestVectorizedBuiltinCompareFunc(c *C) { + testVectorizedBuiltinFunc(c, vecBuiltinCompareCases) +} + +func BenchmarkVectorizedBuiltinCompareEvalOneVec(b *testing.B) { + benchmarkVectorizedEvalOneVec(b, vecBuiltinCompareCases) +} + +func BenchmarkVectorizedBuiltinCompareFunc(b *testing.B) { + benchmarkVectorizedBuiltinFunc(b, vecBuiltinCompareCases) +} diff --git a/expression/builtin_control.go b/expression/builtin_control.go new file mode 100644 index 0000000..502f01f --- /dev/null +++ b/expression/builtin_control.go @@ -0,0 +1,299 @@ +// Copyright 2015 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package expression + +import ( + "github.com/cznic/mathutil" + "github.com/pingcap/tidb/parser/charset" + "github.com/pingcap/tidb/parser/mysql" + "github.com/pingcap/tidb/sessionctx" + "github.com/pingcap/tidb/types" + "github.com/pingcap/tidb/util/chunk" + "github.com/pingcap/tipb/go-tipb" +) + +var ( + _ functionClass = &ifFunctionClass{} + _ functionClass = &ifNullFunctionClass{} +) + +var ( + _ builtinFunc = &builtinIfNullIntSig{} + _ builtinFunc = &builtinIfNullRealSig{} + _ builtinFunc = &builtinIfNullStringSig{} + _ builtinFunc = &builtinIfIntSig{} + _ builtinFunc = &builtinIfRealSig{} + _ builtinFunc = &builtinIfStringSig{} +) + +// InferType4ControlFuncs infer result type for builtin IF, IFNULL, NULLIF, LEAD and LAG. +func InferType4ControlFuncs(lhs, rhs *types.FieldType) *types.FieldType { + resultFieldType := &types.FieldType{} + if lhs.Tp == mysql.TypeNull { + *resultFieldType = *rhs + // If both arguments are NULL, make resulting type BINARY(0). 
+ if rhs.Tp == mysql.TypeNull {
+ resultFieldType.Tp = mysql.TypeString
+ resultFieldType.Flen, resultFieldType.Decimal = 0, 0
+ types.SetBinChsClnFlag(resultFieldType)
+ }
+ } else if rhs.Tp == mysql.TypeNull {
+ *resultFieldType = *lhs
+ } else {
+ resultFieldType = types.AggFieldType([]*types.FieldType{lhs, rhs})
+ evalType := types.AggregateEvalType([]*types.FieldType{lhs, rhs}, &resultFieldType.Flag)
+ if evalType == types.ETInt {
+ resultFieldType.Decimal = 0
+ } else {
+ if lhs.Decimal == types.UnspecifiedLength || rhs.Decimal == types.UnspecifiedLength {
+ resultFieldType.Decimal = types.UnspecifiedLength
+ } else {
+ resultFieldType.Decimal = mathutil.Max(lhs.Decimal, rhs.Decimal)
+ }
+ }
+ if types.IsNonBinaryStr(lhs) && !types.IsBinaryStr(rhs) {
+ resultFieldType.Charset, resultFieldType.Collate, resultFieldType.Flag = charset.CharsetUTF8MB4, charset.CollationUTF8MB4, 0
+ if mysql.HasBinaryFlag(lhs.Flag) || !types.IsNonBinaryStr(rhs) {
+ resultFieldType.Flag |= mysql.BinaryFlag
+ }
+ } else if types.IsNonBinaryStr(rhs) && !types.IsBinaryStr(lhs) {
+ resultFieldType.Charset, resultFieldType.Collate, resultFieldType.Flag = charset.CharsetUTF8MB4, charset.CollationUTF8MB4, 0
+ if mysql.HasBinaryFlag(rhs.Flag) || !types.IsNonBinaryStr(lhs) {
+ resultFieldType.Flag |= mysql.BinaryFlag
+ }
+ } else if types.IsBinaryStr(lhs) || types.IsBinaryStr(rhs) || !evalType.IsStringKind() {
+ types.SetBinChsClnFlag(resultFieldType)
+ } else {
+ resultFieldType.Charset, resultFieldType.Collate, resultFieldType.Flag = mysql.DefaultCharset, mysql.DefaultCollationName, 0
+ }
+ if evalType == types.ETInt {
+ lhsUnsignedFlag, rhsUnsignedFlag := mysql.HasUnsignedFlag(lhs.Flag), mysql.HasUnsignedFlag(rhs.Flag)
+ lhsFlagLen, rhsFlagLen := 0, 0
+ if !lhsUnsignedFlag {
+ lhsFlagLen = 1
+ }
+ if !rhsUnsignedFlag {
+ rhsFlagLen = 1
+ }
+ lhsFlen := lhs.Flen - lhsFlagLen
+ rhsFlen := rhs.Flen - rhsFlagLen
+ if lhs.Decimal != types.UnspecifiedLength {
+ lhsFlen -= lhs.Decimal
+ }
+ if rhs.Decimal != types.UnspecifiedLength {
+ rhsFlen -= rhs.Decimal
+ }
+ resultFieldType.Flen = mathutil.Max(lhsFlen, rhsFlen) + resultFieldType.Decimal + 1
+ } else {
+ resultFieldType.Flen = mathutil.Max(lhs.Flen, rhs.Flen)
+ }
+ }
+ // Fix decimal for int and string.
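+ // Integer results never keep a fractional part; string results use an unspecified decimal unless both arguments are NULL.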
+ resultEvalType := resultFieldType.EvalType() + if resultEvalType == types.ETInt { + resultFieldType.Decimal = 0 + } else if resultEvalType == types.ETString { + if lhs.Tp != mysql.TypeNull || rhs.Tp != mysql.TypeNull { + resultFieldType.Decimal = types.UnspecifiedLength + } + } + return resultFieldType +} + +type ifFunctionClass struct { + baseFunctionClass +} + +// getFunction see https://dev.mysql.com/doc/refman/5.7/en/control-flow-functions.html#function_if +func (c *ifFunctionClass) getFunction(ctx sessionctx.Context, args []Expression) (sig builtinFunc, err error) { + if err = c.verifyArgs(args); err != nil { + return nil, err + } + retTp := InferType4ControlFuncs(args[1].GetType(), args[2].GetType()) + evalTps := retTp.EvalType() + bf := newBaseBuiltinFuncWithTp(ctx, args, evalTps, types.ETInt, evalTps, evalTps) + retTp.Flag |= bf.tp.Flag + bf.tp = retTp + switch evalTps { + case types.ETInt: + sig = &builtinIfIntSig{bf} + sig.setPbCode(tipb.ScalarFuncSig_IfInt) + case types.ETReal: + sig = &builtinIfRealSig{bf} + sig.setPbCode(tipb.ScalarFuncSig_IfReal) + case types.ETString: + sig = &builtinIfStringSig{bf} + sig.setPbCode(tipb.ScalarFuncSig_IfString) + } + return sig, nil +} + +type builtinIfIntSig struct { + baseBuiltinFunc +} + +func (b *builtinIfIntSig) Clone() builtinFunc { + newSig := &builtinIfIntSig{} + newSig.cloneFrom(&b.baseBuiltinFunc) + return newSig +} + +func (b *builtinIfIntSig) evalInt(row chunk.Row) (ret int64, isNull bool, err error) { + arg0, isNull0, err := b.args[0].EvalInt(b.ctx, row) + if err != nil { + return 0, true, err + } + arg1, isNull1, err := b.args[1].EvalInt(b.ctx, row) + if (!isNull0 && arg0 != 0) || err != nil { + return arg1, isNull1, err + } + arg2, isNull2, err := b.args[2].EvalInt(b.ctx, row) + return arg2, isNull2, err +} + +type builtinIfRealSig struct { + baseBuiltinFunc +} + +func (b *builtinIfRealSig) Clone() builtinFunc { + newSig := &builtinIfRealSig{} + newSig.cloneFrom(&b.baseBuiltinFunc) + return newSig +} + +func (b *builtinIfRealSig) evalReal(row chunk.Row) (ret float64, isNull bool, err error) { + arg0, isNull0, err := b.args[0].EvalInt(b.ctx, row) + if err != nil { + return 0, true, err + } + arg1, isNull1, err := b.args[1].EvalReal(b.ctx, row) + if (!isNull0 && arg0 != 0) || err != nil { + return arg1, isNull1, err + } + arg2, isNull2, err := b.args[2].EvalReal(b.ctx, row) + return arg2, isNull2, err +} + +type builtinIfStringSig struct { + baseBuiltinFunc +} + +func (b *builtinIfStringSig) Clone() builtinFunc { + newSig := &builtinIfStringSig{} + newSig.cloneFrom(&b.baseBuiltinFunc) + return newSig +} + +func (b *builtinIfStringSig) evalString(row chunk.Row) (ret string, isNull bool, err error) { + arg0, isNull0, err := b.args[0].EvalInt(b.ctx, row) + if err != nil { + return "", true, err + } + arg1, isNull1, err := b.args[1].EvalString(b.ctx, row) + if (!isNull0 && arg0 != 0) || err != nil { + return arg1, isNull1, err + } + arg2, isNull2, err := b.args[2].EvalString(b.ctx, row) + return arg2, isNull2, err +} + +type ifNullFunctionClass struct { + baseFunctionClass +} + +func (c *ifNullFunctionClass) getFunction(ctx sessionctx.Context, args []Expression) (sig builtinFunc, err error) { + if err = c.verifyArgs(args); err != nil { + return nil, err + } + lhs, rhs := args[0].GetType(), args[1].GetType() + retTp := InferType4ControlFuncs(lhs, rhs) + retTp.Flag |= (lhs.Flag & mysql.NotNullFlag) | (rhs.Flag & mysql.NotNullFlag) + if lhs.Tp == mysql.TypeNull && rhs.Tp == mysql.TypeNull { + retTp.Tp = mysql.TypeNull + retTp.Flen, 
retTp.Decimal = 0, -1 + types.SetBinChsClnFlag(retTp) + } + evalTps := retTp.EvalType() + bf := newBaseBuiltinFuncWithTp(ctx, args, evalTps, evalTps, evalTps) + bf.tp = retTp + switch evalTps { + case types.ETInt: + sig = &builtinIfNullIntSig{bf} + sig.setPbCode(tipb.ScalarFuncSig_IfNullInt) + case types.ETReal: + sig = &builtinIfNullRealSig{bf} + sig.setPbCode(tipb.ScalarFuncSig_IfNullReal) + case types.ETString: + sig = &builtinIfNullStringSig{bf} + sig.setPbCode(tipb.ScalarFuncSig_IfNullString) + } + return sig, nil +} + +type builtinIfNullIntSig struct { + baseBuiltinFunc +} + +func (b *builtinIfNullIntSig) Clone() builtinFunc { + newSig := &builtinIfNullIntSig{} + newSig.cloneFrom(&b.baseBuiltinFunc) + return newSig +} + +func (b *builtinIfNullIntSig) evalInt(row chunk.Row) (int64, bool, error) { + arg0, isNull, err := b.args[0].EvalInt(b.ctx, row) + if !isNull || err != nil { + return arg0, err != nil, err + } + arg1, isNull, err := b.args[1].EvalInt(b.ctx, row) + return arg1, isNull || err != nil, err +} + +type builtinIfNullRealSig struct { + baseBuiltinFunc +} + +func (b *builtinIfNullRealSig) Clone() builtinFunc { + newSig := &builtinIfNullRealSig{} + newSig.cloneFrom(&b.baseBuiltinFunc) + return newSig +} + +func (b *builtinIfNullRealSig) evalReal(row chunk.Row) (float64, bool, error) { + arg0, isNull, err := b.args[0].EvalReal(b.ctx, row) + if !isNull || err != nil { + return arg0, err != nil, err + } + arg1, isNull, err := b.args[1].EvalReal(b.ctx, row) + return arg1, isNull || err != nil, err +} + +type builtinIfNullStringSig struct { + baseBuiltinFunc +} + +func (b *builtinIfNullStringSig) Clone() builtinFunc { + newSig := &builtinIfNullStringSig{} + newSig.cloneFrom(&b.baseBuiltinFunc) + return newSig +} + +func (b *builtinIfNullStringSig) evalString(row chunk.Row) (string, bool, error) { + arg0, isNull, err := b.args[0].EvalString(b.ctx, row) + if !isNull || err != nil { + return arg0, err != nil, err + } + arg1, isNull, err := b.args[1].EvalString(b.ctx, row) + return arg1, isNull || err != nil, err +} diff --git a/expression/builtin_control_vec_generated.go b/expression/builtin_control_vec_generated.go new file mode 100644 index 0000000..73e8e09 --- /dev/null +++ b/expression/builtin_control_vec_generated.go @@ -0,0 +1,261 @@ +// Copyright 2019 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by go generate in expression/generator; DO NOT EDIT. 
+ +package expression + +import ( + "github.com/pingcap/tidb/types" + "github.com/pingcap/tidb/util/chunk" +) + +func (b *builtinIfNullIntSig) vecEvalInt(input *chunk.Chunk, result *chunk.Column) error { + n := input.NumRows() + if err := b.args[0].VecEvalInt(b.ctx, input, result); err != nil { + return err + } + buf1, err := b.bufAllocator.get(types.ETInt, n) + if err != nil { + return err + } + defer b.bufAllocator.put(buf1) + if err := b.args[1].VecEvalInt(b.ctx, input, buf1); err != nil { + return err + } + + arg0 := result.Int64s() + arg1 := buf1.Int64s() + for i := 0; i < n; i++ { + if result.IsNull(i) && !buf1.IsNull(i) { + result.SetNull(i, false) + arg0[i] = arg1[i] + } + } + return nil +} + +func (b *builtinIfNullIntSig) vectorized() bool { + return true +} + +func (b *builtinIfNullRealSig) vecEvalReal(input *chunk.Chunk, result *chunk.Column) error { + n := input.NumRows() + if err := b.args[0].VecEvalReal(b.ctx, input, result); err != nil { + return err + } + buf1, err := b.bufAllocator.get(types.ETReal, n) + if err != nil { + return err + } + defer b.bufAllocator.put(buf1) + if err := b.args[1].VecEvalReal(b.ctx, input, buf1); err != nil { + return err + } + + arg0 := result.Float64s() + arg1 := buf1.Float64s() + for i := 0; i < n; i++ { + if result.IsNull(i) && !buf1.IsNull(i) { + result.SetNull(i, false) + arg0[i] = arg1[i] + } + } + return nil +} + +func (b *builtinIfNullRealSig) vectorized() bool { + return true +} + +func (b *builtinIfNullStringSig) vecEvalString(input *chunk.Chunk, result *chunk.Column) error { + n := input.NumRows() + buf0, err := b.bufAllocator.get(types.ETString, n) + if err != nil { + return err + } + defer b.bufAllocator.put(buf0) + if err := b.args[0].VecEvalString(b.ctx, input, buf0); err != nil { + return err + } + buf1, err := b.bufAllocator.get(types.ETString, n) + if err != nil { + return err + } + defer b.bufAllocator.put(buf1) + if err := b.args[1].VecEvalString(b.ctx, input, buf1); err != nil { + return err + } + + result.ReserveString(n) + for i := 0; i < n; i++ { + if !buf0.IsNull(i) { + result.AppendString(buf0.GetString(i)) + } else if !buf1.IsNull(i) { + result.AppendString(buf1.GetString(i)) + } else { + result.AppendNull() + } + } + return nil +} + +func (b *builtinIfNullStringSig) vectorized() bool { + return true +} + +func (b *builtinIfIntSig) vecEvalInt(input *chunk.Chunk, result *chunk.Column) error { + n := input.NumRows() + buf0, err := b.bufAllocator.get(types.ETInt, n) + if err != nil { + return err + } + defer b.bufAllocator.put(buf0) + if err := b.args[0].VecEvalInt(b.ctx, input, buf0); err != nil { + return err + } + if err := b.args[1].VecEvalInt(b.ctx, input, result); err != nil { + return err + } + buf2, err := b.bufAllocator.get(types.ETInt, n) + if err != nil { + return err + } + defer b.bufAllocator.put(buf2) + if err := b.args[2].VecEvalInt(b.ctx, input, buf2); err != nil { + return err + } + + arg0 := buf0.Int64s() + arg2 := buf2.Int64s() + rs := result.Int64s() + for i := 0; i < n; i++ { + arg := arg0[i] + isNull0 := buf0.IsNull(i) + switch { + case isNull0 || arg == 0: + if buf2.IsNull(i) { + result.SetNull(i, true) + } else { + result.SetNull(i, false) + rs[i] = arg2[i] + } + case arg != 0: + } + } + return nil +} + +func (b *builtinIfIntSig) vectorized() bool { + return true +} + +func (b *builtinIfRealSig) vecEvalReal(input *chunk.Chunk, result *chunk.Column) error { + n := input.NumRows() + buf0, err := b.bufAllocator.get(types.ETInt, n) + if err != nil { + return err + } + defer b.bufAllocator.put(buf0) + 
if err := b.args[0].VecEvalInt(b.ctx, input, buf0); err != nil { + return err + } + if err := b.args[1].VecEvalReal(b.ctx, input, result); err != nil { + return err + } + buf2, err := b.bufAllocator.get(types.ETReal, n) + if err != nil { + return err + } + defer b.bufAllocator.put(buf2) + if err := b.args[2].VecEvalReal(b.ctx, input, buf2); err != nil { + return err + } + + arg0 := buf0.Int64s() + arg2 := buf2.Float64s() + rs := result.Float64s() + for i := 0; i < n; i++ { + arg := arg0[i] + isNull0 := buf0.IsNull(i) + switch { + case isNull0 || arg == 0: + if buf2.IsNull(i) { + result.SetNull(i, true) + } else { + result.SetNull(i, false) + rs[i] = arg2[i] + } + case arg != 0: + } + } + return nil +} + +func (b *builtinIfRealSig) vectorized() bool { + return true +} + +func (b *builtinIfStringSig) vecEvalString(input *chunk.Chunk, result *chunk.Column) error { + n := input.NumRows() + buf0, err := b.bufAllocator.get(types.ETInt, n) + if err != nil { + return err + } + defer b.bufAllocator.put(buf0) + if err := b.args[0].VecEvalInt(b.ctx, input, buf0); err != nil { + return err + } + buf1, err := b.bufAllocator.get(types.ETString, n) + if err != nil { + return err + } + defer b.bufAllocator.put(buf1) + if err := b.args[1].VecEvalString(b.ctx, input, buf1); err != nil { + return err + } + buf2, err := b.bufAllocator.get(types.ETString, n) + if err != nil { + return err + } + defer b.bufAllocator.put(buf2) + if err := b.args[2].VecEvalString(b.ctx, input, buf2); err != nil { + return err + } + + result.ReserveString(n) + arg0 := buf0.Int64s() + for i := 0; i < n; i++ { + arg := arg0[i] + isNull0 := buf0.IsNull(i) + switch { + case isNull0 || arg == 0: + if buf2.IsNull(i) { + result.AppendNull() + } else { + result.AppendString(buf2.GetString(i)) + } + case arg != 0: + if buf1.IsNull(i) { + result.AppendNull() + } else { + result.AppendString(buf1.GetString(i)) + } + } + } + return nil +} + +func (b *builtinIfStringSig) vectorized() bool { + return true +} diff --git a/expression/builtin_control_vec_generated_test.go b/expression/builtin_control_vec_generated_test.go new file mode 100644 index 0000000..6f88949 --- /dev/null +++ b/expression/builtin_control_vec_generated_test.go @@ -0,0 +1,76 @@ +// Copyright 2019 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by go generate in expression/generator; DO NOT EDIT. + +package expression + +import ( + "math/rand" + "testing" + + . 
"github.com/pingcap/check" + "github.com/pingcap/tidb/parser/ast" + "github.com/pingcap/tidb/types" +) + +var defaultControlIntGener = &controlIntGener{zeroRation: 0.3, defaultGener: defaultGener{0.3, types.ETInt}} + +type controlIntGener struct { + zeroRation float64 + defaultGener +} + +func (g *controlIntGener) gen() interface{} { + if rand.Float64() < g.zeroRation { + return int64(0) + } + return g.defaultGener.gen() +} + +var vecBuiltinControlCases = map[string][]vecExprBenchCase{ + + ast.Ifnull: { + + {retEvalType: types.ETInt, childrenTypes: []types.EvalType{types.ETInt, types.ETInt}}, + + {retEvalType: types.ETReal, childrenTypes: []types.EvalType{types.ETReal, types.ETReal}}, + + {retEvalType: types.ETString, childrenTypes: []types.EvalType{types.ETString, types.ETString}}, + }, + + ast.If: { + + {retEvalType: types.ETInt, childrenTypes: []types.EvalType{types.ETInt, types.ETInt, types.ETInt}, geners: []dataGenerator{defaultControlIntGener}}, + + {retEvalType: types.ETReal, childrenTypes: []types.EvalType{types.ETInt, types.ETReal, types.ETReal}, geners: []dataGenerator{defaultControlIntGener}}, + + {retEvalType: types.ETString, childrenTypes: []types.EvalType{types.ETInt, types.ETString, types.ETString}, geners: []dataGenerator{defaultControlIntGener}}, + }, +} + +func (s *testEvaluatorSuite) TestVectorizedBuiltinControlEvalOneVecGenerated(c *C) { + testVectorizedEvalOneVec(c, vecBuiltinControlCases) +} + +func (s *testEvaluatorSuite) TestVectorizedBuiltinControlFuncGenerated(c *C) { + testVectorizedBuiltinFunc(c, vecBuiltinControlCases) +} + +func BenchmarkVectorizedBuiltinControlEvalOneVecGenerated(b *testing.B) { + benchmarkVectorizedEvalOneVec(b, vecBuiltinControlCases) +} + +func BenchmarkVectorizedBuiltinControlFuncGenerated(b *testing.B) { + benchmarkVectorizedBuiltinFunc(b, vecBuiltinControlCases) +} diff --git a/expression/builtin_op.go b/expression/builtin_op.go new file mode 100644 index 0000000..8a7ee37 --- /dev/null +++ b/expression/builtin_op.go @@ -0,0 +1,383 @@ +// Copyright 2016 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package expression + +import ( + "fmt" + "math" + + "github.com/pingcap/errors" + "github.com/pingcap/tidb/parser/mysql" + "github.com/pingcap/tidb/sessionctx" + "github.com/pingcap/tidb/types" + "github.com/pingcap/tidb/util/chunk" + "github.com/pingcap/tipb/go-tipb" +) + +var ( + _ functionClass = &logicAndFunctionClass{} + _ functionClass = &logicOrFunctionClass{} + _ functionClass = &unaryMinusFunctionClass{} + _ functionClass = &isNullFunctionClass{} + _ functionClass = &unaryNotFunctionClass{} +) + +var ( + _ builtinFunc = &builtinLogicAndSig{} + _ builtinFunc = &builtinLogicOrSig{} + _ builtinFunc = &builtinUnaryMinusIntSig{} + _ builtinFunc = &builtinIntIsNullSig{} + _ builtinFunc = &builtinRealIsNullSig{} + _ builtinFunc = &builtinStringIsNullSig{} + _ builtinFunc = &builtinUnaryNotRealSig{} + _ builtinFunc = &builtinUnaryNotIntSig{} +) + +type logicAndFunctionClass struct { + baseFunctionClass +} + +func (c *logicAndFunctionClass) getFunction(ctx sessionctx.Context, args []Expression) (builtinFunc, error) { + err := c.verifyArgs(args) + if err != nil { + return nil, err + } + + bf := newBaseBuiltinFuncWithTp(ctx, args, types.ETInt, types.ETInt, types.ETInt) + sig := &builtinLogicAndSig{bf} + sig.setPbCode(tipb.ScalarFuncSig_LogicalAnd) + sig.tp.Flen = 1 + return sig, nil +} + +type builtinLogicAndSig struct { + baseBuiltinFunc +} + +func (b *builtinLogicAndSig) Clone() builtinFunc { + newSig := &builtinLogicAndSig{} + newSig.cloneFrom(&b.baseBuiltinFunc) + return newSig +} + +func (b *builtinLogicAndSig) evalInt(row chunk.Row) (int64, bool, error) { + arg0, isNull0, err := b.args[0].EvalInt(b.ctx, row) + if err != nil || (!isNull0 && arg0 == 0) { + return 0, err != nil, err + } + arg1, isNull1, err := b.args[1].EvalInt(b.ctx, row) + if err != nil || (!isNull1 && arg1 == 0) { + return 0, err != nil, err + } + if isNull0 || isNull1 { + return 0, true, nil + } + return 1, false, nil +} + +type logicOrFunctionClass struct { + baseFunctionClass +} + +func (c *logicOrFunctionClass) getFunction(ctx sessionctx.Context, args []Expression) (builtinFunc, error) { + err := c.verifyArgs(args) + if err != nil { + return nil, err + } + + bf := newBaseBuiltinFuncWithTp(ctx, args, types.ETInt, types.ETInt, types.ETInt) + bf.tp.Flen = 1 + sig := &builtinLogicOrSig{bf} + sig.setPbCode(tipb.ScalarFuncSig_LogicalOr) + return sig, nil +} + +type builtinLogicOrSig struct { + baseBuiltinFunc +} + +func (b *builtinLogicOrSig) Clone() builtinFunc { + newSig := &builtinLogicOrSig{} + newSig.cloneFrom(&b.baseBuiltinFunc) + return newSig +} + +func (b *builtinLogicOrSig) evalInt(row chunk.Row) (int64, bool, error) { + arg0, isNull0, err := b.args[0].EvalInt(b.ctx, row) + if err != nil { + return 0, true, err + } + if !isNull0 && arg0 != 0 { + return 1, false, nil + } + arg1, isNull1, err := b.args[1].EvalInt(b.ctx, row) + if err != nil { + return 0, true, err + } + if !isNull1 && arg1 != 0 { + return 1, false, nil + } + if isNull0 || isNull1 { + return 0, true, nil + } + return 0, false, nil +} + +type unaryNotFunctionClass struct { + baseFunctionClass +} + +func (c *unaryNotFunctionClass) getFunction(ctx sessionctx.Context, args []Expression) (builtinFunc, error) { + if err := c.verifyArgs(args); err != nil { + return nil, err + } + + argTp := args[0].GetType().EvalType() + if argTp == types.ETString { + argTp = types.ETReal + } + + bf := newBaseBuiltinFuncWithTp(ctx, args, types.ETInt, argTp) + bf.tp.Flen = 1 + + var sig builtinFunc + switch argTp { + case types.ETReal: + sig = 
&builtinUnaryNotRealSig{bf} + sig.setPbCode(tipb.ScalarFuncSig_UnaryNotReal) + case types.ETInt: + sig = &builtinUnaryNotIntSig{bf} + sig.setPbCode(tipb.ScalarFuncSig_UnaryNotInt) + default: + return nil, errors.Errorf("unexpected types.EvalType %v", argTp) + } + return sig, nil +} + +type builtinUnaryNotRealSig struct { + baseBuiltinFunc +} + +func (b *builtinUnaryNotRealSig) Clone() builtinFunc { + newSig := &builtinUnaryNotRealSig{} + newSig.cloneFrom(&b.baseBuiltinFunc) + return newSig +} + +func (b *builtinUnaryNotRealSig) evalInt(row chunk.Row) (int64, bool, error) { + arg, isNull, err := b.args[0].EvalReal(b.ctx, row) + if isNull || err != nil { + return 0, true, err + } + if arg == 0 { + return 1, false, nil + } + return 0, false, nil +} + +type builtinUnaryNotIntSig struct { + baseBuiltinFunc +} + +func (b *builtinUnaryNotIntSig) Clone() builtinFunc { + newSig := &builtinUnaryNotIntSig{} + newSig.cloneFrom(&b.baseBuiltinFunc) + return newSig +} + +func (b *builtinUnaryNotIntSig) evalInt(row chunk.Row) (int64, bool, error) { + arg, isNull, err := b.args[0].EvalInt(b.ctx, row) + if isNull || err != nil { + return 0, true, err + } + if arg == 0 { + return 1, false, nil + } + return 0, false, nil +} + +type unaryMinusFunctionClass struct { + baseFunctionClass +} + +// typeInfer infers unaryMinus function return type. when the arg is an int constant and overflow, +// typerInfer will infers the return type as types.ETDecimal, not types.ETInt. +func (c *unaryMinusFunctionClass) typeInfer(argExpr Expression) (types.EvalType, bool) { + tp := argExpr.GetType().EvalType() + if tp != types.ETInt { + tp = types.ETReal + } + + return tp, false +} + +func (c *unaryMinusFunctionClass) getFunction(ctx sessionctx.Context, args []Expression) (sig builtinFunc, err error) { + if err = c.verifyArgs(args); err != nil { + return nil, err + } + + argExpr, argExprTp := args[0], args[0].GetType() + _, intOverflow := c.typeInfer(argExpr) + + var bf baseBuiltinFunc + switch argExprTp.EvalType() { + case types.ETInt: + if intOverflow { + panic("overflows int") + } else { + bf = newBaseBuiltinFuncWithTp(ctx, args, types.ETInt, types.ETInt) + sig = &builtinUnaryMinusIntSig{bf} + sig.setPbCode(tipb.ScalarFuncSig_UnaryMinusInt) + } + bf.tp.Decimal = 0 + case types.ETReal: + bf = newBaseBuiltinFuncWithTp(ctx, args, types.ETReal, types.ETReal) + sig = &builtinUnaryMinusRealSig{bf} + sig.setPbCode(tipb.ScalarFuncSig_UnaryMinusReal) + default: + bf = newBaseBuiltinFuncWithTp(ctx, args, types.ETReal, types.ETReal) + sig = &builtinUnaryMinusRealSig{bf} + sig.setPbCode(tipb.ScalarFuncSig_UnaryMinusReal) + } + bf.tp.Flen = argExprTp.Flen + 1 + return sig, err +} + +type builtinUnaryMinusIntSig struct { + baseBuiltinFunc +} + +func (b *builtinUnaryMinusIntSig) Clone() builtinFunc { + newSig := &builtinUnaryMinusIntSig{} + newSig.cloneFrom(&b.baseBuiltinFunc) + return newSig +} + +func (b *builtinUnaryMinusIntSig) evalInt(row chunk.Row) (res int64, isNull bool, err error) { + var val int64 + val, isNull, err = b.args[0].EvalInt(b.ctx, row) + if err != nil || isNull { + return val, isNull, err + } + + if mysql.HasUnsignedFlag(b.args[0].GetType().Flag) { + uval := uint64(val) + if uval > uint64(-math.MinInt64) { + return 0, false, types.ErrOverflow.GenWithStackByArgs("BIGINT", fmt.Sprintf("-%v", uval)) + } else if uval == uint64(-math.MinInt64) { + return math.MinInt64, false, nil + } + } else if val == math.MinInt64 { + return 0, false, types.ErrOverflow.GenWithStackByArgs("BIGINT", fmt.Sprintf("-%v", val)) + } + return 
-val, false, nil +} + +type builtinUnaryMinusRealSig struct { + baseBuiltinFunc +} + +func (b *builtinUnaryMinusRealSig) Clone() builtinFunc { + newSig := &builtinUnaryMinusRealSig{} + newSig.cloneFrom(&b.baseBuiltinFunc) + return newSig +} + +func (b *builtinUnaryMinusRealSig) evalReal(row chunk.Row) (float64, bool, error) { + val, isNull, err := b.args[0].EvalReal(b.ctx, row) + return -val, isNull, err +} + +type isNullFunctionClass struct { + baseFunctionClass +} + +func (c *isNullFunctionClass) getFunction(ctx sessionctx.Context, args []Expression) (builtinFunc, error) { + if err := c.verifyArgs(args); err != nil { + return nil, err + } + argTp := args[0].GetType().EvalType() + bf := newBaseBuiltinFuncWithTp(ctx, args, types.ETInt, argTp) + bf.tp.Flen = 1 + var sig builtinFunc + switch argTp { + case types.ETInt: + sig = &builtinIntIsNullSig{bf} + sig.setPbCode(tipb.ScalarFuncSig_IntIsNull) + case types.ETReal: + sig = &builtinRealIsNullSig{bf} + sig.setPbCode(tipb.ScalarFuncSig_RealIsNull) + case types.ETString: + sig = &builtinStringIsNullSig{bf} + sig.setPbCode(tipb.ScalarFuncSig_StringIsNull) + default: + panic("unexpected types.EvalType") + } + return sig, nil +} + +func evalIsNull(isNull bool, err error) (int64, bool, error) { + if err != nil { + return 0, true, err + } + if isNull { + return 1, false, nil + } + return 0, false, nil +} + +type builtinIntIsNullSig struct { + baseBuiltinFunc +} + +func (b *builtinIntIsNullSig) Clone() builtinFunc { + newSig := &builtinIntIsNullSig{} + newSig.cloneFrom(&b.baseBuiltinFunc) + return newSig +} + +func (b *builtinIntIsNullSig) evalInt(row chunk.Row) (int64, bool, error) { + _, isNull, err := b.args[0].EvalInt(b.ctx, row) + return evalIsNull(isNull, err) +} + +type builtinRealIsNullSig struct { + baseBuiltinFunc +} + +func (b *builtinRealIsNullSig) Clone() builtinFunc { + newSig := &builtinRealIsNullSig{} + newSig.cloneFrom(&b.baseBuiltinFunc) + return newSig +} + +func (b *builtinRealIsNullSig) evalInt(row chunk.Row) (int64, bool, error) { + _, isNull, err := b.args[0].EvalReal(b.ctx, row) + return evalIsNull(isNull, err) +} + +type builtinStringIsNullSig struct { + baseBuiltinFunc +} + +func (b *builtinStringIsNullSig) Clone() builtinFunc { + newSig := &builtinStringIsNullSig{} + newSig.cloneFrom(&b.baseBuiltinFunc) + return newSig +} + +func (b *builtinStringIsNullSig) evalInt(row chunk.Row) (int64, bool, error) { + _, isNull, err := b.args[0].EvalString(b.ctx, row) + return evalIsNull(isNull, err) +} diff --git a/expression/builtin_op_vec.go b/expression/builtin_op_vec.go new file mode 100644 index 0000000..82bdee2 --- /dev/null +++ b/expression/builtin_op_vec.go @@ -0,0 +1,267 @@ +// Copyright 2019 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. 
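builtin_op_vec.go below reuses the result column where possible and patches the null bitmap in place; for example, the vectorized IS NULL turns each null slot into 1, each non-null slot into 0, and then clears the null flag. A plain-slice sketch of that in-place pattern, with nulls modelled as a bool mask (the slice-based helper is illustrative and assumes nothing about chunk.Column internals):

package main

import "fmt"

// isNullInPlace rewrites vals so that vals[i] == 1 exactly where the input
// was NULL, mirroring builtinIntIsNullSig.vecEvalInt, which writes into the
// same column it read from and then clears the null bits.
func isNullInPlace(vals []int64, nulls []bool) {
	for i := range vals {
		if nulls[i] {
			vals[i] = 1
			nulls[i] = false // the result of IS NULL is never NULL itself
		} else {
			vals[i] = 0
		}
	}
}

func main() {
	vals := []int64{3, 0, 9}
	nulls := []bool{false, true, false}
	isNullInPlace(vals, nulls)
	fmt.Println(vals, nulls) // [0 1 0] [false false false]
}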
+ +package expression + +import ( + "fmt" + "math" + + "github.com/pingcap/tidb/parser/mysql" + "github.com/pingcap/tidb/types" + "github.com/pingcap/tidb/util/chunk" +) + +func (b *builtinLogicOrSig) vectorized() bool { + return true +} + +func (b *builtinLogicOrSig) vecEvalInt(input *chunk.Chunk, result *chunk.Column) error { + if err := b.args[0].VecEvalInt(b.ctx, input, result); err != nil { + return err + } + + n := input.NumRows() + buf, err := b.bufAllocator.get(types.ETInt, n) + if err != nil { + return err + } + defer b.bufAllocator.put(buf) + if err := b.args[1].VecEvalInt(b.ctx, input, buf); err != nil { + return err + } + + i64s := result.Int64s() + arg1s := buf.Int64s() + + for i := 0; i < n; i++ { + isNull0 := result.IsNull(i) + isNull1 := buf.IsNull(i) + // Because buf is used to store the conversion of args[0] in place, it could + // be that args[0] is null and args[1] is nonzero, in which case the result + // is 1. In these cases, we need to clear the null bit mask of the corresponding + // row in result. + // See https://dev.mysql.com/doc/refman/5.7/en/logical-operators.html#operator_or + isNull := false + if (!isNull0 && i64s[i] != 0) || (!isNull1 && arg1s[i] != 0) { + i64s[i] = 1 + } else if isNull0 || isNull1 { + isNull = true + } else { + i64s[i] = 0 + } + if isNull != isNull0 { + result.SetNull(i, isNull) + } + } + return nil +} + +func (b *builtinUnaryMinusRealSig) vectorized() bool { + return true +} + +func (b *builtinUnaryMinusRealSig) vecEvalReal(input *chunk.Chunk, result *chunk.Column) error { + var err error + if err = b.args[0].VecEvalReal(b.ctx, input, result); err != nil { + return err + } + + n := input.NumRows() + f64s := result.Float64s() + for i := 0; i < n; i++ { + f64s[i] = -f64s[i] + } + return nil +} + +func (b *builtinIntIsNullSig) vectorized() bool { + return true +} + +func (b *builtinIntIsNullSig) vecEvalInt(input *chunk.Chunk, result *chunk.Column) error { + if err := b.args[0].VecEvalInt(b.ctx, input, result); err != nil { + return err + } + + i64s := result.Int64s() + for i := 0; i < len(i64s); i++ { + if result.IsNull(i) { + i64s[i] = 1 + result.SetNull(i, false) + } else { + i64s[i] = 0 + } + } + return nil +} + +func (b *builtinRealIsNullSig) vectorized() bool { + return true +} + +func (b *builtinRealIsNullSig) vecEvalInt(input *chunk.Chunk, result *chunk.Column) error { + numRows := input.NumRows() + buf, err := b.bufAllocator.get(types.ETReal, numRows) + if err != nil { + return err + } + defer b.bufAllocator.put(buf) + + if err := b.args[0].VecEvalReal(b.ctx, input, buf); err != nil { + return err + } + + result.ResizeInt64(numRows, false) + i64s := result.Int64s() + for i := 0; i < numRows; i++ { + if buf.IsNull(i) { + i64s[i] = 1 + } else { + i64s[i] = 0 + } + } + return nil +} + +func (b *builtinUnaryNotRealSig) vectorized() bool { + return true +} + +func (b *builtinUnaryNotRealSig) vecEvalInt(input *chunk.Chunk, result *chunk.Column) error { + n := input.NumRows() + buf, err := b.bufAllocator.get(types.ETReal, n) + if err != nil { + return err + } + defer b.bufAllocator.put(buf) + if err := b.args[0].VecEvalReal(b.ctx, input, buf); err != nil { + return err + } + f64s := buf.Float64s() + + result.ResizeInt64(n, false) + result.MergeNulls(buf) + i64s := result.Int64s() + for i := 0; i < n; i++ { + if result.IsNull(i) { + continue + } + if f64s[i] == 0 { + i64s[i] = 1 + } else { + i64s[i] = 0 + } + } + return nil +} + +func (b *builtinLogicAndSig) vectorized() bool { + return true +} + +func (b *builtinLogicAndSig) vecEvalInt(input 
*chunk.Chunk, result *chunk.Column) error { + n := input.NumRows() + + if err := b.args[0].VecEvalInt(b.ctx, input, result); err != nil { + return err + } + + buf1, err := b.bufAllocator.get(types.ETInt, n) + if err != nil { + return err + } + defer b.bufAllocator.put(buf1) + if err := b.args[1].VecEvalInt(b.ctx, input, buf1); err != nil { + return err + } + + i64s := result.Int64s() + arg1 := buf1.Int64s() + + for i := 0; i < n; i++ { + isNull0 := result.IsNull(i) + if !isNull0 && i64s[i] == 0 { + result.SetNull(i, false) + continue + } + + isNull1 := buf1.IsNull(i) + if !isNull1 && arg1[i] == 0 { + i64s[i] = 0 + result.SetNull(i, false) + continue + } + + if isNull0 || isNull1 { + result.SetNull(i, true) + continue + } + + i64s[i] = 1 + } + + return nil +} + +func (b *builtinUnaryMinusIntSig) vectorized() bool { + return true +} + +func (b *builtinUnaryMinusIntSig) vecEvalInt(input *chunk.Chunk, result *chunk.Column) error { + if err := b.args[0].VecEvalInt(b.ctx, input, result); err != nil { + return err + } + n := input.NumRows() + args := result.Int64s() + if mysql.HasUnsignedFlag(b.args[0].GetType().Flag) { + for i := 0; i < n; i++ { + if uint64(args[i]) > uint64(-math.MinInt64) { + return types.ErrOverflow.GenWithStackByArgs("BIGINT", fmt.Sprintf("-%v", uint64(args[i]))) + } + args[i] = -args[i] + } + } else { + for i := 0; i < n; i++ { + if args[i] == math.MinInt64 { + return types.ErrOverflow.GenWithStackByArgs("BIGINT", fmt.Sprintf("-%v", args[i])) + } + args[i] = -args[i] + } + } + return nil +} + +func (b *builtinUnaryNotIntSig) vectorized() bool { + return true +} + +func (b *builtinUnaryNotIntSig) vecEvalInt(input *chunk.Chunk, result *chunk.Column) error { + n := input.NumRows() + if err := b.args[0].VecEvalInt(b.ctx, input, result); err != nil { + return err + } + + i64s := result.Int64s() + for i := 0; i < n; i++ { + if result.IsNull(i) { + continue + } + if i64s[i] == 0 { + i64s[i] = 1 + } else { + i64s[i] = 0 + } + } + return nil +} diff --git a/expression/builtin_op_vec_test.go b/expression/builtin_op_vec_test.go new file mode 100644 index 0000000..2589c14 --- /dev/null +++ b/expression/builtin_op_vec_test.go @@ -0,0 +1,118 @@ +// Copyright 2019 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package expression + +import ( + "math" + "testing" + + . 
"github.com/pingcap/check" + "github.com/pingcap/tidb/parser/ast" + "github.com/pingcap/tidb/parser/mysql" + "github.com/pingcap/tidb/types" +) + +var vecBuiltinOpCases = map[string][]vecExprBenchCase{ + ast.LogicOr: { + {retEvalType: types.ETInt, childrenTypes: []types.EvalType{types.ETInt, types.ETInt}, geners: makeBinaryLogicOpDataGeners()}, + }, + ast.LogicAnd: { + {retEvalType: types.ETInt, childrenTypes: []types.EvalType{types.ETInt, types.ETInt}, geners: makeBinaryLogicOpDataGeners()}, + }, + ast.UnaryNot: { + {retEvalType: types.ETInt, childrenTypes: []types.EvalType{types.ETReal}}, + {retEvalType: types.ETInt, childrenTypes: []types.EvalType{types.ETInt}}, + }, + ast.UnaryMinus: { + {retEvalType: types.ETReal, childrenTypes: []types.EvalType{types.ETReal}}, + {retEvalType: types.ETInt, childrenTypes: []types.EvalType{types.ETInt}}, + { + retEvalType: types.ETInt, + childrenTypes: []types.EvalType{types.ETInt}, + childrenFieldTypes: []*types.FieldType{{Tp: mysql.TypeLonglong, Flag: mysql.UnsignedFlag}}, + geners: []dataGenerator{&rangeInt64Gener{0, math.MaxInt64}}, + }, + }, + ast.IsNull: { + {retEvalType: types.ETInt, childrenTypes: []types.EvalType{types.ETReal}}, + {retEvalType: types.ETInt, childrenTypes: []types.EvalType{types.ETInt}}, + }, +} + +// givenValsGener returns the items sequentially from the slice given at +// the construction time. If this slice is exhausted, it falls back to +// the fallback generator. +type givenValsGener struct { + given []interface{} + idx int + fallback dataGenerator +} + +func (g *givenValsGener) gen() interface{} { + if g.idx >= len(g.given) { + return g.fallback.gen() + } + v := g.given[g.idx] + g.idx++ + return v +} + +func makeGivenValsOrDefaultGener(vals []interface{}, eType types.EvalType) *givenValsGener { + g := &givenValsGener{} + g.given = vals + g.fallback = &defaultGener{0.2, eType} + return g +} + +func makeBinaryLogicOpDataGeners() []dataGenerator { + // TODO: rename this to makeBinaryOpDataGenerator, since the BIT ops are also using it? + pairs := [][]interface{}{ + {nil, nil}, + {0, nil}, + {nil, 0}, + {1, nil}, + {nil, 1}, + {0, 0}, + {0, 1}, + {1, 0}, + {1, 1}, + {-1, 1}, + } + + maybeToInt64 := func(v interface{}) interface{} { + if v == nil { + return nil + } + return int64(v.(int)) + } + + n := len(pairs) + arg0s := make([]interface{}, n) + arg1s := make([]interface{}, n) + for i, p := range pairs { + arg0s[i] = maybeToInt64(p[0]) + arg1s[i] = maybeToInt64(p[1]) + } + return []dataGenerator{ + makeGivenValsOrDefaultGener(arg0s, types.ETInt), + makeGivenValsOrDefaultGener(arg1s, types.ETInt)} +} + +func (s *testEvaluatorSuite) TestVectorizedBuiltinOpFunc(c *C) { + testVectorizedBuiltinFunc(c, vecBuiltinOpCases) +} + +func BenchmarkVectorizedBuiltinOpFunc(b *testing.B) { + benchmarkVectorizedBuiltinFunc(b, vecBuiltinOpCases) +} diff --git a/expression/builtin_other.go b/expression/builtin_other.go new file mode 100644 index 0000000..a65aa5a --- /dev/null +++ b/expression/builtin_other.go @@ -0,0 +1,433 @@ +// Copyright 2016 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package expression + +import ( + "strings" + + "github.com/pingcap/errors" + "github.com/pingcap/tidb/parser/mysql" + "github.com/pingcap/tidb/sessionctx" + "github.com/pingcap/tidb/types" + "github.com/pingcap/tidb/util/chunk" + "github.com/pingcap/tidb/util/stringutil" + "github.com/pingcap/tipb/go-tipb" +) + +var ( + _ functionClass = &inFunctionClass{} + _ functionClass = &rowFunctionClass{} + _ functionClass = &setVarFunctionClass{} + _ functionClass = &getVarFunctionClass{} + _ functionClass = &valuesFunctionClass{} +) + +var ( + _ builtinFunc = &builtinInIntSig{} + _ builtinFunc = &builtinInStringSig{} + _ builtinFunc = &builtinInRealSig{} + _ builtinFunc = &builtinRowSig{} + _ builtinFunc = &builtinSetVarSig{} + _ builtinFunc = &builtinGetVarSig{} + _ builtinFunc = &builtinValuesIntSig{} + _ builtinFunc = &builtinValuesRealSig{} + _ builtinFunc = &builtinValuesStringSig{} +) + +type inFunctionClass struct { + baseFunctionClass +} + +func (c *inFunctionClass) getFunction(ctx sessionctx.Context, args []Expression) (sig builtinFunc, err error) { + if err := c.verifyArgs(args); err != nil { + return nil, err + } + argTps := make([]types.EvalType, len(args)) + for i := range args { + argTps[i] = args[0].GetType().EvalType() + } + bf := newBaseBuiltinFuncWithTp(ctx, args, types.ETInt, argTps...) + bf.tp.Flen = 1 + switch args[0].GetType().EvalType() { + case types.ETInt: + sig = &builtinInIntSig{baseBuiltinFunc: bf} + sig.setPbCode(tipb.ScalarFuncSig_InInt) + case types.ETString: + sig = &builtinInStringSig{baseBuiltinFunc: bf} + sig.setPbCode(tipb.ScalarFuncSig_InString) + case types.ETReal: + sig = &builtinInRealSig{baseBuiltinFunc: bf} + sig.setPbCode(tipb.ScalarFuncSig_InReal) + } + return sig, nil +} + +// builtinInIntSig see https://dev.mysql.com/doc/refman/5.7/en/comparison-operators.html#function_in +type builtinInIntSig struct { + baseBuiltinFunc +} + +func (b *builtinInIntSig) Clone() builtinFunc { + newSig := &builtinInIntSig{} + newSig.cloneFrom(&b.baseBuiltinFunc) + return newSig +} + +func (b *builtinInIntSig) evalInt(row chunk.Row) (int64, bool, error) { + arg0, isNull0, err := b.args[0].EvalInt(b.ctx, row) + if isNull0 || err != nil { + return 0, isNull0, err + } + isUnsigned0 := mysql.HasUnsignedFlag(b.args[0].GetType().Flag) + var hasNull bool + for _, arg := range b.args[1:] { + evaledArg, isNull, err := arg.EvalInt(b.ctx, row) + if err != nil { + return 0, true, err + } + if isNull { + hasNull = true + continue + } + isUnsigned := mysql.HasUnsignedFlag(arg.GetType().Flag) + if isUnsigned0 && isUnsigned { + if evaledArg == arg0 { + return 1, false, nil + } + } else if !isUnsigned0 && !isUnsigned { + if evaledArg == arg0 { + return 1, false, nil + } + } else if !isUnsigned0 && isUnsigned { + if arg0 >= 0 && evaledArg == arg0 { + return 1, false, nil + } + } else { + if evaledArg >= 0 && evaledArg == arg0 { + return 1, false, nil + } + } + } + return 0, hasNull, nil +} + +// builtinInStringSig see https://dev.mysql.com/doc/refman/5.7/en/comparison-operators.html#function_in +type builtinInStringSig struct { + baseBuiltinFunc +} + +func (b *builtinInStringSig) Clone() builtinFunc { + newSig := &builtinInStringSig{} + newSig.cloneFrom(&b.baseBuiltinFunc) + return newSig +} + +func (b *builtinInStringSig) evalInt(row chunk.Row) (int64, bool, error) { + arg0, isNull0, err := b.args[0].EvalString(b.ctx, row) + if isNull0 || err != nil { + return 0, isNull0, err + } + var hasNull bool + for _, arg := range b.args[1:] { + evaledArg, isNull, err := arg.EvalString(b.ctx, row) 
+ if err != nil { + return 0, true, err + } + if isNull { + hasNull = true + continue + } + if arg0 == evaledArg { + return 1, false, nil + } + } + return 0, hasNull, nil +} + +// builtinInRealSig see https://dev.mysql.com/doc/refman/5.7/en/comparison-operators.html#function_in +type builtinInRealSig struct { + baseBuiltinFunc +} + +func (b *builtinInRealSig) Clone() builtinFunc { + newSig := &builtinInRealSig{} + newSig.cloneFrom(&b.baseBuiltinFunc) + return newSig +} + +func (b *builtinInRealSig) evalInt(row chunk.Row) (int64, bool, error) { + arg0, isNull0, err := b.args[0].EvalReal(b.ctx, row) + if isNull0 || err != nil { + return 0, isNull0, err + } + var hasNull bool + for _, arg := range b.args[1:] { + evaledArg, isNull, err := arg.EvalReal(b.ctx, row) + if err != nil { + return 0, true, err + } + if isNull { + hasNull = true + continue + } + if arg0 == evaledArg { + return 1, false, nil + } + } + return 0, hasNull, nil +} + +type rowFunctionClass struct { + baseFunctionClass +} + +func (c *rowFunctionClass) getFunction(ctx sessionctx.Context, args []Expression) (sig builtinFunc, err error) { + if err = c.verifyArgs(args); err != nil { + return nil, err + } + argTps := make([]types.EvalType, len(args)) + for i := range argTps { + argTps[i] = args[i].GetType().EvalType() + } + bf := newBaseBuiltinFuncWithTp(ctx, args, types.ETString, argTps...) + sig = &builtinRowSig{bf} + return sig, nil +} + +type builtinRowSig struct { + baseBuiltinFunc +} + +func (b *builtinRowSig) Clone() builtinFunc { + newSig := &builtinRowSig{} + newSig.cloneFrom(&b.baseBuiltinFunc) + return newSig +} + +// evalString rowFunc should always be flattened in expression rewrite phrase. +func (b *builtinRowSig) evalString(row chunk.Row) (string, bool, error) { + panic("builtinRowSig.evalString() should never be called.") +} + +type setVarFunctionClass struct { + baseFunctionClass +} + +func (c *setVarFunctionClass) getFunction(ctx sessionctx.Context, args []Expression) (sig builtinFunc, err error) { + if err = c.verifyArgs(args); err != nil { + return nil, err + } + bf := newBaseBuiltinFuncWithTp(ctx, args, types.ETString, types.ETString, types.ETString) + bf.tp.Flen = args[1].GetType().Flen + // TODO: we should consider the type of the argument, but not take it as string for all situations. + sig = &builtinSetVarSig{bf} + return sig, err +} + +type builtinSetVarSig struct { + baseBuiltinFunc +} + +func (b *builtinSetVarSig) Clone() builtinFunc { + newSig := &builtinSetVarSig{} + newSig.cloneFrom(&b.baseBuiltinFunc) + return newSig +} + +func (b *builtinSetVarSig) evalString(row chunk.Row) (res string, isNull bool, err error) { + var varName string + sessionVars := b.ctx.GetSessionVars() + varName, isNull, err = b.args[0].EvalString(b.ctx, row) + if isNull || err != nil { + return "", isNull, err + } + res, isNull, err = b.args[1].EvalString(b.ctx, row) + if isNull || err != nil { + return "", isNull, err + } + varName = strings.ToLower(varName) + sessionVars.UsersLock.Lock() + sessionVars.Users[varName] = stringutil.Copy(res) + sessionVars.UsersLock.Unlock() + return res, false, nil +} + +type getVarFunctionClass struct { + baseFunctionClass +} + +func (c *getVarFunctionClass) getFunction(ctx sessionctx.Context, args []Expression) (sig builtinFunc, err error) { + if err = c.verifyArgs(args); err != nil { + return nil, err + } + // TODO: we should consider the type of the argument, but not take it as string for all situations. 
+ bf := newBaseBuiltinFuncWithTp(ctx, args, types.ETString, types.ETString) + bf.tp.Flen = mysql.MaxFieldVarCharLength + sig = &builtinGetVarSig{bf} + return sig, nil +} + +type builtinGetVarSig struct { + baseBuiltinFunc +} + +func (b *builtinGetVarSig) Clone() builtinFunc { + newSig := &builtinGetVarSig{} + newSig.cloneFrom(&b.baseBuiltinFunc) + return newSig +} + +func (b *builtinGetVarSig) evalString(row chunk.Row) (string, bool, error) { + sessionVars := b.ctx.GetSessionVars() + varName, isNull, err := b.args[0].EvalString(b.ctx, row) + if isNull || err != nil { + return "", isNull, err + } + varName = strings.ToLower(varName) + sessionVars.UsersLock.RLock() + defer sessionVars.UsersLock.RUnlock() + if v, ok := sessionVars.Users[varName]; ok { + return v, false, nil + } + return "", true, nil +} + +type valuesFunctionClass struct { + baseFunctionClass + + offset int + tp *types.FieldType +} + +func (c *valuesFunctionClass) getFunction(ctx sessionctx.Context, args []Expression) (sig builtinFunc, err error) { + if err = c.verifyArgs(args); err != nil { + return nil, err + } + bf := newBaseBuiltinFunc(ctx, args) + bf.tp = c.tp + switch c.tp.EvalType() { + case types.ETInt: + sig = &builtinValuesIntSig{bf, c.offset} + case types.ETReal: + sig = &builtinValuesRealSig{bf, c.offset} + case types.ETString: + sig = &builtinValuesStringSig{bf, c.offset} + } + return sig, nil +} + +type builtinValuesIntSig struct { + baseBuiltinFunc + + offset int +} + +func (b *builtinValuesIntSig) Clone() builtinFunc { + newSig := &builtinValuesIntSig{offset: b.offset} + newSig.cloneFrom(&b.baseBuiltinFunc) + return newSig +} + +// evalInt evals a builtinValuesIntSig. +// See https://dev.mysql.com/doc/refman/5.7/en/miscellaneous-functions.html#function_values +func (b *builtinValuesIntSig) evalInt(_ chunk.Row) (int64, bool, error) { + if !b.ctx.GetSessionVars().StmtCtx.InInsertStmt { + return 0, true, nil + } + row := b.ctx.GetSessionVars().CurrInsertValues + if row.IsEmpty() { + return 0, true, errors.New("Session current insert values is nil") + } + if b.offset < row.Len() { + if row.IsNull(b.offset) { + return 0, true, nil + } + return row.GetInt64(b.offset), false, nil + } + return 0, true, errors.Errorf("Session current insert values len %d and column's offset %v don't match", row.Len(), b.offset) +} + +type builtinValuesRealSig struct { + baseBuiltinFunc + + offset int +} + +func (b *builtinValuesRealSig) Clone() builtinFunc { + newSig := &builtinValuesRealSig{offset: b.offset} + newSig.cloneFrom(&b.baseBuiltinFunc) + return newSig +} + +// evalReal evals a builtinValuesRealSig. 
+// See https://dev.mysql.com/doc/refman/5.7/en/miscellaneous-functions.html#function_values +func (b *builtinValuesRealSig) evalReal(_ chunk.Row) (float64, bool, error) { + if !b.ctx.GetSessionVars().StmtCtx.InInsertStmt { + return 0, true, nil + } + row := b.ctx.GetSessionVars().CurrInsertValues + if row.IsEmpty() { + return 0, true, errors.New("Session current insert values is nil") + } + if b.offset < row.Len() { + if row.IsNull(b.offset) { + return 0, true, nil + } + if b.getRetTp().Tp == mysql.TypeFloat { + return float64(row.GetFloat32(b.offset)), false, nil + } + return row.GetFloat64(b.offset), false, nil + } + return 0, true, errors.Errorf("Session current insert values len %d and column's offset %v don't match", row.Len(), b.offset) +} + +type builtinValuesStringSig struct { + baseBuiltinFunc + + offset int +} + +func (b *builtinValuesStringSig) Clone() builtinFunc { + newSig := &builtinValuesStringSig{offset: b.offset} + newSig.cloneFrom(&b.baseBuiltinFunc) + return newSig +} + +// evalString evals a builtinValuesStringSig. +// See https://dev.mysql.com/doc/refman/5.7/en/miscellaneous-functions.html#function_values +func (b *builtinValuesStringSig) evalString(_ chunk.Row) (string, bool, error) { + if !b.ctx.GetSessionVars().StmtCtx.InInsertStmt { + return "", true, nil + } + row := b.ctx.GetSessionVars().CurrInsertValues + if row.IsEmpty() { + return "", true, errors.New("Session current insert values is nil") + } + if b.offset >= row.Len() { + return "", true, errors.Errorf("Session current insert values len %d and column's offset %v don't match", row.Len(), b.offset) + } + + if row.IsNull(b.offset) { + return "", true, nil + } + + // Specially handle the ENUM/SET/BIT input value. + if retType := b.getRetTp(); retType.Hybrid() { + val := row.GetDatum(b.offset, retType) + res, err := val.ToString() + return res, err != nil, err + } + + return row.GetString(b.offset), false, nil +} diff --git a/expression/builtin_other_test.go b/expression/builtin_other_test.go new file mode 100644 index 0000000..6e99b67 --- /dev/null +++ b/expression/builtin_other_test.go @@ -0,0 +1,196 @@ +// Copyright 2017 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package expression + +import ( + . 
"github.com/pingcap/check" + "github.com/pingcap/tidb/parser/ast" + "github.com/pingcap/tidb/parser/mysql" + "github.com/pingcap/tidb/types" + "github.com/pingcap/tidb/util/chunk" + "github.com/pingcap/tidb/util/hack" + "math" +) + +func (s *testEvaluatorSuite) TestInFunc(c *C) { + fc := funcs[ast.In] + testCases := []struct { + args []interface{} + res interface{} + }{ + {[]interface{}{1, 1, 2, 3}, int64(1)}, + {[]interface{}{1, 0, 2, 3}, int64(0)}, + {[]interface{}{1, nil, 2, 3}, nil}, + {[]interface{}{nil, nil, 2, 3}, nil}, + {[]interface{}{uint64(0), 0, 2, 3}, int64(1)}, + {[]interface{}{uint64(math.MaxUint64), uint64(math.MaxUint64), 2, 3}, int64(1)}, + {[]interface{}{-1, uint64(math.MaxUint64), 2, 3}, int64(0)}, + {[]interface{}{uint64(math.MaxUint64), -1, 2, 3}, int64(0)}, + {[]interface{}{1, 0, 2, 3}, int64(0)}, + {[]interface{}{1.1, 1.2, 1.3}, int64(0)}, + {[]interface{}{1.1, 1.1, 1.2, 1.3}, int64(1)}, + {[]interface{}{"1.1", "1.1", "1.2", "1.3"}, int64(1)}, + {[]interface{}{"1.1", hack.Slice("1.1"), "1.2", "1.3"}, int64(1)}, + {[]interface{}{hack.Slice("1.1"), "1.1", "1.2", "1.3"}, int64(1)}, + } + for _, tc := range testCases { + fn, err := fc.getFunction(s.ctx, s.datumsToConstants(types.MakeDatums(tc.args...))) + c.Assert(err, IsNil) + d, err := evalBuiltinFunc(fn, chunk.MutRowFromDatums(types.MakeDatums(tc.args...)).ToRow()) + c.Assert(err, IsNil) + c.Assert(d.GetValue(), Equals, tc.res, Commentf("%v", types.MakeDatums(tc.args))) + } +} + +func (s *testEvaluatorSuite) TestRowFunc(c *C) { + fc := funcs[ast.RowFunc] + _, err := fc.getFunction(s.ctx, s.datumsToConstants(types.MakeDatums([]interface{}{"1", 1.2, true, 120}...))) + c.Assert(err, IsNil) +} + +func (s *testEvaluatorSuite) TestSetVar(c *C) { + fc := funcs[ast.SetVar] + testCases := []struct { + args []interface{} + res interface{} + }{ + {[]interface{}{"a", "12"}, "12"}, + {[]interface{}{"b", "34"}, "34"}, + {[]interface{}{"c", nil}, ""}, + {[]interface{}{"c", "ABC"}, "ABC"}, + {[]interface{}{"c", "dEf"}, "dEf"}, + } + for _, tc := range testCases { + fn, err := fc.getFunction(s.ctx, s.datumsToConstants(types.MakeDatums(tc.args...))) + c.Assert(err, IsNil) + d, err := evalBuiltinFunc(fn, chunk.MutRowFromDatums(types.MakeDatums(tc.args...)).ToRow()) + c.Assert(err, IsNil) + c.Assert(d.GetString(), Equals, tc.res) + if tc.args[1] != nil { + key, ok := tc.args[0].(string) + c.Assert(ok, Equals, true) + val, ok := tc.res.(string) + c.Assert(ok, Equals, true) + c.Assert(s.ctx.GetSessionVars().Users[key], Equals, val) + } + } +} + +func (s *testEvaluatorSuite) TestGetVar(c *C) { + fc := funcs[ast.GetVar] + + sessionVars := []struct { + key string + val string + }{ + {"a", "中"}, + {"b", "文字符chuan"}, + {"c", ""}, + } + for _, kv := range sessionVars { + s.ctx.GetSessionVars().Users[kv.key] = kv.val + } + + testCases := []struct { + args []interface{} + res interface{} + }{ + {[]interface{}{"a"}, "中"}, + {[]interface{}{"b"}, "文字符chuan"}, + {[]interface{}{"c"}, ""}, + {[]interface{}{"d"}, ""}, + } + for _, tc := range testCases { + fn, err := fc.getFunction(s.ctx, s.datumsToConstants(types.MakeDatums(tc.args...))) + c.Assert(err, IsNil) + d, err := evalBuiltinFunc(fn, chunk.MutRowFromDatums(types.MakeDatums(tc.args...)).ToRow()) + c.Assert(err, IsNil) + c.Assert(d.GetString(), Equals, tc.res) + } +} + +func (s *testEvaluatorSuite) TestValues(c *C) { + origin := s.ctx.GetSessionVars().StmtCtx.InInsertStmt + s.ctx.GetSessionVars().StmtCtx.InInsertStmt = false + defer func() { + s.ctx.GetSessionVars().StmtCtx.InInsertStmt = origin 
+ }() + + fc := &valuesFunctionClass{baseFunctionClass{ast.Values, 0, 0}, 1, types.NewFieldType(mysql.TypeVarchar)} + _, err := fc.getFunction(s.ctx, s.datumsToConstants(types.MakeDatums(""))) + c.Assert(err, ErrorMatches, "*Incorrect parameter count in the call to native function 'values'") + + sig, err := fc.getFunction(s.ctx, s.datumsToConstants(types.MakeDatums())) + c.Assert(err, IsNil) + + ret, err := evalBuiltinFunc(sig, chunk.Row{}) + c.Assert(err, IsNil) + c.Assert(ret.IsNull(), IsTrue) + + s.ctx.GetSessionVars().CurrInsertValues = chunk.MutRowFromDatums(types.MakeDatums("1")).ToRow() + ret, err = evalBuiltinFunc(sig, chunk.Row{}) + c.Assert(err, IsNil) + c.Assert(ret.IsNull(), IsTrue) + + currInsertValues := types.MakeDatums("1", "2") + s.ctx.GetSessionVars().StmtCtx.InInsertStmt = true + s.ctx.GetSessionVars().CurrInsertValues = chunk.MutRowFromDatums(currInsertValues).ToRow() + ret, err = evalBuiltinFunc(sig, chunk.Row{}) + c.Assert(err, IsNil) + + cmp, err := ret.CompareDatum(nil, &currInsertValues[1]) + c.Assert(err, IsNil) + c.Assert(cmp, Equals, 0) +} + +func (s *testEvaluatorSuite) TestSetVarFromColumn(c *C) { + // Construct arguments. + argVarName := &Constant{ + Value: types.NewStringDatum("a"), + RetType: &types.FieldType{Tp: mysql.TypeVarString, Flen: 20}, + } + argCol := &Column{ + RetType: &types.FieldType{Tp: mysql.TypeVarString, Flen: 20}, + Index: 0, + } + + // Construct SetVar function. + funcSetVar, err := NewFunction( + s.ctx, + ast.SetVar, + &types.FieldType{Tp: mysql.TypeVarString, Flen: 20}, + []Expression{argVarName, argCol}..., + ) + c.Assert(err, IsNil) + + // Construct input and output Chunks. + inputChunk := chunk.NewChunkWithCapacity([]*types.FieldType{argCol.RetType}, 1) + inputChunk.AppendString(0, "a") + outputChunk := chunk.NewChunkWithCapacity([]*types.FieldType{argCol.RetType}, 1) + + // Evaluate the SetVar function. + err = evalOneCell(s.ctx, funcSetVar, inputChunk.GetRow(0), outputChunk, 0) + c.Assert(err, IsNil) + c.Assert(outputChunk.GetRow(0).GetString(0), Equals, "a") + + // Change the content of the underlying Chunk. + inputChunk.Reset() + inputChunk.AppendString(0, "b") + + // Check whether the user variable changed. + sessionVars := s.ctx.GetSessionVars() + sessionVars.UsersLock.RLock() + defer sessionVars.UsersLock.RUnlock() + c.Assert(sessionVars.Users["a"], Equals, "a") +} diff --git a/expression/builtin_other_vec.go b/expression/builtin_other_vec.go new file mode 100644 index 0000000..28a1df2 --- /dev/null +++ b/expression/builtin_other_vec.go @@ -0,0 +1,127 @@ +// Copyright 2019 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. 
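builtin_other_vec.go below vectorizes SetVar and GetVar; one detail worth noting is that the session variable map is locked once per input chunk rather than once per row. A minimal stand-in for that batch-locking pattern, using an ordinary map and sync.RWMutex (userVars and setBatch are invented names, not TiDB APIs):

package main

import (
	"fmt"
	"strings"
	"sync"
)

// userVars stands in for the session's user-variable map guarded by UsersLock.
type userVars struct {
	mu    sync.RWMutex
	users map[string]string
}

// setBatch takes the lock once and then writes every row of the batch,
// mirroring how builtinSetVarSig.vecEvalString holds UsersLock across the
// whole chunk instead of locking per row.
func (u *userVars) setBatch(names, vals []string) {
	u.mu.Lock()
	defer u.mu.Unlock()
	for i := range names {
		u.users[strings.ToLower(names[i])] = vals[i]
	}
}

func main() {
	u := &userVars{users: map[string]string{}}
	u.setBatch([]string{"A", "b"}, []string{"1", "2"})
	fmt.Println(u.users["a"], u.users["b"]) // 1 2
}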
+ +package expression + +import ( + "strings" + + "github.com/pingcap/errors" + "github.com/pingcap/tidb/types" + "github.com/pingcap/tidb/util/chunk" + "github.com/pingcap/tidb/util/stringutil" +) + +func (b *builtinValuesIntSig) vectorized() bool { + return false +} + +func (b *builtinValuesIntSig) vecEvalInt(input *chunk.Chunk, result *chunk.Column) error { + return errors.Errorf("not implemented") +} + +func (b *builtinRowSig) vectorized() bool { + return true +} + +func (b *builtinRowSig) vecEvalString(input *chunk.Chunk, result *chunk.Column) error { + panic("builtinRowSig.vecEvalString() should never be called.") +} + +func (b *builtinValuesRealSig) vectorized() bool { + return false +} + +func (b *builtinValuesRealSig) vecEvalReal(input *chunk.Chunk, result *chunk.Column) error { + return errors.Errorf("not implemented") +} + +func (b *builtinValuesStringSig) vectorized() bool { + return false +} + +func (b *builtinValuesStringSig) vecEvalString(input *chunk.Chunk, result *chunk.Column) error { + return errors.Errorf("not implemented") +} + +func (b *builtinSetVarSig) vectorized() bool { + return true +} + +func (b *builtinSetVarSig) vecEvalString(input *chunk.Chunk, result *chunk.Column) error { + n := input.NumRows() + buf0, err := b.bufAllocator.get(types.ETString, n) + if err != nil { + return err + } + defer b.bufAllocator.put(buf0) + if err := b.args[0].VecEvalString(b.ctx, input, buf0); err != nil { + return err + } + buf1, err := b.bufAllocator.get(types.ETString, n) + if err != nil { + return err + } + defer b.bufAllocator.put(buf1) + if err := b.args[1].VecEvalString(b.ctx, input, buf1); err != nil { + return err + } + result.ReserveString(n) + sessionVars := b.ctx.GetSessionVars() + sessionVars.UsersLock.Lock() + defer sessionVars.UsersLock.Unlock() + for i := 0; i < n; i++ { + if buf0.IsNull(i) || buf1.IsNull(i) { + result.AppendNull() + continue + } + varName := strings.ToLower(buf0.GetString(i)) + res := buf1.GetString(i) + sessionVars.Users[varName] = stringutil.Copy(res) + result.AppendString(res) + } + return nil +} + +func (b *builtinGetVarSig) vectorized() bool { + return true +} + +func (b *builtinGetVarSig) vecEvalString(input *chunk.Chunk, result *chunk.Column) error { + n := input.NumRows() + buf0, err := b.bufAllocator.get(types.ETString, n) + if err != nil { + return err + } + defer b.bufAllocator.put(buf0) + if err := b.args[0].VecEvalString(b.ctx, input, buf0); err != nil { + return err + } + result.ReserveString(n) + sessionVars := b.ctx.GetSessionVars() + sessionVars.UsersLock.Lock() + defer sessionVars.UsersLock.Unlock() + for i := 0; i < n; i++ { + if buf0.IsNull(i) { + result.AppendNull() + continue + } + varName := strings.ToLower(buf0.GetString(i)) + if v, ok := sessionVars.Users[varName]; ok { + result.AppendString(v) + continue + } + result.AppendNull() + } + return nil +} diff --git a/expression/builtin_other_vec_generated.go b/expression/builtin_other_vec_generated.go new file mode 100644 index 0000000..d40969b --- /dev/null +++ b/expression/builtin_other_vec_generated.go @@ -0,0 +1,206 @@ +// Copyright 2019 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by go generate in expression/generator; DO NOT EDIT. + +package expression + +import ( + "github.com/pingcap/tidb/parser/mysql" + "github.com/pingcap/tidb/types" + "github.com/pingcap/tidb/util/chunk" +) + +func (b *builtinInIntSig) vecEvalInt(input *chunk.Chunk, result *chunk.Column) error { + n := input.NumRows() + buf0, err := b.bufAllocator.get(types.ETInt, n) + if err != nil { + return err + } + defer b.bufAllocator.put(buf0) + if err := b.args[0].VecEvalInt(b.ctx, input, buf0); err != nil { + return err + } + buf1, err := b.bufAllocator.get(types.ETInt, n) + if err != nil { + return err + } + defer b.bufAllocator.put(buf1) + + args0 := buf0.Int64s() + result.ResizeInt64(n, true) + r64s := result.Int64s() + for i := 0; i < n; i++ { + r64s[i] = 0 + } + hasNull := make([]bool, n) + isUnsigned0 := mysql.HasUnsignedFlag(b.args[0].GetType().Flag) + var compareResult int + + for j := 1; j < len(b.args); j++ { + if err := b.args[j].VecEvalInt(b.ctx, input, buf1); err != nil { + return err + } + isUnsigned := mysql.HasUnsignedFlag(b.args[j].GetType().Flag) + args1 := buf1.Int64s() + buf1.MergeNulls(buf0) + for i := 0; i < n; i++ { + if buf1.IsNull(i) { + hasNull[i] = true + continue + } + arg0 := args0[i] + arg1 := args1[i] + compareResult = 1 + switch { + case (isUnsigned0 && isUnsigned), (!isUnsigned0 && !isUnsigned): + if arg1 == arg0 { + compareResult = 0 + } + case !isUnsigned0 && isUnsigned: + if arg0 >= 0 && arg1 == arg0 { + compareResult = 0 + } + case isUnsigned0 && !isUnsigned: + if arg1 >= 0 && arg1 == arg0 { + compareResult = 0 + } + } + if compareResult == 0 { + result.SetNull(i, false) + r64s[i] = 1 + } + } // for i + } // for j + for i := 0; i < n; i++ { + if result.IsNull(i) { + result.SetNull(i, hasNull[i]) + } + } + return nil +} + +func (b *builtinInIntSig) vectorized() bool { + return true +} + +func (b *builtinInStringSig) vecEvalInt(input *chunk.Chunk, result *chunk.Column) error { + n := input.NumRows() + buf0, err := b.bufAllocator.get(types.ETString, n) + if err != nil { + return err + } + defer b.bufAllocator.put(buf0) + if err := b.args[0].VecEvalString(b.ctx, input, buf0); err != nil { + return err + } + buf1, err := b.bufAllocator.get(types.ETString, n) + if err != nil { + return err + } + defer b.bufAllocator.put(buf1) + + result.ResizeInt64(n, true) + r64s := result.Int64s() + for i := 0; i < n; i++ { + r64s[i] = 0 + } + hasNull := make([]bool, n) + var compareResult int + + for j := 1; j < len(b.args); j++ { + if err := b.args[j].VecEvalString(b.ctx, input, buf1); err != nil { + return err + } + for i := 0; i < n; i++ { + if buf1.IsNull(i) || buf0.IsNull(i) { + hasNull[i] = true + continue + } + arg0 := buf0.GetString(i) + arg1 := buf1.GetString(i) + compareResult = types.CompareString(arg0, arg1) + if compareResult == 0 { + result.SetNull(i, false) + r64s[i] = 1 + } + } // for i + } // for j + for i := 0; i < n; i++ { + if result.IsNull(i) { + result.SetNull(i, hasNull[i]) + } + } + return nil +} + +func (b *builtinInStringSig) vectorized() bool { + return true +} + +func (b *builtinInRealSig) vecEvalInt(input *chunk.Chunk, result *chunk.Column) error { + n := input.NumRows() + buf0, err := 
b.bufAllocator.get(types.ETReal, n) + if err != nil { + return err + } + defer b.bufAllocator.put(buf0) + if err := b.args[0].VecEvalReal(b.ctx, input, buf0); err != nil { + return err + } + buf1, err := b.bufAllocator.get(types.ETReal, n) + if err != nil { + return err + } + defer b.bufAllocator.put(buf1) + + args0 := buf0.Float64s() + result.ResizeInt64(n, true) + r64s := result.Int64s() + for i := 0; i < n; i++ { + r64s[i] = 0 + } + hasNull := make([]bool, n) + var compareResult int + + for j := 1; j < len(b.args); j++ { + if err := b.args[j].VecEvalReal(b.ctx, input, buf1); err != nil { + return err + } + args1 := buf1.Float64s() + buf1.MergeNulls(buf0) + for i := 0; i < n; i++ { + if buf1.IsNull(i) { + hasNull[i] = true + continue + } + arg0 := args0[i] + arg1 := args1[i] + compareResult = types.CompareFloat64(arg0, arg1) + if compareResult == 0 { + result.SetNull(i, false) + r64s[i] = 1 + } + } // for i + } // for j + for i := 0; i < n; i++ { + if result.IsNull(i) { + result.SetNull(i, hasNull[i]) + } + } + return nil +} + +func (b *builtinInRealSig) vectorized() bool { + return true +} diff --git a/expression/builtin_other_vec_generated_test.go b/expression/builtin_other_vec_generated_test.go new file mode 100644 index 0000000..d06b1f3 --- /dev/null +++ b/expression/builtin_other_vec_generated_test.go @@ -0,0 +1,121 @@ +// Copyright 2019 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by go generate in expression/generator; DO NOT EDIT. + +package expression + +import ( + "fmt" + "math/rand" + "testing" + + . 
"github.com/pingcap/check" + "github.com/pingcap/tidb/parser/ast" + "github.com/pingcap/tidb/types" +) + +type inGener struct { + defaultGener +} + +func (g inGener) gen() interface{} { + if rand.Float64() < g.nullRation { + return nil + } + randNum := rand.Int63n(10) + switch g.eType { + case types.ETInt: + if rand.Float64() < 0.5 { + return -randNum + } + return randNum + case types.ETReal: + if rand.Float64() < 0.5 { + return -float64(randNum) + } + return float64(randNum) + case types.ETString: + return fmt.Sprint(randNum) + } + return randNum +} + +var vecBuiltinOtherGeneratedCases = map[string][]vecExprBenchCase{ + ast.In: { + // builtinInIntSig + { + retEvalType: types.ETInt, + childrenTypes: []types.EvalType{ + types.ETInt, + types.ETInt, + types.ETInt, + types.ETInt, + }, + geners: []dataGenerator{ + inGener{defaultGener{eType: types.ETInt, nullRation: 0.2}}, + inGener{defaultGener{eType: types.ETInt, nullRation: 0.2}}, + inGener{defaultGener{eType: types.ETInt, nullRation: 0.2}}, + inGener{defaultGener{eType: types.ETInt, nullRation: 0.2}}, + }, + }, + // builtinInStringSig + { + retEvalType: types.ETInt, + childrenTypes: []types.EvalType{ + types.ETString, + types.ETString, + types.ETString, + types.ETString, + }, + geners: []dataGenerator{ + inGener{defaultGener{eType: types.ETString, nullRation: 0.2}}, + inGener{defaultGener{eType: types.ETString, nullRation: 0.2}}, + inGener{defaultGener{eType: types.ETString, nullRation: 0.2}}, + inGener{defaultGener{eType: types.ETString, nullRation: 0.2}}, + }, + }, + // builtinInRealSig + { + retEvalType: types.ETInt, + childrenTypes: []types.EvalType{ + types.ETReal, + types.ETReal, + types.ETReal, + types.ETReal, + }, + geners: []dataGenerator{ + inGener{defaultGener{eType: types.ETReal, nullRation: 0.2}}, + inGener{defaultGener{eType: types.ETReal, nullRation: 0.2}}, + inGener{defaultGener{eType: types.ETReal, nullRation: 0.2}}, + inGener{defaultGener{eType: types.ETReal, nullRation: 0.2}}, + }, + }, + }, +} + +func (s *testEvaluatorSuite) TestVectorizedBuiltinOtherEvalOneVecGenerated(c *C) { + testVectorizedEvalOneVec(c, vecBuiltinOtherGeneratedCases) +} + +func (s *testEvaluatorSuite) TestVectorizedBuiltinOtherFuncGenerated(c *C) { + testVectorizedBuiltinFunc(c, vecBuiltinOtherGeneratedCases) +} + +func BenchmarkVectorizedBuiltinOtherEvalOneVecGenerated(b *testing.B) { + benchmarkVectorizedEvalOneVec(b, vecBuiltinOtherGeneratedCases) +} + +func BenchmarkVectorizedBuiltinOtherFuncGenerated(b *testing.B) { + benchmarkVectorizedBuiltinFunc(b, vecBuiltinOtherGeneratedCases) +} diff --git a/expression/builtin_other_vec_test.go b/expression/builtin_other_vec_test.go new file mode 100644 index 0000000..5808365 --- /dev/null +++ b/expression/builtin_other_vec_test.go @@ -0,0 +1,39 @@ +// Copyright 2019 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package expression + +import ( + "testing" + + . 
"github.com/pingcap/check" + "github.com/pingcap/tidb/parser/ast" + "github.com/pingcap/tidb/types" +) + +var vecBuiltinOtherCases = map[string][]vecExprBenchCase{ + ast.SetVar: { + {retEvalType: types.ETString, childrenTypes: []types.EvalType{types.ETString, types.ETString}}, + }, + ast.GetVar: { + {retEvalType: types.ETString, childrenTypes: []types.EvalType{types.ETString}}, + }, +} + +func (s *testEvaluatorSuite) TestVectorizedBuiltinOtherFunc(c *C) { + testVectorizedBuiltinFunc(c, vecBuiltinOtherCases) +} + +func BenchmarkVectorizedBuiltinOtherFunc(b *testing.B) { + benchmarkVectorizedBuiltinFunc(b, vecBuiltinOtherCases) +} diff --git a/expression/builtin_string.go b/expression/builtin_string.go new file mode 100644 index 0000000..84706c9 --- /dev/null +++ b/expression/builtin_string.go @@ -0,0 +1,128 @@ +// Copyright 2013 The ql Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSES/QL-LICENSE file. + +// Copyright 2015 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package expression + +import ( + "github.com/pingcap/tidb/parser/mysql" + "github.com/pingcap/tidb/sessionctx" + "github.com/pingcap/tidb/types" + "github.com/pingcap/tidb/util/chunk" + "github.com/pingcap/tipb/go-tipb" +) + +var ( + _ functionClass = &lengthFunctionClass{} + _ functionClass = &strcmpFunctionClass{} +) + +var ( + _ builtinFunc = &builtinLengthSig{} + _ builtinFunc = &builtinStrcmpSig{} +) + +// SetBinFlagOrBinStr sets resTp to binary string if argTp is a binary string, +// if not, sets the binary flag of resTp to true if argTp has binary flag. +func SetBinFlagOrBinStr(argTp *types.FieldType, resTp *types.FieldType) { + if types.IsBinaryStr(argTp) { + types.SetBinChsClnFlag(resTp) + } else if mysql.HasBinaryFlag(argTp.Flag) || !types.IsNonBinaryStr(argTp) { + resTp.Flag |= mysql.BinaryFlag + } +} + +type lengthFunctionClass struct { + baseFunctionClass +} + +func (c *lengthFunctionClass) getFunction(ctx sessionctx.Context, args []Expression) (builtinFunc, error) { + if err := c.verifyArgs(args); err != nil { + return nil, err + } + bf := newBaseBuiltinFuncWithTp(ctx, args, types.ETInt, types.ETString) + bf.tp.Flen = 10 + sig := &builtinLengthSig{bf} + sig.setPbCode(tipb.ScalarFuncSig_Length) + return sig, nil +} + +type builtinLengthSig struct { + baseBuiltinFunc +} + +func (b *builtinLengthSig) Clone() builtinFunc { + newSig := &builtinLengthSig{} + newSig.cloneFrom(&b.baseBuiltinFunc) + return newSig +} + +// evalInt evaluates a builtinLengthSig. 
+// See https://dev.mysql.com/doc/refman/5.7/en/string-functions.html +func (b *builtinLengthSig) evalInt(row chunk.Row) (int64, bool, error) { + val, isNull, err := b.args[0].EvalString(b.ctx, row) + if isNull || err != nil { + return 0, isNull, err + } + return int64(len([]byte(val))), false, nil +} + +type strcmpFunctionClass struct { + baseFunctionClass +} + +func (c *strcmpFunctionClass) getFunction(ctx sessionctx.Context, args []Expression) (builtinFunc, error) { + if err := c.verifyArgs(args); err != nil { + return nil, err + } + bf := newBaseBuiltinFuncWithTp(ctx, args, types.ETInt, types.ETString, types.ETString) + bf.tp.Flen = 2 + types.SetBinChsClnFlag(bf.tp) + sig := &builtinStrcmpSig{bf} + sig.setPbCode(tipb.ScalarFuncSig_Strcmp) + return sig, nil +} + +type builtinStrcmpSig struct { + baseBuiltinFunc +} + +func (b *builtinStrcmpSig) Clone() builtinFunc { + newSig := &builtinStrcmpSig{} + newSig.cloneFrom(&b.baseBuiltinFunc) + return newSig +} + +// evalInt evals a builtinStrcmpSig. +// See https://dev.mysql.com/doc/refman/5.7/en/string-comparison-functions.html +func (b *builtinStrcmpSig) evalInt(row chunk.Row) (int64, bool, error) { + var ( + left, right string + isNull bool + err error + ) + + left, isNull, err = b.args[0].EvalString(b.ctx, row) + if isNull || err != nil { + return 0, isNull, err + } + right, isNull, err = b.args[1].EvalString(b.ctx, row) + if isNull || err != nil { + return 0, isNull, err + } + res := types.CompareString(left, right) + return int64(res), false, nil +} diff --git a/expression/builtin_string_test.go b/expression/builtin_string_test.go new file mode 100644 index 0000000..0b8121a --- /dev/null +++ b/expression/builtin_string_test.go @@ -0,0 +1,100 @@ +// Copyright 2015 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package expression + +import ( + . "github.com/pingcap/check" + "github.com/pingcap/errors" + "github.com/pingcap/tidb/parser/ast" + "github.com/pingcap/tidb/types" + "github.com/pingcap/tidb/util/chunk" +) + +func (s *testEvaluatorSuite) TestLengthAndOctetLength(c *C) { + cases := []struct { + args interface{} + expected int64 + isNil bool + getErr bool + }{ + {"abc", 3, false, false}, + {"你好", 6, false, false}, + {1, 1, false, false}, + {3.14, 4, false, false}, + {nil, 0, true, false}, + {errors.New("must error"), 0, false, true}, + } + + lengthMethods := []string{ast.Length, ast.OctetLength} + for _, lengthMethod := range lengthMethods { + for _, t := range cases { + f, err := newFunctionForTest(s.ctx, lengthMethod, s.primitiveValsToConstants([]interface{}{t.args})...) 
+ c.Assert(err, IsNil) + d, err := f.Eval(chunk.Row{}) + if t.getErr { + c.Assert(err, NotNil) + } else { + c.Assert(err, IsNil) + if t.isNil { + c.Assert(d.Kind(), Equals, types.KindNull) + } else { + c.Assert(d.GetInt64(), Equals, t.expected) + } + } + } + } + + _, err := funcs[ast.Length].getFunction(s.ctx, []Expression{Zero}) + c.Assert(err, IsNil) +} + +func (s *testEvaluatorSuite) TestStrcmp(c *C) { + cases := []struct { + args []interface{} + isNil bool + getErr bool + res int64 + }{ + {[]interface{}{"123", "123"}, false, false, 0}, + {[]interface{}{"123", "1"}, false, false, 1}, + {[]interface{}{"1", "123"}, false, false, -1}, + {[]interface{}{"123", "45"}, false, false, -1}, + {[]interface{}{123, "123"}, false, false, 0}, + {[]interface{}{"12.34", 12.34}, false, false, 0}, + {[]interface{}{nil, "123"}, true, false, 0}, + {[]interface{}{"123", nil}, true, false, 0}, + {[]interface{}{"", "123"}, false, false, -1}, + {[]interface{}{"123", ""}, false, false, 1}, + {[]interface{}{"", ""}, false, false, 0}, + {[]interface{}{"", nil}, true, false, 0}, + {[]interface{}{nil, ""}, true, false, 0}, + {[]interface{}{nil, nil}, true, false, 0}, + {[]interface{}{"123", errors.New("must err")}, false, true, 0}, + } + for _, t := range cases { + f, err := newFunctionForTest(s.ctx, ast.Strcmp, s.primitiveValsToConstants(t.args)...) + c.Assert(err, IsNil) + d, err := f.Eval(chunk.Row{}) + if t.getErr { + c.Assert(err, NotNil) + } else { + c.Assert(err, IsNil) + if t.isNil { + c.Assert(d.Kind(), Equals, types.KindNull) + } else { + c.Assert(d.GetInt64(), Equals, t.res) + } + } + } +} diff --git a/expression/builtin_string_vec.go b/expression/builtin_string_vec.go new file mode 100644 index 0000000..748f09d --- /dev/null +++ b/expression/builtin_string_vec.go @@ -0,0 +1,92 @@ +// Copyright 2019 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. 
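Further down in this file, (*builtinLengthSig).vecEvalInt is intentionally left as a /* Your code here */ stub and its vectorized() method returns false. As a reference only, here is one way the stub could be filled in, assuming the same bufAllocator/VecEvalString pattern used by builtinStrcmpSig in this file and the byte-length semantics of the row-based evalInt tested above; completing the exercise would also mean flipping vectorized() to return true.

// Sketch only, not part of this change: LENGTH() is the string length in bytes.
func (b *builtinLengthSig) vecEvalInt(input *chunk.Chunk, result *chunk.Column) error {
	n := input.NumRows()
	buf, err := b.bufAllocator.get(types.ETString, n)
	if err != nil {
		return err
	}
	defer b.bufAllocator.put(buf)
	if err := b.args[0].VecEvalString(b.ctx, input, buf); err != nil {
		return err
	}
	result.ResizeInt64(n, false)
	result.MergeNulls(buf)
	i64s := result.Int64s()
	for i := 0; i < n; i++ {
		if result.IsNull(i) {
			continue
		}
		i64s[i] = int64(len(buf.GetString(i)))
	}
	return nil
}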
+ +package expression + +import ( + "github.com/pingcap/tidb/types" + "github.com/pingcap/tidb/util/chunk" +) + +func (b *builtinStringIsNullSig) vecEvalInt(input *chunk.Chunk, result *chunk.Column) error { + n := input.NumRows() + buf, err := b.bufAllocator.get(types.ETString, n) + if err != nil { + return err + } + defer b.bufAllocator.put(buf) + if err := b.args[0].VecEvalString(b.ctx, input, buf); err != nil { + return err + } + + result.ResizeInt64(n, false) + i64s := result.Int64s() + for i := 0; i < n; i++ { + if buf.IsNull(i) { + i64s[i] = 1 + } else { + i64s[i] = 0 + } + } + return nil +} + +func (b *builtinStringIsNullSig) vectorized() bool { + return true +} + +func (b *builtinStrcmpSig) vectorized() bool { + return true +} + +func (b *builtinStrcmpSig) vecEvalInt(input *chunk.Chunk, result *chunk.Column) error { + n := input.NumRows() + leftBuf, err := b.bufAllocator.get(types.ETString, n) + if err != nil { + return err + } + defer b.bufAllocator.put(leftBuf) + if err := b.args[0].VecEvalString(b.ctx, input, leftBuf); err != nil { + return err + } + rightBuf, err := b.bufAllocator.get(types.ETString, n) + if err != nil { + return err + } + defer b.bufAllocator.put(rightBuf) + if err := b.args[1].VecEvalString(b.ctx, input, rightBuf); err != nil { + return err + } + result.ResizeInt64(n, false) + result.MergeNulls(leftBuf, rightBuf) + i64s := result.Int64s() + for i := 0; i < n; i++ { + // if left or right is null, then set to null and return 0(which is the default value) + if result.IsNull(i) { + continue + } + i64s[i] = int64(types.CompareString(leftBuf.GetString(i), rightBuf.GetString(i))) + } + return nil +} + +func (b *builtinLengthSig) vectorized() bool { + return false +} + +// vecEvalInt evaluates a builtinLengthSig. +// See https://dev.mysql.com/doc/refman/5.7/en/string-functions.html +func (b *builtinLengthSig) vecEvalInt(input *chunk.Chunk, result *chunk.Column) error { + /* Your code here */ + return nil +} diff --git a/expression/builtin_string_vec_test.go b/expression/builtin_string_vec_test.go new file mode 100644 index 0000000..3c36983 --- /dev/null +++ b/expression/builtin_string_vec_test.go @@ -0,0 +1,63 @@ +// Copyright 2019 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package expression + +import ( + "testing" + + . 
"github.com/pingcap/check" + "github.com/pingcap/tidb/parser/ast" + "github.com/pingcap/tidb/types" +) + +var vecBuiltinStringCases = map[string][]vecExprBenchCase{ + ast.Length: { + {retEvalType: types.ETInt, childrenTypes: []types.EvalType{types.ETString}, geners: []dataGenerator{&defaultGener{0.2, types.ETString}}}, + }, + ast.IsNull: { + {retEvalType: types.ETInt, childrenTypes: []types.EvalType{types.ETString}, geners: []dataGenerator{&randLenStrGener{10, 20}}}, + {retEvalType: types.ETInt, childrenTypes: []types.EvalType{types.ETString}, geners: []dataGenerator{&defaultGener{0.2, types.ETString}}}, + }, + ast.Strcmp: { + {retEvalType: types.ETInt, childrenTypes: []types.EvalType{types.ETString, types.ETString}}, + {retEvalType: types.ETInt, childrenTypes: []types.EvalType{types.ETString, types.ETString}, geners: []dataGenerator{ + &selectStringGener{ + candidates: []string{ + "test", + }, + }, + &selectStringGener{ + candidates: []string{ + "test", + }, + }, + }}, + }, +} + +func (s *testEvaluatorSuite) TestVectorizedBuiltinStringEvalOneVec(c *C) { + testVectorizedEvalOneVec(c, vecBuiltinStringCases) +} + +func (s *testEvaluatorSuite) TestVectorizedBuiltinStringFunc(c *C) { + testVectorizedBuiltinFunc(c, vecBuiltinStringCases) +} + +func BenchmarkVectorizedBuiltinStringEvalOneVec(b *testing.B) { + benchmarkVectorizedEvalOneVec(b, vecBuiltinStringCases) +} + +func BenchmarkVectorizedBuiltinStringFunc(b *testing.B) { + benchmarkVectorizedBuiltinFunc(b, vecBuiltinStringCases) +} diff --git a/expression/builtin_test.go b/expression/builtin_test.go new file mode 100644 index 0000000..1d04b46 --- /dev/null +++ b/expression/builtin_test.go @@ -0,0 +1,87 @@ +// Copyright 2015 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package expression + +import ( + . 
"github.com/pingcap/check" + "github.com/pingcap/tidb/parser/ast" + "github.com/pingcap/tidb/parser/model" + "github.com/pingcap/tidb/parser/mysql" + "github.com/pingcap/tidb/sessionctx" + "github.com/pingcap/tidb/types" + "github.com/pingcap/tidb/util/chunk" +) + +func evalBuiltinFunc(f builtinFunc, row chunk.Row) (d types.Datum, err error) { + var ( + res interface{} + isNull bool + ) + switch f.getRetTp().EvalType() { + case types.ETInt: + var intRes int64 + intRes, isNull, err = f.evalInt(row) + if mysql.HasUnsignedFlag(f.getRetTp().Flag) { + res = uint64(intRes) + } else { + res = intRes + } + case types.ETReal: + res, isNull, err = f.evalReal(row) + case types.ETString: + res, isNull, err = f.evalString(row) + } + + if isNull || err != nil { + d.SetValue(nil) + return d, err + } + d.SetValue(res) + return +} + +func (s *testEvaluatorSuite) TestIsNullFunc(c *C) { + fc := funcs[ast.IsNull] + f, err := fc.getFunction(s.ctx, s.datumsToConstants(types.MakeDatums(1))) + c.Assert(err, IsNil) + v, err := evalBuiltinFunc(f, chunk.Row{}) + c.Assert(err, IsNil) + c.Assert(v.GetInt64(), Equals, int64(0)) + + f, err = fc.getFunction(s.ctx, s.datumsToConstants(types.MakeDatums(nil))) + c.Assert(err, IsNil) + v, err = evalBuiltinFunc(f, chunk.Row{}) + c.Assert(err, IsNil) + c.Assert(v.GetInt64(), Equals, int64(1)) +} + +// newFunctionForTest creates a new ScalarFunction using funcName and arguments, +// it is different from expression.NewFunction which needs an additional retType argument. +func newFunctionForTest(ctx sessionctx.Context, funcName string, args ...Expression) (Expression, error) { + fc, ok := funcs[funcName] + if !ok { + return nil, errFunctionNotExists.GenWithStackByArgs("FUNCTION", funcName) + } + funcArgs := make([]Expression, len(args)) + copy(funcArgs, args) + f, err := fc.getFunction(ctx, funcArgs) + if err != nil { + return nil, err + } + return &ScalarFunction{ + FuncName: model.NewCIStr(funcName), + RetType: f.getRetTp(), + Function: f, + }, nil +} diff --git a/expression/builtin_vectorized.go b/expression/builtin_vectorized.go new file mode 100644 index 0000000..8fa10f3 --- /dev/null +++ b/expression/builtin_vectorized.go @@ -0,0 +1,106 @@ +// Copyright 2019 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package expression + +import ( + "sync" + + "github.com/pingcap/errors" + "github.com/pingcap/tidb/parser/mysql" + "github.com/pingcap/tidb/types" + "github.com/pingcap/tidb/util/chunk" +) + +// columnBufferAllocator is used to allocate and release column buffer in vectorized evaluation. +type columnBufferAllocator interface { + // get allocates a column buffer with the specific eval type and capacity. + // the allocator is not responsible for initializing the column, so please initialize it before using. + get(evalType types.EvalType, capacity int) (*chunk.Column, error) + // put releases a column buffer. + put(buf *chunk.Column) +} + +// localSliceBuffer implements columnBufferAllocator interface. +// It works like a concurrency-safe deque which is implemented by a lock + slice. 
+type localSliceBuffer struct { + sync.Mutex + buffers []*chunk.Column + head int + tail int + size int +} + +func newLocalSliceBuffer(initCap int) *localSliceBuffer { + return &localSliceBuffer{buffers: make([]*chunk.Column, initCap)} +} + +var globalColumnAllocator = newLocalSliceBuffer(1024) + +func newBuffer(evalType types.EvalType, capacity int) (*chunk.Column, error) { + switch evalType { + case types.ETInt: + return chunk.NewColumn(types.NewFieldType(mysql.TypeLonglong), capacity), nil + case types.ETReal: + return chunk.NewColumn(types.NewFieldType(mysql.TypeDouble), capacity), nil + case types.ETString: + return chunk.NewColumn(types.NewFieldType(mysql.TypeString), capacity), nil + } + return nil, errors.Errorf("get column buffer for unsupported EvalType=%v", evalType) +} + +// GetColumn allocates a column buffer with the specific eval type and capacity. +// the allocator is not responsible for initializing the column, so please initialize it before using. +func GetColumn(evalType types.EvalType, capacity int) (*chunk.Column, error) { + return globalColumnAllocator.get(evalType, capacity) +} + +// PutColumn releases a column buffer. +func PutColumn(buf *chunk.Column) { + globalColumnAllocator.put(buf) +} + +func (r *localSliceBuffer) get(evalType types.EvalType, capacity int) (*chunk.Column, error) { + r.Lock() + if r.size > 0 { + buf := r.buffers[r.head] + r.head++ + if r.head == len(r.buffers) { + r.head = 0 + } + r.size-- + r.Unlock() + return buf, nil + } + r.Unlock() + return newBuffer(evalType, capacity) +} + +func (r *localSliceBuffer) put(buf *chunk.Column) { + r.Lock() + if r.size == len(r.buffers) { + buffers := make([]*chunk.Column, len(r.buffers)*2) + copy(buffers, r.buffers[r.head:]) + copy(buffers[r.size-r.head:], r.buffers[:r.tail]) + r.head = 0 + r.tail = len(r.buffers) + r.buffers = buffers + } + r.buffers[r.tail] = buf + r.tail++ + if r.tail == len(r.buffers) { + r.tail = 0 + } + r.size++ + r.Unlock() +} diff --git a/expression/builtin_vectorized_test.go b/expression/builtin_vectorized_test.go new file mode 100644 index 0000000..4b6d0bb --- /dev/null +++ b/expression/builtin_vectorized_test.go @@ -0,0 +1,531 @@ +// Copyright 2019 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package expression + +import ( + "fmt" + . 
"github.com/pingcap/check" + "github.com/pingcap/tidb/parser/mysql" + "github.com/pingcap/tidb/types" + "github.com/pingcap/tidb/util/chunk" + "github.com/pingcap/tidb/util/mock" + "math/rand" + "sync" + "testing" +) + +type mockVecPlusIntBuiltinFunc struct { + baseBuiltinFunc + + buf *chunk.Column + enableAlloc bool +} + +func (p *mockVecPlusIntBuiltinFunc) allocBuf(n int) (*chunk.Column, error) { + if p.enableAlloc { + return p.bufAllocator.get(types.ETInt, n) + } + if p.buf == nil { + p.buf = chunk.NewColumn(types.NewFieldType(mysql.TypeLonglong), n) + } + return p.buf, nil +} + +func (p *mockVecPlusIntBuiltinFunc) releaseBuf(buf *chunk.Column) { + if p.enableAlloc { + p.bufAllocator.put(buf) + } +} + +func (p *mockVecPlusIntBuiltinFunc) vecEvalInt(input *chunk.Chunk, result *chunk.Column) error { + n := input.NumRows() + buf, err := p.allocBuf(n) + if err != nil { + return err + } + defer p.releaseBuf(buf) + if err := p.args[0].VecEvalInt(p.ctx, input, result); err != nil { + return err + } + if err := p.args[1].VecEvalInt(p.ctx, input, buf); err != nil { + return err + } + dst64s := result.Int64s() + src64s := buf.Int64s() + for i := range dst64s { + dst64s[i] += src64s[i] + } + for i := 0; i < n; i++ { + if buf.IsNull(i) && !result.IsNull(i) { + result.SetNull(i, true) + } + } + return nil +} + +func genMockVecPlusIntBuiltinFunc() (*mockVecPlusIntBuiltinFunc, *chunk.Chunk, *chunk.Column) { + tp := types.NewFieldType(mysql.TypeLonglong) + col1 := newColumn(0) + col1.Index, col1.RetType = 0, tp + col2 := newColumn(1) + col2.Index, col2.RetType = 1, tp + bf := newBaseBuiltinFuncWithTp(mock.NewContext(), []Expression{col1, col2}, types.ETInt, types.ETInt, types.ETInt) + plus := &mockVecPlusIntBuiltinFunc{bf, nil, false} + input := chunk.New([]*types.FieldType{tp, tp}, 1024, 1024) + buf := chunk.NewColumn(types.NewFieldType(mysql.TypeLonglong), 1024) + for i := 0; i < 1024; i++ { + input.AppendInt64(0, int64(i)) + input.AppendInt64(1, int64(i)) + } + return plus, input, buf +} + +func (s *testEvaluatorSuite) TestMockVecPlusInt(c *C) { + plus, input, buf := genMockVecPlusIntBuiltinFunc() + plus.enableAlloc = false + c.Assert(plus.vecEvalInt(input, buf), IsNil) + for i := 0; i < 1024; i++ { + c.Assert(buf.IsNull(i), IsFalse) + c.Assert(buf.GetInt64(i), Equals, int64(i*2)) + } + + plus.enableAlloc = true + c.Assert(plus.vecEvalInt(input, buf), IsNil) + for i := 0; i < 1024; i++ { + c.Assert(buf.IsNull(i), IsFalse) + c.Assert(buf.GetInt64(i), Equals, int64(i*2)) + } +} + +func (s *testEvaluatorSuite) TestMockVecPlusIntParallel(c *C) { + plus, input, buf := genMockVecPlusIntBuiltinFunc() + plus.enableAlloc = true // it's concurrency-safe if enableAlloc is true + var wg sync.WaitGroup + for i := 0; i < 50; i++ { + wg.Add(1) + go func() { + result := buf.CopyConstruct(nil) + for i := 0; i < 200; i++ { + c.Assert(plus.vecEvalInt(input, result), IsNil) + for i := 0; i < 1024; i++ { + c.Assert(result.IsNull(i), IsFalse) + c.Assert(result.GetInt64(i), Equals, int64(i*2)) + } + } + wg.Done() + }() + } + wg.Wait() +} + +func BenchmarkColumnBufferAllocate(b *testing.B) { + allocator := newLocalSliceBuffer(1) + b.ResetTimer() + for i := 0; i < b.N; i++ { + buf, _ := allocator.get(types.ETInt, 1024) + allocator.put(buf) + } +} + +func BenchmarkColumnBufferAllocateParallel(b *testing.B) { + allocator := newLocalSliceBuffer(1) + b.ResetTimer() + b.RunParallel(func(pb *testing.PB) { + for pb.Next() { + buf, _ := allocator.get(types.ETInt, 1024) + allocator.put(buf) + } + }) +} + +func 
BenchmarkPlusIntBufAllocator(b *testing.B) { + plus, input, buf := genMockVecPlusIntBuiltinFunc() + names := []string{"enable", "disable"} + enable := []bool{true, false} + for i := range enable { + b.Run(names[i], func(b *testing.B) { + plus.enableAlloc = enable[i] + b.ResetTimer() + for i := 0; i < b.N; i++ { + if err := plus.vecEvalInt(input, buf); err != nil { + b.Fatal(err) + } + } + }) + } +} + +type mockBuiltinDouble struct { + baseBuiltinFunc + + evalType types.EvalType + enableVec bool +} + +func (p *mockBuiltinDouble) vectorized() bool { + return p.enableVec +} + +func (p *mockBuiltinDouble) vecEvalInt(input *chunk.Chunk, result *chunk.Column) error { + if err := p.args[0].VecEvalInt(p.ctx, input, result); err != nil { + return err + } + i64s := result.Int64s() + for i := range i64s { + i64s[i] <<= 1 + } + return nil +} + +func (p *mockBuiltinDouble) vecEvalReal(input *chunk.Chunk, result *chunk.Column) error { + if err := p.args[0].VecEvalReal(p.ctx, input, result); err != nil { + return err + } + f64s := result.Float64s() + for i := range f64s { + f64s[i] *= 2 + } + return nil +} + +func (p *mockBuiltinDouble) vecEvalString(input *chunk.Chunk, result *chunk.Column) error { + var buf *chunk.Column + var err error + if buf, err = p.baseBuiltinFunc.bufAllocator.get(p.evalType, input.NumRows()); err != nil { + return err + } + if err := p.args[0].VecEvalString(p.ctx, input, buf); err != nil { + return err + } + result.ReserveString(input.NumRows()) + for i := 0; i < input.NumRows(); i++ { + str := buf.GetString(i) + result.AppendString(str + str) + } + p.baseBuiltinFunc.bufAllocator.put(buf) + return nil +} + +func (p *mockBuiltinDouble) evalInt(row chunk.Row) (int64, bool, error) { + v, isNull, err := p.args[0].EvalInt(p.ctx, row) + if err != nil { + return 0, false, err + } + return v * 2, isNull, nil +} + +func (p *mockBuiltinDouble) evalReal(row chunk.Row) (float64, bool, error) { + v, isNull, err := p.args[0].EvalReal(p.ctx, row) + if err != nil { + return 0, false, err + } + return v * 2, isNull, nil +} + +func (p *mockBuiltinDouble) evalString(row chunk.Row) (string, bool, error) { + v, isNull, err := p.args[0].EvalString(p.ctx, row) + if err != nil { + return "", false, err + } + return v + v, isNull, nil +} + +func convertETType(eType types.EvalType) (mysqlType byte) { + switch eType { + case types.ETInt: + mysqlType = mysql.TypeLonglong + case types.ETReal: + mysqlType = mysql.TypeDouble + case types.ETString: + mysqlType = mysql.TypeVarString + } + return +} + +func genMockRowDouble(eType types.EvalType, enableVec bool) (builtinFunc, *chunk.Chunk, *chunk.Column, error) { + mysqlType := convertETType(eType) + tp := types.NewFieldType(mysqlType) + col1 := newColumn(1) + col1.Index = 0 + col1.RetType = tp + bf := newBaseBuiltinFuncWithTp(mock.NewContext(), []Expression{col1}, eType, eType) + rowDouble := &mockBuiltinDouble{bf, eType, enableVec} + input := chunk.New([]*types.FieldType{tp}, 1024, 1024) + buf := chunk.NewColumn(types.NewFieldType(convertETType(eType)), 1024) + for i := 0; i < 1024; i++ { + switch eType { + case types.ETInt: + input.AppendInt64(0, int64(i)) + case types.ETReal: + input.AppendFloat64(0, float64(i)) + case types.ETString: + input.AppendString(0, fmt.Sprintf("%v", i)) + } + } + return rowDouble, input, buf, nil +} + +func (s *testEvaluatorSuite) checkVecEval(c *C, eType types.EvalType, sel []int, result *chunk.Column) { + if sel == nil { + for i := 0; i < 1024; i++ { + sel = append(sel, i) + } + } + switch eType { + case types.ETInt: + i64s := 
result.Int64s() + c.Assert(len(i64s), Equals, len(sel)) + for i, j := range sel { + c.Assert(i64s[i], Equals, int64(j*2)) + } + case types.ETReal: + f64s := result.Float64s() + c.Assert(len(f64s), Equals, len(sel)) + for i, j := range sel { + c.Assert(f64s[i], Equals, float64(j*2)) + } + case types.ETString: + for i, j := range sel { + c.Assert(result.GetString(i), Equals, fmt.Sprintf("%v%v", j, j)) + } + } +} + +func vecEvalType(f builtinFunc, eType types.EvalType, input *chunk.Chunk, result *chunk.Column) error { + switch eType { + case types.ETInt: + return f.vecEvalInt(input, result) + case types.ETReal: + return f.vecEvalReal(input, result) + case types.ETString: + return f.vecEvalString(input, result) + } + panic("not implement") +} + +func (s *testEvaluatorSuite) TestDoubleRow2Vec(c *C) { + eTypes := []types.EvalType{types.ETInt, types.ETReal, types.ETString} + for _, eType := range eTypes { + rowDouble, input, result, err := genMockRowDouble(eType, false) + c.Assert(err, IsNil) + c.Assert(vecEvalType(rowDouble, eType, input, result), IsNil) + s.checkVecEval(c, eType, nil, result) + + sel := []int{0} + for { + end := sel[len(sel)-1] + gap := 1024 - end + if gap < 10 { + break + } + sel = append(sel, end+rand.Intn(gap-1)+1) + } + input.SetSel(sel) + c.Assert(vecEvalType(rowDouble, eType, input, result), IsNil) + + s.checkVecEval(c, eType, sel, result) + } +} + +func (s *testEvaluatorSuite) TestDoubleVec2Row(c *C) { + eTypes := []types.EvalType{types.ETInt, types.ETReal, types.ETString} + for _, eType := range eTypes { + rowDouble, input, result, err := genMockRowDouble(eType, true) + result.Reset(eType) + c.Assert(err, IsNil) + it := chunk.NewIterator4Chunk(input) + for row := it.Begin(); row != it.End(); row = it.Next() { + switch eType { + case types.ETInt: + v, _, err := rowDouble.evalInt(row) + c.Assert(err, IsNil) + result.AppendInt64(v) + case types.ETReal: + v, _, err := rowDouble.evalReal(row) + c.Assert(err, IsNil) + result.AppendFloat64(v) + case types.ETString: + v, _, err := rowDouble.evalString(row) + c.Assert(err, IsNil) + result.AppendString(v) + } + } + s.checkVecEval(c, eType, nil, result) + } +} + +func evalRows(b *testing.B, it *chunk.Iterator4Chunk, eType types.EvalType, result *chunk.Column, rowDouble builtinFunc) { + switch eType { + case types.ETInt: + for i := 0; i < b.N; i++ { + result.Reset(eType) + for r := it.Begin(); r != it.End(); r = it.Next() { + v, isNull, err := rowDouble.evalInt(r) + if err != nil { + b.Fatal(err) + } + if isNull { + result.AppendNull() + } else { + result.AppendInt64(v) + } + } + } + case types.ETReal: + for i := 0; i < b.N; i++ { + result.Reset(eType) + for r := it.Begin(); r != it.End(); r = it.Next() { + v, isNull, err := rowDouble.evalReal(r) + if err != nil { + b.Fatal(err) + } + if isNull { + result.AppendNull() + } else { + result.AppendFloat64(v) + } + } + } + case types.ETString: + for i := 0; i < b.N; i++ { + result.Reset(eType) + for r := it.Begin(); r != it.End(); r = it.Next() { + v, isNull, err := rowDouble.evalString(r) + if err != nil { + b.Fatal(err) + } + if isNull { + result.AppendNull() + } else { + result.AppendString(v) + } + } + } + } +} + +func BenchmarkMockDoubleRow(b *testing.B) { + typeNames := []string{"Int", "Real", "String"} + eTypes := []types.EvalType{types.ETInt, types.ETReal, types.ETString} + for i, eType := range eTypes { + b.Run(typeNames[i], func(b *testing.B) { + rowDouble, input, result, _ := genMockRowDouble(eType, false) + it := chunk.NewIterator4Chunk(input) + b.ResetTimer() + evalRows(b, 
it, eType, result, rowDouble) + }) + } +} + +func BenchmarkMockDoubleVec(b *testing.B) { + typeNames := []string{"Int", "Real", "String"} + eTypes := []types.EvalType{types.ETInt, types.ETReal, types.ETString} + for i, eType := range eTypes { + b.Run(typeNames[i], func(b *testing.B) { + rowDouble, input, result, _ := genMockRowDouble(eType, true) + b.ResetTimer() + for i := 0; i < b.N; i++ { + if err := vecEvalType(rowDouble, eType, input, result); err != nil { + b.Fatal(err) + } + } + }) + } +} + +func (s *testEvaluatorSuite) TestVectorizedCheck(c *C) { + con := &Constant{} + c.Assert(con.Vectorized(), IsTrue) + col := &Column{} + c.Assert(col.Vectorized(), IsTrue) + + vecF, _, _, _ := genMockRowDouble(types.ETInt, true) + sf := &ScalarFunction{Function: vecF} + c.Assert(sf.Vectorized(), IsTrue) + + rowF, _, _, _ := genMockRowDouble(types.ETInt, false) + sf = &ScalarFunction{Function: rowF} + c.Assert(sf.Vectorized(), IsFalse) +} + +func genFloat32Col() (*Column, *chunk.Chunk, *chunk.Column) { + typeFloat := types.NewFieldType(mysql.TypeFloat) + col := &Column{Index: 0, RetType: typeFloat} + chk := chunk.NewChunkWithCapacity([]*types.FieldType{typeFloat}, 1024) + for i := 0; i < 1024; i++ { + chk.AppendFloat32(0, rand.Float32()) + } + result := chunk.NewColumn(typeFloat, 1024) + return col, chk, result +} + +func (s *testEvaluatorSuite) TestFloat32ColVec(c *C) { + col, chk, result := genFloat32Col() + ctx := mock.NewContext() + c.Assert(col.VecEvalReal(ctx, chk, result), IsNil) + it := chunk.NewIterator4Chunk(chk) + i := 0 + for row := it.Begin(); row != it.End(); row = it.Next() { + v, _, err := col.EvalReal(ctx, row) + c.Assert(err, IsNil) + c.Assert(v, Equals, result.GetFloat64(i)) + i++ + } + + // set Sel + n := chk.NumRows() + sel := make([]int, n/2) + for i := 0; i < n; i += 2 { + sel = append(sel, i) + } + chk.SetSel(sel) + c.Assert(col.VecEvalReal(ctx, chk, result), IsNil) + i = 0 + for row := it.Begin(); row != it.End(); row = it.Next() { + v, _, err := col.EvalReal(ctx, row) + c.Assert(err, IsNil) + c.Assert(v, Equals, result.GetFloat64(i)) + i++ + } + + // set an empty Sel + sel = sel[:0] + c.Assert(col.VecEvalReal(ctx, chk, result), IsNil) +} + +func BenchmarkFloat32ColRow(b *testing.B) { + col, chk, _ := genFloat32Col() + ctx := mock.NewContext() + it := chunk.NewIterator4Chunk(chk) + b.ResetTimer() + for i := 0; i < b.N; i++ { + for row := it.Begin(); row != it.End(); row = it.Next() { + if _, _, err := col.EvalReal(ctx, row); err != nil { + b.Fatal(err) + } + + } + } +} + +func BenchmarkFloat32ColVec(b *testing.B) { + col, chk, result := genFloat32Col() + ctx := mock.NewContext() + b.ResetTimer() + for i := 0; i < b.N; i++ { + if err := col.VecEvalReal(ctx, chk, result); err != nil { + b.Fatal(err) + } + } +} diff --git a/expression/chunk_executor.go b/expression/chunk_executor.go new file mode 100644 index 0000000..e3c206d --- /dev/null +++ b/expression/chunk_executor.go @@ -0,0 +1,313 @@ +// Copyright 2017 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. 
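The file that follows, expression/chunk_executor.go, is the glue between expressions and chunks. As a rough illustration of how its helpers are meant to be combined (a sketch, not code from this change; evalExprs is an invented name), a caller would check Vectorizable and the session switch, then take either the column-at-a-time or the row-at-a-time path:

// Sketch: dispatch between vectorized and row-based evaluation of a projection.
func evalExprs(ctx sessionctx.Context, exprs []Expression, input, output *chunk.Chunk) error {
	if Vectorizable(exprs) && ctx.GetSessionVars().EnableVectorizedExpression {
		for i, expr := range exprs {
			if err := evalOneVec(ctx, expr, input, output, i); err != nil {
				return err
			}
		}
		return nil
	}
	it := chunk.NewIterator4Chunk(input)
	for i, expr := range exprs {
		if err := evalOneColumn(ctx, expr, it, output, i); err != nil {
			return err
		}
	}
	return nil
}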
+ +package expression + +import ( + "strconv" + + "github.com/pingcap/tidb/parser/ast" + "github.com/pingcap/tidb/parser/mysql" + "github.com/pingcap/tidb/sessionctx" + "github.com/pingcap/tidb/types" + "github.com/pingcap/tidb/util/chunk" +) + +// Vectorizable checks whether a list of expressions can employ vectorized execution. +func Vectorizable(exprs []Expression) bool { + for _, expr := range exprs { + if HasGetSetVarFunc(expr) { + return false + } + } + return true +} + +// HasGetSetVarFunc checks whether an expression contains SetVar/GetVar function. +func HasGetSetVarFunc(expr Expression) bool { + scalaFunc, ok := expr.(*ScalarFunction) + if !ok { + return false + } + if scalaFunc.FuncName.L == ast.SetVar { + return true + } + if scalaFunc.FuncName.L == ast.GetVar { + return true + } + for _, arg := range scalaFunc.GetArgs() { + if HasGetSetVarFunc(arg) { + return true + } + } + return false +} + +// HasAssignSetVarFunc checks whether an expression contains SetVar function and assign a value +func HasAssignSetVarFunc(expr Expression) bool { + scalaFunc, ok := expr.(*ScalarFunction) + if !ok { + return false + } + if scalaFunc.FuncName.L == ast.SetVar { + for _, arg := range scalaFunc.GetArgs() { + if _, ok := arg.(*ScalarFunction); ok { + return true + } + } + } + for _, arg := range scalaFunc.GetArgs() { + if HasAssignSetVarFunc(arg) { + return true + } + } + return false +} + +func evalOneVec(ctx sessionctx.Context, expr Expression, input *chunk.Chunk, output *chunk.Chunk, colIdx int) error { + ft := expr.GetType() + result := output.Column(colIdx) + switch ft.EvalType() { + case types.ETInt: + if err := expr.VecEvalInt(ctx, input, result); err != nil { + return err + } + case types.ETReal: + if err := expr.VecEvalReal(ctx, input, result); err != nil { + return err + } + if ft.Tp == mysql.TypeFloat { + f64s := result.Float64s() + n := input.NumRows() + buf := chunk.NewColumn(ft, n) + buf.ResizeFloat32(n, false) + f32s := buf.Float32s() + for i := range f64s { + if result.IsNull(i) { + buf.SetNull(i, true) + } else { + f32s[i] = float32(f64s[i]) + } + } + output.SetCol(colIdx, buf) + } + case types.ETString: + if err := expr.VecEvalString(ctx, input, result); err != nil { + return err + } + } + return nil +} + +func evalOneColumn(ctx sessionctx.Context, expr Expression, iterator *chunk.Iterator4Chunk, output *chunk.Chunk, colID int) (err error) { + switch fieldType, evalType := expr.GetType(), expr.GetType().EvalType(); evalType { + case types.ETInt: + for row := iterator.Begin(); err == nil && row != iterator.End(); row = iterator.Next() { + err = executeToInt(ctx, expr, fieldType, row, output, colID) + } + case types.ETReal: + for row := iterator.Begin(); err == nil && row != iterator.End(); row = iterator.Next() { + err = executeToReal(ctx, expr, fieldType, row, output, colID) + } + case types.ETString: + for row := iterator.Begin(); err == nil && row != iterator.End(); row = iterator.Next() { + err = executeToString(ctx, expr, fieldType, row, output, colID) + } + } + return err +} + +func evalOneCell(ctx sessionctx.Context, expr Expression, row chunk.Row, output *chunk.Chunk, colID int) (err error) { + switch fieldType, evalType := expr.GetType(), expr.GetType().EvalType(); evalType { + case types.ETInt: + err = executeToInt(ctx, expr, fieldType, row, output, colID) + case types.ETReal: + err = executeToReal(ctx, expr, fieldType, row, output, colID) + case types.ETString: + err = executeToString(ctx, expr, fieldType, row, output, colID) + } + return err +} + +func 
executeToInt(ctx sessionctx.Context, expr Expression, fieldType *types.FieldType, row chunk.Row, output *chunk.Chunk, colID int) error { + res, isNull, err := expr.EvalInt(ctx, row) + if err != nil { + return err + } + if isNull { + output.AppendNull(colID) + return nil + } + if fieldType.Tp == mysql.TypeBit { + output.AppendBytes(colID, strconv.AppendUint(make([]byte, 0, 8), uint64(res), 10)) + return nil + } + if mysql.HasUnsignedFlag(fieldType.Flag) { + output.AppendUint64(colID, uint64(res)) + return nil + } + output.AppendInt64(colID, res) + return nil +} + +func executeToReal(ctx sessionctx.Context, expr Expression, fieldType *types.FieldType, row chunk.Row, output *chunk.Chunk, colID int) error { + res, isNull, err := expr.EvalReal(ctx, row) + if err != nil { + return err + } + if isNull { + output.AppendNull(colID) + return nil + } + if fieldType.Tp == mysql.TypeFloat { + output.AppendFloat32(colID, float32(res)) + return nil + } + output.AppendFloat64(colID, res) + return nil +} + +func executeToString(ctx sessionctx.Context, expr Expression, fieldType *types.FieldType, row chunk.Row, output *chunk.Chunk, colID int) error { + res, isNull, err := expr.EvalString(ctx, row) + if err != nil { + return err + } + if isNull { + output.AppendNull(colID) + } else { + output.AppendString(colID, res) + } + return nil +} + +// VectorizedFilter applies a list of filters to a Chunk and +// returns a bool slice, which indicates whether a row is passed the filters. +// Filters is executed vectorized. +func VectorizedFilter(ctx sessionctx.Context, filters []Expression, iterator *chunk.Iterator4Chunk, selected []bool) (_ []bool, err error) { + selected, _, err = VectorizedFilterConsiderNull(ctx, filters, iterator, selected, nil) + return selected, err +} + +// VectorizedFilterConsiderNull applies a list of filters to a Chunk and +// returns two bool slices, `selected` indicates whether a row passed the +// filters, `isNull` indicates whether the result of the filter is null. +// Filters is executed vectorized. +func VectorizedFilterConsiderNull(ctx sessionctx.Context, filters []Expression, iterator *chunk.Iterator4Chunk, selected []bool, isNull []bool) ([]bool, []bool, error) { + // canVectorized used to check whether all of the filters can be vectorized evaluated + canVectorized := true + for _, filter := range filters { + if !filter.Vectorized() { + canVectorized = false + break + } + } + + input := iterator.GetChunk() + sel := input.Sel() + var err error + if canVectorized && ctx.GetSessionVars().EnableVectorizedExpression { + selected, isNull, err = vectorizedFilter(ctx, filters, iterator, selected, isNull) + } else { + selected, isNull, err = rowBasedFilter(ctx, filters, iterator, selected, isNull) + } + if err != nil || sel == nil { + return selected, isNull, err + } + + // When the input.Sel() != nil, we need to handle the selected slice and input.Sel() + // Get the index which is not appeared in input.Sel() and set the selected[index] = false + selectedLength := len(selected) + unselected := allocZeroSlice(selectedLength) + defer deallocateZeroSlice(unselected) + // unselected[i] == 1 means that the i-th row is not selected + for i := 0; i < selectedLength; i++ { + unselected[i] = 1 + } + for _, ind := range sel { + unselected[ind] = 0 + } + for i := 0; i < selectedLength; i++ { + if selected[i] && unselected[i] == 1 { + selected[i] = false + } + } + return selected, isNull, err +} + +// rowBasedFilter filters by row. 
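(rowBasedFilter below and vectorizedFilter are the two internal paths; VectorizedFilter and VectorizedFilterConsiderNull above are the entry points an executor calls.) A minimal call-site sketch follows; applyFilters, child and output are invented names, and chunk.Chunk's AppendRow is assumed to be available for copying a row into another chunk:

// Sketch of a selection step built on VectorizedFilter (hypothetical names).
func applyFilters(ctx sessionctx.Context, filters []Expression, child, output *chunk.Chunk, selected []bool) error {
	it := chunk.NewIterator4Chunk(child)
	selected, err := VectorizedFilter(ctx, filters, it, selected)
	if err != nil {
		return err
	}
	for row := it.Begin(); row != it.End(); row = it.Next() {
		if selected[row.Idx()] {
			output.AppendRow(row) // assumed chunk API for appending one row
		}
	}
	return nil
}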
+func rowBasedFilter(ctx sessionctx.Context, filters []Expression, iterator *chunk.Iterator4Chunk, selected []bool, isNull []bool) ([]bool, []bool, error) { + // If input.Sel() != nil, we will call input.SetSel(nil) to clear the sel slice in input chunk. + // After the function finished, then we reset the sel in input chunk. + // Then the caller will handle the input.sel and selected slices. + input := iterator.GetChunk() + if input.Sel() != nil { + defer input.SetSel(input.Sel()) + input.SetSel(nil) + iterator = chunk.NewIterator4Chunk(input) + } + + selected = selected[:0] + for i, numRows := 0, iterator.Len(); i < numRows; i++ { + selected = append(selected, true) + } + if isNull != nil { + isNull = isNull[:0] + for i, numRows := 0, iterator.Len(); i < numRows; i++ { + isNull = append(isNull, false) + } + } + var ( + filterResult int64 + bVal, isNullResult bool + err error + ) + for _, filter := range filters { + isIntType := true + if filter.GetType().EvalType() != types.ETInt { + isIntType = false + } + for row := iterator.Begin(); row != iterator.End(); row = iterator.Next() { + if !selected[row.Idx()] { + continue + } + if isIntType { + filterResult, isNullResult, err = filter.EvalInt(ctx, row) + if err != nil { + return nil, nil, err + } + selected[row.Idx()] = selected[row.Idx()] && !isNullResult && (filterResult != 0) + } else { + // TODO: should rewrite the filter to `cast(expr as SIGNED) != 0` and always use `EvalInt`. + bVal, isNullResult, err = EvalBool(ctx, []Expression{filter}, row) + if err != nil { + return nil, nil, err + } + selected[row.Idx()] = selected[row.Idx()] && bVal + } + if isNull != nil { + isNull[row.Idx()] = isNull[row.Idx()] || isNullResult + } + } + } + return selected, isNull, nil +} + +// vectorizedFilter filters by vector. +func vectorizedFilter(ctx sessionctx.Context, filters []Expression, iterator *chunk.Iterator4Chunk, selected []bool, isNull []bool) ([]bool, []bool, error) { + selected, isNull, err := VecEvalBool(ctx, filters, iterator.GetChunk(), selected, isNull) + if err != nil { + return nil, nil, err + } + + return selected, isNull, nil +} diff --git a/expression/column.go b/expression/column.go new file mode 100644 index 0000000..7eb2200 --- /dev/null +++ b/expression/column.go @@ -0,0 +1,362 @@ +// Copyright 2016 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package expression + +import ( + "fmt" + "strings" + + "github.com/pingcap/errors" + "github.com/pingcap/tidb/parser/model" + "github.com/pingcap/tidb/parser/mysql" + "github.com/pingcap/tidb/sessionctx" + "github.com/pingcap/tidb/sessionctx/stmtctx" + "github.com/pingcap/tidb/types" + "github.com/pingcap/tidb/util/chunk" + "github.com/pingcap/tidb/util/codec" +) + +// Column represents a column. +type Column struct { + RetType *types.FieldType + // ID is used to specify whether this column is ExtraHandleColumn or to access histogram. + // We'll try to remove it in the future. + ID int64 + // UniqueID is the unique id of this column. + UniqueID int64 + + // Index is used for execution, to tell the column's position in the given row. 
+ Index int + + hashcode []byte + + OrigName string +} + +// Equal implements Expression interface. +func (col *Column) Equal(_ sessionctx.Context, expr Expression) bool { + if newCol, ok := expr.(*Column); ok { + return newCol.UniqueID == col.UniqueID + } + return false +} + +// VecEvalInt evaluates this expression in a vectorized manner. +func (col *Column) VecEvalInt(ctx sessionctx.Context, input *chunk.Chunk, result *chunk.Column) error { + if col.RetType.Hybrid() { + it := chunk.NewIterator4Chunk(input) + result.ResizeInt64(0, false) + for row := it.Begin(); row != it.End(); row = it.Next() { + v, null, err := col.EvalInt(ctx, row) + if err != nil { + return err + } + if null { + result.AppendNull() + } else { + result.AppendInt64(v) + } + } + return nil + } + input.Column(col.Index).CopyReconstruct(input.Sel(), result) + return nil +} + +// VecEvalReal evaluates this expression in a vectorized manner. +func (col *Column) VecEvalReal(ctx sessionctx.Context, input *chunk.Chunk, result *chunk.Column) error { + n := input.NumRows() + src := input.Column(col.Index) + if col.GetType().Tp == mysql.TypeFloat { + result.ResizeFloat64(n, false) + f32s := src.Float32s() + f64s := result.Float64s() + sel := input.Sel() + if sel != nil { + for i, j := range sel { + if src.IsNull(j) { + result.SetNull(i, true) + } else { + f64s[i] = float64(f32s[j]) + } + } + return nil + } + for i := range f32s { + // TODO(zhangyuanjia): speed up the way to manipulate null-bitmaps. + if src.IsNull(i) { + result.SetNull(i, true) + } else { + f64s[i] = float64(f32s[i]) + } + } + return nil + } + input.Column(col.Index).CopyReconstruct(input.Sel(), result) + return nil +} + +// VecEvalString evaluates this expression in a vectorized manner. +func (col *Column) VecEvalString(ctx sessionctx.Context, input *chunk.Chunk, result *chunk.Column) error { + if col.RetType.Hybrid() || ctx.GetSessionVars().StmtCtx.PadCharToFullLength { + it := chunk.NewIterator4Chunk(input) + result.ReserveString(input.NumRows()) + for row := it.Begin(); row != it.End(); row = it.Next() { + v, null, err := col.EvalString(ctx, row) + if err != nil { + return err + } + if null { + result.AppendNull() + } else { + result.AppendString(v) + } + } + return nil + } + input.Column(col.Index).CopyReconstruct(input.Sel(), result) + return nil +} + +const columnPrefix = "Column#" + +// String implements Stringer interface. +func (col *Column) String() string { + if col.OrigName != "" { + return col.OrigName + } + var builder strings.Builder + fmt.Fprintf(&builder, "%s%d", columnPrefix, col.UniqueID) + return builder.String() +} + +// MarshalJSON implements json.Marshaler interface. +func (col *Column) MarshalJSON() ([]byte, error) { + return []byte(fmt.Sprintf("\"%s\"", col)), nil +} + +// GetType implements Expression interface. +func (col *Column) GetType() *types.FieldType { + return col.RetType +} + +// Eval implements Expression interface. +func (col *Column) Eval(row chunk.Row) (types.Datum, error) { + return row.GetDatum(col.Index, col.RetType), nil +} + +// EvalInt returns int representation of Column. 
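(EvalInt and the other row-based getters follow.) For contrast with them, a tiny sketch of the vectorized path on a bound column; the values and names are invented, and the mock context mirrors the tests later in this diff:

// Sketch: a Column bound to chunk column 0, evaluated column-at-a-time.
func exampleColumnVecEval() error {
	tp := types.NewFieldType(mysql.TypeLonglong)
	col := &Column{Index: 0, RetType: tp, UniqueID: 1}
	chk := chunk.NewChunkWithCapacity([]*types.FieldType{tp}, 4)
	for i := int64(0); i < 4; i++ {
		chk.AppendInt64(0, i)
	}
	result := chunk.NewColumn(tp, 4)
	// Copies chunk column 0 into result; CopyReconstruct also honours chk.Sel().
	return col.VecEvalInt(mock.NewContext(), chk, result)
}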
+func (col *Column) EvalInt(ctx sessionctx.Context, row chunk.Row) (int64, bool, error) { + if col.GetType().Hybrid() { + val := row.GetDatum(col.Index, col.RetType) + if val.IsNull() { + return 0, true, nil + } + res, err := val.ToInt64(ctx.GetSessionVars().StmtCtx) + return res, err != nil, err + } + if row.IsNull(col.Index) { + return 0, true, nil + } + return row.GetInt64(col.Index), false, nil +} + +// EvalReal returns real representation of Column. +func (col *Column) EvalReal(ctx sessionctx.Context, row chunk.Row) (float64, bool, error) { + if row.IsNull(col.Index) { + return 0, true, nil + } + if col.GetType().Tp == mysql.TypeFloat { + return float64(row.GetFloat32(col.Index)), false, nil + } + return row.GetFloat64(col.Index), false, nil +} + +// EvalString returns string representation of Column. +func (col *Column) EvalString(ctx sessionctx.Context, row chunk.Row) (string, bool, error) { + if row.IsNull(col.Index) { + return "", true, nil + } + + // Specially handle the ENUM/SET/BIT input value. + if col.GetType().Hybrid() { + val := row.GetDatum(col.Index, col.RetType) + res, err := val.ToString() + return res, err != nil, err + } + + val := row.GetString(col.Index) + if ctx.GetSessionVars().StmtCtx.PadCharToFullLength && col.GetType().Tp == mysql.TypeString { + valLen := len([]rune(val)) + if valLen < col.RetType.Flen { + val = val + strings.Repeat(" ", col.RetType.Flen-valLen) + } + } + return val, false, nil +} + +// Clone implements Expression interface. +func (col *Column) Clone() Expression { + newCol := *col + return &newCol +} + +// IsCorrelated implements Expression interface. +func (col *Column) IsCorrelated() bool { + return false +} + +// ConstItem implements Expression interface. +func (col *Column) ConstItem() bool { + return false +} + +// Decorrelate implements Expression interface. +func (col *Column) Decorrelate(_ *Schema) Expression { + return col +} + +// HashCode implements Expression interface. +func (col *Column) HashCode(_ *stmtctx.StatementContext) []byte { + if len(col.hashcode) != 0 { + return col.hashcode + } + col.hashcode = make([]byte, 0, 9) + col.hashcode = append(col.hashcode, columnFlag) + col.hashcode = codec.EncodeInt(col.hashcode, int64(col.UniqueID)) + return col.hashcode +} + +// ResolveIndices implements Expression interface. +func (col *Column) ResolveIndices(schema *Schema) (Expression, error) { + newCol := col.Clone() + err := newCol.resolveIndices(schema) + return newCol, err +} + +func (col *Column) resolveIndices(schema *Schema) error { + col.Index = schema.ColumnIndex(col) + if col.Index == -1 { + return errors.Errorf("Can't find column %s in schema %s", col, schema) + } + return nil +} + +// Vectorized returns if this expression supports vectorized evaluation. +func (col *Column) Vectorized() bool { + return true +} + +// ToInfo converts the expression.Column to model.ColumnInfo for casting values, +// beware it doesn't fill all the fields of the model.ColumnInfo. +func (col *Column) ToInfo() *model.ColumnInfo { + return &model.ColumnInfo{ + ID: col.ID, + FieldType: *col.RetType, + } +} + +// Column2Exprs will transfer column slice to expression slice. +func Column2Exprs(cols []*Column) []Expression { + result := make([]Expression, 0, len(cols)) + for _, col := range cols { + result = append(result, col) + } + return result +} + +// ColInfo2Col finds the corresponding column of the ColumnInfo in a column slice. 
+func ColInfo2Col(cols []*Column, col *model.ColumnInfo) *Column { + for _, c := range cols { + if c.ID == col.ID { + return c + } + } + return nil +} + +// indexCol2Col finds the corresponding column of the IndexColumn in a column slice. +func indexCol2Col(colInfos []*model.ColumnInfo, cols []*Column, col *model.IndexColumn) *Column { + for i, info := range colInfos { + if info.Name.L == col.Name.L { + return cols[i] + } + } + return nil +} + +// IndexInfo2PrefixCols gets the corresponding []*Column of the indexInfo's []*IndexColumn, +// together with a []int containing their lengths. +// If this index has three IndexColumn that the 1st and 3rd IndexColumn has corresponding *Column, +// the return value will be only the 1st corresponding *Column and its length. +// TODO: Use a struct to represent {*Column, int}. And merge IndexInfo2PrefixCols and IndexInfo2Cols. +func IndexInfo2PrefixCols(colInfos []*model.ColumnInfo, cols []*Column, index *model.IndexInfo) ([]*Column, []int) { + retCols := make([]*Column, 0, len(index.Columns)) + lengths := make([]int, 0, len(index.Columns)) + for _, c := range index.Columns { + col := indexCol2Col(colInfos, cols, c) + if col == nil { + return retCols, lengths + } + retCols = append(retCols, col) + if c.Length != types.UnspecifiedLength && c.Length == col.RetType.Flen { + lengths = append(lengths, types.UnspecifiedLength) + } else { + lengths = append(lengths, c.Length) + } + } + return retCols, lengths +} + +// IndexInfo2Cols gets the corresponding []*Column of the indexInfo's []*IndexColumn, +// together with a []int containing their lengths. +// If this index has three IndexColumn that the 1st and 3rd IndexColumn has corresponding *Column, +// the return value will be [col1, nil, col2]. +func IndexInfo2Cols(colInfos []*model.ColumnInfo, cols []*Column, index *model.IndexInfo) ([]*Column, []int) { + retCols := make([]*Column, 0, len(index.Columns)) + lens := make([]int, 0, len(index.Columns)) + for _, c := range index.Columns { + col := indexCol2Col(colInfos, cols, c) + if col == nil { + retCols = append(retCols, col) + lens = append(lens, types.UnspecifiedLength) + continue + } + retCols = append(retCols, col) + if c.Length != types.UnspecifiedLength && c.Length == col.RetType.Flen { + lens = append(lens, types.UnspecifiedLength) + } else { + lens = append(lens, c.Length) + } + } + return retCols, lens +} + +// FindPrefixOfIndex will find columns in index by checking the unique id. +// So it will return at once no matching column is found. +func FindPrefixOfIndex(cols []*Column, idxColIDs []int64) []*Column { + retCols := make([]*Column, 0, len(idxColIDs)) +idLoop: + for _, id := range idxColIDs { + for _, col := range cols { + if col.UniqueID == id { + retCols = append(retCols, col) + continue idLoop + } + } + // If no matching column is found, just return. + return retCols + } + return retCols +} diff --git a/expression/column_test.go b/expression/column_test.go new file mode 100644 index 0000000..1761637 --- /dev/null +++ b/expression/column_test.go @@ -0,0 +1,113 @@ +// Copyright 2017 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package expression + +import ( + . "github.com/pingcap/check" + "github.com/pingcap/tidb/parser/model" + "github.com/pingcap/tidb/parser/mysql" + "github.com/pingcap/tidb/types" + "github.com/pingcap/tidb/util/chunk" + "github.com/pingcap/tidb/util/mock" +) + +func (s *testEvaluatorSuite) TestColumnHashCode(c *C) { + col1 := &Column{ + UniqueID: 12, + } + c.Assert(col1.HashCode(nil), DeepEquals, []byte{0x1, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xc}) + + col2 := &Column{ + UniqueID: 2, + } + c.Assert(col2.HashCode(nil), DeepEquals, []byte{0x1, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2}) +} + +func (s *testEvaluatorSuite) TestColumn2Expr(c *C) { + cols := make([]*Column, 0, 5) + for i := 0; i < 5; i++ { + cols = append(cols, &Column{UniqueID: int64(i)}) + } + + exprs := Column2Exprs(cols) + for i := range exprs { + c.Assert(exprs[i].Equal(nil, cols[i]), IsTrue) + } +} + +func (s *testEvaluatorSuite) TestColInfo2Col(c *C) { + col0, col1 := &Column{ID: 0}, &Column{ID: 1} + cols := []*Column{col0, col1} + colInfo := &model.ColumnInfo{ID: 0} + res := ColInfo2Col(cols, colInfo) + c.Assert(res.Equal(nil, col1), IsTrue) + + colInfo.ID = 3 + res = ColInfo2Col(cols, colInfo) + c.Assert(res, IsNil) +} + +func (s *testEvaluatorSuite) TestIndexInfo2Cols(c *C) { + col0 := &Column{UniqueID: 0, ID: 0, RetType: types.NewFieldType(mysql.TypeLonglong)} + col1 := &Column{UniqueID: 1, ID: 1, RetType: types.NewFieldType(mysql.TypeLonglong)} + colInfo0 := &model.ColumnInfo{ID: 0, Name: model.NewCIStr("0")} + colInfo1 := &model.ColumnInfo{ID: 1, Name: model.NewCIStr("1")} + indexCol0, indexCol1 := &model.IndexColumn{Name: model.NewCIStr("0")}, &model.IndexColumn{Name: model.NewCIStr("1")} + indexInfo := &model.IndexInfo{Columns: []*model.IndexColumn{indexCol0, indexCol1}} + + cols := []*Column{col0} + colInfos := []*model.ColumnInfo{colInfo0} + resCols, lengths := IndexInfo2PrefixCols(colInfos, cols, indexInfo) + c.Assert(len(resCols), Equals, 1) + c.Assert(len(lengths), Equals, 1) + c.Assert(resCols[0].Equal(nil, col0), IsTrue) + + cols = []*Column{col1} + colInfos = []*model.ColumnInfo{colInfo1} + resCols, lengths = IndexInfo2PrefixCols(colInfos, cols, indexInfo) + c.Assert(len(resCols), Equals, 0) + c.Assert(len(lengths), Equals, 0) + + cols = []*Column{col0, col1} + colInfos = []*model.ColumnInfo{colInfo0, colInfo1} + resCols, lengths = IndexInfo2PrefixCols(colInfos, cols, indexInfo) + c.Assert(len(resCols), Equals, 2) + c.Assert(len(lengths), Equals, 2) + c.Assert(resCols[0].Equal(nil, col0), IsTrue) + c.Assert(resCols[1].Equal(nil, col1), IsTrue) +} + +func (s *testEvaluatorSuite) TestPadCharToFullLength(c *C) { + ctx := mock.NewContext() + ctx.GetSessionVars().StmtCtx.PadCharToFullLength = true + + ft := types.NewFieldType(mysql.TypeString) + ft.Flen = 10 + col := &Column{RetType: ft, Index: 0} + input := chunk.New([]*types.FieldType{ft}, 1024, 1024) + for i := 0; i < 1024; i++ { + input.AppendString(0, "xy") + } + result, err := newBuffer(types.ETString, 1024) + c.Assert(err, IsNil) + c.Assert(col.VecEvalString(ctx, input, result), IsNil) + + it := chunk.NewIterator4Chunk(input) + for row, i := it.Begin(), 0; row != it.End(); row, i = it.Next(), i+1 { + v, _, err 
:= col.EvalString(ctx, row) + c.Assert(err, IsNil) + c.Assert(len(v), Equals, ft.Flen) + c.Assert(v, Equals, result.GetString(i)) + } +} diff --git a/expression/constant.go b/expression/constant.go new file mode 100644 index 0000000..b49621a --- /dev/null +++ b/expression/constant.go @@ -0,0 +1,190 @@ +// Copyright 2017 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package expression + +import ( + "fmt" + + "github.com/pingcap/tidb/parser/mysql" + "github.com/pingcap/tidb/parser/terror" + "github.com/pingcap/tidb/sessionctx" + "github.com/pingcap/tidb/sessionctx/stmtctx" + "github.com/pingcap/tidb/types" + "github.com/pingcap/tidb/util/chunk" + "github.com/pingcap/tidb/util/codec" +) + +var ( + // One stands for a number 1. + One = &Constant{ + Value: types.NewDatum(1), + RetType: types.NewFieldType(mysql.TypeTiny), + } + + // Zero stands for a number 0. + Zero = &Constant{ + Value: types.NewDatum(0), + RetType: types.NewFieldType(mysql.TypeTiny), + } + + // Null stands for null constant. + Null = &Constant{ + Value: types.NewDatum(nil), + RetType: types.NewFieldType(mysql.TypeTiny), + } +) + +// Constant stands for a constant value. +type Constant struct { + Value types.Datum + RetType *types.FieldType + hashcode []byte +} + +// String implements fmt.Stringer interface. +func (c *Constant) String() string { + return fmt.Sprintf("%v", c.Value.GetValue()) +} + +// MarshalJSON implements json.Marshaler interface. +func (c *Constant) MarshalJSON() ([]byte, error) { + return []byte(fmt.Sprintf("\"%s\"", c)), nil +} + +// Clone implements Expression interface. +func (c *Constant) Clone() Expression { + return c +} + +// GetType implements Expression interface. +func (c *Constant) GetType() *types.FieldType { + return c.RetType +} + +// VecEvalInt evaluates this expression in a vectorized manner. +func (c *Constant) VecEvalInt(ctx sessionctx.Context, input *chunk.Chunk, result *chunk.Column) error { + return genVecFromConstExpr(ctx, c, types.ETInt, input, result) +} + +// VecEvalReal evaluates this expression in a vectorized manner. +func (c *Constant) VecEvalReal(ctx sessionctx.Context, input *chunk.Chunk, result *chunk.Column) error { + return genVecFromConstExpr(ctx, c, types.ETReal, input, result) +} + +// VecEvalString evaluates this expression in a vectorized manner. +func (c *Constant) VecEvalString(ctx sessionctx.Context, input *chunk.Chunk, result *chunk.Column) error { + return genVecFromConstExpr(ctx, c, types.ETString, input, result) +} + +// Eval implements Expression interface. +func (c *Constant) Eval(_ chunk.Row) (types.Datum, error) { + return c.Value, nil +} + +// EvalInt returns int representation of Constant. 
+func (c *Constant) EvalInt(ctx sessionctx.Context, _ chunk.Row) (int64, bool, error) { + if c.GetType().Tp == mysql.TypeNull || c.Value.IsNull() { + return 0, true, nil + } + if c.GetType().Hybrid() || c.Value.Kind() == types.KindString { + res, err := c.Value.ToInt64(ctx.GetSessionVars().StmtCtx) + return res, err != nil, err + } + return c.Value.GetInt64(), false, nil +} + +// EvalReal returns real representation of Constant. +func (c *Constant) EvalReal(ctx sessionctx.Context, _ chunk.Row) (float64, bool, error) { + if c.GetType().Tp == mysql.TypeNull || c.Value.IsNull() { + return 0, true, nil + } + if c.GetType().Hybrid() || c.Value.Kind() == types.KindString { + res, err := c.Value.ToFloat64(ctx.GetSessionVars().StmtCtx) + return res, err != nil, err + } + return c.Value.GetFloat64(), false, nil +} + +// EvalString returns string representation of Constant. +func (c *Constant) EvalString(ctx sessionctx.Context, _ chunk.Row) (string, bool, error) { + if c.GetType().Tp == mysql.TypeNull || c.Value.IsNull() { + return "", true, nil + } + res, err := c.Value.ToString() + return res, err != nil, err +} + +// Equal implements Expression interface. +func (c *Constant) Equal(ctx sessionctx.Context, b Expression) bool { + y, ok := b.(*Constant) + if !ok { + return false + } + _, err1 := y.Eval(chunk.Row{}) + _, err2 := c.Eval(chunk.Row{}) + if err1 != nil || err2 != nil { + return false + } + con, err := c.Value.CompareDatum(ctx.GetSessionVars().StmtCtx, &y.Value) + if err != nil || con != 0 { + return false + } + return true +} + +// IsCorrelated implements Expression interface. +func (c *Constant) IsCorrelated() bool { + return false +} + +// ConstItem implements Expression interface. +func (c *Constant) ConstItem() bool { + return true +} + +// Decorrelate implements Expression interface. +func (c *Constant) Decorrelate(_ *Schema) Expression { + return c +} + +// HashCode implements Expression interface. +func (c *Constant) HashCode(sc *stmtctx.StatementContext) []byte { + if len(c.hashcode) > 0 { + return c.hashcode + } + _, err := c.Eval(chunk.Row{}) + if err != nil { + terror.Log(err) + } + c.hashcode = append(c.hashcode, constantFlag) + c.hashcode, err = codec.EncodeValue(sc, c.hashcode, c.Value) + if err != nil { + terror.Log(err) + } + return c.hashcode +} + +// ResolveIndices implements Expression interface. +func (c *Constant) ResolveIndices(_ *Schema) (Expression, error) { + return c, nil +} + +func (c *Constant) resolveIndices(_ *Schema) error { + return nil +} + +// Vectorized returns if this expression supports vectorized evaluation. +func (c *Constant) Vectorized() bool { + return true +} diff --git a/expression/constant_fold.go b/expression/constant_fold.go new file mode 100644 index 0000000..e898d83 --- /dev/null +++ b/expression/constant_fold.go @@ -0,0 +1,84 @@ +// Copyright 2016 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. 
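The next file, constant_fold.go, implements constant folding over TiDB's Expression trees. As a rough mental model only, using simplified stand-in types rather than TiDB's Expression and Constant API, folding walks an expression bottom-up and collapses any operator whose operands are all constants:

package main

import "fmt"

// expr is a simplified stand-in for an expression tree node.
type expr interface{}

type constant struct{ val int64 }

type binOp struct {
	op   string // "+" or "*"
	l, r expr
}

// fold collapses sub-trees whose operands are all constants.
func fold(e expr) expr {
	b, ok := e.(*binOp)
	if !ok {
		return e // constants (and, in the real code, columns) are returned unchanged
	}
	b.l, b.r = fold(b.l), fold(b.r)
	lc, lok := b.l.(*constant)
	rc, rok := b.r.(*constant)
	if !lok || !rok {
		return b // at least one operand is not constant, keep the operator node
	}
	switch b.op {
	case "+":
		return &constant{val: lc.val + rc.val}
	case "*":
		return &constant{val: lc.val * rc.val}
	}
	return b
}

func main() {
	// (1 + 2) * 4 folds to the single constant 12.
	e := &binOp{op: "*", l: &binOp{op: "+", l: &constant{1}, r: &constant{2}}, r: &constant{4}}
	fmt.Printf("%+v\n", fold(e))
}

The real implementation below additionally skips unfoldable functions and handles NULL-rejection checks, which the sketch leaves out.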
+ +package expression + +import ( + "github.com/pingcap/tidb/util/chunk" + "github.com/pingcap/tidb/util/logutil" + "go.uber.org/zap" +) + +// FoldConstant does constant folding optimization on an expression excluding deferred ones. +func FoldConstant(expr Expression) Expression { + return foldConstant(expr) +} + +func foldConstant(expr Expression) Expression { + switch x := expr.(type) { + case *ScalarFunction: + if _, ok := unFoldableFunctions[x.FuncName.L]; ok { + return expr + } + + args := x.GetArgs() + sc := x.GetCtx().GetSessionVars().StmtCtx + argIsConst := make([]bool, len(args)) + hasNullArg := false + allConstArg := true + for i := 0; i < len(args); i++ { + switch x := args[i].(type) { + case *Constant: + argIsConst[i] = true + hasNullArg = hasNullArg || x.Value.IsNull() + default: + allConstArg = false + } + } + if !allConstArg { + if !hasNullArg || !sc.InNullRejectCheck { + return expr + } + constArgs := make([]Expression, len(args)) + for i, arg := range args { + if argIsConst[i] { + constArgs[i] = arg + } else { + constArgs[i] = One + } + } + dummyScalarFunc, err := NewFunctionBase(x.GetCtx(), x.FuncName.L, x.GetType(), constArgs...) + if err != nil { + return expr + } + value, err := dummyScalarFunc.Eval(chunk.Row{}) + if err != nil { + return expr + } + if value.IsNull() { + return &Constant{Value: value, RetType: x.RetType} + } + if isTrue, err := value.ToBool(sc); err == nil && isTrue == 0 { + return &Constant{Value: value, RetType: x.RetType} + } + return expr + } + value, err := x.Eval(chunk.Row{}) + if err != nil { + logutil.BgLogger().Debug("fold expression to constant", zap.String("expression", x.ExplainInfo()), zap.Error(err)) + return expr + } + return &Constant{Value: value, RetType: x.RetType} + } + return expr +} diff --git a/expression/constant_propagation.go b/expression/constant_propagation.go new file mode 100644 index 0000000..5d0cac5 --- /dev/null +++ b/expression/constant_propagation.go @@ -0,0 +1,580 @@ +// Copyright 2016 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package expression + +import ( + "github.com/pingcap/tidb/parser/ast" + "github.com/pingcap/tidb/parser/mysql" + "github.com/pingcap/tidb/parser/terror" + "github.com/pingcap/tidb/sessionctx" + "github.com/pingcap/tidb/types" + "github.com/pingcap/tidb/util/chunk" + "github.com/pingcap/tidb/util/disjointset" + "github.com/pingcap/tidb/util/logutil" + "go.uber.org/zap" +) + +// MaxPropagateColsCnt means the max number of columns that can participate propagation. 
+var MaxPropagateColsCnt = 100
+
+type basePropConstSolver struct {
+	colMapper map[int64]int       // colMapper maps column to its index
+	eqList    []*Constant         // if eqList[i] != nil, it means col_i = eqList[i]
+	unionSet  *disjointset.IntSet // unionSet stores the relations like col_i = col_j
+	columns   []*Column           // columns stores all columns appearing in the conditions
+	ctx       sessionctx.Context
+}
+
+func (s *basePropConstSolver) getColID(col *Column) int {
+	return s.colMapper[col.UniqueID]
+}
+
+func (s *basePropConstSolver) insertCol(col *Column) {
+	_, ok := s.colMapper[col.UniqueID]
+	if !ok {
+		s.colMapper[col.UniqueID] = len(s.colMapper)
+		s.columns = append(s.columns, col)
+	}
+}
+
+// tryToUpdateEQList tries to update the eqList. When the eqList has already stored a different constant
+// for this column, like a = 1 and a = 2, the condition can never hold, so we set the second return value to true.
+func (s *basePropConstSolver) tryToUpdateEQList(col *Column, con *Constant) (bool, bool) {
+	if con.Value.IsNull() {
+		return false, true
+	}
+	id := s.getColID(col)
+	oldCon := s.eqList[id]
+	if oldCon != nil {
+		return false, !oldCon.Equal(s.ctx, con)
+	}
+	s.eqList[id] = con
+	return true, false
+}
+
+// validEqualCond checks if the cond is an expression like [column eq constant].
+func validEqualCond(cond Expression) (*Column, *Constant) {
+	if eq, ok := cond.(*ScalarFunction); ok {
+		if eq.FuncName.L != ast.EQ {
+			return nil, nil
+		}
+		if col, colOk := eq.GetArgs()[0].(*Column); colOk {
+			if con, conOk := eq.GetArgs()[1].(*Constant); conOk {
+				return col, con
+			}
+		}
+		if col, colOk := eq.GetArgs()[1].(*Column); colOk {
+			if con, conOk := eq.GetArgs()[0].(*Constant); conOk {
+				return col, con
+			}
+		}
+	}
+	return nil, nil
+}
+
+// tryToReplaceCond tries to replace all occurrences of column 'src' in 'cond' with 'tgt'.
+// It returns
+// bool: if a replacement happened
+// bool: if 'cond' contains a non-deterministic expression
+// Expression: the replaced expression, or the original 'cond' if no replacement happened
+//
+// For example:
+// for 'a, b, a < 3', it returns 'true, false, b < 3'
+// for 'a, b, sin(a) + cos(a) = 5', it returns 'true, false, sin(b) + cos(b) = 5'
+// for 'a, b, cast(a) < rand()', it returns 'false, true, cast(a) < rand()'
+func tryToReplaceCond(ctx sessionctx.Context, src *Column, tgt *Column, cond Expression) (bool, bool, Expression) {
+	sf, ok := cond.(*ScalarFunction)
+	if !ok {
+		return false, false, cond
+	}
+	replaced := false
+	var args []Expression
+	if _, ok := unFoldableFunctions[sf.FuncName.L]; ok {
+		return false, true, cond
+	}
+	if _, ok := inequalFunctions[sf.FuncName.L]; ok {
+		return false, true, cond
+	}
+	for idx, expr := range sf.GetArgs() {
+		if src.Equal(nil, expr) {
+			replaced = true
+			if args == nil {
+				args = make([]Expression, len(sf.GetArgs()))
+				copy(args, sf.GetArgs())
+			}
+			args[idx] = tgt
+		} else {
+			subReplaced, isNonDeterministic, subExpr := tryToReplaceCond(ctx, src, tgt, expr)
+			if isNonDeterministic {
+				return false, true, cond
+			} else if subReplaced {
+				replaced = true
+				if args == nil {
+					args = make([]Expression, len(sf.GetArgs()))
+					copy(args, sf.GetArgs())
+				}
+				args[idx] = subExpr
+			}
+		}
+	}
+	if replaced {
+		return true, false, NewFunctionInternal(ctx, sf.FuncName.L, sf.GetType(), args...)
+ } + return false, false, cond +} + +type propConstSolver struct { + basePropConstSolver + conditions []Expression +} + +// propagateConstantEQ propagates expressions like 'column = constant' by substituting the constant for column, the +// procedure repeats multiple times. An example runs as following: +// a = d & b * 2 = c & c = d + 2 & b = 1 & a = 4, we pick eq cond b = 1 and a = 4 +// d = 4 & 2 = c & c = d + 2 & b = 1 & a = 4, we propagate b = 1 and a = 4 and pick eq cond c = 2 and d = 4 +// d = 4 & 2 = c & false & b = 1 & a = 4, we propagate c = 2 and d = 4, and do constant folding: c = d + 2 will be folded as false. +func (s *propConstSolver) propagateConstantEQ() { + s.eqList = make([]*Constant, len(s.columns)) + visited := make([]bool, len(s.conditions)) + for i := 0; i < MaxPropagateColsCnt; i++ { + mapper := s.pickNewEQConds(visited) + if len(mapper) == 0 { + return + } + cols := make([]*Column, 0, len(mapper)) + cons := make([]Expression, 0, len(mapper)) + for id, con := range mapper { + cols = append(cols, s.columns[id]) + cons = append(cons, con) + } + for i, cond := range s.conditions { + if !visited[i] { + s.conditions[i] = ColumnSubstitute(cond, NewSchema(cols...), cons) + } + } + } +} + +// propagateColumnEQ propagates expressions like 'column A = column B' by adding extra filters +// 'expression(..., column B, ...)' propagated from 'expression(..., column A, ...)' as long as: +// +// 1. The expression is deterministic +// 2. The expression doesn't have any side effect +// +// e.g. For expression a = b and b = c and c = d and c < 1 , we can get extra a < 1 and b < 1 and d < 1. +// However, for a = b and a < rand(), we cannot propagate a < rand() to b < rand() because rand() is non-deterministic +// +// This propagation may bring redundancies that we need to resolve later, for example: +// for a = b and a < 3 and b < 3, we get new a < 3 and b < 3, which are redundant +// for a = b and a < 3 and 3 > b, we get new b < 3 and 3 > a, which are redundant +// for a = b and a < 3 and b < 4, we get new a < 4 and b < 3 but should expect a < 3 and b < 3 +// for a = b and a in (3) and b in (4), we get b in (3) and a in (4) but should expect 'false' +// +// TODO: remove redundancies later +// +// We maintain a unionSet representing the equivalent for every two columns. 
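The comment above describes keeping column equivalence classes in a disjoint set so that a predicate on one column can be copied to every column in the same class. A standalone sketch of that bookkeeping, a plain union-find with path compression rather than TiDB's util/disjointset package, might look like this:

package main

import "fmt"

// intSet is a minimal union-find over integer column IDs.
type intSet struct{ parent []int }

func newIntSet(n int) *intSet {
	p := make([]int, n)
	for i := range p {
		p[i] = i
	}
	return &intSet{parent: p}
}

func (s *intSet) findRoot(x int) int {
	if s.parent[x] != x {
		s.parent[x] = s.findRoot(s.parent[x]) // path compression
	}
	return s.parent[x]
}

func (s *intSet) union(a, b int) {
	s.parent[s.findRoot(a)] = s.findRoot(b)
}

func main() {
	// Columns 0..3; conditions a=b and b=c put 0, 1, 2 into one class, 3 stays alone.
	s := newIntSet(4)
	s.union(0, 1)
	s.union(1, 2)
	fmt.Println(s.findRoot(0) == s.findRoot(2)) // true: a predicate on a may be copied to c
	fmt.Println(s.findRoot(0) == s.findRoot(3)) // false: no relation, nothing is propagated
}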
+func (s *propConstSolver) propagateColumnEQ() { + visited := make([]bool, len(s.conditions)) + s.unionSet = disjointset.NewIntSet(len(s.columns)) + for i := range s.conditions { + if fun, ok := s.conditions[i].(*ScalarFunction); ok && fun.FuncName.L == ast.EQ { + lCol, lOk := fun.GetArgs()[0].(*Column) + rCol, rOk := fun.GetArgs()[1].(*Column) + if lOk && rOk { + lID := s.getColID(lCol) + rID := s.getColID(rCol) + s.unionSet.Union(lID, rID) + visited[i] = true + } + } + } + + condsLen := len(s.conditions) + for i, coli := range s.columns { + for j := i + 1; j < len(s.columns); j++ { + // unionSet doesn't have iterate(), we use a two layer loop to iterate col_i = col_j relation + if s.unionSet.FindRoot(i) != s.unionSet.FindRoot(j) { + continue + } + colj := s.columns[j] + for k := 0; k < condsLen; k++ { + if visited[k] { + // cond_k has been used to retrieve equality relation + continue + } + cond := s.conditions[k] + replaced, _, newExpr := tryToReplaceCond(s.ctx, coli, colj, cond) + if replaced { + s.conditions = append(s.conditions, newExpr) + } + replaced, _, newExpr = tryToReplaceCond(s.ctx, colj, coli, cond) + if replaced { + s.conditions = append(s.conditions, newExpr) + } + } + } + } +} + +func (s *propConstSolver) setConds2ConstFalse() { + s.conditions = []Expression{&Constant{ + Value: types.NewDatum(false), + RetType: types.NewFieldType(mysql.TypeTiny), + }} +} + +// pickNewEQConds tries to pick new equal conds and puts them to retMapper. +func (s *propConstSolver) pickNewEQConds(visited []bool) (retMapper map[int]*Constant) { + retMapper = make(map[int]*Constant) + for i, cond := range s.conditions { + if visited[i] { + continue + } + col, con := validEqualCond(cond) + // Then we check if this CNF item is a false constant. If so, we will set the whole condition to false. + var ok bool + if col == nil { + if con, ok = cond.(*Constant); ok { + value, _, err := EvalBool(s.ctx, []Expression{con}, chunk.Row{}) + if err != nil { + terror.Log(err) + return nil + } + if !value { + s.setConds2ConstFalse() + return nil + } + } + continue + } + visited[i] = true + updated, foreverFalse := s.tryToUpdateEQList(col, con) + if foreverFalse { + s.setConds2ConstFalse() + return nil + } + if updated { + retMapper[s.getColID(col)] = con + } + } + return +} + +func (s *propConstSolver) solve(conditions []Expression) []Expression { + cols := make([]*Column, 0, len(conditions)) + for _, cond := range conditions { + s.conditions = append(s.conditions, SplitCNFItems(cond)...) + cols = append(cols, ExtractColumns(cond)...) + } + for _, col := range cols { + s.insertCol(col) + } + if len(s.columns) > MaxPropagateColsCnt { + logutil.BgLogger().Warn("too many columns in a single CNF", + zap.Int("numCols", len(s.columns)), + zap.Int("maxNumCols", MaxPropagateColsCnt), + ) + return conditions + } + s.propagateConstantEQ() + s.propagateColumnEQ() + s.conditions = propagateConstantDNF(s.ctx, s.conditions) + return s.conditions +} + +// PropagateConstant propagate constant values of deterministic predicates in a condition. 
+func PropagateConstant(ctx sessionctx.Context, conditions []Expression) []Expression { + return newPropConstSolver().PropagateConstant(ctx, conditions) +} + +type propOuterJoinConstSolver struct { + basePropConstSolver + joinConds []Expression + filterConds []Expression + outerSchema *Schema + innerSchema *Schema +} + +func (s *propOuterJoinConstSolver) setConds2ConstFalse(filterConds bool) { + s.joinConds = []Expression{&Constant{ + Value: types.NewDatum(false), + RetType: types.NewFieldType(mysql.TypeTiny), + }} + if filterConds { + s.filterConds = []Expression{&Constant{ + Value: types.NewDatum(false), + RetType: types.NewFieldType(mysql.TypeTiny), + }} + } +} + +// pickEQCondsOnOuterCol picks constant equal expression from specified conditions. +func (s *propOuterJoinConstSolver) pickEQCondsOnOuterCol(retMapper map[int]*Constant, visited []bool, filterConds bool) map[int]*Constant { + var conds []Expression + var condsOffset int + if filterConds { + conds = s.filterConds + } else { + conds = s.joinConds + condsOffset = len(s.filterConds) + } + for i, cond := range conds { + if visited[i+condsOffset] { + continue + } + col, con := validEqualCond(cond) + // Then we check if this CNF item is a false constant. If so, we will set the whole condition to false. + var ok bool + if col == nil { + if con, ok = cond.(*Constant); ok { + value, _, err := EvalBool(s.ctx, []Expression{con}, chunk.Row{}) + if err != nil { + terror.Log(err) + return nil + } + if !value { + s.setConds2ConstFalse(filterConds) + return nil + } + } + continue + } + // Only extract `outerCol = const` expressions. + if !s.outerSchema.Contains(col) { + continue + } + visited[i+condsOffset] = true + updated, foreverFalse := s.tryToUpdateEQList(col, con) + if foreverFalse { + s.setConds2ConstFalse(filterConds) + return nil + } + if updated { + retMapper[s.getColID(col)] = con + } + } + return retMapper +} + +// pickNewEQConds picks constant equal expressions from join and filter conditions. +func (s *propOuterJoinConstSolver) pickNewEQConds(visited []bool) map[int]*Constant { + retMapper := make(map[int]*Constant) + retMapper = s.pickEQCondsOnOuterCol(retMapper, visited, true) + if retMapper == nil { + // Filter is constant false or error occurred, enforce early termination. + return nil + } + retMapper = s.pickEQCondsOnOuterCol(retMapper, visited, false) + return retMapper +} + +// propagateConstantEQ propagates expressions like `outerCol = const` by substituting `outerCol` in *JOIN* condition +// with `const`, the procedure repeats multiple times. 
+func (s *propOuterJoinConstSolver) propagateConstantEQ() { + s.eqList = make([]*Constant, len(s.columns)) + lenFilters := len(s.filterConds) + visited := make([]bool, lenFilters+len(s.joinConds)) + for i := 0; i < MaxPropagateColsCnt; i++ { + mapper := s.pickNewEQConds(visited) + if len(mapper) == 0 { + return + } + cols := make([]*Column, 0, len(mapper)) + cons := make([]Expression, 0, len(mapper)) + for id, con := range mapper { + cols = append(cols, s.columns[id]) + cons = append(cons, con) + } + for i, cond := range s.joinConds { + if !visited[i+lenFilters] { + s.joinConds[i] = ColumnSubstitute(cond, NewSchema(cols...), cons) + } + } + } +} + +func (s *propOuterJoinConstSolver) colsFromOuterAndInner(col1, col2 *Column) (*Column, *Column) { + if s.outerSchema.Contains(col1) && s.innerSchema.Contains(col2) { + return col1, col2 + } + if s.outerSchema.Contains(col2) && s.innerSchema.Contains(col1) { + return col2, col1 + } + return nil, nil +} + +// validColEqualCond checks if expression is column equal condition that we can use for constant +// propagation over outer join. We only use expression like `outerCol = innerCol`, for expressions like +// `outerCol1 = outerCol2` or `innerCol1 = innerCol2`, they do not help deriving new inner table conditions +// which can be pushed down to children plan nodes, so we do not pick them. +func (s *propOuterJoinConstSolver) validColEqualCond(cond Expression) (*Column, *Column) { + if fun, ok := cond.(*ScalarFunction); ok && fun.FuncName.L == ast.EQ { + lCol, lOk := fun.GetArgs()[0].(*Column) + rCol, rOk := fun.GetArgs()[1].(*Column) + if lOk && rOk { + return s.colsFromOuterAndInner(lCol, rCol) + } + } + return nil, nil + +} + +// deriveConds given `outerCol = innerCol`, derive new expression for specified conditions. +func (s *propOuterJoinConstSolver) deriveConds(outerCol, innerCol *Column, schema *Schema, fCondsOffset int, visited []bool, filterConds bool) []bool { + var offset, condsLen int + var conds []Expression + if filterConds { + conds = s.filterConds + offset = fCondsOffset + condsLen = len(s.filterConds) + } else { + conds = s.joinConds + condsLen = fCondsOffset + } + for k := 0; k < condsLen; k++ { + if visited[k+offset] { + // condition has been used to retrieve equality relation or contains column beyond children schema. + continue + } + cond := conds[k] + if !ExprFromSchema(cond, schema) { + visited[k+offset] = true + continue + } + replaced, _, newExpr := tryToReplaceCond(s.ctx, outerCol, innerCol, cond) + if replaced { + s.joinConds = append(s.joinConds, newExpr) + } + } + return visited +} + +// propagateColumnEQ propagates expressions like 'outerCol = innerCol' by adding extra filters +// 'expression(..., innerCol, ...)' derived from 'expression(..., outerCol, ...)' as long as +// 'expression(..., outerCol, ...)' does not reference columns outside children schemas of join node. +// Derived new expressions must be appended into join condition, not filter condition. +func (s *propOuterJoinConstSolver) propagateColumnEQ() { + visited := make([]bool, 2*len(s.joinConds)+len(s.filterConds)) + s.unionSet = disjointset.NewIntSet(len(s.columns)) + var outerCol, innerCol *Column + // Only consider column equal condition in joinConds. + // If we have column equal in filter condition, the outer join should have been simplified already. 
+ for i := range s.joinConds { + outerCol, innerCol = s.validColEqualCond(s.joinConds[i]) + if outerCol != nil { + outerID := s.getColID(outerCol) + innerID := s.getColID(innerCol) + s.unionSet.Union(outerID, innerID) + visited[i] = true + childCol := s.innerSchema.RetrieveColumn(innerCol) + if !mysql.HasNotNullFlag(childCol.RetType.Flag) { + notNullExpr := BuildNotNullExpr(s.ctx, childCol) + s.joinConds = append(s.joinConds, notNullExpr) + } + } + } + lenJoinConds := len(s.joinConds) + mergedSchema := MergeSchema(s.outerSchema, s.innerSchema) + for i, coli := range s.columns { + for j := i + 1; j < len(s.columns); j++ { + // unionSet doesn't have iterate(), we use a two layer loop to iterate col_i = col_j relation. + if s.unionSet.FindRoot(i) != s.unionSet.FindRoot(j) { + continue + } + colj := s.columns[j] + outerCol, innerCol = s.colsFromOuterAndInner(coli, colj) + if outerCol == nil { + continue + } + visited = s.deriveConds(outerCol, innerCol, mergedSchema, lenJoinConds, visited, false) + visited = s.deriveConds(outerCol, innerCol, mergedSchema, lenJoinConds, visited, true) + } + } +} + +func (s *propOuterJoinConstSolver) solve(joinConds, filterConds []Expression) ([]Expression, []Expression) { + cols := make([]*Column, 0, len(joinConds)+len(filterConds)) + for _, cond := range joinConds { + s.joinConds = append(s.joinConds, SplitCNFItems(cond)...) + cols = append(cols, ExtractColumns(cond)...) + } + for _, cond := range filterConds { + s.filterConds = append(s.filterConds, SplitCNFItems(cond)...) + cols = append(cols, ExtractColumns(cond)...) + } + for _, col := range cols { + s.insertCol(col) + } + if len(s.columns) > MaxPropagateColsCnt { + logutil.BgLogger().Warn("too many columns", + zap.Int("numCols", len(s.columns)), + zap.Int("maxNumCols", MaxPropagateColsCnt), + ) + return joinConds, filterConds + } + s.propagateConstantEQ() + s.propagateColumnEQ() + s.joinConds = propagateConstantDNF(s.ctx, s.joinConds) + s.filterConds = propagateConstantDNF(s.ctx, s.filterConds) + return s.joinConds, s.filterConds +} + +// propagateConstantDNF find DNF item from CNF, and propagate constant inside DNF. +func propagateConstantDNF(ctx sessionctx.Context, conds []Expression) []Expression { + for i, cond := range conds { + if dnf, ok := cond.(*ScalarFunction); ok && dnf.FuncName.L == ast.LogicOr { + dnfItems := SplitDNFItems(cond) + for j, item := range dnfItems { + dnfItems[j] = ComposeCNFCondition(ctx, PropagateConstant(ctx, []Expression{item})...) + } + conds[i] = ComposeDNFCondition(ctx, dnfItems...) + } + } + return conds +} + +// PropConstOverOuterJoin propagate constant equal and column equal conditions over outer join. +// First step is to extract `outerCol = const` from join conditions and filter conditions, +// and substitute `outerCol` in join conditions with `const`; +// Second step is to extract `outerCol = innerCol` from join conditions, and derive new join +// conditions based on this column equal condition and `outerCol` related +// expressions in join conditions and filter conditions; +func PropConstOverOuterJoin(ctx sessionctx.Context, joinConds, filterConds []Expression, + outerSchema, innerSchema *Schema) ([]Expression, []Expression) { + solver := &propOuterJoinConstSolver{ + outerSchema: outerSchema, + innerSchema: innerSchema, + } + solver.colMapper = make(map[int64]int) + solver.ctx = ctx + return solver.solve(joinConds, filterConds) +} + +// PropagateConstantSolver is a constant propagate solver. 
+type PropagateConstantSolver interface { + PropagateConstant(ctx sessionctx.Context, conditions []Expression) []Expression +} + +// newPropConstSolver returns a PropagateConstantSolver. +func newPropConstSolver() PropagateConstantSolver { + solver := &propConstSolver{} + solver.colMapper = make(map[int64]int) + return solver +} + +// PropagateConstant propagate constant values of deterministic predicates in a condition. +func (s *propConstSolver) PropagateConstant(ctx sessionctx.Context, conditions []Expression) []Expression { + s.ctx = ctx + return s.solve(conditions) +} diff --git a/expression/constant_propagation_test.go b/expression/constant_propagation_test.go new file mode 100644 index 0000000..7419203 --- /dev/null +++ b/expression/constant_propagation_test.go @@ -0,0 +1,70 @@ +// Copyright 2018 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package expression_test + +import ( + . "github.com/pingcap/check" + "github.com/pingcap/tidb/domain" + "github.com/pingcap/tidb/kv" + "github.com/pingcap/tidb/sessionctx" + "github.com/pingcap/tidb/util/mock" + "github.com/pingcap/tidb/util/testkit" + "github.com/pingcap/tidb/util/testutil" +) + +var _ = Suite(&testSuite{}) + +type testSuite struct { + store kv.Storage + dom *domain.Domain + ctx sessionctx.Context + testData testutil.TestData +} + +func (s *testSuite) SetUpSuite(c *C) { + var err error + s.store, s.dom, err = newStoreWithBootstrap() + c.Assert(err, IsNil) + s.ctx = mock.NewContext() + s.testData, err = testutil.LoadTestSuiteData("testdata", "expression_suite") + c.Assert(err, IsNil) +} + +func (s *testSuite) TearDownSuite(c *C) { + c.Assert(s.testData.GenerateOutputIfNeeded(), IsNil) + s.dom.Close() + s.store.Close() +} + +func (s *testSuite) TestOuterJoinPropConst(c *C) { + tk := testkit.NewTestKit(c, s.store) + tk.MustExec("use test") + tk.MustExec("drop table if exists t1, t2;") + tk.MustExec("create table t1(id bigint primary key, a int, b int);") + tk.MustExec("create table t2(id bigint primary key, a int, b int);") + + var input []string + var output []struct { + SQL string + Result []string + } + s.testData.GetTestCases(c, &input, &output) + for i, tt := range input { + s.testData.OnRecord(func() { + output[i].SQL = tt + output[i].Result = s.testData.ConvertRowsToStrings(tk.MustQuery(tt).Rows()) + }) + tk.MustQuery(tt).Check(testkit.Rows(output[i].Result...)) + } +} diff --git a/expression/constant_test.go b/expression/constant_test.go new file mode 100644 index 0000000..7c9d61c --- /dev/null +++ b/expression/constant_test.go @@ -0,0 +1,189 @@ +// Copyright 2016 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. 
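TestOuterJoinPropConst above drives its cases from a testdata file and can regenerate the expected output when run in record mode. A minimal standalone sketch of that record-or-compare pattern follows; the file name, struct, and RECORD switch are illustrative only and are not TiDB's testutil API:

package main

import (
	"encoding/json"
	"fmt"
	"io/ioutil"
	"os"
	"reflect"
)

// caseResult mirrors the shape of one recorded test case (field names are illustrative).
type caseResult struct {
	SQL    string   `json:"sql"`
	Result []string `json:"result"`
}

// checkOrRecord rewrites the golden file when RECORD is set, otherwise compares against it.
func checkOrRecord(path string, got []caseResult) error {
	if os.Getenv("RECORD") != "" {
		out, err := json.MarshalIndent(got, "", "  ")
		if err != nil {
			return err
		}
		return ioutil.WriteFile(path, out, 0644)
	}
	raw, err := ioutil.ReadFile(path)
	if err != nil {
		return err
	}
	var want []caseResult
	if err := json.Unmarshal(raw, &want); err != nil {
		return err
	}
	if !reflect.DeepEqual(want, got) {
		return fmt.Errorf("golden file mismatch: want %v, got %v", want, got)
	}
	return nil
}

func main() {
	got := []caseResult{{SQL: "select 1", Result: []string{"1"}}}
	if err := checkOrRecord("golden.json", got); err != nil {
		fmt.Println(err)
	}
}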
+ +package expression + +import ( + . "github.com/pingcap/check" + "github.com/pingcap/tidb/parser/ast" + "github.com/pingcap/tidb/parser/mysql" + "github.com/pingcap/tidb/types" + "github.com/pingcap/tidb/util/mock" + "sort" + "strings" +) + +var _ = Suite(&testExpressionSuite{}) + +type testExpressionSuite struct{} + +func newColumn(id int) *Column { + return newColumnWithType(id, types.NewFieldType(mysql.TypeLonglong)) +} + +func newColumnWithType(id int, t *types.FieldType) *Column { + return &Column{ + UniqueID: int64(id), + RetType: t, + } +} + +func newLonglong(value int64) *Constant { + return &Constant{ + Value: types.NewIntDatum(value), + RetType: types.NewFieldType(mysql.TypeLonglong), + } +} + +func newFunction(funcName string, args ...Expression) Expression { + typeLong := types.NewFieldType(mysql.TypeLonglong) + return NewFunctionInternal(mock.NewContext(), funcName, typeLong, args...) +} + +func (*testExpressionSuite) TestConstantPropagation(c *C) { + tests := []struct { + solver []PropagateConstantSolver + conditions []Expression + result string + }{ + { + solver: []PropagateConstantSolver{newPropConstSolver(), pgSolver2{}}, + conditions: []Expression{ + newFunction(ast.EQ, newColumn(0), newColumn(1)), + newFunction(ast.EQ, newColumn(1), newColumn(2)), + newFunction(ast.EQ, newColumn(2), newColumn(3)), + newFunction(ast.EQ, newColumn(3), newLonglong(1)), + newFunction(ast.LogicOr, newLonglong(1), newColumn(0)), + }, + result: "1, eq(Column#0, 1), eq(Column#1, 1), eq(Column#2, 1), eq(Column#3, 1)", + }, + { + solver: []PropagateConstantSolver{newPropConstSolver(), pgSolver2{}}, + conditions: []Expression{ + newFunction(ast.EQ, newColumn(0), newColumn(1)), + newFunction(ast.EQ, newColumn(1), newLonglong(1)), + newFunction(ast.NE, newColumn(2), newLonglong(2)), + }, + result: "eq(Column#0, 1), eq(Column#1, 1), ne(Column#2, 2)", + }, + { + solver: []PropagateConstantSolver{newPropConstSolver()}, + conditions: []Expression{ + newFunction(ast.EQ, newColumn(0), newColumn(1)), + newFunction(ast.EQ, newColumn(1), newLonglong(1)), + newFunction(ast.EQ, newColumn(2), newColumn(3)), + newFunction(ast.GE, newColumn(2), newLonglong(2)), + newFunction(ast.NE, newColumn(2), newLonglong(4)), + newFunction(ast.NE, newColumn(3), newLonglong(5)), + }, + result: "eq(Column#0, 1), eq(Column#1, 1), eq(Column#2, Column#3), ge(Column#2, 2), ge(Column#3, 2), ne(Column#2, 4), ne(Column#2, 5), ne(Column#3, 4), ne(Column#3, 5)", + }, + { + solver: []PropagateConstantSolver{newPropConstSolver()}, + conditions: []Expression{ + newFunction(ast.EQ, newColumn(0), newColumn(1)), + newFunction(ast.EQ, newColumn(0), newColumn(2)), + newFunction(ast.GE, newColumn(1), newLonglong(0)), + }, + result: "eq(Column#0, Column#1), eq(Column#0, Column#2), ge(Column#0, 0), ge(Column#1, 0), ge(Column#2, 0)", + }, + { + solver: []PropagateConstantSolver{newPropConstSolver()}, + conditions: []Expression{ + newFunction(ast.EQ, newColumn(0), newColumn(1)), + newFunction(ast.GT, newColumn(0), newLonglong(2)), + newFunction(ast.GT, newColumn(1), newLonglong(3)), + newFunction(ast.LT, newColumn(0), newLonglong(1)), + newFunction(ast.GT, newLonglong(2), newColumn(1)), + }, + result: "eq(Column#0, Column#1), gt(2, Column#0), gt(2, Column#1), gt(Column#0, 2), gt(Column#0, 3), gt(Column#1, 2), gt(Column#1, 3), lt(Column#0, 1), lt(Column#1, 1)", + }, + { + solver: []PropagateConstantSolver{newPropConstSolver(), pgSolver2{}}, + conditions: []Expression{ + newFunction(ast.EQ, newLonglong(1), newColumn(0)), + newLonglong(0), + }, + 
result: "0", + }, + { + solver: []PropagateConstantSolver{newPropConstSolver()}, + conditions: []Expression{ + newFunction(ast.EQ, newColumn(0), newColumn(1)), + newFunction(ast.In, newColumn(0), newLonglong(1), newLonglong(2)), + newFunction(ast.In, newColumn(1), newLonglong(3), newLonglong(4)), + }, + result: "eq(Column#0, Column#1), in(Column#0, 1, 2), in(Column#0, 3, 4), in(Column#1, 1, 2), in(Column#1, 3, 4)", + }, + { + solver: []PropagateConstantSolver{newPropConstSolver()}, + conditions: []Expression{ + newFunction(ast.EQ, newColumn(0), newColumn(1)), + newFunction(ast.LE, newFunction(ast.Mul, newColumn(0), newColumn(0)), newLonglong(50)), + }, + result: "eq(Column#0, Column#1), le(mul(Column#0, Column#0), 50), le(mul(Column#1, Column#1), 50)", + }, + { + solver: []PropagateConstantSolver{newPropConstSolver()}, + conditions: []Expression{ + newFunction(ast.EQ, newColumn(0), newColumn(1)), + newFunction(ast.LE, newColumn(0), newFunction(ast.Plus, newColumn(1), newLonglong(1))), + }, + result: "eq(Column#0, Column#1), le(Column#0, plus(Column#0, 1)), le(Column#0, plus(Column#1, 1)), le(Column#1, plus(Column#1, 1))", + }, + } + for _, tt := range tests { + for _, solver := range tt.solver { + ctx := mock.NewContext() + conds := make([]Expression, 0, len(tt.conditions)) + for _, cd := range tt.conditions { + conds = append(conds, FoldConstant(cd)) + } + newConds := solver.PropagateConstant(ctx, conds) + var result []string + for _, v := range newConds { + result = append(result, v.String()) + } + sort.Strings(result) + c.Assert(strings.Join(result, ", "), Equals, tt.result, Commentf("different for expr %s", tt.conditions)) + } + } +} + +func (*testExpressionSuite) TestConstantFolding(c *C) { + tests := []struct { + condition Expression + result string + }{ + { + condition: newFunction(ast.LT, newColumn(0), newFunction(ast.Plus, newLonglong(1), newLonglong(2))), + result: "lt(Column#0, 3)", + }, + { + condition: newFunction(ast.IsNull, newLonglong(1)), + result: "0", + }, + { + condition: newFunction(ast.EQ, newColumn(0), newFunction(ast.UnaryNot, newFunction(ast.Plus, newLonglong(1), newLonglong(1)))), + result: "eq(Column#0, 0)", + }, + { + condition: newFunction(ast.LT, newColumn(0), newFunction(ast.Plus, newColumn(1), newFunction(ast.Plus, newLonglong(2), newLonglong(1)))), + result: "lt(Column#0, plus(Column#1, 3))", + }, + } + for _, tt := range tests { + newConds := FoldConstant(tt.condition) + c.Assert(newConds.String(), Equals, tt.result, Commentf("different for expr %s", tt.condition)) + } +} diff --git a/expression/constraint_propagation.go b/expression/constraint_propagation.go new file mode 100644 index 0000000..e86c17c --- /dev/null +++ b/expression/constraint_propagation.go @@ -0,0 +1,174 @@ +// Copyright 2018 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. 
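The next file, constraint_propagation.go, drives propagation as a fixpoint: rules are applied to pairs of conditions until no new condition can be added to the set. A standalone sketch of that control flow over plain strings is shown below; the single rule is a toy stand-in, not one of TiDB's rules:

package main

import "fmt"

// rule may derive a new condition from conds[i] and conds[j]; it returns "" if nothing new.
type rule func(conds []string, i, j int) string

// fixPoint keeps applying every rule to every ordered pair until the set stops growing.
func fixPoint(conds []string, rules []rule) []string {
	seen := make(map[string]bool, len(conds))
	for _, c := range conds {
		seen[c] = true
	}
	for {
		before := len(conds)
		for i := 0; i < len(conds); i++ {
			for j := 0; j < len(conds); j++ {
				if i == j {
					continue
				}
				for _, r := range rules {
					if derived := r(conds, i, j); derived != "" && !seen[derived] {
						seen[derived] = true
						conds = append(conds, derived)
					}
				}
			}
		}
		if len(conds) == before {
			return conds // no new conditions were inferred in this round
		}
	}
}

func main() {
	// Toy rule: from "a=b" and "a<3", derive "b<3".
	toy := func(conds []string, i, j int) string {
		if conds[i] == "a=b" && conds[j] == "a<3" {
			return "b<3"
		}
		return ""
	}
	fmt.Println(fixPoint([]string{"a=b", "a<3"}, []rule{toy}))
}

The real solver additionally tracks tombstones and hash codes so that a derived condition can supersede the one it was built from, which the sketch omits.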
+
+package expression
+
+import (
+	"bytes"
+
+	"github.com/pingcap/tidb/parser/mysql"
+	"github.com/pingcap/tidb/sessionctx"
+	"github.com/pingcap/tidb/sessionctx/stmtctx"
+	"github.com/pingcap/tidb/types"
+	"github.com/pingcap/tidb/util/chunk"
+	"github.com/pingcap/tidb/util/logutil"
+	"go.uber.org/zap"
+)
+
+// exprSet is a set container for expressions; each expression in it is unique.
+// `tombstone` is a deletion mark: if tombstone[i] is true, data[i] is invalid.
+// `exists` uses expr.HashCode() as its key to enforce the uniqueness property.
+type exprSet struct {
+	data       []Expression
+	tombstone  []bool
+	exists     map[string]struct{}
+	constfalse bool
+}
+
+func (s *exprSet) Append(sc *stmtctx.StatementContext, e Expression) bool {
+	if _, ok := s.exists[string(e.HashCode(sc))]; ok {
+		return false
+	}
+
+	s.data = append(s.data, e)
+	s.tombstone = append(s.tombstone, false)
+	s.exists[string(e.HashCode(sc))] = struct{}{}
+	return true
+}
+
+// Slice returns the valid expressions in the exprSet. Note that it compacts the
+// underlying data slice as a side effect.
+func (s *exprSet) Slice() []Expression {
+	if s.constfalse {
+		return []Expression{&Constant{
+			Value:   types.NewDatum(false),
+			RetType: types.NewFieldType(mysql.TypeTiny),
+		}}
+	}
+
+	idx := 0
+	for i := 0; i < len(s.data); i++ {
+		if !s.tombstone[i] {
+			s.data[idx] = s.data[i]
+			idx++
+		}
+	}
+	return s.data[:idx]
+}
+
+func (s *exprSet) SetConstFalse() {
+	s.constfalse = true
+}
+
+func newExprSet(ctx sessionctx.Context, conditions []Expression) *exprSet {
+	var exprs exprSet
+	exprs.data = make([]Expression, 0, len(conditions))
+	exprs.tombstone = make([]bool, 0, len(conditions))
+	exprs.exists = make(map[string]struct{}, len(conditions))
+	sc := ctx.GetSessionVars().StmtCtx
+	for _, v := range conditions {
+		exprs.Append(sc, v)
+	}
+	return &exprs
+}
+
+type constraintSolver []constraintPropagateRule
+
+func newConstraintSolver(rules ...constraintPropagateRule) constraintSolver {
+	return constraintSolver(rules)
+}
+
+type pgSolver2 struct{}
+
+func (s pgSolver2) PropagateConstant(ctx sessionctx.Context, conditions []Expression) []Expression {
+	solver := newConstraintSolver(ruleConstantFalse, ruleColumnEQConst)
+	return solver.Solve(ctx, conditions)
+}
+
+// Solve propagates constraints according to the rules in the constraintSolver.
+func (s constraintSolver) Solve(ctx sessionctx.Context, conditions []Expression) []Expression {
+	exprs := newExprSet(ctx, conditions)
+	s.fixPoint(ctx, exprs)
+	return exprs.Slice()
+}
+
+// fixPoint is the core of the constraint propagation algorithm.
+// It iterates over the expression set again and again, picking two expressions
+// and applying one to the other.
+// If new conditions can be inferred, they are appended to the expression set.
+// The algorithm finishes when no more conditions can be inferred from the set.
+func (s constraintSolver) fixPoint(ctx sessionctx.Context, exprs *exprSet) {
+	for {
+		saveLen := len(exprs.data)
+		s.iterOnce(ctx, exprs)
+		if saveLen == len(exprs.data) {
+			break
+		}
+	}
+}
+
+// iterOnce picks pairs of expressions from the set and tries to propagate new conditions from them.
+func (s constraintSolver) iterOnce(ctx sessionctx.Context, exprs *exprSet) {
+	for i := 0; i < len(exprs.data); i++ {
+		if exprs.tombstone[i] {
+			continue
+		}
+		for j := 0; j < len(exprs.data); j++ {
+			if exprs.tombstone[j] {
+				continue
+			}
+			if i == j {
+				continue
+			}
+			s.solve(ctx, i, j, exprs)
+		}
+	}
+}
+
+// solve uses exprs[i] and exprs[j] to propagate new conditions.
+func (s constraintSolver) solve(ctx sessionctx.Context, i, j int, exprs *exprSet) { + for _, rule := range s { + rule(ctx, i, j, exprs) + } +} + +type constraintPropagateRule func(ctx sessionctx.Context, i, j int, exprs *exprSet) + +// ruleConstantFalse propagates from CNF condition that false plus anything returns false. +// false, a = 1, b = c ... => false +func ruleConstantFalse(ctx sessionctx.Context, i, j int, exprs *exprSet) { + cond := exprs.data[i] + if cons, ok := cond.(*Constant); ok { + v, isNull, err := cons.EvalInt(ctx, chunk.Row{}) + if err != nil { + logutil.BgLogger().Warn("eval constant", zap.Error(err)) + return + } + if !isNull && v == 0 { + exprs.SetConstFalse() + } + } +} + +// ruleColumnEQConst propagates the "column = const" condition. +// "a = 3, b = a, c = a, d = b" => "a = 3, b = 3, c = 3, d = 3" +func ruleColumnEQConst(ctx sessionctx.Context, i, j int, exprs *exprSet) { + col, cons := validEqualCond(exprs.data[i]) + if col != nil { + expr := ColumnSubstitute(exprs.data[j], NewSchema(col), []Expression{cons}) + stmtctx := ctx.GetSessionVars().StmtCtx + if !bytes.Equal(expr.HashCode(stmtctx), exprs.data[j].HashCode(stmtctx)) { + exprs.Append(stmtctx, expr) + exprs.tombstone[j] = true + } + } +} diff --git a/expression/distsql_builtin.go b/expression/distsql_builtin.go new file mode 100644 index 0000000..1075dfd --- /dev/null +++ b/expression/distsql_builtin.go @@ -0,0 +1,284 @@ +// Copyright 2017 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. 
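The next file, distsql_builtin.go, rebuilds executable builtin functions from protobuf signature codes using one large switch. The same dispatch can be pictured as a lookup table; the sketch below uses toy codes and toy builtins, not tipb's ScalarFuncSig values or TiDB's builtinFunc types:

package main

import (
	"errors"
	"fmt"
)

type sigCode int

const (
	sigLTInt sigCode = iota
	sigEQInt
)

// builtin is a stand-in for an executable scalar function over int64 arguments.
type builtin func(args ...int64) int64

// constructors maps a wire-level signature code to the function it denotes.
var constructors = map[sigCode]builtin{
	sigLTInt: func(args ...int64) int64 {
		if args[0] < args[1] {
			return 1
		}
		return 0
	},
	sigEQInt: func(args ...int64) int64 {
		if args[0] == args[1] {
			return 1
		}
		return 0
	},
}

// getSignature resolves a code to its builtin, mirroring the error path for unknown codes.
func getSignature(code sigCode) (builtin, error) {
	f, ok := constructors[code]
	if !ok {
		return nil, errors.New("function signature does not exist")
	}
	return f, nil
}

func main() {
	lt, err := getSignature(sigLTInt)
	if err != nil {
		panic(err)
	}
	fmt.Println(lt(1, 2)) // prints 1, i.e. 1 < 2 is true
}

The real code keeps the explicit switch so that each concrete signature struct is constructed directly around its base builtinFunc; the table form above only illustrates the mapping idea.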
+ +package expression + +import ( + "fmt" + "github.com/pingcap/errors" + "github.com/pingcap/tidb/parser/model" + "github.com/pingcap/tidb/parser/mysql" + "github.com/pingcap/tidb/sessionctx" + "github.com/pingcap/tidb/sessionctx/stmtctx" + "github.com/pingcap/tidb/types" + "github.com/pingcap/tidb/util/codec" + "github.com/pingcap/tidb/util/mock" + "github.com/pingcap/tipb/go-tipb" +) + +// PbTypeToFieldType converts tipb.FieldType to FieldType +func PbTypeToFieldType(tp *tipb.FieldType) *types.FieldType { + return &types.FieldType{ + Tp: byte(tp.Tp), + Flag: uint(tp.Flag), + Flen: int(tp.Flen), + Decimal: int(tp.Decimal), + Charset: tp.Charset, + Collate: mysql.Collations[uint8(tp.Collate)], + } +} + +func getSignatureByPB(ctx sessionctx.Context, sigCode tipb.ScalarFuncSig, tp *tipb.FieldType, args []Expression) (f builtinFunc, e error) { + fieldTp := PbTypeToFieldType(tp) + base := newBaseBuiltinFunc(ctx, args) + base.tp = fieldTp + switch sigCode { + case tipb.ScalarFuncSig_LTInt: + f = &builtinLTIntSig{base} + case tipb.ScalarFuncSig_LTReal: + f = &builtinLTRealSig{base} + case tipb.ScalarFuncSig_LTString: + f = &builtinLTStringSig{base} + case tipb.ScalarFuncSig_LEInt: + f = &builtinLEIntSig{base} + case tipb.ScalarFuncSig_LEReal: + f = &builtinLERealSig{base} + case tipb.ScalarFuncSig_LEString: + f = &builtinLEStringSig{base} + case tipb.ScalarFuncSig_GTInt: + f = &builtinGTIntSig{base} + case tipb.ScalarFuncSig_GTReal: + f = &builtinGTRealSig{base} + case tipb.ScalarFuncSig_GTString: + f = &builtinGTStringSig{base} + case tipb.ScalarFuncSig_GEInt: + f = &builtinGEIntSig{base} + case tipb.ScalarFuncSig_GEReal: + f = &builtinGERealSig{base} + case tipb.ScalarFuncSig_GEString: + f = &builtinGEStringSig{base} + case tipb.ScalarFuncSig_EQInt: + f = &builtinEQIntSig{base} + case tipb.ScalarFuncSig_EQReal: + f = &builtinEQRealSig{base} + case tipb.ScalarFuncSig_EQString: + f = &builtinEQStringSig{base} + case tipb.ScalarFuncSig_NEInt: + f = &builtinNEIntSig{base} + case tipb.ScalarFuncSig_NEReal: + f = &builtinNERealSig{base} + case tipb.ScalarFuncSig_NEString: + f = &builtinNEStringSig{base} + case tipb.ScalarFuncSig_PlusReal: + f = &builtinArithmeticPlusRealSig{base} + case tipb.ScalarFuncSig_PlusInt: + f = &builtinArithmeticPlusIntSig{base} + case tipb.ScalarFuncSig_MinusReal: + f = &builtinArithmeticMinusRealSig{base} + case tipb.ScalarFuncSig_MinusInt: + f = &builtinArithmeticMinusIntSig{base} + case tipb.ScalarFuncSig_MultiplyReal: + f = &builtinArithmeticMultiplyRealSig{base} + case tipb.ScalarFuncSig_MultiplyInt: + f = &builtinArithmeticMultiplyIntSig{base} + case tipb.ScalarFuncSig_DivideReal: + f = &builtinArithmeticDivideRealSig{base} + case tipb.ScalarFuncSig_MultiplyIntUnsigned: + f = &builtinArithmeticMultiplyIntUnsignedSig{base} + case tipb.ScalarFuncSig_LogicalAnd: + f = &builtinLogicAndSig{base} + case tipb.ScalarFuncSig_LogicalOr: + f = &builtinLogicOrSig{base} + case tipb.ScalarFuncSig_UnaryNotInt: + f = &builtinUnaryNotIntSig{base} + case tipb.ScalarFuncSig_UnaryNotReal: + f = &builtinUnaryNotRealSig{base} + case tipb.ScalarFuncSig_UnaryMinusInt: + f = &builtinUnaryMinusIntSig{base} + case tipb.ScalarFuncSig_UnaryMinusReal: + f = &builtinUnaryMinusRealSig{base} + case tipb.ScalarFuncSig_RealIsNull: + f = &builtinRealIsNullSig{base} + case tipb.ScalarFuncSig_StringIsNull: + f = &builtinStringIsNullSig{base} + case tipb.ScalarFuncSig_IntIsNull: + f = &builtinIntIsNullSig{base} + case tipb.ScalarFuncSig_GetVar: + f = &builtinGetVarSig{base} + case 
tipb.ScalarFuncSig_SetVar: + f = &builtinSetVarSig{base} + case tipb.ScalarFuncSig_InInt: + f = &builtinInIntSig{base} + case tipb.ScalarFuncSig_InReal: + f = &builtinInRealSig{base} + case tipb.ScalarFuncSig_InString: + f = &builtinInStringSig{base} + case tipb.ScalarFuncSig_IfNullInt: + f = &builtinIfNullIntSig{base} + case tipb.ScalarFuncSig_IfNullReal: + f = &builtinIfNullRealSig{base} + case tipb.ScalarFuncSig_IfNullString: + f = &builtinIfNullStringSig{base} + case tipb.ScalarFuncSig_IfInt: + f = &builtinIfIntSig{base} + case tipb.ScalarFuncSig_IfReal: + f = &builtinIfRealSig{base} + case tipb.ScalarFuncSig_IfString: + f = &builtinIfStringSig{base} + case tipb.ScalarFuncSig_Length: + f = &builtinLengthSig{base} + case tipb.ScalarFuncSig_Strcmp: + f = &builtinStrcmpSig{base} + + default: + e = errFunctionNotExists.GenWithStackByArgs("FUNCTION", sigCode) + return nil, e + } + f.setPbCode(sigCode) + return f, nil +} + +func newDistSQLFunctionBySig(sc *stmtctx.StatementContext, sigCode tipb.ScalarFuncSig, tp *tipb.FieldType, args []Expression) (Expression, error) { + ctx := mock.NewContext() + ctx.GetSessionVars().StmtCtx = sc + f, err := getSignatureByPB(ctx, sigCode, tp, args) + if err != nil { + return nil, err + } + return &ScalarFunction{ + FuncName: model.NewCIStr(fmt.Sprintf("sig_%T", f)), + Function: f, + RetType: f.getRetTp(), + }, nil +} + +// PBToExprs converts pb structures to expressions. +func PBToExprs(pbExprs []*tipb.Expr, fieldTps []*types.FieldType, sc *stmtctx.StatementContext) ([]Expression, error) { + exprs := make([]Expression, 0, len(pbExprs)) + for _, expr := range pbExprs { + e, err := PBToExpr(expr, fieldTps, sc) + if err != nil { + return nil, errors.Trace(err) + } + if e == nil { + return nil, errors.Errorf("pb to expression failed, pb expression is %v", expr) + } + exprs = append(exprs, e) + } + return exprs, nil +} + +// PBToExpr converts pb structure to expression. +func PBToExpr(expr *tipb.Expr, tps []*types.FieldType, sc *stmtctx.StatementContext) (Expression, error) { + switch expr.Tp { + case tipb.ExprType_ColumnRef: + _, offset, err := codec.DecodeInt(expr.Val) + if err != nil { + return nil, err + } + return &Column{Index: int(offset), RetType: tps[offset]}, nil + case tipb.ExprType_Null: + return &Constant{Value: types.Datum{}, RetType: types.NewFieldType(mysql.TypeNull)}, nil + case tipb.ExprType_Int64: + return convertInt(expr.Val) + case tipb.ExprType_Uint64: + return convertUint(expr.Val) + case tipb.ExprType_String: + return convertString(expr.Val) + case tipb.ExprType_Bytes: + return &Constant{Value: types.NewBytesDatum(expr.Val), RetType: types.NewFieldType(mysql.TypeString)}, nil + case tipb.ExprType_Float32: + return convertFloat(expr.Val, true) + case tipb.ExprType_Float64: + return convertFloat(expr.Val, false) + } + if expr.Tp != tipb.ExprType_ScalarFunc { + panic("should be a tipb.ExprType_ScalarFunc") + } + // Then it must be a scalar function. + args := make([]Expression, 0, len(expr.Children)) + for _, child := range expr.Children { + if child.Tp == tipb.ExprType_ValueList { + results, err := decodeValueList(child.Val) + if err != nil { + return nil, err + } + if len(results) == 0 { + return &Constant{Value: types.NewDatum(false), RetType: types.NewFieldType(mysql.TypeLonglong)}, nil + } + args = append(args, results...) 
+ continue + } + arg, err := PBToExpr(child, tps, sc) + if err != nil { + return nil, err + } + args = append(args, arg) + } + return newDistSQLFunctionBySig(sc, expr.Sig, expr.FieldType, args) +} + +func decodeValueList(data []byte) ([]Expression, error) { + if len(data) == 0 { + return nil, nil + } + list, err := codec.Decode(data, 1) + if err != nil { + return nil, err + } + result := make([]Expression, 0, len(list)) + for _, value := range list { + result = append(result, &Constant{Value: value}) + } + return result, nil +} + +func convertInt(val []byte) (*Constant, error) { + var d types.Datum + _, i, err := codec.DecodeInt(val) + if err != nil { + return nil, errors.Errorf("invalid int % x", val) + } + d.SetInt64(i) + return &Constant{Value: d, RetType: types.NewFieldType(mysql.TypeLonglong)}, nil +} + +func convertUint(val []byte) (*Constant, error) { + var d types.Datum + _, u, err := codec.DecodeUint(val) + if err != nil { + return nil, errors.Errorf("invalid uint % x", val) + } + d.SetUint64(u) + return &Constant{Value: d, RetType: &types.FieldType{Tp: mysql.TypeLonglong, Flag: mysql.UnsignedFlag}}, nil +} + +func convertString(val []byte) (*Constant, error) { + var d types.Datum + d.SetBytesAsString(val) + return &Constant{Value: d, RetType: types.NewFieldType(mysql.TypeVarString)}, nil +} + +func convertFloat(val []byte, f32 bool) (*Constant, error) { + var d types.Datum + _, f, err := codec.DecodeFloat(val) + if err != nil { + return nil, errors.Errorf("invalid float % x", val) + } + if f32 { + d.SetFloat32(float32(f)) + } else { + d.SetFloat64(f) + } + return &Constant{Value: d, RetType: types.NewFieldType(mysql.TypeDouble)}, nil +} diff --git a/expression/errors.go b/expression/errors.go new file mode 100644 index 0000000..864e8fc --- /dev/null +++ b/expression/errors.go @@ -0,0 +1,77 @@ +// Copyright 2017 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package expression + +import ( + "github.com/pingcap/tidb/parser/mysql" + "github.com/pingcap/tidb/parser/terror" + "github.com/pingcap/tidb/sessionctx" +) + +// Error instances. 
+var ( + // All the exported errors are defined here: + ErrIncorrectParameterCount = terror.ClassExpression.New(mysql.ErrWrongParamcountToNativeFct, mysql.MySQLErrName[mysql.ErrWrongParamcountToNativeFct]) + ErrDivisionByZero = terror.ClassExpression.New(mysql.ErrDivisionByZero, mysql.MySQLErrName[mysql.ErrDivisionByZero]) + ErrRegexp = terror.ClassExpression.New(mysql.ErrRegexp, mysql.MySQLErrName[mysql.ErrRegexp]) + ErrOperandColumns = terror.ClassExpression.New(mysql.ErrOperandColumns, mysql.MySQLErrName[mysql.ErrOperandColumns]) + ErrCutValueGroupConcat = terror.ClassExpression.New(mysql.ErrCutValueGroupConcat, mysql.MySQLErrName[mysql.ErrCutValueGroupConcat]) + ErrFunctionsNoopImpl = terror.ClassExpression.New(mysql.ErrNotSupportedYet, "function %s has only noop implementation in tidb now, use tidb_enable_noop_functions to enable these functions") + ErrIncorrectType = terror.ClassExpression.New(mysql.ErrIncorrectType, mysql.MySQLErrName[mysql.ErrIncorrectType]) + + // All the un-exported errors are defined here: + errFunctionNotExists = terror.ClassExpression.New(mysql.ErrSpDoesNotExist, mysql.MySQLErrName[mysql.ErrSpDoesNotExist]) + errNonUniq = terror.ClassExpression.New(mysql.ErrNonUniq, mysql.MySQLErrName[mysql.ErrNonUniq]) +) + +func init() { + expressionMySQLErrCodes := map[terror.ErrCode]uint16{ + mysql.ErrWrongParamcountToNativeFct: mysql.ErrWrongParamcountToNativeFct, + mysql.ErrDivisionByZero: mysql.ErrDivisionByZero, + mysql.ErrSpDoesNotExist: mysql.ErrSpDoesNotExist, + mysql.ErrNotSupportedYet: mysql.ErrNotSupportedYet, + mysql.ErrZlibZData: mysql.ErrZlibZData, + mysql.ErrZlibZBuf: mysql.ErrZlibZBuf, + mysql.ErrWrongArguments: mysql.ErrWrongArguments, + mysql.ErrUnknownCharacterSet: mysql.ErrUnknownCharacterSet, + mysql.ErrInvalidDefault: mysql.ErrInvalidDefault, + mysql.ErrWarnDeprecatedSyntaxNoReplacement: mysql.ErrWarnDeprecatedSyntaxNoReplacement, + mysql.ErrOperandColumns: mysql.ErrOperandColumns, + mysql.ErrCutValueGroupConcat: mysql.ErrCutValueGroupConcat, + mysql.ErrRegexp: mysql.ErrRegexp, + mysql.ErrWarnAllowedPacketOverflowed: mysql.ErrWarnAllowedPacketOverflowed, + mysql.WarnOptionIgnored: mysql.WarnOptionIgnored, + mysql.ErrTruncatedWrongValue: mysql.ErrTruncatedWrongValue, + mysql.ErrUnknownLocale: mysql.ErrUnknownLocale, + mysql.ErrBadField: mysql.ErrBadField, + mysql.ErrNonUniq: mysql.ErrNonUniq, + mysql.ErrIncorrectType: mysql.ErrIncorrectType, + } + terror.ErrClassToMySQLCodes[terror.ClassExpression] = expressionMySQLErrCodes +} + +// handleDivisionByZeroError reports error or warning depend on the context. +func handleDivisionByZeroError(ctx sessionctx.Context) error { + sc := ctx.GetSessionVars().StmtCtx + if sc.InInsertStmt || sc.InDeleteStmt { + if !ctx.GetSessionVars().SQLMode.HasErrorForDivisionByZeroMode() { + return nil + } + if ctx.GetSessionVars().StrictSQLMode && !sc.DividedByZeroAsWarning { + return ErrDivisionByZero + } + } + sc.AppendWarning(ErrDivisionByZero) + return nil +} diff --git a/expression/evaluator.go b/expression/evaluator.go new file mode 100644 index 0000000..da3ee00 --- /dev/null +++ b/expression/evaluator.go @@ -0,0 +1,133 @@ +// Copyright 2018 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package expression + +import ( + "github.com/pingcap/tidb/sessionctx" + "github.com/pingcap/tidb/util/chunk" +) + +type columnEvaluator struct { + inputIdxToOutputIdxes map[int][]int +} + +// run evaluates "Column" expressions. +// NOTE: It should be called after all the other expressions are evaluated +// since it will change the content of the input Chunk. +func (e *columnEvaluator) run(ctx sessionctx.Context, input, output *chunk.Chunk) error { + for inputIdx, outputIdxes := range e.inputIdxToOutputIdxes { + if err := output.SwapColumn(outputIdxes[0], input, inputIdx); err != nil { + return err + } + for i, length := 1, len(outputIdxes); i < length; i++ { + output.MakeRef(outputIdxes[0], outputIdxes[i]) + } + } + return nil +} + +type defaultEvaluator struct { + outputIdxes []int + exprs []Expression + vectorizable bool +} + +func (e *defaultEvaluator) run(ctx sessionctx.Context, input, output *chunk.Chunk) error { + iter := chunk.NewIterator4Chunk(input) + if e.vectorizable { + for i := range e.outputIdxes { + if ctx.GetSessionVars().EnableVectorizedExpression && e.exprs[i].Vectorized() { + if err := evalOneVec(ctx, e.exprs[i], input, output, e.outputIdxes[i]); err != nil { + return err + } + continue + } + + err := evalOneColumn(ctx, e.exprs[i], iter, output, e.outputIdxes[i]) + if err != nil { + return err + } + } + return nil + } + + for row := iter.Begin(); row != iter.End(); row = iter.Next() { + for i := range e.outputIdxes { + err := evalOneCell(ctx, e.exprs[i], row, output, e.outputIdxes[i]) + if err != nil { + return err + } + } + } + return nil +} + +// EvaluatorSuite is responsible for the evaluation of a list of expressions. +// It separates them to "column" and "other" expressions and evaluates "other" +// expressions before "column" expressions. +type EvaluatorSuite struct { + *columnEvaluator // Evaluator for column expressions. + *defaultEvaluator // Evaluator for other expressions. +} + +// NewEvaluatorSuite creates an EvaluatorSuite to evaluate all the exprs. +// avoidColumnEvaluator can be removed after column pool is supported. +func NewEvaluatorSuite(exprs []Expression) *EvaluatorSuite { + e := &EvaluatorSuite{} + + for i := 0; i < len(exprs); i++ { + if col, isCol := exprs[i].(*Column); isCol { + if e.columnEvaluator == nil { + e.columnEvaluator = &columnEvaluator{inputIdxToOutputIdxes: make(map[int][]int)} + } + inputIdx, outputIdx := col.Index, i + e.columnEvaluator.inputIdxToOutputIdxes[inputIdx] = append(e.columnEvaluator.inputIdxToOutputIdxes[inputIdx], outputIdx) + continue + } + if e.defaultEvaluator == nil { + e.defaultEvaluator = &defaultEvaluator{ + outputIdxes: make([]int, 0, len(exprs)), + exprs: make([]Expression, 0, len(exprs)), + } + } + e.defaultEvaluator.exprs = append(e.defaultEvaluator.exprs, exprs[i]) + e.defaultEvaluator.outputIdxes = append(e.defaultEvaluator.outputIdxes, i) + } + + if e.defaultEvaluator != nil { + e.defaultEvaluator.vectorizable = Vectorizable(e.defaultEvaluator.exprs) + } + return e +} + +// Vectorizable checks whether this EvaluatorSuite can use vectorizd execution mode. 
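A minimal sketch (hypothetical, in package expression, assuming the usual types and parser/mysql imports; not part of the patch) of how NewEvaluatorSuite above classifies a projection:

// For the projection `b, a, b` over input columns a=0 and b=1, every item is a
// bare *Column, so only the columnEvaluator is built: input index 1 maps to
// output positions {0, 2} (one SwapColumn, then MakeRef) and input index 0 maps
// to {1}. With no defaultEvaluator, Vectorizable() reports true.
func exampleProjectionSuite() *EvaluatorSuite {
	intTp := types.NewFieldType(mysql.TypeLonglong)
	exprs := []Expression{
		&Column{Index: 1, RetType: intTp}, // b
		&Column{Index: 0, RetType: intTp}, // a
		&Column{Index: 1, RetType: intTp}, // b again, becomes a column reference
	}
	return NewEvaluatorSuite(exprs)
}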
+func (e *EvaluatorSuite) Vectorizable() bool { + return e.defaultEvaluator == nil || e.defaultEvaluator.vectorizable +} + +// Run evaluates all the expressions hold by this EvaluatorSuite. +// NOTE: "defaultEvaluator" must be evaluated before "columnEvaluator". +func (e *EvaluatorSuite) Run(ctx sessionctx.Context, input, output *chunk.Chunk) error { + if e.defaultEvaluator != nil { + err := e.defaultEvaluator.run(ctx, input, output) + if err != nil { + return err + } + } + + if e.columnEvaluator != nil { + return e.columnEvaluator.run(ctx, input, output) + } + return nil +} diff --git a/expression/evaluator_test.go b/expression/evaluator_test.go new file mode 100644 index 0000000..78968fa --- /dev/null +++ b/expression/evaluator_test.go @@ -0,0 +1,138 @@ +// Copyright 2015 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package expression + +import ( + "testing" + "time" + + . "github.com/pingcap/check" + "github.com/pingcap/tidb/parser" + "github.com/pingcap/tidb/parser/ast" + "github.com/pingcap/tidb/parser/mysql" + "github.com/pingcap/tidb/sessionctx" + "github.com/pingcap/tidb/types" + "github.com/pingcap/tidb/util/chunk" + "github.com/pingcap/tidb/util/mock" + "github.com/pingcap/tidb/util/testleak" + "github.com/pingcap/tidb/util/testutil" +) + +var _ = SerialSuites(&testEvaluatorSerialSuites{}) +var _ = Suite(&testEvaluatorSuite{}) + +func TestT(t *testing.T) { + testleak.BeforeTest() + defer testleak.AfterTestT(t) + + CustomVerboseFlag = true + *CustomParallelSuiteFlag = true + TestingT(t) +} + +type testEvaluatorSuite struct { + *parser.Parser + ctx sessionctx.Context +} + +type testEvaluatorSerialSuites struct { + *parser.Parser +} + +func (s *testEvaluatorSuite) SetUpSuite(c *C) { + s.Parser = parser.New() + s.ctx = mock.NewContext() + s.ctx.GetSessionVars().StmtCtx.TimeZone = time.Local + s.ctx.GetSessionVars().SetSystemVar("max_allowed_packet", "67108864") +} + +func (s *testEvaluatorSuite) TearDownSuite(c *C) { +} + +func (s *testEvaluatorSuite) SetUpTest(c *C) { + s.ctx.GetSessionVars().PlanColumnID = 0 +} + +func (s *testEvaluatorSuite) TearDownTest(c *C) { + s.ctx.GetSessionVars().StmtCtx.SetWarnings(nil) +} + +func (s *testEvaluatorSuite) kindToFieldType(kind byte) types.FieldType { + ft := types.FieldType{} + switch kind { + case types.KindNull: + ft.Tp = mysql.TypeNull + case types.KindInt64: + ft.Tp = mysql.TypeLonglong + case types.KindUint64: + ft.Tp = mysql.TypeLonglong + ft.Flag |= mysql.UnsignedFlag + case types.KindMinNotNull: + ft.Tp = mysql.TypeLonglong + case types.KindMaxValue: + ft.Tp = mysql.TypeLonglong + case types.KindFloat32: + ft.Tp = mysql.TypeDouble + case types.KindFloat64: + ft.Tp = mysql.TypeDouble + case types.KindString: + ft.Tp = mysql.TypeVarString + case types.KindBytes: + ft.Tp = mysql.TypeVarString + case types.KindInterface: + ft.Tp = mysql.TypeVarString + } + return ft +} + +func (s *testEvaluatorSuite) datumsToConstants(datums []types.Datum) []Expression { + constants := make([]Expression, 0, len(datums)) + for _, d := range datums { + ft := s.kindToFieldType(d.Kind()) + ft.Flen, 
ft.Decimal = types.UnspecifiedLength, types.UnspecifiedLength + constants = append(constants, &Constant{Value: d, RetType: &ft}) + } + return constants +} + +func (s *testEvaluatorSuite) primitiveValsToConstants(args []interface{}) []Expression { + cons := s.datumsToConstants(types.MakeDatums(args...)) + for i, arg := range args { + types.DefaultTypeForValue(arg, cons[i].GetType()) + } + return cons +} + +func (s *testEvaluatorSuite) TestUnaryOp(c *C) { + tbl := []struct { + arg interface{} + op string + result interface{} + }{ + // test Minus. + {nil, ast.UnaryMinus, nil}, + {float64(1.0), ast.UnaryMinus, float64(-1.0)}, + {int64(1), ast.UnaryMinus, int64(-1)}, + {int64(1), ast.UnaryMinus, int64(-1)}, + {uint64(1), ast.UnaryMinus, -int64(1)}, + } + for i, t := range tbl { + fc := funcs[t.op] + f, err := fc.getFunction(s.ctx, s.datumsToConstants(types.MakeDatums(t.arg))) + c.Assert(err, IsNil) + result, err := evalBuiltinFunc(f, chunk.Row{}) + c.Assert(err, IsNil) + c.Assert(result, testutil.DatumEquals, types.NewDatum(t.result), Commentf("%d", i)) + } +} diff --git a/expression/explain.go b/expression/explain.go new file mode 100644 index 0000000..06e0a99 --- /dev/null +++ b/expression/explain.go @@ -0,0 +1,147 @@ +// Copyright 2017 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package expression + +import ( + "bytes" + "fmt" + "sort" + "strings" + + "github.com/pingcap/tidb/types" + "github.com/pingcap/tidb/util/chunk" +) + +// ExplainInfo implements the Expression interface. +func (expr *ScalarFunction) ExplainInfo() string { + return expr.explainInfo(false) +} + +func (expr *ScalarFunction) explainInfo(normalized bool) string { + var buffer bytes.Buffer + fmt.Fprintf(&buffer, "%s(", expr.FuncName.L) + for i, arg := range expr.GetArgs() { + buffer.WriteString(arg.ExplainInfo()) + if i+1 < len(expr.GetArgs()) { + buffer.WriteString(", ") + } + } + buffer.WriteString(")") + return buffer.String() +} + +// ExplainNormalizedInfo implements the Expression interface. +func (expr *ScalarFunction) ExplainNormalizedInfo() string { + return expr.explainInfo(true) +} + +// ExplainInfo implements the Expression interface. +func (col *Column) ExplainInfo() string { + return col.String() +} + +// ExplainNormalizedInfo implements the Expression interface. +func (col *Column) ExplainNormalizedInfo() string { + return col.ExplainInfo() +} + +// ExplainInfo implements the Expression interface. +func (expr *Constant) ExplainInfo() string { + dt, err := expr.Eval(chunk.Row{}) + if err != nil { + return "not recognized const vanue" + } + return expr.format(dt) +} + +// ExplainNormalizedInfo implements the Expression interface. +func (expr *Constant) ExplainNormalizedInfo() string { + return "?" +} + +func (expr *Constant) format(dt types.Datum) string { + switch dt.Kind() { + case types.KindNull: + return "NULL" + case types.KindString, types.KindBytes: + return fmt.Sprintf("\"%v\"", dt.GetValue()) + } + return fmt.Sprintf("%v", dt.GetValue()) +} + +// ExplainExpressionList generates explain information for a list of expressions. 
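A minimal sketch (hypothetical, in package expression; not part of the patch) of the strings the explain methods above produce. Scalar functions render as name(args...), e.g. eq(test.t.a, 1), which is the format the integration tests later in this patch assert on; constants render as follows:

func exampleExplainStrings() {
	one := &Constant{Value: types.NewIntDatum(1), RetType: types.NewFieldType(mysql.TypeLonglong)}
	str := &Constant{Value: types.NewStringDatum("x"), RetType: types.NewFieldType(mysql.TypeVarString)}
	_ = one.ExplainInfo()           // "1"
	_ = one.ExplainNormalizedInfo() // "?"   (constants are masked in normalized output)
	_ = str.ExplainInfo()           // "\"x\"" (string constants are quoted by format)
}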
+func ExplainExpressionList(exprs []Expression, schema *Schema) string { + builder := &strings.Builder{} + for i, expr := range exprs { + switch expr.(type) { + case *Column: + builder.WriteString(expr.String()) + default: + fmt.Fprintf(builder, "%v->%v", expr.String(), schema.Columns[i]) + } + if i+1 < len(exprs) { + builder.WriteString(", ") + } + } + return builder.String() +} + +// SortedExplainExpressionList generates explain information for a list of expressions in order. +// In some scenarios, the expr's order may not be stable when executing multiple times. +// So we add a sort to make its explain result stable. +func SortedExplainExpressionList(exprs []Expression) []byte { + return sortedExplainExpressionList(exprs, false) +} + +func sortedExplainExpressionList(exprs []Expression, normalized bool) []byte { + buffer := bytes.NewBufferString("") + exprInfos := make([]string, 0, len(exprs)) + for _, expr := range exprs { + exprInfos = append(exprInfos, expr.ExplainInfo()) + } + sort.Strings(exprInfos) + for i, info := range exprInfos { + buffer.WriteString(info) + if i+1 < len(exprInfos) { + buffer.WriteString(", ") + } + } + return buffer.Bytes() +} + +// SortedExplainNormalizedExpressionList is same like SortedExplainExpressionList, but use for generating normalized information. +func SortedExplainNormalizedExpressionList(exprs []Expression) []byte { + return sortedExplainExpressionList(exprs, true) +} + +// SortedExplainNormalizedScalarFuncList is same like SortedExplainExpressionList, but use for generating normalized information. +func SortedExplainNormalizedScalarFuncList(exprs []*ScalarFunction) []byte { + expressions := make([]Expression, len(exprs)) + for i := range exprs { + expressions[i] = exprs[i] + } + return sortedExplainExpressionList(expressions, true) +} + +// ExplainColumnList generates explain information for a list of columns. +func ExplainColumnList(cols []*Column) []byte { + buffer := bytes.NewBufferString("") + for i, col := range cols { + buffer.WriteString(col.ExplainInfo()) + if i+1 < len(cols) { + buffer.WriteString(", ") + } + } + return buffer.Bytes() +} diff --git a/expression/expr_to_pb.go b/expression/expr_to_pb.go new file mode 100644 index 0000000..ccb4000 --- /dev/null +++ b/expression/expr_to_pb.go @@ -0,0 +1,313 @@ +// Copyright 2016 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package expression + +import ( + "github.com/gogo/protobuf/proto" + "github.com/pingcap/tidb/kv" + "github.com/pingcap/tidb/parser/ast" + "github.com/pingcap/tidb/parser/charset" + "github.com/pingcap/tidb/parser/mysql" + "github.com/pingcap/tidb/sessionctx/stmtctx" + "github.com/pingcap/tidb/types" + "github.com/pingcap/tidb/util/chunk" + "github.com/pingcap/tidb/util/codec" + "github.com/pingcap/tidb/util/logutil" + "github.com/pingcap/tipb/go-tipb" + "go.uber.org/zap" +) + +// ExpressionsToPB converts expression to tipb.Expr. 
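A minimal sketch (hypothetical; not part of the patch) of why the sorted variants above exist: the order of conditions can differ between runs, but the sorted byte output does not, so plan explanations stay stable.

// Suppose a.ExplainInfo() == "lt(test.t.b, 3)" and b.ExplainInfo() == "eq(test.t.a, 1)".
func exampleSortedExplain(a, b Expression) {
	_ = SortedExplainExpressionList([]Expression{a, b}) // eq(test.t.a, 1), lt(test.t.b, 3)
	_ = SortedExplainExpressionList([]Expression{b, a}) // identical bytes, order-independent
}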
+func ExpressionsToPB(sc *stmtctx.StatementContext, exprs []Expression, client kv.Client) (pbCNF *tipb.Expr, pushed []Expression, remained []Expression) { + pc := PbConverter{client: client, sc: sc} + retTypeOfAnd := &types.FieldType{ + Tp: mysql.TypeLonglong, + Flen: 1, + Decimal: 0, + Flag: mysql.BinaryFlag, + Charset: charset.CharsetBin, + Collate: charset.CollationBin, + } + + for _, expr := range exprs { + pbExpr := pc.ExprToPB(expr) + if pbExpr == nil { + remained = append(remained, expr) + continue + } + + pushed = append(pushed, expr) + if pbCNF == nil { + pbCNF = pbExpr + continue + } + + // Merge multiple converted pb expression into a CNF. + pbCNF = &tipb.Expr{ + Tp: tipb.ExprType_ScalarFunc, + Sig: tipb.ScalarFuncSig_LogicalAnd, + Children: []*tipb.Expr{pbCNF, pbExpr}, + FieldType: ToPBFieldType(retTypeOfAnd), + } + } + return +} + +// ExpressionsToPBList converts expressions to tipb.Expr list for new plan. +func ExpressionsToPBList(sc *stmtctx.StatementContext, exprs []Expression, client kv.Client) (pbExpr []*tipb.Expr) { + pc := PbConverter{client: client, sc: sc} + for _, expr := range exprs { + v := pc.ExprToPB(expr) + pbExpr = append(pbExpr, v) + } + return +} + +// PbConverter supplys methods to convert TiDB expressions to TiPB. +type PbConverter struct { + client kv.Client + sc *stmtctx.StatementContext +} + +// NewPBConverter creates a PbConverter. +func NewPBConverter(client kv.Client, sc *stmtctx.StatementContext) PbConverter { + return PbConverter{client: client, sc: sc} +} + +// ExprToPB converts Expression to TiPB. +func (pc PbConverter) ExprToPB(expr Expression) *tipb.Expr { + switch x := expr.(type) { + case *Constant: + return pc.conOrCorColToPBExpr(expr) + case *Column: + return pc.columnToPBExpr(x) + case *ScalarFunction: + return pc.scalarFuncToPBExpr(x) + } + return nil +} + +func (pc PbConverter) conOrCorColToPBExpr(expr Expression) *tipb.Expr { + ft := expr.GetType() + d, err := expr.Eval(chunk.Row{}) + if err != nil { + logutil.BgLogger().Error("eval constant or correlated column", zap.String("expression", expr.ExplainInfo()), zap.Error(err)) + return nil + } + tp, val, ok := pc.encodeDatum(ft, d) + if !ok { + return nil + } + + if !pc.client.IsRequestTypeSupported(kv.ReqTypeSelect, int64(tp)) { + return nil + } + return &tipb.Expr{Tp: tp, Val: val, FieldType: ToPBFieldType(ft)} +} + +func (pc *PbConverter) encodeDatum(ft *types.FieldType, d types.Datum) (tipb.ExprType, []byte, bool) { + var ( + tp tipb.ExprType + val []byte + ) + switch d.Kind() { + case types.KindNull: + tp = tipb.ExprType_Null + case types.KindInt64: + tp = tipb.ExprType_Int64 + val = codec.EncodeInt(nil, d.GetInt64()) + case types.KindUint64: + tp = tipb.ExprType_Uint64 + val = codec.EncodeUint(nil, d.GetUint64()) + case types.KindString: + tp = tipb.ExprType_String + val = d.GetBytes() + case types.KindBytes: + tp = tipb.ExprType_Bytes + val = d.GetBytes() + case types.KindFloat32: + tp = tipb.ExprType_Float32 + val = codec.EncodeFloat(nil, d.GetFloat64()) + case types.KindFloat64: + tp = tipb.ExprType_Float64 + val = codec.EncodeFloat(nil, d.GetFloat64()) + default: + return tp, nil, false + } + return tp, val, true +} + +// ToPBFieldType converts *types.FieldType to *tipb.FieldType. 
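A minimal sketch (hypothetical, in package expression; the pbOf* arguments are placeholders, not part of the patch) of the tree ExpressionsToPB above returns when two conditions, say a > 1 and b < 2, are both pushable. Each further pushable condition becomes the right child of a fresh AND node, so the merged CNF grows left-deep:

func exampleMergedCNF(pbOfAGreaterThan1, pbOfBLessThan2 *tipb.Expr, retTypeOfAnd *types.FieldType) *tipb.Expr {
	return &tipb.Expr{
		Tp:        tipb.ExprType_ScalarFunc,
		Sig:       tipb.ScalarFuncSig_LogicalAnd,
		Children:  []*tipb.Expr{pbOfAGreaterThan1, pbOfBLessThan2},
		FieldType: ToPBFieldType(retTypeOfAnd),
	}
}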
+func ToPBFieldType(ft *types.FieldType) *tipb.FieldType { + return &tipb.FieldType{ + Tp: int32(ft.Tp), + Flag: uint32(ft.Flag), + Flen: int32(ft.Flen), + Decimal: int32(ft.Decimal), + Charset: ft.Charset, + Collate: collationToProto(ft.Collate), + } +} + +// FieldTypeFromPB converts *tipb.FieldType to *types.FieldType. +func FieldTypeFromPB(ft *tipb.FieldType) *types.FieldType { + return &types.FieldType{ + Tp: byte(ft.Tp), + Flag: uint(ft.Flag), + Flen: int(ft.Flen), + Decimal: int(ft.Decimal), + Charset: ft.Charset, + Collate: protoToCollation(ft.Collate), + } +} + +func collationToProto(c string) int32 { + v, ok := mysql.CollationNames[c] + if ok { + return int32(v) + } + return int32(mysql.DefaultCollationID) +} + +func protoToCollation(c int32) string { + v, ok := mysql.Collations[uint8(c)] + if ok { + return v + } + return mysql.DefaultCollationName +} + +func (pc PbConverter) columnToPBExpr(column *Column) *tipb.Expr { + if !pc.client.IsRequestTypeSupported(kv.ReqTypeSelect, int64(tipb.ExprType_ColumnRef)) { + return nil + } + switch column.GetType().Tp { + case mysql.TypeBit, mysql.TypeSet, mysql.TypeEnum, mysql.TypeGeometry, mysql.TypeUnspecified: + return nil + } + + if pc.client.IsRequestTypeSupported(kv.ReqTypeDAG, kv.ReqSubTypeBasic) { + return &tipb.Expr{ + Tp: tipb.ExprType_ColumnRef, + Val: codec.EncodeInt(nil, int64(column.Index)), + FieldType: ToPBFieldType(column.RetType), + } + } + id := column.ID + // Zero Column ID is not a column from table, can not support for now. + if id == 0 || id == -1 { + return nil + } + + return &tipb.Expr{ + Tp: tipb.ExprType_ColumnRef, + Val: codec.EncodeInt(nil, id)} +} + +func (pc PbConverter) scalarFuncToPBExpr(expr *ScalarFunction) *tipb.Expr { + // Check whether this function has ProtoBuf signature. + pbCode := expr.Function.PbCode() + if pbCode <= tipb.ScalarFuncSig_Unspecified { + return nil + } + + // Check whether this function can be pushed. + if !pc.canFuncBePushed(expr) { + return nil + } + + // Check whether all of its parameters can be pushed. + children := make([]*tipb.Expr, 0, len(expr.GetArgs())) + for _, arg := range expr.GetArgs() { + pbArg := pc.ExprToPB(arg) + if pbArg == nil { + return nil + } + children = append(children, pbArg) + } + + var encoded []byte + if metadata := expr.Function.metadata(); metadata != nil { + var err error + encoded, err = proto.Marshal(metadata) + if err != nil { + logutil.BgLogger().Error("encode metadata", zap.Any("metadata", metadata), zap.Error(err)) + return nil + } + } + + // Construct expression ProtoBuf. + return &tipb.Expr{ + Tp: tipb.ExprType_ScalarFunc, + Val: encoded, + Sig: pbCode, + Children: children, + FieldType: ToPBFieldType(expr.RetType), + } +} + +// GroupByItemToPB converts group by items to pb. +func GroupByItemToPB(sc *stmtctx.StatementContext, client kv.Client, expr Expression) *tipb.ByItem { + pc := PbConverter{client: client, sc: sc} + e := pc.ExprToPB(expr) + if e == nil { + return nil + } + return &tipb.ByItem{Expr: e} +} + +// SortByItemToPB converts order by items to pb. +func SortByItemToPB(sc *stmtctx.StatementContext, client kv.Client, expr Expression, desc bool) *tipb.ByItem { + pc := PbConverter{client: client, sc: sc} + e := pc.ExprToPB(expr) + if e == nil { + return nil + } + return &tipb.ByItem{Expr: e, Desc: desc} +} + +func (pc PbConverter) canFuncBePushed(sf *ScalarFunction) bool { + switch sf.FuncName.L { + case + // op functions. + ast.LogicAnd, + ast.LogicOr, + ast.UnaryNot, + + // compare functions. 
+ ast.LT, + ast.LE, + ast.EQ, + ast.NE, + ast.GE, + ast.GT, + ast.In, + ast.IsNull, + + // arithmetical functions. + ast.Plus, + ast.Minus, + ast.Mul, + ast.Div, + + // control flow functions. + ast.If, + ast.Ifnull, + + // string functions. + ast.Length: + return true + } + return false +} diff --git a/expression/expression.go b/expression/expression.go new file mode 100644 index 0000000..acf1b37 --- /dev/null +++ b/expression/expression.go @@ -0,0 +1,556 @@ +// Copyright 2016 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package expression + +import ( + goJSON "encoding/json" + "fmt" + "sync" + + "github.com/pingcap/errors" + "github.com/pingcap/tidb/parser/ast" + "github.com/pingcap/tidb/parser/model" + "github.com/pingcap/tidb/parser/mysql" + "github.com/pingcap/tidb/parser/terror" + "github.com/pingcap/tidb/sessionctx" + "github.com/pingcap/tidb/sessionctx/stmtctx" + "github.com/pingcap/tidb/types" + "github.com/pingcap/tidb/util/chunk" +) + +// These are byte flags used for `HashCode()`. +const ( + constantFlag byte = 0 + columnFlag byte = 1 + scalarFunctionFlag byte = 3 +) + +// EvalAstExpr evaluates ast expression directly. +var EvalAstExpr func(sctx sessionctx.Context, expr ast.ExprNode) (types.Datum, error) + +// VecExpr contains all vectorized evaluation methods. +type VecExpr interface { + // Vectorized returns if this expression supports vectorized evaluation. + Vectorized() bool + + // VecEvalInt evaluates this expression in a vectorized manner. + VecEvalInt(ctx sessionctx.Context, input *chunk.Chunk, result *chunk.Column) error + + // VecEvalReal evaluates this expression in a vectorized manner. + VecEvalReal(ctx sessionctx.Context, input *chunk.Chunk, result *chunk.Column) error + + // VecEvalString evaluates this expression in a vectorized manner. + VecEvalString(ctx sessionctx.Context, input *chunk.Chunk, result *chunk.Column) error +} + +// Expression represents all scalar expression in SQL. +type Expression interface { + fmt.Stringer + goJSON.Marshaler + VecExpr + + // Eval evaluates an expression through a row. + Eval(row chunk.Row) (types.Datum, error) + + // EvalInt returns the int64 representation of expression. + EvalInt(ctx sessionctx.Context, row chunk.Row) (val int64, isNull bool, err error) + + // EvalReal returns the float64 representation of expression. + EvalReal(ctx sessionctx.Context, row chunk.Row) (val float64, isNull bool, err error) + + // EvalString returns the string representation of expression. + EvalString(ctx sessionctx.Context, row chunk.Row) (val string, isNull bool, err error) + + // GetType gets the type that the expression returns. + GetType() *types.FieldType + + // Clone copies an expression totally. + Clone() Expression + + // Equal checks whether two expressions are equal. + Equal(ctx sessionctx.Context, e Expression) bool + + // IsCorrelated checks if this expression has correlated key. + IsCorrelated() bool + + // ConstItem checks if this expression is constant item, regardless of query evaluation state. + // A constant item can be eval() when build a plan. 
+ // An expression is constant item if it: + // refers no tables. + // refers no subqueries that refers any tables. + // refers no non-deterministic functions. + // refers no statement parameters. + ConstItem() bool + + // Decorrelate try to decorrelate the expression by schema. + Decorrelate(schema *Schema) Expression + + // ResolveIndices resolves indices by the given schema. It will copy the original expression and return the copied one. + ResolveIndices(schema *Schema) (Expression, error) + + // resolveIndices is called inside the `ResolveIndices` It will perform on the expression itself. + resolveIndices(schema *Schema) error + + // ExplainInfo returns operator information to be explained. + ExplainInfo() string + + // HashCode creates the hashcode for expression which can be used to identify itself from other expression. + // It generated as the following: + // Constant: ConstantFlag+encoded value + // Column: ColumnFlag+encoded value + // ScalarFunction: SFFlag+encoded function name + encoded arg_1 + encoded arg_2 + ... + HashCode(sc *stmtctx.StatementContext) []byte +} + +// CNFExprs stands for a CNF expression. +type CNFExprs []Expression + +// Clone clones itself. +func (e CNFExprs) Clone() CNFExprs { + cnf := make(CNFExprs, 0, len(e)) + for _, expr := range e { + cnf = append(cnf, expr.Clone()) + } + return cnf +} + +// Shallow makes a shallow copy of itself. +func (e CNFExprs) Shallow() CNFExprs { + cnf := make(CNFExprs, 0, len(e)) + cnf = append(cnf, e...) + return cnf +} + +// EvalBool evaluates expression list to a boolean value. The first returned value +// indicates bool result of the expression list, the second returned value indicates +// whether the result of the expression list is null, it can only be true when the +// first returned values is false. +func EvalBool(ctx sessionctx.Context, exprList CNFExprs, row chunk.Row) (bool, bool, error) { + hasNull := false + for _, expr := range exprList { + data, err := expr.Eval(row) + if err != nil { + return false, false, err + } + if data.IsNull() { + return false, false, nil + } + + i, err := data.ToBool(ctx.GetSessionVars().StmtCtx) + if err != nil { + return false, false, err + } + if i == 0 { + return false, false, nil + } + } + if hasNull { + return false, true, nil + } + return true, false, nil +} + +var ( + defaultChunkSize = 1024 + selPool = sync.Pool{ + New: func() interface{} { + return make([]int, defaultChunkSize) + }, + } + zeroPool = sync.Pool{ + New: func() interface{} { + return make([]int8, defaultChunkSize) + }, + } +) + +func allocSelSlice(n int) []int { + if n > defaultChunkSize { + return make([]int, n) + } + return selPool.Get().([]int) +} + +func deallocateSelSlice(sel []int) { + if cap(sel) <= defaultChunkSize { + selPool.Put(sel) + } +} + +func allocZeroSlice(n int) []int8 { + if n > defaultChunkSize { + return make([]int8, n) + } + return zeroPool.Get().([]int8) +} + +func deallocateZeroSlice(isZero []int8) { + if cap(isZero) <= defaultChunkSize { + zeroPool.Put(isZero) + } +} + +// VecEvalBool does the same thing as EvalBool but it works in a vectorized manner. +func VecEvalBool(ctx sessionctx.Context, exprList CNFExprs, input *chunk.Chunk, selected, nulls []bool) ([]bool, []bool, error) { + // If input.Sel() != nil, we will call input.SetSel(nil) to clear the sel slice in input chunk. + // After the function finished, then we reset the input.Sel(). + // The caller will handle the input.Sel() and selected slices. 
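	// A worked illustration (editorial sketch under assumed inputs, not part of
	// the patch): with n = 4 rows and two int-typed conjuncts,
	//   start:        sel = [0 1 2 3]
	//   conjunct #1 evaluates to [false true true true] -> sel = [1 2 3]
	//   conjunct #2 evaluates to [NULL true false]      -> row 1 is kept but nulls[1] = true,
	//                                                      row 3 is dropped, sel = [1 2]
	// Only rows left in sel with nulls[i] == false become selected, so here
	// selected[2] = true and row 1 is reported through nulls instead.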
+ defer input.SetSel(input.Sel()) + input.SetSel(nil) + + n := input.NumRows() + selected = selected[:0] + nulls = nulls[:0] + for i := 0; i < n; i++ { + selected = append(selected, false) + nulls = append(nulls, false) + } + + sel := allocSelSlice(n) + defer deallocateSelSlice(sel) + sel = sel[:0] + for i := 0; i < n; i++ { + sel = append(sel, i) + } + input.SetSel(sel) + + // In isZero slice, -1 means Null, 0 means zero, 1 means not zero + isZero := allocZeroSlice(n) + defer deallocateZeroSlice(isZero) + for _, expr := range exprList { + eType := expr.GetType().EvalType() + buf, err := globalColumnAllocator.get(eType, n) + if err != nil { + return nil, nil, err + } + + if err := VecEval(ctx, expr, input, buf); err != nil { + return nil, nil, err + } + + err = toBool(ctx.GetSessionVars().StmtCtx, eType, buf, sel, isZero) + if err != nil { + return nil, nil, err + } + + j := 0 + for i := range sel { + if isZero[i] == -1 { + if eType != types.ETInt { + continue + } + // In this case, we set this row to null and let it pass this filter. + // The null flag may be set to false later by other expressions in some cases. + nulls[sel[i]] = true + sel[j] = sel[i] + j++ + continue + } + + if isZero[i] == 0 { + continue + } + sel[j] = sel[i] // this row passes this filter + j++ + } + sel = sel[:j] + input.SetSel(sel) + globalColumnAllocator.put(buf) + } + + for _, i := range sel { + if !nulls[i] { + selected[i] = true + } + } + + return selected, nulls, nil +} + +func toBool(sc *stmtctx.StatementContext, eType types.EvalType, buf *chunk.Column, sel []int, isZero []int8) error { + var err error + switch eType { + case types.ETInt: + i64s := buf.Int64s() + for i := range sel { + if buf.IsNull(i) { + isZero[i] = -1 + } else { + if i64s[i] == 0 { + isZero[i] = 0 + } else { + isZero[i] = 1 + } + } + } + case types.ETReal: + f64s := buf.Float64s() + for i := range sel { + if buf.IsNull(i) { + isZero[i] = -1 + } else { + if types.RoundFloat(f64s[i]) == 0 { + isZero[i] = 0 + } else { + isZero[i] = 1 + } + } + } + case types.ETString: + for i := range sel { + if buf.IsNull(i) { + isZero[i] = -1 + } else { + iVal, err1 := types.StrToInt(sc, buf.GetString(i)) + err = err1 + if iVal == 0 { + isZero[i] = 0 + } else { + isZero[i] = 1 + } + } + } + } + return errors.Trace(err) +} + +// VecEval evaluates this expr according to its type. +func VecEval(ctx sessionctx.Context, expr Expression, input *chunk.Chunk, result *chunk.Column) (err error) { + switch expr.GetType().EvalType() { + case types.ETInt: + err = expr.VecEvalInt(ctx, input, result) + case types.ETReal: + err = expr.VecEvalReal(ctx, input, result) + case types.ETString: + err = expr.VecEvalString(ctx, input, result) + default: + err = errors.New(fmt.Sprintf("invalid eval type %v", expr.GetType().EvalType())) + } + return +} + +// composeConditionWithBinaryOp composes condition with binary operator into a balance deep tree, which benefits a lot for pb decoder/encoder. +func composeConditionWithBinaryOp(ctx sessionctx.Context, conditions []Expression, funcName string) Expression { + length := len(conditions) + if length == 0 { + return nil + } + if length == 1 { + return conditions[0] + } + expr := NewFunctionInternal(ctx, funcName, + types.NewFieldType(mysql.TypeTiny), + composeConditionWithBinaryOp(ctx, conditions[:length/2], funcName), + composeConditionWithBinaryOp(ctx, conditions[length/2:], funcName)) + return expr +} + +// ComposeCNFCondition composes CNF items into a balance deep CNF tree, which benefits a lot for pb decoder/encoder. 
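// A hypothetical illustration (not part of the patch): for four conditions
// c1..c4, composeConditionWithBinaryOp above splits the slice in half and
// recurses, so ComposeCNFCondition yields the balanced tree
//
//	and(and(c1, c2), and(c3, c4))
//
// rather than the left-deep and(and(and(c1, c2), c3), c4).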
+func ComposeCNFCondition(ctx sessionctx.Context, conditions ...Expression) Expression { + return composeConditionWithBinaryOp(ctx, conditions, ast.LogicAnd) +} + +// ComposeDNFCondition composes DNF items into a balance deep DNF tree. +func ComposeDNFCondition(ctx sessionctx.Context, conditions ...Expression) Expression { + return composeConditionWithBinaryOp(ctx, conditions, ast.LogicOr) +} + +func extractBinaryOpItems(conditions *ScalarFunction, funcName string) []Expression { + var ret []Expression + for _, arg := range conditions.GetArgs() { + if sf, ok := arg.(*ScalarFunction); ok && sf.FuncName.L == funcName { + ret = append(ret, extractBinaryOpItems(sf, funcName)...) + } else { + ret = append(ret, arg) + } + } + return ret +} + +// FlattenDNFConditions extracts DNF expression's leaf item. +// e.g. or(or(a=1, a=2), or(a=3, a=4)), we'll get [a=1, a=2, a=3, a=4]. +func FlattenDNFConditions(DNFCondition *ScalarFunction) []Expression { + return extractBinaryOpItems(DNFCondition, ast.LogicOr) +} + +// FlattenCNFConditions extracts CNF expression's leaf item. +// e.g. and(and(a>1, a>2), and(a>3, a>4)), we'll get [a>1, a>2, a>3, a>4]. +func FlattenCNFConditions(CNFCondition *ScalarFunction) []Expression { + return extractBinaryOpItems(CNFCondition, ast.LogicAnd) +} + +// Assignment represents a set assignment in Update, such as +// Update t set c1 = hex(12), c2 = c3 where c2 = 1 +type Assignment struct { + Col *Column + // ColName indicates its original column name in table schema. It's used for outputting helping message when executing meets some errors. + ColName model.CIStr + Expr Expression +} + +// VarAssignment represents a variable assignment in Set, such as set global a = 1. +type VarAssignment struct { + Name string + Expr Expression + IsDefault bool + IsGlobal bool + IsSystem bool +} + +// splitNormalFormItems split CNF(conjunctive normal form) like "a and b and c", or DNF(disjunctive normal form) like "a or b or c" +func splitNormalFormItems(onExpr Expression, funcName string) []Expression { + switch v := onExpr.(type) { + case *ScalarFunction: + if v.FuncName.L == funcName { + var ret []Expression + for _, arg := range v.GetArgs() { + ret = append(ret, splitNormalFormItems(arg, funcName)...) + } + return ret + } + } + return []Expression{onExpr} +} + +// SplitCNFItems splits CNF items. +// CNF means conjunctive normal form, e.g. "a and b and c". +func SplitCNFItems(onExpr Expression) []Expression { + return splitNormalFormItems(onExpr, ast.LogicAnd) +} + +// SplitDNFItems splits DNF items. +// DNF means disjunctive normal form, e.g. "a or b or c". +func SplitDNFItems(onExpr Expression) []Expression { + return splitNormalFormItems(onExpr, ast.LogicOr) +} + +// EvaluateExprWithNull sets columns in schema as null and calculate the final result of the scalar function. +// If the Expression is a non-constant value, it means the result is unknown. +func EvaluateExprWithNull(ctx sessionctx.Context, schema *Schema, expr Expression) Expression { + switch x := expr.(type) { + case *ScalarFunction: + args := make([]Expression, len(x.GetArgs())) + for i, arg := range x.GetArgs() { + args[i] = EvaluateExprWithNull(ctx, schema, arg) + } + return NewFunctionInternal(ctx, x.FuncName.L, x.RetType, args...) + case *Column: + if !schema.Contains(x) { + return x + } + return &Constant{Value: types.Datum{}, RetType: types.NewFieldType(mysql.TypeNull)} + } + return expr +} + +// TableInfo2SchemaAndNames converts the TableInfo to the schema and name slice. 
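A minimal sketch (hypothetical, in package expression; not part of the patch) of what EvaluateExprWithNull above does to a condition: columns contained in the given schema are replaced by NULL constants before the call is rebuilt, while columns outside the schema are left untouched.

// Given cond = gt(col, 1) with col contained in schema, the column argument is
// replaced by a NULL constant, so the rebuilt call is gt(NULL, 1) before any
// further simplification; if col is not in schema, cond comes back unchanged.
func exampleEvalWithNull(ctx sessionctx.Context, schema *Schema, cond Expression) Expression {
	return EvaluateExprWithNull(ctx, schema, cond)
}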
+func TableInfo2SchemaAndNames(ctx sessionctx.Context, dbName model.CIStr, tbl *model.TableInfo) (*Schema, []*types.FieldName) { + cols, names := ColumnInfos2ColumnsAndNames(ctx, dbName, tbl.Name, tbl.Columns) + keys := make([]KeyInfo, 0, len(tbl.Indices)+1) + for _, idx := range tbl.Indices { + if !idx.Unique || idx.State != model.StatePublic { + continue + } + ok := true + newKey := make([]*Column, 0, len(idx.Columns)) + for _, idxCol := range idx.Columns { + find := false + for i, col := range tbl.Columns { + if idxCol.Name.L == col.Name.L { + if !mysql.HasNotNullFlag(col.Flag) { + break + } + newKey = append(newKey, cols[i]) + find = true + break + } + } + if !find { + ok = false + break + } + } + if ok { + keys = append(keys, newKey) + } + } + if tbl.PKIsHandle { + for i, col := range tbl.Columns { + if mysql.HasPriKeyFlag(col.Flag) { + keys = append(keys, KeyInfo{cols[i]}) + break + } + } + } + schema := NewSchema(cols...) + schema.SetUniqueKeys(keys) + return schema, names +} + +// ColumnInfos2ColumnsAndNames converts the ColumnInfo to the *Column and NameSlice. +func ColumnInfos2ColumnsAndNames(ctx sessionctx.Context, dbName, tblName model.CIStr, colInfos []*model.ColumnInfo) ([]*Column, types.NameSlice) { + columns := make([]*Column, 0, len(colInfos)) + names := make([]*types.FieldName, 0, len(colInfos)) + for i, col := range colInfos { + if col.State != model.StatePublic { + continue + } + names = append(names, &types.FieldName{ + OrigTblName: tblName, + OrigColName: col.Name, + DBName: dbName, + TblName: tblName, + ColName: col.Name, + }) + newCol := &Column{ + RetType: &col.FieldType, + ID: col.ID, + UniqueID: ctx.GetSessionVars().AllocPlanColumnID(), + Index: col.Offset, + OrigName: names[i].String(), + } + columns = append(columns, newCol) + } + return columns, names +} + +// NewValuesFunc creates a new values function. +func NewValuesFunc(ctx sessionctx.Context, offset int, retTp *types.FieldType) *ScalarFunction { + fc := &valuesFunctionClass{baseFunctionClass{ast.Values, 0, 0}, offset, retTp} + bt, err := fc.getFunction(ctx, nil) + terror.Log(err) + return &ScalarFunction{ + FuncName: model.NewCIStr(ast.Values), + RetType: retTp, + Function: bt, + } +} + +// FindFieldName finds the column name from NameSlice. +func FindFieldName(names types.NameSlice, astCol *ast.ColumnName) (int, error) { + dbName, tblName, colName := astCol.Schema, astCol.Table, astCol.Name + idx := -1 + for i, name := range names { + if (dbName.L == "" || dbName.L == name.DBName.L) && + (tblName.L == "" || tblName.L == name.TblName.L) && + (colName.L == name.ColName.L) { + if idx == -1 { + idx = i + } else { + return -1, errNonUniq.GenWithStackByArgs(name.String(), "field list") + } + } + } + return idx, nil +} diff --git a/expression/expression_test.go b/expression/expression_test.go new file mode 100644 index 0000000..cddd0c2 --- /dev/null +++ b/expression/expression_test.go @@ -0,0 +1,43 @@ +// Copyright 2017 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package expression + +import ( + "time" + + . 
"github.com/pingcap/check" + "github.com/pingcap/tidb/parser/mysql" + "github.com/pingcap/tidb/sessionctx/stmtctx" + "github.com/pingcap/tidb/types" +) + +func (s *testEvaluatorSuite) TestNewValuesFunc(c *C) { + res := NewValuesFunc(s.ctx, 0, types.NewFieldType(mysql.TypeLonglong)) + c.Assert(res.FuncName.O, Equals, "values") + c.Assert(res.RetType.Tp, Equals, mysql.TypeLonglong) + _, ok := res.Function.(*builtinValuesIntSig) + c.Assert(ok, IsTrue) +} + +func (s *testEvaluatorSuite) TestConstant(c *C) { + sc := &stmtctx.StatementContext{TimeZone: time.Local} + c.Assert(Zero.IsCorrelated(), IsFalse) + c.Assert(Zero.ConstItem(), IsTrue) + c.Assert(Zero.Decorrelate(nil).Equal(s.ctx, Zero), IsTrue) + c.Assert(Zero.HashCode(sc), DeepEquals, []byte{0x0, 0x8, 0x0}) + c.Assert(Zero.Equal(s.ctx, One), IsFalse) + res, err := Zero.MarshalJSON() + c.Assert(err, IsNil) + c.Assert(res, DeepEquals, []byte{0x22, 0x30, 0x22}) +} diff --git a/expression/function_traits.go b/expression/function_traits.go new file mode 100644 index 0000000..ab846ff --- /dev/null +++ b/expression/function_traits.go @@ -0,0 +1,37 @@ +// Copyright 2017 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package expression + +import ( + "github.com/pingcap/tidb/parser/ast" +) + +// unFoldableFunctions stores functions which can not be folded duration constant folding stage. +var unFoldableFunctions = map[string]struct{}{ + ast.RowFunc: {}, + ast.SetVar: {}, + ast.GetVar: {}, +} + +// inequalFunctions stores functions which cannot be propagated from column equal condition. +var inequalFunctions = map[string]struct{}{ + ast.IsNull: {}, +} + +// mutableEffectsFunctions stores functions which are mutable or have side effects, specifically, +// we cannot remove them from filter even if they have duplicates. +var mutableEffectsFunctions = map[string]struct{}{ + ast.SetVar: {}, + ast.GetVar: {}, +} diff --git a/expression/generator/compare_vec.go b/expression/generator/compare_vec.go new file mode 100644 index 0000000..9802617 --- /dev/null +++ b/expression/generator/compare_vec.go @@ -0,0 +1,247 @@ +// Copyright 2019 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +// +build ignore + +package main + +import ( + "bytes" + "flag" + "go/format" + "io/ioutil" + "log" + "path/filepath" + "text/template" + + . "github.com/pingcap/tidb/expression/generator/helper" +) + +const header = `// Copyright 2019 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by go generate in expression/generator; DO NOT EDIT. + +package expression +` + +const newLine = "\n" + +const builtinCompareImports = `import ( + "github.com/pingcap/tidb/types" + "github.com/pingcap/tidb/util/chunk" +) +` + +var builtinCompareVecTpl = template.Must(template.New("").Parse(` +func (b *builtin{{ .compare.CompareName }}{{ .type.TypeName }}Sig) vecEvalInt(input *chunk.Chunk, result *chunk.Column) error { + n := input.NumRows() + buf0, err := b.bufAllocator.get(types.ET{{ .type.ETName }}, n) + if err != nil { + return err + } + defer b.bufAllocator.put(buf0) + if err := b.args[0].VecEval{{ .type.TypeName }}(b.ctx, input, buf0); err != nil { + return err + } + buf1, err := b.bufAllocator.get(types.ET{{ .type.ETName }}, n) + if err != nil { + return err + } + defer b.bufAllocator.put(buf1) + if err := b.args[1].VecEval{{ .type.TypeName }}(b.ctx, input, buf1); err != nil { + return err + } + +{{ if .type.Fixed }} + arg0 := buf0.{{ .type.TypeNameInColumn }}s() + arg1 := buf1.{{ .type.TypeNameInColumn }}s() +{{- end }} + result.ResizeInt64(n, false) + result.MergeNulls(buf0, buf1) + i64s := result.Int64s() + for i := 0; i < n; i++ { + if result.IsNull(i) { + continue + } +{{- if eq .type.ETName "Real" }} + val := types.CompareFloat64(arg0[i], arg1[i]) +{{- else }} + val := types.CompareString(buf0.GetString(i), buf1.GetString(i)) +{{- end }} + if val {{ .compare.Operator }} 0 { + i64s[i] = 1 + } else { + i64s[i] = 0 + } + } + return nil +} + +func (b *builtin{{ .compare.CompareName }}{{ .type.TypeName }}Sig) vectorized() bool { + return true +} +`)) + +const builtinCompareVecTestHeader = `import ( + "testing" + + . 
"github.com/pingcap/check" + "github.com/pingcap/tidb/parser/ast" + "github.com/pingcap/tidb/types" +) + +var vecGeneratedBuiltinCompareCases = map[string][]vecExprBenchCase{ +` + +var builtinCompareVecTestFuncHeader = template.Must(template.New("").Parse(` ast.{{ .CompareName }}: { +`)) + +var builtinCompareVecTestCase = template.Must(template.New("").Parse(` {retEvalType: types.ETInt, childrenTypes: []types.EvalType{types.ET{{ .ETName }}, types.ET{{ .ETName }}}}, +`)) + +var builtinCompareVecTestFuncTail = ` }, +` + +var builtinCompareVecTestTail = `} + +func (s *testEvaluatorSuite) TestVectorizedGeneratedBuiltinCompareEvalOneVec(c *C) { + testVectorizedEvalOneVec(c, vecGeneratedBuiltinCompareCases) +} + +func (s *testEvaluatorSuite) TestVectorizedGeneratedBuiltinCompareFunc(c *C) { + testVectorizedBuiltinFunc(c, vecGeneratedBuiltinCompareCases) +} + +func BenchmarkVectorizedGeneratedBuiltinCompareEvalOneVec(b *testing.B) { + benchmarkVectorizedEvalOneVec(b, vecGeneratedBuiltinCompareCases) +} + +func BenchmarkVectorizedGeneratedBuiltinCompareFunc(b *testing.B) { + benchmarkVectorizedBuiltinFunc(b, vecGeneratedBuiltinCompareCases) +} +` + +type CompareContext struct { + // Describe the name of CompareContext(LT/LE/GT/GE/EQ/NE/NullEQ) + CompareName string + // Compare Operators + Operator string +} + +var comparesMap = []CompareContext{ + {CompareName: "LT", Operator: "<"}, + {CompareName: "LE", Operator: "<="}, + {CompareName: "GT", Operator: ">"}, + {CompareName: "GE", Operator: ">="}, + {CompareName: "EQ", Operator: "=="}, + {CompareName: "NE", Operator: "!="}, +} + +var typesMap = []TypeContext{ + TypeInt, + TypeReal, + TypeString, +} + +func generateDotGo(fileName string, compares []CompareContext, types []TypeContext) (err error) { + w := new(bytes.Buffer) + w.WriteString(header) + w.WriteString(newLine) + w.WriteString(builtinCompareImports) + + var ctx = make(map[string]interface{}) + for _, compareCtx := range compares { + for _, typeCtx := range types { + ctx["compare"] = compareCtx + ctx["type"] = typeCtx + if typeCtx.TypeName == TypeInt.TypeName { + continue + } + err := builtinCompareVecTpl.Execute(w, ctx) + if err != nil { + return err + } + } + } + data, err := format.Source(w.Bytes()) + if err != nil { + log.Println("[Warn]", fileName+": gofmt failed", err) + data = w.Bytes() // write original data for debugging + } + return ioutil.WriteFile(fileName, data, 0644) +} + +func generateTestDotGo(fileName string, compares []CompareContext, types []TypeContext) error { + w := new(bytes.Buffer) + w.WriteString(header) + w.WriteString(builtinCompareVecTestHeader) + + for _, compareCtx := range compares { + err := builtinCompareVecTestFuncHeader.Execute(w, compareCtx) + if err != nil { + return err + } + for _, typeCtx := range types { + if typeCtx.TypeName == TypeInt.TypeName { + continue + } + err := builtinCompareVecTestCase.Execute(w, typeCtx) + if err != nil { + return err + } + } + w.WriteString(builtinCompareVecTestFuncTail) + } + w.WriteString(builtinCompareVecTestTail) + + data, err := format.Source(w.Bytes()) + if err != nil { + log.Println("[Warn]", fileName+": gofmt failed", err) + data = w.Bytes() // write original data for debugging + } + return ioutil.WriteFile(fileName, data, 0644) +} + +// generateOneFile generate one xxx.go file and the associated xxx_test.go file. 
+func generateOneFile(fileNamePrefix string, compares []CompareContext, + types []TypeContext) (err error) { + + err = generateDotGo(fileNamePrefix+".go", compares, types) + if err != nil { + return + } + err = generateTestDotGo(fileNamePrefix+"_test.go", compares, types) + return +} + +func main() { + flag.Parse() + var err error + outputDir := "." + err = generateOneFile(filepath.Join(outputDir, "builtin_compare_vec_generated"), + comparesMap, typesMap) + if err != nil { + log.Fatalln("generateOneFile", err) + } +} diff --git a/expression/generator/control_vec.go b/expression/generator/control_vec.go new file mode 100644 index 0000000..9db65df --- /dev/null +++ b/expression/generator/control_vec.go @@ -0,0 +1,374 @@ +// Copyright 2019 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +// +build ignore + +package main + +import ( + "bytes" + "go/format" + "io/ioutil" + "log" + "path/filepath" + "text/template" + + . "github.com/pingcap/tidb/expression/generator/helper" +) + +const header = `// Copyright 2019 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by go generate in expression/generator; DO NOT EDIT. 
+ +package expression + +import ( + "github.com/pingcap/tidb/types" + "github.com/pingcap/tidb/util/chunk" +) +` + +var builtinIfNullVec = template.Must(template.New("builtinIfNullVec").Parse(` +{{ range .Sigs }}{{ with .Arg0 }} +func (b *builtinIfNull{{ .TypeName }}Sig) vecEval{{ .TypeName }}(input *chunk.Chunk, result *chunk.Column) error { + n := input.NumRows() + + {{- if .Fixed }} + if err := b.args[0].VecEval{{ .TypeName }}(b.ctx, input, result); err != nil { + return err + } + buf1, err := b.bufAllocator.get(types.ET{{ .ETName }}, n) + if err != nil { + return err + } + defer b.bufAllocator.put(buf1) + if err := b.args[1].VecEval{{ .TypeName }}(b.ctx, input, buf1); err != nil { + return err + } + + arg0 := result.{{ .TypeNameInColumn }}s() + arg1 := buf1.{{ .TypeNameInColumn }}s() + for i := 0; i < n; i++ { + if result.IsNull(i) && !buf1.IsNull(i) { + result.SetNull(i, false) + arg0[i] = arg1[i] + } + } + {{ else }} + buf0, err := b.bufAllocator.get(types.ET{{ .ETName }}, n) + if err != nil { + return err + } + defer b.bufAllocator.put(buf0) + if err := b.args[0].VecEval{{ .TypeName }}(b.ctx, input, buf0); err != nil { + return err + } + buf1, err := b.bufAllocator.get(types.ET{{ .ETName }}, n) + if err != nil { + return err + } + defer b.bufAllocator.put(buf1) + if err := b.args[1].VecEval{{ .TypeName }}(b.ctx, input, buf1); err != nil { + return err + } + + result.Reserve{{ .TypeNameInColumn }}(n) + for i := 0; i < n; i++ { + if !buf0.IsNull(i) { + result.Append{{ .TypeNameInColumn }}(buf0.Get{{ .TypeNameInColumn }}(i)) + } else if !buf1.IsNull(i) { + result.Append{{ .TypeNameInColumn }}(buf1.Get{{ .TypeNameInColumn }}(i)) + } else { + result.AppendNull() + } + } + {{ end -}} + return nil +} + +func (b *builtinIfNull{{ .TypeName }}Sig) vectorized() bool { + return true +} +{{ end }}{{/* with */}} +{{ end }}{{/* range .Sigs */}} +`)) + +var builtinIfVec = template.Must(template.New("builtinIfVec").Parse(` +{{ range .Sigs }}{{ with .Arg0 }} +func (b *builtinIf{{ .TypeName }}Sig) vecEval{{ .TypeName }}(input *chunk.Chunk, result *chunk.Column) error { + n := input.NumRows() + buf0, err := b.bufAllocator.get(types.ETInt, n) + if err != nil { + return err + } + defer b.bufAllocator.put(buf0) + if err := b.args[0].VecEvalInt(b.ctx, input, buf0); err != nil { + return err + } + +{{- if .Fixed }} + if err := b.args[1].VecEval{{ .TypeName }}(b.ctx, input, result); err != nil { + return err + } +{{- else }} + buf1, err := b.bufAllocator.get(types.ET{{ .ETName }}, n) + if err != nil { + return err + } + defer b.bufAllocator.put(buf1) + if err := b.args[1].VecEval{{ .TypeName }}(b.ctx, input, buf1); err != nil { + return err + } +{{- end }} + buf2, err := b.bufAllocator.get(types.ET{{ .ETName }}, n) + if err != nil { + return err + } + defer b.bufAllocator.put(buf2) + if err := b.args[2].VecEval{{ .TypeName }}(b.ctx, input, buf2); err != nil { + return err + } + +{{ if not .Fixed }} + result.Reserve{{ .TypeNameInColumn }}(n) +{{- end }} + arg0 := buf0.Int64s() +{{- if .Fixed }} + arg2 := buf2.{{ .TypeNameInColumn }}s() + rs := result.{{ .TypeNameInColumn }}s() +{{- end }} + for i := 0; i < n; i++ { + arg := arg0[i] + isNull0 := buf0.IsNull(i) + switch { + case isNull0 || arg == 0: +{{- if .Fixed }} + if buf2.IsNull(i) { + result.SetNull(i, true) + } else { + result.SetNull(i, false) + rs[i] = arg2[i] + } +{{- else }} + if buf2.IsNull(i) { + result.AppendNull() + } else { + result.Append{{ .TypeNameInColumn }}(buf2.Get{{ .TypeNameInColumn }}(i)) + } +{{- end }} + case arg != 0: +{{- if .Fixed 
}} +{{- else }} + if buf1.IsNull(i) { + result.AppendNull() + } else { + result.Append{{ .TypeNameInColumn }}(buf1.Get{{ .TypeNameInColumn }}(i)) + } +{{- end }} + } + } + return nil +} + +func (b *builtinIf{{ .TypeName }}Sig) vectorized() bool { + return true +} +{{ end }}{{/* with */}} +{{ end }}{{/* range .Sigs */}} +`)) + +var testFile = template.Must(template.New("testFile").Parse(`// Copyright 2019 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by go generate in expression/generator; DO NOT EDIT. + +package expression + +import ( + "math/rand" + "testing" + + . "github.com/pingcap/check" + "github.com/pingcap/tidb/parser/ast" + "github.com/pingcap/tidb/types" +) + +var defaultControlIntGener = &controlIntGener{zeroRation: 0.3, defaultGener: defaultGener{0.3, types.ETInt}} + +type controlIntGener struct { + zeroRation float64 + defaultGener +} + +func (g *controlIntGener) gen() interface{} { + if rand.Float64() < g.zeroRation { + return int64(0) + } + return g.defaultGener.gen() +} + +{{/* Add more test cases here if we have more functions in this file */}} +var vecBuiltin{{.Category}}Cases = map[string][]vecExprBenchCase{ +{{ with index .Functions 0 }} + ast.Ifnull: { + {{ range .Sigs }} + {retEvalType: types.ET{{ .Arg0.ETName }}, childrenTypes: []types.EvalType{types.ET{{ .Arg0.ETName }}, types.ET{{ .Arg0.ETName }}}}, + {{ end }} + }, +{{ end }} + +{{ with index .Functions 1 }} + ast.If: { + {{ range .Sigs }} + {retEvalType: types.ET{{ .Arg0.ETName }}, childrenTypes: []types.EvalType{types.ETInt, types.ET{{ .Arg0.ETName }}, types.ET{{ .Arg0.ETName }}}, geners: []dataGenerator{defaultControlIntGener}}, + {{ end }} + }, +{{ end }} +} + +func (s *testEvaluatorSuite) TestVectorizedBuiltin{{.Category}}EvalOneVecGenerated(c *C) { + testVectorizedEvalOneVec(c, vecBuiltinControlCases) +} + +func (s *testEvaluatorSuite) TestVectorizedBuiltin{{.Category}}FuncGenerated(c *C) { + testVectorizedBuiltinFunc(c, vecBuiltinControlCases) +} + +func BenchmarkVectorizedBuiltin{{.Category}}EvalOneVecGenerated(b *testing.B) { + benchmarkVectorizedEvalOneVec(b, vecBuiltinControlCases) +} + +func BenchmarkVectorizedBuiltin{{.Category}}FuncGenerated(b *testing.B) { + benchmarkVectorizedBuiltinFunc(b, vecBuiltinControlCases) +} +`)) + +type typeContext struct { + // Describe the name of "github.com/pingcap/tidb/types".ET{{ .ETName }} + ETName string + // Describe the name of "github.com/pingcap/tidb/expression".VecExpr.VecEval{{ .TypeName }} + // If undefined, it's same as ETName. + TypeName string + // Describe the name of "github.com/pingcap/tidb/util/chunk".*Column.Append{{ .TypeNameInColumn }}, + // Resize{{ .TypeNameInColumn }}, Reserve{{ .TypeNameInColumn }}, Get{{ .TypeNameInColumn }} and + // {{ .TypeNameInColumn }}s. + // If undefined, it's same as TypeName. 
+ TypeNameInColumn string + // Same as "github.com/pingcap/tidb/util/chunk".getFixedLen() + Fixed bool +} + +var ifNullSigs = []sig{ + {Arg0: TypeInt}, + {Arg0: TypeReal}, + {Arg0: TypeString}, +} + +var ifSigs = []sig{ + {Arg0: TypeInt}, + {Arg0: TypeReal}, + {Arg0: TypeString}, +} + +type sig struct { + Arg0 TypeContext +} + +type function struct { + FuncName string + Sigs []sig + Tmpl *template.Template +} + +var tmplVal = struct { + Category string + Functions []function +}{ + Category: "Control", + Functions: []function{ + {FuncName: "Ifnull", Sigs: ifNullSigs, Tmpl: builtinIfNullVec}, + {FuncName: "If", Sigs: ifSigs, Tmpl: builtinIfVec}, + }, +} + +func generateDotGo(fileName string) error { + w := new(bytes.Buffer) + w.WriteString(header) + for _, function := range tmplVal.Functions { + err := function.Tmpl.Execute(w, function) + if err != nil { + return err + } + } + data, err := format.Source(w.Bytes()) + if err != nil { + log.Println("[Warn]", fileName+": gofmt failed", err) + data = w.Bytes() // write original data for debugging + } + return ioutil.WriteFile(fileName, data, 0644) +} + +func generateTestDotGo(fileName string) error { + w := new(bytes.Buffer) + err := testFile.Execute(w, tmplVal) + if err != nil { + return err + } + data, err := format.Source(w.Bytes()) + if err != nil { + log.Println("[Warn]", fileName+": gofmt failed", err) + data = w.Bytes() // write original data for debugging + } + return ioutil.WriteFile(fileName, data, 0644) +} + +// generateOneFile generate one xxx.go file and the associated xxx_test.go file. +func generateOneFile(fileNamePrefix string) (err error) { + + err = generateDotGo(fileNamePrefix + ".go") + if err != nil { + return + } + err = generateTestDotGo(fileNamePrefix + "_test.go") + return +} + +func main() { + var err error + outputDir := "." + err = generateOneFile(filepath.Join(outputDir, "builtin_control_vec_generated")) + if err != nil { + log.Fatalln("generateOneFile", err) + } +} diff --git a/expression/generator/helper/helper.go b/expression/generator/helper/helper.go new file mode 100644 index 0000000..b749db5 --- /dev/null +++ b/expression/generator/helper/helper.go @@ -0,0 +1,40 @@ +// Copyright 2019 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package helper + +// TypeContext is the template context for each "github.com/pingcap/tidb/types".EvalType . +type TypeContext struct { + // Describe the name of "github.com/pingcap/tidb/types".ET{{ .ETName }} . + ETName string + // Describe the name of "github.com/pingcap/tidb/expression".VecExpr.VecEval{{ .TypeName }} . + TypeName string + // Describe the name of "github.com/pingcap/tidb/util/chunk".*Column.Append{{ .TypeNameInColumn }}, + // Resize{{ .TypeNameInColumn }}, Reserve{{ .TypeNameInColumn }}, Get{{ .TypeNameInColumn }} and + // {{ .TypeNameInColumn }}s. + // If undefined, it's same as TypeName. + TypeNameInColumn string + // Describe the type name in golang. + TypeNameGo string + // Same as "github.com/pingcap/tidb/util/chunk".getFixedLen() . 
+ Fixed bool +} + +var ( + // TypeInt represents the template context of types.ETInt . + TypeInt = TypeContext{ETName: "Int", TypeName: "Int", TypeNameInColumn: "Int64", TypeNameGo: "int64", Fixed: true} + // TypeReal represents the template context of types.ETReal . + TypeReal = TypeContext{ETName: "Real", TypeName: "Real", TypeNameInColumn: "Float64", TypeNameGo: "float64", Fixed: true} + // TypeString represents the template context of types.ETString . + TypeString = TypeContext{ETName: "String", TypeName: "String", TypeNameInColumn: "String", TypeNameGo: "string", Fixed: false} +) diff --git a/expression/generator/other_vec.go b/expression/generator/other_vec.go new file mode 100644 index 0000000..0b8950c --- /dev/null +++ b/expression/generator/other_vec.go @@ -0,0 +1,337 @@ +// Copyright 2019 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +// +build ignore + +package main + +import ( + "bytes" + "go/format" + "io/ioutil" + "log" + "path/filepath" + "text/template" + + . "github.com/pingcap/tidb/expression/generator/helper" +) + +const header = `// Copyright 2019 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by go generate in expression/generator; DO NOT EDIT. + +package expression +` + +const newLine = "\n" + +const builtinOtherImports = `import ( + "github.com/pingcap/tidb/parser/mysql" + "github.com/pingcap/tidb/types" + "github.com/pingcap/tidb/util/chunk" +) +` + +var builtinInTmpl = template.Must(template.New("builtinInTmpl").Parse(` +{{ define "BufAllocator" }} + buf0, err := b.bufAllocator.get(types.ET{{ .Input.ETName }}, n) + if err != nil { + return err + } + defer b.bufAllocator.put(buf0) + if err := b.args[0].VecEval{{ .Input.TypeName }}(b.ctx, input, buf0); err != nil { + return err + } + buf1, err := b.bufAllocator.get(types.ET{{ .Input.ETName }}, n) + if err != nil { + return err + } + defer b.bufAllocator.put(buf1) +{{ end }} +{{ define "SetHasNull" }} + for i := 0; i < n; i++ { + if result.IsNull(i) { + result.SetNull(i, hasNull[i]) + } + } + return nil +{{ end }} +{{ define "Compare" }} + {{ if eq .Input.TypeName "Int" -}} + compareResult = 1 + switch { + case (isUnsigned0 && isUnsigned), (!isUnsigned0 && !isUnsigned): + if arg1 == arg0 { + compareResult = 0 + } + case !isUnsigned0 && isUnsigned: + if arg0 >= 0 && arg1 == arg0 { + compareResult = 0 + } + case isUnsigned0 && !isUnsigned: + if arg1 >= 0 && arg1 == arg0 { + compareResult = 0 + } + } + {{- else -}} + compareResult = types.Compare{{ .Input.TypeNameInColumn }}(arg0, arg1) + {{- end -}} +{{ end }} + +{{ range . 
}} +{{ $InputInt := (eq .Input.TypeName "Int") }} +{{ $InputString := (eq .Input.TypeName "String") }} +{{ $InputFixed := ( .Input.Fixed ) }} +func (b *{{.SigName}}) vecEvalInt(input *chunk.Chunk, result *chunk.Column) error { + n := input.NumRows() + {{- template "BufAllocator" . }} + {{- if $InputFixed }} + args0 := buf0.{{.Input.TypeNameInColumn}}s() + {{- end }} + result.ResizeInt64(n, true) + r64s := result.Int64s() + for i:=0; i 2 and c < 3)", + result: "[eq(test.t.a, 1) eq(test.t.b, 1)]", + }, + } + + ctx := context.Background() + for _, tt := range tests { + sql := "select * from t where " + tt.exprStr + sctx := tk.Se.(sessionctx.Context) + sc := sctx.GetSessionVars().StmtCtx + stmts, err := session.Parse(sctx, sql) + c.Assert(err, IsNil, Commentf("error %v, for expr %s", err, tt.exprStr)) + c.Assert(stmts, HasLen, 1) + is := domain.GetDomain(sctx).InfoSchema() + err = plannercore.Preprocess(sctx, stmts[0], is) + c.Assert(err, IsNil, Commentf("error %v, for resolve name, expr %s", err, tt.exprStr)) + p, _, err := plannercore.BuildLogicalPlan(ctx, sctx, stmts[0], is) + c.Assert(err, IsNil, Commentf("error %v, for build plan, expr %s", err, tt.exprStr)) + selection := p.(plannercore.LogicalPlan).Children()[0].(*plannercore.LogicalSelection) + conds := make([]expression.Expression, len(selection.Conditions)) + for i, cond := range selection.Conditions { + conds[i] = expression.PushDownNot(sctx, cond) + } + afterFunc := expression.ExtractFiltersFromDNFs(sctx, conds) + sort.Slice(afterFunc, func(i, j int) bool { + return bytes.Compare(afterFunc[i].HashCode(sc), afterFunc[j].HashCode(sc)) < 0 + }) + c.Assert(fmt.Sprintf("%s", afterFunc), Equals, tt.result, Commentf("wrong result for expr: %s", tt.exprStr)) + } +} + +func newStoreWithBootstrap() (kv.Storage, *domain.Domain, error) { + store, err := mockstore.NewMockTikvStore() + if err != nil { + return nil, nil, err + } + session.SetSchemaLease(0) + dom, err := session.BootstrapSession(store) + return store, dom, err +} + +func (s *testIntegrationSuite) TestPrefixIndex(c *C) { + tk := testkit.NewTestKit(c, s.store) + defer s.cleanEnv(c) + tk.MustExec("use test") + tk.MustExec(`CREATE TABLE t1 ( + name varchar(12) DEFAULT NULL, + KEY pname (name(12)) + )`) + + tk.MustExec("insert into t1 values('借款策略集_网页');") + res := tk.MustQuery("select * from t1 where name = '借款策略集_网页';") + res.Check(testkit.Rows("借款策略集_网页")) + + tk.MustExec(`CREATE TABLE prefix ( + a int(11) NOT NULL, + b varchar(55) DEFAULT NULL, + c int(11) DEFAULT NULL, + PRIMARY KEY (a), + KEY prefix_index (b(2)), + KEY prefix_complex (a,b(2)) + );`) + + tk.MustExec("INSERT INTO prefix VALUES(0, 'b', 2), (1, 'bbb', 3), (2, 'bbc', 4), (3, 'bbb', 5), (4, 'abc', 6), (5, 'abc', 7), (6, 'abc', 7), (7, 'ÿÿ', 8), (8, 'ÿÿ0', 9), (9, 'ÿÿÿ', 10);") + res = tk.MustQuery("select c, b from prefix where b > 'ÿ' and b < 'ÿÿc'") + res.Check(testkit.Rows("8 ÿÿ", "9 ÿÿ0")) +} + +func (s *testIntegrationSuite) TestUnknowHintIgnore(c *C) { + tk := testkit.NewTestKit(c, s.store) + tk.MustExec("USE test") + tk.MustExec("create table t(a int)") + tk.MustQuery("select /*+ unknown_hint(c1)*/ 1").Check(testkit.Rows("1")) + tk.MustQuery("show warnings").Check(testkit.Rows("Warning 1064 You have an error in your SQL syntax; check the manual that corresponds to your TiDB version for the right syntax to use line 1 column 29 near \"unknown_hint(c1)*/ 1\" ")) + _, err := tk.Exec("select 1 from /*+ test1() */ t") + c.Assert(err, NotNil) +} + +func (s *testIntegrationSuite) TestForeignKeyVar(c *C) { + + tk := 
testkit.NewTestKit(c, s.store) + + tk.MustExec("SET FOREIGN_KEY_CHECKS=1") + tk.MustQuery("SHOW WARNINGS").Check(testkit.Rows("Warning 8047 variable 'foreign_key_checks' does not yet support value: 1")) +} + +func (s *testIntegrationSuite) TestIssue10804(c *C) { + tk := testkit.NewTestKit(c, s.store) + tk.MustQuery(`SELECT @@information_schema_stats_expiry`).Check(testkit.Rows(`86400`)) + tk.MustExec("/*!80000 SET SESSION information_schema_stats_expiry=0 */") + tk.MustQuery(`SELECT @@information_schema_stats_expiry`).Check(testkit.Rows(`0`)) + tk.MustQuery(`SELECT @@GLOBAL.information_schema_stats_expiry`).Check(testkit.Rows(`86400`)) + tk.MustExec("/*!80000 SET GLOBAL information_schema_stats_expiry=0 */") + tk.MustQuery(`SELECT @@GLOBAL.information_schema_stats_expiry`).Check(testkit.Rows(`0`)) +} + +func (s *testIntegrationSuite) TestInvalidEndingStatement(c *C) { + tk := testkit.NewTestKit(c, s.store) + tk.MustExec("use test") + parseErrMsg := "[parser:1064]" + errMsgLen := len(parseErrMsg) + + assertParseErr := func(sql string) { + _, err := tk.Exec(sql) + c.Assert(err, NotNil) + c.Assert(err.Error()[:errMsgLen], Equals, parseErrMsg) + } + + assertParseErr("drop table if exists t'xyz") + assertParseErr("drop table if exists t'") + assertParseErr("drop table if exists t`") + assertParseErr(`drop table if exists t'`) + assertParseErr(`drop table if exists t"`) +} + +func (s *testIntegrationSuite) TestIssue10675(c *C) { + tk := testkit.NewTestKit(c, s.store) + tk.MustExec("use test") + tk.MustExec(`drop table if exists t;`) + tk.MustExec(`create table t(a int);`) + tk.MustExec(`insert into t values(1);`) + tk.MustQuery(`select * from t where a < -184467440737095516167.1;`).Check(testkit.Rows()) + tk.MustQuery(`select * from t where a > -184467440737095516167.1;`).Check( + testkit.Rows("1")) + tk.MustQuery(`select * from t where a < 184467440737095516167.1;`).Check( + testkit.Rows("1")) + tk.MustQuery(`select * from t where a > 184467440737095516167.1;`).Check(testkit.Rows()) +} + +func (s *testIntegrationSuite) TestDefEnableVectorizedEvaluation(c *C) { + tk := testkit.NewTestKit(c, s.store) + tk.MustExec("use mysql") + tk.MustQuery(`select @@tidb_enable_vectorized_expression`).Check(testkit.Rows("1")) +} + +func (s *testIntegrationSuite) TestDecodetoChunkReuse(c *C) { + tk := testkit.NewTestKitWithInit(c, s.store) + tk.MustExec("create table chk (a int,b varchar(20))") + for i := 0; i < 200; i++ { + if i%5 == 0 { + tk.MustExec(fmt.Sprintf("insert chk values (NULL,NULL)")) + continue + } + tk.MustExec(fmt.Sprintf("insert chk values (%d,'%s')", i, strconv.Itoa(i))) + } + + tk.Se.GetSessionVars().DistSQLScanConcurrency = 1 + tk.MustExec("set tidb_init_chunk_size = 2") + tk.MustExec("set tidb_max_chunk_size = 32") + defer func() { + tk.MustExec(fmt.Sprintf("set tidb_init_chunk_size = %d", variable.DefInitChunkSize)) + tk.MustExec(fmt.Sprintf("set tidb_max_chunk_size = %d", variable.DefMaxChunkSize)) + }() + rs, err := tk.Exec("select * from chk") + c.Assert(err, IsNil) + req := rs.NewChunk() + var count int + for { + err = rs.Next(context.TODO(), req) + c.Assert(err, IsNil) + numRows := req.NumRows() + if numRows == 0 { + break + } + for i := 0; i < numRows; i++ { + if count%5 == 0 { + c.Assert(req.GetRow(i).IsNull(0), Equals, true) + c.Assert(req.GetRow(i).IsNull(1), Equals, true) + } else { + c.Assert(req.GetRow(i).IsNull(0), Equals, false) + c.Assert(req.GetRow(i).IsNull(1), Equals, false) + c.Assert(req.GetRow(i).GetInt64(0), Equals, int64(count)) + c.Assert(req.GetRow(i).GetString(1), 
Equals, strconv.Itoa(count)) + } + count++ + } + } + c.Assert(count, Equals, 200) + rs.Close() +} diff --git a/expression/scalar_function.go b/expression/scalar_function.go new file mode 100644 index 0000000..ba0b220 --- /dev/null +++ b/expression/scalar_function.go @@ -0,0 +1,292 @@ +// Copyright 2016 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package expression + +import ( + "bytes" + "fmt" + + "github.com/pingcap/errors" + "github.com/pingcap/tidb/parser/model" + "github.com/pingcap/tidb/parser/mysql" + "github.com/pingcap/tidb/parser/terror" + "github.com/pingcap/tidb/sessionctx" + "github.com/pingcap/tidb/sessionctx/stmtctx" + "github.com/pingcap/tidb/types" + "github.com/pingcap/tidb/util/chunk" + "github.com/pingcap/tidb/util/codec" + "github.com/pingcap/tidb/util/hack" +) + +// ScalarFunction is the function that returns a value. +type ScalarFunction struct { + FuncName model.CIStr + // RetType is the type that ScalarFunction returns. + // TODO: Implement type inference here, now we use ast's return type temporarily. + RetType *types.FieldType + Function builtinFunc + hashcode []byte +} + +// VecEvalInt evaluates this expression in a vectorized manner. +func (sf *ScalarFunction) VecEvalInt(ctx sessionctx.Context, input *chunk.Chunk, result *chunk.Column) error { + return sf.Function.vecEvalInt(input, result) +} + +// VecEvalReal evaluates this expression in a vectorized manner. +func (sf *ScalarFunction) VecEvalReal(ctx sessionctx.Context, input *chunk.Chunk, result *chunk.Column) error { + return sf.Function.vecEvalReal(input, result) +} + +// VecEvalString evaluates this expression in a vectorized manner. +func (sf *ScalarFunction) VecEvalString(ctx sessionctx.Context, input *chunk.Chunk, result *chunk.Column) error { + return sf.Function.vecEvalString(input, result) +} + +// VecEvalDecimal evaluates this expression in a vectorized manner. +func (sf *ScalarFunction) VecEvalDecimal(ctx sessionctx.Context, input *chunk.Chunk, result *chunk.Column) error { + return sf.Function.vecEvalDecimal(input, result) +} + +// VecEvalTime evaluates this expression in a vectorized manner. +func (sf *ScalarFunction) VecEvalTime(ctx sessionctx.Context, input *chunk.Chunk, result *chunk.Column) error { + return sf.Function.vecEvalTime(input, result) +} + +// VecEvalDuration evaluates this expression in a vectorized manner. +func (sf *ScalarFunction) VecEvalDuration(ctx sessionctx.Context, input *chunk.Chunk, result *chunk.Column) error { + return sf.Function.vecEvalDuration(input, result) +} + +// GetArgs gets arguments of function. +func (sf *ScalarFunction) GetArgs() []Expression { + return sf.Function.getArgs() +} + +// Vectorized returns if this expression supports vectorized evaluation. +func (sf *ScalarFunction) Vectorized() bool { + return sf.Function.vectorized() && sf.Function.isChildrenVectorized() +} + +// GetCtx gets the context of function. +func (sf *ScalarFunction) GetCtx() sessionctx.Context { + return sf.Function.getCtx() +} + +// String implements fmt.Stringer interface. 
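+// The result has the form funcname(arg1, arg2, ...), using the lower-case function name.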
+func (sf *ScalarFunction) String() string { + var buffer bytes.Buffer + fmt.Fprintf(&buffer, "%s(", sf.FuncName.L) + for i, arg := range sf.GetArgs() { + buffer.WriteString(arg.String()) + if i+1 != len(sf.GetArgs()) { + buffer.WriteString(", ") + } + } + buffer.WriteString(")") + return buffer.String() +} + +// MarshalJSON implements json.Marshaler interface. +func (sf *ScalarFunction) MarshalJSON() ([]byte, error) { + return []byte(fmt.Sprintf("\"%s\"", sf)), nil +} + +// newFunctionImpl creates a new scalar function or constant. +func newFunctionImpl(ctx sessionctx.Context, fold bool, funcName string, retType *types.FieldType, args ...Expression) (Expression, error) { + if retType == nil { + return nil, errors.Errorf("RetType cannot be nil for ScalarFunction.") + } + fc, ok := funcs[funcName] + if !ok { + return nil, errFunctionNotExists.GenWithStackByArgs("FUNCTION", funcName) + } + funcArgs := make([]Expression, len(args)) + copy(funcArgs, args) + f, err := fc.getFunction(ctx, funcArgs) + if err != nil { + return nil, err + } + if builtinRetTp := f.getRetTp(); builtinRetTp.Tp != mysql.TypeUnspecified || retType.Tp == mysql.TypeUnspecified { + retType = builtinRetTp + } + sf := &ScalarFunction{ + FuncName: model.NewCIStr(funcName), + RetType: retType, + Function: f, + } + if fold { + return FoldConstant(sf), nil + } + return sf, nil +} + +// NewFunction creates a new scalar function or constant via a constant folding. +func NewFunction(ctx sessionctx.Context, funcName string, retType *types.FieldType, args ...Expression) (Expression, error) { + return newFunctionImpl(ctx, true, funcName, retType, args...) +} + +// NewFunctionBase creates a new scalar function with no constant folding. +func NewFunctionBase(ctx sessionctx.Context, funcName string, retType *types.FieldType, args ...Expression) (Expression, error) { + return newFunctionImpl(ctx, false, funcName, retType, args...) +} + +// NewFunctionInternal is similar to NewFunction, but do not returns error, should only be used internally. +func NewFunctionInternal(ctx sessionctx.Context, funcName string, retType *types.FieldType, args ...Expression) Expression { + expr, err := NewFunction(ctx, funcName, retType, args...) + terror.Log(err) + return expr +} + +// ScalarFuncs2Exprs converts []*ScalarFunction to []Expression. +func ScalarFuncs2Exprs(funcs []*ScalarFunction) []Expression { + result := make([]Expression, 0, len(funcs)) + for _, col := range funcs { + result = append(result, col) + } + return result +} + +// Clone implements Expression interface. +func (sf *ScalarFunction) Clone() Expression { + return &ScalarFunction{ + FuncName: sf.FuncName, + RetType: sf.RetType, + Function: sf.Function.Clone(), + hashcode: sf.hashcode, + } +} + +// GetType implements Expression interface. +func (sf *ScalarFunction) GetType() *types.FieldType { + return sf.RetType +} + +// Equal implements Expression interface. +func (sf *ScalarFunction) Equal(ctx sessionctx.Context, e Expression) bool { + fun, ok := e.(*ScalarFunction) + if !ok { + return false + } + if sf.FuncName.L != fun.FuncName.L { + return false + } + return sf.Function.equal(fun.Function) +} + +// IsCorrelated implements Expression interface. +func (sf *ScalarFunction) IsCorrelated() bool { + for _, arg := range sf.GetArgs() { + if arg.IsCorrelated() { + return true + } + } + return false +} + +// ConstItem implements Expression interface. 
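+// It returns true only when the function is foldable (not in unFoldableFunctions) and every argument is a constant item.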
+func (sf *ScalarFunction) ConstItem() bool { + // Note: some unfoldable functions are deterministic, we use unFoldableFunctions here for simplification. + if _, ok := unFoldableFunctions[sf.FuncName.L]; ok { + return false + } + for _, arg := range sf.GetArgs() { + if !arg.ConstItem() { + return false + } + } + return true +} + +// Decorrelate implements Expression interface. +func (sf *ScalarFunction) Decorrelate(schema *Schema) Expression { + for i, arg := range sf.GetArgs() { + sf.GetArgs()[i] = arg.Decorrelate(schema) + } + return sf +} + +// Eval implements Expression interface. +func (sf *ScalarFunction) Eval(row chunk.Row) (d types.Datum, err error) { + var ( + res interface{} + isNull bool + ) + switch tp, evalType := sf.GetType(), sf.GetType().EvalType(); evalType { + case types.ETInt: + var intRes int64 + intRes, isNull, err = sf.EvalInt(sf.GetCtx(), row) + if mysql.HasUnsignedFlag(tp.Flag) { + res = uint64(intRes) + } else { + res = intRes + } + case types.ETReal: + res, isNull, err = sf.EvalReal(sf.GetCtx(), row) + case types.ETString: + res, isNull, err = sf.EvalString(sf.GetCtx(), row) + } + + if isNull || err != nil { + d.SetValue(nil) + return d, err + } + d.SetValue(res) + return +} + +// EvalInt implements Expression interface. +func (sf *ScalarFunction) EvalInt(ctx sessionctx.Context, row chunk.Row) (int64, bool, error) { + return sf.Function.evalInt(row) +} + +// EvalReal implements Expression interface. +func (sf *ScalarFunction) EvalReal(ctx sessionctx.Context, row chunk.Row) (float64, bool, error) { + return sf.Function.evalReal(row) +} + +// EvalString implements Expression interface. +func (sf *ScalarFunction) EvalString(ctx sessionctx.Context, row chunk.Row) (string, bool, error) { + return sf.Function.evalString(row) +} + +// HashCode implements Expression interface. +func (sf *ScalarFunction) HashCode(sc *stmtctx.StatementContext) []byte { + if len(sf.hashcode) > 0 { + return sf.hashcode + } + sf.hashcode = append(sf.hashcode, scalarFunctionFlag) + sf.hashcode = codec.EncodeCompactBytes(sf.hashcode, hack.Slice(sf.FuncName.L)) + for _, arg := range sf.GetArgs() { + sf.hashcode = append(sf.hashcode, arg.HashCode(sc)...) + } + return sf.hashcode +} + +// ResolveIndices implements Expression interface. +func (sf *ScalarFunction) ResolveIndices(schema *Schema) (Expression, error) { + newSf := sf.Clone() + err := newSf.resolveIndices(schema) + return newSf, err +} + +func (sf *ScalarFunction) resolveIndices(schema *Schema) error { + for _, arg := range sf.GetArgs() { + err := arg.resolveIndices(schema) + if err != nil { + return err + } + } + return nil +} diff --git a/expression/schema.go b/expression/schema.go new file mode 100644 index 0000000..36a7d97 --- /dev/null +++ b/expression/schema.go @@ -0,0 +1,185 @@ +// Copyright 2016 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package expression + +import ( + "strings" +) + +// KeyInfo stores the columns of one unique key or primary key. +type KeyInfo []*Column + +// Clone copies the entire UniqueKey. 
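+// Each column in the key is deep-copied via Column.Clone.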
+func (ki KeyInfo) Clone() KeyInfo { + result := make([]*Column, 0, len(ki)) + for _, col := range ki { + result = append(result, col.Clone().(*Column)) + } + return result +} + +// Schema stands for the row schema and unique key information get from input. +type Schema struct { + Columns []*Column + Keys []KeyInfo +} + +// String implements fmt.Stringer interface. +func (s *Schema) String() string { + colStrs := make([]string, 0, len(s.Columns)) + for _, col := range s.Columns { + colStrs = append(colStrs, col.String()) + } + ukStrs := make([]string, 0, len(s.Keys)) + for _, key := range s.Keys { + ukColStrs := make([]string, 0, len(key)) + for _, col := range key { + ukColStrs = append(ukColStrs, col.String()) + } + ukStrs = append(ukStrs, "["+strings.Join(ukColStrs, ",")+"]") + } + return "Column: [" + strings.Join(colStrs, ",") + "] Unique key: [" + strings.Join(ukStrs, ",") + "]" +} + +// Clone copies the total schema. +func (s *Schema) Clone() *Schema { + cols := make([]*Column, 0, s.Len()) + keys := make([]KeyInfo, 0, len(s.Keys)) + for _, col := range s.Columns { + cols = append(cols, col.Clone().(*Column)) + } + for _, key := range s.Keys { + keys = append(keys, key.Clone()) + } + schema := NewSchema(cols...) + schema.SetUniqueKeys(keys) + return schema +} + +// ExprFromSchema checks if all columns of this expression are from the same schema. +func ExprFromSchema(expr Expression, schema *Schema) bool { + switch v := expr.(type) { + case *Column: + return schema.Contains(v) + case *ScalarFunction: + for _, arg := range v.GetArgs() { + if !ExprFromSchema(arg, schema) { + return false + } + } + return true + case *Constant: + return true + } + return false +} + +// RetrieveColumn retrieves column in expression from the columns in schema. +func (s *Schema) RetrieveColumn(col *Column) *Column { + index := s.ColumnIndex(col) + if index != -1 { + return s.Columns[index] + } + return nil +} + +// IsUniqueKey checks if this column is a unique key. +func (s *Schema) IsUniqueKey(col *Column) bool { + for _, key := range s.Keys { + if len(key) == 1 && key[0].Equal(nil, col) { + return true + } + } + return false +} + +// ColumnIndex finds the index for a column. +func (s *Schema) ColumnIndex(col *Column) int { + for i, c := range s.Columns { + if c.UniqueID == col.UniqueID { + return i + } + } + return -1 +} + +// Contains checks if the schema contains the column. +func (s *Schema) Contains(col *Column) bool { + return s.ColumnIndex(col) != -1 +} + +// Len returns the number of columns in schema. +func (s *Schema) Len() int { + return len(s.Columns) +} + +// Append append new column to the columns stored in schema. +func (s *Schema) Append(col ...*Column) { + s.Columns = append(s.Columns, col...) +} + +// SetUniqueKeys will set the value of Schema.Keys. +func (s *Schema) SetUniqueKeys(keys []KeyInfo) { + s.Keys = keys +} + +// ColumnsIndices will return a slice which contains the position of each column in schema. +// If there is one column that doesn't match, nil will be returned. +func (s *Schema) ColumnsIndices(cols []*Column) (ret []int) { + ret = make([]int, 0, len(cols)) + for _, col := range cols { + pos := s.ColumnIndex(col) + if pos != -1 { + ret = append(ret, pos) + } else { + return nil + } + } + return +} + +// ColumnsByIndices returns columns by multiple offsets. +// Callers should guarantee that all the offsets provided should be valid, which means offset should: +// 1. not smaller than 0, and +// 2. 
not exceed len(s.Columns) +func (s *Schema) ColumnsByIndices(offsets []int) []*Column { + cols := make([]*Column, 0, len(offsets)) + for _, offset := range offsets { + cols = append(cols, s.Columns[offset]) + } + return cols +} + +// MergeSchema will merge two schema into one schema. We shouldn't need to consider unique keys. +// That will be processed in build_key_info.go. +func MergeSchema(lSchema, rSchema *Schema) *Schema { + if lSchema == nil && rSchema == nil { + return nil + } + if lSchema == nil { + return rSchema.Clone() + } + if rSchema == nil { + return lSchema.Clone() + } + tmpL := lSchema.Clone() + tmpR := rSchema.Clone() + ret := NewSchema(append(tmpL.Columns, tmpR.Columns...)...) + return ret +} + +// NewSchema returns a schema made by its parameter. +func NewSchema(cols ...*Column) *Schema { + return &Schema{Columns: cols} +} diff --git a/expression/schema_test.go b/expression/schema_test.go new file mode 100644 index 0000000..4a501a1 --- /dev/null +++ b/expression/schema_test.go @@ -0,0 +1,148 @@ +// Copyright 2017 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package expression + +import ( + "fmt" + + . "github.com/pingcap/check" +) + +// generateKeys4Schema will generate keys for a given schema. Used only in this file. +func generateKeys4Schema(schema *Schema) { + keyCount := len(schema.Columns) - 1 + keys := make([]KeyInfo, 0, keyCount) + for i := 0; i < keyCount; i++ { + keys = append(keys, []*Column{schema.Columns[i]}) + } + schema.Keys = keys +} + +var _ = Suite(&testEvalSuite{}) + +type testEvalSuite struct { + colID int64 +} + +func (s *testEvalSuite) SetUpSuite(c *C) { + s.colID = 0 +} + +func (s *testEvalSuite) allocColID() int64 { + s.colID++ + return s.colID +} + +func (s *testEvalSuite) TearDownTest(c *C) { + s.colID = 0 +} + +// generateSchema will generate a schema for test. Used only in this file. +func (s *testEvalSuite) generateSchema(colCount int) *Schema { + cols := make([]*Column, 0, colCount) + for i := 0; i < colCount; i++ { + cols = append(cols, &Column{ + UniqueID: s.allocColID(), + }) + } + return NewSchema(cols...) 
+} + +func (s *testEvalSuite) TestSchemaString(c *C) { + schema := s.generateSchema(5) + c.Assert(schema.String(), Equals, "Column: [Column#1,Column#2,Column#3,Column#4,Column#5] Unique key: []") + generateKeys4Schema(schema) + c.Assert(schema.String(), Equals, "Column: [Column#1,Column#2,Column#3,Column#4,Column#5] Unique key: [[Column#1],[Column#2],[Column#3],[Column#4]]") +} + +func (s *testEvalSuite) TestSchemaRetrieveColumn(c *C) { + schema := s.generateSchema(5) + colOutSchema := &Column{ + UniqueID: 100, + } + for _, col := range schema.Columns { + c.Assert(schema.RetrieveColumn(col), Equals, col) + } + c.Assert(schema.RetrieveColumn(colOutSchema), IsNil) +} + +func (s *testEvalSuite) TestSchemaIsUniqueKey(c *C) { + schema := s.generateSchema(5) + generateKeys4Schema(schema) + colOutSchema := &Column{ + UniqueID: 100, + } + for i, col := range schema.Columns { + if i < len(schema.Columns)-1 { + c.Assert(schema.IsUniqueKey(col), Equals, true) + } else { + c.Assert(schema.IsUniqueKey(col), Equals, false) + } + } + c.Assert(schema.IsUniqueKey(colOutSchema), Equals, false) +} + +func (s *testEvalSuite) TestSchemaContains(c *C) { + schema := s.generateSchema(5) + colOutSchema := &Column{ + UniqueID: 100, + } + for _, col := range schema.Columns { + c.Assert(schema.Contains(col), Equals, true) + } + c.Assert(schema.Contains(colOutSchema), Equals, false) +} + +func (s *testEvalSuite) TestSchemaColumnsIndices(c *C) { + schema := s.generateSchema(5) + colOutSchema := &Column{ + UniqueID: 100, + } + for i := 0; i < len(schema.Columns)-1; i++ { + colIndices := schema.ColumnsIndices([]*Column{schema.Columns[i], schema.Columns[i+1]}) + for j, res := range colIndices { + c.Assert(res, Equals, i+j) + } + } + c.Assert(schema.ColumnsIndices([]*Column{schema.Columns[0], schema.Columns[1], colOutSchema, schema.Columns[2]}), IsNil) +} + +func (s *testEvalSuite) TestSchemaColumnsByIndices(c *C) { + schema := s.generateSchema(5) + indices := []int{0, 1, 2, 3} + retCols := schema.ColumnsByIndices(indices) + for i, ret := range retCols { + c.Assert(fmt.Sprintf("%p", schema.Columns[i]), Equals, fmt.Sprintf("%p", ret)) + } +} + +func (s *testEvalSuite) TestSchemaMergeSchema(c *C) { + lSchema := s.generateSchema(5) + generateKeys4Schema(lSchema) + + rSchema := s.generateSchema(5) + generateKeys4Schema(rSchema) + + c.Assert(MergeSchema(nil, nil), IsNil) + c.Assert(MergeSchema(lSchema, nil).String(), Equals, lSchema.String()) + c.Assert(MergeSchema(nil, rSchema).String(), Equals, rSchema.String()) + + schema := MergeSchema(lSchema, rSchema) + for i := 0; i < len(lSchema.Columns); i++ { + c.Assert(schema.Columns[i].UniqueID, Equals, lSchema.Columns[i].UniqueID) + } + for i := 0; i < len(rSchema.Columns); i++ { + c.Assert(schema.Columns[i+len(lSchema.Columns)].UniqueID, Equals, rSchema.Columns[i].UniqueID) + } +} diff --git a/expression/testdata/expression_suite_in.json b/expression/testdata/expression_suite_in.json new file mode 100644 index 0000000..bb6ff28 --- /dev/null +++ b/expression/testdata/expression_suite_in.json @@ -0,0 +1,37 @@ +[ + { + "name": "TestOuterJoinPropConst", + "cases": [ + // Positive tests. 
+ "explain select * from t1 left join t2 on t1.a > t2.a and t1.a = 1", + "explain select * from t1 left join t2 on t1.a > t2.a where t1.a = 1", + "explain select * from t1 left join t2 on t1.a = t2.a and t1.a > 1", + "explain select * from t1 left join t2 on t1.a = t2.a where t1.a > 1", + "explain select * from t1 right join t2 on t1.a > t2.a where t2.a = 1", + "explain select * from t1 right join t2 on t1.a = t2.a where t2.a > 1", + "explain select * from t1 right join t2 on t1.a = t2.a and t2.a > 1", + "explain select * from t1 right join t2 on t1.a > t2.a and t2.a = 1", + // Negative tests. + "explain select * from t1 left join t2 on t1.a = t2.a and t2.a > 1", + "explain select * from t1 left join t2 on t1.a > t2.a and t2.a = 1", + "explain select * from t1 right join t2 on t1.a > t2.a and t1.a = 1", + "explain select * from t1 right join t2 on t1.a = t2.a and t1.a > 1", + "explain select * from t1 left join t2 on t1.a = t1.b and t1.a > 1", + "explain select * from t1 left join t2 on t2.a = t2.b and t2.a > 1", + // Constant equal condition merge in outer join. + "explain select * from t1 left join t2 on true where t1.a = 1 and false", + "explain select * from t1 left join t2 on true where t1.a = 1 and null", + "explain select * from t1 left join t2 on true where t1.a = null", + "explain select * from t1 left join t2 on true where t1.a = 1 and t1.a = 2", + "explain select * from t1 left join t2 on true where t1.a = 1 and t1.a = 1", + "explain select * from t1 left join t2 on false", + "explain select * from t1 right join t2 on false", + "explain select * from t1 left join t2 on t1.a = 1 and t1.a = 2", + "explain select * from t1 left join t2 on t1.a =1 where t1.a = 2", + "explain select * from t1 left join t2 on t2.a = 1 and t2.a = 2", + // Constant propagation for DNF in outer join. 
+ "explain select * from t1 left join t2 on t1.a = 1 or (t1.a = 2 and t1.a = 3)", + "explain select * from t1 left join t2 on true where t1.a = 1 or (t1.a = 2 and t1.a = 3)" + ] + } +] diff --git a/expression/testdata/expression_suite_out.json b/expression/testdata/expression_suite_out.json new file mode 100644 index 0000000..d8b190c --- /dev/null +++ b/expression/testdata/expression_suite_out.json @@ -0,0 +1,280 @@ +[ + { + "Name": "TestOuterJoinPropConst", + "Cases": [ + { + "SQL": "explain select * from t1 left join t2 on t1.a > t2.a and t1.a = 1", + "Result": [ + "HashLeftJoin_6 33233333.33 root CARTESIAN left outer join, left cond:[eq(test.t1.a, 1)]", + "├─TableReader_8 10000.00 root data:TableScan_7", + "│ └─TableScan_7 10000.00 cop table:t1, range:[-inf,+inf], keep order:false, stats:pseudo", + "└─TableReader_11 3323.33 root data:Selection_10", + " └─Selection_10 3323.33 cop gt(1, test.t2.a)", + " └─TableScan_9 10000.00 cop table:t2, range:[-inf,+inf], keep order:false, stats:pseudo" + ] + }, + { + "SQL": "explain select * from t1 left join t2 on t1.a > t2.a where t1.a = 1", + "Result": [ + "HashLeftJoin_7 33233.33 root CARTESIAN left outer join", + "├─TableReader_10 10.00 root data:Selection_9", + "│ └─Selection_9 10.00 cop eq(test.t1.a, 1)", + "│ └─TableScan_8 10000.00 cop table:t1, range:[-inf,+inf], keep order:false, stats:pseudo", + "└─TableReader_13 3323.33 root data:Selection_12", + " └─Selection_12 3323.33 cop gt(1, test.t2.a)", + " └─TableScan_11 10000.00 cop table:t2, range:[-inf,+inf], keep order:false, stats:pseudo" + ] + }, + { + "SQL": "explain select * from t1 left join t2 on t1.a = t2.a and t1.a > 1", + "Result": [ + "HashLeftJoin_6 10000.00 root left outer join, equal:[eq(test.t1.a, test.t2.a)], left cond:[gt(test.t1.a, 1)]", + "├─TableReader_8 10000.00 root data:TableScan_7", + "│ └─TableScan_7 10000.00 cop table:t1, range:[-inf,+inf], keep order:false, stats:pseudo", + "└─TableReader_11 3333.33 root data:Selection_10", + " └─Selection_10 3333.33 cop gt(test.t2.a, 1), not(isnull(test.t2.a))", + " └─TableScan_9 10000.00 cop table:t2, range:[-inf,+inf], keep order:false, stats:pseudo" + ] + }, + { + "SQL": "explain select * from t1 left join t2 on t1.a = t2.a where t1.a > 1", + "Result": [ + "HashLeftJoin_7 4166.67 root left outer join, equal:[eq(test.t1.a, test.t2.a)]", + "├─TableReader_10 3333.33 root data:Selection_9", + "│ └─Selection_9 3333.33 cop gt(test.t1.a, 1)", + "│ └─TableScan_8 10000.00 cop table:t1, range:[-inf,+inf], keep order:false, stats:pseudo", + "└─TableReader_13 3333.33 root data:Selection_12", + " └─Selection_12 3333.33 cop gt(test.t2.a, 1), not(isnull(test.t2.a))", + " └─TableScan_11 10000.00 cop table:t2, range:[-inf,+inf], keep order:false, stats:pseudo" + ] + }, + { + "SQL": "explain select * from t1 right join t2 on t1.a > t2.a where t2.a = 1", + "Result": [ + "HashRightJoin_7 33333.33 root CARTESIAN right outer join", + "├─TableReader_10 3333.33 root data:Selection_9", + "│ └─Selection_9 3333.33 cop gt(test.t1.a, 1)", + "│ └─TableScan_8 10000.00 cop table:t1, range:[-inf,+inf], keep order:false, stats:pseudo", + "└─TableReader_13 10.00 root data:Selection_12", + " └─Selection_12 10.00 cop eq(test.t2.a, 1)", + " └─TableScan_11 10000.00 cop table:t2, range:[-inf,+inf], keep order:false, stats:pseudo" + ] + }, + { + "SQL": "explain select * from t1 right join t2 on t1.a = t2.a where t2.a > 1", + "Result": [ + "HashRightJoin_7 4166.67 root right outer join, equal:[eq(test.t1.a, test.t2.a)]", + "├─TableReader_10 3333.33 root data:Selection_9", 
+ "│ └─Selection_9 3333.33 cop gt(test.t1.a, 1), not(isnull(test.t1.a))", + "│ └─TableScan_8 10000.00 cop table:t1, range:[-inf,+inf], keep order:false, stats:pseudo", + "└─TableReader_13 3333.33 root data:Selection_12", + " └─Selection_12 3333.33 cop gt(test.t2.a, 1)", + " └─TableScan_11 10000.00 cop table:t2, range:[-inf,+inf], keep order:false, stats:pseudo" + ] + }, + { + "SQL": "explain select * from t1 right join t2 on t1.a = t2.a and t2.a > 1", + "Result": [ + "HashRightJoin_6 10000.00 root right outer join, equal:[eq(test.t1.a, test.t2.a)], right cond:gt(test.t2.a, 1)", + "├─TableReader_9 3333.33 root data:Selection_8", + "│ └─Selection_8 3333.33 cop gt(test.t1.a, 1), not(isnull(test.t1.a))", + "│ └─TableScan_7 10000.00 cop table:t1, range:[-inf,+inf], keep order:false, stats:pseudo", + "└─TableReader_11 10000.00 root data:TableScan_10", + " └─TableScan_10 10000.00 cop table:t2, range:[-inf,+inf], keep order:false, stats:pseudo" + ] + }, + { + "SQL": "explain select * from t1 right join t2 on t1.a > t2.a and t2.a = 1", + "Result": [ + "HashRightJoin_6 33333333.33 root CARTESIAN right outer join, right cond:eq(test.t2.a, 1)", + "├─TableReader_9 3333.33 root data:Selection_8", + "│ └─Selection_8 3333.33 cop gt(test.t1.a, 1)", + "│ └─TableScan_7 10000.00 cop table:t1, range:[-inf,+inf], keep order:false, stats:pseudo", + "└─TableReader_11 10000.00 root data:TableScan_10", + " └─TableScan_10 10000.00 cop table:t2, range:[-inf,+inf], keep order:false, stats:pseudo" + ] + }, + { + "SQL": "explain select * from t1 left join t2 on t1.a = t2.a and t2.a > 1", + "Result": [ + "HashLeftJoin_6 10000.00 root left outer join, equal:[eq(test.t1.a, test.t2.a)]", + "├─TableReader_8 10000.00 root data:TableScan_7", + "│ └─TableScan_7 10000.00 cop table:t1, range:[-inf,+inf], keep order:false, stats:pseudo", + "└─TableReader_11 3333.33 root data:Selection_10", + " └─Selection_10 3333.33 cop gt(test.t2.a, 1), not(isnull(test.t2.a))", + " └─TableScan_9 10000.00 cop table:t2, range:[-inf,+inf], keep order:false, stats:pseudo" + ] + }, + { + "SQL": "explain select * from t1 left join t2 on t1.a > t2.a and t2.a = 1", + "Result": [ + "HashLeftJoin_6 100000.00 root CARTESIAN left outer join, other cond:gt(test.t1.a, test.t2.a)", + "├─TableReader_8 10000.00 root data:TableScan_7", + "│ └─TableScan_7 10000.00 cop table:t1, range:[-inf,+inf], keep order:false, stats:pseudo", + "└─TableReader_11 10.00 root data:Selection_10", + " └─Selection_10 10.00 cop eq(test.t2.a, 1), not(isnull(test.t2.a))", + " └─TableScan_9 10000.00 cop table:t2, range:[-inf,+inf], keep order:false, stats:pseudo" + ] + }, + { + "SQL": "explain select * from t1 right join t2 on t1.a > t2.a and t1.a = 1", + "Result": [ + "HashRightJoin_6 100000.00 root CARTESIAN right outer join, other cond:gt(test.t1.a, test.t2.a)", + "├─TableReader_9 10.00 root data:Selection_8", + "│ └─Selection_8 10.00 cop eq(test.t1.a, 1), not(isnull(test.t1.a))", + "│ └─TableScan_7 10000.00 cop table:t1, range:[-inf,+inf], keep order:false, stats:pseudo", + "└─TableReader_11 10000.00 root data:TableScan_10", + " └─TableScan_10 10000.00 cop table:t2, range:[-inf,+inf], keep order:false, stats:pseudo" + ] + }, + { + "SQL": "explain select * from t1 right join t2 on t1.a = t2.a and t1.a > 1", + "Result": [ + "HashRightJoin_6 10000.00 root right outer join, equal:[eq(test.t1.a, test.t2.a)]", + "├─TableReader_9 3333.33 root data:Selection_8", + "│ └─Selection_8 3333.33 cop gt(test.t1.a, 1), not(isnull(test.t1.a))", + "│ └─TableScan_7 10000.00 cop table:t1, 
range:[-inf,+inf], keep order:false, stats:pseudo", + "└─TableReader_11 10000.00 root data:TableScan_10", + " └─TableScan_10 10000.00 cop table:t2, range:[-inf,+inf], keep order:false, stats:pseudo" + ] + }, + { + "SQL": "explain select * from t1 left join t2 on t1.a = t1.b and t1.a > 1", + "Result": [ + "HashLeftJoin_6 100000000.00 root CARTESIAN left outer join, left cond:[eq(test.t1.a, test.t1.b) gt(test.t1.a, 1)]", + "├─TableReader_8 10000.00 root data:TableScan_7", + "│ └─TableScan_7 10000.00 cop table:t1, range:[-inf,+inf], keep order:false, stats:pseudo", + "└─TableReader_10 10000.00 root data:TableScan_9", + " └─TableScan_9 10000.00 cop table:t2, range:[-inf,+inf], keep order:false, stats:pseudo" + ] + }, + { + "SQL": "explain select * from t1 left join t2 on t2.a = t2.b and t2.a > 1", + "Result": [ + "HashLeftJoin_6 26666666.67 root CARTESIAN left outer join", + "├─TableReader_8 10000.00 root data:TableScan_7", + "│ └─TableScan_7 10000.00 cop table:t1, range:[-inf,+inf], keep order:false, stats:pseudo", + "└─TableReader_11 2666.67 root data:Selection_10", + " └─Selection_10 2666.67 cop eq(test.t2.a, test.t2.b), gt(test.t2.a, 1)", + " └─TableScan_9 10000.00 cop table:t2, range:[-inf,+inf], keep order:false, stats:pseudo" + ] + }, + { + "SQL": "explain select * from t1 left join t2 on true where t1.a = 1 and false", + "Result": [ + "TableDual_8 0.00 root rows:0" + ] + }, + { + "SQL": "explain select * from t1 left join t2 on true where t1.a = 1 and null", + "Result": [ + "TableDual_8 0.00 root rows:0" + ] + }, + { + "SQL": "explain select * from t1 left join t2 on true where t1.a = null", + "Result": [ + "TableDual_8 0.00 root rows:0" + ] + }, + { + "SQL": "explain select * from t1 left join t2 on true where t1.a = 1 and t1.a = 2", + "Result": [ + "TableDual_8 0.00 root rows:0" + ] + }, + { + "SQL": "explain select * from t1 left join t2 on true where t1.a = 1 and t1.a = 1", + "Result": [ + "HashLeftJoin_7 80000.00 root CARTESIAN left outer join", + "├─TableReader_10 10.00 root data:Selection_9", + "│ └─Selection_9 10.00 cop eq(test.t1.a, 1)", + "│ └─TableScan_8 10000.00 cop table:t1, range:[-inf,+inf], keep order:false, stats:pseudo", + "└─TableReader_12 10000.00 root data:TableScan_11", + " └─TableScan_11 10000.00 cop table:t2, range:[-inf,+inf], keep order:false, stats:pseudo" + ] + }, + { + "SQL": "explain select * from t1 left join t2 on false", + "Result": [ + "HashLeftJoin_6 80000000.00 root CARTESIAN left outer join", + "├─TableReader_8 10000.00 root data:TableScan_7", + "│ └─TableScan_7 10000.00 cop table:t1, range:[-inf,+inf], keep order:false, stats:pseudo", + "└─TableDual_9 8000.00 root rows:0" + ] + }, + { + "SQL": "explain select * from t1 right join t2 on false", + "Result": [ + "HashRightJoin_6 80000000.00 root CARTESIAN right outer join", + "├─TableDual_7 8000.00 root rows:0", + "└─TableReader_9 10000.00 root data:TableScan_8", + " └─TableScan_8 10000.00 cop table:t2, range:[-inf,+inf], keep order:false, stats:pseudo" + ] + }, + { + "SQL": "explain select * from t1 left join t2 on t1.a = 1 and t1.a = 2", + "Result": [ + "HashLeftJoin_6 80000000.00 root CARTESIAN left outer join", + "├─TableReader_8 10000.00 root data:TableScan_7", + "│ └─TableScan_7 10000.00 cop table:t1, range:[-inf,+inf], keep order:false, stats:pseudo", + "└─TableDual_9 8000.00 root rows:0" + ] + }, + { + "SQL": "explain select * from t1 left join t2 on t1.a =1 where t1.a = 2", + "Result": [ + "HashLeftJoin_7 80000.00 root CARTESIAN left outer join", + "├─TableReader_10 10.00 root 
data:Selection_9", + "│ └─Selection_9 10.00 cop eq(test.t1.a, 2)", + "│ └─TableScan_8 10000.00 cop table:t1, range:[-inf,+inf], keep order:false, stats:pseudo", + "└─TableDual_11 8000.00 root rows:0" + ] + }, + { + "SQL": "explain select * from t1 left join t2 on t2.a = 1 and t2.a = 2", + "Result": [ + "HashLeftJoin_6 10000.00 root CARTESIAN left outer join", + "├─TableReader_8 10000.00 root data:TableScan_7", + "│ └─TableScan_7 10000.00 cop table:t1, range:[-inf,+inf], keep order:false, stats:pseudo", + "└─TableReader_11 0.00 root data:Selection_10", + " └─Selection_10 0.00 cop eq(test.t2.a, 1), eq(test.t2.a, 2)", + " └─TableScan_9 10000.00 cop table:t2, range:[-inf,+inf], keep order:false, stats:pseudo" + ] + }, + { + "SQL": "explain select * from t1 left join t2 on t1.a = 1 or (t1.a = 2 and t1.a = 3)", + "Result": [ + "HashLeftJoin_6 100000000.00 root CARTESIAN left outer join, left cond:[or(eq(test.t1.a, 1), 0)]", + "├─TableReader_8 10000.00 root data:TableScan_7", + "│ └─TableScan_7 10000.00 cop table:t1, range:[-inf,+inf], keep order:false, stats:pseudo", + "└─TableReader_10 10000.00 root data:TableScan_9", + " └─TableScan_9 10000.00 cop table:t2, range:[-inf,+inf], keep order:false, stats:pseudo" + ] + }, + { + "SQL": "explain select * from t1 left join t2 on true where t1.a = 1 or (t1.a = 2 and t1.a = 3)", + "Result": [ + "HashLeftJoin_7 80000.00 root CARTESIAN left outer join", + "├─TableReader_10 10.00 root data:Selection_9", + "│ └─Selection_9 10.00 cop or(eq(test.t1.a, 1), 0)", + "│ └─TableScan_8 10000.00 cop table:t1, range:[-inf,+inf], keep order:false, stats:pseudo", + "└─TableReader_12 10000.00 root data:TableScan_11", + " └─TableScan_11 10000.00 cop table:t2, range:[-inf,+inf], keep order:false, stats:pseudo" + ] + }, + { + "SQL": "explain select * from t1 where t1.b > 1 or t1.b in (select b from t2)", + "Result": [ + "Projection_7 8000.00 root test.t1.id, test.t1.a, test.t1.b", + "└─Selection_8 8000.00 root or(gt(test.t1.b, 1), Column#7)", + " └─HashLeftJoin_9 10000.00 root CARTESIAN left outer semi join, other cond:eq(test.t1.b, test.t2.b)", + " ├─TableReader_11 10000.00 root data:TableScan_10", + " │ └─TableScan_10 10000.00 cop table:t1, range:[-inf,+inf], keep order:false, stats:pseudo", + " └─TableReader_13 10000.00 root data:TableScan_12", + " └─TableScan_12 10000.00 cop table:t2, range:[-inf,+inf], keep order:false, stats:pseudo" + ] + } + ] + } +] diff --git a/expression/util.go b/expression/util.go new file mode 100644 index 0000000..2334145 --- /dev/null +++ b/expression/util.go @@ -0,0 +1,512 @@ +// Copyright 2016 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package expression + +import ( + "strconv" + + "github.com/pingcap/errors" + "github.com/pingcap/tidb/parser/ast" + "github.com/pingcap/tidb/parser/mysql" + "github.com/pingcap/tidb/sessionctx" + "github.com/pingcap/tidb/types" + "github.com/pingcap/tidb/util/chunk" + "github.com/pingcap/tidb/util/logutil" + "go.uber.org/zap" + "golang.org/x/tools/container/intsets" +) + +// cowExprRef is a copy-on-write slice ref util using in `ColumnSubstitute` +// to reduce unnecessary allocation for Expression arguments array +type cowExprRef struct { + ref []Expression + new []Expression +} + +// Set will allocate new array if changed flag true +func (c *cowExprRef) Set(i int, changed bool, val Expression) { + if c.new != nil { + c.new[i] = val + return + } + if !changed { + return + } + c.new = make([]Expression, len(c.ref)) + copy(c.new, c.ref[:i]) + c.new[i] = val +} + +// Result return the final reference +func (c *cowExprRef) Result() []Expression { + if c.new != nil { + return c.new + } + return c.ref +} + +// Filter the input expressions, append the results to result. +func Filter(result []Expression, input []Expression, filter func(Expression) bool) []Expression { + for _, e := range input { + if filter(e) { + result = append(result, e) + } + } + return result +} + +// FilterOutInPlace do the filtering out in place. +// The remained are the ones who doesn't match the filter, storing in the original slice. +// The filteredOut are the ones match the filter, storing in a new slice. +func FilterOutInPlace(input []Expression, filter func(Expression) bool) (remained, filteredOut []Expression) { + for i := len(input) - 1; i >= 0; i-- { + if filter(input[i]) { + filteredOut = append(filteredOut, input[i]) + input = append(input[:i], input[i+1:]...) + } + } + return input, filteredOut +} + +// ExtractColumns extracts all columns from an expression. +func ExtractColumns(expr Expression) []*Column { + // Pre-allocate a slice to reduce allocation, 8 doesn't have special meaning. + result := make([]*Column, 0, 8) + return extractColumns(result, expr, nil) +} + +// ExtractColumnsFromExpressions is a more efficient version of ExtractColumns for batch operation. +// filter can be nil, or a function to filter the result column. +// It's often observed that the pattern of the caller like this: +// +// cols := ExtractColumns(...) +// for _, col := range cols { +// if xxx(col) {...} +// } +// +// Provide an additional filter argument, this can be done in one step. +// To avoid allocation for cols that not need. +func ExtractColumnsFromExpressions(result []*Column, exprs []Expression, filter func(*Column) bool) []*Column { + for _, expr := range exprs { + result = extractColumns(result, expr, filter) + } + return result +} + +func extractColumns(result []*Column, expr Expression, filter func(*Column) bool) []*Column { + switch v := expr.(type) { + case *Column: + if filter == nil || filter(v) { + result = append(result, v) + } + case *ScalarFunction: + for _, arg := range v.GetArgs() { + result = extractColumns(result, arg, filter) + } + } + return result +} + +// ExtractColumnSet extracts the different values of `UniqueId` for columns in expressions. 
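+// The returned sparse set is keyed by Column.UniqueID.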
+func ExtractColumnSet(exprs []Expression) *intsets.Sparse { + set := &intsets.Sparse{} + for _, expr := range exprs { + extractColumnSet(expr, set) + } + return set +} + +func extractColumnSet(expr Expression, set *intsets.Sparse) { + switch v := expr.(type) { + case *Column: + set.Insert(int(v.UniqueID)) + case *ScalarFunction: + for _, arg := range v.GetArgs() { + extractColumnSet(arg, set) + } + } +} + +// ColumnSubstitute substitutes the columns in filter to expressions in select fields. +// e.g. select * from (select b as a from t) k where a < 10 => select * from (select b as a from t where b < 10) k. +func ColumnSubstitute(expr Expression, schema *Schema, newExprs []Expression) Expression { + _, resExpr := ColumnSubstituteImpl(expr, schema, newExprs) + return resExpr +} + +// ColumnSubstituteImpl tries to substitute column expr using newExprs, +// the newFunctionInternal is only called if its child is substituted +func ColumnSubstituteImpl(expr Expression, schema *Schema, newExprs []Expression) (bool, Expression) { + switch v := expr.(type) { + case *Column: + id := schema.ColumnIndex(v) + if id == -1 { + return false, v + } + newExpr := newExprs[id] + return true, newExpr + case *ScalarFunction: + // cowExprRef is a copy-on-write util, args array allocation happens only + // when expr in args is changed + refExprArr := cowExprRef{v.GetArgs(), nil} + substituted := false + for idx, arg := range v.GetArgs() { + changed, newFuncExpr := ColumnSubstituteImpl(arg, schema, newExprs) + refExprArr.Set(idx, changed, newFuncExpr) + if changed { + substituted = true + } + } + if substituted { + return true, NewFunctionInternal(v.GetCtx(), v.FuncName.L, v.RetType, refExprArr.Result()...) + } + } + return false, expr +} + +var oppositeOp = map[string]string{ + ast.LT: ast.GE, + ast.GE: ast.LT, + ast.GT: ast.LE, + ast.LE: ast.GT, + ast.EQ: ast.NE, + ast.NE: ast.EQ, + ast.LogicOr: ast.LogicAnd, + ast.LogicAnd: ast.LogicOr, +} + +func pushNotAcrossArgs(ctx sessionctx.Context, exprs []Expression, not bool) ([]Expression, bool) { + newExprs := make([]Expression, 0, len(exprs)) + flag := false + for _, expr := range exprs { + newExpr, changed := pushNotAcrossExpr(ctx, expr, not) + flag = changed || flag + newExprs = append(newExprs, newExpr) + } + return newExprs, flag +} + +// pushNotAcrossExpr try to eliminate the NOT expr in expression tree. It will records whether there's already NOT pushed. 
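+// For example, with not set, lt(a, b) becomes ge(a, b), and and(lt(a, b), ge(c, d)) becomes or(ge(a, b), lt(c, d)).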
+func pushNotAcrossExpr(ctx sessionctx.Context, expr Expression, not bool) (Expression, bool) { + if f, ok := expr.(*ScalarFunction); ok { + switch f.FuncName.L { + case ast.UnaryNot: + return pushNotAcrossExpr(f.GetCtx(), f.GetArgs()[0], !not) + case ast.LT, ast.GE, ast.GT, ast.LE, ast.EQ, ast.NE: + if not { + return NewFunctionInternal(f.GetCtx(), oppositeOp[f.FuncName.L], f.GetType(), f.GetArgs()...), true + } + newArgs, changed := pushNotAcrossArgs(f.GetCtx(), f.GetArgs(), false) + if !changed { + return f, false + } + return NewFunctionInternal(f.GetCtx(), f.FuncName.L, f.GetType(), newArgs...), true + case ast.LogicAnd, ast.LogicOr: + var ( + newArgs []Expression + changed bool + ) + funcName := f.FuncName.L + if not { + newArgs, _ = pushNotAcrossArgs(f.GetCtx(), f.GetArgs(), true) + funcName = oppositeOp[f.FuncName.L] + changed = true + } else { + newArgs, changed = pushNotAcrossArgs(f.GetCtx(), f.GetArgs(), false) + } + if !changed { + return f, false + } + return NewFunctionInternal(f.GetCtx(), funcName, f.GetType(), newArgs...), true + } + } + if not { + expr = NewFunctionInternal(ctx, ast.UnaryNot, types.NewFieldType(mysql.TypeTiny), expr) + } + return expr, not +} + +// PushDownNot pushes the `not` function down to the expression's arguments. +func PushDownNot(ctx sessionctx.Context, expr Expression) Expression { + newExpr, _ := pushNotAcrossExpr(ctx, expr, false) + return newExpr +} + +// Contains tests if `exprs` contains `e`. +func Contains(exprs []Expression, e Expression) bool { + for _, expr := range exprs { + if e == expr { + return true + } + } + return false +} + +// ExtractFiltersFromDNFs checks whether the cond is DNF. If so, it will get the extracted part and the remained part. +// The original DNF will be replaced by the remained part or just be deleted if remained part is nil. +// And the extracted part will be appended to the end of the orignal slice. +func ExtractFiltersFromDNFs(ctx sessionctx.Context, conditions []Expression) []Expression { + var allExtracted []Expression + for i := len(conditions) - 1; i >= 0; i-- { + if sf, ok := conditions[i].(*ScalarFunction); ok && sf.FuncName.L == ast.LogicOr { + extracted, remained := extractFiltersFromDNF(ctx, sf) + allExtracted = append(allExtracted, extracted...) + if remained == nil { + conditions = append(conditions[:i], conditions[i+1:]...) + } else { + conditions[i] = remained + } + } + } + return append(conditions, allExtracted...) +} + +// extractFiltersFromDNF extracts the same condition that occurs in every DNF item and remove them from dnf leaves. +func extractFiltersFromDNF(ctx sessionctx.Context, dnfFunc *ScalarFunction) ([]Expression, Expression) { + dnfItems := FlattenDNFConditions(dnfFunc) + sc := ctx.GetSessionVars().StmtCtx + codeMap := make(map[string]int) + hashcode2Expr := make(map[string]Expression) + for i, dnfItem := range dnfItems { + innerMap := make(map[string]struct{}) + cnfItems := SplitCNFItems(dnfItem) + for _, cnfItem := range cnfItems { + code := cnfItem.HashCode(sc) + if i == 0 { + codeMap[string(code)] = 1 + hashcode2Expr[string(code)] = cnfItem + } else if _, ok := codeMap[string(code)]; ok { + // We need this check because there may be the case like `select * from t, t1 where (t.a=t1.a and t.a=t1.a) or (something). + // We should make sure that the two `t.a=t1.a` contributes only once. + // TODO: do this out of this function. 
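+ // innerMap deduplicates repeated conditions within the current DNF item, while codeMap counts how many DNF items contain the condition.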
+ if _, ok = innerMap[string(code)]; !ok { + codeMap[string(code)]++ + innerMap[string(code)] = struct{}{} + } + } + } + } + // We should make sure that this item occurs in every DNF item. + for hashcode, cnt := range codeMap { + if cnt < len(dnfItems) { + delete(hashcode2Expr, hashcode) + } + } + if len(hashcode2Expr) == 0 { + return nil, dnfFunc + } + newDNFItems := make([]Expression, 0, len(dnfItems)) + onlyNeedExtracted := false + for _, dnfItem := range dnfItems { + cnfItems := SplitCNFItems(dnfItem) + newCNFItems := make([]Expression, 0, len(cnfItems)) + for _, cnfItem := range cnfItems { + code := cnfItem.HashCode(sc) + _, ok := hashcode2Expr[string(code)] + if !ok { + newCNFItems = append(newCNFItems, cnfItem) + } + } + // If the extracted part is just one leaf of the DNF expression. Then the value of the total DNF expression is + // always the same with the value of the extracted part. + if len(newCNFItems) == 0 { + onlyNeedExtracted = true + break + } + newDNFItems = append(newDNFItems, ComposeCNFCondition(ctx, newCNFItems...)) + } + extractedExpr := make([]Expression, 0, len(hashcode2Expr)) + for _, expr := range hashcode2Expr { + extractedExpr = append(extractedExpr, expr) + } + if onlyNeedExtracted { + return extractedExpr, nil + } + return extractedExpr, ComposeDNFCondition(ctx, newDNFItems...) +} + +// DeriveRelaxedFiltersFromDNF given a DNF expression, derive a relaxed DNF expression which only contains columns +// in specified schema; the derived expression is a superset of original expression, i.e, any tuple satisfying +// the original expression must satisfy the derived expression. Return nil when the derived expression is universal set. +// A running example is: for schema of t1, `(t1.a=1 and t2.a=1) or (t1.a=2 and t2.a=2)` would be derived as +// `t1.a=1 or t1.a=2`, while `t1.a=1 or t2.a=1` would get nil. +func DeriveRelaxedFiltersFromDNF(expr Expression, schema *Schema) Expression { + sf, ok := expr.(*ScalarFunction) + if !ok || sf.FuncName.L != ast.LogicOr { + return nil + } + ctx := sf.GetCtx() + dnfItems := FlattenDNFConditions(sf) + newDNFItems := make([]Expression, 0, len(dnfItems)) + for _, dnfItem := range dnfItems { + cnfItems := SplitCNFItems(dnfItem) + newCNFItems := make([]Expression, 0, len(cnfItems)) + for _, cnfItem := range cnfItems { + if itemSF, ok := cnfItem.(*ScalarFunction); ok && itemSF.FuncName.L == ast.LogicOr { + relaxedCNFItem := DeriveRelaxedFiltersFromDNF(cnfItem, schema) + if relaxedCNFItem != nil { + newCNFItems = append(newCNFItems, relaxedCNFItem) + } + // If relaxed expression for embedded DNF is universal set, just drop this CNF item + continue + } + // This cnfItem must be simple expression now + // If it cannot be fully covered by schema, just drop this CNF item + if ExprFromSchema(cnfItem, schema) { + newCNFItems = append(newCNFItems, cnfItem) + } + } + // If this DNF item involves no column of specified schema, the relaxed expression must be universal set + if len(newCNFItems) == 0 { + return nil + } + newDNFItems = append(newDNFItems, ComposeCNFCondition(ctx, newCNFItems...)) + } + return ComposeDNFCondition(ctx, newDNFItems...) +} + +// GetRowLen gets the length if the func is row, returns 1 if not row. +func GetRowLen(e Expression) int { + if f, ok := e.(*ScalarFunction); ok && f.FuncName.L == ast.RowFunc { + return len(f.GetArgs()) + } + return 1 +} + +// CheckArgsNotMultiColumnRow checks the args are not multi-column row. 
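+// It returns ErrOperandColumns as soon as any argument has a row length other than 1.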
+func CheckArgsNotMultiColumnRow(args ...Expression) error {
+	for _, arg := range args {
+		if GetRowLen(arg) != 1 {
+			return ErrOperandColumns.GenWithStackByArgs(1)
+		}
+	}
+	return nil
+}
+
+// GetFuncArg gets the argument of the function at idx.
+func GetFuncArg(e Expression, idx int) Expression {
+	if f, ok := e.(*ScalarFunction); ok {
+		return f.GetArgs()[idx]
+	}
+	return nil
+}
+
+// PopRowFirstArg pops the first element and returns the rest of the row.
+// e.g. after this function (1, 2, 3) becomes (2, 3).
+func PopRowFirstArg(ctx sessionctx.Context, e Expression) (ret Expression, err error) {
+	if f, ok := e.(*ScalarFunction); ok && f.FuncName.L == ast.RowFunc {
+		args := f.GetArgs()
+		if len(args) == 2 {
+			return args[1], nil
+		}
+		ret, err = NewFunction(ctx, ast.RowFunc, f.GetType(), args[1:]...)
+		return ret, err
+	}
+	return
+}
+
+// DatumToConstant generates a Constant expression from a Datum.
+func DatumToConstant(d types.Datum, tp byte) *Constant {
+	return &Constant{Value: d, RetType: types.NewFieldType(tp)}
+}
+
+// GetStringFromConstant gets a string value from the Constant expression.
+func GetStringFromConstant(ctx sessionctx.Context, value Expression) (string, bool, error) {
+	con, ok := value.(*Constant)
+	if !ok {
+		err := errors.Errorf("Not a Constant expression %+v", value)
+		return "", true, err
+	}
+	str, isNull, err := con.EvalString(ctx, chunk.Row{})
+	if err != nil || isNull {
+		return "", true, err
+	}
+	return str, false, nil
+}
+
+// GetIntFromConstant gets an integer value from the Constant expression.
+func GetIntFromConstant(ctx sessionctx.Context, value Expression) (int, bool, error) {
+	str, isNull, err := GetStringFromConstant(ctx, value)
+	if err != nil || isNull {
+		return 0, true, err
+	}
+	intNum, err := strconv.Atoi(str)
+	if err != nil {
+		return 0, true, nil
+	}
+	return intNum, false, nil
+}
+
+// BuildNotNullExpr wraps up `not(isnull())` for the given expression.
+func BuildNotNullExpr(ctx sessionctx.Context, expr Expression) Expression {
+	isNull := NewFunctionInternal(ctx, ast.IsNull, types.NewFieldType(mysql.TypeTiny), expr)
+	notNull := NewFunctionInternal(ctx, ast.UnaryNot, types.NewFieldType(mysql.TypeTiny), isNull)
+	return notNull
+}
+
+// IsMutableEffectsExpr checks if expr contains a function which is mutable or has side effects.
+func IsMutableEffectsExpr(expr Expression) bool {
+	switch x := expr.(type) {
+	case *ScalarFunction:
+		if _, ok := mutableEffectsFunctions[x.FuncName.L]; ok {
+			return true
+		}
+		for _, arg := range x.GetArgs() {
+			if IsMutableEffectsExpr(arg) {
+				return true
+			}
+		}
+	case *Column:
+	}
+	return false
+}
+
+// RemoveDupExprs removes identical exprs. Note that if an expr contains functions which
+// are mutable or have side effects, we cannot remove it even if it has duplicates.
+func RemoveDupExprs(ctx sessionctx.Context, exprs []Expression) []Expression {
+	res := make([]Expression, 0, len(exprs))
+	exists := make(map[string]struct{}, len(exprs))
+	sc := ctx.GetSessionVars().StmtCtx
+	for _, expr := range exprs {
+		key := string(expr.HashCode(sc))
+		if _, ok := exists[key]; !ok || IsMutableEffectsExpr(expr) {
+			res = append(res, expr)
+			exists[key] = struct{}{}
+		}
+	}
+	return res
+}
+
+// GetUint64FromConstant gets a uint64 from a constant expression.
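+//
+// Illustrative results, assuming constants built with the types package
+// (second return value reports NULL, third reports success):
+//
+//	GetUint64FromConstant(&Constant{Value: types.NewUintDatum(7)}) // 7, false, true
+//	GetUint64FromConstant(&Constant{Value: types.NewIntDatum(-1)}) // 0, false, false (negative values are rejected)
+//	GetUint64FromConstant(&Constant{Value: types.NewDatum(nil)})   // 0, true, true  (NULL)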
+func GetUint64FromConstant(expr Expression) (uint64, bool, bool) { + con, ok := expr.(*Constant) + if !ok { + logutil.BgLogger().Warn("not a constant expression", zap.String("expression", expr.ExplainInfo())) + return 0, false, false + } + dt := con.Value + switch dt.Kind() { + case types.KindNull: + return 0, true, true + case types.KindInt64: + val := dt.GetInt64() + if val < 0 { + return 0, false, false + } + return uint64(val), false, true + case types.KindUint64: + return dt.GetUint64(), false, true + } + return 0, false, false +} diff --git a/expression/util_test.go b/expression/util_test.go new file mode 100644 index 0000000..9132ccb --- /dev/null +++ b/expression/util_test.go @@ -0,0 +1,236 @@ +// Copyright 2015 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package expression + +import ( + "testing" + "time" + + "github.com/pingcap/check" + "github.com/pingcap/tidb/parser/ast" + "github.com/pingcap/tidb/parser/model" + "github.com/pingcap/tidb/parser/mysql" + "github.com/pingcap/tidb/sessionctx/stmtctx" + "github.com/pingcap/tidb/types" + "github.com/pingcap/tidb/util/chunk" + "github.com/pingcap/tidb/util/codec" + "github.com/pingcap/tidb/util/mock" +) + +var _ = check.Suite(&testUtilSuite{}) + +type testUtilSuite struct { +} + +func newIntFieldType() *types.FieldType { + return &types.FieldType{ + Tp: mysql.TypeLonglong, + Flen: mysql.MaxIntWidth, + Decimal: 0, + Flag: mysql.BinaryFlag, + } +} + +func newStringFieldType() *types.FieldType { + return &types.FieldType{ + Tp: mysql.TypeVarString, + Flen: types.UnspecifiedLength, + } +} + +func (s *testUtilSuite) TestGetUint64FromConstant(c *check.C) { + con := &Constant{ + Value: types.NewDatum(nil), + } + _, isNull, ok := GetUint64FromConstant(con) + c.Assert(ok, check.IsTrue) + c.Assert(isNull, check.IsTrue) + + con = &Constant{ + Value: types.NewIntDatum(-1), + } + _, _, ok = GetUint64FromConstant(con) + c.Assert(ok, check.IsFalse) + + con.Value = types.NewIntDatum(1) + num, isNull, ok := GetUint64FromConstant(con) + c.Assert(ok, check.IsTrue) + c.Assert(isNull, check.IsFalse) + c.Assert(num, check.Equals, uint64(1)) + + con.Value = types.NewUintDatum(1) + num, _, _ = GetUint64FromConstant(con) + c.Assert(num, check.Equals, uint64(1)) +} + +func (s testUtilSuite) TestPopRowFirstArg(c *check.C) { + c1, c2, c3 := &Column{RetType: newIntFieldType()}, &Column{RetType: newIntFieldType()}, &Column{RetType: newIntFieldType()} + f, err := funcs[ast.RowFunc].getFunction(mock.NewContext(), []Expression{c1, c2, c3}) + c.Assert(err, check.IsNil) + fun := &ScalarFunction{Function: f, FuncName: model.NewCIStr(ast.RowFunc), RetType: newIntFieldType()} + fun2, err := PopRowFirstArg(mock.NewContext(), fun) + c.Assert(err, check.IsNil) + c.Assert(len(fun2.(*ScalarFunction).GetArgs()), check.Equals, 2) +} + +func (s testUtilSuite) TestGetStrIntFromConstant(c *check.C) { + col := &Column{} + _, _, err := GetStringFromConstant(mock.NewContext(), col) + c.Assert(err, check.NotNil) + + con := &Constant{RetType: &types.FieldType{Tp: mysql.TypeNull}} + _, isNull, err := 
GetStringFromConstant(mock.NewContext(), con) + c.Assert(err, check.IsNil) + c.Assert(isNull, check.IsTrue) + + con = &Constant{RetType: newIntFieldType(), Value: types.NewIntDatum(1)} + ret, _, _ := GetStringFromConstant(mock.NewContext(), con) + c.Assert(ret, check.Equals, "1") + + con = &Constant{RetType: &types.FieldType{Tp: mysql.TypeNull}} + _, isNull, _ = GetIntFromConstant(mock.NewContext(), con) + c.Assert(isNull, check.IsTrue) + + con = &Constant{RetType: newStringFieldType(), Value: types.NewStringDatum("abc")} + _, isNull, _ = GetIntFromConstant(mock.NewContext(), con) + c.Assert(isNull, check.IsTrue) + + con = &Constant{RetType: newStringFieldType(), Value: types.NewStringDatum("123")} + num, _, _ := GetIntFromConstant(mock.NewContext(), con) + c.Assert(num, check.Equals, 123) +} + +func (s *testUtilSuite) TestPushDownNot(c *check.C) { + ctx := mock.NewContext() + col := &Column{Index: 1, RetType: types.NewFieldType(mysql.TypeLonglong)} + // !((a=1||a=1)&&a=1) + eqFunc := newFunction(ast.EQ, col, One) + orFunc := newFunction(ast.LogicOr, eqFunc, eqFunc) + andFunc := newFunction(ast.LogicAnd, orFunc, eqFunc) + notFunc := newFunction(ast.UnaryNot, andFunc) + // (a!=1&&a!=1)||a=1 + neFunc := newFunction(ast.NE, col, One) + andFunc2 := newFunction(ast.LogicAnd, neFunc, neFunc) + orFunc2 := newFunction(ast.LogicOr, andFunc2, neFunc) + notFuncCopy := notFunc.Clone() + ret := PushDownNot(ctx, notFunc) + c.Assert(ret.Equal(ctx, orFunc2), check.IsTrue) + c.Assert(notFunc.Equal(ctx, notFuncCopy), check.IsTrue) +} + +func (s *testUtilSuite) TestFilter(c *check.C) { + conditions := []Expression{ + newFunction(ast.EQ, newColumn(0), newColumn(1)), + newFunction(ast.EQ, newColumn(1), newColumn(2)), + newFunction(ast.LogicOr, newLonglong(1), newColumn(0)), + } + result := make([]Expression, 0, 5) + result = Filter(result, conditions, isLogicOrFunction) + c.Assert(result, check.HasLen, 1) +} + +func (s *testUtilSuite) TestFilterOutInPlace(c *check.C) { + conditions := []Expression{ + newFunction(ast.EQ, newColumn(0), newColumn(1)), + newFunction(ast.EQ, newColumn(1), newColumn(2)), + newFunction(ast.LogicOr, newLonglong(1), newColumn(0)), + } + remained, filtered := FilterOutInPlace(conditions, isLogicOrFunction) + c.Assert(len(remained), check.Equals, 2) + c.Assert(remained[0].(*ScalarFunction).FuncName.L, check.Equals, "eq") + c.Assert(remained[1].(*ScalarFunction).FuncName.L, check.Equals, "eq") + c.Assert(len(filtered), check.Equals, 1) + c.Assert(filtered[0].(*ScalarFunction).FuncName.L, check.Equals, "or") +} + +func (s *testUtilSuite) TestHashGroupKey(c *check.C) { + ctx := mock.NewContext() + sc := &stmtctx.StatementContext{TimeZone: time.Local} + eTypes := []types.EvalType{types.ETInt, types.ETReal, types.ETString} + tNames := []string{"int", "real", "string"} + for i := 0; i < len(tNames); i++ { + ft := eType2FieldType(eTypes[i]) + colExpr := &Column{Index: 0, RetType: ft} + input := chunk.New([]*types.FieldType{ft}, 1024, 1024) + fillColumnWithGener(eTypes[i], input, 0, nil) + colBuf := chunk.NewColumn(ft, 1024) + bufs := make([][]byte, 1024) + for j := 0; j < 1024; j++ { + bufs[j] = bufs[j][:0] + } + var err error + err = VecEval(ctx, colExpr, input, colBuf) + if err != nil { + c.Fatal(err) + } + if bufs, err = codec.HashGroupKey(sc, 1024, colBuf, bufs, ft); err != nil { + c.Fatal(err) + } + + var buf []byte + for j := 0; j < input.NumRows(); j++ { + d, err := colExpr.Eval(input.GetRow(j)) + if err != nil { + c.Fatal(err) + } + buf, err = codec.EncodeValue(sc, buf[:0], d) + if err 
!= nil { + c.Fatal(err) + } + c.Assert(string(bufs[j]), check.Equals, string(buf)) + } + } +} + +func isLogicOrFunction(e Expression) bool { + if f, ok := e.(*ScalarFunction); ok { + return f.FuncName.L == ast.LogicOr + } + return false +} + +func BenchmarkExtractColumns(b *testing.B) { + conditions := []Expression{ + newFunction(ast.EQ, newColumn(0), newColumn(1)), + newFunction(ast.EQ, newColumn(1), newColumn(2)), + newFunction(ast.EQ, newColumn(2), newColumn(3)), + newFunction(ast.EQ, newColumn(3), newLonglong(1)), + newFunction(ast.LogicOr, newLonglong(1), newColumn(0)), + } + expr := ComposeCNFCondition(mock.NewContext(), conditions...) + + b.ResetTimer() + for i := 0; i < b.N; i++ { + ExtractColumns(expr) + } + b.ReportAllocs() +} + +func BenchmarkExprFromSchema(b *testing.B) { + conditions := []Expression{ + newFunction(ast.EQ, newColumn(0), newColumn(1)), + newFunction(ast.EQ, newColumn(1), newColumn(2)), + newFunction(ast.EQ, newColumn(2), newColumn(3)), + newFunction(ast.EQ, newColumn(3), newLonglong(1)), + newFunction(ast.LogicOr, newLonglong(1), newColumn(0)), + } + expr := ComposeCNFCondition(mock.NewContext(), conditions...) + schema := &Schema{Columns: ExtractColumns(expr)} + + b.ResetTimer() + for i := 0; i < b.N; i++ { + ExprFromSchema(expr, schema) + } + b.ReportAllocs() +} diff --git a/expression/vectorized.go b/expression/vectorized.go new file mode 100644 index 0000000..cdeb299 --- /dev/null +++ b/expression/vectorized.go @@ -0,0 +1,80 @@ +// Copyright 2019 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. 
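+
+// genVecFromConstExpr below evaluates a constant expression once against an empty row and
+// broadcasts the resulting value (or NULL) into every row of the output column, covering the
+// int, real, and string eval types handled in this file.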
+ +package expression + +import ( + "github.com/pingcap/errors" + "github.com/pingcap/tidb/sessionctx" + "github.com/pingcap/tidb/types" + "github.com/pingcap/tidb/util/chunk" +) + +func genVecFromConstExpr(ctx sessionctx.Context, expr Expression, targetType types.EvalType, input *chunk.Chunk, result *chunk.Column) error { + n := 1 + if input != nil { + n = input.NumRows() + if n == 0 { + result.Reset(targetType) + return nil + } + } + switch targetType { + case types.ETInt: + v, isNull, err := expr.EvalInt(ctx, chunk.Row{}) + if err != nil { + return err + } + if isNull { + result.ResizeInt64(n, true) + return nil + } + result.ResizeInt64(n, false) + i64s := result.Int64s() + for i := range i64s { + i64s[i] = v + } + case types.ETReal: + v, isNull, err := expr.EvalReal(ctx, chunk.Row{}) + if err != nil { + return err + } + if isNull { + result.ResizeFloat64(n, true) + return nil + } + result.ResizeFloat64(n, false) + f64s := result.Float64s() + for i := range f64s { + f64s[i] = v + } + case types.ETString: + result.ReserveString(n) + v, isNull, err := expr.EvalString(ctx, chunk.Row{}) + if err != nil { + return err + } + if isNull { + for i := 0; i < n; i++ { + result.AppendNull() + } + } else { + for i := 0; i < n; i++ { + result.AppendString(v) + } + } + default: + return errors.Errorf("unsupported Constant type for vectorized evaluation") + } + return nil +} diff --git a/gitcookie.sh b/gitcookie.sh new file mode 100644 index 0000000..816a8e8 --- /dev/null +++ b/gitcookie.sh @@ -0,0 +1,10 @@ +touch ~/.gitcookies +chmod 0600 ~/.gitcookies + +git config --global http.cookiefile ~/.gitcookies + +tr , \\t <<\__END__ >>~/.gitcookies +go.googlesource.com,FALSE,/,TRUE,2147483647,o,git-shenli.pingcap.com=1/rGvVlvFq_x9rxOmXqQe_rfcrjbOk6NSOHIQKhhsfidM +go-review.googlesource.com,FALSE,/,TRUE,2147483647,o,git-shenli.pingcap.com=1/rGvVlvFq_x9rxOmXqQe_rfcrjbOk6NSOHIQKhhsfidM +__END__ + diff --git a/go.mod b/go.mod new file mode 100644 index 0000000..a88f0a5 --- /dev/null +++ b/go.mod @@ -0,0 +1,40 @@ +module github.com/pingcap/tidb + +require ( + github.com/BurntSushi/toml v0.3.1 + github.com/cznic/mathutil v0.0.0-20181122101859-297441e03548 + github.com/cznic/parser v0.0.0-20181122101858-d773202d5b1f + github.com/cznic/sortutil v0.0.0-20181122101858-f5f958428db8 + github.com/cznic/strutil v0.0.0-20181122101858-275e90344537 + github.com/cznic/y v0.0.0-20181122101901-b05e8c2e8d7b + github.com/go-sql-driver/mysql v0.0.0-20170715192408-3955978caca4 + github.com/gogo/protobuf v1.3.1 + github.com/golang/protobuf v1.3.4 + github.com/google/btree v1.0.0 + github.com/google/uuid v1.1.1 + github.com/gorilla/mux v1.6.2 + github.com/ngaut/pools v0.0.0-20180318154953-b7bc8c42aac7 + github.com/pingcap-incubator/tinykv v0.0.0-20200514052412-e01d729bd45c + github.com/pingcap/check v0.0.0-20200212061837-5e12011dc712 + github.com/pingcap/errors v0.11.5-0.20190809092503-95897b64e011 + github.com/pingcap/failpoint v0.0.0-20200210140405-f8f9fb234798 + github.com/pingcap/goleveldb v0.0.0-20191226122134-f82aafb29989 + github.com/pingcap/log v0.0.0-20200117041106-d28c14d3b1cd + github.com/pingcap/tipb v0.0.0-20200212061130-c4d518eb1d60 + github.com/sirupsen/logrus v1.2.0 + github.com/soheilhy/cmux v0.1.4 + github.com/spaolacci/murmur3 v1.1.0 + go.etcd.io/etcd v0.5.0-alpha.5.0.20191023171146-3cf2f69b5738 + go.uber.org/atomic v1.6.0 + go.uber.org/automaxprocs v1.2.0 + go.uber.org/zap v1.14.0 + golang.org/x/net v0.0.0-20200226121028-0de0cce0169b + golang.org/x/text v0.3.2 + golang.org/x/tools 
v0.0.0-20200304193943-95d2e580d8eb + google.golang.org/grpc v1.25.1 + gopkg.in/natefinch/lumberjack.v2 v2.0.0 +) + +go 1.13 + +replace github.com/pingcap/check => github.com/tiancaiamao/check v0.0.0-20191119042138-8e73d07b629d diff --git a/go.sum b/go.sum new file mode 100644 index 0000000..b38317c --- /dev/null +++ b/go.sum @@ -0,0 +1,478 @@ +cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +github.com/AndreasBriese/bbloom v0.0.0-20190306092124-e2d15f34fcf9/go.mod h1:bOvUY6CB00SOBii9/FifXqc0awNKxLFCL/+pkDPuyl8= +github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ= +github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= +github.com/Connor1996/badger v1.5.1-0.20200306031920-9bbcbd8ba570/go.mod h1:eDy3lZfjgEs4EC8pePI7y/Qx509ylx/S94y/dimtkxc= +github.com/DataDog/zstd v1.4.1/go.mod h1:1jcaCB/ufaK+sKp1NBhlGmpz41jOoPQ35bpF36t7BBo= +github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= +github.com/StackExchange/wmi v0.0.0-20190523213315-cbe66965904d/go.mod h1:3eOhrUMpNV+6aFIbp5/iudMxNCF27Vw2OZgy4xEx0Fg= +github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= +github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= +github.com/antihax/optional v0.0.0-20180407024304-ca021399b1a6/go.mod h1:V8iCPQYkqmusNa815XgQio277wI47sdRh1dUOLdyC6Q= +github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8= +github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= +github.com/beorn7/perks v1.0.0 h1:HWo1m869IqiPhD389kmkxeTalrjNbbJTC8LXupb+sl0= +github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= +github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= +github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= +github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= +github.com/blacktear23/go-proxyprotocol v0.0.0-20180807104634-af7a81e8dd0d h1:rQlvB2AYWme2bIB18r/SipGiMEVJYE9U0z+MGoU/LtQ= +github.com/blacktear23/go-proxyprotocol v0.0.0-20180807104634-af7a81e8dd0d/go.mod h1:VKt7CNAQxpFpSDz3sXyj9hY/GbVsQCr0sB3w59nE7lU= +github.com/brianvoe/gofakeit v3.18.0+incompatible/go.mod h1:kfwdRA90vvNhPutZWfH7WPaDzUjz+CZFqG+rPkOjGOc= +github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= +github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= +github.com/cespare/xxhash/v2 v2.1.0/go.mod h1:dgIUBU3pDso/gPgZ1osOZ0iQf77oPR28Tjxl5dIMyVM= +github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= +github.com/chzyer/readline v0.0.0-20171208011716-f6d7a1f6fbf3/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= +github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= +github.com/client9/misspell v0.3.4 h1:ta993UF76GwbvJcIo3Y68y/M3WxlpEHPWIGDkJYwzJI= +github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= +github.com/cockroachdb/datadriven v0.0.0-20190809214429-80d97fb3cbaa h1:OaNxuTZr7kxeODyLWsRMC+OD03aFUH+mW6r2d+MWa5Y= +github.com/cockroachdb/datadriven v0.0.0-20190809214429-80d97fb3cbaa/go.mod 
h1:zn76sxSg3SzpJ0PPJaLDCu+Bu0Lg3sKTORVIj19EIF8= +github.com/codahale/hdrhistogram v0.0.0-20161010025455-3a0bb77429bd/go.mod h1:sE/e/2PUdi/liOCUjSTXgM1o87ZssimdTWN964YiIeI= +github.com/coocood/bbloom v0.0.0-20190830030839-58deb6228d64/go.mod h1:F86k/6c7aDUdwSUevnLpHS/3Q9hzYCE99jGk2xsHnt0= +github.com/coocood/rtutil v0.0.0-20190304133409-c84515f646f2/go.mod h1:7qG7YFnOALvsx6tKTNmQot8d7cGFXM9TidzvRFLWYwM= +github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= +github.com/coreos/go-etcd v2.0.0+incompatible/go.mod h1:Jez6KQU2B/sWsbdaef3ED8NzMklzPG4d5KIOhIy30Tk= +github.com/coreos/go-semver v0.2.0 h1:3Jm3tLmsgAYcjC+4Up7hJrFBPr+n7rAqYeSw/SZazuY= +github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= +github.com/coreos/go-systemd v0.0.0-20180511133405-39ca1b05acc7 h1:u9SHYsPQNyt5tgDm3YN7+9dYrpK96E5wFilTFWIDZOM= +github.com/coreos/go-systemd v0.0.0-20180511133405-39ca1b05acc7/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= +github.com/coreos/go-systemd v0.0.0-20181031085051-9002847aa142 h1:3jFq2xL4ZajGK4aZY8jz+DAF0FHjI51BXjjSwCzS1Dk= +github.com/coreos/go-systemd v0.0.0-20181031085051-9002847aa142/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= +github.com/coreos/pkg v0.0.0-20160727233714-3ac0863d7acf h1:CAKfRE2YtTUIjjh1bkBtyYFaUT/WmOqsJjgtihT0vMI= +github.com/coreos/pkg v0.0.0-20160727233714-3ac0863d7acf/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= +github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f h1:lBNOc5arjvs8E5mO2tbpBpLoyyu8B6e44T7hJy6potg= +github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= +github.com/cpuguy83/go-md2man v1.0.10/go.mod h1:SmD6nW6nTyfqj6ABTjUi3V3JVMnlJmwcJI5acqYI6dE= +github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY= +github.com/cznic/golex v0.0.0-20181122101858-9c343928389c h1:G8zTsaqyVfIHpgMFcGgdbhHSFhlNc77rAKkhVbQ9kQg= +github.com/cznic/golex v0.0.0-20181122101858-9c343928389c/go.mod h1:+bmmJDNmKlhWNG+gwWCkaBoTy39Fs+bzRxVBzoTQbIc= +github.com/cznic/mathutil v0.0.0-20181122101859-297441e03548 h1:iwZdTE0PVqJCos1vaoKsclOGD3ADKpshg3SRtYBbwso= +github.com/cznic/mathutil v0.0.0-20181122101859-297441e03548/go.mod h1:e6NPNENfs9mPDVNRekM7lKScauxd5kXTr1Mfyig6TDM= +github.com/cznic/parser v0.0.0-20160622100904-31edd927e5b1/go.mod h1:2B43mz36vGZNZEwkWi8ayRSSUXLfjL8OkbzwW4NcPMM= +github.com/cznic/parser v0.0.0-20181122101858-d773202d5b1f h1:DUtr2TvhM9rmiHKVJWoLqDY2+MdxljW9hlaS/oYoi1c= +github.com/cznic/parser v0.0.0-20181122101858-d773202d5b1f/go.mod h1:2B43mz36vGZNZEwkWi8ayRSSUXLfjL8OkbzwW4NcPMM= +github.com/cznic/sortutil v0.0.0-20181122101858-f5f958428db8 h1:LpMLYGyy67BoAFGda1NeOBQwqlv7nUXpm+rIVHGxZZ4= +github.com/cznic/sortutil v0.0.0-20181122101858-f5f958428db8/go.mod h1:q2w6Bg5jeox1B+QkJ6Wp/+Vn0G/bo3f1uY7Fn3vivIQ= +github.com/cznic/strutil v0.0.0-20171016134553-529a34b1c186/go.mod h1:AHHPPPXTw0h6pVabbcbyGRK1DckRn7r/STdZEeIDzZc= +github.com/cznic/strutil v0.0.0-20181122101858-275e90344537 h1:MZRmHqDBd0vxNwenEbKSQqRVT24d3C05ft8kduSwlqM= +github.com/cznic/strutil v0.0.0-20181122101858-275e90344537/go.mod h1:AHHPPPXTw0h6pVabbcbyGRK1DckRn7r/STdZEeIDzZc= +github.com/cznic/y v0.0.0-20170802143616-045f81c6662a/go.mod h1:1rk5VM7oSnA4vjp+hrLQ3HWHa+Y4yPCa3/CsJrcNnvs= +github.com/cznic/y v0.0.0-20181122101901-b05e8c2e8d7b h1:gvFsf4zJcnW6GRN+HPGTxwuw+7sTwzmoeoBQQCZDEnk= +github.com/cznic/y v0.0.0-20181122101901-b05e8c2e8d7b/go.mod h1:1rk5VM7oSnA4vjp+hrLQ3HWHa+Y4yPCa3/CsJrcNnvs= 
+github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/dgraph-io/badger v1.6.0/go.mod h1:zwt7syl517jmP8s94KqSxTlM6IMsdhYy6psNgSztDR4= +github.com/dgraph-io/ristretto v0.0.0-20191010170704-2ba187ef9534/go.mod h1:edzKIzGvqUCMzhTVWbiTSe75zD9Xxq0GtSBtFmaUTZs= +github.com/dgraph-io/ristretto v0.0.1/go.mod h1:T40EBc7CJke8TkpiYfGGKAeFjSaxuFXhuXRyumBd6RE= +github.com/dgrijalva/jwt-go v3.2.0+incompatible h1:7qlOGliEKZXTDg6OTjfoBKDXWrumCAMpl/TFQ4/5kLM= +github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= +github.com/dgryski/go-farm v0.0.0-20190104051053-3adb47b1fb0f/go.mod h1:SqUrOPUnsFjfmXRMNPybcSiG0BgUW2AuFH8PAnS2iTw= +github.com/dgryski/go-farm v0.0.0-20190423205320-6a90982ecee2/go.mod h1:SqUrOPUnsFjfmXRMNPybcSiG0BgUW2AuFH8PAnS2iTw= +github.com/docker/go-units v0.4.0 h1:3uh0PgVws3nIA0Q+MwDC8yjEPf9zjRfZZWXZYDct3Tw= +github.com/docker/go-units v0.4.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= +github.com/dustin/go-humanize v0.0.0-20171111073723-bb3d318650d4/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= +github.com/dustin/go-humanize v0.0.0-20180421182945-02af3965c54e/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= +github.com/dustin/go-humanize v1.0.0 h1:VSnTsYCnlFHaM2/igO1h6X3HA71jcobQuxemgkq4zYo= +github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= +github.com/eknkc/amber v0.0.0-20171010120322-cdade1c07385 h1:clC1lXBpe2kTj2VHdaIu9ajZQe4kcEY9j0NsnDDBZ3o= +github.com/eknkc/amber v0.0.0-20171010120322-cdade1c07385/go.mod h1:0vRUJqYpeSZifjYj7uP3BG/gKcuzL9xWVV/Y+cK33KM= +github.com/elazarl/go-bindata-assetfs v1.0.0/go.mod h1:v+YaWX3bdea5J/mo8dSETolEo7R71Vk1u8bnjau5yw4= +github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= +github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= +github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= +github.com/fsnotify/fsnotify v1.4.7 h1:IXs+QLmnXW2CcXuY+8Mzv/fWEsPGWxqefPtCP5CnV9I= +github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= +github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= +github.com/ghodss/yaml v1.0.1-0.20190212211648-25d852aebe32/go.mod h1:GIjDIg/heH5DOkXY3YJ/wNhfHsQHoXGjl8G8amsYQ1I= +github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= +github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= +github.com/go-ole/go-ole v1.2.4/go.mod h1:XCwSNxSkXRo4vlyPy93sltvi/qJq0jqQhjqQNIwKuxM= +github.com/go-playground/overalls v0.0.0-20180201144345-22ec1a223b7c/go.mod h1:UqxAgEOt89sCiXlrc/ycnx00LVvUO/eS8tMUkWX4R7w= +github.com/go-sql-driver/mysql v0.0.0-20170715192408-3955978caca4 h1:3DFRjZdCDhzvxDf0U6/1qAryzOqD7Y5iAj0DJRRl1bs= +github.com/go-sql-driver/mysql v0.0.0-20170715192408-3955978caca4/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w= +github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= +github.com/gogo/protobuf v0.0.0-20180717141946-636bf0302bc9/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= +github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= +github.com/gogo/protobuf v1.2.1 
h1:/s5zKNz0uPFCZ5hddgPdo2TK2TVrUNMn0OOX8/aZMTE= +github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4= +github.com/gogo/protobuf v1.3.1 h1:DqDEcV5aeaTmdFBePNpYsp3FlcVH/2ISVVM9Qf8PSls= +github.com/gogo/protobuf v1.3.1/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o= +github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b h1:VKtxabqXZkF25pY9ekfRL6a582T4P37/31XEstQ5p58= +github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= +github.com/golang/groupcache v0.0.0-20160516000752-02826c3e7903/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20181024230925-c65c006176ff/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6 h1:ZgQEtGgCBiWRM39fZuwSd1LwSqqSW0hOdXCYYDX0R3I= +github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= +github.com/golang/protobuf v0.0.0-20180814211427-aa810b61a9c7/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.2 h1:6nsPYzhq5kReh6QImI3k5qWzO4PEbvbIW2cwSfR/6xs= +github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.4 h1:87PNWwrRvUSnqS4dlcBU/ftvOIBep4sYuBLlh6rX2wk= +github.com/golang/protobuf v1.3.4/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= +github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db h1:woRePGFeVFfLKN/pOkfl+p/TAqKOfFu+7KPlMVpok/w= +github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= +github.com/golang/snappy v0.0.1 h1:Qgr9rKW7uDUkrbSmQeiDsGa8SjGyCOGtuasMWwvp2P4= +github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= +github.com/google/btree v1.0.0 h1:0udJVsspx3VBr5FwtLhQQtuAsVc79tTq0ocGIPAU6qo= +github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= +github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= +github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= +github.com/google/pprof v0.0.0-20190930153522-6ce02741cba3/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= +github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= +github.com/google/shlex v0.0.0-20181106134648-c34317bd91bf/go.mod h1:RpwtwJQFrIEPstU94h88MWPXP2ektJZ8cZ0YntAmXiE= +github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/google/uuid v1.1.1 h1:Gkbcsh/GbpXz7lPftLA3P6TYMwjCLYm83jiFQZF/3gY= +github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/gorilla/context v0.0.0-20160226214623-1ea25387ff6f/go.mod h1:kBGZzfjB9CEq2AlWe17Uuf7NDRt0dE0s8S51q0aT7Yg= +github.com/gorilla/context v1.1.1 h1:AWwleXJkX/nhcU9bZSnZoi3h/qGYqQAGhq6zZe/aQW8= +github.com/gorilla/context v1.1.1/go.mod h1:kBGZzfjB9CEq2AlWe17Uuf7NDRt0dE0s8S51q0aT7Yg= +github.com/gorilla/mux v1.6.1/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= +github.com/gorilla/mux v1.6.2 h1:Pgr17XVTNXAk3q/r4CpKzC5xBM/qW1uVLV+IhRZpIIk= +github.com/gorilla/mux v1.6.2/go.mod 
h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= +github.com/gorilla/websocket v0.0.0-20170926233335-4201258b820c/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= +github.com/gorilla/websocket v1.2.0/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= +github.com/gorilla/websocket v1.4.0 h1:WDFjx/TMzVgy9VdMMQi2K2Emtwi2QcUQsztZ/zLaH/Q= +github.com/gorilla/websocket v1.4.0/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= +github.com/grpc-ecosystem/go-grpc-middleware v1.0.1-0.20190118093823-f849b5445de4 h1:z53tR0945TRRQO/fLEVPI6SMv7ZflF0TEaTAoU7tOzg= +github.com/grpc-ecosystem/go-grpc-middleware v1.0.1-0.20190118093823-f849b5445de4/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs= +github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 h1:Ovs26xHkKqVztRpIrF/92BcuyuQ/YW4NSIpoGtfXNho= +github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk= +github.com/grpc-ecosystem/grpc-gateway v1.9.5 h1:UImYN5qQ8tuGpGE16ZmjvcTtTw24zw1QAp/SlnNrZhI= +github.com/grpc-ecosystem/grpc-gateway v1.9.5/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= +github.com/grpc-ecosystem/grpc-gateway v1.12.1 h1:zCy2xE9ablevUOrUZc3Dl72Dt+ya2FNAvC2yLYMHzi4= +github.com/grpc-ecosystem/grpc-gateway v1.12.1/go.mod h1:8XEsbTttt/W+VvjtQhLACqCisSPWTxCZ7sBRjU6iH9c= +github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= +github.com/hpcloud/tail v1.0.0 h1:nfCOvKYfkgYP8hkirhJocXT2+zOD8yUNjXaWfTlyFKI= +github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= +github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8= +github.com/jeremywohl/flatten v0.0.0-20190921043622-d936035e55cf/go.mod h1:4AmD/VxjWcI5SRB0n6szE2A6s2fsNHDLO0nAlMHgfLQ= +github.com/jonboulle/clockwork v0.1.0 h1:VKV+ZcuP6l3yW9doeqz6ziZGgcynBVQO+obU0+0hcPo= +github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo= +github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= +github.com/json-iterator/go v1.1.7 h1:KfgG9LzI+pYjr4xvmz/5H4FXjokeP+rlHLhv3iH62Fo= +github.com/json-iterator/go v1.1.7/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= +github.com/juju/errors v0.0.0-20181118221551-089d3ea4e4d5/go.mod h1:W54LbzXuIE0boCoNJfwqpmkKJ1O4TCTZMetAt6jGk7Q= +github.com/juju/ratelimit v1.0.1 h1:+7AIFJVQ0EQgq/K9+0Krm7m530Du7tIz0METWzN0RgY= +github.com/juju/ratelimit v1.0.1/go.mod h1:qapgC/Gy+xNh9UxzV13HGGl/6UXNN+ct+vwSgWNm/qk= +github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= +github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q= +github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00= +github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= +github.com/klauspost/cpuid v0.0.0-20170728055534-ae7887de9fa5/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek= +github.com/klauspost/cpuid v1.2.1/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek= +github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= +github.com/konsorten/go-windows-terminal-sequences v1.0.2 h1:DB17ag19krx9CFsz4o3enTrPXyIXCl+2iCXH/aMAp9s= +github.com/konsorten/go-windows-terminal-sequences v1.0.2/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= +github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= 
+github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI= +github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= +github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= +github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE= +github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= +github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= +github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU= +github.com/mattn/go-isatty v0.0.4/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= +github.com/mattn/go-runewidth v0.0.2/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU= +github.com/mattn/go-shellwords v1.0.3/go.mod h1:3xCvwCdWdlDJUrvuMn7Wuy9eWs4pE8vqg+NOMyg4B2o= +github.com/matttproud/golang_protobuf_extensions v1.0.1 h1:4hp9jkHxhMHkqkrB3Ix0jegS5sx/RkqARlsWZ6pIwiU= +github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= +github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= +github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= +github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= +github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= +github.com/modern-go/reflect2 v1.0.1 h1:9f412s+6RmYXLWZSEzVVgPGK7C2PphHj5RJrvfx9AWI= +github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= +github.com/montanaflynn/stats v0.0.0-20151014174947-eeaced052adb/go.mod h1:wL8QJuTMNUDYhXwkmfOly8iTdp5TEcJFWZD2D7SIkUc= +github.com/montanaflynn/stats v0.0.0-20180911141734-db72e6cae808 h1:pmpDGKLw4n82EtrNiLqB+xSz/JQwFOaZuMALYUHwX5s= +github.com/montanaflynn/stats v0.0.0-20180911141734-db72e6cae808/go.mod h1:wL8QJuTMNUDYhXwkmfOly8iTdp5TEcJFWZD2D7SIkUc= +github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= +github.com/ncw/directio v1.0.4/go.mod h1:CKGdcN7StAaqjT7Qack3lAXeX4pjnyc46YeqZH1yWVY= +github.com/ngaut/log v0.0.0-20180314031856-b8e36e7ba5ac/go.mod h1:ueVCjKQllPmX7uEvCYnZD5b8qjidGf1TCH61arVe4SU= +github.com/ngaut/pools v0.0.0-20180318154953-b7bc8c42aac7 h1:7KAv7KMGTTqSmYZtNdcNTgsos+vFzULLwyElndwn+5c= +github.com/ngaut/pools v0.0.0-20180318154953-b7bc8c42aac7/go.mod h1:iWMfgwqYW+e8n5lC/jjNEhwcjbRDpl5NT7n2h+4UNcI= +github.com/ngaut/sync2 v0.0.0-20141008032647-7a24ed77b2ef h1:K0Fn+DoFqNqktdZtdV3bPQ/0cuYh2H4rkg0tytX/07k= +github.com/ngaut/sync2 v0.0.0-20141008032647-7a24ed77b2ef/go.mod h1:7WjlapSfwQyo6LNmIvEWzsW1hbBQfpUO4JWnuQRmva8= +github.com/nicksnyder/go-i18n v1.10.0/go.mod h1:HrK7VCrbOvQoUAQ7Vpy7i87N7JZZZ7R2xBGjv0j365Q= +github.com/olekukonko/tablewriter v0.0.0-20170122224234-a0225b3f23b5/go.mod h1:vsDQFd/mU46D+Z4whnwzcISnGGzXWMclvtLoiIKAKIo= +github.com/onsi/ginkgo v1.6.0 h1:Ix8l273rp3QzYgXSR+c8d1fTG7UPgYkOSELPhiY/YGw= +github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/gomega v1.4.2 h1:3mYCb7aPxS/RU7TI1y4rkEn1oKmPRjNJLNEXgw7MH2I= +github.com/onsi/gomega 
v1.4.2/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= +github.com/opentracing/basictracer-go v1.0.0/go.mod h1:QfBfYuafItcjQuMwinw9GhYKwFXS9KnPs5lxoYwgW74= +github.com/opentracing/opentracing-go v1.0.2 h1:3jA2P6O1F9UOrWVpwrIo17pu01KWvNWg4X946/Y5Zwg= +github.com/opentracing/opentracing-go v1.0.2/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= +github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic= +github.com/pelletier/go-toml v1.3.0/go.mod h1:PN7xzY2wHTK0K9p34ErDQMlFxa51Fk0OUruD3k1mMwo= +github.com/petar/GoLLRB v0.0.0-20190514000832-33fb24c13b99/go.mod h1:HUpKUBZnpzkdx0kD/+Yfuft+uD3zHGtXF/XJB14TUr4= +github.com/phf/go-queue v0.0.0-20170504031614-9abe38d0371d/go.mod h1:lXfE4PvvTW5xOjO6Mba8zDPyw8M93B6AQ7frTGnMlA8= +github.com/pingcap-incubator/tinykv v0.0.0-20200514052412-e01d729bd45c h1:hHXu6vMCsul2JaC8EvkR/hxHy5QCltQJaLpxLLUE+c0= +github.com/pingcap-incubator/tinykv v0.0.0-20200514052412-e01d729bd45c/go.mod h1:e+sPmTjmI5GuHGv0+sFdz8uvGw2WOrM9wRk0PJWs1VY= +github.com/pingcap/errcode v0.0.0-20180921232412-a1a7271709d9 h1:KH4f4Si9XK6/IW50HtoaiLIFHGkapOM6w83za47UYik= +github.com/pingcap/errcode v0.0.0-20180921232412-a1a7271709d9/go.mod h1:4b2X8xSqxIroj/IZ9MX/VGZhAwc11wB9wRIzHvz6SeM= +github.com/pingcap/errors v0.11.0/go.mod h1:Oi8TUi2kEtXXLMJk9l1cGmz20kV3TaQ0usTwv5KuLY8= +github.com/pingcap/errors v0.11.4/go.mod h1:Oi8TUi2kEtXXLMJk9l1cGmz20kV3TaQ0usTwv5KuLY8= +github.com/pingcap/errors v0.11.5-0.20190809092503-95897b64e011 h1:58naV4XMEqm0hl9LcYo6cZoGBGiLtefMQMF/vo3XLgQ= +github.com/pingcap/errors v0.11.5-0.20190809092503-95897b64e011/go.mod h1:Oi8TUi2kEtXXLMJk9l1cGmz20kV3TaQ0usTwv5KuLY8= +github.com/pingcap/failpoint v0.0.0-20191029060244-12f4ac2fd11d h1:F8vp38kTAckN+v8Jlc98uMBvKIzr1a+UhnLyVYn8Q5Q= +github.com/pingcap/failpoint v0.0.0-20191029060244-12f4ac2fd11d/go.mod h1:DNS3Qg7bEDhU6EXNHF+XSv/PGznQaMJ5FWvctpm6pQI= +github.com/pingcap/failpoint v0.0.0-20200210140405-f8f9fb234798 h1:6DMbRqPI1qzQ8N1xc3+nKY8IxSACd9VqQKkRVvbyoIg= +github.com/pingcap/failpoint v0.0.0-20200210140405-f8f9fb234798/go.mod h1:DNS3Qg7bEDhU6EXNHF+XSv/PGznQaMJ5FWvctpm6pQI= +github.com/pingcap/fn v0.0.0-20191016082858-07623b84a47d/go.mod h1:fMRU1BA1y+r89AxUoaAar4JjrhUkVDt0o0Np6V8XbDQ= +github.com/pingcap/goleveldb v0.0.0-20191226122134-f82aafb29989 h1:surzm05a8C9dN8dIUmo4Be2+pMRb6f55i+UIYrluu2E= +github.com/pingcap/goleveldb v0.0.0-20191226122134-f82aafb29989/go.mod h1:O17XtbryoCJhkKGbT62+L2OlrniwqiGLSqrmdHCMzZw= +github.com/pingcap/kvproto v0.0.0-20191211054548-3c6b38ea5107/go.mod h1:WWLmULLO7l8IOcQG+t+ItJ3fEcrL5FxF0Wu+HrMy26w= +github.com/pingcap/kvproto v0.0.0-20191213111810-93cb7c623c8b/go.mod h1:WWLmULLO7l8IOcQG+t+ItJ3fEcrL5FxF0Wu+HrMy26w= +github.com/pingcap/kvproto v0.0.0-20200228095611-2cf9a243b8d5/go.mod h1:IOdRDPLyda8GX2hE/jO7gqaCV/PNFh8BZQCQZXfIOqI= +github.com/pingcap/log v0.0.0-20191012051959-b742a5d432e9 h1:AJD9pZYm72vMgPcQDww9rkZ1DnWfl0pXV3BOWlkYIjA= +github.com/pingcap/log v0.0.0-20191012051959-b742a5d432e9/go.mod h1:4rbK1p9ILyIfb6hU7OG2CiWSqMXnp3JMbiaVJ6mvoY8= +github.com/pingcap/log v0.0.0-20200117041106-d28c14d3b1cd h1:CV3VsP3Z02MVtdpTMfEgRJ4T9NGgGTxdHpJerent7rM= +github.com/pingcap/log v0.0.0-20200117041106-d28c14d3b1cd/go.mod h1:4rbK1p9ILyIfb6hU7OG2CiWSqMXnp3JMbiaVJ6mvoY8= +github.com/pingcap/parser v0.0.0-20200305120128-bde9faa0df84/go.mod h1:9v0Edh8IbgjGYW2ArJr19E+bvL8zKahsFp+ixWeId+4= +github.com/pingcap/pd v1.1.0-beta.0.20200106144140-f5a7aa985497/go.mod h1:cfT/xu4Zz+Tkq95QrLgEBZ9ikRcgzy4alHqqoaTftqI= +github.com/pingcap/sysutil 
v0.0.0-20191216090214-5f9620d22b3b/go.mod h1:EB/852NMQ+aRKioCpToQ94Wl7fktV+FNnxf3CX/TTXI= +github.com/pingcap/sysutil v0.0.0-20200309085538-962fd285f3bb/go.mod h1:EB/852NMQ+aRKioCpToQ94Wl7fktV+FNnxf3CX/TTXI= +github.com/pingcap/tidb v1.1.0-beta.0.20200309111804-d8264d47f760/go.mod h1:xyDkd7xcaFdM0VZKmvW27GeAv/BwHzN+5r066YWqpAg= +github.com/pingcap/tidb-tools v3.0.6-0.20191106033616-90632dda3863+incompatible/go.mod h1:XGdcy9+yqlDSEMTpOXnwf3hiTeqrV6MN/u1se9N8yIM= +github.com/pingcap/tipb v0.0.0-20190428032612-535e1abaa330/go.mod h1:RtkHW8WbcNxj8lsbzjaILci01CtYnYbIkQhjyZWrWVI= +github.com/pingcap/tipb v0.0.0-20200212061130-c4d518eb1d60 h1:aJPXrT1u4VfUSGFA2oQVwl4pOXzqe+YI6wed01cjDH4= +github.com/pingcap/tipb v0.0.0-20200212061130-c4d518eb1d60/go.mod h1:RtkHW8WbcNxj8lsbzjaILci01CtYnYbIkQhjyZWrWVI= +github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pkg/errors v0.8.1 h1:iURUrRGxPUNPdy5/HRSm+Yj6okJ6UtLINN0Q9M4+h3I= +github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/prometheus/client_golang v0.9.0/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= +github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= +github.com/prometheus/client_golang v1.0.0 h1:vrDKnkGzuGvhNAL56c7DBz29ZL+KxnoR0x7enabFceM= +github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= +github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= +github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90 h1:S/YWwWx/RA8rT8tKFRuGUZhuA90OyIBpPCXkcbwU8DE= +github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4 h1:gQz4mCbXsO+nc9n1hCxHcGA3Zx3Eo+UHZoInFGUIXNM= +github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/common v0.0.0-20181020173914-7e9e6cabbd39/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= +github.com/prometheus/common v0.4.1 h1:K0MGApIoQvMw27RTdJkPbr3JZ7DNbtxQNyi5STVM6Kw= +github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= +github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= +github.com/prometheus/procfs v0.0.2 h1:6LJUbpNm42llc4HRCuvApCSWB/WfhuNo9K98Q9sNGfs= +github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= +github.com/remyoudompheng/bigfft v0.0.0-20190728182440-6a916e37a237 h1:HQagqIiBmr8YXawX/le3+O26N+vPPC1PtjaF3mwnook= +github.com/remyoudompheng/bigfft v0.0.0-20190728182440-6a916e37a237/go.mod h1:qqbHyh8v60DhA7CoWK5oRCqLrMHRGoxYCSS9EjAz6Eo= +github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg= +github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= +github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= +github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g= +github.com/sergi/go-diff v1.0.1-0.20180205163309-da645544ed44 
h1:tB9NOR21++IjLyVx3/PCPhWMwqGNCMQEH96A6dMZ/gc= +github.com/sergi/go-diff v1.0.1-0.20180205163309-da645544ed44/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo= +github.com/shirou/gopsutil v2.19.10+incompatible/go.mod h1:5b4v6he4MtMOwMlS0TUMTu2PcXUg8+E1lC7eC3UO/RA= +github.com/shirou/w32 v0.0.0-20160930032740-bb4de0191aa4/go.mod h1:qsXQc7+bwAM3Q1u/4XEfrquwF8Lw7D7y5cD8CuHnfIc= +github.com/shurcooL/httpfs v0.0.0-20171119174359-809beceb2371/go.mod h1:ZY1cvUeJuFPAdZ/B6v7RHavJWZn2YPVFQ1OSXhCGOkg= +github.com/shurcooL/vfsgen v0.0.0-20181020040650-a97a25d856ca/go.mod h1:TrYk7fJVaAttu97ZZKrO9UbRa8izdowaMIZcxYMbVaw= +github.com/sirupsen/logrus v1.2.0 h1:juTguoYk5qI21pwyTXY3B3Y5cOTH3ZUyZCg1v/mihuo= +github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= +github.com/soheilhy/cmux v0.1.4 h1:0HKaf1o97UwFjHH9o5XsHUOF+tqmdA7KEzXLpiyaw0E= +github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM= +github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72 h1:qLC7fQah7D6K1B0ujays3HV9gkFtllcxhzImRR7ArPQ= +github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= +github.com/spaolacci/murmur3 v1.1.0 h1:7c1g84S4BPRrfL5Xrdp6fOJ206sU9y293DDHaoy0bLI= +github.com/spaolacci/murmur3 v1.1.0/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= +github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ= +github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= +github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ= +github.com/spf13/cobra v0.0.5/go.mod h1:3K3wKZymM7VvHMDS9+Akkh4K60UwM26emMESw8tLCHU= +github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo= +github.com/spf13/pflag v1.0.1 h1:aCvUg6QPl3ibpQUxyLkrEkCHtPqYJL4x9AuhqVqFis4= +github.com/spf13/pflag v1.0.1/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= +github.com/spf13/pflag v1.0.3 h1:zPAT6CGy6wXeQ7NtTnaTerfKOsV6V6F8agHXFiazDkg= +github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= +github.com/spf13/viper v1.3.2/go.mod h1:ZiWeW+zYFKm7srdB9IoDzzZXaJaI5eL9QjNiN/DMA2s= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.2.0/go.mod h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoHMkEqE= +github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= +github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= +github.com/stretchr/testify v1.4.0 h1:2E4SXV/wtOkTonXsotYi4li6zVWxYlZuYNCXe9XRJyk= +github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= +github.com/syndtr/goleveldb v0.0.0-20180815032940-ae2bd5eed72d h1:4J9HCZVpvDmj2tiKGSTUnb3Ok/9CEQb9oqu9LHKQQpc= +github.com/syndtr/goleveldb v0.0.0-20180815032940-ae2bd5eed72d/go.mod h1:Z4AUp2Km+PwemOoO/VB5AOx9XSsIItzFjoJlOSiYmn0= +github.com/tiancaiamao/appdash v0.0.0-20181126055449-889f96f722a2/go.mod h1:2PfKggNGDuadAa0LElHrByyrz4JPZ9fFx6Gs7nx7ZZU= +github.com/tiancaiamao/check v0.0.0-20191119042138-8e73d07b629d h1:TMYOU9yCm2egiuzypRgFlVDw/5LzkJZaTmT26GPyFtE= +github.com/tiancaiamao/check v0.0.0-20191119042138-8e73d07b629d/go.mod h1:PYMCGwN0JHjoqGr3HrZoD+b8Tgx8bKnArhSq8YVzUMc= +github.com/tmc/grpc-websocket-proxy v0.0.0-20170815181823-89b8d40f7ca8/go.mod 
h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= +github.com/tmc/grpc-websocket-proxy v0.0.0-20171017195756-830351dc03c6 h1:lYIiVDtZnyTWlNwiAxLj0bbpTcx1BWCFhXjfsvmPdNc= +github.com/tmc/grpc-websocket-proxy v0.0.0-20171017195756-830351dc03c6/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= +github.com/uber-go/atomic v1.3.2/go.mod h1:/Ct5t2lcmbJ4OSe/waGBoaVvVqtO0bmtfVNex1PFV8g= +github.com/uber/jaeger-client-go v2.15.0+incompatible/go.mod h1:WVhlPFC8FDjOFMMWRy2pZqQJSXxYSwNYOkTr/Z6d3Kk= +github.com/uber/jaeger-lib v1.5.0/go.mod h1:ComeNDZlWwrWnDv8aPp0Ba6+uUTzImX/AauajbLI56U= +github.com/ugorji/go/codec v0.0.0-20181204163529-d75b2dcb6bc8/go.mod h1:VFNgLljTbGfSG7qAOspJ7OScBnGdDN/yBr0sguwnwf0= +github.com/unrolled/render v0.0.0-20171102162132-65450fb6b2d3/go.mod h1:tu82oB5W2ykJRVioYsB+IQKcft7ryBr7w12qMBUPyXg= +github.com/unrolled/render v0.0.0-20180914162206-b9786414de4d h1:ggUgChAeyge4NZ4QUw6lhHsVymzwSDJOZcE0s2X8S20= +github.com/unrolled/render v0.0.0-20180914162206-b9786414de4d/go.mod h1:tu82oB5W2ykJRVioYsB+IQKcft7ryBr7w12qMBUPyXg= +github.com/urfave/cli v1.20.0/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA= +github.com/urfave/negroni v0.3.0 h1:PaXOb61mWeZJxc1Ji2xJjpVg9QfPo0rrB+lHyBxGNSU= +github.com/urfave/negroni v0.3.0/go.mod h1:Meg73S6kFm/4PpbYdq35yYWoCZ9mS/YSx+lKnmiohz4= +github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2 h1:eY9dn8+vbi4tKz5Qo6v2eYzo7kUS51QINcR5jNpbZS8= +github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU= +github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q= +github.com/yookoala/realpath v1.0.0/go.mod h1:gJJMA9wuX7AcqLy1+ffPatSCySA1FQ2S8Ya9AIoYBpE= +go.etcd.io/bbolt v1.3.3 h1:MUGmc65QhB3pIlaQ5bB4LwqSj6GIonVJXpZiaKNyaKk= +go.etcd.io/bbolt v1.3.3/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= +go.etcd.io/etcd v0.5.0-alpha.5.0.20191023171146-3cf2f69b5738 h1:lWF4f9Nypl1ZqSb4gLeh/DGvBYVaUYHuiB93teOmwgc= +go.etcd.io/etcd v0.5.0-alpha.5.0.20191023171146-3cf2f69b5738/go.mod h1:dnLIgRNXwCJa5e+c6mIZCrds/GIG4ncV9HhK5PX7jPg= +go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= +go.uber.org/atomic v1.5.0 h1:OI5t8sDa1Or+q8AeE+yKeB/SDYioSHAgcVljj9JIETY= +go.uber.org/atomic v1.5.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= +go.uber.org/atomic v1.6.0 h1:Ezj3JGmsOnG1MoRWQkPBsKLe9DwWD9QeXzTRzzldNVk= +go.uber.org/atomic v1.6.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= +go.uber.org/automaxprocs v1.2.0 h1:+RUihKM+nmYUoB9w0D0Ov5TJ2PpFO2FgenTxMJiZBZA= +go.uber.org/automaxprocs v1.2.0/go.mod h1:YfO3fm683kQpzETxlTGZhGIVmXAhaw3gxeBADbpZtnU= +go.uber.org/goleak v0.10.0/go.mod h1:VCZuO8V8mFPlL0F5J5GK1rtHV3DrFcQ1R8ryq7FK0aI= +go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0= +go.uber.org/multierr v1.3.0/go.mod h1:VgVr7evmIr6uPjLBxg28wmKNXyqE9akIJ5XnfpiKl+4= +go.uber.org/multierr v1.4.0 h1:f3WCSC2KzAcBXGATIxAB1E2XuCpNU255wNKZ505qi3E= +go.uber.org/multierr v1.4.0/go.mod h1:VgVr7evmIr6uPjLBxg28wmKNXyqE9akIJ5XnfpiKl+4= +go.uber.org/multierr v1.5.0 h1:KCa4XfM8CWFCpxXRGok+Q0SS/0XBhMDbHHGABQLvD2A= +go.uber.org/multierr v1.5.0/go.mod h1:FeouvMocqHpRaaGuG9EjoKcStLC43Zu/fmqdUMPcKYU= +go.uber.org/tools v0.0.0-20190618225709-2cfd321de3ee h1:0mgffUl7nfd+FpvXMVz4IDEaUSmT1ysygQC7qYo7sG4= +go.uber.org/tools v0.0.0-20190618225709-2cfd321de3ee/go.mod h1:vJERXedbb3MVM5f9Ejo0C68/HhF8uaILCdgjnY+goOA= +go.uber.org/zap v1.9.1/go.mod 
h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= +go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= +go.uber.org/zap v1.12.0 h1:dySoUQPFBGj6xwjmBzageVL8jGi8uxc6bEmJQjA06bw= +go.uber.org/zap v1.12.0/go.mod h1:zwrFLgMcdUuIBviXEYEH1YKNaOBnKXsx2IPda5bBwHM= +go.uber.org/zap v1.14.0 h1:/pduUoebOeeJzTDFuoMgC6nRkiasr1sBCIEorly7m4o= +go.uber.org/zap v1.14.0/go.mod h1:zwrFLgMcdUuIBviXEYEH1YKNaOBnKXsx2IPda5bBwHM= +golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20181203042331-505ab145d0a9/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20191206172530-e9b2fee46413 h1:ULYEB3JvPRE/IfO+9uO7vKV/xzVTO7XPAwm8xbf4w2g= +golang.org/x/crypto v0.0.0-20191206172530-e9b2fee46413/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= +golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= +golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20190930215403-16217165b5de h1:5hukYrvBGR8/eNkX5mdUezrA6JiaEZDtJb9Ei+1LlBs= +golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20200302205851-738671d3881b h1:Wh+f8QHJXR411sJR8/vRBTZ7YapZaRvUcLFFJhusH0k= +golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= +golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc= +golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= +golang.org/x/mod v0.2.0 h1:KU7oHjnv3XNWfa5COkzUifxZmxp1TyI7ImMXqFxLwvQ= +golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181005035420-146acd28ed58/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181220203305-927f97764cc3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net 
v0.0.0-20190813141303-74dc4d7220e7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20191002035440-2ec189313ef0/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200226121028-0de0cce0169b h1:0mm1VjtFUOIlE1SbDlwjYaDxZVDP2S5ou6y0gSgXHu8= +golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= +golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181205085412-a5c9d58dba9a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190626221950-04f50cda93cb/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190826190057-c7b8b68b1456/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191210023423-ac6580df4449 h1:gSbV7h1NRL2G1xTg/owz62CST1oJBmxy4QpMMregXVQ= +golang.org/x/sys v0.0.0-20191210023423-ac6580df4449/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.2 h1:tW2bmiBqwgJj/UpqtC8EpXEZVYOwU0yG4iWbprSVAcs= +golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= +golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20190308202827-9d24e82272b4 h1:SvFZT6jyqRaOeXpc5h/JSfZenJ2O330aBsf7JfSUXmQ= +golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20181030221726-6c7e314b6563/go.mod 
h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= +golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20191029041327-9cc4af7d6b2c/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191029190741-b9c20aec41a5/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191107010934-f79515f33823 h1:akkRBeitX2EZP59KdtKw310CI4WGPCNPyrLbE7WZA8Y= +golang.org/x/tools v0.0.0-20191107010934-f79515f33823/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191130070609-6e064ea0cf2d/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200304193943-95d2e580d8eb h1:iKlO7ROJc6SttHKlxzwGytRtBUqX4VARrNTgP2YLX5M= +golang.org/x/tools v0.0.0-20200304193943-95d2e580d8eb/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= +golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543 h1:E7g+9GITq07hpfrRu66IVDexMakfv52eLZ2CXBWiKr4= +golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= +google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= +google.golang.org/genproto v0.0.0-20181004005441-af9cb2a35e7f/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= +google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= +google.golang.org/genproto v0.0.0-20190927181202-20e1ac93f88c h1:hrpEMCZ2O7DR5gC1n2AJGVhrwiEjOi35+jxtIuZpTMo= +google.golang.org/genproto v0.0.0-20190927181202-20e1ac93f88c/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8= +google.golang.org/grpc v0.0.0-20180607172857-7a6a684ca69e/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= +google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= +google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= +google.golang.org/grpc v1.23.1 h1:q4XQuHFC6I28BKZpo6IYyb3mNO+l7lSOxRuYTCiDfXk= +google.golang.org/grpc v1.23.1/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= +google.golang.org/grpc v1.24.0/go.mod h1:XDChyiUovWa60DnaeDeZmSW86xtLtjtZbwvSiRnRtcA= +google.golang.org/grpc v1.25.1 h1:wdKvqQk7IttEw92GoRyKG2IDrUIpgpj6H6m81yfeMW0= +google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= +gopkg.in/alecthomas/gometalinter.v2 v2.0.12/go.mod 
h1:NDRytsqEZyolNuAgTzJkZMkSQM7FIKyzVzGhjB/qfYo= +gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= +gopkg.in/alecthomas/kingpin.v3-unstable v3.0.0-20180810215634-df19058c872c/go.mod h1:3HH7i1SgMqlzxCcBmUHW657sD4Kvv9sC3HpL3YukzwA= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15 h1:YR8cESwS4TdDjEe65xsg0ogRM/Nc3DYOhEAlW+xobZo= +gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/cheggaaa/pb.v1 v1.0.25/go.mod h1:V/YB90LKu/1FcN3WVnfiiE5oMCibMjukxqG/qStrOgw= +gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= +gopkg.in/fsnotify.v1 v1.4.7 h1:xOHLXZwVvI9hhs+cLKq5+I5onOuwQLhQwiu63xxlHs4= +gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= +gopkg.in/natefinch/lumberjack.v2 v2.0.0 h1:1Lc07Kr7qY4U2YPouBjpCLxpiyxIVoxqXgkXLknAOE8= +gopkg.in/natefinch/lumberjack.v2 v2.0.0/go.mod h1:l0ndWWf7gzL7RNwBG7wST/UCcT4T24xpD6X8LsfU/+k= +gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo= +gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ= +gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= +gopkg.in/yaml.v2 v2.0.0-20170812160011-eb3733d160e7/go.mod h1:JAlM8MvJe8wmxCU4Bli9HhUf9+ttbYbLASfIpnQbh74= +gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.2 h1:ZCJp+EgiOT7lHqUV2J862kp8Qj64Jo6az82+3Td9dZw= +gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.3 h1:fvjTMHxHEw/mxHbtzPi3JCcKXQRAnQTBRo6YCJSVHKI= +gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.1-2019.2.3 h1:3JgtbtFHMiCmsznwGVTUWbgGov+pVqnlf1dEJTNAXeM= +honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= +honnef.co/go/tools v0.0.1-2020.1.3 h1:sXmLre5bzIR6ypkjXCDI3jHPssRhc8KD/Ome589sc3U= +honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= +sigs.k8s.io/yaml v1.1.0 h1:4A07+ZFc2wgJwo8YNlQpr1rVlgUDlxXHhPJciaPY5gs= +sigs.k8s.io/yaml v1.1.0/go.mod h1:UJmg0vDUVViEyp3mgSv9WPwZCDxu4rQW1olrI1uml+o= +sourcegraph.com/sourcegraph/appdash v0.0.0-20180531100431-4c381bd170b4/go.mod h1:hI742Nqp5OhwiqlzhgfbWU4mW4yO10fP+LoT9WOswdU= +sourcegraph.com/sourcegraph/appdash-data v0.0.0-20151005221446-73f23eafcf67/go.mod h1:L5q+DGLGOQFpo1snNEkLOJT2d1YTW66rWNzatr3He1k= diff --git a/infoschema/builder.go b/infoschema/builder.go new file mode 100644 index 0000000..fd0391a --- /dev/null +++ b/infoschema/builder.go @@ -0,0 +1,356 @@ +// Copyright 2016 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package infoschema
+
+import (
+	"fmt"
+	"sort"
+	"strings"
+
+	"github.com/pingcap/errors"
+	"github.com/pingcap/tidb/meta"
+	"github.com/pingcap/tidb/meta/autoid"
+	"github.com/pingcap/tidb/parser/model"
+	"github.com/pingcap/tidb/table"
+	"github.com/pingcap/tidb/table/tables"
+)
+
+// Builder builds a new InfoSchema.
+type Builder struct {
+	is     *infoSchema
+	handle *Handle
+}
+
+// ApplyDiff applies a SchemaDiff to the new InfoSchema.
+// It returns the IDs of the tables affected by the SchemaDiff, and an error.
+func (b *Builder) ApplyDiff(m *meta.Meta, diff *model.SchemaDiff) ([]int64, error) {
+	b.is.schemaMetaVersion = diff.Version
+	if diff.Type == model.ActionCreateSchema {
+		return nil, b.applyCreateSchema(m, diff)
+	} else if diff.Type == model.ActionDropSchema {
+		tblIDs := b.applyDropSchema(diff.SchemaID)
+		return tblIDs, nil
+	} else if diff.Type == model.ActionModifySchemaCharsetAndCollate {
+		return nil, b.applyModifySchemaCharsetAndCollate(m, diff)
+	}
+	roDBInfo, ok := b.is.SchemaByID(diff.SchemaID)
+	if !ok {
+		return nil, ErrDatabaseNotExists.GenWithStackByArgs(
+			fmt.Sprintf("(Schema ID %d)", diff.SchemaID),
+		)
+	}
+	var oldTableID, newTableID int64
+	tblIDs := make([]int64, 0, 2)
+	switch diff.Type {
+	case model.ActionCreateTable:
+		newTableID = diff.TableID
+		tblIDs = append(tblIDs, newTableID)
+	case model.ActionDropTable:
+		oldTableID = diff.TableID
+		tblIDs = append(tblIDs, oldTableID)
+	default:
+		oldTableID = diff.TableID
+		newTableID = diff.TableID
+		tblIDs = append(tblIDs, oldTableID)
+	}
+	dbInfo := b.copySchemaTables(roDBInfo.Name.L)
+	b.copySortedTables(oldTableID, newTableID)
+
+	// We try to reuse the old allocator, so the cached auto ID can be reused.
+	var alloc autoid.Allocator
+	if tableIDIsValid(oldTableID) {
+		if oldTableID == newTableID && diff.Type != model.ActionRebaseAutoID {
+			alloc, _ = b.is.AllocByID(oldTableID)
+		}
+		b.applyDropTable(dbInfo, oldTableID)
+	}
+	if tableIDIsValid(newTableID) {
+		// All types except DropTableOrView.
+		err := b.applyCreateTable(m, dbInfo, newTableID, alloc, diff.Type)
+		if err != nil {
+			return nil, errors.Trace(err)
+		}
+	}
+	return tblIDs, nil
+}
+
+// copySortedTables copies sortedTables for the old table and the new table for later modification.
+func (b *Builder) copySortedTables(oldTableID, newTableID int64) {
+	if tableIDIsValid(oldTableID) {
+		b.copySortedTablesBucket(tableBucketIdx(oldTableID))
+	}
+	if tableIDIsValid(newTableID) && newTableID != oldTableID {
+		b.copySortedTablesBucket(tableBucketIdx(newTableID))
+	}
+}
+
+func (b *Builder) applyCreateSchema(m *meta.Meta, diff *model.SchemaDiff) error {
+	di, err := m.GetDatabase(diff.SchemaID)
+	if err != nil {
+		return errors.Trace(err)
+	}
+	if di == nil {
+		// When we apply an old schema diff, the database may have been dropped already, so we need to fall back to
+		// full load.
+		return ErrDatabaseNotExists.GenWithStackByArgs(
+			fmt.Sprintf("(Schema ID %d)", diff.SchemaID),
+		)
+	}
+	b.is.schemaMap[di.Name.L] = &schemaTables{dbInfo: di, tables: make(map[string]table.Table)}
+	return nil
+}
+
+func (b *Builder) applyModifySchemaCharsetAndCollate(m *meta.Meta, diff *model.SchemaDiff) error {
+	di, err := m.GetDatabase(diff.SchemaID)
+	if err != nil {
+		return errors.Trace(err)
+	}
+	if di == nil {
+		// This should never happen.
+		return ErrDatabaseNotExists.GenWithStackByArgs(
+			fmt.Sprintf("(Schema ID %d)", diff.SchemaID),
+		)
+	}
+	newDbInfo := b.copySchemaTables(di.Name.O)
+	newDbInfo.Charset = di.Charset
+	newDbInfo.Collate = di.Collate
+	return nil
+}
+
+func (b *Builder) applyDropSchema(schemaID int64) []int64 {
+	di, ok := b.is.SchemaByID(schemaID)
+	if !ok {
+		return nil
+	}
+	delete(b.is.schemaMap, di.Name.L)
+
+	// Copy the sortedTables that contain the tables we are going to drop.
+	tableIDs := make([]int64, 0, len(di.Tables))
+	bucketIdxMap := make(map[int]struct{})
+	for _, tbl := range di.Tables {
+		bucketIdxMap[tableBucketIdx(tbl.ID)] = struct{}{}
+		// TODO: If the table ID doesn't exist.
+		tableIDs = append(tableIDs, tbl.ID)
+	}
+	for bucketIdx := range bucketIdxMap {
+		b.copySortedTablesBucket(bucketIdx)
+	}
+
+	di = di.Clone()
+	for _, id := range tableIDs {
+		b.applyDropTable(di, id)
+	}
+	return tableIDs
+}
+
+func (b *Builder) copySortedTablesBucket(bucketIdx int) {
+	oldSortedTables := b.is.sortedTablesBuckets[bucketIdx]
+	newSortedTables := make(sortedTables, len(oldSortedTables))
+	copy(newSortedTables, oldSortedTables)
+	b.is.sortedTablesBuckets[bucketIdx] = newSortedTables
+}
+
+func (b *Builder) applyCreateTable(m *meta.Meta, dbInfo *model.DBInfo, tableID int64, alloc autoid.Allocator, tp model.ActionType) error {
+	tblInfo, err := m.GetTable(dbInfo.ID, tableID)
+	if err != nil {
+		return errors.Trace(err)
+	}
+	if tblInfo == nil {
+		// When we apply an old schema diff, the table may have been dropped already, so we need to fall back to
+		// full load.
+		return ErrTableNotExists.GenWithStackByArgs(
+			fmt.Sprintf("(Schema ID %d)", dbInfo.ID),
+			fmt.Sprintf("(Table ID %d)", tableID),
+		)
+	}
+
+	ConvertCharsetCollateToLowerCaseIfNeed(tblInfo)
+
+	if alloc == nil {
+		schemaID := dbInfo.ID
+		alloc = autoid.NewAllocator(b.handle.store, tblInfo.GetDBID(schemaID), tblInfo.IsAutoIncColUnsigned())
+	}
+	tbl, err := tables.TableFromMeta(alloc, tblInfo)
+	if err != nil {
+		return errors.Trace(err)
+	}
+	tableNames := b.is.schemaMap[dbInfo.Name.L]
+	tableNames.tables[tblInfo.Name.L] = tbl
+	bucketIdx := tableBucketIdx(tableID)
+	sortedTbls := b.is.sortedTablesBuckets[bucketIdx]
+	sortedTbls = append(sortedTbls, tbl)
+	sort.Sort(sortedTbls)
+	b.is.sortedTablesBuckets[bucketIdx] = sortedTbls
+
+	newTbl, ok := b.is.TableByID(tableID)
+	if ok {
+		dbInfo.Tables = append(dbInfo.Tables, newTbl.Meta())
+	}
+	return nil
+}
+
+// ConvertCharsetCollateToLowerCaseIfNeed converts the charset / collation of a table and its columns to lower case,
+// if the table's version is prior to TableInfoVersion3.
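+//
+// For example (an illustrative sketch, not a new code change): given a tbInfo whose
+// Version is below model.TableInfoVersion3 and whose Charset is "UTF8MB4",
+//
+//	ConvertCharsetCollateToLowerCaseIfNeed(tbInfo)
+//
+// rewrites tbInfo.Charset to "utf8mb4" and lower-cases the column charsets and collations as well.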
+func ConvertCharsetCollateToLowerCaseIfNeed(tbInfo *model.TableInfo) {
+	if tbInfo.Version >= model.TableInfoVersion3 {
+		return
+	}
+	tbInfo.Charset = strings.ToLower(tbInfo.Charset)
+	tbInfo.Collate = strings.ToLower(tbInfo.Collate)
+	for _, col := range tbInfo.Columns {
+		col.Charset = strings.ToLower(col.Charset)
+		col.Collate = strings.ToLower(col.Collate)
+	}
+}
+
+func (b *Builder) applyDropTable(dbInfo *model.DBInfo, tableID int64) {
+	bucketIdx := tableBucketIdx(tableID)
+	sortedTbls := b.is.sortedTablesBuckets[bucketIdx]
+	idx := sortedTbls.searchTable(tableID)
+	if idx == -1 {
+		return
+	}
+	if tableNames, ok := b.is.schemaMap[dbInfo.Name.L]; ok {
+		delete(tableNames.tables, sortedTbls[idx].Meta().Name.L)
+	}
+	// Remove the table from the sorted table slice.
+	b.is.sortedTablesBuckets[bucketIdx] = append(sortedTbls[0:idx], sortedTbls[idx+1:]...)
+
+	// The old DBInfo still holds a reference to the old table info, so we need to remove it.
+	for i, tblInfo := range dbInfo.Tables {
+		if tblInfo.ID == tableID {
+			if i == len(dbInfo.Tables)-1 {
+				dbInfo.Tables = dbInfo.Tables[:i]
+			} else {
+				dbInfo.Tables = append(dbInfo.Tables[:i], dbInfo.Tables[i+1:]...)
+			}
+			break
+		}
+	}
+}
+
+// InitWithOldInfoSchema initializes an empty new InfoSchema by copying all the data from the old InfoSchema.
+func (b *Builder) InitWithOldInfoSchema() *Builder {
+	oldIS := b.handle.Get().(*infoSchema)
+	b.is.schemaMetaVersion = oldIS.schemaMetaVersion
+	b.copySchemasMap(oldIS)
+	copy(b.is.sortedTablesBuckets, oldIS.sortedTablesBuckets)
+	return b
+}
+
+func (b *Builder) copySchemasMap(oldIS *infoSchema) {
+	for k, v := range oldIS.schemaMap {
+		b.is.schemaMap[k] = v
+	}
+}
+
+// copySchemaTables creates a new schemaTables instance when a table in the database has changed.
+// Modifications are then made on the new instance, because the old schemaTables must remain read-only.
+func (b *Builder) copySchemaTables(dbName string) *model.DBInfo {
+	oldSchemaTables := b.is.schemaMap[dbName]
+	newSchemaTables := &schemaTables{
+		dbInfo: oldSchemaTables.dbInfo.Copy(),
+		tables: make(map[string]table.Table, len(oldSchemaTables.tables)),
+	}
+	for k, v := range oldSchemaTables.tables {
+		newSchemaTables.tables[k] = v
+	}
+	b.is.schemaMap[dbName] = newSchemaTables
+	return newSchemaTables.dbInfo
+}
+
+// InitWithDBInfos initializes an empty new InfoSchema with a slice of DBInfo and the schema version.
+func (b *Builder) InitWithDBInfos(dbInfos []*model.DBInfo, schemaVersion int64) (*Builder, error) {
+	info := b.is
+	info.schemaMetaVersion = schemaVersion
+	for _, di := range dbInfos {
+		err := b.createSchemaTablesForDB(di, tables.TableFromMeta)
+		if err != nil {
+			return nil, errors.Trace(err)
+		}
+	}
+
+	// Initialize virtual tables.
+	for _, driver := range drivers {
+		err := b.createSchemaTablesForDB(driver.DBInfo, driver.TableFromMeta)
+		if err != nil {
+			return nil, errors.Trace(err)
+		}
+	}
+
+	// Sort all tables by `ID`.
+	for _, v := range info.sortedTablesBuckets {
+		sort.Sort(v)
+	}
+	return b, nil
+}
+
+type tableFromMetaFunc func(alloc autoid.Allocator, tblInfo *model.TableInfo) (table.Table, error)
+
+func (b *Builder) createSchemaTablesForDB(di *model.DBInfo, tableFromMeta tableFromMetaFunc) error {
+	schTbls := &schemaTables{
+		dbInfo: di,
+		tables: make(map[string]table.Table, len(di.Tables)),
+	}
+	b.is.schemaMap[di.Name.L] = schTbls
+	for _, t := range di.Tables {
+		schemaID := di.ID
+		alloc := autoid.NewAllocator(b.handle.store, t.GetDBID(schemaID), t.IsAutoIncColUnsigned())
+		var tbl table.Table
+		tbl, err := tableFromMeta(alloc, t)
+		if err != nil {
+			return errors.Trace(err)
+		}
+		schTbls.tables[t.Name.L] = tbl
+		sortedTbls := b.is.sortedTablesBuckets[tableBucketIdx(t.ID)]
+		b.is.sortedTablesBuckets[tableBucketIdx(t.ID)] = append(sortedTbls, tbl)
+	}
+	return nil
+}
+
+type virtualTableDriver struct {
+	*model.DBInfo
+	TableFromMeta func(alloc autoid.Allocator, tblInfo *model.TableInfo) (table.Table, error)
+}
+
+var drivers []*virtualTableDriver
+
+// RegisterVirtualTable registers virtual tables to the builder.
+func RegisterVirtualTable(dbInfo *model.DBInfo, tableFromMeta tableFromMetaFunc) {
+	drivers = append(drivers, &virtualTableDriver{dbInfo, tableFromMeta})
+}
+
+// Build sets the new InfoSchema to the handle in the Builder.
+func (b *Builder) Build() {
+	b.handle.value.Store(b.is)
+}
+
+// NewBuilder creates a new Builder with a Handle.
+func NewBuilder(handle *Handle) *Builder {
+	b := new(Builder)
+	b.handle = handle
+	b.is = &infoSchema{
+		schemaMap: map[string]*schemaTables{},
+		sortedTablesBuckets: make([]sortedTables, bucketCount),
+	}
+	return b
+}
+
+func tableBucketIdx(tableID int64) int {
+	return int(tableID % bucketCount)
+}
+
+func tableIDIsValid(tableID int64) bool {
+	return tableID != 0
+}
diff --git a/infoschema/infoschema.go b/infoschema/infoschema.go
new file mode 100644
index 0000000..224756d
--- /dev/null
+++ b/infoschema/infoschema.go
@@ -0,0 +1,364 @@
+// Copyright 2015 PingCAP, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package infoschema
+
+import (
+	"fmt"
+	"sort"
+	"sync/atomic"
+
+	"github.com/pingcap/tidb/kv"
+	"github.com/pingcap/tidb/meta/autoid"
+	"github.com/pingcap/tidb/parser/model"
+	"github.com/pingcap/tidb/parser/mysql"
+	"github.com/pingcap/tidb/parser/terror"
+	"github.com/pingcap/tidb/sessionctx"
+	"github.com/pingcap/tidb/table"
+	"github.com/pingcap/tidb/util"
+)
+
+var (
+	// ErrDatabaseExists returns for database already exists.
+	ErrDatabaseExists = terror.ClassSchema.New(mysql.ErrDBCreateExists, mysql.MySQLErrName[mysql.ErrDBCreateExists])
+	// ErrDatabaseDropExists returns for dropping a non-existent database.
+	ErrDatabaseDropExists = terror.ClassSchema.New(mysql.ErrDBDropExists, mysql.MySQLErrName[mysql.ErrDBDropExists])
+	// ErrDatabaseNotExists returns for database not exists.
+	ErrDatabaseNotExists = terror.ClassSchema.New(mysql.ErrBadDB, mysql.MySQLErrName[mysql.ErrBadDB])
+	// ErrTableExists returns for table already exists.
+	ErrTableExists = terror.ClassSchema.New(mysql.ErrTableExists, mysql.MySQLErrName[mysql.ErrTableExists])
+	// ErrTableDropExists returns for dropping a non-existent table.
+	ErrTableDropExists = terror.ClassSchema.New(mysql.ErrBadTable, mysql.MySQLErrName[mysql.ErrBadTable])
+	// ErrColumnNotExists returns for column not exists.
+	ErrColumnNotExists = terror.ClassSchema.New(mysql.ErrBadField, mysql.MySQLErrName[mysql.ErrBadField])
+	// ErrColumnExists returns for column already exists.
+	ErrColumnExists = terror.ClassSchema.New(mysql.ErrDupFieldName, mysql.MySQLErrName[mysql.ErrDupFieldName])
+	// ErrMultiplePriKey returns for multiple primary keys.
+	ErrMultiplePriKey = terror.ClassSchema.New(mysql.ErrMultiplePriKey, mysql.MySQLErrName[mysql.ErrMultiplePriKey])
+	// ErrTooManyKeyParts returns for too many key parts.
+	ErrTooManyKeyParts = terror.ClassSchema.New(mysql.ErrTooManyKeyParts, mysql.MySQLErrName[mysql.ErrTooManyKeyParts])
+	// ErrTableNotExists returns for table not exists.
+	ErrTableNotExists = terror.ClassSchema.New(mysql.ErrNoSuchTable, mysql.MySQLErrName[mysql.ErrNoSuchTable])
+)
+
+// InfoSchema is the interface used to retrieve the schema information.
+// It works as an in-memory cache and doesn't handle any schema change.
+// InfoSchema is read-only, and the returned value is a copy.
+// TODO: add more methods to retrieve tables and columns.
+type InfoSchema interface {
+	SchemaByName(schema model.CIStr) (*model.DBInfo, bool)
+	SchemaExists(schema model.CIStr) bool
+	TableByName(schema, table model.CIStr) (table.Table, error)
+	TableExists(schema, table model.CIStr) bool
+	SchemaByID(id int64) (*model.DBInfo, bool)
+	SchemaByTable(tableInfo *model.TableInfo) (*model.DBInfo, bool)
+	TableByID(id int64) (table.Table, bool)
+	AllocByID(id int64) (autoid.Allocator, bool)
+	AllSchemaNames() []string
+	AllSchemas() []*model.DBInfo
+	Clone() (result []*model.DBInfo)
+	SchemaTables(schema model.CIStr) []table.Table
+	SchemaMetaVersion() int64
+}
+
+// Information Schema Name.
+const (
+	Name      = util.InformationSchemaName
+	LowerName = util.InformationSchemaLowerName
+)
+
+type sortedTables []table.Table
+
+func (s sortedTables) Len() int {
+	return len(s)
+}
+
+func (s sortedTables) Swap(i, j int) {
+	s[i], s[j] = s[j], s[i]
+}
+
+func (s sortedTables) Less(i, j int) bool {
+	return s[i].Meta().ID < s[j].Meta().ID
+}
+
+func (s sortedTables) searchTable(id int64) int {
+	idx := sort.Search(len(s), func(i int) bool {
+		return s[i].Meta().ID >= id
+	})
+	if idx == len(s) || s[idx].Meta().ID != id {
+		return -1
+	}
+	return idx
+}
+
+type schemaTables struct {
+	dbInfo *model.DBInfo
+	tables map[string]table.Table
+}
+
+const bucketCount = 512
+
+type infoSchema struct {
+	schemaMap map[string]*schemaTables
+
+	// sortedTablesBuckets is a slice of sortedTables; a table's bucket index is (tableID % bucketCount).
+	sortedTablesBuckets []sortedTables
+
+	// schemaMetaVersion is the version of the schema; we should check the version whenever the schema changes.
+	schemaMetaVersion int64
+}
+
+// MockInfoSchema only serves for tests.
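+// For example (an illustrative sketch; the table info below is hypothetical):
+//
+//	tblInfo := &model.TableInfo{ID: 1, Name: model.NewCIStr("t"), State: model.StatePublic}
+//	is := MockInfoSchema([]*model.TableInfo{tblInfo})
+//	tbl, ok := is.TableByID(1) // ok == true, tbl.Meta().Name.L == "t"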
+func MockInfoSchema(tbList []*model.TableInfo) InfoSchema { + result := &infoSchema{} + result.schemaMap = make(map[string]*schemaTables) + result.sortedTablesBuckets = make([]sortedTables, bucketCount) + dbInfo := &model.DBInfo{ID: 0, Name: model.NewCIStr("test"), Tables: tbList} + tableNames := &schemaTables{ + dbInfo: dbInfo, + tables: make(map[string]table.Table), + } + result.schemaMap["test"] = tableNames + for _, tb := range tbList { + tbl := table.MockTableFromMeta(tb) + tableNames.tables[tb.Name.L] = tbl + bucketIdx := tableBucketIdx(tb.ID) + result.sortedTablesBuckets[bucketIdx] = append(result.sortedTablesBuckets[bucketIdx], tbl) + } + for i := range result.sortedTablesBuckets { + sort.Sort(result.sortedTablesBuckets[i]) + } + return result +} + +var _ InfoSchema = (*infoSchema)(nil) + +func (is *infoSchema) SchemaByName(schema model.CIStr) (val *model.DBInfo, ok bool) { + tableNames, ok := is.schemaMap[schema.L] + if !ok { + return + } + return tableNames.dbInfo, true +} + +func (is *infoSchema) SchemaMetaVersion() int64 { + return is.schemaMetaVersion +} + +func (is *infoSchema) SchemaExists(schema model.CIStr) bool { + _, ok := is.schemaMap[schema.L] + return ok +} + +func (is *infoSchema) TableByName(schema, table model.CIStr) (t table.Table, err error) { + if tbNames, ok := is.schemaMap[schema.L]; ok { + if t, ok = tbNames.tables[table.L]; ok { + return + } + } + return nil, ErrTableNotExists.GenWithStackByArgs(schema, table) +} + +func (is *infoSchema) TableExists(schema, table model.CIStr) bool { + if tbNames, ok := is.schemaMap[schema.L]; ok { + if _, ok = tbNames.tables[table.L]; ok { + return true + } + } + return false +} + +func (is *infoSchema) SchemaByID(id int64) (val *model.DBInfo, ok bool) { + for _, v := range is.schemaMap { + if v.dbInfo.ID == id { + return v.dbInfo, true + } + } + return nil, false +} + +func (is *infoSchema) SchemaByTable(tableInfo *model.TableInfo) (val *model.DBInfo, ok bool) { + if tableInfo == nil { + return nil, false + } + for _, v := range is.schemaMap { + if tbl, ok := v.tables[tableInfo.Name.L]; ok { + if tbl.Meta().ID == tableInfo.ID { + return v.dbInfo, true + } + } + } + return nil, false +} + +func (is *infoSchema) TableByID(id int64) (val table.Table, ok bool) { + slice := is.sortedTablesBuckets[tableBucketIdx(id)] + idx := slice.searchTable(id) + if idx == -1 { + return nil, false + } + return slice[idx], true +} + +func (is *infoSchema) AllocByID(id int64) (autoid.Allocator, bool) { + tbl, ok := is.TableByID(id) + if !ok { + return nil, false + } + return tbl.Allocator(nil), true +} + +func (is *infoSchema) AllSchemaNames() (names []string) { + for _, v := range is.schemaMap { + names = append(names, v.dbInfo.Name.O) + } + return +} + +func (is *infoSchema) AllSchemas() (schemas []*model.DBInfo) { + for _, v := range is.schemaMap { + schemas = append(schemas, v.dbInfo) + } + return +} + +func (is *infoSchema) SchemaTables(schema model.CIStr) (tables []table.Table) { + schemaTables, ok := is.schemaMap[schema.L] + if !ok { + return + } + for _, tbl := range schemaTables.tables { + tables = append(tables, tbl) + } + return +} + +func (is *infoSchema) Clone() (result []*model.DBInfo) { + for _, v := range is.schemaMap { + result = append(result, v.dbInfo.Clone()) + } + return +} + +// Handle handles information schema, including getting and setting. +type Handle struct { + value atomic.Value + store kv.Storage +} + +// NewHandle creates a new Handle. 
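+//
+// The usual flow (an illustrative sketch mirroring infoschema_test.go; dbInfos stands for
+// a slice of *model.DBInfo loaded from the meta layer) is to build into the handle and
+// then read the snapshot back from it:
+//
+//	handle := NewHandle(store)
+//	builder, err := NewBuilder(handle).InitWithDBInfos(dbInfos, 1)
+//	// check err ...
+//	builder.Build()
+//	is := handle.Get()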
+func NewHandle(store kv.Storage) *Handle {
+	h := &Handle{
+		store: store,
+	}
+	return h
+}
+
+// Get gets information schema from Handle.
+func (h *Handle) Get() InfoSchema {
+	v := h.value.Load()
+	schema, _ := v.(InfoSchema)
+	return schema
+}
+
+// IsValid is used to check whether the handle value is valid.
+func (h *Handle) IsValid() bool {
+	return h.value.Load() != nil
+}
+
+// EmptyClone creates a new Handle with the same store, but the value is not set.
+func (h *Handle) EmptyClone() *Handle {
+	newHandle := &Handle{
+		store: h.store,
+	}
+	return newHandle
+}
+
+func init() {
+	schemaMySQLErrCodes := map[terror.ErrCode]uint16{
+		mysql.ErrDBCreateExists: mysql.ErrDBCreateExists,
+		mysql.ErrDBDropExists: mysql.ErrDBDropExists,
+		mysql.ErrAccessDenied: mysql.ErrAccessDenied,
+		mysql.ErrBadDB: mysql.ErrBadDB,
+		mysql.ErrTableExists: mysql.ErrTableExists,
+		mysql.ErrBadTable: mysql.ErrBadTable,
+		mysql.ErrBadField: mysql.ErrBadField,
+		mysql.ErrDupFieldName: mysql.ErrDupFieldName,
+		mysql.ErrDupKeyName: mysql.ErrDupKeyName,
+		mysql.ErrNonuniqTable: mysql.ErrNonuniqTable,
+		mysql.ErrMultiplePriKey: mysql.ErrMultiplePriKey,
+		mysql.ErrTooManyKeyParts: mysql.ErrTooManyKeyParts,
+		mysql.ErrCantDropFieldOrKey: mysql.ErrCantDropFieldOrKey,
+		mysql.ErrTableNotLockedForWrite: mysql.ErrTableNotLockedForWrite,
+		mysql.ErrTableNotLocked: mysql.ErrTableNotLocked,
+		mysql.ErrNoSuchTable: mysql.ErrNoSuchTable,
+		mysql.ErrKeyDoesNotExist: mysql.ErrKeyDoesNotExist,
+		mysql.ErrCannotAddForeign: mysql.ErrCannotAddForeign,
+		mysql.ErrWrongFkDef: mysql.ErrWrongFkDef,
+		mysql.ErrDupIndex: mysql.ErrDupIndex,
+		mysql.ErrBadUser: mysql.ErrBadUser,
+		mysql.ErrUserAlreadyExists: mysql.ErrUserAlreadyExists,
+		mysql.ErrTableLocked: mysql.ErrTableLocked,
+	}
+	terror.ErrClassToMySQLCodes[terror.ClassSchema] = schemaMySQLErrCodes
+
+	// Initialize the information schema database and register the driver to `drivers`.
+	dbID := autoid.InformationSchemaDBID
+	infoSchemaTables := make([]*model.TableInfo, 0, len(tableNameToColumns))
+	for name, cols := range tableNameToColumns {
+		tableInfo := buildTableMeta(name, cols)
+		infoSchemaTables = append(infoSchemaTables, tableInfo)
+		var ok bool
+		tableInfo.ID, ok = tableIDMap[tableInfo.Name.O]
+		if !ok {
+			panic(fmt.Sprintf("get information_schema table id failed, unknown system table `%v`", tableInfo.Name.O))
+		}
+		for i, c := range tableInfo.Columns {
+			c.ID = int64(i) + 1
+		}
+	}
+	infoSchemaDB := &model.DBInfo{
+		ID: dbID,
+		Name: model.NewCIStr(Name),
+		Charset: mysql.DefaultCharset,
+		Collate: mysql.DefaultCollationName,
+		Tables: infoSchemaTables,
+	}
+	RegisterVirtualTable(infoSchemaDB, createInfoSchemaTable)
+}
+
+// IsMemoryDB checks if the db is in memory.
+func IsMemoryDB(dbName string) bool {
+	for _, driver := range drivers {
+		if driver.DBInfo.Name.L == dbName {
+			return true
+		}
+	}
+	return false
+}
+
+// HasAutoIncrementColumn checks whether the table has an auto_increment column; if so, it returns true and the column name.
+func HasAutoIncrementColumn(tbInfo *model.TableInfo) (bool, string) {
+	for _, col := range tbInfo.Columns {
+		if mysql.HasAutoIncrementFlag(col.Flag) {
+			return true, col.Name.L
+		}
+	}
+	return false, ""
+}
+
+// GetInfoSchema gets the TxnCtx InfoSchema.
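+//
+// A typical use (an illustrative sketch, assuming ctx is a sessionctx.Context with an
+// active transaction context):
+//
+//	is := GetInfoSchema(ctx)
+//	tbl, err := is.TableByName(model.NewCIStr("test"), model.NewCIStr("t"))
+//	// check err ...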
+func GetInfoSchema(ctx sessionctx.Context) InfoSchema { + sessVar := ctx.GetSessionVars() + return sessVar.TxnCtx.InfoSchema.(InfoSchema) +} diff --git a/infoschema/infoschema_test.go b/infoschema/infoschema_test.go new file mode 100644 index 0000000..d5252da --- /dev/null +++ b/infoschema/infoschema_test.go @@ -0,0 +1,262 @@ +// Copyright 2015 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package infoschema_test + +import ( + "sync" + "testing" + + . "github.com/pingcap/check" + "github.com/pingcap/errors" + "github.com/pingcap/tidb/infoschema" + "github.com/pingcap/tidb/kv" + "github.com/pingcap/tidb/meta" + "github.com/pingcap/tidb/parser/model" + "github.com/pingcap/tidb/parser/mysql" + "github.com/pingcap/tidb/session" + "github.com/pingcap/tidb/store/mockstore" + "github.com/pingcap/tidb/types" + "github.com/pingcap/tidb/util/testleak" + "github.com/pingcap/tidb/util/testutil" +) + +func TestT(t *testing.T) { + CustomVerboseFlag = true + TestingT(t) +} + +var _ = Suite(&testSuite{}) + +type testSuite struct { +} + +func (*testSuite) TestT(c *C) { + defer testleak.AfterTest(c)() + store, err := mockstore.NewMockTikvStore() + c.Assert(err, IsNil) + defer store.Close() + // Make sure it calls perfschema.Init(). + dom, err := session.BootstrapSession(store) + c.Assert(err, IsNil) + defer dom.Close() + + handle := infoschema.NewHandle(store) + dbName := model.NewCIStr("Test") + tbName := model.NewCIStr("T") + colName := model.NewCIStr("A") + idxName := model.NewCIStr("idx") + noexist := model.NewCIStr("noexist") + + colID, err := genGlobalID(store) + c.Assert(err, IsNil) + colInfo := &model.ColumnInfo{ + ID: colID, + Name: colName, + Offset: 0, + FieldType: *types.NewFieldType(mysql.TypeLonglong), + State: model.StatePublic, + } + + idxInfo := &model.IndexInfo{ + Name: idxName, + Table: tbName, + Columns: []*model.IndexColumn{ + { + Name: colName, + Offset: 0, + Length: 10, + }, + }, + Unique: true, + Primary: true, + State: model.StatePublic, + } + + tbID, err := genGlobalID(store) + c.Assert(err, IsNil) + tblInfo := &model.TableInfo{ + ID: tbID, + Name: tbName, + Columns: []*model.ColumnInfo{colInfo}, + Indices: []*model.IndexInfo{idxInfo}, + State: model.StatePublic, + } + + dbID, err := genGlobalID(store) + c.Assert(err, IsNil) + dbInfo := &model.DBInfo{ + ID: dbID, + Name: dbName, + Tables: []*model.TableInfo{tblInfo}, + State: model.StatePublic, + } + + dbInfos := []*model.DBInfo{dbInfo} + err = kv.RunInNewTxn(store, true, func(txn kv.Transaction) error { + meta.NewMeta(txn).CreateDatabase(dbInfo) + return errors.Trace(err) + }) + c.Assert(err, IsNil) + + builder, err := infoschema.NewBuilder(handle).InitWithDBInfos(dbInfos, 1) + c.Assert(err, IsNil) + + txn, err := store.Begin() + c.Assert(err, IsNil) + checkApplyCreateNonExistsSchemaDoesNotPanic(c, txn, builder) + checkApplyCreateNonExistsTableDoesNotPanic(c, txn, builder, dbID) + txn.Rollback() + + builder.Build() + is := handle.Get() + + schemaNames := is.AllSchemaNames() + c.Assert(schemaNames, HasLen, 2) + c.Assert(testutil.CompareUnorderedStringSlice(schemaNames, 
[]string{infoschema.Name, "Test"}), IsTrue) + + schemas := is.AllSchemas() + c.Assert(schemas, HasLen, 2) + schemas = is.Clone() + c.Assert(schemas, HasLen, 2) + + c.Assert(is.SchemaExists(dbName), IsTrue) + c.Assert(is.SchemaExists(noexist), IsFalse) + + schema, ok := is.SchemaByID(dbID) + c.Assert(ok, IsTrue) + c.Assert(schema, NotNil) + + schema, ok = is.SchemaByID(tbID) + c.Assert(ok, IsFalse) + c.Assert(schema, IsNil) + + schema, ok = is.SchemaByName(dbName) + c.Assert(ok, IsTrue) + c.Assert(schema, NotNil) + + schema, ok = is.SchemaByName(noexist) + c.Assert(ok, IsFalse) + c.Assert(schema, IsNil) + + schema, ok = is.SchemaByTable(tblInfo) + c.Assert(ok, IsTrue) + c.Assert(schema, NotNil) + + noexistTblInfo := &model.TableInfo{ID: 12345, Name: tblInfo.Name} + schema, ok = is.SchemaByTable(noexistTblInfo) + c.Assert(ok, IsFalse) + c.Assert(schema, IsNil) + + c.Assert(is.TableExists(dbName, tbName), IsTrue) + c.Assert(is.TableExists(dbName, noexist), IsFalse) + + tb, ok := is.TableByID(tbID) + c.Assert(ok, IsTrue) + c.Assert(tb, NotNil) + + tb, ok = is.TableByID(dbID) + c.Assert(ok, IsFalse) + c.Assert(tb, IsNil) + + alloc, ok := is.AllocByID(tbID) + c.Assert(ok, IsTrue) + c.Assert(alloc, NotNil) + + tb, err = is.TableByName(dbName, tbName) + c.Assert(err, IsNil) + c.Assert(tb, NotNil) + + _, err = is.TableByName(dbName, noexist) + c.Assert(err, NotNil) + + tbs := is.SchemaTables(dbName) + c.Assert(tbs, HasLen, 1) + + tbs = is.SchemaTables(noexist) + c.Assert(tbs, HasLen, 0) + + emptyHandle := handle.EmptyClone() + c.Assert(emptyHandle.Get(), IsNil) +} + +func (testSuite) TestMockInfoSchema(c *C) { + tblID := int64(1234) + tblName := model.NewCIStr("tbl_m") + tableInfo := &model.TableInfo{ + ID: tblID, + Name: tblName, + State: model.StatePublic, + } + colInfo := &model.ColumnInfo{ + State: model.StatePublic, + Offset: 0, + Name: model.NewCIStr("h"), + FieldType: *types.NewFieldType(mysql.TypeLong), + ID: 1, + } + tableInfo.Columns = []*model.ColumnInfo{colInfo} + is := infoschema.MockInfoSchema([]*model.TableInfo{tableInfo}) + tbl, ok := is.TableByID(tblID) + c.Assert(ok, IsTrue) + c.Assert(tbl.Meta().Name, Equals, tblName) + c.Assert(tbl.Cols()[0].ColumnInfo, Equals, colInfo) +} + +func checkApplyCreateNonExistsSchemaDoesNotPanic(c *C, txn kv.Transaction, builder *infoschema.Builder) { + m := meta.NewMeta(txn) + _, err := builder.ApplyDiff(m, &model.SchemaDiff{Type: model.ActionCreateSchema, SchemaID: 999}) + c.Assert(infoschema.ErrDatabaseNotExists.Equal(err), IsTrue) +} + +func checkApplyCreateNonExistsTableDoesNotPanic(c *C, txn kv.Transaction, builder *infoschema.Builder, dbID int64) { + m := meta.NewMeta(txn) + _, err := builder.ApplyDiff(m, &model.SchemaDiff{Type: model.ActionCreateTable, SchemaID: dbID, TableID: 999}) + c.Assert(infoschema.ErrTableNotExists.Equal(err), IsTrue) +} + +// TestConcurrent makes sure it is safe to concurrently create handle on multiple stores. 
+func (testSuite) TestConcurrent(c *C) { + defer testleak.AfterTest(c)() + storeCount := 5 + stores := make([]kv.Storage, storeCount) + for i := 0; i < storeCount; i++ { + store, err := mockstore.NewMockTikvStore() + c.Assert(err, IsNil) + stores[i] = store + } + defer func() { + for _, store := range stores { + store.Close() + } + }() + var wg sync.WaitGroup + wg.Add(storeCount) + for _, store := range stores { + go func(s kv.Storage) { + defer wg.Done() + _ = infoschema.NewHandle(s) + }(store) + } + wg.Wait() +} + +func genGlobalID(store kv.Storage) (int64, error) { + var globalID int64 + err := kv.RunInNewTxn(store, true, func(txn kv.Transaction) error { + var err error + globalID, err = meta.NewMeta(txn).GenGlobalID() + return errors.Trace(err) + }) + return globalID, errors.Trace(err) +} diff --git a/infoschema/tables.go b/infoschema/tables.go new file mode 100644 index 0000000..9ce6bdf --- /dev/null +++ b/infoschema/tables.go @@ -0,0 +1,1616 @@ +// Copyright 2016 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package infoschema + +import ( + "fmt" + "sort" + "strconv" + "sync" + "time" + + "github.com/pingcap/tidb/kv" + "github.com/pingcap/tidb/meta/autoid" + "github.com/pingcap/tidb/parser/charset" + "github.com/pingcap/tidb/parser/model" + "github.com/pingcap/tidb/parser/mysql" + "github.com/pingcap/tidb/sessionctx" + "github.com/pingcap/tidb/sessionctx/variable" + "github.com/pingcap/tidb/table" + "github.com/pingcap/tidb/types" + "github.com/pingcap/tidb/util" + "github.com/pingcap/tidb/util/sqlexec" +) + +const ( + tableSchemata = "SCHEMATA" + tableTables = "TABLES" + tableColumns = "COLUMNS" + tableColumnStatistics = "COLUMN_STATISTICS" + tableStatistics = "STATISTICS" + tableCharacterSets = "CHARACTER_SETS" + tableCollations = "COLLATIONS" + tableFiles = "FILES" + catalogVal = "def" + tableProfiling = "PROFILING" + tablePartitions = "PARTITIONS" + tableKeyColumm = "KEY_COLUMN_USAGE" + tableReferConst = "REFERENTIAL_CONSTRAINTS" + tableSessionVar = "SESSION_VARIABLES" + tablePlugins = "PLUGINS" + tableConstraints = "TABLE_CONSTRAINTS" + tableTriggers = "TRIGGERS" + tableUserPrivileges = "USER_PRIVILEGES" + tableSchemaPrivileges = "SCHEMA_PRIVILEGES" + tableTablePrivileges = "TABLE_PRIVILEGES" + tableColumnPrivileges = "COLUMN_PRIVILEGES" + tableEngines = "ENGINES" + tableRoutines = "ROUTINES" + tableParameters = "PARAMETERS" + tableEvents = "EVENTS" + tableGlobalStatus = "GLOBAL_STATUS" + tableGlobalVariables = "GLOBAL_VARIABLES" + tableSessionStatus = "SESSION_STATUS" + tableOptimizerTrace = "OPTIMIZER_TRACE" + tableTableSpaces = "TABLESPACES" + tableCollationCharacterSetApplicability = "COLLATION_CHARACTER_SET_APPLICABILITY" +) + +var tableIDMap = map[string]int64{ + tableSchemata: autoid.InformationSchemaDBID + 1, + tableTables: autoid.InformationSchemaDBID + 2, + tableColumns: autoid.InformationSchemaDBID + 3, + tableColumnStatistics: autoid.InformationSchemaDBID + 4, + tableStatistics: autoid.InformationSchemaDBID + 5, + tableCharacterSets: autoid.InformationSchemaDBID + 6, + tableCollations: autoid.InformationSchemaDBID + 
7, + tableFiles: autoid.InformationSchemaDBID + 8, + catalogVal: autoid.InformationSchemaDBID + 9, + tableProfiling: autoid.InformationSchemaDBID + 10, + tablePartitions: autoid.InformationSchemaDBID + 11, + tableKeyColumm: autoid.InformationSchemaDBID + 12, + tableReferConst: autoid.InformationSchemaDBID + 13, + tableSessionVar: autoid.InformationSchemaDBID + 14, + tablePlugins: autoid.InformationSchemaDBID + 15, + tableConstraints: autoid.InformationSchemaDBID + 16, + tableTriggers: autoid.InformationSchemaDBID + 17, + tableUserPrivileges: autoid.InformationSchemaDBID + 18, + tableSchemaPrivileges: autoid.InformationSchemaDBID + 19, + tableTablePrivileges: autoid.InformationSchemaDBID + 20, + tableColumnPrivileges: autoid.InformationSchemaDBID + 21, + tableEngines: autoid.InformationSchemaDBID + 22, + tableRoutines: autoid.InformationSchemaDBID + 24, + tableParameters: autoid.InformationSchemaDBID + 25, + tableEvents: autoid.InformationSchemaDBID + 26, + tableGlobalStatus: autoid.InformationSchemaDBID + 27, + tableGlobalVariables: autoid.InformationSchemaDBID + 28, + tableSessionStatus: autoid.InformationSchemaDBID + 29, + tableOptimizerTrace: autoid.InformationSchemaDBID + 30, + tableTableSpaces: autoid.InformationSchemaDBID + 31, + tableCollationCharacterSetApplicability: autoid.InformationSchemaDBID + 32, +} + +type columnInfo struct { + name string + tp byte + size int + flag uint + deflt interface{} + elems []string +} + +func buildColumnInfo(tableName string, col columnInfo) *model.ColumnInfo { + mCharset := charset.CharsetBin + mCollation := charset.CharsetBin + mFlag := mysql.UnsignedFlag + if col.tp == mysql.TypeVarchar || col.tp == mysql.TypeBlob { + mCharset = charset.CharsetUTF8MB4 + mCollation = charset.CollationUTF8MB4 + mFlag = col.flag + } + fieldType := types.FieldType{ + Charset: mCharset, + Collate: mCollation, + Tp: col.tp, + Flen: col.size, + Flag: mFlag, + } + return &model.ColumnInfo{ + Name: model.NewCIStr(col.name), + FieldType: fieldType, + State: model.StatePublic, + } +} + +func buildTableMeta(tableName string, cs []columnInfo) *model.TableInfo { + cols := make([]*model.ColumnInfo, 0, len(cs)) + for _, c := range cs { + cols = append(cols, buildColumnInfo(tableName, c)) + } + for i, col := range cols { + col.Offset = i + } + return &model.TableInfo{ + Name: model.NewCIStr(tableName), + Columns: cols, + State: model.StatePublic, + Charset: mysql.DefaultCharset, + Collate: mysql.DefaultCollationName, + } +} + +var schemataCols = []columnInfo{ + {"CATALOG_NAME", mysql.TypeVarchar, 512, 0, nil, nil}, + {"SCHEMA_NAME", mysql.TypeVarchar, 64, 0, nil, nil}, + {"DEFAULT_CHARACTER_SET_NAME", mysql.TypeVarchar, 64, 0, nil, nil}, + {"DEFAULT_COLLATION_NAME", mysql.TypeVarchar, 32, 0, nil, nil}, + {"SQL_PATH", mysql.TypeVarchar, 512, 0, nil, nil}, +} + +var tablesCols = []columnInfo{ + {"TABLE_CATALOG", mysql.TypeVarchar, 512, 0, nil, nil}, + {"TABLE_SCHEMA", mysql.TypeVarchar, 64, 0, nil, nil}, + {"TABLE_NAME", mysql.TypeVarchar, 64, 0, nil, nil}, + {"TABLE_TYPE", mysql.TypeVarchar, 64, 0, nil, nil}, + {"ENGINE", mysql.TypeVarchar, 64, 0, nil, nil}, + {"VERSION", mysql.TypeLonglong, 21, 0, nil, nil}, + {"ROW_FORMAT", mysql.TypeVarchar, 10, 0, nil, nil}, + {"TABLE_ROWS", mysql.TypeLonglong, 21, 0, nil, nil}, + {"AVG_ROW_LENGTH", mysql.TypeLonglong, 21, 0, nil, nil}, + {"DATA_LENGTH", mysql.TypeLonglong, 21, 0, nil, nil}, + {"MAX_DATA_LENGTH", mysql.TypeLonglong, 21, 0, nil, nil}, + {"INDEX_LENGTH", mysql.TypeLonglong, 21, 0, nil, nil}, + {"DATA_FREE", mysql.TypeLonglong, 
21, 0, nil, nil}, + {"AUTO_INCREMENT", mysql.TypeLonglong, 21, 0, nil, nil}, + {"CREATE_TIME", mysql.TypeDatetime, 19, 0, nil, nil}, + {"UPDATE_TIME", mysql.TypeDatetime, 19, 0, nil, nil}, + {"CHECK_TIME", mysql.TypeDatetime, 19, 0, nil, nil}, + {"TABLE_COLLATION", mysql.TypeVarchar, 32, mysql.NotNullFlag, "utf8_bin", nil}, + {"CHECKSUM", mysql.TypeLonglong, 21, 0, nil, nil}, + {"CREATE_OPTIONS", mysql.TypeVarchar, 255, 0, nil, nil}, + {"TABLE_COMMENT", mysql.TypeVarchar, 2048, 0, nil, nil}, + {"TIDB_TABLE_ID", mysql.TypeLonglong, 21, 0, nil, nil}, + {"TIDB_ROW_ID_SHARDING_INFO", mysql.TypeVarchar, 255, 0, nil, nil}, +} + +// See: http://dev.mysql.com/doc/refman/5.7/en/columns-table.html +var columnsCols = []columnInfo{ + {"TABLE_CATALOG", mysql.TypeVarchar, 512, 0, nil, nil}, + {"TABLE_SCHEMA", mysql.TypeVarchar, 64, 0, nil, nil}, + {"TABLE_NAME", mysql.TypeVarchar, 64, 0, nil, nil}, + {"COLUMN_NAME", mysql.TypeVarchar, 64, 0, nil, nil}, + {"ORDINAL_POSITION", mysql.TypeLonglong, 64, 0, nil, nil}, + {"COLUMN_DEFAULT", mysql.TypeBlob, 196606, 0, nil, nil}, + {"IS_NULLABLE", mysql.TypeVarchar, 3, 0, nil, nil}, + {"DATA_TYPE", mysql.TypeVarchar, 64, 0, nil, nil}, + {"CHARACTER_MAXIMUM_LENGTH", mysql.TypeLonglong, 21, 0, nil, nil}, + {"CHARACTER_OCTET_LENGTH", mysql.TypeLonglong, 21, 0, nil, nil}, + {"NUMERIC_PRECISION", mysql.TypeLonglong, 21, 0, nil, nil}, + {"NUMERIC_SCALE", mysql.TypeLonglong, 21, 0, nil, nil}, + {"DATETIME_PRECISION", mysql.TypeLonglong, 21, 0, nil, nil}, + {"CHARACTER_SET_NAME", mysql.TypeVarchar, 32, 0, nil, nil}, + {"COLLATION_NAME", mysql.TypeVarchar, 32, 0, nil, nil}, + {"COLUMN_TYPE", mysql.TypeBlob, 196606, 0, nil, nil}, + {"COLUMN_KEY", mysql.TypeVarchar, 3, 0, nil, nil}, + {"EXTRA", mysql.TypeVarchar, 30, 0, nil, nil}, + {"PRIVILEGES", mysql.TypeVarchar, 80, 0, nil, nil}, + {"COLUMN_COMMENT", mysql.TypeVarchar, 1024, 0, nil, nil}, + {"GENERATION_EXPRESSION", mysql.TypeBlob, 589779, mysql.NotNullFlag, nil, nil}, +} + +var columnStatisticsCols = []columnInfo{ + {"SCHEMA_NAME", mysql.TypeVarchar, 64, mysql.NotNullFlag, nil, nil}, + {"TABLE_NAME", mysql.TypeVarchar, 64, mysql.NotNullFlag, nil, nil}, + {"COLUMN_NAME", mysql.TypeVarchar, 64, mysql.NotNullFlag, nil, nil}, + {"HISTOGRAM", mysql.TypeJSON, 51, 0, nil, nil}, +} + +var statisticsCols = []columnInfo{ + {"TABLE_CATALOG", mysql.TypeVarchar, 512, 0, nil, nil}, + {"TABLE_SCHEMA", mysql.TypeVarchar, 64, 0, nil, nil}, + {"TABLE_NAME", mysql.TypeVarchar, 64, 0, nil, nil}, + {"NON_UNIQUE", mysql.TypeVarchar, 1, 0, nil, nil}, + {"INDEX_SCHEMA", mysql.TypeVarchar, 64, 0, nil, nil}, + {"INDEX_NAME", mysql.TypeVarchar, 64, 0, nil, nil}, + {"SEQ_IN_INDEX", mysql.TypeLonglong, 2, 0, nil, nil}, + {"COLUMN_NAME", mysql.TypeVarchar, 21, 0, nil, nil}, + {"COLLATION", mysql.TypeVarchar, 1, 0, nil, nil}, + {"CARDINALITY", mysql.TypeLonglong, 21, 0, nil, nil}, + {"SUB_PART", mysql.TypeLonglong, 3, 0, nil, nil}, + {"PACKED", mysql.TypeVarchar, 10, 0, nil, nil}, + {"NULLABLE", mysql.TypeVarchar, 3, 0, nil, nil}, + {"INDEX_TYPE", mysql.TypeVarchar, 16, 0, nil, nil}, + {"COMMENT", mysql.TypeVarchar, 16, 0, nil, nil}, + {"INDEX_COMMENT", mysql.TypeVarchar, 1024, 0, nil, nil}, +} + +var profilingCols = []columnInfo{ + {"QUERY_ID", mysql.TypeLong, 20, 0, nil, nil}, + {"SEQ", mysql.TypeLong, 20, 0, nil, nil}, + {"STATE", mysql.TypeVarchar, 30, 0, nil, nil}, + {"DURATION", mysql.TypeNewDecimal, 9, 0, nil, nil}, + {"CPU_USER", mysql.TypeNewDecimal, 9, 0, nil, nil}, + {"CPU_SYSTEM", mysql.TypeNewDecimal, 9, 0, nil, nil}, + 
{"CONTEXT_VOLUNTARY", mysql.TypeLong, 20, 0, nil, nil}, + {"CONTEXT_INVOLUNTARY", mysql.TypeLong, 20, 0, nil, nil}, + {"BLOCK_OPS_IN", mysql.TypeLong, 20, 0, nil, nil}, + {"BLOCK_OPS_OUT", mysql.TypeLong, 20, 0, nil, nil}, + {"MESSAGES_SENT", mysql.TypeLong, 20, 0, nil, nil}, + {"MESSAGES_RECEIVED", mysql.TypeLong, 20, 0, nil, nil}, + {"PAGE_FAULTS_MAJOR", mysql.TypeLong, 20, 0, nil, nil}, + {"PAGE_FAULTS_MINOR", mysql.TypeLong, 20, 0, nil, nil}, + {"SWAPS", mysql.TypeLong, 20, 0, nil, nil}, + {"SOURCE_FUNCTION", mysql.TypeVarchar, 30, 0, nil, nil}, + {"SOURCE_FILE", mysql.TypeVarchar, 20, 0, nil, nil}, + {"SOURCE_LINE", mysql.TypeLong, 20, 0, nil, nil}, +} + +var charsetCols = []columnInfo{ + {"CHARACTER_SET_NAME", mysql.TypeVarchar, 32, 0, nil, nil}, + {"DEFAULT_COLLATE_NAME", mysql.TypeVarchar, 32, 0, nil, nil}, + {"DESCRIPTION", mysql.TypeVarchar, 60, 0, nil, nil}, + {"MAXLEN", mysql.TypeLonglong, 3, 0, nil, nil}, +} + +var collationsCols = []columnInfo{ + {"COLLATION_NAME", mysql.TypeVarchar, 32, 0, nil, nil}, + {"CHARACTER_SET_NAME", mysql.TypeVarchar, 32, 0, nil, nil}, + {"ID", mysql.TypeLonglong, 11, 0, nil, nil}, + {"IS_DEFAULT", mysql.TypeVarchar, 3, 0, nil, nil}, + {"IS_COMPILED", mysql.TypeVarchar, 3, 0, nil, nil}, + {"SORTLEN", mysql.TypeLonglong, 3, 0, nil, nil}, +} + +var keyColumnUsageCols = []columnInfo{ + {"CONSTRAINT_CATALOG", mysql.TypeVarchar, 512, mysql.NotNullFlag, nil, nil}, + {"CONSTRAINT_SCHEMA", mysql.TypeVarchar, 64, mysql.NotNullFlag, nil, nil}, + {"CONSTRAINT_NAME", mysql.TypeVarchar, 64, mysql.NotNullFlag, nil, nil}, + {"TABLE_CATALOG", mysql.TypeVarchar, 512, mysql.NotNullFlag, nil, nil}, + {"TABLE_SCHEMA", mysql.TypeVarchar, 64, mysql.NotNullFlag, nil, nil}, + {"TABLE_NAME", mysql.TypeVarchar, 64, mysql.NotNullFlag, nil, nil}, + {"COLUMN_NAME", mysql.TypeVarchar, 64, mysql.NotNullFlag, nil, nil}, + {"ORDINAL_POSITION", mysql.TypeLonglong, 10, mysql.NotNullFlag, nil, nil}, + {"POSITION_IN_UNIQUE_CONSTRAINT", mysql.TypeLonglong, 10, 0, nil, nil}, + {"REFERENCED_TABLE_SCHEMA", mysql.TypeVarchar, 64, 0, nil, nil}, + {"REFERENCED_TABLE_NAME", mysql.TypeVarchar, 64, 0, nil, nil}, + {"REFERENCED_COLUMN_NAME", mysql.TypeVarchar, 64, 0, nil, nil}, +} + +// See http://dev.mysql.com/doc/refman/5.7/en/referential-constraints-table.html +var referConstCols = []columnInfo{ + {"CONSTRAINT_CATALOG", mysql.TypeVarchar, 512, mysql.NotNullFlag, nil, nil}, + {"CONSTRAINT_SCHEMA", mysql.TypeVarchar, 64, mysql.NotNullFlag, nil, nil}, + {"CONSTRAINT_NAME", mysql.TypeVarchar, 64, mysql.NotNullFlag, nil, nil}, + {"UNIQUE_CONSTRAINT_CATALOG", mysql.TypeVarchar, 512, mysql.NotNullFlag, nil, nil}, + {"UNIQUE_CONSTRAINT_SCHEMA", mysql.TypeVarchar, 64, mysql.NotNullFlag, nil, nil}, + {"UNIQUE_CONSTRAINT_NAME", mysql.TypeVarchar, 64, 0, nil, nil}, + {"MATCH_OPTION", mysql.TypeVarchar, 64, mysql.NotNullFlag, nil, nil}, + {"UPDATE_RULE", mysql.TypeVarchar, 64, mysql.NotNullFlag, nil, nil}, + {"DELETE_RULE", mysql.TypeVarchar, 64, mysql.NotNullFlag, nil, nil}, + {"TABLE_NAME", mysql.TypeVarchar, 64, mysql.NotNullFlag, nil, nil}, + {"REFERENCED_TABLE_NAME", mysql.TypeVarchar, 64, mysql.NotNullFlag, nil, nil}, +} + +// See http://dev.mysql.com/doc/refman/5.7/en/variables-table.html +var sessionVarCols = []columnInfo{ + {"VARIABLE_NAME", mysql.TypeVarchar, 64, 0, nil, nil}, + {"VARIABLE_VALUE", mysql.TypeVarchar, 1024, 0, nil, nil}, +} + +// See https://dev.mysql.com/doc/refman/5.7/en/partitions-table.html +var partitionsCols = []columnInfo{ + {"TABLE_CATALOG", mysql.TypeVarchar, 512, 0, nil, 
nil}, + {"TABLE_SCHEMA", mysql.TypeVarchar, 64, 0, nil, nil}, + {"TABLE_NAME", mysql.TypeVarchar, 64, 0, nil, nil}, + {"PARTITION_NAME", mysql.TypeVarchar, 64, 0, nil, nil}, + {"SUBPARTITION_NAME", mysql.TypeVarchar, 64, 0, nil, nil}, + {"PARTITION_ORDINAL_POSITION", mysql.TypeLonglong, 21, 0, nil, nil}, + {"SUBPARTITION_ORDINAL_POSITION", mysql.TypeLonglong, 21, 0, nil, nil}, + {"PARTITION_METHOD", mysql.TypeVarchar, 18, 0, nil, nil}, + {"SUBPARTITION_METHOD", mysql.TypeVarchar, 12, 0, nil, nil}, + {"PARTITION_EXPRESSION", mysql.TypeLongBlob, types.UnspecifiedLength, 0, nil, nil}, + {"SUBPARTITION_EXPRESSION", mysql.TypeLongBlob, types.UnspecifiedLength, 0, nil, nil}, + {"PARTITION_DESCRIPTION", mysql.TypeLongBlob, types.UnspecifiedLength, 0, nil, nil}, + {"TABLE_ROWS", mysql.TypeLonglong, 21, 0, nil, nil}, + {"AVG_ROW_LENGTH", mysql.TypeLonglong, 21, 0, nil, nil}, + {"DATA_LENGTH", mysql.TypeLonglong, 21, 0, nil, nil}, + {"MAX_DATA_LENGTH", mysql.TypeLonglong, 21, 0, nil, nil}, + {"INDEX_LENGTH", mysql.TypeLonglong, 21, 0, nil, nil}, + {"DATA_FREE", mysql.TypeLonglong, 21, 0, nil, nil}, + {"CREATE_TIME", mysql.TypeDatetime, 0, 0, nil, nil}, + {"UPDATE_TIME", mysql.TypeDatetime, 0, 0, nil, nil}, + {"CHECK_TIME", mysql.TypeDatetime, 0, 0, nil, nil}, + {"CHECKSUM", mysql.TypeLonglong, 21, 0, nil, nil}, + {"PARTITION_COMMENT", mysql.TypeVarchar, 80, 0, nil, nil}, + {"NODEGROUP", mysql.TypeVarchar, 12, 0, nil, nil}, + {"TABLESPACE_NAME", mysql.TypeVarchar, 64, 0, nil, nil}, +} + +var tableConstraintsCols = []columnInfo{ + {"CONSTRAINT_CATALOG", mysql.TypeVarchar, 512, 0, nil, nil}, + {"CONSTRAINT_SCHEMA", mysql.TypeVarchar, 64, 0, nil, nil}, + {"CONSTRAINT_NAME", mysql.TypeVarchar, 64, 0, nil, nil}, + {"TABLE_SCHEMA", mysql.TypeVarchar, 64, 0, nil, nil}, + {"TABLE_NAME", mysql.TypeVarchar, 64, 0, nil, nil}, + {"CONSTRAINT_TYPE", mysql.TypeVarchar, 64, 0, nil, nil}, +} + +var tableTriggersCols = []columnInfo{ + {"TRIGGER_CATALOG", mysql.TypeVarchar, 512, 0, nil, nil}, + {"TRIGGER_SCHEMA", mysql.TypeVarchar, 64, 0, nil, nil}, + {"TRIGGER_NAME", mysql.TypeVarchar, 64, 0, nil, nil}, + {"EVENT_MANIPULATION", mysql.TypeVarchar, 6, 0, nil, nil}, + {"EVENT_OBJECT_CATALOG", mysql.TypeVarchar, 512, 0, nil, nil}, + {"EVENT_OBJECT_SCHEMA", mysql.TypeVarchar, 64, 0, nil, nil}, + {"EVENT_OBJECT_TABLE", mysql.TypeVarchar, 64, 0, nil, nil}, + {"ACTION_ORDER", mysql.TypeLonglong, 4, 0, nil, nil}, + {"ACTION_CONDITION", mysql.TypeBlob, -1, 0, nil, nil}, + {"ACTION_STATEMENT", mysql.TypeBlob, -1, 0, nil, nil}, + {"ACTION_ORIENTATION", mysql.TypeVarchar, 9, 0, nil, nil}, + {"ACTION_TIMING", mysql.TypeVarchar, 6, 0, nil, nil}, + {"ACTION_REFERENCE_OLD_TABLE", mysql.TypeVarchar, 64, 0, nil, nil}, + {"ACTION_REFERENCE_NEW_TABLE", mysql.TypeVarchar, 64, 0, nil, nil}, + {"ACTION_REFERENCE_OLD_ROW", mysql.TypeVarchar, 3, 0, nil, nil}, + {"ACTION_REFERENCE_NEW_ROW", mysql.TypeVarchar, 3, 0, nil, nil}, + {"CREATED", mysql.TypeDatetime, 2, 0, nil, nil}, + {"SQL_MODE", mysql.TypeVarchar, 8192, 0, nil, nil}, + {"DEFINER", mysql.TypeVarchar, 77, 0, nil, nil}, + {"CHARACTER_SET_CLIENT", mysql.TypeVarchar, 32, 0, nil, nil}, + {"COLLATION_CONNECTION", mysql.TypeVarchar, 32, 0, nil, nil}, + {"DATABASE_COLLATION", mysql.TypeVarchar, 32, 0, nil, nil}, +} + +var tableUserPrivilegesCols = []columnInfo{ + {"GRANTEE", mysql.TypeVarchar, 81, 0, nil, nil}, + {"TABLE_CATALOG", mysql.TypeVarchar, 512, 0, nil, nil}, + {"PRIVILEGE_TYPE", mysql.TypeVarchar, 64, 0, nil, nil}, + {"IS_GRANTABLE", mysql.TypeVarchar, 3, 0, nil, nil}, +} + +var 
tableSchemaPrivilegesCols = []columnInfo{ + {"GRANTEE", mysql.TypeVarchar, 81, mysql.NotNullFlag, nil, nil}, + {"TABLE_CATALOG", mysql.TypeVarchar, 512, mysql.NotNullFlag, nil, nil}, + {"TABLE_SCHEMA", mysql.TypeVarchar, 64, mysql.NotNullFlag, nil, nil}, + {"PRIVILEGE_TYPE", mysql.TypeVarchar, 64, mysql.NotNullFlag, nil, nil}, + {"IS_GRANTABLE", mysql.TypeVarchar, 3, mysql.NotNullFlag, nil, nil}, +} + +var tableTablePrivilegesCols = []columnInfo{ + {"GRANTEE", mysql.TypeVarchar, 81, mysql.NotNullFlag, nil, nil}, + {"TABLE_CATALOG", mysql.TypeVarchar, 512, mysql.NotNullFlag, nil, nil}, + {"TABLE_SCHEMA", mysql.TypeVarchar, 64, mysql.NotNullFlag, nil, nil}, + {"TABLE_NAME", mysql.TypeVarchar, 64, mysql.NotNullFlag, nil, nil}, + {"PRIVILEGE_TYPE", mysql.TypeVarchar, 64, mysql.NotNullFlag, nil, nil}, + {"IS_GRANTABLE", mysql.TypeVarchar, 3, mysql.NotNullFlag, nil, nil}, +} + +var tableColumnPrivilegesCols = []columnInfo{ + {"GRANTEE", mysql.TypeVarchar, 81, mysql.NotNullFlag, nil, nil}, + {"TABLE_CATALOG", mysql.TypeVarchar, 512, mysql.NotNullFlag, nil, nil}, + {"TABLE_SCHEMA", mysql.TypeVarchar, 64, mysql.NotNullFlag, nil, nil}, + {"TABLE_NAME", mysql.TypeVarchar, 64, mysql.NotNullFlag, nil, nil}, + {"COLUMN_NAME", mysql.TypeVarchar, 64, mysql.NotNullFlag, nil, nil}, + {"PRIVILEGE_TYPE", mysql.TypeVarchar, 64, mysql.NotNullFlag, nil, nil}, + {"IS_GRANTABLE", mysql.TypeVarchar, 3, mysql.NotNullFlag, nil, nil}, +} + +var tableEnginesCols = []columnInfo{ + {"ENGINE", mysql.TypeVarchar, 64, 0, nil, nil}, + {"SUPPORT", mysql.TypeVarchar, 8, 0, nil, nil}, + {"COMMENT", mysql.TypeVarchar, 80, 0, nil, nil}, + {"TRANSACTIONS", mysql.TypeVarchar, 3, 0, nil, nil}, + {"XA", mysql.TypeVarchar, 3, 0, nil, nil}, + {"SAVEPOINTS", mysql.TypeVarchar, 3, 0, nil, nil}, +} + +var tableRoutinesCols = []columnInfo{ + {"SPECIFIC_NAME", mysql.TypeVarchar, 64, mysql.NotNullFlag, nil, nil}, + {"ROUTINE_CATALOG", mysql.TypeVarchar, 512, mysql.NotNullFlag, nil, nil}, + {"ROUTINE_SCHEMA", mysql.TypeVarchar, 64, mysql.NotNullFlag, nil, nil}, + {"ROUTINE_NAME", mysql.TypeVarchar, 64, mysql.NotNullFlag, nil, nil}, + {"ROUTINE_TYPE", mysql.TypeVarchar, 9, mysql.NotNullFlag, nil, nil}, + {"DATA_TYPE", mysql.TypeVarchar, 64, mysql.NotNullFlag, nil, nil}, + {"CHARACTER_MAXIMUM_LENGTH", mysql.TypeLong, 21, 0, nil, nil}, + {"CHARACTER_OCTET_LENGTH", mysql.TypeLong, 21, 0, nil, nil}, + {"NUMERIC_PRECISION", mysql.TypeLonglong, 21, 0, nil, nil}, + {"NUMERIC_SCALE", mysql.TypeLong, 21, 0, nil, nil}, + {"DATETIME_PRECISION", mysql.TypeLonglong, 21, 0, nil, nil}, + {"CHARACTER_SET_NAME", mysql.TypeVarchar, 64, 0, nil, nil}, + {"COLLATION_NAME", mysql.TypeVarchar, 64, 0, nil, nil}, + {"DTD_IDENTIFIER", mysql.TypeLongBlob, 0, 0, nil, nil}, + {"ROUTINE_BODY", mysql.TypeVarchar, 8, mysql.NotNullFlag, nil, nil}, + {"ROUTINE_DEFINITION", mysql.TypeLongBlob, 0, 0, nil, nil}, + {"EXTERNAL_NAME", mysql.TypeVarchar, 64, 0, nil, nil}, + {"EXTERNAL_LANGUAGE", mysql.TypeVarchar, 64, 0, nil, nil}, + {"PARAMETER_STYLE", mysql.TypeVarchar, 8, mysql.NotNullFlag, nil, nil}, + {"IS_DETERMINISTIC", mysql.TypeVarchar, 3, mysql.NotNullFlag, nil, nil}, + {"SQL_DATA_ACCESS", mysql.TypeVarchar, 64, mysql.NotNullFlag, nil, nil}, + {"SQL_PATH", mysql.TypeVarchar, 64, 0, nil, nil}, + {"SECURITY_TYPE", mysql.TypeVarchar, 7, mysql.NotNullFlag, nil, nil}, + {"CREATED", mysql.TypeDatetime, 0, mysql.NotNullFlag, "0000-00-00 00:00:00", nil}, + {"LAST_ALTERED", mysql.TypeDatetime, 0, mysql.NotNullFlag, "0000-00-00 00:00:00", nil}, + {"SQL_MODE", mysql.TypeVarchar, 8192, 
mysql.NotNullFlag, nil, nil}, + {"ROUTINE_COMMENT", mysql.TypeLongBlob, 0, 0, nil, nil}, + {"DEFINER", mysql.TypeVarchar, 77, mysql.NotNullFlag, nil, nil}, + {"CHARACTER_SET_CLIENT", mysql.TypeVarchar, 32, mysql.NotNullFlag, nil, nil}, + {"COLLATION_CONNECTION", mysql.TypeVarchar, 32, mysql.NotNullFlag, nil, nil}, + {"DATABASE_COLLATION", mysql.TypeVarchar, 32, mysql.NotNullFlag, nil, nil}, +} + +var tableParametersCols = []columnInfo{ + {"SPECIFIC_CATALOG", mysql.TypeVarchar, 512, mysql.NotNullFlag, nil, nil}, + {"SPECIFIC_SCHEMA", mysql.TypeVarchar, 64, mysql.NotNullFlag, nil, nil}, + {"SPECIFIC_NAME", mysql.TypeVarchar, 64, mysql.NotNullFlag, nil, nil}, + {"ORDINAL_POSITION", mysql.TypeVarchar, 21, mysql.NotNullFlag, nil, nil}, + {"PARAMETER_MODE", mysql.TypeVarchar, 5, 0, nil, nil}, + {"PARAMETER_NAME", mysql.TypeVarchar, 64, 0, nil, nil}, + {"DATA_TYPE", mysql.TypeVarchar, 64, mysql.NotNullFlag, nil, nil}, + {"CHARACTER_MAXIMUM_LENGTH", mysql.TypeVarchar, 21, 0, nil, nil}, + {"CHARACTER_OCTET_LENGTH", mysql.TypeVarchar, 21, 0, nil, nil}, + {"NUMERIC_PRECISION", mysql.TypeVarchar, 21, 0, nil, nil}, + {"NUMERIC_SCALE", mysql.TypeVarchar, 21, 0, nil, nil}, + {"DATETIME_PRECISION", mysql.TypeVarchar, 21, 0, nil, nil}, + {"CHARACTER_SET_NAME", mysql.TypeVarchar, 64, 0, nil, nil}, + {"COLLATION_NAME", mysql.TypeVarchar, 64, 0, nil, nil}, + {"DTD_IDENTIFIER", mysql.TypeLongBlob, 0, mysql.NotNullFlag, nil, nil}, + {"ROUTINE_TYPE", mysql.TypeVarchar, 9, mysql.NotNullFlag, nil, nil}, +} + +var tableEventsCols = []columnInfo{ + {"EVENT_CATALOG", mysql.TypeVarchar, 64, mysql.NotNullFlag, nil, nil}, + {"EVENT_SCHEMA", mysql.TypeVarchar, 64, mysql.NotNullFlag, nil, nil}, + {"EVENT_NAME", mysql.TypeVarchar, 64, mysql.NotNullFlag, nil, nil}, + {"DEFINER", mysql.TypeVarchar, 77, mysql.NotNullFlag, nil, nil}, + {"TIME_ZONE", mysql.TypeVarchar, 64, mysql.NotNullFlag, nil, nil}, + {"EVENT_BODY", mysql.TypeVarchar, 8, mysql.NotNullFlag, nil, nil}, + {"EVENT_DEFINITION", mysql.TypeLongBlob, 0, 0, nil, nil}, + {"EVENT_TYPE", mysql.TypeVarchar, 9, mysql.NotNullFlag, nil, nil}, + {"EXECUTE_AT", mysql.TypeDatetime, 0, 0, nil, nil}, + {"INTERVAL_VALUE", mysql.TypeVarchar, 256, 0, nil, nil}, + {"INTERVAL_FIELD", mysql.TypeVarchar, 18, 0, nil, nil}, + {"SQL_MODE", mysql.TypeVarchar, 8192, mysql.NotNullFlag, nil, nil}, + {"STARTS", mysql.TypeDatetime, 0, 0, nil, nil}, + {"ENDS", mysql.TypeDatetime, 0, 0, nil, nil}, + {"STATUS", mysql.TypeVarchar, 18, mysql.NotNullFlag, nil, nil}, + {"ON_COMPLETION", mysql.TypeVarchar, 12, mysql.NotNullFlag, nil, nil}, + {"CREATED", mysql.TypeDatetime, 0, mysql.NotNullFlag, "0000-00-00 00:00:00", nil}, + {"LAST_ALTERED", mysql.TypeDatetime, 0, mysql.NotNullFlag, "0000-00-00 00:00:00", nil}, + {"LAST_EXECUTED", mysql.TypeDatetime, 0, 0, nil, nil}, + {"EVENT_COMMENT", mysql.TypeVarchar, 64, mysql.NotNullFlag, nil, nil}, + {"ORIGINATOR", mysql.TypeLong, 10, mysql.NotNullFlag, 0, nil}, + {"CHARACTER_SET_CLIENT", mysql.TypeVarchar, 32, mysql.NotNullFlag, nil, nil}, + {"COLLATION_CONNECTION", mysql.TypeVarchar, 32, mysql.NotNullFlag, nil, nil}, + {"DATABASE_COLLATION", mysql.TypeVarchar, 32, mysql.NotNullFlag, nil, nil}, +} + +var tableGlobalStatusCols = []columnInfo{ + {"VARIABLE_NAME", mysql.TypeVarchar, 64, mysql.NotNullFlag, nil, nil}, + {"VARIABLE_VALUE", mysql.TypeVarchar, 1024, 0, nil, nil}, +} + +var tableGlobalVariablesCols = []columnInfo{ + {"VARIABLE_NAME", mysql.TypeVarchar, 64, mysql.NotNullFlag, nil, nil}, + {"VARIABLE_VALUE", mysql.TypeVarchar, 1024, 0, nil, nil}, +} + 
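As a reading aid, here is a short, self-contained Go sketch (not part of this patch). The columnInfo literals above are positional; the field names used in the sketch (name, type, display size, flags, default value, enum elements) are assumptions mirroring that order, and the sketch shows how one entry reads as a column definition.

package main

import "fmt"

// colSketch mirrors the assumed field order of the columnInfo literals above:
// name, type, display size, flags, default value, enum elements.
type colSketch struct {
	name  string
	tp    string // the real code stores a mysql type byte such as mysql.TypeVarchar
	size  int
	flag  uint // the real code uses flag constants such as mysql.NotNullFlag
	deflt interface{}
	elems []string
}

func (c colSketch) String() string {
	s := fmt.Sprintf("%s %s(%d)", c.name, c.tp, c.size)
	if c.flag != 0 {
		s += " NOT NULL"
	}
	return s
}

func main() {
	// Corresponds to {"VARIABLE_NAME", mysql.TypeVarchar, 64, mysql.NotNullFlag, nil, nil}.
	fmt.Println(colSketch{name: "VARIABLE_NAME", tp: "varchar", size: 64, flag: 1})
	// Output: VARIABLE_NAME varchar(64) NOT NULL
}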
+var tableSessionStatusCols = []columnInfo{ + {"VARIABLE_NAME", mysql.TypeVarchar, 64, mysql.NotNullFlag, nil, nil}, + {"VARIABLE_VALUE", mysql.TypeVarchar, 1024, 0, nil, nil}, +} + +var tableOptimizerTraceCols = []columnInfo{ + {"QUERY", mysql.TypeLongBlob, 0, mysql.NotNullFlag, "", nil}, + {"TRACE", mysql.TypeLongBlob, 0, mysql.NotNullFlag, "", nil}, + {"MISSING_BYTES_BEYOND_MAX_MEM_SIZE", mysql.TypeShort, 20, mysql.NotNullFlag, 0, nil}, + {"INSUFFICIENT_PRIVILEGES", mysql.TypeTiny, 1, mysql.NotNullFlag, 0, nil}, +} + +var tableTableSpacesCols = []columnInfo{ + {"TABLESPACE_NAME", mysql.TypeVarchar, 64, mysql.NotNullFlag, "", nil}, + {"ENGINE", mysql.TypeVarchar, 64, mysql.NotNullFlag, "", nil}, + {"TABLESPACE_TYPE", mysql.TypeVarchar, 64, 0, nil, nil}, + {"LOGFILE_GROUP_NAME", mysql.TypeVarchar, 64, 0, nil, nil}, + {"EXTENT_SIZE", mysql.TypeLonglong, 21, 0, nil, nil}, + {"AUTOEXTEND_SIZE", mysql.TypeLonglong, 21, 0, nil, nil}, + {"MAXIMUM_SIZE", mysql.TypeLonglong, 21, 0, nil, nil}, + {"NODEGROUP_ID", mysql.TypeLonglong, 21, 0, nil, nil}, + {"TABLESPACE_COMMENT", mysql.TypeVarchar, 2048, 0, nil, nil}, +} + +var tableCollationCharacterSetApplicabilityCols = []columnInfo{ + {"COLLATION_NAME", mysql.TypeVarchar, 32, mysql.NotNullFlag, nil, nil}, + {"CHARACTER_SET_NAME", mysql.TypeVarchar, 32, mysql.NotNullFlag, nil, nil}, +} + +func dataForCharacterSets() (records [][]types.Datum) { + + charsets := charset.GetSupportedCharsets() + + for _, charset := range charsets { + + records = append(records, + types.MakeDatums(charset.Name, charset.DefaultCollation, charset.Desc, charset.Maxlen), + ) + + } + + return records + +} + +func dataForCollations() (records [][]types.Datum) { + + collations := charset.GetSupportedCollations() + + for _, collation := range collations { + + isDefault := "" + if collation.IsDefault { + isDefault = "Yes" + } + + records = append(records, + types.MakeDatums(collation.Name, collation.CharsetName, collation.ID, isDefault, "Yes", 1), + ) + + } + + return records + +} + +func dataForCollationCharacterSetApplicability() (records [][]types.Datum) { + + collations := charset.GetSupportedCollations() + + for _, collation := range collations { + + records = append(records, + types.MakeDatums(collation.Name, collation.CharsetName), + ) + + } + + return records + +} + +func dataForSessionVar(ctx sessionctx.Context) (records [][]types.Datum, err error) { + sessionVars := ctx.GetSessionVars() + for _, v := range variable.SysVars { + var value string + value, err = variable.GetSessionSystemVar(sessionVars, v.Name) + if err != nil { + return nil, err + } + row := types.MakeDatums(v.Name, value) + records = append(records, row) + } + return +} + +func dataForUserPrivileges(ctx sessionctx.Context) [][]types.Datum { + return [][]types.Datum{} +} + +func dataForEngines() (records [][]types.Datum) { + records = append(records, + types.MakeDatums( + "InnoDB", // Engine + "DEFAULT", // Support + "Supports transactions, row-level locking, and foreign keys", // Comment + "YES", // Transactions + "YES", // XA + "YES", // Savepoints + ), + ) + return records +} + +var filesCols = []columnInfo{ + {"FILE_ID", mysql.TypeLonglong, 4, 0, nil, nil}, + {"FILE_NAME", mysql.TypeVarchar, 4000, 0, nil, nil}, + {"FILE_TYPE", mysql.TypeVarchar, 20, 0, nil, nil}, + {"TABLESPACE_NAME", mysql.TypeVarchar, 64, 0, nil, nil}, + {"TABLE_CATALOG", mysql.TypeVarchar, 64, 0, nil, nil}, + {"TABLE_SCHEMA", mysql.TypeVarchar, 64, 0, nil, nil}, + {"TABLE_NAME", mysql.TypeVarchar, 64, 0, nil, nil}, + 
{"LOGFILE_GROUP_NAME", mysql.TypeVarchar, 64, 0, nil, nil}, + {"LOGFILE_GROUP_NUMBER", mysql.TypeLonglong, 32, 0, nil, nil}, + {"ENGINE", mysql.TypeVarchar, 64, 0, nil, nil}, + {"FULLTEXT_KEYS", mysql.TypeVarchar, 64, 0, nil, nil}, + {"DELETED_ROWS", mysql.TypeLonglong, 4, 0, nil, nil}, + {"UPDATE_COUNT", mysql.TypeLonglong, 4, 0, nil, nil}, + {"FREE_EXTENTS", mysql.TypeLonglong, 4, 0, nil, nil}, + {"TOTAL_EXTENTS", mysql.TypeLonglong, 4, 0, nil, nil}, + {"EXTENT_SIZE", mysql.TypeLonglong, 4, 0, nil, nil}, + {"INITIAL_SIZE", mysql.TypeLonglong, 21, 0, nil, nil}, + {"MAXIMUM_SIZE", mysql.TypeLonglong, 21, 0, nil, nil}, + {"AUTOEXTEND_SIZE", mysql.TypeLonglong, 21, 0, nil, nil}, + {"CREATION_TIME", mysql.TypeDatetime, -1, 0, nil, nil}, + {"LAST_UPDATE_TIME", mysql.TypeDatetime, -1, 0, nil, nil}, + {"LAST_ACCESS_TIME", mysql.TypeDatetime, -1, 0, nil, nil}, + {"RECOVER_TIME", mysql.TypeLonglong, 4, 0, nil, nil}, + {"TRANSACTION_COUNTER", mysql.TypeLonglong, 4, 0, nil, nil}, + {"VERSION", mysql.TypeLonglong, 21, 0, nil, nil}, + {"ROW_FORMAT", mysql.TypeVarchar, 10, 0, nil, nil}, + {"TABLE_ROWS", mysql.TypeLonglong, 21, 0, nil, nil}, + {"AVG_ROW_LENGTH", mysql.TypeLonglong, 21, 0, nil, nil}, + {"DATA_LENGTH", mysql.TypeLonglong, 21, 0, nil, nil}, + {"MAX_DATA_LENGTH", mysql.TypeLonglong, 21, 0, nil, nil}, + {"INDEX_LENGTH", mysql.TypeLonglong, 21, 0, nil, nil}, + {"DATA_FREE", mysql.TypeLonglong, 21, 0, nil, nil}, + {"CREATE_TIME", mysql.TypeDatetime, -1, 0, nil, nil}, + {"UPDATE_TIME", mysql.TypeDatetime, -1, 0, nil, nil}, + {"CHECK_TIME", mysql.TypeDatetime, -1, 0, nil, nil}, + {"CHECKSUM", mysql.TypeLonglong, 21, 0, nil, nil}, + {"STATUS", mysql.TypeVarchar, 20, 0, nil, nil}, + {"EXTRA", mysql.TypeVarchar, 255, 0, nil, nil}, +} + +func dataForSchemata(ctx sessionctx.Context, schemas []*model.DBInfo) [][]types.Datum { + rows := make([][]types.Datum, 0, len(schemas)) + + for _, schema := range schemas { + + charset := mysql.DefaultCharset + collation := mysql.DefaultCollationName + + if len(schema.Charset) > 0 { + charset = schema.Charset // Overwrite default + } + + if len(schema.Collate) > 0 { + collation = schema.Collate // Overwrite default + } + + record := types.MakeDatums( + catalogVal, // CATALOG_NAME + schema.Name.O, // SCHEMA_NAME + charset, // DEFAULT_CHARACTER_SET_NAME + collation, // DEFAULT_COLLATION_NAME + nil, + ) + rows = append(rows, record) + } + return rows +} + +func getRowCountAllTable(ctx sessionctx.Context) (map[int64]uint64, error) { + rows, _, err := ctx.(sqlexec.RestrictedSQLExecutor).ExecRestrictedSQL("select table_id, count from mysql.stats_meta") + if err != nil { + return nil, err + } + rowCountMap := make(map[int64]uint64, len(rows)) + for _, row := range rows { + tableID := row.GetInt64(0) + rowCnt := row.GetUint64(1) + rowCountMap[tableID] = rowCnt + } + return rowCountMap, nil +} + +type tableHistID struct { + tableID int64 + histID int64 +} + +func getColLengthAllTables(ctx sessionctx.Context) (map[tableHistID]uint64, error) { + rows, _, err := ctx.(sqlexec.RestrictedSQLExecutor).ExecRestrictedSQL("select table_id, hist_id, tot_col_size from mysql.stats_histograms where is_index = 0") + if err != nil { + return nil, err + } + colLengthMap := make(map[tableHistID]uint64, len(rows)) + for _, row := range rows { + tableID := row.GetInt64(0) + histID := row.GetInt64(1) + totalSize := row.GetInt64(2) + if totalSize < 0 { + totalSize = 0 + } + colLengthMap[tableHistID{tableID: tableID, histID: histID}] = uint64(totalSize) + } + return colLengthMap, nil +} + +func 
getDataAndIndexLength(info *model.TableInfo, physicalID int64, rowCount uint64, columnLengthMap map[tableHistID]uint64) (uint64, uint64) { + columnLength := make(map[string]uint64) + for _, col := range info.Columns { + if col.State != model.StatePublic { + continue + } + length := col.FieldType.StorageLength() + if length != types.VarStorageLen { + columnLength[col.Name.L] = rowCount * uint64(length) + } else { + length := columnLengthMap[tableHistID{tableID: physicalID, histID: col.ID}] + columnLength[col.Name.L] = length + } + } + dataLength, indexLength := uint64(0), uint64(0) + for _, length := range columnLength { + dataLength += length + } + for _, idx := range info.Indices { + if idx.State != model.StatePublic { + continue + } + for _, col := range idx.Columns { + if col.Length == types.UnspecifiedLength { + indexLength += columnLength[col.Name.L] + } else { + indexLength += rowCount * uint64(col.Length) + } + } + } + return dataLength, indexLength +} + +type statsCache struct { + mu sync.Mutex + loading bool + modifyTime time.Time + tableRows map[int64]uint64 + colLength map[tableHistID]uint64 +} + +var tableStatsCache = &statsCache{} + +// TableStatsCacheExpiry is the expiry time for table stats cache. +var TableStatsCacheExpiry = 3 * time.Second + +func (c *statsCache) setLoading(loading bool) { + c.mu.Lock() + c.loading = loading + c.mu.Unlock() +} + +func (c *statsCache) get(ctx sessionctx.Context) (map[int64]uint64, map[tableHistID]uint64, error) { + c.mu.Lock() + if time.Since(c.modifyTime) < TableStatsCacheExpiry || c.loading { + tableRows, colLength := c.tableRows, c.colLength + c.mu.Unlock() + return tableRows, colLength, nil + } + c.loading = true + c.mu.Unlock() + + tableRows, err := getRowCountAllTable(ctx) + if err != nil { + c.setLoading(false) + return nil, nil, err + } + colLength, err := getColLengthAllTables(ctx) + if err != nil { + c.setLoading(false) + return nil, nil, err + } + + c.mu.Lock() + c.loading = false + c.tableRows = tableRows + c.colLength = colLength + c.modifyTime = time.Now() + c.mu.Unlock() + return tableRows, colLength, nil +} + +func getAutoIncrementID(ctx sessionctx.Context, schema *model.DBInfo, tblInfo *model.TableInfo) (int64, error) { + is := ctx.GetSessionVars().TxnCtx.InfoSchema.(InfoSchema) + tbl, err := is.TableByName(schema.Name, tblInfo.Name) + if err != nil { + return 0, err + } + return tbl.Allocator(ctx).Base() + 1, nil +} + +func dataForTables(ctx sessionctx.Context, schemas []*model.DBInfo) ([][]types.Datum, error) { + tableRowsMap, colLengthMap, err := tableStatsCache.get(ctx) + if err != nil { + return nil, err + } + + var rows [][]types.Datum + for _, schema := range schemas { + for _, table := range schema.Tables { + collation := table.Collate + if collation == "" { + collation = mysql.DefaultCollationName + } + + createOptions := "" + + var autoIncID interface{} + hasAutoIncID, _ := HasAutoIncrementColumn(table) + if hasAutoIncID { + autoIncID, err = getAutoIncrementID(ctx, schema, table) + if err != nil { + return nil, err + } + } + + var rowCount, dataLength, indexLength uint64 + rowCount = tableRowsMap[table.ID] + dataLength, indexLength = getDataAndIndexLength(table, table.ID, rowCount, colLengthMap) + avgRowLength := uint64(0) + if rowCount != 0 { + avgRowLength = dataLength / rowCount + } + + shardingInfo := GetShardingInfo(schema, table) + record := types.MakeDatums( + catalogVal, // TABLE_CATALOG + schema.Name.O, // TABLE_SCHEMA + table.Name.O, // TABLE_NAME + "BASE TABLE", // TABLE_TYPE + "InnoDB", // ENGINE + 
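The statsCache above pairs a fixed expiry window (TableStatsCacheExpiry) with a loading flag, so concurrent readers keep serving the stale maps while a single caller refreshes them. Below is a generalized sketch of that pattern, not part of this patch; the cachedCounts type and its fields are hypothetical.

package statscachedemo

import (
	"sync"
	"time"
)

// cachedCounts sketches the pattern used by statsCache above: a value is reused
// for a fixed expiry window, and the loading flag lets concurrent readers keep
// the stale copy while a single goroutine refreshes it.
type cachedCounts struct {
	mu         sync.Mutex
	loading    bool
	modifyTime time.Time
	rows       map[int64]uint64
}

func (c *cachedCounts) get(expiry time.Duration, reload func() (map[int64]uint64, error)) (map[int64]uint64, error) {
	c.mu.Lock()
	if time.Since(c.modifyTime) < expiry || c.loading {
		rows := c.rows
		c.mu.Unlock()
		return rows, nil // serve the cached (possibly stale) value
	}
	c.loading = true
	c.mu.Unlock()

	rows, err := reload() // only one caller reaches here at a time
	c.mu.Lock()
	c.loading = false
	if err == nil {
		c.rows = rows
		c.modifyTime = time.Now()
	}
	c.mu.Unlock()
	return rows, err
}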
uint64(10), // VERSION
+ "Compact", // ROW_FORMAT
+ rowCount, // TABLE_ROWS
+ avgRowLength, // AVG_ROW_LENGTH
+ dataLength, // DATA_LENGTH
+ uint64(0), // MAX_DATA_LENGTH
+ indexLength, // INDEX_LENGTH
+ uint64(0), // DATA_FREE
+ autoIncID, // AUTO_INCREMENT
+ nil, // CREATE_TIME
+ nil, // UPDATE_TIME
+ nil, // CHECK_TIME
+ collation, // TABLE_COLLATION
+ nil, // CHECKSUM
+ createOptions, // CREATE_OPTIONS
+ table.Comment, // TABLE_COMMENT
+ table.ID, // TIDB_TABLE_ID
+ shardingInfo, // TIDB_ROW_ID_SHARDING_INFO
+ )
+ rows = append(rows, record)
+ }
+ }
+ return rows, nil
+}
+
+// GetShardingInfo returns nil or a description string for the sharding information of the given TableInfo.
+// The returned description string may be:
+// - "NOT_SHARDED": for tables where SHARD_ROW_ID_BITS is not specified.
+// - "NOT_SHARDED(PK_IS_HANDLE)": for tables whose primary key is the row ID.
+// - "SHARD_BITS={bit_number}": for tables with SHARD_ROW_ID_BITS specified.
+// A nil result indicates that sharding information is not applicable to the table (for example, when the table is a view).
+// This function is exported for unit tests.
+func GetShardingInfo(dbInfo *model.DBInfo, tableInfo *model.TableInfo) interface{} {
+ if dbInfo == nil || tableInfo == nil || util.IsMemOrSysDB(dbInfo.Name.L) {
+ return nil
+ }
+ shardingInfo := "NOT_SHARDED"
+ if tableInfo.PKIsHandle {
+ shardingInfo = "NOT_SHARDED(PK_IS_HANDLE)"
+ } else {
+ if tableInfo.ShardRowIDBits > 0 {
+ shardingInfo = "SHARD_BITS=" + strconv.Itoa(int(tableInfo.ShardRowIDBits))
+ }
+ }
+ return shardingInfo
+}
+
+func dataForColumns(ctx sessionctx.Context, schemas []*model.DBInfo) [][]types.Datum {
+ var rows [][]types.Datum
+ for _, schema := range schemas {
+ for _, table := range schema.Tables {
+ rs := dataForColumnsInTable(schema, table)
+ rows = append(rows, rs...)
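As an aside on GetShardingInfo above, here is a minimal runnable sketch (not part of this patch) of the values it reports for a few hand-built TableInfo structs; the import paths follow the ones used by the test files in this patch.

package main

import (
	"fmt"

	"github.com/pingcap/tidb/infoschema"
	"github.com/pingcap/tidb/parser/model"
)

func main() {
	db := &model.DBInfo{Name: model.NewCIStr("test")}

	plain := &model.TableInfo{Name: model.NewCIStr("t1")}
	pkHandle := &model.TableInfo{Name: model.NewCIStr("t2"), PKIsHandle: true}
	sharded := &model.TableInfo{Name: model.NewCIStr("t3"), ShardRowIDBits: 4}

	fmt.Println(infoschema.GetShardingInfo(db, plain))    // NOT_SHARDED
	fmt.Println(infoschema.GetShardingInfo(db, pkHandle)) // NOT_SHARDED(PK_IS_HANDLE)
	fmt.Println(infoschema.GetShardingInfo(db, sharded))  // SHARD_BITS=4
	fmt.Println(infoschema.GetShardingInfo(nil, plain))   // <nil>, not applicable
}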
+ } + } + return rows +} + +func dataForColumnsInTable(schema *model.DBInfo, tbl *model.TableInfo) [][]types.Datum { + rows := make([][]types.Datum, 0, len(tbl.Columns)) + for i, col := range tbl.Columns { + if col.Hidden { + continue + } + var charMaxLen, charOctLen, numericPrecision, numericScale, datetimePrecision interface{} + colLen, decimal := col.Flen, col.Decimal + defaultFlen, defaultDecimal := mysql.GetDefaultFieldLengthAndDecimal(col.Tp) + if decimal == types.UnspecifiedLength { + decimal = defaultDecimal + } + if colLen == types.UnspecifiedLength { + colLen = defaultFlen + } + if col.Tp == mysql.TypeSet { + // Example: In MySQL set('a','bc','def','ghij') has length 13, because + // len('a')+len('bc')+len('def')+len('ghij')+len(ThreeComma)=13 + // Reference link: https://bugs.mysql.com/bug.php?id=22613 + colLen = 0 + for _, ele := range col.Elems { + colLen += len(ele) + } + if len(col.Elems) != 0 { + colLen += (len(col.Elems) - 1) + } + charMaxLen = colLen + charOctLen = colLen + } else if col.Tp == mysql.TypeEnum { + // Example: In MySQL enum('a', 'ab', 'cdef') has length 4, because + // the longest string in the enum is 'cdef' + // Reference link: https://bugs.mysql.com/bug.php?id=22613 + colLen = 0 + for _, ele := range col.Elems { + if len(ele) > colLen { + colLen = len(ele) + } + } + charMaxLen = colLen + charOctLen = colLen + } else if types.IsString(col.Tp) { + charMaxLen = colLen + charOctLen = colLen + } else if types.IsTypeNumeric(col.Tp) { + numericPrecision = colLen + if col.Tp != mysql.TypeFloat && col.Tp != mysql.TypeDouble { + numericScale = decimal + } else if decimal != -1 { + numericScale = decimal + } + } + columnType := col.FieldType.InfoSchemaStr() + columnDesc := table.NewColDesc(table.ToColumn(col)) + var columnDefault interface{} + if columnDesc.DefaultValue != nil { + columnDefault = fmt.Sprintf("%v", columnDesc.DefaultValue) + } + record := types.MakeDatums( + catalogVal, // TABLE_CATALOG + schema.Name.O, // TABLE_SCHEMA + tbl.Name.O, // TABLE_NAME + col.Name.O, // COLUMN_NAME + i+1, // ORIGINAL_POSITION + columnDefault, // COLUMN_DEFAULT + columnDesc.Null, // IS_NULLABLE + types.TypeToStr(col.Tp, col.Charset), // DATA_TYPE + charMaxLen, // CHARACTER_MAXIMUM_LENGTH + charOctLen, // CHARACTER_OCTET_LENGTH + numericPrecision, // NUMERIC_PRECISION + numericScale, // NUMERIC_SCALE + datetimePrecision, // DATETIME_PRECISION + columnDesc.Charset, // CHARACTER_SET_NAME + columnDesc.Collation, // COLLATION_NAME + columnType, // COLUMN_TYPE + columnDesc.Key, // COLUMN_KEY + columnDesc.Extra, // EXTRA + "select,insert,update,references", // PRIVILEGES + columnDesc.Comment, // COLUMN_COMMENT + "", // GENERATION_EXPRESSION + ) + rows = append(rows, record) + } + return rows +} + +func dataForStatistics(ctx sessionctx.Context, schemas []*model.DBInfo) [][]types.Datum { + var rows [][]types.Datum + for _, schema := range schemas { + for _, table := range schema.Tables { + rs := dataForStatisticsInTable(schema, table) + rows = append(rows, rs...) 
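As a side note on the SET and ENUM length rules in dataForColumnsInTable above, this small self-contained sketch (not part of this patch) reproduces the same arithmetic for the example from the referenced MySQL bug report.

package main

import "fmt"

// setDisplayLength sums the element lengths plus the commas between them,
// matching the SET branch in dataForColumnsInTable above.
func setDisplayLength(elems []string) int {
	l := 0
	for _, e := range elems {
		l += len(e)
	}
	if len(elems) > 0 {
		l += len(elems) - 1
	}
	return l
}

// enumDisplayLength is the length of the longest element, matching the ENUM branch.
func enumDisplayLength(elems []string) int {
	l := 0
	for _, e := range elems {
		if len(e) > l {
			l = len(e)
		}
	}
	return l
}

func main() {
	elems := []string{"a", "bc", "def", "ghij"}
	fmt.Println(setDisplayLength(elems))  // 13, as in the referenced bug report
	fmt.Println(enumDisplayLength(elems)) // 4
}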
+ } + } + return rows +} + +func dataForStatisticsInTable(schema *model.DBInfo, table *model.TableInfo) [][]types.Datum { + var rows [][]types.Datum + if table.PKIsHandle { + for _, col := range table.Columns { + if mysql.HasPriKeyFlag(col.Flag) { + record := types.MakeDatums( + catalogVal, // TABLE_CATALOG + schema.Name.O, // TABLE_SCHEMA + table.Name.O, // TABLE_NAME + "0", // NON_UNIQUE + schema.Name.O, // INDEX_SCHEMA + "PRIMARY", // INDEX_NAME + 1, // SEQ_IN_INDEX + col.Name.O, // COLUMN_NAME + "A", // COLLATION + nil, // CARDINALITY + nil, // SUB_PART + nil, // PACKED + "", // NULLABLE + "BTREE", // INDEX_TYPE + "", // COMMENT + "", // INDEX_COMMENT + ) + rows = append(rows, record) + } + } + } + nameToCol := make(map[string]*model.ColumnInfo, len(table.Columns)) + for _, c := range table.Columns { + nameToCol[c.Name.L] = c + } + for _, index := range table.Indices { + nonUnique := "1" + if index.Unique { + nonUnique = "0" + } + for i, key := range index.Columns { + col := nameToCol[key.Name.L] + nullable := "YES" + if mysql.HasNotNullFlag(col.Flag) { + nullable = "" + } + record := types.MakeDatums( + catalogVal, // TABLE_CATALOG + schema.Name.O, // TABLE_SCHEMA + table.Name.O, // TABLE_NAME + nonUnique, // NON_UNIQUE + schema.Name.O, // INDEX_SCHEMA + index.Name.O, // INDEX_NAME + i+1, // SEQ_IN_INDEX + key.Name.O, // COLUMN_NAME + "A", // COLLATION + nil, // CARDINALITY + nil, // SUB_PART + nil, // PACKED + nullable, // NULLABLE + "BTREE", // INDEX_TYPE + "", // COMMENT + "", // INDEX_COMMENT + ) + rows = append(rows, record) + } + } + return rows +} + +const ( + primaryKeyType = "PRIMARY KEY" + primaryConstraint = "PRIMARY" + uniqueKeyType = "UNIQUE" +) + +// dataForTableConstraints constructs data for table information_schema.constraints.See https://dev.mysql.com/doc/refman/5.7/en/table-constraints-table.html +func dataForTableConstraints(ctx sessionctx.Context, schemas []*model.DBInfo) [][]types.Datum { + var rows [][]types.Datum + for _, schema := range schemas { + for _, tbl := range schema.Tables { + if tbl.PKIsHandle { + record := types.MakeDatums( + catalogVal, // CONSTRAINT_CATALOG + schema.Name.O, // CONSTRAINT_SCHEMA + mysql.PrimaryKeyName, // CONSTRAINT_NAME + schema.Name.O, // TABLE_SCHEMA + tbl.Name.O, // TABLE_NAME + primaryKeyType, // CONSTRAINT_TYPE + ) + rows = append(rows, record) + } + + for _, idx := range tbl.Indices { + var cname, ctype string + if idx.Primary { + cname = mysql.PrimaryKeyName + ctype = primaryKeyType + } else if idx.Unique { + cname = idx.Name.O + ctype = uniqueKeyType + } else { + // The index has no constriant. + continue + } + record := types.MakeDatums( + catalogVal, // CONSTRAINT_CATALOG + schema.Name.O, // CONSTRAINT_SCHEMA + cname, // CONSTRAINT_NAME + schema.Name.O, // TABLE_SCHEMA + tbl.Name.O, // TABLE_NAME + ctype, // CONSTRAINT_TYPE + ) + rows = append(rows, record) + } + } + } + return rows +} + +// dataForPseudoProfiling returns pseudo data for table profiling when system variable `profiling` is set to `ON`. 
+func dataForPseudoProfiling() [][]types.Datum { + var rows [][]types.Datum + row := types.MakeDatums( + 0, // QUERY_ID + 0, // SEQ + "", // STATE + nil, // DURATION + nil, // CPU_USER + nil, // CPU_SYSTEM + 0, // CONTEXT_VOLUNTARY + 0, // CONTEXT_INVOLUNTARY + 0, // BLOCK_OPS_IN + 0, // BLOCK_OPS_OUT + 0, // MESSAGES_SENT + 0, // MESSAGES_RECEIVED + 0, // PAGE_FAULTS_MAJOR + 0, // PAGE_FAULTS_MINOR + 0, // SWAPS + "", // SOURCE_FUNCTION + "", // SOURCE_FILE + 0, // SOURCE_LINE + ) + rows = append(rows, row) + return rows +} + +func dataForKeyColumnUsage(schemas []*model.DBInfo) [][]types.Datum { + rows := make([][]types.Datum, 0, len(schemas)) // The capacity is not accurate, but it is not a big problem. + for _, schema := range schemas { + for _, table := range schema.Tables { + rs := keyColumnUsageInTable(schema, table) + rows = append(rows, rs...) + } + } + return rows +} + +func keyColumnUsageInTable(schema *model.DBInfo, table *model.TableInfo) [][]types.Datum { + var rows [][]types.Datum + if table.PKIsHandle { + for _, col := range table.Columns { + if mysql.HasPriKeyFlag(col.Flag) { + record := types.MakeDatums( + catalogVal, // CONSTRAINT_CATALOG + schema.Name.O, // CONSTRAINT_SCHEMA + primaryConstraint, // CONSTRAINT_NAME + catalogVal, // TABLE_CATALOG + schema.Name.O, // TABLE_SCHEMA + table.Name.O, // TABLE_NAME + col.Name.O, // COLUMN_NAME + 1, // ORDINAL_POSITION + 1, // POSITION_IN_UNIQUE_CONSTRAINT + nil, // REFERENCED_TABLE_SCHEMA + nil, // REFERENCED_TABLE_NAME + nil, // REFERENCED_COLUMN_NAME + ) + rows = append(rows, record) + break + } + } + } + nameToCol := make(map[string]*model.ColumnInfo, len(table.Columns)) + for _, c := range table.Columns { + nameToCol[c.Name.L] = c + } + for _, index := range table.Indices { + var idxName string + if index.Primary { + idxName = primaryConstraint + } else if index.Unique { + idxName = index.Name.O + } else { + // Only handle unique/primary key + continue + } + for i, key := range index.Columns { + col := nameToCol[key.Name.L] + record := types.MakeDatums( + catalogVal, // CONSTRAINT_CATALOG + schema.Name.O, // CONSTRAINT_SCHEMA + idxName, // CONSTRAINT_NAME + catalogVal, // TABLE_CATALOG + schema.Name.O, // TABLE_SCHEMA + table.Name.O, // TABLE_NAME + col.Name.O, // COLUMN_NAME + i+1, // ORDINAL_POSITION, + nil, // POSITION_IN_UNIQUE_CONSTRAINT + nil, // REFERENCED_TABLE_SCHEMA + nil, // REFERENCED_TABLE_NAME + nil, // REFERENCED_COLUMN_NAME + ) + rows = append(rows, record) + } + } + return rows +} + +// ServerInfo represents the basic server information of single cluster component +type ServerInfo struct { + ServerType string + Address string + StatusAddr string + Version string + GitHash string +} + +var tableNameToColumns = map[string][]columnInfo{ + tableSchemata: schemataCols, + tableTables: tablesCols, + tableColumns: columnsCols, + tableColumnStatistics: columnStatisticsCols, + tableStatistics: statisticsCols, + tableCharacterSets: charsetCols, + tableCollations: collationsCols, + tableFiles: filesCols, + tableProfiling: profilingCols, + tablePartitions: partitionsCols, + tableKeyColumm: keyColumnUsageCols, + tableReferConst: referConstCols, + tableSessionVar: sessionVarCols, + tableConstraints: tableConstraintsCols, + tableTriggers: tableTriggersCols, + tableUserPrivileges: tableUserPrivilegesCols, + tableSchemaPrivileges: tableSchemaPrivilegesCols, + tableTablePrivileges: tableTablePrivilegesCols, + tableColumnPrivileges: tableColumnPrivilegesCols, + tableEngines: tableEnginesCols, + tableRoutines: tableRoutinesCols, 
+ tableParameters: tableParametersCols, + tableEvents: tableEventsCols, + tableGlobalStatus: tableGlobalStatusCols, + tableGlobalVariables: tableGlobalVariablesCols, + tableSessionStatus: tableSessionStatusCols, + tableOptimizerTrace: tableOptimizerTraceCols, + tableTableSpaces: tableTableSpacesCols, + tableCollationCharacterSetApplicability: tableCollationCharacterSetApplicabilityCols, +} + +func createInfoSchemaTable(_ autoid.Allocator, meta *model.TableInfo) (table.Table, error) { + columns := make([]*table.Column, len(meta.Columns)) + for i, col := range meta.Columns { + columns[i] = table.ToColumn(col) + } + tp := table.VirtualTable + return &infoschemaTable{meta: meta, cols: columns, tp: tp}, nil +} + +type infoschemaTable struct { + meta *model.TableInfo + cols []*table.Column + tp table.Type +} + +// schemasSorter implements the sort.Interface interface, sorts DBInfo by name. +type schemasSorter []*model.DBInfo + +func (s schemasSorter) Len() int { + return len(s) +} + +func (s schemasSorter) Swap(i, j int) { + s[i], s[j] = s[j], s[i] +} + +func (s schemasSorter) Less(i, j int) bool { + return s[i].Name.L < s[j].Name.L +} + +func (it *infoschemaTable) getRows(ctx sessionctx.Context, cols []*table.Column) (fullRows [][]types.Datum, err error) { + is := GetInfoSchema(ctx) + dbs := is.AllSchemas() + sort.Sort(schemasSorter(dbs)) + switch it.meta.Name.O { + case tableSchemata: + fullRows = dataForSchemata(ctx, dbs) + case tableTables: + fullRows, err = dataForTables(ctx, dbs) + case tableColumns: + fullRows = dataForColumns(ctx, dbs) + case tableStatistics: + fullRows = dataForStatistics(ctx, dbs) + case tableCharacterSets: + fullRows = dataForCharacterSets() + case tableCollations: + fullRows = dataForCollations() + case tableSessionVar: + fullRows, err = dataForSessionVar(ctx) + case tableConstraints: + fullRows = dataForTableConstraints(ctx, dbs) + case tableFiles: + case tableProfiling: + if v, ok := ctx.GetSessionVars().GetSystemVar("profiling"); ok && variable.TiDBOptOn(v) { + fullRows = dataForPseudoProfiling() + } + case tablePartitions: + case tableKeyColumm: + fullRows = dataForKeyColumnUsage(dbs) + case tableReferConst: + case tableTriggers: + case tableUserPrivileges: + fullRows = dataForUserPrivileges(ctx) + case tableEngines: + fullRows = dataForEngines() + case tableRoutines: + // TODO: Fill the following tables. + case tableSchemaPrivileges: + case tableTablePrivileges: + case tableColumnPrivileges: + case tableParameters: + case tableEvents: + case tableGlobalStatus: + case tableGlobalVariables: + case tableSessionStatus: + case tableOptimizerTrace: + case tableTableSpaces: + case tableCollationCharacterSetApplicability: + fullRows = dataForCollationCharacterSetApplicability() + } + if err != nil { + return nil, err + } + if len(cols) == len(it.cols) { + return + } + rows := make([][]types.Datum, len(fullRows)) + for i, fullRow := range fullRows { + row := make([]types.Datum, len(cols)) + for j, col := range cols { + row[j] = fullRow[col.Offset] + } + rows[i] = row + } + return rows, nil +} + +// IterRecords implements table.Table IterRecords interface. 
+func (it *infoschemaTable) IterRecords(ctx sessionctx.Context, startKey kv.Key, cols []*table.Column, + fn table.RecordIterFunc) error { + if len(startKey) != 0 { + return table.ErrUnsupportedOp + } + rows, err := it.getRows(ctx, cols) + if err != nil { + return err + } + for i, row := range rows { + more, err := fn(int64(i), row, cols) + if err != nil { + return err + } + if !more { + break + } + } + return nil +} + +// RowWithCols implements table.Table RowWithCols interface. +func (it *infoschemaTable) RowWithCols(ctx sessionctx.Context, h int64, cols []*table.Column) ([]types.Datum, error) { + return nil, table.ErrUnsupportedOp +} + +// Row implements table.Table Row interface. +func (it *infoschemaTable) Row(ctx sessionctx.Context, h int64) ([]types.Datum, error) { + return nil, table.ErrUnsupportedOp +} + +// Cols implements table.Table Cols interface. +func (it *infoschemaTable) Cols() []*table.Column { + return it.cols +} + +// WritableCols implements table.Table WritableCols interface. +func (it *infoschemaTable) WritableCols() []*table.Column { + return it.cols +} + +// Indices implements table.Table Indices interface. +func (it *infoschemaTable) Indices() []table.Index { + return nil +} + +// WritableIndices implements table.Table WritableIndices interface. +func (it *infoschemaTable) WritableIndices() []table.Index { + return nil +} + +// DeletableIndices implements table.Table DeletableIndices interface. +func (it *infoschemaTable) DeletableIndices() []table.Index { + return nil +} + +// RecordPrefix implements table.Table RecordPrefix interface. +func (it *infoschemaTable) RecordPrefix() kv.Key { + return nil +} + +// IndexPrefix implements table.Table IndexPrefix interface. +func (it *infoschemaTable) IndexPrefix() kv.Key { + return nil +} + +// FirstKey implements table.Table FirstKey interface. +func (it *infoschemaTable) FirstKey() kv.Key { + return nil +} + +// RecordKey implements table.Table RecordKey interface. +func (it *infoschemaTable) RecordKey(h int64) kv.Key { + return nil +} + +// AddRecord implements table.Table AddRecord interface. +func (it *infoschemaTable) AddRecord(ctx sessionctx.Context, r []types.Datum, opts ...table.AddRecordOption) (recordID int64, err error) { + return 0, table.ErrUnsupportedOp +} + +// RemoveRecord implements table.Table RemoveRecord interface. +func (it *infoschemaTable) RemoveRecord(ctx sessionctx.Context, h int64, r []types.Datum) error { + return table.ErrUnsupportedOp +} + +// UpdateRecord implements table.Table UpdateRecord interface. +func (it *infoschemaTable) UpdateRecord(ctx sessionctx.Context, h int64, oldData, newData []types.Datum, touched []bool) error { + return table.ErrUnsupportedOp +} + +// AllocHandle implements table.Table AllocHandle interface. +func (it *infoschemaTable) AllocHandle(ctx sessionctx.Context) (int64, error) { + return 0, table.ErrUnsupportedOp +} + +// AllocHandleIDs implements table.Table AllocHandleIDs interface. +func (it *infoschemaTable) AllocHandleIDs(ctx sessionctx.Context, n uint64) (int64, int64, error) { + return 0, 0, table.ErrUnsupportedOp +} + +// Allocator implements table.Table Allocator interface. +func (it *infoschemaTable) Allocator(ctx sessionctx.Context) autoid.Allocator { + return nil +} + +// RebaseAutoID implements table.Table RebaseAutoID interface. +func (it *infoschemaTable) RebaseAutoID(ctx sessionctx.Context, newBase int64, isSetStep bool) error { + return table.ErrUnsupportedOp +} + +// Meta implements table.Table Meta interface. 
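A brief aside on reading these virtual tables: the IterRecords contract above (empty start key, callback reports whether to continue) is all that a scan needs. Below is a minimal helper sketch, not part of this patch, which assumes the caller supplies an active sessionctx.Context and a memory table such as the information_schema tables defined above.

package memtablescan

import (
	"fmt"

	"github.com/pingcap/tidb/kv"
	"github.com/pingcap/tidb/sessionctx"
	"github.com/pingcap/tidb/table"
	"github.com/pingcap/tidb/types"
)

// dumpMemTable walks every row of a virtual (memory) table via IterRecords.
func dumpMemTable(ctx sessionctx.Context, tbl table.Table) error {
	return tbl.IterRecords(ctx, kv.Key(nil), tbl.Cols(),
		func(h int64, rec []types.Datum, cols []*table.Column) (bool, error) {
			fmt.Printf("row %d has %d columns\n", h, len(rec))
			return true, nil // keep iterating
		})
}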
+func (it *infoschemaTable) Meta() *model.TableInfo { + return it.meta +} + +// GetPhysicalID implements table.Table GetPhysicalID interface. +func (it *infoschemaTable) GetPhysicalID() int64 { + return it.meta.ID +} + +// Seek implements table.Table Seek interface. +func (it *infoschemaTable) Seek(ctx sessionctx.Context, h int64) (int64, bool, error) { + return 0, false, table.ErrUnsupportedOp +} + +// Type implements table.Table Type interface. +func (it *infoschemaTable) Type() table.Type { + return it.tp +} + +// VirtualTable is a dummy table.Table implementation. +type VirtualTable struct{} + +// IterRecords implements table.Table IterRecords interface. +func (vt *VirtualTable) IterRecords(ctx sessionctx.Context, startKey kv.Key, cols []*table.Column, + fn table.RecordIterFunc) error { + if len(startKey) != 0 { + return table.ErrUnsupportedOp + } + return nil +} + +// RowWithCols implements table.Table RowWithCols interface. +func (vt *VirtualTable) RowWithCols(ctx sessionctx.Context, h int64, cols []*table.Column) ([]types.Datum, error) { + return nil, table.ErrUnsupportedOp +} + +// Row implements table.Table Row interface. +func (vt *VirtualTable) Row(ctx sessionctx.Context, h int64) ([]types.Datum, error) { + return nil, table.ErrUnsupportedOp +} + +// Cols implements table.Table Cols interface. +func (vt *VirtualTable) Cols() []*table.Column { + return nil +} + +// WritableCols implements table.Table WritableCols interface. +func (vt *VirtualTable) WritableCols() []*table.Column { + return nil +} + +// Indices implements table.Table Indices interface. +func (vt *VirtualTable) Indices() []table.Index { + return nil +} + +// WritableIndices implements table.Table WritableIndices interface. +func (vt *VirtualTable) WritableIndices() []table.Index { + return nil +} + +// DeletableIndices implements table.Table DeletableIndices interface. +func (vt *VirtualTable) DeletableIndices() []table.Index { + return nil +} + +// RecordPrefix implements table.Table RecordPrefix interface. +func (vt *VirtualTable) RecordPrefix() kv.Key { + return nil +} + +// IndexPrefix implements table.Table IndexPrefix interface. +func (vt *VirtualTable) IndexPrefix() kv.Key { + return nil +} + +// FirstKey implements table.Table FirstKey interface. +func (vt *VirtualTable) FirstKey() kv.Key { + return nil +} + +// RecordKey implements table.Table RecordKey interface. +func (vt *VirtualTable) RecordKey(h int64) kv.Key { + return nil +} + +// AddRecord implements table.Table AddRecord interface. +func (vt *VirtualTable) AddRecord(ctx sessionctx.Context, r []types.Datum, opts ...table.AddRecordOption) (recordID int64, err error) { + return 0, table.ErrUnsupportedOp +} + +// RemoveRecord implements table.Table RemoveRecord interface. +func (vt *VirtualTable) RemoveRecord(ctx sessionctx.Context, h int64, r []types.Datum) error { + return table.ErrUnsupportedOp +} + +// UpdateRecord implements table.Table UpdateRecord interface. +func (vt *VirtualTable) UpdateRecord(ctx sessionctx.Context, h int64, oldData, newData []types.Datum, touched []bool) error { + return table.ErrUnsupportedOp +} + +// AllocHandle implements table.Table AllocHandle interface. +func (vt *VirtualTable) AllocHandle(ctx sessionctx.Context) (int64, error) { + return 0, table.ErrUnsupportedOp +} + +// AllocHandleIDs implements table.Table AllocHandleIDs interface. 
+func (vt *VirtualTable) AllocHandleIDs(ctx sessionctx.Context, n uint64) (int64, int64, error) { + return 0, 0, table.ErrUnsupportedOp +} + +// Allocator implements table.Table Allocator interface. +func (vt *VirtualTable) Allocator(ctx sessionctx.Context) autoid.Allocator { + return nil +} + +// RebaseAutoID implements table.Table RebaseAutoID interface. +func (vt *VirtualTable) RebaseAutoID(ctx sessionctx.Context, newBase int64, isSetStep bool) error { + return table.ErrUnsupportedOp +} + +// Meta implements table.Table Meta interface. +func (vt *VirtualTable) Meta() *model.TableInfo { + return nil +} + +// GetPhysicalID implements table.Table GetPhysicalID interface. +func (vt *VirtualTable) GetPhysicalID() int64 { + return 0 +} + +// Seek implements table.Table Seek interface. +func (vt *VirtualTable) Seek(ctx sessionctx.Context, h int64) (int64, bool, error) { + return 0, false, table.ErrUnsupportedOp +} + +// Type implements table.Table Type interface. +func (vt *VirtualTable) Type() table.Type { + return table.VirtualTable +} diff --git a/infoschema/tables_test.go b/infoschema/tables_test.go new file mode 100644 index 0000000..5bba8ff --- /dev/null +++ b/infoschema/tables_test.go @@ -0,0 +1,74 @@ +// Copyright 2018 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package infoschema_test + +import ( + . 
"github.com/pingcap/check" + "github.com/pingcap/tidb/domain" + "github.com/pingcap/tidb/infoschema" + "github.com/pingcap/tidb/kv" + "github.com/pingcap/tidb/parser/model" + "github.com/pingcap/tidb/parser/terror" + "github.com/pingcap/tidb/session" + "github.com/pingcap/tidb/store/mockstore" + "github.com/pingcap/tidb/util/testkit" + "github.com/pingcap/tidb/util/testleak" +) + +var _ = Suite(&testTableSuite{}) + +type testTableSuite struct { + store kv.Storage + dom *domain.Domain +} + +func (s *testTableSuite) SetUpSuite(c *C) { + testleak.BeforeTest() + + var err error + s.store, err = mockstore.NewMockTikvStore() + c.Assert(err, IsNil) + session.DisableStats4Test() + s.dom, err = session.BootstrapSession(s.store) + c.Assert(err, IsNil) +} + +func (s *testTableSuite) TearDownSuite(c *C) { + s.dom.Close() + s.store.Close() + testleak.AfterTest(c)() +} + +func (s *testTableSuite) TestColumnStatistics(c *C) { + tk := testkit.NewTestKit(c, s.store) + tk.MustQuery("select * from information_schema.column_statistics").Check(testkit.Rows()) +} + +func (s *testTableSuite) TestReloadDropDatabase(c *C) { + tk := testkit.NewTestKit(c, s.store) + tk.MustExec("create database test_dbs") + tk.MustExec("use test_dbs") + tk.MustExec("create table t1 (a int)") + tk.MustExec("create table t2 (a int)") + tk.MustExec("create table t3 (a int)") + is := domain.GetDomain(tk.Se).InfoSchema() + t2, err := is.TableByName(model.NewCIStr("test_dbs"), model.NewCIStr("t2")) + c.Assert(err, IsNil) + tk.MustExec("drop database test_dbs") + is = domain.GetDomain(tk.Se).InfoSchema() + _, err = is.TableByName(model.NewCIStr("test_dbs"), model.NewCIStr("t2")) + c.Assert(terror.ErrorEqual(infoschema.ErrTableNotExists, err), IsTrue) + _, ok := is.TableByID(t2.Meta().ID) + c.Assert(ok, IsFalse) +} diff --git a/kv/buffer_store.go b/kv/buffer_store.go new file mode 100644 index 0000000..04ddf6b --- /dev/null +++ b/kv/buffer_store.go @@ -0,0 +1,115 @@ +// Copyright 2015 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package kv + +import ( + "context" +) + +var ( + // DefaultTxnMembufCap is the default transaction membuf capability. + DefaultTxnMembufCap = 4 * 1024 + // TempTxnMemBufCap is the capability of temporary membuf. + TempTxnMemBufCap = 64 +) + +// BufferStore wraps a Retriever for read and a MemBuffer for buffered write. +// Common usage pattern: +// bs := NewBufferStore(r) // use BufferStore to wrap a Retriever +// // ... +// // read/write on bs +// // ... +// bs.SaveTo(m) // save above operations to a Mutator +type BufferStore struct { + MemBuffer + r Retriever +} + +// NewBufferStore creates a BufferStore using r for read. +func NewBufferStore(r Retriever, cap int) *BufferStore { + if cap <= 0 { + cap = DefaultTxnMembufCap + } + return &BufferStore{ + r: r, + MemBuffer: &lazyMemBuffer{cap: cap}, + } +} + +// Reset resets s.MemBuffer. +func (s *BufferStore) Reset() { + s.MemBuffer.Reset() +} + +// SetCap sets the MemBuffer capability. +func (s *BufferStore) SetCap(cap int) { + s.MemBuffer.SetCap(cap) +} + +// Get implements the Retriever interface. 
+func (s *BufferStore) Get(ctx context.Context, k Key) ([]byte, error) { + val, err := s.MemBuffer.Get(ctx, k) + if IsErrNotFound(err) { + val, err = s.r.Get(ctx, k) + } + if err != nil { + return nil, err + } + if len(val) == 0 { + return nil, ErrNotExist + } + return val, nil +} + +// Iter implements the Retriever interface. +func (s *BufferStore) Iter(k Key, upperBound Key) (Iterator, error) { + bufferIt, err := s.MemBuffer.Iter(k, upperBound) + if err != nil { + return nil, err + } + retrieverIt, err := s.r.Iter(k, upperBound) + if err != nil { + return nil, err + } + return NewUnionIter(bufferIt, retrieverIt, false) +} + +// IterReverse implements the Retriever interface. +func (s *BufferStore) IterReverse(k Key) (Iterator, error) { + bufferIt, err := s.MemBuffer.IterReverse(k) + if err != nil { + return nil, err + } + retrieverIt, err := s.r.IterReverse(k) + if err != nil { + return nil, err + } + return NewUnionIter(bufferIt, retrieverIt, true) +} + +// WalkBuffer iterates all buffered kv pairs. +func (s *BufferStore) WalkBuffer(f func(k Key, v []byte) error) error { + return WalkMemBuffer(s.MemBuffer, f) +} + +// SaveTo saves all buffered kv pairs into a Mutator. +func (s *BufferStore) SaveTo(m Mutator) error { + err := s.WalkBuffer(func(k Key, v []byte) error { + if len(v) == 0 { + return m.Delete(k) + } + return m.Set(k, v) + }) + return err +} diff --git a/kv/buffer_store_test.go b/kv/buffer_store_test.go new file mode 100644 index 0000000..b7d63dc --- /dev/null +++ b/kv/buffer_store_test.go @@ -0,0 +1,89 @@ +// Copyright 2016 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package kv + +import ( + "bytes" + "context" + "fmt" + + . 
"github.com/pingcap/check" + "github.com/pingcap/tidb/parser/terror" +) + +type testBufferStoreSuite struct{} + +var _ = Suite(testBufferStoreSuite{}) + +func (s testBufferStoreSuite) TestGetSet(c *C) { + bs := NewBufferStore(&mockSnapshot{NewMemDbBuffer(DefaultTxnMembufCap)}, DefaultTxnMembufCap) + key := Key("key") + _, err := bs.Get(context.TODO(), key) + c.Check(err, NotNil) + + err = bs.Set(key, []byte("value")) + c.Check(err, IsNil) + + value, err := bs.Get(context.TODO(), key) + c.Check(err, IsNil) + c.Check(bytes.Compare(value, []byte("value")), Equals, 0) +} + +func (s testBufferStoreSuite) TestSaveTo(c *C) { + bs := NewBufferStore(&mockSnapshot{NewMemDbBuffer(DefaultTxnMembufCap)}, DefaultTxnMembufCap) + var buf bytes.Buffer + for i := 0; i < 10; i++ { + fmt.Fprint(&buf, i) + err := bs.Set(buf.Bytes(), buf.Bytes()) + c.Check(err, IsNil) + buf.Reset() + } + err := bs.Set(Key("novalue"), []byte("novalue")) + c.Check(err, IsNil) + + mutator := NewMemDbBuffer(DefaultTxnMembufCap) + err = bs.SaveTo(mutator) + c.Check(err, IsNil) + + iter, err := mutator.Iter(nil, nil) + c.Check(err, IsNil) + for iter.Valid() { + cmp := bytes.Compare(iter.Key(), iter.Value()) + c.Check(cmp, Equals, 0) + err = iter.Next() + c.Check(err, IsNil) + } +} + +func (s testBufferStoreSuite) TestBufferStore(c *C) { + bs := NewBufferStore(&mockSnapshot{NewMemDbBuffer(DefaultTxnMembufCap)}, -1) + bs.SetCap(10) + key := Key("key") + err := bs.Set(key, []byte("value")) + c.Check(err, IsNil) + + err = bs.Set(key, []byte("")) + c.Check(terror.ErrorEqual(err, ErrCannotSetNilValue), IsTrue) + + err = bs.Delete(key) + c.Check(err, IsNil) + + _, err = bs.Get(context.TODO(), key) + c.Check(terror.ErrorEqual(err, ErrNotExist), IsTrue) + + bs.Reset() + _, err = bs.Get(context.TODO(), key) + c.Check(terror.ErrorEqual(err, ErrNotExist), IsTrue) + +} diff --git a/kv/checker.go b/kv/checker.go new file mode 100644 index 0000000..038f3d4 --- /dev/null +++ b/kv/checker.go @@ -0,0 +1,56 @@ +// Copyright 2019 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package kv + +import "github.com/pingcap/tipb/go-tipb" + +// RequestTypeSupportedChecker is used to check expression can be pushed down. +type RequestTypeSupportedChecker struct{} + +// IsRequestTypeSupported checks whether reqType is supported. 
+func (d RequestTypeSupportedChecker) IsRequestTypeSupported(reqType, subType int64) bool { + switch reqType { + case ReqTypeSelect, ReqTypeIndex: + switch subType { + case ReqSubTypeGroupBy, ReqSubTypeBasic, ReqSubTypeTopN: + return true + default: + return d.supportExpr(tipb.ExprType(subType)) + } + case ReqTypeDAG: + return d.supportExpr(tipb.ExprType(subType)) + case ReqTypeAnalyze: + return true + } + return false +} + +func (d RequestTypeSupportedChecker) supportExpr(exprType tipb.ExprType) bool { + switch exprType { + case tipb.ExprType_Null, tipb.ExprType_Int64, tipb.ExprType_Uint64, tipb.ExprType_String, tipb.ExprType_Bytes, + tipb.ExprType_MysqlDuration, tipb.ExprType_MysqlTime, tipb.ExprType_MysqlDecimal, + tipb.ExprType_Float32, tipb.ExprType_Float64, tipb.ExprType_ColumnRef: + return true + // aggregate functions. + case tipb.ExprType_Count, tipb.ExprType_First, tipb.ExprType_Max, tipb.ExprType_Min, tipb.ExprType_Sum, tipb.ExprType_Avg, + tipb.ExprType_Agg_BitXor, tipb.ExprType_Agg_BitAnd, tipb.ExprType_Agg_BitOr: + return true + case ReqSubTypeDesc: + return true + case ReqSubTypeSignature: + return true + default: + return false + } +} diff --git a/kv/checker_test.go b/kv/checker_test.go new file mode 100644 index 0000000..4f36380 --- /dev/null +++ b/kv/checker_test.go @@ -0,0 +1,39 @@ +// Copyright 2019 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package kv_test + +import ( + "testing" + + . "github.com/pingcap/check" + "github.com/pingcap/tidb/kv" +) + +type checkerSuite struct{} + +func TestIsRequestTypeSupported(t *testing.T) { + TestingT(t) +} + +var _ = Suite(&checkerSuite{}) + +func (s checkerSuite) TestIsRequestTypeSupported(c *C) { + checker := kv.RequestTypeSupportedChecker{}.IsRequestTypeSupported + c.Assert(checker(kv.ReqTypeSelect, kv.ReqSubTypeGroupBy), IsTrue) + c.Assert(checker(kv.ReqTypeDAG, kv.ReqSubTypeSignature), IsTrue) + c.Assert(checker(kv.ReqTypeDAG, kv.ReqSubTypeDesc), IsTrue) + c.Assert(checker(kv.ReqTypeDAG, kv.ReqSubTypeSignature), IsTrue) + c.Assert(checker(kv.ReqTypeDAG, kv.ReqSubTypeAnalyzeIdx), IsFalse) + c.Assert(checker(kv.ReqTypeAnalyze, 0), IsTrue) +} diff --git a/kv/error.go b/kv/error.go new file mode 100644 index 0000000..207232f --- /dev/null +++ b/kv/error.go @@ -0,0 +1,83 @@ +// Copyright 2015 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package kv + +import ( + "github.com/pingcap/tidb/parser/mysql" + "github.com/pingcap/tidb/parser/terror" +) + +// TxnRetryableMark is used to uniform the commit error messages which could retry the transaction. +// *WARNING*: changing this string will affect the backward compatibility. 
+const TxnRetryableMark = "[try again later]"
+
+var (
+ // ErrNotExist is used when trying to get an entry with a nonexistent key from the KV store.
+ ErrNotExist = terror.ClassKV.New(mysql.ErrNotExist, mysql.MySQLErrName[mysql.ErrNotExist])
+ // ErrTxnRetryable is used when the KV store returns a retryable error, so the SQL layer can safely retry the transaction.
+ // When using TiKV as the storage node, the error is returned ONLY when the lock is not found (txnLockNotFound) in Commit;
+ // this is subject to change in the future.
+ ErrTxnRetryable = terror.ClassKV.New(mysql.ErrTxnRetryable,
+ mysql.MySQLErrName[mysql.ErrTxnRetryable]+TxnRetryableMark)
+ // ErrCannotSetNilValue is the error returned when setting an empty value.
+ ErrCannotSetNilValue = terror.ClassKV.New(mysql.ErrCannotSetNilValue, mysql.MySQLErrName[mysql.ErrCannotSetNilValue])
+ // ErrInvalidTxn is the error returned when committing or rolling back an invalid transaction.
+ ErrInvalidTxn = terror.ClassKV.New(mysql.ErrInvalidTxn, mysql.MySQLErrName[mysql.ErrInvalidTxn])
+ // ErrTxnTooLarge is the error returned when the transaction is too large and the lock time reaches the maximum value.
+ ErrTxnTooLarge = terror.ClassKV.New(mysql.ErrTxnTooLarge, mysql.MySQLErrName[mysql.ErrTxnTooLarge])
+ // ErrEntryTooLarge is the error returned when a key-value entry is too large.
+ ErrEntryTooLarge = terror.ClassKV.New(mysql.ErrEntryTooLarge, mysql.MySQLErrName[mysql.ErrEntryTooLarge])
+ // ErrKeyExists is returned when the key already exists.
+ ErrKeyExists = terror.ClassKV.New(mysql.ErrDupEntry, mysql.MySQLErrName[mysql.ErrDupEntry])
+ // ErrNotImplemented is returned when a function is not implemented yet.
+ ErrNotImplemented = terror.ClassKV.New(mysql.ErrNotImplemented, mysql.MySQLErrName[mysql.ErrNotImplemented])
+ // ErrWriteConflict is the error returned when the commit meets a write conflict.
+ ErrWriteConflict = terror.ClassKV.New(mysql.ErrWriteConflict,
+ mysql.MySQLErrName[mysql.ErrWriteConflict]+" "+TxnRetryableMark)
+)
+
+func init() {
+ kvMySQLErrCodes := map[terror.ErrCode]uint16{
+ mysql.ErrNotExist: mysql.ErrNotExist,
+ mysql.ErrDupEntry: mysql.ErrDupEntry,
+ mysql.ErrTooBigRowsize: mysql.ErrTooBigRowsize,
+ mysql.ErrTxnTooLarge: mysql.ErrTxnTooLarge,
+ mysql.ErrTxnRetryable: mysql.ErrTxnRetryable,
+ mysql.ErrWriteConflict: mysql.ErrWriteConflict,
+ mysql.ErrWriteConflictInTiDB: mysql.ErrWriteConflictInTiDB,
+ mysql.ErrCannotSetNilValue: mysql.ErrCannotSetNilValue,
+ mysql.ErrInvalidTxn: mysql.ErrInvalidTxn,
+ mysql.ErrEntryTooLarge: mysql.ErrEntryTooLarge,
+ mysql.ErrNotImplemented: mysql.ErrNotImplemented,
+ }
+ terror.ErrClassToMySQLCodes[terror.ClassKV] = kvMySQLErrCodes
+}
+
+// IsTxnRetryableError checks whether the transaction can be safely retried after the given error.
+func IsTxnRetryableError(err error) bool {
+ if err == nil {
+ return false
+ }
+
+ if ErrTxnRetryable.Equal(err) || ErrWriteConflict.Equal(err) {
+ return true
+ }
+
+ return false
+}
+
+// IsErrNotFound checks if err is a kind of NotFound error.
+func IsErrNotFound(err error) bool {
+ return ErrNotExist.Equal(err)
+}
diff --git a/kv/error_test.go b/kv/error_test.go
new file mode 100644
index 0000000..9e73cb8
--- /dev/null
+++ b/kv/error_test.go
@@ -0,0 +1,42 @@
+// Copyright 2019 PingCAP, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package kv + +import ( + . "github.com/pingcap/check" + "github.com/pingcap/tidb/parser/mysql" + "github.com/pingcap/tidb/parser/terror" +) + +type testErrorSuite struct{} + +var _ = Suite(testErrorSuite{}) + +func (s testErrorSuite) TestError(c *C) { + kvErrs := []*terror.Error{ + ErrNotExist, + ErrTxnRetryable, + ErrCannotSetNilValue, + ErrInvalidTxn, + ErrTxnTooLarge, + ErrEntryTooLarge, + ErrKeyExists, + ErrNotImplemented, + ErrWriteConflict, + } + for _, err := range kvErrs { + code := err.ToSQLError().Code + c.Assert(code != mysql.ErrUnknown && code == uint16(err.Code()), IsTrue, Commentf("err: %v", err)) + } +} diff --git a/kv/fault_injection.go b/kv/fault_injection.go new file mode 100644 index 0000000..c0bae0c --- /dev/null +++ b/kv/fault_injection.go @@ -0,0 +1,124 @@ +// Copyright 2016 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package kv + +import ( + "context" + "sync" +) + +// InjectionConfig is used for fault injections for KV components. +type InjectionConfig struct { + sync.RWMutex + getError error // kv.Get() always return this error. + commitError error // Transaction.Commit() always return this error. +} + +// SetGetError injects an error for all kv.Get() methods. +func (c *InjectionConfig) SetGetError(err error) { + c.Lock() + defer c.Unlock() + + c.getError = err +} + +// SetCommitError injects an error for all Transaction.Commit() methods. +func (c *InjectionConfig) SetCommitError(err error) { + c.Lock() + defer c.Unlock() + c.commitError = err +} + +// InjectedStore wraps a Storage with injections. +type InjectedStore struct { + Storage + cfg *InjectionConfig +} + +// NewInjectedStore creates a InjectedStore with config. +func NewInjectedStore(store Storage, cfg *InjectionConfig) Storage { + return &InjectedStore{ + Storage: store, + cfg: cfg, + } +} + +// Begin creates an injected Transaction. +func (s *InjectedStore) Begin() (Transaction, error) { + txn, err := s.Storage.Begin() + return &InjectedTransaction{ + Transaction: txn, + cfg: s.cfg, + }, err +} + +// BeginWithStartTS creates an injected Transaction with startTS. +func (s *InjectedStore) BeginWithStartTS(startTS uint64) (Transaction, error) { + txn, err := s.Storage.BeginWithStartTS(startTS) + return &InjectedTransaction{ + Transaction: txn, + cfg: s.cfg, + }, err +} + +// GetSnapshot creates an injected Snapshot. +func (s *InjectedStore) GetSnapshot(ver Version) (Snapshot, error) { + snapshot, err := s.Storage.GetSnapshot(ver) + return &InjectedSnapshot{ + Snapshot: snapshot, + cfg: s.cfg, + }, err +} + +// InjectedTransaction wraps a Transaction with injections. +type InjectedTransaction struct { + Transaction + cfg *InjectionConfig +} + +// Get returns an error if cfg.getError is set. 
+func (t *InjectedTransaction) Get(ctx context.Context, k Key) ([]byte, error) { + t.cfg.RLock() + defer t.cfg.RUnlock() + if t.cfg.getError != nil { + return nil, t.cfg.getError + } + return t.Transaction.Get(ctx, k) +} + +// Commit returns an error if cfg.commitError is set. +func (t *InjectedTransaction) Commit(ctx context.Context) error { + t.cfg.RLock() + defer t.cfg.RUnlock() + if t.cfg.commitError != nil { + return t.cfg.commitError + } + return t.Transaction.Commit(ctx) +} + +// InjectedSnapshot wraps a Snapshot with injections. +type InjectedSnapshot struct { + Snapshot + cfg *InjectionConfig +} + +// Get returns an error if cfg.getError is set. +func (t *InjectedSnapshot) Get(ctx context.Context, k Key) ([]byte, error) { + t.cfg.RLock() + defer t.cfg.RUnlock() + if t.cfg.getError != nil { + return nil, t.cfg.getError + } + return t.Snapshot.Get(ctx, k) +} diff --git a/kv/fault_injection_test.go b/kv/fault_injection_test.go new file mode 100644 index 0000000..ab88ba6 --- /dev/null +++ b/kv/fault_injection_test.go @@ -0,0 +1,70 @@ +// Copyright 2017 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package kv_test + +import ( + "context" + + . "github.com/pingcap/check" + "github.com/pingcap/errors" + "github.com/pingcap/tidb/kv" + "github.com/pingcap/tidb/parser/terror" +) + +type testFaultInjectionSuite struct{} + +var _ = Suite(testFaultInjectionSuite{}) + +func (s testFaultInjectionSuite) TestFaultInjectionBasic(c *C) { + var cfg kv.InjectionConfig + err1 := errors.New("foo") + cfg.SetGetError(err1) + cfg.SetCommitError(err1) + + storage := kv.NewInjectedStore(kv.NewMockStorage(), &cfg) + txn, err := storage.Begin() + c.Assert(err, IsNil) + _, err = storage.BeginWithStartTS(0) + c.Assert(err, IsNil) + ver := kv.Version{Ver: 1} + snap, err := storage.GetSnapshot(ver) + c.Assert(err, IsNil) + b, err := txn.Get(context.TODO(), []byte{'a'}) + c.Assert(err.Error(), Equals, err1.Error()) + c.Assert(b, IsNil) + b, err = snap.Get(context.TODO(), []byte{'a'}) + c.Assert(err.Error(), Equals, err1.Error()) + c.Assert(b, IsNil) + + err = txn.Commit(context.Background()) + c.Assert(err.Error(), Equals, err1.Error()) + + cfg.SetGetError(nil) + cfg.SetCommitError(nil) + + storage = kv.NewInjectedStore(kv.NewMockStorage(), &cfg) + txn, err = storage.Begin() + c.Assert(err, IsNil) + snap, err = storage.GetSnapshot(ver) + c.Assert(err, IsNil) + + b, err = txn.Get(context.TODO(), []byte{'a'}) + c.Assert(err, IsNil) + c.Assert(b, IsNil) + + err = txn.Commit(context.Background()) + c.Assert(err, NotNil) + c.Assert(terror.ErrorEqual(err, kv.ErrTxnRetryable), IsTrue) + +} diff --git a/kv/iter.go b/kv/iter.go new file mode 100644 index 0000000..6cfb400 --- /dev/null +++ b/kv/iter.go @@ -0,0 +1,27 @@ +// Copyright 2015 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package kv + +// NextUntil applies FnKeyCmp to each entry of the iterator until meets some condition. +// It will stop when fn returns true, or iterator is invalid or an error occurs. +func NextUntil(it Iterator, fn FnKeyCmp) error { + var err error + for it.Valid() && !fn(it.Key()) { + err = it.Next() + if err != nil { + return err + } + } + return nil +} diff --git a/kv/key.go b/kv/key.go new file mode 100644 index 0000000..ff01505 --- /dev/null +++ b/kv/key.go @@ -0,0 +1,121 @@ +// Copyright 2015 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package kv + +import ( + "bytes" + "encoding/hex" +) + +// Key represents high-level Key type. +type Key []byte + +// Next returns the next key in byte-order. +func (k Key) Next() Key { + // add 0x0 to the end of key + buf := make([]byte, len(k)+1) + copy(buf, []byte(k)) + return buf +} + +// PrefixNext returns the next prefix key. +// +// Assume there are keys like: +// +// rowkey1 +// rowkey1_column1 +// rowkey1_column2 +// rowKey2 +// +// If we seek 'rowkey1' Next, we will get 'rowkey1_column1'. +// If we seek 'rowkey1' PrefixNext, we will get 'rowkey2'. +func (k Key) PrefixNext() Key { + buf := make([]byte, len(k)) + copy(buf, []byte(k)) + var i int + for i = len(k) - 1; i >= 0; i-- { + buf[i]++ + if buf[i] != 0 { + break + } + } + if i == -1 { + copy(buf, k) + buf = append(buf, 0) + } + return buf +} + +// Cmp returns the comparison result of two key. +// The result will be 0 if a==b, -1 if a < b, and +1 if a > b. +func (k Key) Cmp(another Key) int { + return bytes.Compare(k, another) +} + +// HasPrefix tests whether the Key begins with prefix. +func (k Key) HasPrefix(prefix Key) bool { + return bytes.HasPrefix(k, prefix) +} + +// Clone returns a deep copy of the Key. +func (k Key) Clone() Key { + ck := make([]byte, len(k)) + copy(ck, []byte(k)) + return ck +} + +// String implements fmt.Stringer interface. +func (k Key) String() string { + return hex.EncodeToString(k) +} + +// KeyRange represents a range where StartKey <= key < EndKey. +type KeyRange struct { + StartKey Key + EndKey Key +} + +// IsPoint checks if the key range represents a point. +func (r *KeyRange) IsPoint() bool { + if len(r.StartKey) != len(r.EndKey) { + // Works like + // return bytes.Equal(r.StartKey.Next(), r.EndKey) + + startLen := len(r.StartKey) + return startLen+1 == len(r.EndKey) && + r.EndKey[startLen] == 0 && + bytes.Equal(r.StartKey, r.EndKey[:startLen]) + } + // Works like + // return bytes.Equal(r.StartKey.PrefixNext(), r.EndKey) + + i := len(r.StartKey) - 1 + for ; i >= 0; i-- { + if r.StartKey[i] != 255 { + break + } + if r.EndKey[i] != 0 { + return false + } + } + if i < 0 { + // In case all bytes in StartKey are 255. 
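+ // For example, StartKey = [0xff 0xff]: its PrefixNext would be
+ // [0xff 0xff 0x00], which is longer than StartKey, so no EndKey of the
+ // same length can make this range a point.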
+ return false + } + // The byte at diffIdx in StartKey should be one less than the byte at diffIdx in EndKey. + // And bytes in StartKey and EndKey before diffIdx should be equal. + diffOneIdx := i + return r.StartKey[diffOneIdx]+1 == r.EndKey[diffOneIdx] && + bytes.Equal(r.StartKey[:diffOneIdx], r.EndKey[:diffOneIdx]) +} diff --git a/kv/key_test.go b/kv/key_test.go new file mode 100644 index 0000000..411743e --- /dev/null +++ b/kv/key_test.go @@ -0,0 +1,126 @@ +// Copyright 2015 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package kv + +import ( + "bytes" + "errors" + "testing" + "time" + + . "github.com/pingcap/check" + "github.com/pingcap/tidb/sessionctx/stmtctx" + "github.com/pingcap/tidb/types" + "github.com/pingcap/tidb/util/codec" + "github.com/pingcap/tidb/util/testleak" +) + +var _ = Suite(&testKeySuite{}) + +type testKeySuite struct { +} + +func (s *testKeySuite) TestPartialNext(c *C) { + defer testleak.AfterTest(c)() + sc := &stmtctx.StatementContext{TimeZone: time.Local} + // keyA represents a multi column index. + keyA, err := codec.EncodeValue(sc, nil, types.NewDatum("abc"), types.NewDatum("def")) + c.Check(err, IsNil) + keyB, err := codec.EncodeValue(sc, nil, types.NewDatum("abca"), types.NewDatum("def")) + c.Check(err, IsNil) + + // We only use first column value to seek. + seekKey, err := codec.EncodeValue(sc, nil, types.NewDatum("abc")) + c.Check(err, IsNil) + + nextKey := Key(seekKey).Next() + cmp := bytes.Compare(nextKey, keyA) + c.Assert(cmp, Equals, -1) + + // Use next partial key, we can skip all index keys with first column value equal to "abc". 
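+ // PrefixNext bumps the last byte of the encoded "abc" prefix, so the
+ // resulting key sorts after every key that extends that prefix.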
+ nextPartialKey := Key(seekKey).PrefixNext() + cmp = bytes.Compare(nextPartialKey, keyA) + c.Assert(cmp, Equals, 1) + + cmp = bytes.Compare(nextPartialKey, keyB) + c.Assert(cmp, Equals, -1) +} + +func (s *testKeySuite) TestIsPoint(c *C) { + tests := []struct { + start []byte + end []byte + isPoint bool + }{ + { + start: Key("rowkey1"), + end: Key("rowkey2"), + isPoint: true, + }, + { + start: Key("rowkey1"), + end: Key("rowkey3"), + isPoint: false, + }, + { + start: Key(""), + end: []byte{0}, + isPoint: true, + }, + { + start: []byte{123, 123, 255, 255}, + end: []byte{123, 124, 0, 0}, + isPoint: true, + }, + { + start: []byte{123, 123, 255, 255}, + end: []byte{123, 124, 0, 1}, + isPoint: false, + }, + { + start: []byte{123, 123}, + end: []byte{123, 123, 0}, + isPoint: true, + }, + { + start: []byte{255}, + end: []byte{0}, + isPoint: false, + }, + } + for _, tt := range tests { + kr := KeyRange{ + StartKey: tt.start, + EndKey: tt.end, + } + c.Check(kr.IsPoint(), Equals, tt.isPoint) + } +} + +func (s *testKeySuite) TestBasicFunc(c *C) { + c.Assert(IsTxnRetryableError(nil), IsFalse) + c.Assert(IsTxnRetryableError(ErrTxnRetryable), IsTrue) + c.Assert(IsTxnRetryableError(errors.New("test")), IsFalse) +} + +func BenchmarkIsPoint(b *testing.B) { + b.ReportAllocs() + kr := KeyRange{ + StartKey: []byte("rowkey1"), + EndKey: []byte("rowkey2"), + } + for i := 0; i < b.N; i++ { + kr.IsPoint() + } +} diff --git a/kv/kv.go b/kv/kv.go new file mode 100644 index 0000000..3714ff7 --- /dev/null +++ b/kv/kv.go @@ -0,0 +1,310 @@ +// Copyright 2015 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package kv + +import ( + "context" + "time" + + "github.com/pingcap/tidb/config" + "github.com/pingcap/tidb/store/tikv/oracle" +) + +// Transaction options +const ( + // PresumeKeyNotExists indicates that when dealing with a Get operation but failing to read data from cache, + // we presume that the key does not exist in Store. The actual existence will be checked before the + // transaction's commit. + // This option is an optimization for frequent checks during a transaction, e.g. batch inserts. + PresumeKeyNotExists Option = iota + 1 + // PresumeKeyNotExistsError is the option key for error. + // When PresumeKeyNotExists is set and condition is not match, should throw the error. + PresumeKeyNotExistsError + // SchemaChecker is used for checking schema-validity. + SchemaChecker + // IsolationLevel sets isolation level for current transaction. The default level is SI. + IsolationLevel + // Priority marks the priority of this transaction. + Priority + // NotFillCache makes this request do not touch the LRU cache of the underlying storage. + NotFillCache + // SyncLog decides whether the WAL(write-ahead log) of this request should be synchronized. + SyncLog + // KeyOnly retrieve only keys, it can be used in scan now. + KeyOnly + // SnapshotTS is defined to set snapshot ts. + SnapshotTS + // Set replica read + ReplicaRead +) + +// Priority value for transaction priority. 
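+// These values are intended to be attached to a transaction through
+// Transaction.SetOption, e.g. (a hypothetical sketch):
+//
+//	txn.SetOption(Priority, PriorityHigh)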
+const ( + PriorityNormal = iota + PriorityLow + PriorityHigh +) + +// UnCommitIndexKVFlag uses to indicate the index key/value is no need to commit. +// This is used in the situation of the index key/value was unchanged when do update. +// Usage: +// 1. For non-unique index: normally, the index value is '0'. +// Change the value to '1' indicate the index key/value is no need to commit. +// 2. For unique index: normally, the index value is the record handle ID, 8 bytes. +// Append UnCommitIndexKVFlag to the value indicate the index key/value is no need to commit. +const UnCommitIndexKVFlag byte = '1' + +// MaxTxnTimeUse is the max time a Txn may use (in ms) from its begin to commit. +// We use it to abort the transaction to guarantee GC worker will not influence it. +const MaxTxnTimeUse = 24 * 60 * 60 * 1000 + +// IsoLevel is the transaction's isolation level. +type IsoLevel int + +const ( + // SI stands for 'snapshot isolation'. + SI IsoLevel = iota + // RC stands for 'read committed'. + RC +) + +// ReplicaReadType is the type of replica to read data from +type ReplicaReadType byte + +const ( + // ReplicaReadLeader stands for 'read from leader'. + ReplicaReadLeader ReplicaReadType = 1 << iota + // ReplicaReadFollower stands for 'read from follower'. + ReplicaReadFollower +) + +// IsFollowerRead checks if leader is going to be used to read data. +func (r ReplicaReadType) IsFollowerRead() bool { + return r == ReplicaReadFollower +} + +// Those limits is enforced to make sure the transaction can be well handled by TiKV. +var ( + // TxnEntrySizeLimit is limit of single entry size (len(key) + len(value)). + TxnEntrySizeLimit = 6 * 1024 * 1024 + // TxnTotalSizeLimit is limit of the sum of all entry size. + TxnTotalSizeLimit uint64 = config.DefTxnTotalSizeLimit +) + +// Retriever is the interface wraps the basic Get and Seek methods. +type Retriever interface { + // Get gets the value for key k from kv store. + // If corresponding kv pair does not exist, it returns nil and ErrNotExist. + Get(ctx context.Context, k Key) ([]byte, error) + // Iter creates an Iterator positioned on the first entry that k <= entry's key. + // If such entry is not found, it returns an invalid Iterator with no error. + // It yields only keys that < upperBound. If upperBound is nil, it means the upperBound is unbounded. + // The Iterator must be Closed after use. + Iter(k Key, upperBound Key) (Iterator, error) + + // IterReverse creates a reversed Iterator positioned on the first entry which key is less than k. + // The returned iterator will iterate from greater key to smaller key. + // If k is nil, the returned iterator will be positioned at the last key. + // TODO: Add lower bound limit + IterReverse(k Key) (Iterator, error) +} + +// Mutator is the interface wraps the basic Set and Delete methods. +type Mutator interface { + // Set sets the value for key k as v into kv store. + // v must NOT be nil or empty, otherwise it returns ErrCannotSetNilValue. + Set(k Key, v []byte) error + // Delete removes the entry for key k from kv store. + Delete(k Key) error +} + +// RetrieverMutator is the interface that groups Retriever and Mutator interfaces. +type RetrieverMutator interface { + Retriever + Mutator +} + +// MemBuffer is an in-memory kv collection, can be used to buffer write operations. +type MemBuffer interface { + RetrieverMutator + // Size returns sum of keys and values length. + Size() int + // Len returns the number of entries in the DB. 
+ Len() int + // Reset cleanup the MemBuffer + Reset() + // SetCap sets the MemBuffer capability, to reduce memory allocations. + // Please call it before you use the MemBuffer, otherwise it will not works. + SetCap(cap int) +} + +// Transaction defines the interface for operations inside a Transaction. +// This is not thread safe. +type Transaction interface { + MemBuffer + // Commit commits the transaction operations to KV store. + Commit(context.Context) error + // Rollback undoes the transaction operations to KV store. + Rollback() error + // String implements fmt.Stringer interface. + String() string + // LockKeys tries to lock the entries with the keys in KV store. + LockKeys(ctx context.Context, lockCtx *LockCtx, keys ...Key) error + // SetOption sets an option with a value, when val is nil, uses the default + // value of this option. + SetOption(opt Option, val interface{}) + // DelOption deletes an option. + DelOption(opt Option) + // IsReadOnly checks if the transaction has only performed read operations. + IsReadOnly() bool + // StartTS returns the transaction start timestamp. + StartTS() uint64 + // Valid returns if the transaction is valid. + // A transaction become invalid after commit or rollback. + Valid() bool + // GetMemBuffer return the MemBuffer binding to this transaction. + GetMemBuffer() MemBuffer + // SetVars sets variables to the transaction. + SetVars(vars *Variables) +} + +// LockCtx contains information for LockKeys method. +type LockCtx struct { + Killed *uint32 + ForUpdateTS uint64 +} + +// Client is used to send request to KV layer. +type Client interface { + // Send sends request to KV layer, returns a Response. + Send(ctx context.Context, req *Request, vars *Variables) Response + + // IsRequestTypeSupported checks if reqType and subType is supported. + IsRequestTypeSupported(reqType, subType int64) bool +} + +// ReqTypes. +const ( + ReqTypeSelect = 101 + ReqTypeIndex = 102 + ReqTypeDAG = 103 + ReqTypeAnalyze = 104 + + ReqSubTypeBasic = 0 + ReqSubTypeDesc = 10000 + ReqSubTypeGroupBy = 10001 + ReqSubTypeTopN = 10002 + ReqSubTypeSignature = 10003 + ReqSubTypeAnalyzeIdx = 10004 +) + +// Request represents a kv request. +type Request struct { + // Tp is the request type. + Tp int64 + StartTs uint64 + Data []byte + KeyRanges []KeyRange + + // Concurrency is 1, if it only sends the request to a single storage unit when + // ResponseIterator.Next is called. If concurrency is greater than 1, the request will be + // sent to multiple storage units concurrently. + Concurrency int + // IsolationLevel is the isolation level, default is SI. + IsolationLevel IsoLevel + // KeepOrder is true, if the response should be returned in order. + KeepOrder bool + // Desc is true, if the request is sent in descending order. + Desc bool + // NotFillCache makes this request do not touch the LRU cache of the underlying storage. + NotFillCache bool + // SyncLog decides whether the WAL(write-ahead log) of this request should be synchronized. + SyncLog bool + // ReplicaRead is used for reading data from replicas, only follower is supported at this time. + ReplicaRead ReplicaReadType +} + +// ResultSubset represents a result subset from a single storage unit. +// TODO: Find a better interface for ResultSubset that can reuse bytes. +type ResultSubset interface { + // GetData gets the data. + GetData() []byte + // MemSize returns how many bytes of memory this result use for tracing memory usage. + MemSize() int64 + // RespTime returns the response time for the request. 
+ RespTime() time.Duration +} + +// Response represents the response returned from KV layer. +type Response interface { + // Next returns a resultSubset from a single storage unit. + // When full result set is returned, nil is returned. + Next(ctx context.Context) (resultSubset ResultSubset, err error) + // Close response. + Close() error +} + +// Snapshot defines the interface for the snapshot fetched from KV store. +type Snapshot interface { + Retriever +} + +// Driver is the interface that must be implemented by a KV storage. +type Driver interface { + // Open returns a new Storage. + // The path is the string for storage specific format. + Open(path string) (Storage, error) +} + +// Storage defines the interface for storage. +// Isolation should be at least SI(SNAPSHOT ISOLATION) +type Storage interface { + // Begin transaction + Begin() (Transaction, error) + // BeginWithStartTS begins transaction with startTS. + BeginWithStartTS(startTS uint64) (Transaction, error) + // GetSnapshot gets a snapshot that is able to read any data which data is <= ver. + // if ver is MaxVersion or > current max committed version, we will use current version for this snapshot. + GetSnapshot(ver Version) (Snapshot, error) + // GetClient gets a client instance. + GetClient() Client + // Close store + Close() error + // UUID return a unique ID which represents a Storage. + UUID() string + // CurrentVersion returns current max committed version. + CurrentVersion() (Version, error) + // GetOracle gets a timestamp oracle client. + GetOracle() oracle.Oracle + // SupportDeleteRange gets the storage support delete range or not. + SupportDeleteRange() (supported bool) + // Name gets the name of the storage engine + Name() string + // Describe returns of brief introduction of the storage + Describe() string + // ShowStatus returns the specified status of the storage + ShowStatus(ctx context.Context, key string) (interface{}, error) +} + +// FnKeyCmp is the function for iterator the keys +type FnKeyCmp func(key Key) bool + +// Iterator is the interface for a iterator on KV store. +type Iterator interface { + Valid() bool + Key() Key + Value() []byte + Next() error + Close() +} diff --git a/kv/mem_buffer_test.go b/kv/mem_buffer_test.go new file mode 100644 index 0000000..297e5da --- /dev/null +++ b/kv/mem_buffer_test.go @@ -0,0 +1,299 @@ +// Copyright 2015 PingCAP, Inc. +// +// Copyright 2015 Wenbin Xiao +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package kv + +import ( + "context" + "fmt" + "math/rand" + "testing" + + . 
"github.com/pingcap/check" + "github.com/pingcap/tidb/util/testleak" +) + +const ( + startIndex = 0 + testCount = 2 + indexStep = 2 +) + +func TestT(t *testing.T) { + CustomVerboseFlag = true + TestingT(t) +} + +var _ = Suite(&testKVSuite{}) + +type testKVSuite struct { + bs []MemBuffer +} + +func (s *testKVSuite) SetUpSuite(c *C) { + s.bs = make([]MemBuffer, 1) + s.bs[0] = NewMemDbBuffer(DefaultTxnMembufCap) +} + +func (s *testKVSuite) ResetMembuffers() { + s.bs[0] = NewMemDbBuffer(DefaultTxnMembufCap) +} + +func insertData(c *C, buffer MemBuffer) { + for i := startIndex; i < testCount; i++ { + val := encodeInt(i * indexStep) + err := buffer.Set(val, val) + c.Assert(err, IsNil) + } +} + +func encodeInt(n int) []byte { + return []byte(fmt.Sprintf("%010d", n)) +} + +func decodeInt(s []byte) int { + var n int + fmt.Sscanf(string(s), "%010d", &n) + return n +} + +func valToStr(c *C, iter Iterator) string { + val := iter.Value() + return string(val) +} + +func checkNewIterator(c *C, buffer MemBuffer) { + for i := startIndex; i < testCount; i++ { + val := encodeInt(i * indexStep) + iter, err := buffer.Iter(val, nil) + c.Assert(err, IsNil) + c.Assert([]byte(iter.Key()), BytesEquals, val) + c.Assert(decodeInt([]byte(valToStr(c, iter))), Equals, i*indexStep) + iter.Close() + } + + // Test iterator Next() + for i := startIndex; i < testCount-1; i++ { + val := encodeInt(i * indexStep) + iter, err := buffer.Iter(val, nil) + c.Assert(err, IsNil) + c.Assert([]byte(iter.Key()), BytesEquals, val) + c.Assert(valToStr(c, iter), Equals, string(val)) + + err = iter.Next() + c.Assert(err, IsNil) + c.Assert(iter.Valid(), IsTrue) + + val = encodeInt((i + 1) * indexStep) + c.Assert([]byte(iter.Key()), BytesEquals, val) + c.Assert(valToStr(c, iter), Equals, string(val)) + iter.Close() + } + + // Non exist and beyond maximum seek test + iter, err := buffer.Iter(encodeInt(testCount*indexStep), nil) + c.Assert(err, IsNil) + c.Assert(iter.Valid(), IsFalse) + + // Non exist but between existing keys seek test, + // it returns the smallest key that larger than the one we are seeking + inBetween := encodeInt((testCount-1)*indexStep - 1) + last := encodeInt((testCount - 1) * indexStep) + iter, err = buffer.Iter(inBetween, nil) + c.Assert(err, IsNil) + c.Assert(iter.Valid(), IsTrue) + c.Assert([]byte(iter.Key()), Not(BytesEquals), inBetween) + c.Assert([]byte(iter.Key()), BytesEquals, last) + iter.Close() +} + +func mustGet(c *C, buffer MemBuffer) { + for i := startIndex; i < testCount; i++ { + s := encodeInt(i * indexStep) + val, err := buffer.Get(context.TODO(), s) + c.Assert(err, IsNil) + c.Assert(string(val), Equals, string(s)) + } +} + +func (s *testKVSuite) TestGetSet(c *C) { + defer testleak.AfterTest(c)() + for _, buffer := range s.bs { + insertData(c, buffer) + mustGet(c, buffer) + } + s.ResetMembuffers() +} + +func (s *testKVSuite) TestNewIterator(c *C) { + defer testleak.AfterTest(c)() + for _, buffer := range s.bs { + // should be invalid + iter, err := buffer.Iter(nil, nil) + c.Assert(err, IsNil) + c.Assert(iter.Valid(), IsFalse) + + insertData(c, buffer) + checkNewIterator(c, buffer) + } + s.ResetMembuffers() +} + +func (s *testKVSuite) TestIterNextUntil(c *C) { + defer testleak.AfterTest(c)() + buffer := NewMemDbBuffer(DefaultTxnMembufCap) + insertData(c, buffer) + + iter, err := buffer.Iter(nil, nil) + c.Assert(err, IsNil) + + err = NextUntil(iter, func(k Key) bool { + return false + }) + c.Assert(err, IsNil) + c.Assert(iter.Valid(), IsFalse) +} + +func (s *testKVSuite) TestBasicNewIterator(c *C) { + defer 
testleak.AfterTest(c)() + for _, buffer := range s.bs { + it, err := buffer.Iter([]byte("2"), nil) + c.Assert(err, IsNil) + c.Assert(it.Valid(), IsFalse) + } +} + +func (s *testKVSuite) TestNewIteratorMin(c *C) { + defer testleak.AfterTest(c)() + kvs := []struct { + key string + value string + }{ + {"DATA_test_main_db_tbl_tbl_test_record__00000000000000000001", "lock-version"}, + {"DATA_test_main_db_tbl_tbl_test_record__00000000000000000001_0002", "1"}, + {"DATA_test_main_db_tbl_tbl_test_record__00000000000000000001_0003", "hello"}, + {"DATA_test_main_db_tbl_tbl_test_record__00000000000000000002", "lock-version"}, + {"DATA_test_main_db_tbl_tbl_test_record__00000000000000000002_0002", "2"}, + {"DATA_test_main_db_tbl_tbl_test_record__00000000000000000002_0003", "hello"}, + } + for _, buffer := range s.bs { + for _, kv := range kvs { + err := buffer.Set([]byte(kv.key), []byte(kv.value)) + c.Assert(err, IsNil) + } + + cnt := 0 + it, err := buffer.Iter(nil, nil) + c.Assert(err, IsNil) + for it.Valid() { + cnt++ + err := it.Next() + c.Assert(err, IsNil) + } + c.Assert(cnt, Equals, 6) + + it, err = buffer.Iter([]byte("DATA_test_main_db_tbl_tbl_test_record__00000000000000000000"), nil) + c.Assert(err, IsNil) + c.Assert(string(it.Key()), Equals, "DATA_test_main_db_tbl_tbl_test_record__00000000000000000001") + } + s.ResetMembuffers() +} + +func (s *testKVSuite) TestBufferLimit(c *C) { + buffer := NewMemDbBuffer(DefaultTxnMembufCap).(*memDbBuffer) + buffer.bufferSizeLimit = 1000 + buffer.entrySizeLimit = 500 + + err := buffer.Set([]byte("x"), make([]byte, 500)) + c.Assert(err, NotNil) // entry size limit + + err = buffer.Set([]byte("x"), make([]byte, 499)) + c.Assert(err, IsNil) + err = buffer.Set([]byte("yz"), make([]byte, 499)) + c.Assert(err, NotNil) // buffer size limit +} + +var opCnt = 100000 + +func BenchmarkMemDbBufferSequential(b *testing.B) { + data := make([][]byte, opCnt) + for i := 0; i < opCnt; i++ { + data[i] = encodeInt(i) + } + buffer := NewMemDbBuffer(DefaultTxnMembufCap) + benchmarkSetGet(b, buffer, data) + b.ReportAllocs() +} + +func BenchmarkMemDbBufferRandom(b *testing.B) { + data := make([][]byte, opCnt) + for i := 0; i < opCnt; i++ { + data[i] = encodeInt(i) + } + shuffle(data) + buffer := NewMemDbBuffer(DefaultTxnMembufCap) + benchmarkSetGet(b, buffer, data) + b.ReportAllocs() +} + +func BenchmarkMemDbIter(b *testing.B) { + buffer := NewMemDbBuffer(DefaultTxnMembufCap) + benchIterator(b, buffer) + b.ReportAllocs() +} + +func BenchmarkMemDbCreation(b *testing.B) { + for i := 0; i < b.N; i++ { + NewMemDbBuffer(DefaultTxnMembufCap) + } + b.ReportAllocs() +} + +func shuffle(slc [][]byte) { + N := len(slc) + for i := 0; i < N; i++ { + // choose index uniformly in [i, N-1] + r := i + rand.Intn(N-i) + slc[r], slc[i] = slc[i], slc[r] + } +} +func benchmarkSetGet(b *testing.B, buffer MemBuffer, data [][]byte) { + b.ResetTimer() + for i := 0; i < b.N; i++ { + for _, k := range data { + buffer.Set(k, k) + } + for _, k := range data { + buffer.Get(context.TODO(), k) + } + } +} + +func benchIterator(b *testing.B, buffer MemBuffer) { + for k := 0; k < opCnt; k++ { + buffer.Set(encodeInt(k), encodeInt(k)) + } + b.ResetTimer() + for i := 0; i < b.N; i++ { + iter, err := buffer.Iter(nil, nil) + if err != nil { + b.Error(err) + } + for iter.Valid() { + iter.Next() + } + iter.Close() + } +} diff --git a/kv/memdb/arena.go b/kv/memdb/arena.go new file mode 100644 index 0000000..4494a8c --- /dev/null +++ b/kv/memdb/arena.go @@ -0,0 +1,131 @@ +// Copyright 2019 PingCAP, Inc. 
+// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package memdb + +import "math" + +type arenaAddr struct { + blockIdx uint32 + blockOffset uint32 +} + +func (addr arenaAddr) isNull() bool { + return addr.blockIdx == 0 && addr.blockOffset == 0 +} + +func newArenaAddr(idx int, offset uint32) arenaAddr { + return arenaAddr{ + blockIdx: uint32(idx) + 1, + blockOffset: offset, + } +} + +const ( + nullBlockOffset = math.MaxUint32 + maxBlockSize = 128 << 20 +) + +type arena struct { + blockSize int + availIdx int + blocks []arenaBlock +} + +func newArenaLocator(initBlockSize int) *arena { + return &arena{ + blockSize: initBlockSize, + blocks: []arenaBlock{newArenaBlock(initBlockSize)}, + } +} + +func (a *arena) getFrom(addr arenaAddr) []byte { + return a.blocks[addr.blockIdx-1].getFrom(addr.blockOffset) +} + +func (a *arena) alloc(size int) (arenaAddr, []byte) { + if size >= maxBlockSize { + // Use a separate block to store entry which size larger than specified block size. + blk := newArenaBlock(size) + blk.length = size + a.blocks = append(a.blocks, blk) + + addr := newArenaAddr(len(a.blocks)-1, 0) + return addr, blk.buf + } + + addr, data := a.allocInBlock(a.availIdx, size) + if !addr.isNull() { + return addr, data + } + + a.enlarge(size) + return a.allocInBlock(a.availIdx, size) +} + +func (a *arena) enlarge(size int) { + a.blockSize <<= 1 + for a.blockSize <= size { + a.blockSize <<= 1 + } + // Size always less than maxBlockSize. + if a.blockSize > maxBlockSize { + a.blockSize = maxBlockSize + } + a.blocks = append(a.blocks, newArenaBlock(a.blockSize)) + a.availIdx = int(uint32(len(a.blocks) - 1)) +} + +func (a *arena) allocInBlock(idx, size int) (arenaAddr, []byte) { + offset, data := a.blocks[idx].alloc(size) + if offset == nullBlockOffset { + return arenaAddr{}, nil + } + return newArenaAddr(idx, offset), data +} + +func (a *arena) reset() { + a.availIdx = 0 + a.blockSize = len(a.blocks[0].buf) + a.blocks = []arenaBlock{a.blocks[0]} + a.blocks[0].reset() +} + +type arenaBlock struct { + buf []byte + length int +} + +func newArenaBlock(blockSize int) arenaBlock { + return arenaBlock{ + buf: make([]byte, blockSize), + } +} + +func (a *arenaBlock) getFrom(offset uint32) []byte { + return a.buf[offset:] +} + +func (a *arenaBlock) alloc(size int) (uint32, []byte) { + offset := a.length + newLen := offset + size + if newLen > len(a.buf) { + return nullBlockOffset, nil + } + a.length = newLen + return uint32(offset), a.buf[offset : offset+size] +} + +func (a *arenaBlock) reset() { + a.length = 0 +} diff --git a/kv/memdb/iterator.go b/kv/memdb/iterator.go new file mode 100644 index 0000000..ec50995 --- /dev/null +++ b/kv/memdb/iterator.go @@ -0,0 +1,102 @@ +// Copyright 2019 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package memdb + +import "unsafe" + +// Iterator iterates the entries in the DB. +type Iterator struct { + db *DB + curr *node + key []byte + val []byte +} + +// NewIterator returns a new Iterator for the lock store. +func (db *DB) NewIterator() Iterator { + return Iterator{ + db: db, + } +} + +// Valid returns true if the iterator is positioned at a valid node. +func (it *Iterator) Valid() bool { return it.curr != nil } + +// Key returns the key at the current position. +func (it *Iterator) Key() []byte { + return it.key +} + +// Value returns value. +func (it *Iterator) Value() []byte { + return it.val +} + +// Next moves the iterator to the next entry. +func (it *Iterator) Next() { + it.changeToAddr(it.curr.nexts[0]) +} + +// Prev moves the iterator to the previous entry. +func (it *Iterator) Prev() { + it.changeToAddr(it.curr.prev) +} + +// Seek locates the iterator to the first entry with a key >= seekKey. +func (it *Iterator) Seek(seekKey []byte) { + node, nodeData, _ := it.db.findGreaterEqual(seekKey) // find >=. + it.updateState(node, nodeData) +} + +// SeekForPrev locates the iterator to the last entry with key <= target. +func (it *Iterator) SeekForPrev(target []byte) { + node, nodeData, _ := it.db.findLess(target, true) // find <=. + it.updateState(node, nodeData) +} + +// SeekForExclusivePrev locates the iterator to the last entry with key < target. +func (it *Iterator) SeekForExclusivePrev(target []byte) { + node, nodeData, _ := it.db.findLess(target, false) + it.updateState(node, nodeData) +} + +// SeekToFirst locates the iterator to the first entry. +func (it *Iterator) SeekToFirst() { + node, nodeData := it.db.getNext(it.db.head.node, 0) + it.updateState(node, nodeData) +} + +// SeekToLast locates the iterator to the last entry. +func (it *Iterator) SeekToLast() { + node, nodeData := it.db.findLast() + it.updateState(node, nodeData) +} + +func (it *Iterator) updateState(node *node, nodeData []byte) { + it.curr = node + if node != nil { + it.key = node.getKey(nodeData) + it.val = node.getValue(nodeData) + } +} + +func (it *Iterator) changeToAddr(addr arenaAddr) { + var data []byte + var n *node + if !addr.isNull() { + data = it.db.arena.getFrom(addr) + n = (*node)(unsafe.Pointer(&data[0])) + } + it.updateState(n, data) +} diff --git a/kv/memdb/memdb.go b/kv/memdb/memdb.go new file mode 100644 index 0000000..57c05c9 --- /dev/null +++ b/kv/memdb/memdb.go @@ -0,0 +1,364 @@ +// Copyright 2019 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package memdb + +import ( + "bytes" + "math" + "unsafe" +) + +const ( + maxHeight = 16 + nodeHeaderSize = int(unsafe.Sizeof(nodeHeader{})) +) + +// DB is an in-memory key/value database. 
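+//
+// A minimal usage sketch:
+//
+//	db := New(4 * 1024)
+//	db.Put([]byte("k"), []byte("v"))
+//	v := db.Get([]byte("k")) // []byte("v")
+//	it := db.NewIterator()
+//	for it.SeekToFirst(); it.Valid(); it.Next() {
+//		_ = it.Key() // entries are visited in ascending key order
+//	}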
+type DB struct { + height int + head nodeWithAddr + arena *arena + + length int + size int +} + +// New creates a new initialized in-memory key/value DB. +// The initBlockSize is the size of first block. +// This DB is append-only, deleting an entry would remove entry node but not +// reclaim KV buffer. +func New(initBlockSize int) *DB { + return &DB{ + height: 1, + head: nodeWithAddr{node: new(node)}, + arena: newArenaLocator(initBlockSize), + } +} + +// Reset resets the DB to initial empty state. +// Release all blocks except the initial one. +func (db *DB) Reset() { + db.height = 1 + db.head.node = new(node) + db.length = 0 + db.size = 0 + db.arena.reset() +} + +// Get gets the value for the given key. It returns nil if the +// DB does not contain the key. +func (db *DB) Get(key []byte) []byte { + node, data, match := db.findGreaterEqual(key) + if !match { + return nil + } + return node.getValue(data) +} + +// Put sets the value for the given key. +// It overwrites any previous value for that key. +func (db *DB) Put(key []byte, v []byte) bool { + arena := db.arena + lsHeight := db.height + var prev [maxHeight + 1]nodeWithAddr + var next [maxHeight + 1]nodeWithAddr + prev[lsHeight] = db.head + + var exists bool + for i := lsHeight - 1; i >= 0; i-- { + // Use higher level to speed up for current level. + prev[i], next[i], exists = db.findSpliceForLevel(db.arena, key, prev[i+1], i) + } + + var height int + if !exists { + height = db.randomHeight() + } else { + height = db.prepareOverwrite(next[:]) + } + + x, addr := db.newNode(arena, key, v, height) + if height > lsHeight { + db.height = height + } + + // We always insert from the base level and up. After you add a node in base level, we cannot + // create a node in the level above because it would have discovered the node in the base level. + for i := 0; i < height; i++ { + x.nexts[i] = next[i].addr + if prev[i].node == nil { + prev[i] = db.head + } + prev[i].nexts[i] = addr + } + + x.prev = prev[0].addr + if next[0].node != nil { + next[0].prev = addr + } + + db.length++ + db.size += len(key) + len(v) + return true +} + +// The pointers in findSpliceForLevel may point to the node which going to be overwrite, +// prepareOverwrite update them to point to the next node, so we can link new node with the list correctly. +func (db *DB) prepareOverwrite(next []nodeWithAddr) int { + old := next[0] + + // Update necessary states. + db.size -= int(old.valLen) + int(old.keyLen) + db.length-- + + height := int(old.height) + for i := 0; i < height; i++ { + if next[i].addr == old.addr { + next[i].addr = old.nexts[i] + if !next[i].addr.isNull() { + data := db.arena.getFrom(next[i].addr) + next[i].node = (*node)(unsafe.Pointer(&data[0])) + } + } + } + return height +} + +// Delete deletes the value for the given key. +// It returns false if the DB does not contain the key. 
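+// Note that Delete only unlinks the entry's skiplist node; the key/value bytes
+// stay in the append-only arena until Reset is called (see the comment on New).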
+func (db *DB) Delete(key []byte) bool { + listHeight := db.height + var prev [maxHeight + 1]nodeWithAddr + prev[listHeight] = db.head + + var keyNode nodeWithAddr + var match bool + for i := listHeight - 1; i >= 0; i-- { + prev[i], keyNode, match = db.findSpliceForLevel(db.arena, key, prev[i+1], i) + } + if !match { + return false + } + + for i := int(keyNode.height) - 1; i >= 0; i-- { + prev[i].nexts[i] = keyNode.nexts[i] + } + nextAddr := keyNode.nexts[0] + if !nextAddr.isNull() { + nextData := db.arena.getFrom(nextAddr) + next := (*node)(unsafe.Pointer(&nextData[0])) + next.prev = prev[0].addr + } + + db.length-- + db.size -= int(keyNode.keyLen) + int(keyNode.valLen) + return true +} + +// Len returns the number of entries in the DB. +func (db *DB) Len() int { + return db.length +} + +// Size returns sum of keys and values length. Note that deleted +// key/value will not be accounted for, but it will still consume +// the buffer, since the buffer is append only. +func (db *DB) Size() int { + return db.size +} + +// findSpliceForLevel returns (outBefore, outAfter) with outBefore.key < key <= outAfter.key. +// The input "before" tells us where to start looking. +// If we found a node with the same key, then we return true. +func (db *DB) findSpliceForLevel(arena *arena, key []byte, before nodeWithAddr, level int) (nodeWithAddr, nodeWithAddr, bool) { + for { + // Assume before.key < key. + nextAddr := before.nexts[level] + if nextAddr.isNull() { + return before, nodeWithAddr{}, false + } + data := arena.getFrom(nextAddr) + next := nodeWithAddr{(*node)(unsafe.Pointer(&data[0])), nextAddr} + nextKey := next.getKey(data) + cmp := bytes.Compare(nextKey, key) + if cmp >= 0 { + // before.key < key < next.key. We are done for this level. + return before, next, cmp == 0 + } + before = next // Keep moving right on this level. + } +} + +func (db *DB) findGreaterEqual(key []byte) (*node, []byte, bool) { + prev := db.head.node + level := db.height - 1 + + for { + var nextData []byte + var next *node + addr := prev.nexts[level] + if !addr.isNull() { + arena := db.arena + nextData = arena.getFrom(addr) + next = (*node)(unsafe.Pointer(&nextData[0])) + + nextKey := next.getKey(nextData) + cmp := bytes.Compare(nextKey, key) + if cmp < 0 { + // next key is still smaller, keep moving. + prev = next + continue + } + if cmp == 0 { + // prev.key < key == next.key. + return next, nextData, true + } + } + // next is greater than key or next is nil. go to the lower level. + if level > 0 { + level-- + continue + } + return next, nextData, false + } +} + +func (db *DB) findLess(key []byte, allowEqual bool) (*node, []byte, bool) { + var prevData []byte + prev := db.head.node + level := db.height - 1 + + for { + next, nextData := db.getNext(prev, level) + if next != nil { + cmp := bytes.Compare(key, next.getKey(nextData)) + if cmp > 0 { + // prev.key < next.key < key. We can continue to move right. + prev = next + prevData = nextData + continue + } + if cmp == 0 && allowEqual { + // prev.key < key == next.key. + return next, nextData, true + } + } + // get closer to the key in the lower level. + if level > 0 { + level-- + continue + } + break + } + + // We are not going to return head. + if prev == db.head.node { + return nil, nil, false + } + return prev, prevData, false +} + +// findLast returns the last element. If head (empty db), we return nil. All the find functions +// will NEVER return the head nodes. 
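+// Like the other find helpers, it walks right along each level and then drops
+// one level, so the expected cost is logarithmic in the number of entries.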
+func (db *DB) findLast() (*node, []byte) { + var nodeData []byte + node := db.head.node + level := db.height - 1 + + for { + next, nextData := db.getNext(node, level) + if next != nil { + node = next + nodeData = nextData + continue + } + if level == 0 { + if node == db.head.node { + return nil, nil + } + return node, nodeData + } + level-- + } +} + +func (db *DB) newNode(arena *arena, key []byte, v []byte, height int) (*node, arenaAddr) { + // The base level is already allocated in the node struct. + nodeSize := nodeHeaderSize + height*8 + 8 + len(key) + len(v) + addr, data := arena.alloc(nodeSize) + node := (*node)(unsafe.Pointer(&data[0])) + node.keyLen = uint16(len(key)) + node.height = uint16(height) + node.valLen = uint32(len(v)) + copy(data[node.nodeLen():], key) + copy(data[node.nodeLen()+int(node.keyLen):], v) + return node, addr +} + +// fastRand is a fast thread local random function. +//go:linkname fastRand runtime.fastrand +func fastRand() uint32 + +func (db *DB) randomHeight() int { + h := 1 + for h < maxHeight && fastRand() < uint32(math.MaxUint32)/4 { + h++ + } + return h +} + +type nodeHeader struct { + height uint16 + keyLen uint16 + valLen uint32 +} + +type node struct { + nodeHeader + + // Addr of previous node at base level. + prev arenaAddr + // Height of the nexts. + nexts [maxHeight]arenaAddr +} + +type nodeWithAddr struct { + *node + addr arenaAddr +} + +func (n *node) nodeLen() int { + return int(n.height)*8 + 8 + nodeHeaderSize +} + +func (n *node) getKey(buf []byte) []byte { + nodeLen := n.nodeLen() + return buf[nodeLen : nodeLen+int(n.keyLen)] +} + +func (n *node) getValue(buf []byte) []byte { + nodeLenKeyLen := n.nodeLen() + int(n.keyLen) + return buf[nodeLenKeyLen : nodeLenKeyLen+int(n.valLen)] +} + +func (db *DB) getNext(n *node, level int) (*node, []byte) { + addr := n.nexts[level] + if addr.isNull() { + return nil, nil + } + arena := db.arena + data := arena.getFrom(addr) + node := (*node)(unsafe.Pointer(&data[0])) + return node, data +} diff --git a/kv/memdb/memdb.s b/kv/memdb/memdb.s new file mode 100644 index 0000000..d57f144 --- /dev/null +++ b/kv/memdb/memdb.s @@ -0,0 +1,12 @@ +// Copyright 2019 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. diff --git a/kv/memdb/memdb_test.go b/kv/memdb/memdb_test.go new file mode 100644 index 0000000..d1ce52b --- /dev/null +++ b/kv/memdb/memdb_test.go @@ -0,0 +1,389 @@ +// Copyright 2019 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package memdb + +import ( + "encoding/binary" + "math/rand" + "testing" + + . 
"github.com/pingcap/check" + "github.com/pingcap/goleveldb/leveldb/comparer" + "github.com/pingcap/goleveldb/leveldb/memdb" +) + +const ( + keySize = 16 + valueSize = 128 +) + +func TestT(t *testing.T) { + TestingT(t) +} + +type testMemDBSuite struct{} + +var _ = Suite(testMemDBSuite{}) + +func (s testMemDBSuite) TestGetSet(c *C) { + const cnt = 10000 + p := s.fillDB(cnt) + + var buf [4]byte + for i := 0; i < cnt; i++ { + binary.BigEndian.PutUint32(buf[:], uint32(i)) + v := p.Get(buf[:]) + c.Check(v, BytesEquals, buf[:]) + } +} + +func (s testMemDBSuite) TestIterator(c *C) { + const cnt = 10000 + p := s.fillDB(cnt) + + var buf [4]byte + var i int + it := p.NewIterator() + + for it.SeekToFirst(); it.Valid(); it.Next() { + binary.BigEndian.PutUint32(buf[:], uint32(i)) + c.Check(it.Key(), BytesEquals, buf[:]) + c.Check(it.Value(), BytesEquals, buf[:]) + i++ + } + c.Check(i, Equals, cnt) + + i-- + for it.SeekToLast(); it.Valid(); it.Prev() { + binary.BigEndian.PutUint32(buf[:], uint32(i)) + c.Check(it.Key(), BytesEquals, buf[:]) + c.Check(it.Value(), BytesEquals, buf[:]) + i-- + } + c.Check(i, Equals, -1) +} + +func (s testMemDBSuite) TestOverwrite(c *C) { + const cnt = 10000 + p := s.fillDB(cnt) + var buf [4]byte + + sz := p.Size() + for i := 0; i < cnt; i += 3 { + var newBuf [4]byte + binary.BigEndian.PutUint32(buf[:], uint32(i)) + binary.BigEndian.PutUint32(newBuf[:], uint32(i*10)) + p.Put(buf[:], newBuf[:]) + } + c.Check(p.Len(), Equals, cnt) + c.Check(p.Size(), Equals, sz) + + for i := 0; i < cnt; i++ { + binary.BigEndian.PutUint32(buf[:], uint32(i)) + v := binary.BigEndian.Uint32(p.Get(buf[:])) + if i%3 == 0 { + c.Check(v, Equals, uint32(i*10)) + } else { + c.Check(v, Equals, uint32(i)) + } + } + + it := p.NewIterator() + var i int + + for it.SeekToFirst(); it.Valid(); it.Next() { + binary.BigEndian.PutUint32(buf[:], uint32(i)) + c.Check(it.Key(), BytesEquals, buf[:]) + v := binary.BigEndian.Uint32(it.Value()) + if i%3 == 0 { + c.Check(v, Equals, uint32(i*10)) + } else { + c.Check(v, Equals, uint32(i)) + } + i++ + } + c.Check(i, Equals, cnt) + + i-- + for it.SeekToLast(); it.Valid(); it.Prev() { + binary.BigEndian.PutUint32(buf[:], uint32(i)) + c.Check(it.Key(), BytesEquals, buf[:]) + v := binary.BigEndian.Uint32(it.Value()) + if i%3 == 0 { + c.Check(v, Equals, uint32(i*10)) + } else { + c.Check(v, Equals, uint32(i)) + } + i-- + } + c.Check(i, Equals, -1) +} + +func (s testMemDBSuite) TestDelete(c *C) { + const cnt = 10000 + p := s.fillDB(cnt) + var buf [4]byte + + for i := 0; i < cnt; i += 3 { + binary.BigEndian.PutUint32(buf[:], uint32(i)) + p.Delete(buf[:]) + } + + for i := 0; i < cnt; i++ { + binary.BigEndian.PutUint32(buf[:], uint32(i)) + v := p.Get(buf[:]) + if i%3 == 0 { + c.Check(v, IsNil) + } else { + c.Check(v, BytesEquals, buf[:]) + } + } + + it := p.NewIterator() + var i int + + for it.SeekToFirst(); it.Valid(); it.Next() { + if i%3 == 0 { + i++ + } + binary.BigEndian.PutUint32(buf[:], uint32(i)) + c.Check(it.Key(), BytesEquals, buf[:]) + c.Check(it.Value(), BytesEquals, buf[:]) + i++ + } + + i-- + for it.SeekToLast(); it.Valid(); it.Prev() { + if i%3 == 0 { + i-- + } + binary.BigEndian.PutUint32(buf[:], uint32(i)) + c.Check(it.Key(), BytesEquals, buf[:]) + c.Check(it.Value(), BytesEquals, buf[:]) + i-- + } +} + +func (s testMemDBSuite) TestKVLargeThanBlock(c *C) { + p := New(4 * 1024) + p.Put([]byte{1}, make([]byte, 1)) + p.Put([]byte{2}, make([]byte, 4096)) + c.Check(len(p.arena.blocks), Equals, 2) + p.Put([]byte{3}, make([]byte, 3000)) + c.Check(len(p.arena.blocks), 
Equals, 2) + c.Check(len(p.Get([]byte{3})), Equals, 3000) +} + +func (s testMemDBSuite) TestEmptyDB(c *C) { + p := New(4 * 1024) + c.Check(p.Get([]byte{0}), IsNil) + c.Check(p.Delete([]byte{0}), IsFalse) + it := p.NewIterator() + it.SeekToFirst() + c.Check(it.Valid(), IsFalse) + it.SeekToLast() + c.Check(it.Valid(), IsFalse) + it.SeekForPrev([]byte{0}) + c.Check(it.Valid(), IsFalse) + it.SeekForExclusivePrev([]byte{0}) + c.Check(it.Valid(), IsFalse) + it.Seek([]byte{0xff}) + c.Check(it.Valid(), IsFalse) +} + +func (s testMemDBSuite) TestReset(c *C) { + p := s.fillDB(10000) + p.Reset() + c.Check(p.Get([]byte{0}), IsNil) + c.Check(p.Delete([]byte{0}), IsFalse) + c.Check(p.Size(), Equals, 0) + c.Check(p.Len(), Equals, 0) + + key := []byte{0} + p.Put(key, key) + c.Check(p.Get(key), BytesEquals, key) + c.Check(p.arena.availIdx, Equals, 0) + + it := p.NewIterator() + it.SeekToFirst() + c.Check(it.Key(), BytesEquals, key) + c.Check(it.Value(), BytesEquals, key) + it.Next() + c.Check(it.Valid(), IsFalse) + + it.SeekToLast() + c.Check(it.Key(), BytesEquals, key) + c.Check(it.Value(), BytesEquals, key) + it.Prev() + c.Check(it.Valid(), IsFalse) +} + +func (s testMemDBSuite) TestRandom(c *C) { + const cnt = 10000 + keys := make([][]byte, cnt) + for i := range keys { + keys[i] = make([]byte, rand.Intn(19)+1) + rand.Read(keys[i]) + } + + p1 := New(4 * 1024) + p2 := memdb.New(comparer.DefaultComparer, 4*1024) + for _, k := range keys { + p1.Put(k, k) + _ = p2.Put(k, k) + } + + c.Check(p1.Len(), Equals, p2.Len()) + c.Check(p1.Size(), Equals, p2.Size()) + + rand.Shuffle(cnt, func(i, j int) { keys[i], keys[j] = keys[j], keys[i] }) + + for _, k := range keys { + switch rand.Intn(4) { + case 0, 1: + newValue := make([]byte, rand.Intn(19)+1) + rand.Read(newValue) + p1.Put(k, newValue) + _ = p2.Put(k, newValue) + case 2: + p1.Delete(k) + _ = p2.Delete(k) + } + } + + c.Check(p1.Len(), Equals, p2.Len()) + c.Check(p1.Size(), Equals, p2.Size()) + + it1 := p1.NewIterator() + it1.SeekToFirst() + + it2 := p2.NewIterator(nil) + + var prevKey, prevVal []byte + for it2.First(); it2.Valid(); it2.Next() { + c.Check(it1.Key(), BytesEquals, it2.Key()) + c.Check(it1.Value(), BytesEquals, it2.Value()) + + it := p1.NewIterator() + it.Seek(it2.Key()) + c.Check(it.Key(), BytesEquals, it2.Key()) + c.Check(it.Value(), BytesEquals, it2.Value()) + + it.SeekForPrev(it2.Key()) + c.Check(it.Key(), BytesEquals, it2.Key()) + c.Check(it.Value(), BytesEquals, it2.Value()) + + if prevKey != nil { + it.SeekForExclusivePrev(it2.Key()) + c.Check(it.Key(), BytesEquals, prevKey) + c.Check(it.Value(), BytesEquals, prevVal) + } + + it1.Next() + prevKey = it2.Key() + prevVal = it2.Value() + } + + it1.SeekToLast() + for it2.Last(); it2.Valid(); it2.Prev() { + c.Check(it1.Key(), BytesEquals, it2.Key()) + c.Check(it1.Value(), BytesEquals, it2.Value()) + it1.Prev() + } +} + +func (s testMemDBSuite) fillDB(cnt int) *DB { + p := New(4 * 1024) + var buf [4]byte + for i := 0; i < cnt; i++ { + binary.BigEndian.PutUint32(buf[:], uint32(i)) + p.Put(buf[:], buf[:]) + } + return p +} + +func BenchmarkLargeIndex(b *testing.B) { + buf := make([][valueSize]byte, 10000000) + for i := range buf { + binary.LittleEndian.PutUint32(buf[i][:], uint32(i)) + } + p := New(4 * 1024 * 1024) + b.ResetTimer() + + for i := range buf { + p.Put(buf[i][:keySize], buf[i][:]) + } +} + +func BenchmarkPut(b *testing.B) { + buf := make([][valueSize]byte, b.N) + for i := range buf { + binary.LittleEndian.PutUint32(buf[i][:], uint32(i)) + } + + p := New(4 * 1024 * 1024) + b.ResetTimer() + 
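+ // Only the Put loop below is timed; buffer allocation and key generation
+ // above are excluded by ResetTimer.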
+ for i := range buf { + p.Put(buf[i][:keySize], buf[i][:]) + } +} + +func BenchmarkPutRandom(b *testing.B) { + buf := make([][valueSize]byte, b.N) + for i := range buf { + binary.LittleEndian.PutUint32(buf[i][:], uint32(rand.Int())) + } + + p := New(4 * 1024 * 1024) + b.ResetTimer() + + for i := range buf { + p.Put(buf[i][:keySize], buf[i][:]) + } +} + +func BenchmarkGet(b *testing.B) { + buf := make([][valueSize]byte, b.N) + for i := range buf { + binary.LittleEndian.PutUint32(buf[i][:], uint32(i)) + } + + p := New(4 * 1024 * 1024) + for i := range buf { + p.Put(buf[i][:keySize], buf[i][:]) + } + + b.ResetTimer() + for i := range buf { + p.Get(buf[i][:]) + } +} + +func BenchmarkGetRandom(b *testing.B) { + buf := make([][valueSize]byte, b.N) + for i := range buf { + binary.LittleEndian.PutUint32(buf[i][:], uint32(i)) + } + + p := New(4 * 1024 * 1024) + for i := range buf { + p.Put(buf[i][:keySize], buf[i][:]) + } + + b.ResetTimer() + for i := 0; i < b.N; i++ { + p.Get(buf[rand.Int()%b.N][:]) + } +} diff --git a/kv/memdb_buffer.go b/kv/memdb_buffer.go new file mode 100644 index 0000000..f4700dc --- /dev/null +++ b/kv/memdb_buffer.go @@ -0,0 +1,183 @@ +// Copyright 2015 PingCAP, Inc. +// +// Copyright 2015 Wenbin Xiao +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package kv + +import ( + "bytes" + "context" + "sync/atomic" + + "github.com/pingcap/errors" + "github.com/pingcap/tidb/kv/memdb" +) + +// memDbBuffer implements the MemBuffer interface. +type memDbBuffer struct { + db *memdb.DB + entrySizeLimit int + bufferSizeLimit uint64 +} + +type memDbIter struct { + iter memdb.Iterator + start []byte + end []byte + reverse bool +} + +// NewMemDbBuffer creates a new memDbBuffer. +func NewMemDbBuffer(initBlockSize int) MemBuffer { + return &memDbBuffer{ + db: memdb.New(initBlockSize), + entrySizeLimit: TxnEntrySizeLimit, + bufferSizeLimit: atomic.LoadUint64(&TxnTotalSizeLimit), + } +} + +// Iter creates an Iterator. +func (m *memDbBuffer) Iter(k Key, upperBound Key) (Iterator, error) { + i := &memDbIter{ + iter: m.db.NewIterator(), + start: k, + end: upperBound, + reverse: false, + } + + if k == nil { + i.iter.SeekToFirst() + } else { + i.iter.Seek(k) + } + return i, nil +} + +func (m *memDbBuffer) SetCap(cap int) { + +} + +func (m *memDbBuffer) IterReverse(k Key) (Iterator, error) { + i := &memDbIter{ + iter: m.db.NewIterator(), + end: k, + reverse: true, + } + if k == nil { + i.iter.SeekToLast() + } else { + i.iter.SeekForExclusivePrev(k) + } + return i, nil +} + +// Get returns the value associated with key. +func (m *memDbBuffer) Get(ctx context.Context, k Key) ([]byte, error) { + v := m.db.Get(k) + if v == nil { + return nil, ErrNotExist + } + return v, nil +} + +// Set associates key with value. 
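+// It returns ErrCannotSetNilValue for empty values, ErrEntryTooLarge when
+// len(k)+len(v) exceeds the per-entry limit, and ErrTxnTooLarge once the whole
+// buffer grows beyond the transaction size limit.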
+func (m *memDbBuffer) Set(k Key, v []byte) error { + if len(v) == 0 { + return errors.Trace(ErrCannotSetNilValue) + } + if len(k)+len(v) > m.entrySizeLimit { + return ErrEntryTooLarge.GenWithStackByArgs(m.entrySizeLimit, len(k)+len(v)) + } + + m.db.Put(k, v) + if m.Size() > int(m.bufferSizeLimit) { + return ErrTxnTooLarge.GenWithStackByArgs(m.Size()) + } + return nil +} + +// Delete removes the entry from buffer with provided key. +func (m *memDbBuffer) Delete(k Key) error { + m.db.Put(k, nil) + return nil +} + +// Size returns sum of keys and values length. +func (m *memDbBuffer) Size() int { + return m.db.Size() +} + +// Len returns the number of entries in the DB. +func (m *memDbBuffer) Len() int { + return m.db.Len() +} + +// Reset cleanup the MemBuffer. +func (m *memDbBuffer) Reset() { + m.db.Reset() +} + +// Next implements the Iterator Next. +func (i *memDbIter) Next() error { + if i.reverse { + i.iter.Prev() + } else { + i.iter.Next() + } + return nil +} + +// Valid implements the Iterator Valid. +func (i *memDbIter) Valid() bool { + if !i.reverse { + return i.iter.Valid() && (i.end == nil || bytes.Compare(i.Key(), i.end) < 0) + } + return i.iter.Valid() +} + +// Key implements the Iterator Key. +func (i *memDbIter) Key() Key { + return i.iter.Key() +} + +// Value implements the Iterator Value. +func (i *memDbIter) Value() []byte { + return i.iter.Value() +} + +// Close Implements the Iterator Close. +func (i *memDbIter) Close() { + +} + +// WalkMemBuffer iterates all buffered kv pairs in memBuf +func WalkMemBuffer(memBuf MemBuffer, f func(k Key, v []byte) error) error { + iter, err := memBuf.Iter(nil, nil) + if err != nil { + return errors.Trace(err) + } + + defer iter.Close() + for iter.Valid() { + if err = f(iter.Key(), iter.Value()); err != nil { + return errors.Trace(err) + } + err = iter.Next() + if err != nil { + return errors.Trace(err) + } + } + + return nil +} diff --git a/kv/mock.go b/kv/mock.go new file mode 100644 index 0000000..ba05a4c --- /dev/null +++ b/kv/mock.go @@ -0,0 +1,227 @@ +// Copyright 2016 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package kv + +import ( + "context" + + "github.com/pingcap/tidb/store/tikv/oracle" +) + +// mockTxn is a txn that returns a retryAble error when called Commit. +type mockTxn struct { + opts map[Option]interface{} + valid bool +} + +// Commit always returns a retryable error. 
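+// Since ErrTxnRetryable satisfies IsTxnRetryableError, this makes mockTxn
+// convenient for exercising retry paths such as RunInNewTxn, which keeps
+// retrying until it hits its retry limit.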
+func (t *mockTxn) Commit(ctx context.Context) error { + return ErrTxnRetryable +} + +func (t *mockTxn) Rollback() error { + t.valid = false + return nil +} + +func (t *mockTxn) String() string { + return "" +} + +func (t *mockTxn) LockKeys(_ context.Context, _ *LockCtx, _ ...Key) error { + return nil +} + +func (t *mockTxn) SetOption(opt Option, val interface{}) { + t.opts[opt] = val +} + +func (t *mockTxn) DelOption(opt Option) { + delete(t.opts, opt) +} + +func (t *mockTxn) GetOption(opt Option) interface{} { + return t.opts[opt] +} + +func (t *mockTxn) IsReadOnly() bool { + return true +} + +func (t *mockTxn) StartTS() uint64 { + return uint64(0) +} +func (t *mockTxn) Get(ctx context.Context, k Key) ([]byte, error) { + return nil, nil +} + +func (t *mockTxn) BatchGet(ctx context.Context, keys []Key) (map[string][]byte, error) { + return nil, nil +} + +func (t *mockTxn) Iter(k Key, upperBound Key) (Iterator, error) { + return nil, nil +} + +func (t *mockTxn) IterReverse(k Key) (Iterator, error) { + return nil, nil +} + +func (t *mockTxn) Set(k Key, v []byte) error { + return nil +} +func (t *mockTxn) Delete(k Key) error { + return nil +} + +func (t *mockTxn) Valid() bool { + return t.valid +} + +func (t *mockTxn) Len() int { + return 0 +} + +func (t *mockTxn) Size() int { + return 0 +} + +func (t *mockTxn) GetMemBuffer() MemBuffer { + return nil +} + +func (t *mockTxn) SetCap(cap int) { + +} + +func (t *mockTxn) Reset() { + t.valid = false +} + +func (t *mockTxn) SetVars(vars *Variables) { + +} + +// NewMockTxn new a mockTxn. +func NewMockTxn() Transaction { + return &mockTxn{ + opts: make(map[Option]interface{}), + valid: true, + } +} + +// mockStorage is used to start a must commit-failed txn. +type mockStorage struct { +} + +func (s *mockStorage) Begin() (Transaction, error) { + return NewMockTxn(), nil +} + +// BeginWithStartTS begins a transaction with startTS. +func (s *mockStorage) BeginWithStartTS(startTS uint64) (Transaction, error) { + return s.Begin() +} + +func (s *mockStorage) GetSnapshot(ver Version) (Snapshot, error) { + return &mockSnapshot{ + store: NewMemDbBuffer(DefaultTxnMembufCap), + }, nil +} + +func (s *mockStorage) Close() error { + return nil +} + +func (s *mockStorage) UUID() string { + return "" +} + +// CurrentVersion returns current max committed version. +func (s *mockStorage) CurrentVersion() (Version, error) { + return NewVersion(1), nil +} + +func (s *mockStorage) GetClient() Client { + return nil +} + +func (s *mockStorage) GetOracle() oracle.Oracle { + return nil +} + +func (s *mockStorage) SupportDeleteRange() (supported bool) { + return false +} + +func (s *mockStorage) Name() string { + return "KVMockStorage" +} + +func (s *mockStorage) Describe() string { + return "KVMockStorage is a mock Store implementation, only for unittests in KV package" +} + +func (s *mockStorage) ShowStatus(ctx context.Context, key string) (interface{}, error) { + return nil, nil +} + +// MockTxn is used for test cases that need more interfaces than Transaction. +type MockTxn interface { + Transaction + GetOption(opt Option) interface{} +} + +// NewMockStorage creates a new mockStorage. 
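+// It is intended for unit tests only, e.g. (a minimal sketch):
+//
+//	store := NewMockStorage()
+//	txn, _ := store.Begin()
+//	err := txn.Commit(context.Background()) // always ErrTxnRetryable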
+func NewMockStorage() Storage { + return &mockStorage{} +} + +type mockSnapshot struct { + store MemBuffer +} + +func (s *mockSnapshot) Get(ctx context.Context, k Key) ([]byte, error) { + return s.store.Get(ctx, k) +} + +func (s *mockSnapshot) SetPriority(priority int) { + +} + +func (s *mockSnapshot) BatchGet(ctx context.Context, keys []Key) (map[string][]byte, error) { + m := make(map[string][]byte) + for _, k := range keys { + v, err := s.store.Get(ctx, k) + if IsErrNotFound(err) { + continue + } + if err != nil { + return nil, err + } + m[string(k)] = v + } + return m, nil +} + +func (s *mockSnapshot) Iter(k Key, upperBound Key) (Iterator, error) { + return s.store.Iter(k, upperBound) +} + +func (s *mockSnapshot) IterReverse(k Key) (Iterator, error) { + return s.store.IterReverse(k) +} + +func (s *mockSnapshot) SetOption(opt Option, val interface{}) {} +func (s *mockSnapshot) DelOption(opt Option) {} diff --git a/kv/mock_test.go b/kv/mock_test.go new file mode 100644 index 0000000..1cdce6c --- /dev/null +++ b/kv/mock_test.go @@ -0,0 +1,98 @@ +// Copyright 2016 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package kv + +import ( + "context" + . "github.com/pingcap/check" +) + +var _ = Suite(testMockSuite{}) + +type testMockSuite struct { +} + +func (s testMockSuite) TestInterface(c *C) { + storage := NewMockStorage() + storage.GetClient() + storage.UUID() + + transaction, err := storage.Begin() + c.Check(err, IsNil) + err = transaction.LockKeys(context.Background(), new(LockCtx), Key("lock")) + c.Check(err, IsNil) + transaction.SetOption(Option(23), struct{}{}) + if mock, ok := transaction.(*mockTxn); ok { + mock.GetOption(Option(23)) + } + transaction.StartTS() + transaction.DelOption(Option(23)) + if transaction.IsReadOnly() { + _, err = transaction.Get(context.TODO(), Key("lock")) + c.Check(err, IsNil) + err = transaction.Set(Key("lock"), []byte{}) + c.Check(err, IsNil) + _, err = transaction.Iter(Key("lock"), nil) + c.Check(err, IsNil) + _, err = transaction.IterReverse(Key("lock")) + c.Check(err, IsNil) + } + transaction.Commit(context.Background()) + + transaction, err = storage.Begin() + c.Check(err, IsNil) + + // Test for mockTxn interface. + c.Assert(transaction.String(), Equals, "") + c.Assert(transaction.Valid(), Equals, true) + c.Assert(transaction.Len(), Equals, 0) + c.Assert(transaction.Size(), Equals, 0) + c.Assert(transaction.GetMemBuffer(), IsNil) + transaction.SetCap(0) + transaction.Reset() + err = transaction.Rollback() + c.Check(err, IsNil) + c.Assert(transaction.Valid(), Equals, false) + c.Assert(transaction.Delete(nil), IsNil) + + // Test for mockStorage interface. 
+ c.Assert(storage.GetOracle(), IsNil) + c.Assert(storage.Name(), Equals, "KVMockStorage") + c.Assert(storage.Describe(), Equals, "KVMockStorage is a mock Store implementation, only for unittests in KV package") + c.Assert(storage.SupportDeleteRange(), IsFalse) + + status, err := storage.ShowStatus(nil, "") + c.Assert(status, IsNil) + c.Assert(err, IsNil) + + err = storage.Close() + c.Check(err, IsNil) +} + +func (s testMockSuite) TestIsPoint(c *C) { + kr := KeyRange{ + StartKey: Key("rowkey1"), + EndKey: Key("rowkey2"), + } + c.Check(kr.IsPoint(), IsTrue) + + kr.EndKey = Key("rowkey3") + c.Check(kr.IsPoint(), IsFalse) + + kr = KeyRange{ + StartKey: Key(""), + EndKey: Key([]byte{0}), + } + c.Check(kr.IsPoint(), IsTrue) +} diff --git a/kv/txn.go b/kv/txn.go new file mode 100644 index 0000000..e59bb78 --- /dev/null +++ b/kv/txn.go @@ -0,0 +1,113 @@ +// Copyright 2015 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package kv + +import ( + "context" + "math" + "math/rand" + "sync/atomic" + "time" + + "github.com/pingcap/tidb/parser/terror" + "github.com/pingcap/tidb/util/logutil" + "go.uber.org/zap" +) + +// RunInNewTxn will run the f in a new transaction environment. +func RunInNewTxn(store Storage, retryable bool, f func(txn Transaction) error) error { + var ( + err error + originalTxnTS uint64 + txn Transaction + ) + for i := uint(0); i < maxRetryCnt; i++ { + txn, err = store.Begin() + if err != nil { + logutil.BgLogger().Error("RunInNewTxn", zap.Error(err)) + return err + } + + // originalTxnTS is used to trace the original transaction when the function is retryable. + if i == 0 { + originalTxnTS = txn.StartTS() + } + + err = f(txn) + if err != nil { + err1 := txn.Rollback() + terror.Log(err1) + if retryable && IsTxnRetryableError(err) { + logutil.BgLogger().Warn("RunInNewTxn", + zap.Uint64("retry txn", txn.StartTS()), + zap.Uint64("original txn", originalTxnTS), + zap.Error(err)) + continue + } + return err + } + + err = txn.Commit(context.Background()) + if err == nil { + break + } + if retryable && IsTxnRetryableError(err) { + logutil.BgLogger().Warn("RunInNewTxn", + zap.Uint64("retry txn", txn.StartTS()), + zap.Uint64("original txn", originalTxnTS), + zap.Error(err)) + BackOff(i) + continue + } + return err + } + return err +} + +var ( + // maxRetryCnt represents maximum retry times in RunInNewTxn. + maxRetryCnt uint = 100 + // retryBackOffBase is the initial duration, in microsecond, a failed transaction stays dormancy before it retries + retryBackOffBase = 1 + // retryBackOffCap is the max amount of duration, in microsecond, a failed transaction stays dormancy before it retries + retryBackOffCap = 100 +) + +// BackOff Implements exponential backoff with full jitter. +// Returns real back off time in microsecond. +// See http://www.awsarchitectureblog.com/2015/03/backoff.html. 
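+// RunInNewTxn above calls it with the current retry attempt; a standalone
+// sketch (illustrative, tryOnce is a hypothetical helper):
+//
+//	for i := uint(0); i < maxRetryCnt; i++ {
+//		if tryOnce() == nil {
+//			break
+//		}
+//		BackOff(i) // sleep upper bound doubles per attempt, capped at retryBackOffCap
+//	}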
+func BackOff(attempts uint) int { + upper := int(math.Min(float64(retryBackOffCap), float64(retryBackOffBase)*math.Pow(2.0, float64(attempts)))) + sleep := time.Duration(rand.Intn(upper)) * time.Millisecond + time.Sleep(sleep) + return int(sleep) +} + +// mockCommitErrorEnable uses to enable `mockCommitError` and only mock error once. +var mockCommitErrorEnable = int64(0) + +// MockCommitErrorEnable exports for gofail testing. +func MockCommitErrorEnable() { + atomic.StoreInt64(&mockCommitErrorEnable, 1) +} + +// MockCommitErrorDisable exports for gofail testing. +func MockCommitErrorDisable() { + atomic.StoreInt64(&mockCommitErrorEnable, 0) +} + +// IsMockCommitErrorEnable exports for gofail testing. +func IsMockCommitErrorEnable() bool { + return atomic.LoadInt64(&mockCommitErrorEnable) == 1 +} diff --git a/kv/txn_test.go b/kv/txn_test.go new file mode 100644 index 0000000..8a05cff --- /dev/null +++ b/kv/txn_test.go @@ -0,0 +1,91 @@ +// Copyright 2015 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package kv + +import ( + "errors" + "time" + + . "github.com/pingcap/check" + "github.com/pingcap/tidb/util/testleak" +) + +var _ = Suite(&testTxnSuite{}) + +type testTxnSuite struct { +} + +func (s *testTxnSuite) SetUpTest(c *C) { +} + +func (s *testTxnSuite) TearDownTest(c *C) { +} + +func (s *testTxnSuite) TestBackOff(c *C) { + defer testleak.AfterTest(c)() + mustBackOff(c, 1, 2) + mustBackOff(c, 2, 4) + mustBackOff(c, 3, 8) + mustBackOff(c, 100000, 100) +} + +func mustBackOff(c *C, cnt uint, sleep int) { + c.Assert(BackOff(cnt), LessEqual, sleep*int(time.Millisecond)) +} + +func (s *testTxnSuite) TestRetryExceedCountError(c *C) { + defer testleak.AfterTest(c)() + defer func(cnt uint) { + maxRetryCnt = cnt + }(maxRetryCnt) + + maxRetryCnt = 5 + err := RunInNewTxn(&mockStorage{}, true, func(txn Transaction) error { + return nil + }) + c.Assert(err, NotNil) + + err = RunInNewTxn(&mockStorage{}, true, func(txn Transaction) error { + return ErrTxnRetryable + }) + c.Assert(err, NotNil) + + err = RunInNewTxn(&mockStorage{}, true, func(txn Transaction) error { + return errors.New("do not retry") + }) + c.Assert(err, NotNil) + + var cfg InjectionConfig + err1 := errors.New("foo") + cfg.SetGetError(err1) + cfg.SetCommitError(err1) + storage := NewInjectedStore(NewMockStorage(), &cfg) + err = RunInNewTxn(storage, true, func(txn Transaction) error { + return nil + }) + c.Assert(err, NotNil) +} + +func (s *testTxnSuite) TestBasicFunc(c *C) { + if IsMockCommitErrorEnable() { + defer MockCommitErrorEnable() + } else { + defer MockCommitErrorDisable() + } + + MockCommitErrorEnable() + c.Assert(IsMockCommitErrorEnable(), IsTrue) + MockCommitErrorDisable() + c.Assert(IsMockCommitErrorEnable(), IsFalse) +} diff --git a/kv/union_iter.go b/kv/union_iter.go new file mode 100644 index 0000000..4134d69 --- /dev/null +++ b/kv/union_iter.go @@ -0,0 +1,186 @@ +// Copyright 2015 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package kv + +import ( + "github.com/pingcap/tidb/util/logutil" + "go.uber.org/zap" +) + +// UnionIter is the iterator on an UnionStore. +type UnionIter struct { + dirtyIt Iterator + snapshotIt Iterator + + dirtyValid bool + snapshotValid bool + + curIsDirty bool + isValid bool + reverse bool +} + +// NewUnionIter returns a union iterator for BufferStore. +func NewUnionIter(dirtyIt Iterator, snapshotIt Iterator, reverse bool) (*UnionIter, error) { + it := &UnionIter{ + dirtyIt: dirtyIt, + snapshotIt: snapshotIt, + dirtyValid: dirtyIt.Valid(), + snapshotValid: snapshotIt.Valid(), + reverse: reverse, + } + err := it.updateCur() + if err != nil { + return nil, err + } + return it, nil +} + +// dirtyNext makes iter.dirtyIt go and update valid status. +func (iter *UnionIter) dirtyNext() error { + err := iter.dirtyIt.Next() + iter.dirtyValid = iter.dirtyIt.Valid() + return err +} + +// snapshotNext makes iter.snapshotIt go and update valid status. +func (iter *UnionIter) snapshotNext() error { + err := iter.snapshotIt.Next() + iter.snapshotValid = iter.snapshotIt.Valid() + return err +} + +func (iter *UnionIter) updateCur() error { + iter.isValid = true + for { + if !iter.dirtyValid && !iter.snapshotValid { + iter.isValid = false + break + } + + if !iter.dirtyValid { + iter.curIsDirty = false + break + } + + if !iter.snapshotValid { + iter.curIsDirty = true + // if delete it + if len(iter.dirtyIt.Value()) == 0 { + if err := iter.dirtyNext(); err != nil { + return err + } + continue + } + break + } + + // both valid + if iter.snapshotValid && iter.dirtyValid { + snapshotKey := iter.snapshotIt.Key() + dirtyKey := iter.dirtyIt.Key() + cmp := dirtyKey.Cmp(snapshotKey) + if iter.reverse { + cmp = -cmp + } + // if equal, means both have value + if cmp == 0 { + if len(iter.dirtyIt.Value()) == 0 { + // snapshot has a record, but txn says we have deleted it + // just go next + if err := iter.dirtyNext(); err != nil { + return err + } + if err := iter.snapshotNext(); err != nil { + return err + } + continue + } + // both go next + if err := iter.snapshotNext(); err != nil { + return err + } + iter.curIsDirty = true + break + } else if cmp > 0 { + // record from snapshot comes first + iter.curIsDirty = false + break + } else { + // record from dirty comes first + if len(iter.dirtyIt.Value()) == 0 { + logutil.BgLogger().Warn("delete a record not exists?", + zap.Stringer("key", iter.dirtyIt.Key())) + // jump over this deletion + if err := iter.dirtyNext(); err != nil { + return err + } + continue + } + iter.curIsDirty = true + break + } + } + } + return nil +} + +// Next implements the Iterator Next interface. +func (iter *UnionIter) Next() error { + var err error + if !iter.curIsDirty { + err = iter.snapshotNext() + } else { + err = iter.dirtyNext() + } + if err != nil { + return err + } + err = iter.updateCur() + return err +} + +// Value implements the Iterator Value interface. +// Multi columns +func (iter *UnionIter) Value() []byte { + if !iter.curIsDirty { + return iter.snapshotIt.Value() + } + return iter.dirtyIt.Value() +} + +// Key implements the Iterator Key interface. 
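+// Like Value above, the result comes from whichever side (dirty buffer or
+// snapshot) the union iterator is currently positioned on.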
+func (iter *UnionIter) Key() Key { + if !iter.curIsDirty { + return iter.snapshotIt.Key() + } + return iter.dirtyIt.Key() +} + +// Valid implements the Iterator Valid interface. +func (iter *UnionIter) Valid() bool { + return iter.isValid +} + +// Close implements the Iterator Close interface. +func (iter *UnionIter) Close() { + if iter.snapshotIt != nil { + iter.snapshotIt.Close() + iter.snapshotIt = nil + } + if iter.dirtyIt != nil { + iter.dirtyIt.Close() + iter.dirtyIt = nil + } +} diff --git a/kv/union_store.go b/kv/union_store.go new file mode 100644 index 0000000..94abb3a --- /dev/null +++ b/kv/union_store.go @@ -0,0 +1,256 @@ +// Copyright 2015 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package kv + +import ( + "context" +) + +// UnionStore is a store that wraps a snapshot for read and a BufferStore for buffered write. +// Also, it provides some transaction related utilities. +type UnionStore interface { + MemBuffer + // GetKeyExistErrInfo gets the key exist error info for the lazy check. + GetKeyExistErrInfo(k Key) *existErrInfo + // DeleteKeyExistErrInfo deletes the key exist error info for the lazy check. + DeleteKeyExistErrInfo(k Key) + // WalkBuffer iterates all buffered kv pairs. + WalkBuffer(f func(k Key, v []byte) error) error + // SetOption sets an option with a value, when val is nil, uses the default + // value of this option. + SetOption(opt Option, val interface{}) + // DelOption deletes an option. + DelOption(opt Option) + // GetOption gets an option. + GetOption(opt Option) interface{} + // GetMemBuffer return the MemBuffer binding to this UnionStore. + GetMemBuffer() MemBuffer +} + +// AssertionType is the type of a assertion. +type AssertionType int + +// The AssertionType constants. +const ( + None AssertionType = iota + Exist + NotExist +) + +// Option is used for customizing kv store's behaviors during a transaction. +type Option int + +// Options is an interface of a set of options. Each option is associated with a value. +type Options interface { + // Get gets an option value. + Get(opt Option) (v interface{}, ok bool) +} + +type existErrInfo struct { + idxName string + value string +} + +// NewExistErrInfo is used to new an existErrInfo +func NewExistErrInfo(idxName string, value string) *existErrInfo { + return &existErrInfo{idxName: idxName, value: value} +} + +// GetIdxName gets the index name of the existed error. +func (e *existErrInfo) GetIdxName() string { + return e.idxName +} + +// GetValue gets the existed value of the existed error. +func (e *existErrInfo) GetValue() string { + return e.value +} + +// Err generates the error for existErrInfo +func (e *existErrInfo) Err() error { + return ErrKeyExists.FastGenByArgs(e.value, e.idxName) +} + +// unionStore is an in-memory Store which contains a buffer for write and a +// snapshot for read. +type unionStore struct { + *BufferStore + keyExistErrs map[string]*existErrInfo // for the lazy check + opts options +} + +// NewUnionStore builds a new UnionStore. 
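+// Writes are buffered locally and reads consult the buffer before falling back
+// to the snapshot; a minimal sketch (illustrative, snapshot is any Snapshot):
+//
+//	us := NewUnionStore(snapshot)
+//	_ = us.Set(Key("k"), []byte("v"))              // buffered write
+//	v, _ := us.Get(context.Background(), Key("k")) // "v", served from the buffer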
+func NewUnionStore(snapshot Snapshot) UnionStore { + return &unionStore{ + BufferStore: NewBufferStore(snapshot, DefaultTxnMembufCap), + keyExistErrs: make(map[string]*existErrInfo), + opts: make(map[Option]interface{}), + } +} + +// invalidIterator implements Iterator interface. +// It is used for read-only transaction which has no data written, the iterator is always invalid. +type invalidIterator struct{} + +func (it invalidIterator) Valid() bool { + return false +} + +func (it invalidIterator) Next() error { + return nil +} + +func (it invalidIterator) Key() Key { + return nil +} + +func (it invalidIterator) Value() []byte { + return nil +} + +func (it invalidIterator) Close() {} + +// lazyMemBuffer wraps a MemBuffer which is to be initialized when it is modified. +type lazyMemBuffer struct { + mb MemBuffer + cap int +} + +func (lmb *lazyMemBuffer) Get(ctx context.Context, k Key) ([]byte, error) { + if lmb.mb == nil { + return nil, ErrNotExist + } + + return lmb.mb.Get(ctx, k) +} + +func (lmb *lazyMemBuffer) Set(key Key, value []byte) error { + if lmb.mb == nil { + lmb.mb = NewMemDbBuffer(lmb.cap) + } + + return lmb.mb.Set(key, value) +} + +func (lmb *lazyMemBuffer) Delete(k Key) error { + if lmb.mb == nil { + lmb.mb = NewMemDbBuffer(lmb.cap) + } + + return lmb.mb.Delete(k) +} + +func (lmb *lazyMemBuffer) Iter(k Key, upperBound Key) (Iterator, error) { + if lmb.mb == nil { + return invalidIterator{}, nil + } + return lmb.mb.Iter(k, upperBound) +} + +func (lmb *lazyMemBuffer) IterReverse(k Key) (Iterator, error) { + if lmb.mb == nil { + return invalidIterator{}, nil + } + return lmb.mb.IterReverse(k) +} + +func (lmb *lazyMemBuffer) Size() int { + if lmb.mb == nil { + return 0 + } + return lmb.mb.Size() +} + +func (lmb *lazyMemBuffer) Len() int { + if lmb.mb == nil { + return 0 + } + return lmb.mb.Len() +} + +func (lmb *lazyMemBuffer) Reset() { + if lmb.mb != nil { + lmb.mb.Reset() + } +} + +func (lmb *lazyMemBuffer) SetCap(cap int) { + lmb.cap = cap +} + +// Get implements the Retriever interface. +func (us *unionStore) Get(ctx context.Context, k Key) ([]byte, error) { + v, err := us.MemBuffer.Get(ctx, k) + if IsErrNotFound(err) { + v, err = us.BufferStore.r.Get(ctx, k) + } + e, ok := us.opts.Get(PresumeKeyNotExistsError) + if ok && len(v) > 0 { + return nil, e.(*existErrInfo).Err() + } + if err != nil { + return v, err + } + if len(v) == 0 { + return nil, ErrNotExist + } + return v, nil +} + +func (us *unionStore) GetKeyExistErrInfo(k Key) *existErrInfo { + if c, ok := us.keyExistErrs[string(k)]; ok { + return c + } + return nil +} + +func (us *unionStore) DeleteKeyExistErrInfo(k Key) { + delete(us.keyExistErrs, string(k)) +} + +// SetOption implements the UnionStore SetOption interface. +func (us *unionStore) SetOption(opt Option, val interface{}) { + us.opts[opt] = val +} + +// DelOption implements the UnionStore DelOption interface. +func (us *unionStore) DelOption(opt Option) { + delete(us.opts, opt) +} + +// GetOption implements the UnionStore GetOption interface. +func (us *unionStore) GetOption(opt Option) interface{} { + return us.opts[opt] +} + +// GetMemBuffer return the MemBuffer binding to this UnionStore. +func (us *unionStore) GetMemBuffer() MemBuffer { + return us.BufferStore.MemBuffer +} + +// SetCap sets membuffer capability. 
+func (us *unionStore) SetCap(cap int) { + us.BufferStore.SetCap(cap) +} + +func (us *unionStore) Reset() { + us.BufferStore.Reset() +} + +type options map[Option]interface{} + +func (opts options) Get(opt Option) (interface{}, bool) { + v, ok := opts[opt] + return v, ok +} diff --git a/kv/union_store_test.go b/kv/union_store_test.go new file mode 100644 index 0000000..aeb6a96 --- /dev/null +++ b/kv/union_store_test.go @@ -0,0 +1,151 @@ +// Copyright 2015 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package kv + +import ( + "context" + + . "github.com/pingcap/check" + "github.com/pingcap/tidb/util/testleak" +) + +var _ = Suite(&testUnionStoreSuite{}) + +type testUnionStoreSuite struct { + store MemBuffer + us UnionStore +} + +func (s *testUnionStoreSuite) SetUpTest(c *C) { + s.store = NewMemDbBuffer(DefaultTxnMembufCap) + s.us = NewUnionStore(&mockSnapshot{s.store}) +} + +func (s *testUnionStoreSuite) TestGetSet(c *C) { + defer testleak.AfterTest(c)() + err := s.store.Set([]byte("1"), []byte("1")) + c.Assert(err, IsNil) + v, err := s.us.Get(context.TODO(), []byte("1")) + c.Assert(err, IsNil) + c.Assert(v, BytesEquals, []byte("1")) + err = s.us.Set([]byte("1"), []byte("2")) + c.Assert(err, IsNil) + v, err = s.us.Get(context.TODO(), []byte("1")) + c.Assert(err, IsNil) + c.Assert(v, BytesEquals, []byte("2")) + c.Assert(s.us.Size(), Equals, 2) + c.Assert(s.us.Len(), Equals, 1) +} + +func (s *testUnionStoreSuite) TestDelete(c *C) { + defer testleak.AfterTest(c)() + err := s.store.Set([]byte("1"), []byte("1")) + c.Assert(err, IsNil) + err = s.us.Delete([]byte("1")) + c.Assert(err, IsNil) + _, err = s.us.Get(context.TODO(), []byte("1")) + c.Assert(IsErrNotFound(err), IsTrue) + + err = s.us.Set([]byte("1"), []byte("2")) + c.Assert(err, IsNil) + v, err := s.us.Get(context.TODO(), []byte("1")) + c.Assert(err, IsNil) + c.Assert(v, BytesEquals, []byte("2")) +} + +func (s *testUnionStoreSuite) TestSeek(c *C) { + defer testleak.AfterTest(c)() + err := s.store.Set([]byte("1"), []byte("1")) + c.Assert(err, IsNil) + err = s.store.Set([]byte("2"), []byte("2")) + c.Assert(err, IsNil) + err = s.store.Set([]byte("3"), []byte("3")) + c.Assert(err, IsNil) + + iter, err := s.us.Iter(nil, nil) + c.Assert(err, IsNil) + checkIterator(c, iter, [][]byte{[]byte("1"), []byte("2"), []byte("3")}, [][]byte{[]byte("1"), []byte("2"), []byte("3")}) + + iter, err = s.us.Iter([]byte("2"), nil) + c.Assert(err, IsNil) + checkIterator(c, iter, [][]byte{[]byte("2"), []byte("3")}, [][]byte{[]byte("2"), []byte("3")}) + + err = s.us.Set([]byte("4"), []byte("4")) + c.Assert(err, IsNil) + iter, err = s.us.Iter([]byte("2"), nil) + c.Assert(err, IsNil) + checkIterator(c, iter, [][]byte{[]byte("2"), []byte("3"), []byte("4")}, [][]byte{[]byte("2"), []byte("3"), []byte("4")}) + + err = s.us.Delete([]byte("3")) + c.Assert(err, IsNil) + iter, err = s.us.Iter([]byte("2"), nil) + c.Assert(err, IsNil) + checkIterator(c, iter, [][]byte{[]byte("2"), []byte("4")}, [][]byte{[]byte("2"), []byte("4")}) +} + +func (s *testUnionStoreSuite) TestIterReverse(c *C) { + defer testleak.AfterTest(c)() + err 
:= s.store.Set([]byte("1"), []byte("1")) + c.Assert(err, IsNil) + err = s.store.Set([]byte("2"), []byte("2")) + c.Assert(err, IsNil) + err = s.store.Set([]byte("3"), []byte("3")) + c.Assert(err, IsNil) + + iter, err := s.us.IterReverse(nil) + c.Assert(err, IsNil) + checkIterator(c, iter, [][]byte{[]byte("3"), []byte("2"), []byte("1")}, [][]byte{[]byte("3"), []byte("2"), []byte("1")}) + + iter, err = s.us.IterReverse([]byte("3")) + c.Assert(err, IsNil) + checkIterator(c, iter, [][]byte{[]byte("2"), []byte("1")}, [][]byte{[]byte("2"), []byte("1")}) + + err = s.us.Set([]byte("0"), []byte("0")) + c.Assert(err, IsNil) + iter, err = s.us.IterReverse([]byte("3")) + c.Assert(err, IsNil) + checkIterator(c, iter, [][]byte{[]byte("2"), []byte("1"), []byte("0")}, [][]byte{[]byte("2"), []byte("1"), []byte("0")}) + + err = s.us.Delete([]byte("1")) + c.Assert(err, IsNil) + iter, err = s.us.IterReverse([]byte("3")) + c.Assert(err, IsNil) + checkIterator(c, iter, [][]byte{[]byte("2"), []byte("0")}, [][]byte{[]byte("2"), []byte("0")}) +} + +func checkIterator(c *C, iter Iterator, keys [][]byte, values [][]byte) { + defer iter.Close() + c.Assert(len(keys), Equals, len(values)) + for i, k := range keys { + v := values[i] + c.Assert(iter.Valid(), IsTrue) + c.Assert([]byte(iter.Key()), BytesEquals, k) + c.Assert(iter.Value(), BytesEquals, v) + c.Assert(iter.Next(), IsNil) + } + c.Assert(iter.Valid(), IsFalse) +} + +func (s *testUnionStoreSuite) TestBasic(c *C) { + iter := invalidIterator{} + c.Assert(iter.Valid(), IsFalse) + c.Assert(iter.Next(), IsNil) + c.Assert(iter.Key(), IsNil) + c.Assert(iter.Value(), IsNil) + + s.us.SetOption(1, 1) + c.Assert(s.us.GetOption(1), Equals, 1) + s.us.DelOption(1) + c.Assert(s.us.GetOption(1), IsNil) +} diff --git a/kv/utils.go b/kv/utils.go new file mode 100644 index 0000000..e759bcf --- /dev/null +++ b/kv/utils.go @@ -0,0 +1,64 @@ +// Copyright 2015 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package kv + +import ( + "context" + "strconv" + + "github.com/pingcap/errors" +) + +// IncInt64 increases the value for key k in kv store by step. +func IncInt64(rm RetrieverMutator, k Key, step int64) (int64, error) { + val, err := rm.Get(context.TODO(), k) + if IsErrNotFound(err) { + err = rm.Set(k, []byte(strconv.FormatInt(step, 10))) + if err != nil { + return 0, err + } + return step, nil + } + if err != nil { + return 0, err + } + + intVal, err := strconv.ParseInt(string(val), 10, 0) + if err != nil { + return 0, errors.Trace(err) + } + + intVal += step + err = rm.Set(k, []byte(strconv.FormatInt(intVal, 10))) + if err != nil { + return 0, err + } + return intVal, nil +} + +// GetInt64 get int64 value which created by IncInt64 method. 
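+// A typical round trip (illustrative, mirroring the tests in utils_test.go):
+//
+//	buf := NewMemDbBuffer(DefaultTxnMembufCap)
+//	_, _ = IncInt64(buf, Key("counter"), 5)                     // stores "5"
+//	n, _ := GetInt64(context.Background(), buf, Key("counter")) // n == 5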
+func GetInt64(ctx context.Context, r Retriever, k Key) (int64, error) { + val, err := r.Get(ctx, k) + if IsErrNotFound(err) { + return 0, nil + } + if err != nil { + return 0, err + } + intVal, err := strconv.ParseInt(string(val), 10, 0) + if err != nil { + return intVal, errors.Trace(err) + } + return intVal, nil +} diff --git a/kv/utils_test.go b/kv/utils_test.go new file mode 100644 index 0000000..1d3cab0 --- /dev/null +++ b/kv/utils_test.go @@ -0,0 +1,55 @@ +// Copyright 2016 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package kv + +import ( + "context" + + . "github.com/pingcap/check" +) + +var _ = Suite(testUtilsSuite{}) + +type testUtilsSuite struct { +} + +func (s testUtilsSuite) TestIncInt64(c *C) { + mb := NewMemDbBuffer(DefaultTxnMembufCap) + key := Key("key") + v, err := IncInt64(mb, key, 1) + c.Check(err, IsNil) + c.Check(v, Equals, int64(1)) + v, err = IncInt64(mb, key, 10) + c.Check(err, IsNil) + c.Check(v, Equals, int64(11)) + + err = mb.Set(key, []byte("not int")) + c.Check(err, IsNil) + _, err = IncInt64(mb, key, 1) + c.Check(err, NotNil) +} + +func (s testUtilsSuite) TestGetInt64(c *C) { + mb := NewMemDbBuffer(DefaultTxnMembufCap) + key := Key("key") + v, err := GetInt64(context.TODO(), mb, key) + c.Check(v, Equals, int64(0)) + c.Check(err, IsNil) + + _, err = IncInt64(mb, key, 15) + c.Check(err, IsNil) + v, err = GetInt64(context.TODO(), mb, key) + c.Check(v, Equals, int64(15)) + c.Check(err, IsNil) +} diff --git a/kv/variables.go b/kv/variables.go new file mode 100644 index 0000000..3734218 --- /dev/null +++ b/kv/variables.go @@ -0,0 +1,43 @@ +// Copyright 2017 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package kv + +// Variables defines the variables used by KV storage. +type Variables struct { + // BackoffLockFast specifies the LockFast backoff base duration in milliseconds. + BackoffLockFast int + + // BackOffWeight specifies the weight of the max back off time duration. + BackOffWeight int + + // Hook is used for test to verify the variable take effect. + Hook func(name string, vars *Variables) +} + +// NewVariables create a new Variables instance with default values. +func NewVariables() *Variables { + return &Variables{ + BackoffLockFast: DefBackoffLockFast, + BackOffWeight: DefBackOffWeight, + } +} + +// DefaultVars is the default variables instance. +var DefaultVars = NewVariables() + +// Default values +const ( + DefBackoffLockFast = 100 + DefBackOffWeight = 2 +) diff --git a/kv/version.go b/kv/version.go new file mode 100644 index 0000000..f009215 --- /dev/null +++ b/kv/version.go @@ -0,0 +1,51 @@ +// Copyright 2015 PingCAP, Inc. 
+// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package kv + +import "math" + +// VersionProvider provides increasing IDs. +type VersionProvider interface { + CurrentVersion() (Version, error) +} + +// Version is the wrapper of KV's version. +type Version struct { + Ver uint64 +} + +var ( + // MaxVersion is the maximum version, notice that it's not a valid version. + MaxVersion = Version{Ver: math.MaxUint64} + // MinVersion is the minimum version, it's not a valid version, too. + MinVersion = Version{Ver: 0} +) + +// NewVersion creates a new Version struct. +func NewVersion(v uint64) Version { + return Version{ + Ver: v, + } +} + +// Cmp returns the comparison result of two versions. +// The result will be 0 if a==b, -1 if a < b, and +1 if a > b. +func (v Version) Cmp(another Version) int { + if v.Ver > another.Ver { + return 1 + } else if v.Ver < another.Ver { + return -1 + } + return 0 +} diff --git a/kv/version_test.go b/kv/version_test.go new file mode 100644 index 0000000..56219f8 --- /dev/null +++ b/kv/version_test.go @@ -0,0 +1,34 @@ +// Copyright 2016 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package kv + +import ( + . "github.com/pingcap/check" +) + +var _ = Suite(testVersionSuite{}) + +type testVersionSuite struct{} + +func (s testVersionSuite) TestVersion(c *C) { + le := NewVersion(42).Cmp(NewVersion(43)) + gt := NewVersion(42).Cmp(NewVersion(41)) + eq := NewVersion(42).Cmp(NewVersion(42)) + + c.Assert(le < 0, IsTrue) + c.Assert(gt > 0, IsTrue) + c.Assert(eq == 0, IsTrue) + + c.Check(MinVersion.Cmp(MaxVersion) < 0, IsTrue) +} diff --git a/meta/autoid/autoid.go b/meta/autoid/autoid.go new file mode 100644 index 0000000..a320f9d --- /dev/null +++ b/meta/autoid/autoid.go @@ -0,0 +1,380 @@ +// Copyright 2015 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package autoid + +import ( + "context" + "math" + "sync" + "time" + + "github.com/cznic/mathutil" + "github.com/pingcap/errors" + "github.com/pingcap/failpoint" + "github.com/pingcap/tidb/kv" + "github.com/pingcap/tidb/meta" + + "github.com/pingcap/tidb/util/logutil" + "go.uber.org/zap" +) + +// Attention: +// For reading cluster TiDB memory tables, the system schema/table should be same. 
+// Once the system schema/table id been allocated, it can't be changed any more. +// Change the system schema/table id may have the compatibility problem. +const ( + // SystemSchemaIDFlag is the system schema/table id flag, uses the highest bit position as system schema ID flag, it's exports for test. + SystemSchemaIDFlag = 1 << 62 + // InformationSchemaDBID is the information_schema schema id, it's exports for test. + InformationSchemaDBID int64 = SystemSchemaIDFlag | 1 +) + +const ( + minStep = 30000 + maxStep = 2000000 + defaultConsumeTime = 10 * time.Second +) + +// Test needs to change it, so it's a variable. +var step = int64(30000) + +// Allocator is an auto increment id generator. +// Just keep id unique actually. +type Allocator interface { + // Alloc allocs N consecutive autoID for table with tableID, returning (min, max] of the allocated autoID batch. + // It gets a batch of autoIDs at a time. So it does not need to access storage for each call. + // The consecutive feature is used to insert multiple rows in a statement. + Alloc(tableID int64, n uint64) (int64, int64, error) + // Rebase rebases the autoID base for table with tableID and the new base value. + // If allocIDs is true, it will allocate some IDs and save to the cache. + // If allocIDs is false, it will not allocate IDs. + Rebase(tableID, newBase int64, allocIDs bool) error + // Base return the current base of Allocator. + Base() int64 + // End is only used for test. + End() int64 + // NextGlobalAutoID returns the next global autoID. + NextGlobalAutoID(tableID int64) (int64, error) +} + +type allocator struct { + mu sync.Mutex + base int64 + end int64 + store kv.Storage + // dbID is current database's ID. + dbID int64 + isUnsigned bool + lastAllocTime time.Time + step int64 +} + +// GetStep is only used by tests +func GetStep() int64 { + return step +} + +// SetStep is only used by tests +func SetStep(s int64) { + step = s +} + +// Base implements autoid.Allocator Base interface. +func (alloc *allocator) Base() int64 { + return alloc.base +} + +// End implements autoid.Allocator End interface. +func (alloc *allocator) End() int64 { + return alloc.end +} + +// NextGlobalAutoID implements autoid.Allocator NextGlobalAutoID interface. +func (alloc *allocator) NextGlobalAutoID(tableID int64) (int64, error) { + var autoID int64 + err := kv.RunInNewTxn(alloc.store, true, func(txn kv.Transaction) error { + var err1 error + m := meta.NewMeta(txn) + autoID, err1 = m.GetAutoTableID(alloc.dbID, tableID) + if err1 != nil { + return errors.Trace(err1) + } + return nil + }) + + if alloc.isUnsigned { + return int64(uint64(autoID) + 1), err + } + return autoID + 1, err +} + +func (alloc *allocator) rebase4Unsigned(tableID int64, requiredBase uint64, allocIDs bool) error { + // Satisfied by alloc.base, nothing to do. + if requiredBase <= uint64(alloc.base) { + return nil + } + // Satisfied by alloc.end, need to update alloc.base. 
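+	// (alloc.base, alloc.end] is the range this allocator has already reserved in
+	// the KV store, so such bases can be satisfied without another transaction.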
+ if requiredBase <= uint64(alloc.end) { + alloc.base = int64(requiredBase) + return nil + } + var newBase, newEnd uint64 + err := kv.RunInNewTxn(alloc.store, true, func(txn kv.Transaction) error { + m := meta.NewMeta(txn) + currentEnd, err1 := m.GetAutoTableID(alloc.dbID, tableID) + if err1 != nil { + return err1 + } + uCurrentEnd := uint64(currentEnd) + if allocIDs { + newBase = mathutil.MaxUint64(uCurrentEnd, requiredBase) + newEnd = mathutil.MinUint64(math.MaxUint64-uint64(alloc.step), newBase) + uint64(alloc.step) + } else { + if uCurrentEnd >= requiredBase { + newBase = uCurrentEnd + newEnd = uCurrentEnd + // Required base satisfied, we don't need to update KV. + return nil + } + // If we don't want to allocate IDs, for example when creating a table with a given base value, + // We need to make sure when other TiDB server allocates ID for the first time, requiredBase + 1 + // will be allocated, so we need to increase the end to exactly the requiredBase. + newBase = requiredBase + newEnd = requiredBase + } + _, err1 = m.GenAutoTableID(alloc.dbID, tableID, int64(newEnd-uCurrentEnd)) + return err1 + }) + + if err != nil { + return err + } + alloc.base, alloc.end = int64(newBase), int64(newEnd) + return nil +} + +func (alloc *allocator) rebase4Signed(tableID, requiredBase int64, allocIDs bool) error { + // Satisfied by alloc.base, nothing to do. + if requiredBase <= alloc.base { + return nil + } + // Satisfied by alloc.end, need to update alloc.base. + if requiredBase <= alloc.end { + alloc.base = requiredBase + return nil + } + var newBase, newEnd int64 + err := kv.RunInNewTxn(alloc.store, true, func(txn kv.Transaction) error { + m := meta.NewMeta(txn) + currentEnd, err1 := m.GetAutoTableID(alloc.dbID, tableID) + if err1 != nil { + return err1 + } + if allocIDs { + newBase = mathutil.MaxInt64(currentEnd, requiredBase) + newEnd = mathutil.MinInt64(math.MaxInt64-alloc.step, newBase) + alloc.step + } else { + if currentEnd >= requiredBase { + newBase = currentEnd + newEnd = currentEnd + // Required base satisfied, we don't need to update KV. + return nil + } + // If we don't want to allocate IDs, for example when creating a table with a given base value, + // We need to make sure when other TiDB server allocates ID for the first time, requiredBase + 1 + // will be allocated, so we need to increase the end to exactly the requiredBase. + newBase = requiredBase + newEnd = requiredBase + } + _, err1 = m.GenAutoTableID(alloc.dbID, tableID, newEnd-currentEnd) + return err1 + }) + + if err != nil { + return err + } + alloc.base, alloc.end = newBase, newEnd + return nil +} + +// Rebase implements autoid.Allocator Rebase interface. +// The requiredBase is the minimum base value after Rebase. +// The real base may be greater than the required base. +func (alloc *allocator) Rebase(tableID, requiredBase int64, allocIDs bool) error { + if tableID == 0 { + return errInvalidTableID.GenWithStack("Invalid tableID") + } + + alloc.mu.Lock() + defer alloc.mu.Unlock() + + if alloc.isUnsigned { + return alloc.rebase4Unsigned(tableID, uint64(requiredBase), allocIDs) + } + return alloc.rebase4Signed(tableID, requiredBase, allocIDs) +} + +// NextStep return new auto id step according to previous step and consuming time. 
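+// For example (illustrative): a step of 30000 consumed in 5s burns IDs twice as
+// fast as the 10s target, so the next step roughly doubles:
+//
+//	step := NextStep(30000, 5*time.Second) // 60000, clamped to [minStep, maxStep]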
+func NextStep(curStep int64, consumeDur time.Duration) int64 { + failpoint.Inject("mockAutoIDChange", func(val failpoint.Value) { + if val.(bool) { + failpoint.Return(step) + } + }) + + consumeRate := defaultConsumeTime.Seconds() / consumeDur.Seconds() + res := int64(float64(curStep) * consumeRate) + if res < minStep { + return minStep + } else if res > maxStep { + return maxStep + } + return res +} + +// NewAllocator returns a new auto increment id generator on the store. +func NewAllocator(store kv.Storage, dbID int64, isUnsigned bool) Allocator { + return &allocator{ + store: store, + dbID: dbID, + isUnsigned: isUnsigned, + step: step, + lastAllocTime: time.Now(), + } +} + +// Alloc implements autoid.Allocator Alloc interface. +func (alloc *allocator) Alloc(tableID int64, n uint64) (int64, int64, error) { + if tableID == 0 { + return 0, 0, errInvalidTableID.GenWithStackByArgs("Invalid tableID") + } + if n == 0 { + return 0, 0, nil + } + alloc.mu.Lock() + defer alloc.mu.Unlock() + if alloc.isUnsigned { + return alloc.alloc4Unsigned(tableID, n) + } + return alloc.alloc4Signed(tableID, n) +} + +func (alloc *allocator) alloc4Signed(tableID int64, n uint64) (int64, int64, error) { + n1 := int64(n) + // Condition alloc.base+N1 > alloc.end will overflow when alloc.base + N1 > MaxInt64. So need this. + if math.MaxInt64-alloc.base <= n1 { + return 0, 0, ErrAutoincReadFailed + } + // The local rest is not enough for allocN, skip it. + if alloc.base+n1 > alloc.end { + var newBase, newEnd int64 + startTime := time.Now() + // Although it may skip a segment here, we still think it is consumed. + consumeDur := startTime.Sub(alloc.lastAllocTime) + nextStep := NextStep(alloc.step, consumeDur) + // Make sure nextStep is big enough. + if nextStep <= n1 { + alloc.step = mathutil.MinInt64(n1*2, maxStep) + } else { + alloc.step = nextStep + } + err := kv.RunInNewTxn(alloc.store, true, func(txn kv.Transaction) error { + m := meta.NewMeta(txn) + var err1 error + newBase, err1 = m.GetAutoTableID(alloc.dbID, tableID) + if err1 != nil { + return err1 + } + tmpStep := mathutil.MinInt64(math.MaxInt64-newBase, alloc.step) + // The global rest is not enough for alloc. + if tmpStep < n1 { + return ErrAutoincReadFailed + } + newEnd, err1 = m.GenAutoTableID(alloc.dbID, tableID, tmpStep) + return err1 + }) + + if err != nil { + return 0, 0, err + } + alloc.lastAllocTime = time.Now() + if newBase == math.MaxInt64 { + return 0, 0, ErrAutoincReadFailed + } + alloc.base, alloc.end = newBase, newEnd + } + logutil.Logger(context.TODO()).Debug("alloc N signed ID", + zap.Uint64("from ID", uint64(alloc.base)), + zap.Uint64("to ID", uint64(alloc.base+n1)), + zap.Int64("table ID", tableID), + zap.Int64("database ID", alloc.dbID)) + min := alloc.base + alloc.base += n1 + return min, alloc.base, nil +} + +func (alloc *allocator) alloc4Unsigned(tableID int64, n uint64) (int64, int64, error) { + n1 := int64(n) + // Condition alloc.base+n1 > alloc.end will overflow when alloc.base + n1 > MaxInt64. So need this. + if math.MaxUint64-uint64(alloc.base) <= n { + return 0, 0, ErrAutoincReadFailed + } + // The local rest is not enough for alloc, skip it. + if uint64(alloc.base)+n > uint64(alloc.end) { + var newBase, newEnd int64 + startTime := time.Now() + // Although it may skip a segment here, we still treat it as consumed. + consumeDur := startTime.Sub(alloc.lastAllocTime) + nextStep := NextStep(alloc.step, consumeDur) + // Make sure nextStep is big enough. 
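+	// Here "big enough" means it must at least cover the n IDs requested by this
+	// call, otherwise the freshly reserved range would be exhausted immediately.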
+ if nextStep <= n1 { + alloc.step = mathutil.MinInt64(n1*2, maxStep) + } else { + alloc.step = nextStep + } + err := kv.RunInNewTxn(alloc.store, true, func(txn kv.Transaction) error { + m := meta.NewMeta(txn) + var err1 error + newBase, err1 = m.GetAutoTableID(alloc.dbID, tableID) + if err1 != nil { + return err1 + } + tmpStep := int64(mathutil.MinUint64(math.MaxUint64-uint64(newBase), uint64(alloc.step))) + // The global rest is not enough for alloc. + if tmpStep < n1 { + return ErrAutoincReadFailed + } + newEnd, err1 = m.GenAutoTableID(alloc.dbID, tableID, tmpStep) + return err1 + }) + + if err != nil { + return 0, 0, err + } + alloc.lastAllocTime = time.Now() + if uint64(newBase) == math.MaxUint64 { + return 0, 0, ErrAutoincReadFailed + } + alloc.base, alloc.end = newBase, newEnd + } + logutil.Logger(context.TODO()).Debug("alloc unsigned ID", + zap.Uint64(" from ID", uint64(alloc.base)), + zap.Uint64("to ID", uint64(alloc.base+n1)), + zap.Int64("table ID", tableID), + zap.Int64("database ID", alloc.dbID)) + min := alloc.base + // Use uint64 n directly. + alloc.base = int64(uint64(alloc.base) + n) + return min, alloc.base, nil +} diff --git a/meta/autoid/autoid_test.go b/meta/autoid/autoid_test.go new file mode 100644 index 0000000..e58fb60 --- /dev/null +++ b/meta/autoid/autoid_test.go @@ -0,0 +1,490 @@ +// Copyright 2015 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package autoid_test + +import ( + "fmt" + "math" + "math/rand" + "sync" + "testing" + "time" + + . 
"github.com/pingcap/check" + "github.com/pingcap/errors" + "github.com/pingcap/failpoint" + "github.com/pingcap/tidb/kv" + "github.com/pingcap/tidb/meta" + "github.com/pingcap/tidb/meta/autoid" + "github.com/pingcap/tidb/parser/model" + "github.com/pingcap/tidb/store/mockstore" +) + +func TestT(t *testing.T) { + CustomVerboseFlag = true + TestingT(t) +} + +var _ = Suite(&testSuite{}) + +type testSuite struct { +} + +func (*testSuite) TestT(c *C) { + c.Assert(failpoint.Enable("github.com/pingcap/tidb/meta/autoid/mockAutoIDChange", `return(true)`), IsNil) + defer func() { + c.Assert(failpoint.Disable("github.com/pingcap/tidb/meta/autoid/mockAutoIDChange"), IsNil) + }() + + store, err := mockstore.NewMockTikvStore() + c.Assert(err, IsNil) + defer store.Close() + + err = kv.RunInNewTxn(store, false, func(txn kv.Transaction) error { + m := meta.NewMeta(txn) + err = m.CreateDatabase(&model.DBInfo{ID: 1, Name: model.NewCIStr("a")}) + c.Assert(err, IsNil) + err = m.CreateTableOrView(1, &model.TableInfo{ID: 1, Name: model.NewCIStr("t")}) + c.Assert(err, IsNil) + err = m.CreateTableOrView(1, &model.TableInfo{ID: 2, Name: model.NewCIStr("t1")}) + c.Assert(err, IsNil) + err = m.CreateTableOrView(1, &model.TableInfo{ID: 3, Name: model.NewCIStr("t1")}) + c.Assert(err, IsNil) + err = m.CreateTableOrView(1, &model.TableInfo{ID: 4, Name: model.NewCIStr("t2")}) + c.Assert(err, IsNil) + return nil + }) + c.Assert(err, IsNil) + + alloc := autoid.NewAllocator(store, 1, false) + c.Assert(alloc, NotNil) + + globalAutoID, err := alloc.NextGlobalAutoID(1) + c.Assert(err, IsNil) + c.Assert(globalAutoID, Equals, int64(1)) + _, id, err := alloc.Alloc(1, 1) + c.Assert(err, IsNil) + c.Assert(id, Equals, int64(1)) + _, id, err = alloc.Alloc(1, 1) + c.Assert(err, IsNil) + c.Assert(id, Equals, int64(2)) + _, _, err = alloc.Alloc(0, 1) + c.Assert(err, NotNil) + globalAutoID, err = alloc.NextGlobalAutoID(1) + c.Assert(err, IsNil) + c.Assert(globalAutoID, Equals, int64(autoid.GetStep()+1)) + + // rebase + err = alloc.Rebase(1, int64(1), true) + c.Assert(err, IsNil) + _, id, err = alloc.Alloc(1, 1) + c.Assert(err, IsNil) + c.Assert(id, Equals, int64(3)) + err = alloc.Rebase(1, int64(3), true) + c.Assert(err, IsNil) + _, id, err = alloc.Alloc(1, 1) + c.Assert(err, IsNil) + c.Assert(id, Equals, int64(4)) + err = alloc.Rebase(1, int64(10), true) + c.Assert(err, IsNil) + _, id, err = alloc.Alloc(1, 1) + c.Assert(err, IsNil) + c.Assert(id, Equals, int64(11)) + err = alloc.Rebase(1, int64(3010), true) + c.Assert(err, IsNil) + _, id, err = alloc.Alloc(1, 1) + c.Assert(err, IsNil) + c.Assert(id, Equals, int64(3011)) + + alloc = autoid.NewAllocator(store, 1, false) + c.Assert(alloc, NotNil) + _, id, err = alloc.Alloc(1, 1) + c.Assert(err, IsNil) + c.Assert(id, Equals, int64(autoid.GetStep()+1)) + + alloc = autoid.NewAllocator(store, 1, false) + c.Assert(alloc, NotNil) + err = alloc.Rebase(2, int64(1), false) + c.Assert(err, IsNil) + _, id, err = alloc.Alloc(2, 1) + c.Assert(err, IsNil) + c.Assert(id, Equals, int64(2)) + + alloc = autoid.NewAllocator(store, 1, false) + c.Assert(alloc, NotNil) + err = alloc.Rebase(3, int64(3210), false) + c.Assert(err, IsNil) + alloc = autoid.NewAllocator(store, 1, false) + c.Assert(alloc, NotNil) + err = alloc.Rebase(3, int64(3000), false) + c.Assert(err, IsNil) + _, id, err = alloc.Alloc(3, 1) + c.Assert(err, IsNil) + c.Assert(id, Equals, int64(3211)) + err = alloc.Rebase(3, int64(6543), false) + c.Assert(err, IsNil) + _, id, err = alloc.Alloc(3, 1) + c.Assert(err, IsNil) + c.Assert(id, Equals, 
int64(6544)) + + // Test the MaxInt64 is the upper bound of `alloc` function but not `rebase`. + err = alloc.Rebase(3, int64(math.MaxInt64-1), true) + c.Assert(err, IsNil) + _, _, err = alloc.Alloc(3, 1) + c.Assert(alloc, NotNil) + err = alloc.Rebase(3, int64(math.MaxInt64), true) + c.Assert(err, IsNil) + + // alloc N for signed + alloc = autoid.NewAllocator(store, 1, false) + c.Assert(alloc, NotNil) + globalAutoID, err = alloc.NextGlobalAutoID(4) + c.Assert(err, IsNil) + c.Assert(globalAutoID, Equals, int64(1)) + min, max, err := alloc.Alloc(4, 1) + c.Assert(err, IsNil) + c.Assert(max-min, Equals, int64(1)) + c.Assert(min+1, Equals, int64(1)) + + min, max, err = alloc.Alloc(4, 2) + c.Assert(err, IsNil) + c.Assert(max-min, Equals, int64(2)) + c.Assert(min+1, Equals, int64(2)) + c.Assert(max, Equals, int64(3)) + + min, max, err = alloc.Alloc(4, 100) + c.Assert(err, IsNil) + c.Assert(max-min, Equals, int64(100)) + expected := int64(4) + for i := min + 1; i <= max; i++ { + c.Assert(i, Equals, expected) + expected++ + } + + err = alloc.Rebase(4, int64(1000), false) + c.Assert(err, IsNil) + min, max, err = alloc.Alloc(4, 3) + c.Assert(err, IsNil) + c.Assert(max-min, Equals, int64(3)) + c.Assert(min+1, Equals, int64(1001)) + c.Assert(min+2, Equals, int64(1002)) + c.Assert(max, Equals, int64(1003)) + + lastRemainOne := alloc.End() + err = alloc.Rebase(4, alloc.End()-2, false) + c.Assert(err, IsNil) + min, max, err = alloc.Alloc(4, 5) + c.Assert(err, IsNil) + c.Assert(max-min, Equals, int64(5)) + c.Assert(min+1, Greater, lastRemainOne) +} + +func (*testSuite) TestUnsignedAutoid(c *C) { + c.Assert(failpoint.Enable("github.com/pingcap/tidb/meta/autoid/mockAutoIDChange", `return(true)`), IsNil) + defer func() { + c.Assert(failpoint.Disable("github.com/pingcap/tidb/meta/autoid/mockAutoIDChange"), IsNil) + }() + + store, err := mockstore.NewMockTikvStore() + c.Assert(err, IsNil) + defer store.Close() + + err = kv.RunInNewTxn(store, false, func(txn kv.Transaction) error { + m := meta.NewMeta(txn) + err = m.CreateDatabase(&model.DBInfo{ID: 1, Name: model.NewCIStr("a")}) + c.Assert(err, IsNil) + err = m.CreateTableOrView(1, &model.TableInfo{ID: 1, Name: model.NewCIStr("t")}) + c.Assert(err, IsNil) + err = m.CreateTableOrView(1, &model.TableInfo{ID: 2, Name: model.NewCIStr("t1")}) + c.Assert(err, IsNil) + err = m.CreateTableOrView(1, &model.TableInfo{ID: 3, Name: model.NewCIStr("t1")}) + c.Assert(err, IsNil) + err = m.CreateTableOrView(1, &model.TableInfo{ID: 4, Name: model.NewCIStr("t2")}) + c.Assert(err, IsNil) + return nil + }) + c.Assert(err, IsNil) + + alloc := autoid.NewAllocator(store, 1, true) + c.Assert(alloc, NotNil) + + globalAutoID, err := alloc.NextGlobalAutoID(1) + c.Assert(err, IsNil) + c.Assert(globalAutoID, Equals, int64(1)) + _, id, err := alloc.Alloc(1, 1) + c.Assert(err, IsNil) + c.Assert(id, Equals, int64(1)) + _, id, err = alloc.Alloc(1, 1) + c.Assert(err, IsNil) + c.Assert(id, Equals, int64(2)) + _, _, err = alloc.Alloc(0, 1) + c.Assert(err, NotNil) + globalAutoID, err = alloc.NextGlobalAutoID(1) + c.Assert(err, IsNil) + c.Assert(globalAutoID, Equals, int64(autoid.GetStep()+1)) + + // rebase + err = alloc.Rebase(1, int64(1), true) + c.Assert(err, IsNil) + _, id, err = alloc.Alloc(1, 1) + c.Assert(err, IsNil) + c.Assert(id, Equals, int64(3)) + err = alloc.Rebase(1, int64(3), true) + c.Assert(err, IsNil) + _, id, err = alloc.Alloc(1, 1) + c.Assert(err, IsNil) + c.Assert(id, Equals, int64(4)) + err = alloc.Rebase(1, int64(10), true) + c.Assert(err, IsNil) + _, id, err = alloc.Alloc(1, 
1) + c.Assert(err, IsNil) + c.Assert(id, Equals, int64(11)) + err = alloc.Rebase(1, int64(3010), true) + c.Assert(err, IsNil) + _, id, err = alloc.Alloc(1, 1) + c.Assert(err, IsNil) + c.Assert(id, Equals, int64(3011)) + + alloc = autoid.NewAllocator(store, 1, true) + c.Assert(alloc, NotNil) + _, id, err = alloc.Alloc(1, 1) + c.Assert(err, IsNil) + c.Assert(id, Equals, int64(autoid.GetStep()+1)) + + alloc = autoid.NewAllocator(store, 1, true) + c.Assert(alloc, NotNil) + err = alloc.Rebase(2, int64(1), false) + c.Assert(err, IsNil) + _, id, err = alloc.Alloc(2, 1) + c.Assert(err, IsNil) + c.Assert(id, Equals, int64(2)) + + alloc = autoid.NewAllocator(store, 1, true) + c.Assert(alloc, NotNil) + err = alloc.Rebase(3, int64(3210), false) + c.Assert(err, IsNil) + alloc = autoid.NewAllocator(store, 1, true) + c.Assert(alloc, NotNil) + err = alloc.Rebase(3, int64(3000), false) + c.Assert(err, IsNil) + _, id, err = alloc.Alloc(3, 1) + c.Assert(err, IsNil) + c.Assert(id, Equals, int64(3211)) + err = alloc.Rebase(3, int64(6543), false) + c.Assert(err, IsNil) + _, id, err = alloc.Alloc(3, 1) + c.Assert(err, IsNil) + c.Assert(id, Equals, int64(6544)) + + // Test the MaxUint64 is the upper bound of `alloc` func but not `rebase`. + var n uint64 = math.MaxUint64 - 1 + un := int64(n) + err = alloc.Rebase(3, un, true) + c.Assert(err, IsNil) + _, _, err = alloc.Alloc(3, 1) + c.Assert(err, NotNil) + un = int64(n + 1) + err = alloc.Rebase(3, un, true) + c.Assert(err, IsNil) + + // alloc N for unsigned + alloc = autoid.NewAllocator(store, 1, true) + c.Assert(alloc, NotNil) + globalAutoID, err = alloc.NextGlobalAutoID(4) + c.Assert(err, IsNil) + c.Assert(globalAutoID, Equals, int64(1)) + + min, max, err := alloc.Alloc(4, 2) + c.Assert(err, IsNil) + c.Assert(max-min, Equals, int64(2)) + c.Assert(min+1, Equals, int64(1)) + c.Assert(max, Equals, int64(2)) + + err = alloc.Rebase(4, int64(500), true) + c.Assert(err, IsNil) + min, max, err = alloc.Alloc(4, 2) + c.Assert(err, IsNil) + c.Assert(max-min, Equals, int64(2)) + c.Assert(min+1, Equals, int64(501)) + c.Assert(max, Equals, int64(502)) + + lastRemainOne := alloc.End() + err = alloc.Rebase(4, alloc.End()-2, false) + c.Assert(err, IsNil) + min, max, err = alloc.Alloc(4, 5) + c.Assert(err, IsNil) + c.Assert(max-min, Equals, int64(5)) + c.Assert(min+1, Greater, lastRemainOne) +} + +// TestConcurrentAlloc is used for the test that +// multiple alloctors allocate ID with the same table ID concurrently. 
+func (*testSuite) TestConcurrentAlloc(c *C) { + store, err := mockstore.NewMockTikvStore() + c.Assert(err, IsNil) + defer store.Close() + autoid.SetStep(100) + defer func() { + autoid.SetStep(5000) + }() + + dbID := int64(2) + tblID := int64(100) + err = kv.RunInNewTxn(store, false, func(txn kv.Transaction) error { + m := meta.NewMeta(txn) + err = m.CreateDatabase(&model.DBInfo{ID: dbID, Name: model.NewCIStr("a")}) + c.Assert(err, IsNil) + err = m.CreateTableOrView(dbID, &model.TableInfo{ID: tblID, Name: model.NewCIStr("t")}) + c.Assert(err, IsNil) + return nil + }) + c.Assert(err, IsNil) + + var mu sync.Mutex + wg := sync.WaitGroup{} + m := map[int64]struct{}{} + count := 10 + errCh := make(chan error, count) + + allocIDs := func() { + alloc := autoid.NewAllocator(store, dbID, false) + for j := 0; j < int(autoid.GetStep())+5; j++ { + _, id, err1 := alloc.Alloc(tblID, 1) + if err1 != nil { + errCh <- err1 + break + } + + mu.Lock() + if _, ok := m[id]; ok { + errCh <- fmt.Errorf("duplicate id:%v", id) + mu.Unlock() + break + } + m[id] = struct{}{} + mu.Unlock() + + //test Alloc N + N := rand.Uint64() % 100 + min, max, err1 := alloc.Alloc(tblID, N) + if err1 != nil { + errCh <- err1 + break + } + + errFlag := false + mu.Lock() + for i := min + 1; i <= max; i++ { + if _, ok := m[i]; ok { + errCh <- fmt.Errorf("duplicate id:%v", i) + errFlag = true + mu.Unlock() + break + } + m[i] = struct{}{} + } + if errFlag { + break + } + mu.Unlock() + } + } + for i := 0; i < count; i++ { + wg.Add(1) + go func(num int) { + defer wg.Done() + time.Sleep(time.Duration(num%10) * time.Microsecond) + allocIDs() + }(i) + } + wg.Wait() + + close(errCh) + err = <-errCh + c.Assert(err, IsNil) +} + +// TestRollbackAlloc tests that when the allocation transaction commit failed, +// the local variable base and end doesn't change. +func (*testSuite) TestRollbackAlloc(c *C) { + store, err := mockstore.NewMockTikvStore() + c.Assert(err, IsNil) + defer store.Close() + dbID := int64(1) + tblID := int64(2) + err = kv.RunInNewTxn(store, false, func(txn kv.Transaction) error { + m := meta.NewMeta(txn) + err = m.CreateDatabase(&model.DBInfo{ID: dbID, Name: model.NewCIStr("a")}) + c.Assert(err, IsNil) + err = m.CreateTableOrView(dbID, &model.TableInfo{ID: tblID, Name: model.NewCIStr("t")}) + c.Assert(err, IsNil) + return nil + }) + c.Assert(err, IsNil) + + injectConf := new(kv.InjectionConfig) + injectConf.SetCommitError(errors.New("injected")) + injectedStore := kv.NewInjectedStore(store, injectConf) + alloc := autoid.NewAllocator(injectedStore, 1, false) + _, _, err = alloc.Alloc(2, 1) + c.Assert(err, NotNil) + c.Assert(alloc.Base(), Equals, int64(0)) + c.Assert(alloc.End(), Equals, int64(0)) + + err = alloc.Rebase(2, 100, true) + c.Assert(err, NotNil) + c.Assert(alloc.Base(), Equals, int64(0)) + c.Assert(alloc.End(), Equals, int64(0)) +} + +// TestNextStep tests generate next auto id step. 
+func (*testSuite) TestNextStep(c *C) { + nextStep := autoid.NextStep(2000000, 1*time.Nanosecond) + c.Assert(nextStep, Equals, int64(2000000)) + nextStep = autoid.NextStep(678910, 10*time.Second) + c.Assert(nextStep, Equals, int64(678910)) + nextStep = autoid.NextStep(50000, 10*time.Minute) + c.Assert(nextStep, Equals, int64(30000)) +} + +func BenchmarkAllocator_Alloc(b *testing.B) { + b.StopTimer() + store, err := mockstore.NewMockTikvStore() + if err != nil { + return + } + defer store.Close() + dbID := int64(1) + tblID := int64(2) + err = kv.RunInNewTxn(store, false, func(txn kv.Transaction) error { + m := meta.NewMeta(txn) + err = m.CreateDatabase(&model.DBInfo{ID: dbID, Name: model.NewCIStr("a")}) + if err != nil { + return err + } + err = m.CreateTableOrView(dbID, &model.TableInfo{ID: tblID, Name: model.NewCIStr("t")}) + if err != nil { + return err + } + return nil + }) + if err != nil { + return + } + alloc := autoid.NewAllocator(store, 1, false) + b.StartTimer() + for i := 0; i < b.N; i++ { + alloc.Alloc(2, 1) + } +} diff --git a/meta/autoid/errors.go b/meta/autoid/errors.go new file mode 100644 index 0000000..e44d96c --- /dev/null +++ b/meta/autoid/errors.go @@ -0,0 +1,36 @@ +// Copyright 2018 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package autoid + +import ( + "github.com/pingcap/tidb/parser/mysql" + "github.com/pingcap/tidb/parser/terror" +) + +// Error instances. +var ( + errInvalidTableID = terror.ClassAutoid.New(mysql.ErrInvalidTableID, mysql.MySQLErrName[mysql.ErrInvalidTableID]) + ErrAutoincReadFailed = terror.ClassAutoid.New(mysql.ErrAutoincReadFailed, mysql.MySQLErrName[mysql.ErrAutoincReadFailed]) + ErrWrongAutoKey = terror.ClassAutoid.New(mysql.ErrWrongAutoKey, mysql.MySQLErrName[mysql.ErrWrongAutoKey]) +) + +func init() { + // Map error codes to mysql error codes. + tableMySQLErrCodes := map[terror.ErrCode]uint16{ + mysql.ErrAutoincReadFailed: mysql.ErrAutoincReadFailed, + mysql.ErrWrongAutoKey: mysql.ErrWrongAutoKey, + mysql.ErrInvalidTableID: mysql.ErrInvalidTableID, + } + terror.ErrClassToMySQLCodes[terror.ClassAutoid] = tableMySQLErrCodes +} diff --git a/meta/meta.go b/meta/meta.go new file mode 100644 index 0000000..4742733 --- /dev/null +++ b/meta/meta.go @@ -0,0 +1,819 @@ +// Copyright 2015 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package meta + +import ( + "encoding/binary" + "encoding/json" + "fmt" + "github.com/pingcap/errors" + "github.com/pingcap/tidb/kv" + "github.com/pingcap/tidb/parser/model" + "github.com/pingcap/tidb/parser/mysql" + "github.com/pingcap/tidb/parser/terror" + "math" + "sort" + "strconv" + "strings" + "sync" + + "github.com/pingcap/tidb/structure" + "github.com/pingcap/tidb/util/logutil" + "go.uber.org/zap" +) + +var ( + globalIDMutex sync.Mutex +) + +// Meta structure: +// NextGlobalID -> int64 +// SchemaVersion -> int64 +// DBs -> { +// DB:1 -> db meta data []byte +// DB:2 -> db meta data []byte +// } +// DB:1 -> { +// Table:1 -> table meta data []byte +// Table:2 -> table meta data []byte +// TID:1 -> int64 +// TID:2 -> int64 +// } +// + +var ( + mMetaPrefix = []byte("m") + mNextGlobalIDKey = []byte("NextGlobalID") + mSchemaVersionKey = []byte("SchemaVersionKey") + mDBs = []byte("DBs") + mDBPrefix = "DB" + mTablePrefix = "Table" + mTableIDPrefix = "TID" + mBootstrapKey = []byte("BootstrapKey") + mSchemaDiffPrefix = "Diff" +) + +var ( + // ErrDBExists is the error for db exists. + ErrDBExists = terror.ClassMeta.New(mysql.ErrDBCreateExists, mysql.MySQLErrName[mysql.ErrDBCreateExists]) + // ErrDBNotExists is the error for db not exists. + ErrDBNotExists = terror.ClassMeta.New(mysql.ErrBadDB, mysql.MySQLErrName[mysql.ErrBadDB]) + // ErrTableExists is the error for table exists. + ErrTableExists = terror.ClassMeta.New(mysql.ErrTableExists, mysql.MySQLErrName[mysql.ErrTableExists]) + // ErrTableNotExists is the error for table not exists. + ErrTableNotExists = terror.ClassMeta.New(mysql.ErrNoSuchTable, mysql.MySQLErrName[mysql.ErrNoSuchTable]) +) + +// Meta is for handling meta information in a transaction. +type Meta struct { + txn *structure.TxStructure + StartTS uint64 // StartTS is the txn's start TS. + jobListKey JobListKeyType +} + +// NewMeta creates a Meta in transaction txn. +// If the current Meta needs to handle a job, jobListKey is the type of the job's list. +func NewMeta(txn kv.Transaction, jobListKeys ...JobListKeyType) *Meta { + txn.SetOption(kv.Priority, kv.PriorityHigh) + txn.SetOption(kv.SyncLog, true) + t := structure.NewStructure(txn, txn, mMetaPrefix) + listKey := DefaultJobListKey + if len(jobListKeys) != 0 { + listKey = jobListKeys[0] + } + return &Meta{txn: t, + StartTS: txn.StartTS(), + jobListKey: listKey, + } +} + +// NewSnapshotMeta creates a Meta with snapshot. +func NewSnapshotMeta(snapshot kv.Snapshot) *Meta { + t := structure.NewStructure(snapshot, nil, mMetaPrefix) + return &Meta{txn: t} +} + +// GenGlobalID generates next id globally. +func (m *Meta) GenGlobalID() (int64, error) { + globalIDMutex.Lock() + defer globalIDMutex.Unlock() + + return m.txn.Inc(mNextGlobalIDKey, 1) +} + +// GenGlobalIDs generates the next n global IDs. +func (m *Meta) GenGlobalIDs(n int) ([]int64, error) { + globalIDMutex.Lock() + defer globalIDMutex.Unlock() + + newID, err := m.txn.Inc(mNextGlobalIDKey, int64(n)) + if err != nil { + return nil, err + } + origID := newID - int64(n) + ids := make([]int64, 0, n) + for i := origID + 1; i <= newID; i++ { + ids = append(ids, i) + } + return ids, nil +} + +// GetGlobalID gets current global id. 
+func (m *Meta) GetGlobalID() (int64, error) { + return m.txn.GetInt64(mNextGlobalIDKey) +} + +func (m *Meta) dbKey(dbID int64) []byte { + return []byte(fmt.Sprintf("%s:%d", mDBPrefix, dbID)) +} + +func (m *Meta) autoTableIDKey(tableID int64) []byte { + return []byte(fmt.Sprintf("%s:%d", mTableIDPrefix, tableID)) +} + +func (m *Meta) tableKey(tableID int64) []byte { + return []byte(fmt.Sprintf("%s:%d", mTablePrefix, tableID)) +} + +// DDLJobHistoryKey is only used for testing. +func DDLJobHistoryKey(m *Meta, jobID int64) []byte { + return m.txn.EncodeHashDataKey(mDDLJobHistoryKey, m.jobIDKey(jobID)) +} + +// GenAutoTableIDKeyValue generates meta key by dbID, tableID and corresponding value by autoID. +func (m *Meta) GenAutoTableIDKeyValue(dbID, tableID, autoID int64) (key, value []byte) { + dbKey := m.dbKey(dbID) + autoTableIDKey := m.autoTableIDKey(tableID) + return m.txn.EncodeHashAutoIDKeyValue(dbKey, autoTableIDKey, autoID) +} + +// GenAutoTableID adds step to the auto ID of the table and returns the sum. +func (m *Meta) GenAutoTableID(dbID, tableID, step int64) (int64, error) { + // Check if DB exists. + dbKey := m.dbKey(dbID) + if err := m.checkDBExists(dbKey); err != nil { + return 0, errors.Trace(err) + } + // Check if table exists. + tableKey := m.tableKey(tableID) + if err := m.checkTableExists(dbKey, tableKey); err != nil { + return 0, errors.Trace(err) + } + + return m.txn.HInc(dbKey, m.autoTableIDKey(tableID), step) +} + +// GetAutoTableID gets current auto id with table id. +func (m *Meta) GetAutoTableID(dbID int64, tableID int64) (int64, error) { + return m.txn.HGetInt64(m.dbKey(dbID), m.autoTableIDKey(tableID)) +} + +// GetSchemaVersion gets current global schema version. +func (m *Meta) GetSchemaVersion() (int64, error) { + return m.txn.GetInt64(mSchemaVersionKey) +} + +// GenSchemaVersion generates next schema version. +func (m *Meta) GenSchemaVersion() (int64, error) { + return m.txn.Inc(mSchemaVersionKey, 1) +} + +func (m *Meta) checkDBExists(dbKey []byte) error { + v, err := m.txn.HGet(mDBs, dbKey) + if err == nil && v == nil { + err = ErrDBNotExists.GenWithStack("database doesn't exist") + } + return errors.Trace(err) +} + +func (m *Meta) checkDBNotExists(dbKey []byte) error { + v, err := m.txn.HGet(mDBs, dbKey) + if err == nil && v != nil { + err = ErrDBExists.GenWithStack("database already exists") + } + return errors.Trace(err) +} + +func (m *Meta) checkTableExists(dbKey []byte, tableKey []byte) error { + v, err := m.txn.HGet(dbKey, tableKey) + if err == nil && v == nil { + err = ErrTableNotExists.GenWithStack("table doesn't exist") + } + return errors.Trace(err) +} + +func (m *Meta) checkTableNotExists(dbKey []byte, tableKey []byte) error { + v, err := m.txn.HGet(dbKey, tableKey) + if err == nil && v != nil { + err = ErrTableExists.GenWithStack("table already exists") + } + return errors.Trace(err) +} + +// CreateDatabase creates a database with db info. +func (m *Meta) CreateDatabase(dbInfo *model.DBInfo) error { + dbKey := m.dbKey(dbInfo.ID) + + if err := m.checkDBNotExists(dbKey); err != nil { + return errors.Trace(err) + } + + data, err := json.Marshal(dbInfo) + if err != nil { + return errors.Trace(err) + } + + return m.txn.HSet(mDBs, dbKey, data) +} + +// UpdateDatabase updates a database with db info. 
+func (m *Meta) UpdateDatabase(dbInfo *model.DBInfo) error { + dbKey := m.dbKey(dbInfo.ID) + + if err := m.checkDBExists(dbKey); err != nil { + return errors.Trace(err) + } + + data, err := json.Marshal(dbInfo) + if err != nil { + return errors.Trace(err) + } + + return m.txn.HSet(mDBs, dbKey, data) +} + +// CreateTableOrView creates a table with tableInfo in database. +func (m *Meta) CreateTableOrView(dbID int64, tableInfo *model.TableInfo) error { + // Check if db exists. + dbKey := m.dbKey(dbID) + if err := m.checkDBExists(dbKey); err != nil { + return errors.Trace(err) + } + + // Check if table exists. + tableKey := m.tableKey(tableInfo.ID) + if err := m.checkTableNotExists(dbKey, tableKey); err != nil { + return errors.Trace(err) + } + + data, err := json.Marshal(tableInfo) + if err != nil { + return errors.Trace(err) + } + + return m.txn.HSet(dbKey, tableKey, data) +} + +// CreateTableAndSetAutoID creates a table with tableInfo in database, +// and rebases the table autoID. +func (m *Meta) CreateTableAndSetAutoID(dbID int64, tableInfo *model.TableInfo, autoID int64) error { + err := m.CreateTableOrView(dbID, tableInfo) + if err != nil { + return errors.Trace(err) + } + _, err = m.txn.HInc(m.dbKey(dbID), m.autoTableIDKey(tableInfo.ID), autoID) + return errors.Trace(err) +} + +// DropDatabase drops whole database. +func (m *Meta) DropDatabase(dbID int64) error { + // Check if db exists. + dbKey := m.dbKey(dbID) + if err := m.txn.HClear(dbKey); err != nil { + return errors.Trace(err) + } + + if err := m.txn.HDel(mDBs, dbKey); err != nil { + return errors.Trace(err) + } + + return nil +} + +// DropTableOrView drops table in database. +// If delAutoID is true, it will delete the auto_increment id key-value of the table. +// For rename table, we do not need to rename auto_increment id key-value. +func (m *Meta) DropTableOrView(dbID int64, tblID int64, delAutoID bool) error { + // Check if db exists. + dbKey := m.dbKey(dbID) + if err := m.checkDBExists(dbKey); err != nil { + return errors.Trace(err) + } + + // Check if table exists. + tableKey := m.tableKey(tblID) + if err := m.checkTableExists(dbKey, tableKey); err != nil { + return errors.Trace(err) + } + + if err := m.txn.HDel(dbKey, tableKey); err != nil { + return errors.Trace(err) + } + if delAutoID { + if err := m.txn.HDel(dbKey, m.autoTableIDKey(tblID)); err != nil { + return errors.Trace(err) + } + } + return nil +} + +// UpdateTable updates the table with table info. +func (m *Meta) UpdateTable(dbID int64, tableInfo *model.TableInfo) error { + // Check if db exists. + dbKey := m.dbKey(dbID) + if err := m.checkDBExists(dbKey); err != nil { + return errors.Trace(err) + } + + // Check if table exists. + tableKey := m.tableKey(tableInfo.ID) + if err := m.checkTableExists(dbKey, tableKey); err != nil { + return errors.Trace(err) + } + + data, err := json.Marshal(tableInfo) + if err != nil { + return errors.Trace(err) + } + + err = m.txn.HSet(dbKey, tableKey, data) + return errors.Trace(err) +} + +// ListTables shows all tables in database. 
+func (m *Meta) ListTables(dbID int64) ([]*model.TableInfo, error) { + dbKey := m.dbKey(dbID) + if err := m.checkDBExists(dbKey); err != nil { + return nil, errors.Trace(err) + } + + res, err := m.txn.HGetAll(dbKey) + if err != nil { + return nil, errors.Trace(err) + } + + tables := make([]*model.TableInfo, 0, len(res)/2) + for _, r := range res { + // only handle table meta + tableKey := string(r.Field) + if !strings.HasPrefix(tableKey, mTablePrefix) { + continue + } + + tbInfo := &model.TableInfo{} + err = json.Unmarshal(r.Value, tbInfo) + if err != nil { + return nil, errors.Trace(err) + } + + tables = append(tables, tbInfo) + } + + return tables, nil +} + +// ListDatabases shows all databases. +func (m *Meta) ListDatabases() ([]*model.DBInfo, error) { + res, err := m.txn.HGetAll(mDBs) + if err != nil { + return nil, errors.Trace(err) + } + + dbs := make([]*model.DBInfo, 0, len(res)) + for _, r := range res { + dbInfo := &model.DBInfo{} + err = json.Unmarshal(r.Value, dbInfo) + if err != nil { + return nil, errors.Trace(err) + } + dbs = append(dbs, dbInfo) + } + return dbs, nil +} + +// GetDatabase gets the database value with ID. +func (m *Meta) GetDatabase(dbID int64) (*model.DBInfo, error) { + dbKey := m.dbKey(dbID) + value, err := m.txn.HGet(mDBs, dbKey) + if err != nil || value == nil { + return nil, errors.Trace(err) + } + + dbInfo := &model.DBInfo{} + err = json.Unmarshal(value, dbInfo) + return dbInfo, errors.Trace(err) +} + +// GetTable gets the table value in database with tableID. +func (m *Meta) GetTable(dbID int64, tableID int64) (*model.TableInfo, error) { + // Check if db exists. + dbKey := m.dbKey(dbID) + if err := m.checkDBExists(dbKey); err != nil { + return nil, errors.Trace(err) + } + + tableKey := m.tableKey(tableID) + value, err := m.txn.HGet(dbKey, tableKey) + if err != nil || value == nil { + return nil, errors.Trace(err) + } + + tableInfo := &model.TableInfo{} + err = json.Unmarshal(value, tableInfo) + return tableInfo, errors.Trace(err) +} + +// DDL job structure +// DDLJobList: list jobs +// DDLJobHistory: hash +// DDLJobReorg: hash +// +// for multi DDL workers, only one can become the owner +// to operate DDL jobs, and dispatch them to MR Jobs. + +var ( + mDDLJobListKey = []byte("DDLJobList") + mDDLJobAddIdxList = []byte("DDLJobAddIdxList") + mDDLJobHistoryKey = []byte("DDLJobHistory") + mDDLJobReorgKey = []byte("DDLJobReorg") +) + +// JobListKeyType is a key type of the DDL job queue. +type JobListKeyType []byte + +var ( + // DefaultJobListKey keeps all actions of DDL jobs except "add index". + DefaultJobListKey JobListKeyType = mDDLJobListKey + // AddIndexJobListKey only keeps the action of adding index. + AddIndexJobListKey JobListKeyType = mDDLJobAddIdxList +) + +func (m *Meta) enQueueDDLJob(key []byte, job *model.Job) error { + b, err := job.Encode(true) + if err == nil { + err = m.txn.RPush(key, b) + } + return errors.Trace(err) +} + +// EnQueueDDLJob adds a DDL job to the list. +func (m *Meta) EnQueueDDLJob(job *model.Job, jobListKeys ...JobListKeyType) error { + listKey := m.jobListKey + if len(jobListKeys) != 0 { + listKey = jobListKeys[0] + } + + return m.enQueueDDLJob(listKey, job) +} + +func (m *Meta) deQueueDDLJob(key []byte) (*model.Job, error) { + value, err := m.txn.LPop(key) + if err != nil || value == nil { + return nil, errors.Trace(err) + } + + job := &model.Job{} + err = job.Decode(value) + return job, errors.Trace(err) +} + +// DeQueueDDLJob pops a DDL job from the list. 
+func (m *Meta) DeQueueDDLJob() (*model.Job, error) { + return m.deQueueDDLJob(m.jobListKey) +} + +func (m *Meta) getDDLJob(key []byte, index int64) (*model.Job, error) { + value, err := m.txn.LIndex(key, index) + if err != nil || value == nil { + return nil, errors.Trace(err) + } + + job := &model.Job{ + // For compatibility, if the job is enqueued by old version TiDB and Priority field is omitted, + // set the default priority to kv.PriorityLow. + Priority: kv.PriorityLow, + } + err = job.Decode(value) + // Check if the job.Priority is valid. + if job.Priority < kv.PriorityNormal || job.Priority > kv.PriorityHigh { + job.Priority = kv.PriorityLow + } + return job, errors.Trace(err) +} + +// GetDDLJobByIdx returns the corresponding DDL job by the index. +// The length of jobListKeys can only be 1 or 0. +// If its length is 1, we need to replace m.jobListKey with jobListKeys[0]. +// Otherwise, we use m.jobListKey directly. +func (m *Meta) GetDDLJobByIdx(index int64, jobListKeys ...JobListKeyType) (*model.Job, error) { + listKey := m.jobListKey + if len(jobListKeys) != 0 { + listKey = jobListKeys[0] + } + job, err := m.getDDLJob(listKey, index) + + return job, errors.Trace(err) +} + +// updateDDLJob updates the DDL job with index and key. +// updateRawArgs is used to determine whether to update the raw args when encode the job. +func (m *Meta) updateDDLJob(index int64, job *model.Job, key []byte, updateRawArgs bool) error { + b, err := job.Encode(updateRawArgs) + if err == nil { + err = m.txn.LSet(key, index, b) + } + return errors.Trace(err) +} + +// UpdateDDLJob updates the DDL job with index. +// updateRawArgs is used to determine whether to update the raw args when encode the job. +// The length of jobListKeys can only be 1 or 0. +// If its length is 1, we need to replace m.jobListKey with jobListKeys[0]. +// Otherwise, we use m.jobListKey directly. +func (m *Meta) UpdateDDLJob(index int64, job *model.Job, updateRawArgs bool, jobListKeys ...JobListKeyType) error { + listKey := m.jobListKey + if len(jobListKeys) != 0 { + listKey = jobListKeys[0] + } + err := m.updateDDLJob(index, job, listKey, updateRawArgs) + + return errors.Trace(err) +} + +// DDLJobQueueLen returns the DDL job queue length. +// The length of jobListKeys can only be 1 or 0. +// If its length is 1, we need to replace m.jobListKey with jobListKeys[0]. +// Otherwise, we use m.jobListKey directly. +func (m *Meta) DDLJobQueueLen(jobListKeys ...JobListKeyType) (int64, error) { + listKey := m.jobListKey + if len(jobListKeys) != 0 { + listKey = jobListKeys[0] + } + return m.txn.LLen(listKey) +} + +// GetAllDDLJobsInQueue gets all DDL Jobs in the current queue. +// The length of jobListKeys can only be 1 or 0. +// If its length is 1, we need to replace m.jobListKey with jobListKeys[0]. +// Otherwise, we use m.jobListKey directly. 
+func (m *Meta) GetAllDDLJobsInQueue(jobListKeys ...JobListKeyType) ([]*model.Job, error) {
+	listKey := m.jobListKey
+	if len(jobListKeys) != 0 {
+		listKey = jobListKeys[0]
+	}
+
+	values, err := m.txn.LGetAll(listKey)
+	if err != nil || values == nil {
+		return nil, errors.Trace(err)
+	}
+
+	jobs := make([]*model.Job, 0, len(values))
+	for _, val := range values {
+		job := &model.Job{}
+		err = job.Decode(val)
+		if err != nil {
+			return nil, errors.Trace(err)
+		}
+		jobs = append(jobs, job)
+	}
+
+	return jobs, nil
+}
+
+func (m *Meta) jobIDKey(id int64) []byte {
+	b := make([]byte, 8)
+	binary.BigEndian.PutUint64(b, uint64(id))
+	return b
+}
+
+func (m *Meta) reorgJobStartHandle(id int64) []byte {
+	// There is no "_start", to make it compatible with the older TiDB versions.
+	return m.jobIDKey(id)
+}
+
+func (m *Meta) reorgJobEndHandle(id int64) []byte {
+	b := make([]byte, 8, 12)
+	binary.BigEndian.PutUint64(b, uint64(id))
+	b = append(b, "_end"...)
+	return b
+}
+
+func (m *Meta) reorgJobPhysicalTableID(id int64) []byte {
+	b := make([]byte, 8, 12)
+	binary.BigEndian.PutUint64(b, uint64(id))
+	b = append(b, "_pid"...)
+	return b
+}
+
+func (m *Meta) addHistoryDDLJob(key []byte, job *model.Job, updateRawArgs bool) error {
+	b, err := job.Encode(updateRawArgs)
+	if err == nil {
+		err = m.txn.HSet(key, m.jobIDKey(job.ID), b)
+	}
+	return errors.Trace(err)
+}
+
+// AddHistoryDDLJob adds a DDL job to the history.
+func (m *Meta) AddHistoryDDLJob(job *model.Job, updateRawArgs bool) error {
+	return m.addHistoryDDLJob(mDDLJobHistoryKey, job, updateRawArgs)
+}
+
+func (m *Meta) getHistoryDDLJob(key []byte, id int64) (*model.Job, error) {
+	value, err := m.txn.HGet(key, m.jobIDKey(id))
+	if err != nil || value == nil {
+		return nil, errors.Trace(err)
+	}
+
+	job := &model.Job{}
+	err = job.Decode(value)
+	return job, errors.Trace(err)
+}
+
+// GetHistoryDDLJob gets a history DDL job.
+func (m *Meta) GetHistoryDDLJob(id int64) (*model.Job, error) {
+	job, err := m.getHistoryDDLJob(mDDLJobHistoryKey, id)
+
+	return job, errors.Trace(err)
+}
+
+// GetAllHistoryDDLJobs gets all history DDL jobs.
+func (m *Meta) GetAllHistoryDDLJobs() ([]*model.Job, error) {
+	pairs, err := m.txn.HGetAll(mDDLJobHistoryKey)
+	if err != nil {
+		return nil, errors.Trace(err)
+	}
+	return decodeAndSortJob(pairs)
+}
+
+// GetLastNHistoryDDLJobs gets the latest N history DDL jobs.
+func (m *Meta) GetLastNHistoryDDLJobs(num int) ([]*model.Job, error) {
+	pairs, err := m.txn.HGetLastN(mDDLJobHistoryKey, num)
+	if err != nil {
+		return nil, errors.Trace(err)
+	}
+	return decodeAndSortJob(pairs)
+}
+
+func decodeAndSortJob(jobPairs []structure.HashPair) ([]*model.Job, error) {
+	jobs := make([]*model.Job, 0, len(jobPairs))
+	for _, pair := range jobPairs {
+		job := &model.Job{}
+		err := job.Decode(pair.Value)
+		if err != nil {
+			return nil, errors.Trace(err)
+		}
+		jobs = append(jobs, job)
+	}
+	sorter := &jobsSorter{jobs: jobs}
+	sort.Sort(sorter)
+	return jobs, nil
+}
+
+// jobsSorter implements sort.Interface to order jobs by ascending job ID.
+type jobsSorter struct {
+	jobs []*model.Job
+}
+
+func (s *jobsSorter) Swap(i, j int) {
+	s.jobs[i], s.jobs[j] = s.jobs[j], s.jobs[i]
+}
+
+func (s *jobsSorter) Len() int {
+	return len(s.jobs)
+}
+
+func (s *jobsSorter) Less(i, j int) bool {
+	return s.jobs[i].ID < s.jobs[j].ID
+}
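The history helpers above all write to and read from the same hash (mDDLJobHistoryKey), keyed by the big-endian job ID, and decodeAndSortJob guarantees callers see the jobs in ascending ID order. A short sketch of how they compose inside a single transaction, assuming a mock store; the job IDs are placeholders:

package main

import (
	"context"
	"fmt"

	"github.com/pingcap/tidb/meta"
	"github.com/pingcap/tidb/parser/model"
	"github.com/pingcap/tidb/store/mockstore"
)

func main() {
	store, err := mockstore.NewMockTikvStore()
	if err != nil {
		panic(err)
	}
	defer store.Close()

	txn, err := store.Begin()
	if err != nil {
		panic(err)
	}
	m := meta.NewMeta(txn)

	// Record two finished DDL jobs, deliberately out of ID order.
	for _, id := range []int64{42, 7} {
		if err := m.AddHistoryDDLJob(&model.Job{ID: id}, true); err != nil {
			panic(err)
		}
	}

	// Reads within the same transaction see the uncommitted writes;
	// the result comes back sorted by job ID.
	jobs, err := m.GetLastNHistoryDDLJobs(2)
	if err != nil {
		panic(err)
	}
	for _, job := range jobs {
		fmt.Println(job.ID) // 7, then 42
	}

	if err := txn.Commit(context.Background()); err != nil {
		panic(err)
	}
}

GetAllHistoryDDLJobs walks the full hash through the same decodeAndSortJob path, so both queries return the same ordering.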
+
+// GetBootstrapVersion returns the version of the server that bootstrapped the store.
+// If the store is not bootstrapped, the version will be zero.
+func (m *Meta) GetBootstrapVersion() (int64, error) {
+	value, err := m.txn.GetInt64(mBootstrapKey)
+	return value, errors.Trace(err)
+}
+
+// FinishBootstrap finishes bootstrap.
+func (m *Meta) FinishBootstrap(version int64) error {
+	err := m.txn.Set(mBootstrapKey, []byte(fmt.Sprintf("%d", version)))
+	return errors.Trace(err)
+}
+
+// UpdateDDLReorgStartHandle saves the latest processed start handle of the job reorganization for later resuming.
+func (m *Meta) UpdateDDLReorgStartHandle(job *model.Job, startHandle int64) error {
+	err := m.txn.HSet(mDDLJobReorgKey, m.reorgJobStartHandle(job.ID), []byte(strconv.FormatInt(startHandle, 10)))
+	return errors.Trace(err)
+}
+
+// UpdateDDLReorgHandle saves the latest processed information of the job reorganization for later resuming.
+func (m *Meta) UpdateDDLReorgHandle(job *model.Job, startHandle, endHandle, physicalTableID int64) error {
+	err := m.txn.HSet(mDDLJobReorgKey, m.reorgJobStartHandle(job.ID), []byte(strconv.FormatInt(startHandle, 10)))
+	if err != nil {
+		return errors.Trace(err)
+	}
+	err = m.txn.HSet(mDDLJobReorgKey, m.reorgJobEndHandle(job.ID), []byte(strconv.FormatInt(endHandle, 10)))
+	if err != nil {
+		return errors.Trace(err)
+	}
+	err = m.txn.HSet(mDDLJobReorgKey, m.reorgJobPhysicalTableID(job.ID), []byte(strconv.FormatInt(physicalTableID, 10)))
+	return errors.Trace(err)
+}
+
+// RemoveDDLReorgHandle removes the job reorganization related handles.
+func (m *Meta) RemoveDDLReorgHandle(job *model.Job) error {
+	err := m.txn.HDel(mDDLJobReorgKey, m.reorgJobStartHandle(job.ID))
+	if err != nil {
+		return errors.Trace(err)
+	}
+	if err = m.txn.HDel(mDDLJobReorgKey, m.reorgJobEndHandle(job.ID)); err != nil {
+		logutil.BgLogger().Warn("remove DDL reorg end handle", zap.Error(err))
+	}
+	if err = m.txn.HDel(mDDLJobReorgKey, m.reorgJobPhysicalTableID(job.ID)); err != nil {
+		logutil.BgLogger().Warn("remove DDL reorg physical ID", zap.Error(err))
+	}
+	return nil
+}
+
+// GetDDLReorgHandle gets the latest processed DDL reorganization position.
+func (m *Meta) GetDDLReorgHandle(job *model.Job) (startHandle, endHandle, physicalTableID int64, err error) {
+	startHandle, err = m.txn.HGetInt64(mDDLJobReorgKey, m.reorgJobStartHandle(job.ID))
+	if err != nil {
+		err = errors.Trace(err)
+		return
+	}
+	endHandle, err = m.txn.HGetInt64(mDDLJobReorgKey, m.reorgJobEndHandle(job.ID))
+	if err != nil {
+		err = errors.Trace(err)
+		return
+	}
+	physicalTableID, err = m.txn.HGetInt64(mDDLJobReorgKey, m.reorgJobPhysicalTableID(job.ID))
+	if err != nil {
+		err = errors.Trace(err)
+		return
+	}
+	// physicalTableID may be 0, because an older version of TiDB (without table partition) doesn't store it.
+	// Fall back to the table's own values in this case.
+	if physicalTableID == 0 {
+		if job.ReorgMeta != nil {
+			endHandle = job.ReorgMeta.EndHandle
+		} else {
+			endHandle = math.MaxInt64
+		}
+		physicalTableID = job.TableID
+		logutil.BgLogger().Warn("new TiDB binary running on old TiDB DDL reorg data",
+			zap.Int64("partition ID", physicalTableID),
+			zap.Int64("startHandle", startHandle),
+			zap.Int64("endHandle", endHandle))
+	}
+	return
+}
+
+func (m *Meta) schemaDiffKey(schemaVersion int64) []byte {
+	return []byte(fmt.Sprintf("%s:%d", mSchemaDiffPrefix, schemaVersion))
+}
+
+// GetSchemaDiff gets the modification information on a given schema version.
+func (m *Meta) GetSchemaDiff(schemaVersion int64) (*model.SchemaDiff, error) { + diffKey := m.schemaDiffKey(schemaVersion) + data, err := m.txn.Get(diffKey) + + if err != nil || len(data) == 0 { + return nil, errors.Trace(err) + } + diff := &model.SchemaDiff{} + err = json.Unmarshal(data, diff) + return diff, errors.Trace(err) +} + +// SetSchemaDiff sets the modification information on a given schema version. +func (m *Meta) SetSchemaDiff(diff *model.SchemaDiff) error { + data, err := json.Marshal(diff) + if err != nil { + return errors.Trace(err) + } + diffKey := m.schemaDiffKey(diff.Version) + err = m.txn.Set(diffKey, data) + + return errors.Trace(err) +} + +func init() { + metaMySQLErrCodes := map[terror.ErrCode]uint16{ + mysql.ErrDBCreateExists: mysql.ErrDBCreateExists, + mysql.ErrBadDB: mysql.ErrBadDB, + mysql.ErrNoSuchTable: mysql.ErrNoSuchTable, + mysql.ErrTableExists: mysql.ErrTableExists, + } + terror.ErrClassToMySQLCodes[terror.ClassMeta] = metaMySQLErrCodes +} diff --git a/meta/meta_test.go b/meta/meta_test.go new file mode 100644 index 0000000..747283c --- /dev/null +++ b/meta/meta_test.go @@ -0,0 +1,503 @@ +// Copyright 2015 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package meta_test + +import ( + "context" + "math" + "strconv" + "sync" + "testing" + "time" + + . "github.com/pingcap/check" + "github.com/pingcap/tidb/meta" + "github.com/pingcap/tidb/parser/model" + "github.com/pingcap/tidb/store/mockstore" + "github.com/pingcap/tidb/util/testleak" +) + +func TestT(t *testing.T) { + CustomVerboseFlag = true + TestingT(t) +} + +var _ = Suite(&testSuite{}) + +type testSuite struct { +} + +func (s *testSuite) TestMeta(c *C) { + defer testleak.AfterTest(c)() + store, err := mockstore.NewMockTikvStore() + c.Assert(err, IsNil) + defer store.Close() + + txn, err := store.Begin() + c.Assert(err, IsNil) + defer txn.Rollback() + + t := meta.NewMeta(txn) + + n, err := t.GenGlobalID() + c.Assert(err, IsNil) + c.Assert(n, Equals, int64(1)) + + n, err = t.GetGlobalID() + c.Assert(err, IsNil) + c.Assert(n, Equals, int64(1)) + + var wg sync.WaitGroup + wg.Add(1) + go func() { + defer wg.Done() + ids, err := t.GenGlobalIDs(3) + c.Assert(err, IsNil) + anyMatch(c, ids, []int64{2, 3, 4}, []int64{6, 7, 8}) + }() + + wg.Add(1) + go func() { + defer wg.Done() + ids, err := t.GenGlobalIDs(4) + c.Assert(err, IsNil) + anyMatch(c, ids, []int64{5, 6, 7, 8}, []int64{2, 3, 4, 5}) + }() + wg.Wait() + + n, err = t.GetSchemaVersion() + c.Assert(err, IsNil) + c.Assert(n, Equals, int64(0)) + + n, err = t.GenSchemaVersion() + c.Assert(err, IsNil) + c.Assert(n, Equals, int64(1)) + + n, err = t.GetSchemaVersion() + c.Assert(err, IsNil) + c.Assert(n, Equals, int64(1)) + + dbInfo := &model.DBInfo{ + ID: 1, + Name: model.NewCIStr("a"), + } + err = t.CreateDatabase(dbInfo) + c.Assert(err, IsNil) + + err = t.CreateDatabase(dbInfo) + c.Assert(err, NotNil) + c.Assert(meta.ErrDBExists.Equal(err), IsTrue) + + v, err := t.GetDatabase(1) + c.Assert(err, IsNil) + c.Assert(v, DeepEquals, dbInfo) + + dbInfo.Name = model.NewCIStr("aa") + err = t.UpdateDatabase(dbInfo) + 
c.Assert(err, IsNil) + + v, err = t.GetDatabase(1) + c.Assert(err, IsNil) + c.Assert(v, DeepEquals, dbInfo) + + dbs, err := t.ListDatabases() + c.Assert(err, IsNil) + c.Assert(dbs, DeepEquals, []*model.DBInfo{dbInfo}) + + tbInfo := &model.TableInfo{ + ID: 1, + Name: model.NewCIStr("t"), + } + err = t.CreateTableOrView(1, tbInfo) + c.Assert(err, IsNil) + + n, err = t.GenAutoTableID(1, 1, 10) + c.Assert(err, IsNil) + c.Assert(n, Equals, int64(10)) + + n, err = t.GetAutoTableID(1, 1) + c.Assert(err, IsNil) + c.Assert(n, Equals, int64(10)) + + err = t.CreateTableOrView(1, tbInfo) + c.Assert(err, NotNil) + c.Assert(meta.ErrTableExists.Equal(err), IsTrue) + + tbInfo.Name = model.NewCIStr("tt") + err = t.UpdateTable(1, tbInfo) + c.Assert(err, IsNil) + + table, err := t.GetTable(1, 1) + c.Assert(err, IsNil) + c.Assert(table, DeepEquals, tbInfo) + + table, err = t.GetTable(1, 2) + c.Assert(err, IsNil) + c.Assert(table, IsNil) + + tbInfo2 := &model.TableInfo{ + ID: 2, + Name: model.NewCIStr("bb"), + } + err = t.CreateTableOrView(1, tbInfo2) + c.Assert(err, IsNil) + + tables, err := t.ListTables(1) + c.Assert(err, IsNil) + c.Assert(tables, DeepEquals, []*model.TableInfo{tbInfo, tbInfo2}) + // Generate an auto id. + n, err = t.GenAutoTableID(1, 2, 10) + c.Assert(err, IsNil) + c.Assert(n, Equals, int64(10)) + // Make sure the auto id key-value entry is there. + n, err = t.GetAutoTableID(1, 2) + c.Assert(err, IsNil) + c.Assert(n, Equals, int64(10)) + + err = t.DropTableOrView(1, tbInfo2.ID, true) + c.Assert(err, IsNil) + // Make sure auto id key-value entry is gone. + n, err = t.GetAutoTableID(1, 2) + c.Assert(err, IsNil) + c.Assert(n, Equals, int64(0)) + + tables, err = t.ListTables(1) + c.Assert(err, IsNil) + c.Assert(tables, DeepEquals, []*model.TableInfo{tbInfo}) + + // Test case for drop a table without delete auto id key-value entry. + tid := int64(100) + tbInfo100 := &model.TableInfo{ + ID: tid, + Name: model.NewCIStr("t_rename"), + } + // Create table. + err = t.CreateTableOrView(1, tbInfo100) + c.Assert(err, IsNil) + // Update auto ID. + currentDBID := int64(1) + n, err = t.GenAutoTableID(currentDBID, tid, 10) + c.Assert(err, IsNil) + c.Assert(n, Equals, int64(10)) + // Fail to update auto ID. + // The table ID doesn't exist. + nonExistentID := int64(1234) + _, err = t.GenAutoTableID(currentDBID, nonExistentID, 10) + c.Assert(err, NotNil) + c.Assert(meta.ErrTableNotExists.Equal(err), IsTrue) + // Fail to update auto ID. + // The current database ID doesn't exist. + currentDBID = nonExistentID + _, err = t.GenAutoTableID(currentDBID, tid, 10) + c.Assert(err, NotNil) + c.Assert(meta.ErrDBNotExists.Equal(err), IsTrue) + // Test case for CreateTableAndSetAutoID. + tbInfo3 := &model.TableInfo{ + ID: 3, + Name: model.NewCIStr("tbl3"), + } + err = t.CreateTableAndSetAutoID(1, tbInfo3, 123) + c.Assert(err, IsNil) + id, err := t.GetAutoTableID(1, tbInfo3.ID) + c.Assert(err, IsNil) + c.Assert(id, Equals, int64(123)) + // Test case for GenAutoTableIDKeyValue. 
+ key, val := t.GenAutoTableIDKeyValue(1, tbInfo3.ID, 1234) + c.Assert(val, DeepEquals, []byte(strconv.FormatInt(1234, 10))) + c.Assert(key, DeepEquals, []byte{0x6d, 0x44, 0x42, 0x3a, 0x31, 0x0, 0x0, 0x0, 0x0, 0xfb, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x68, 0x54, 0x49, 0x44, 0x3a, 0x33, 0x0, 0x0, 0x0, 0xfc}) + + err = t.DropDatabase(1) + c.Assert(err, IsNil) + err = t.DropDatabase(currentDBID) + c.Assert(err, IsNil) + + dbs, err = t.ListDatabases() + c.Assert(err, IsNil) + c.Assert(dbs, HasLen, 0) + + bootstrapVer, err := t.GetBootstrapVersion() + c.Assert(err, IsNil) + c.Assert(bootstrapVer, Equals, int64(0)) + + err = t.FinishBootstrap(int64(1)) + c.Assert(err, IsNil) + + bootstrapVer, err = t.GetBootstrapVersion() + c.Assert(err, IsNil) + c.Assert(bootstrapVer, Equals, int64(1)) + + // Test case for meta.FinishBootstrap with a version. + err = t.FinishBootstrap(int64(10)) + c.Assert(err, IsNil) + bootstrapVer, err = t.GetBootstrapVersion() + c.Assert(err, IsNil) + c.Assert(bootstrapVer, Equals, int64(10)) + + // Test case for SchemaDiff. + schemaDiff := &model.SchemaDiff{ + Version: 100, + SchemaID: 1, + Type: model.ActionTruncateTable, + TableID: 2, + OldTableID: 3, + } + err = t.SetSchemaDiff(schemaDiff) + c.Assert(err, IsNil) + readDiff, err := t.GetSchemaDiff(schemaDiff.Version) + c.Assert(err, IsNil) + c.Assert(readDiff, DeepEquals, schemaDiff) + + err = txn.Commit(context.Background()) + c.Assert(err, IsNil) + + // Test for DDLJobHistoryKey. + key = meta.DDLJobHistoryKey(t, 888) + c.Assert(key, DeepEquals, []byte{0x6d, 0x44, 0x44, 0x4c, 0x4a, 0x6f, 0x62, 0x48, 0x69, 0xff, 0x73, 0x74, 0x6f, 0x72, 0x79, 0x0, 0x0, 0x0, 0xfc, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x68, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x3, 0x78, 0xff, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xf7}) +} + +func (s *testSuite) TestSnapshot(c *C) { + defer testleak.AfterTest(c)() + store, err := mockstore.NewMockTikvStore() + c.Assert(err, IsNil) + defer store.Close() + + txn, _ := store.Begin() + m := meta.NewMeta(txn) + m.GenGlobalID() + n, _ := m.GetGlobalID() + c.Assert(n, Equals, int64(1)) + txn.Commit(context.Background()) + + ver1, _ := store.CurrentVersion() + time.Sleep(time.Millisecond) + txn, _ = store.Begin() + m = meta.NewMeta(txn) + m.GenGlobalID() + n, _ = m.GetGlobalID() + c.Assert(n, Equals, int64(2)) + txn.Commit(context.Background()) + + snapshot, _ := store.GetSnapshot(ver1) + snapMeta := meta.NewSnapshotMeta(snapshot) + n, _ = snapMeta.GetGlobalID() + c.Assert(n, Equals, int64(1)) + _, err = snapMeta.GenGlobalID() + c.Assert(err, NotNil) + c.Assert(err.Error(), Equals, "[structure:8220]write on snapshot") +} + +func (s *testSuite) TestDDL(c *C) { + defer testleak.AfterTest(c)() + store, err := mockstore.NewMockTikvStore() + c.Assert(err, IsNil) + defer store.Close() + + txn, err := store.Begin() + c.Assert(err, IsNil) + + defer txn.Rollback() + + t := meta.NewMeta(txn) + + job := &model.Job{ID: 1} + err = t.EnQueueDDLJob(job) + c.Assert(err, IsNil) + n, err := t.DDLJobQueueLen() + c.Assert(err, IsNil) + c.Assert(n, Equals, int64(1)) + + v, err := t.GetDDLJobByIdx(0) + c.Assert(err, IsNil) + c.Assert(v, DeepEquals, job) + v, err = t.GetDDLJobByIdx(1) + c.Assert(err, IsNil) + c.Assert(v, IsNil) + job.ID = 2 + err = t.UpdateDDLJob(0, job, true) + c.Assert(err, IsNil) + + err = t.UpdateDDLReorgStartHandle(job, 1) + c.Assert(err, IsNil) + + i, j, k, err := t.GetDDLReorgHandle(job) + c.Assert(err, IsNil) + c.Assert(i, Equals, int64(1)) + c.Assert(j, Equals, int64(math.MaxInt64)) + c.Assert(k, Equals, int64(0)) + + 
err = t.UpdateDDLReorgHandle(job, 1, 2, 3) + c.Assert(err, IsNil) + + i, j, k, err = t.GetDDLReorgHandle(job) + c.Assert(err, IsNil) + c.Assert(i, Equals, int64(1)) + c.Assert(j, Equals, int64(2)) + c.Assert(k, Equals, int64(3)) + + err = t.RemoveDDLReorgHandle(job) + c.Assert(err, IsNil) + + i, j, k, err = t.GetDDLReorgHandle(job) + c.Assert(err, IsNil) + c.Assert(i, Equals, int64(0)) + // The default value for endHandle is MaxInt64, not 0. + c.Assert(j, Equals, int64(math.MaxInt64)) + c.Assert(k, Equals, int64(0)) + + // Test GetDDLReorgHandle failed. + _, _, _, err = t.GetDDLReorgHandle(job) + c.Assert(err, IsNil) + + v, err = t.DeQueueDDLJob() + c.Assert(err, IsNil) + c.Assert(v, DeepEquals, job) + + err = t.AddHistoryDDLJob(job, true) + c.Assert(err, IsNil) + v, err = t.GetHistoryDDLJob(2) + c.Assert(err, IsNil) + c.Assert(v, DeepEquals, job) + + // Add multiple history jobs. + arg := "test arg" + historyJob1 := &model.Job{ID: 1234} + historyJob1.Args = append(job.Args, arg) + err = t.AddHistoryDDLJob(historyJob1, true) + c.Assert(err, IsNil) + historyJob2 := &model.Job{ID: 123} + historyJob2.Args = append(job.Args, arg) + err = t.AddHistoryDDLJob(historyJob2, false) + c.Assert(err, IsNil) + all, err := t.GetAllHistoryDDLJobs() + c.Assert(err, IsNil) + var lastID int64 + for _, job := range all { + c.Assert(job.ID, Greater, lastID) + lastID = job.ID + arg1 := "" + job.DecodeArgs(&arg1) + if job.ID == historyJob1.ID { + c.Assert(*(job.Args[0].(*string)), Equals, historyJob1.Args[0]) + } else { + c.Assert(job.Args, IsNil) + } + } + + // Test for get last N history ddl jobs. + historyJobs, err := t.GetLastNHistoryDDLJobs(2) + c.Assert(err, IsNil) + c.Assert(len(historyJobs), Equals, 2) + c.Assert(historyJobs[0].ID == 123, IsTrue) + c.Assert(historyJobs[1].ID == 1234, IsTrue) + + // Test GetAllDDLJobsInQueue. + err = t.EnQueueDDLJob(job) + c.Assert(err, IsNil) + job1 := &model.Job{ID: 2} + err = t.EnQueueDDLJob(job1) + c.Assert(err, IsNil) + jobs, err := t.GetAllDDLJobsInQueue() + c.Assert(err, IsNil) + expectJobs := []*model.Job{job, job1} + c.Assert(jobs, DeepEquals, expectJobs) + + err = txn.Commit(context.Background()) + c.Assert(err, IsNil) + + // Test for add index job. 
+ txn1, err := store.Begin() + c.Assert(err, IsNil) + defer txn1.Rollback() + + m := meta.NewMeta(txn1, meta.AddIndexJobListKey) + err = m.EnQueueDDLJob(job) + c.Assert(err, IsNil) + job.ID = 123 + err = m.UpdateDDLJob(0, job, true, meta.AddIndexJobListKey) + c.Assert(err, IsNil) + v, err = m.GetDDLJobByIdx(0, meta.AddIndexJobListKey) + c.Assert(err, IsNil) + c.Assert(v, DeepEquals, job) + l, err := m.DDLJobQueueLen(meta.AddIndexJobListKey) + c.Assert(err, IsNil) + c.Assert(l, Equals, int64(1)) + jobs, err = m.GetAllDDLJobsInQueue(meta.AddIndexJobListKey) + c.Assert(err, IsNil) + expectJobs = []*model.Job{job} + c.Assert(jobs, DeepEquals, expectJobs) + + err = txn1.Commit(context.Background()) + c.Assert(err, IsNil) +} + +func (s *testSuite) BenchmarkGenGlobalIDs(c *C) { + defer testleak.AfterTest(c)() + store, err := mockstore.NewMockTikvStore() + c.Assert(err, IsNil) + defer store.Close() + + txn, err := store.Begin() + c.Assert(err, IsNil) + defer txn.Rollback() + + t := meta.NewMeta(txn) + + c.ResetTimer() + var ids []int64 + for i := 0; i < c.N; i++ { + ids, _ = t.GenGlobalIDs(10) + } + c.Assert(ids, HasLen, 10) + c.Assert(ids[9], Equals, int64(c.N)*10) +} + +func (s *testSuite) BenchmarkGenGlobalIDOneByOne(c *C) { + defer testleak.AfterTest(c)() + store, err := mockstore.NewMockTikvStore() + c.Assert(err, IsNil) + defer store.Close() + + txn, err := store.Begin() + c.Assert(err, IsNil) + defer txn.Rollback() + + t := meta.NewMeta(txn) + + c.ResetTimer() + var id int64 + for i := 0; i < c.N; i++ { + for j := 0; j < 10; j++ { + id, _ = t.GenGlobalID() + } + } + c.Assert(id, Equals, int64(c.N)*10) +} + +func anyMatch(c *C, ids []int64, candidates ...[]int64) { + var match bool +OUTER: + for _, cand := range candidates { + if len(ids) != len(cand) { + continue + } + for i, v := range cand { + if ids[i] != v { + continue OUTER + } + } + match = true + break + } + c.Assert(match, IsTrue) +} diff --git a/owner/fail_test.go b/owner/fail_test.go new file mode 100644 index 0000000..af468ce --- /dev/null +++ b/owner/fail_test.go @@ -0,0 +1,114 @@ +// Copyright 2018 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package owner + +import ( + "context" + "math" + "net" + "os" + "sync" + "testing" + "time" + + . 
"github.com/pingcap/check" + "github.com/pingcap/failpoint" + "github.com/pingcap/tidb/parser/terror" + "github.com/pingcap/tidb/util/logutil" + "github.com/pingcap/tidb/util/testleak" + "go.etcd.io/etcd/clientv3" + "google.golang.org/grpc" +) + +func TestT(t *testing.T) { + CustomVerboseFlag = true + logLevel := os.Getenv("log_level") + logutil.InitLogger(logutil.NewLogConfig(logLevel, "", logutil.EmptyFileLogConfig, false)) + TestingT(t) +} + +var _ = Suite(&testSuite{}) + +type testSuite struct { +} + +func (s *testSuite) SetUpSuite(c *C) { +} + +func (s *testSuite) TearDownSuite(c *C) { +} + +var ( + endpoints = []string{"unix://new_session:12379"} + dialTimeout = 5 * time.Second + retryCnt = int(math.MaxInt32) +) + +func (s *testSuite) TestFailNewSession(c *C) { + ln, err := net.Listen("unix", "new_session:12379") + c.Assert(err, IsNil) + srv := grpc.NewServer(grpc.ConnectionTimeout(time.Minute)) + var stop sync.WaitGroup + stop.Add(1) + go func() { + if err = srv.Serve(ln); err != nil { + c.Errorf("can't serve gRPC requests %v", err) + } + stop.Done() + }() + + leakFunc := testleak.AfterTest(c) + defer func() { + srv.Stop() + stop.Wait() + leakFunc() + }() + + func() { + cli, err := clientv3.New(clientv3.Config{ + Endpoints: endpoints, + DialTimeout: dialTimeout, + }) + c.Assert(err, IsNil) + defer func() { + if cli != nil { + cli.Close() + } + c.Assert(failpoint.Disable("github.com/pingcap/tidb/owner/closeClient"), IsNil) + }() + c.Assert(failpoint.Enable("github.com/pingcap/tidb/owner/closeClient", `return(true)`), IsNil) + _, err = NewSession(context.Background(), "fail_new_serssion", cli, retryCnt, ManagerSessionTTL) + isContextDone := terror.ErrorEqual(grpc.ErrClientConnClosing, err) || terror.ErrorEqual(context.Canceled, err) + c.Assert(isContextDone, IsTrue, Commentf("err %v", err)) + }() + + func() { + cli, err := clientv3.New(clientv3.Config{ + Endpoints: endpoints, + DialTimeout: dialTimeout, + }) + c.Assert(err, IsNil) + defer func() { + if cli != nil { + cli.Close() + } + c.Assert(failpoint.Disable("github.com/pingcap/tidb/owner/closeGrpc"), IsNil) + }() + c.Assert(failpoint.Enable("github.com/pingcap/tidb/owner/closeGrpc", `return(true)`), IsNil) + _, err = NewSession(context.Background(), "fail_new_serssion", cli, retryCnt, ManagerSessionTTL) + isContextDone := terror.ErrorEqual(grpc.ErrClientConnClosing, err) || terror.ErrorEqual(context.Canceled, err) + c.Assert(isContextDone, IsTrue, Commentf("err %v", err)) + }() + +} diff --git a/owner/manager.go b/owner/manager.go new file mode 100644 index 0000000..0ab33ae --- /dev/null +++ b/owner/manager.go @@ -0,0 +1,382 @@ +// Copyright 2017 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package owner + +import ( + "context" + "fmt" + "math" + "os" + "strconv" + "sync/atomic" + "time" + "unsafe" + + "github.com/pingcap/errors" + "github.com/pingcap/failpoint" + "github.com/pingcap/tidb/parser/terror" + + "github.com/pingcap/tidb/util" + "github.com/pingcap/tidb/util/logutil" + "go.etcd.io/etcd/clientv3" + "go.etcd.io/etcd/clientv3/concurrency" + "go.etcd.io/etcd/etcdserver/api/v3rpc/rpctypes" + "go.etcd.io/etcd/mvcc/mvccpb" + "go.uber.org/zap" + "google.golang.org/grpc" +) + +const ( + newSessionRetryInterval = 200 * time.Millisecond + logIntervalCnt = int(3 * time.Second / newSessionRetryInterval) +) + +// Manager is used to campaign the owner and manage the owner information. +type Manager interface { + // ID returns the ID of the manager. + ID() string + // IsOwner returns whether the ownerManager is the owner. + IsOwner() bool + // RetireOwner make the manager to be a not owner. It's exported for testing. + RetireOwner() + // GetOwnerID gets the owner ID. + GetOwnerID(ctx context.Context) (string, error) + // CampaignOwner campaigns the owner. + CampaignOwner(ctx context.Context) error + // ResignOwner lets the owner start a new election. + ResignOwner(ctx context.Context) error + // Cancel cancels this etcd ownerManager campaign. + Cancel() +} + +const ( + // NewSessionDefaultRetryCnt is the default retry times when create new session. + NewSessionDefaultRetryCnt = 3 + // NewSessionRetryUnlimited is the unlimited retry times when create new session. + NewSessionRetryUnlimited = math.MaxInt64 + keyOpDefaultTimeout = 5 * time.Second +) + +// DDLOwnerChecker is used to check whether tidb is owner. +type DDLOwnerChecker interface { + // IsOwner returns whether the ownerManager is the owner. + IsOwner() bool +} + +// ownerManager represents the structure which is used for electing owner. +type ownerManager struct { + id string // id is the ID of the manager. + key string + prompt string + logPrefix string + logCtx context.Context + etcdCli *clientv3.Client + cancel context.CancelFunc + elec unsafe.Pointer +} + +// NewOwnerManager creates a new Manager. +func NewOwnerManager(etcdCli *clientv3.Client, prompt, id, key string, cancel context.CancelFunc) Manager { + logPrefix := fmt.Sprintf("[%s] %s ownerManager %s", prompt, key, id) + return &ownerManager{ + etcdCli: etcdCli, + id: id, + key: key, + prompt: prompt, + cancel: cancel, + logPrefix: logPrefix, + logCtx: logutil.WithKeyValue(context.Background(), "owner info", logPrefix), + } +} + +// ID implements Manager.ID interface. +func (m *ownerManager) ID() string { + return m.id +} + +// IsOwner implements Manager.IsOwner interface. +func (m *ownerManager) IsOwner() bool { + return atomic.LoadPointer(&m.elec) != unsafe.Pointer(nil) +} + +// Cancel implements Manager.Cancel interface. +func (m *ownerManager) Cancel() { + m.cancel() +} + +// ManagerSessionTTL is the etcd session's TTL in seconds. It's exported for testing. +var ManagerSessionTTL = 60 + +// setManagerSessionTTL sets the ManagerSessionTTL value, it's used for testing. +func setManagerSessionTTL() error { + ttlStr := os.Getenv("tidb_manager_ttl") + if len(ttlStr) == 0 { + return nil + } + ttl, err := strconv.Atoi(ttlStr) + if err != nil { + return errors.Trace(err) + } + ManagerSessionTTL = ttl + return nil +} + +// NewSession creates a new etcd session. 
+func NewSession(ctx context.Context, logPrefix string, etcdCli *clientv3.Client, retryCnt, ttl int) (*concurrency.Session, error) { + var err error + + var etcdSession *concurrency.Session + failedCnt := 0 + for i := 0; i < retryCnt; i++ { + if err = contextDone(ctx, err); err != nil { + return etcdSession, errors.Trace(err) + } + + failpoint.Inject("closeClient", func(val failpoint.Value) { + if val.(bool) { + if err := etcdCli.Close(); err != nil { + failpoint.Return(etcdSession, errors.Trace(err)) + } + } + }) + + failpoint.Inject("closeGrpc", func(val failpoint.Value) { + if val.(bool) { + if err := etcdCli.ActiveConnection().Close(); err != nil { + failpoint.Return(etcdSession, errors.Trace(err)) + } + } + }) + + etcdSession, err = concurrency.NewSession(etcdCli, + concurrency.WithTTL(ttl), concurrency.WithContext(ctx)) + + if err == nil { + break + } + if failedCnt%logIntervalCnt == 0 { + logutil.BgLogger().Warn("failed to new session to etcd", zap.String("ownerInfo", logPrefix), zap.Error(err)) + } + + time.Sleep(newSessionRetryInterval) + failedCnt++ + } + return etcdSession, errors.Trace(err) +} + +// CampaignOwner implements Manager.CampaignOwner interface. +func (m *ownerManager) CampaignOwner(ctx context.Context) error { + logPrefix := fmt.Sprintf("[%s] %s", m.prompt, m.key) + session, err := NewSession(ctx, logPrefix, m.etcdCli, NewSessionDefaultRetryCnt, ManagerSessionTTL) + if err != nil { + return errors.Trace(err) + } + go m.campaignLoop(ctx, session) + return nil +} + +// ResignOwner lets the owner start a new election. +func (m *ownerManager) ResignOwner(ctx context.Context) error { + elec := (*concurrency.Election)(atomic.LoadPointer(&m.elec)) + if elec == nil { + return errors.Errorf("This node is not a ddl owner, can't be resigned.") + } + + childCtx, cancel := context.WithTimeout(ctx, keyOpDefaultTimeout) + err := elec.Resign(childCtx) + cancel() + if err != nil { + return errors.Trace(err) + } + + logutil.Logger(m.logCtx).Warn("resign ddl owner success") + return nil +} + +func (m *ownerManager) toBeOwner(elec *concurrency.Election) { + atomic.StorePointer(&m.elec, unsafe.Pointer(elec)) +} + +// RetireOwner make the manager to be a not owner. +func (m *ownerManager) RetireOwner() { + atomic.StorePointer(&m.elec, nil) +} + +func (m *ownerManager) campaignLoop(ctx context.Context, etcdSession *concurrency.Session) { + var cancel context.CancelFunc + ctx, cancel = context.WithCancel(ctx) + defer func() { + cancel() + if r := recover(); r != nil { + buf := util.GetStack() + logutil.BgLogger().Error("recover panic", zap.String("prompt", m.prompt), zap.Any("error", r), zap.String("buffer", string(buf))) + + } + }() + + logPrefix := m.logPrefix + logCtx := m.logCtx + var err error + for { + if err != nil { + + } + + select { + case <-etcdSession.Done(): + logutil.Logger(logCtx).Info("etcd session is done, creates a new one") + leaseID := etcdSession.Lease() + etcdSession, err = NewSession(ctx, logPrefix, m.etcdCli, NewSessionRetryUnlimited, ManagerSessionTTL) + if err != nil { + logutil.Logger(logCtx).Info("break campaign loop, NewSession failed", zap.Error(err)) + m.revokeSession(logPrefix, leaseID) + return + } + case <-ctx.Done(): + logutil.Logger(logCtx).Info("break campaign loop, context is done") + m.revokeSession(logPrefix, etcdSession.Lease()) + return + default: + } + // If the etcd server turns clocks forward,the following case may occur. + // The etcd server deletes this session's lease ID, but etcd session doesn't find it. 
+ // In this time if we do the campaign operation, the etcd server will return ErrLeaseNotFound. + if terror.ErrorEqual(err, rpctypes.ErrLeaseNotFound) { + if etcdSession != nil { + err = etcdSession.Close() + logutil.Logger(logCtx).Info("etcd session encounters the error of lease not found, closes it", zap.Error(err)) + } + continue + } + + elec := concurrency.NewElection(etcdSession, m.key) + err = elec.Campaign(ctx, m.id) + if err != nil { + logutil.Logger(logCtx).Info("failed to campaign", zap.Error(err)) + continue + } + + ownerKey, err := GetOwnerInfo(ctx, logCtx, elec, m.id) + if err != nil { + continue + } + + m.toBeOwner(elec) + m.watchOwner(ctx, etcdSession, ownerKey) + m.RetireOwner() + + logutil.Logger(logCtx).Warn("is not the owner") + } +} + +func (m *ownerManager) revokeSession(logPrefix string, leaseID clientv3.LeaseID) { + // Revoke the session lease. + // If revoke takes longer than the ttl, lease is expired anyway. + cancelCtx, cancel := context.WithTimeout(context.Background(), + time.Duration(ManagerSessionTTL)*time.Second) + _, err := m.etcdCli.Revoke(cancelCtx, leaseID) + cancel() + logutil.Logger(m.logCtx).Info("revoke session", zap.Error(err)) +} + +// GetOwnerID implements Manager.GetOwnerID interface. +func (m *ownerManager) GetOwnerID(ctx context.Context) (string, error) { + resp, err := m.etcdCli.Get(ctx, m.key, clientv3.WithFirstCreate()...) + if err != nil { + return "", errors.Trace(err) + } + if len(resp.Kvs) == 0 { + return "", concurrency.ErrElectionNoLeader + } + return string(resp.Kvs[0].Value), nil +} + +// GetOwnerInfo gets the owner information. +func GetOwnerInfo(ctx, logCtx context.Context, elec *concurrency.Election, id string) (string, error) { + resp, err := elec.Leader(ctx) + if err != nil { + // If no leader elected currently, it returns ErrElectionNoLeader. + logutil.Logger(logCtx).Info("failed to get leader", zap.Error(err)) + return "", errors.Trace(err) + } + ownerID := string(resp.Kvs[0].Value) + logutil.Logger(logCtx).Info("get owner", zap.String("ownerID", ownerID)) + if ownerID != id { + logutil.Logger(logCtx).Warn("is not the owner") + return "", errors.New("ownerInfoNotMatch") + } + + return string(resp.Kvs[0].Key), nil +} + +func (m *ownerManager) watchOwner(ctx context.Context, etcdSession *concurrency.Session, key string) { + logPrefix := fmt.Sprintf("[%s] ownerManager %s watch owner key %v", m.prompt, m.id, key) + logCtx := logutil.WithKeyValue(context.Background(), "owner info", logPrefix) + logutil.BgLogger().Debug(logPrefix) + watchCh := m.etcdCli.Watch(ctx, key) + for { + select { + case resp, ok := <-watchCh: + if !ok { + + logutil.Logger(logCtx).Info("watcher is closed, no owner") + return + } + if resp.Canceled { + + logutil.Logger(logCtx).Info("watch canceled, no owner") + return + } + + for _, ev := range resp.Events { + if ev.Type == mvccpb.DELETE { + + logutil.Logger(logCtx).Info("watch failed, owner is deleted") + return + } + } + case <-etcdSession.Done(): + + return + case <-ctx.Done(): + + return + } + } +} + +func init() { + err := setManagerSessionTTL() + if err != nil { + logutil.BgLogger().Warn("set manager session TTL failed", zap.Error(err)) + } +} + +func contextDone(ctx context.Context, err error) error { + select { + case <-ctx.Done(): + return errors.Trace(ctx.Err()) + default: + } + // Sometime the ctx isn't closed, but the etcd client is closed, + // we need to treat it as if context is done. + // TODO: Make sure ctx is closed with etcd client. 
+ if terror.ErrorEqual(err, context.Canceled) || + terror.ErrorEqual(err, context.DeadlineExceeded) || + terror.ErrorEqual(err, grpc.ErrClientConnClosing) { + return errors.Trace(err) + } + + return nil +} diff --git a/owner/manager_test.go b/owner/manager_test.go new file mode 100644 index 0000000..0d92561 --- /dev/null +++ b/owner/manager_test.go @@ -0,0 +1,195 @@ +// Copyright 2019 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package owner_test + +import ( + "context" + "fmt" + "testing" + "time" + + "github.com/pingcap/errors" + . "github.com/pingcap/tidb/ddl" + "github.com/pingcap/tidb/owner" + "github.com/pingcap/tidb/parser/terror" + "github.com/pingcap/tidb/store/mockstore" + "github.com/pingcap/tidb/util/logutil" + "go.etcd.io/etcd/clientv3" + "go.etcd.io/etcd/clientv3/concurrency" + "go.etcd.io/etcd/integration" + goctx "golang.org/x/net/context" +) + +const testLease = 5 * time.Millisecond + +func checkOwner(d DDL, fbVal bool) (isOwner bool) { + manager := d.OwnerManager() + // The longest to wait for 3 seconds to + // make sure that campaigning owners is completed. + for i := 0; i < 600; i++ { + time.Sleep(5 * time.Millisecond) + isOwner = manager.IsOwner() + if isOwner == fbVal { + break + } + } + return +} + +func TestSingle(t *testing.T) { + store, err := mockstore.NewMockTikvStore() + if err != nil { + t.Fatal(err) + } + defer store.Close() + + clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1}) + defer clus.Terminate(t) + cli := clus.RandClient() + ctx := goctx.Background() + d := NewDDL( + ctx, + WithEtcdClient(cli), + WithStore(store), + WithLease(testLease), + ) + defer d.Stop() + + isOwner := checkOwner(d, true) + if !isOwner { + t.Fatalf("expect true, got isOwner:%v", isOwner) + } + + // test for newSession failed + ctx, cancel := goctx.WithCancel(ctx) + cancel() + manager := owner.NewOwnerManager(cli, "ddl", "ddl_id", DDLOwnerKey, nil) + err = manager.CampaignOwner(ctx) + if !terror.ErrorEqual(err, goctx.Canceled) && + !terror.ErrorEqual(err, goctx.DeadlineExceeded) { + t.Fatalf("campaigned result don't match, err %v", err) + } + isOwner = checkOwner(d, true) + if !isOwner { + t.Fatalf("expect true, got isOwner:%v", isOwner) + } + // The test is used to exit campaign loop. + d.OwnerManager().Cancel() + isOwner = checkOwner(d, false) + if isOwner { + t.Fatalf("expect false, got isOwner:%v", isOwner) + } + time.Sleep(200 * time.Millisecond) + ownerID, _ := manager.GetOwnerID(goctx.Background()) + // The error is ok to be not nil since we canceled the manager. 
+ if ownerID != "" { + t.Fatalf("owner %s is not empty", ownerID) + } +} + +func TestCluster(t *testing.T) { + tmpTTL := 3 + orignalTTL := owner.ManagerSessionTTL + owner.ManagerSessionTTL = tmpTTL + defer func() { + owner.ManagerSessionTTL = orignalTTL + }() + store, err := mockstore.NewMockTikvStore() + if err != nil { + t.Fatal(err) + } + defer store.Close() + clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 4}) + defer clus.Terminate(t) + + cli := clus.Client(0) + d := NewDDL( + goctx.Background(), + WithEtcdClient(cli), + WithStore(store), + WithLease(testLease), + ) + isOwner := checkOwner(d, true) + if !isOwner { + t.Fatalf("expect true, got isOwner:%v", isOwner) + } + cli1 := clus.Client(1) + d1 := NewDDL( + goctx.Background(), + WithEtcdClient(cli1), + WithStore(store), + WithLease(testLease), + ) + isOwner = checkOwner(d1, false) + if isOwner { + t.Fatalf("expect false, got isOwner:%v", isOwner) + } + + // Delete the leader key, the d1 become the owner. + cliRW := clus.Client(2) + err = deleteLeader(cliRW, DDLOwnerKey) + if err != nil { + t.Fatal(err) + } + isOwner = checkOwner(d, false) + if isOwner { + t.Fatalf("expect false, got isOwner:%v", isOwner) + } + d.Stop() + + // d3 (not owner) stop + cli3 := clus.Client(3) + d3 := NewDDL( + goctx.Background(), + WithEtcdClient(cli3), + WithStore(store), + WithLease(testLease), + ) + defer d3.Stop() + isOwner = checkOwner(d3, false) + if isOwner { + t.Fatalf("expect false, got isOwner:%v", isOwner) + } + d3.Stop() + + // Cancel the owner context, there is no owner. + d1.Stop() + time.Sleep(time.Duration(tmpTTL+1) * time.Second) + session, err := concurrency.NewSession(cliRW) + if err != nil { + t.Fatalf("new session failed %v", err) + } + elec := concurrency.NewElection(session, DDLOwnerKey) + logPrefix := fmt.Sprintf("[ddl] %s ownerManager %s", DDLOwnerKey, "useless id") + logCtx := logutil.WithKeyValue(context.Background(), "owner info", logPrefix) + _, err = owner.GetOwnerInfo(goctx.Background(), logCtx, elec, "useless id") + if !terror.ErrorEqual(err, concurrency.ErrElectionNoLeader) { + t.Fatalf("get owner info result don't match, err %v", err) + } +} + +func deleteLeader(cli *clientv3.Client, prefixKey string) error { + session, err := concurrency.NewSession(cli) + if err != nil { + return errors.Trace(err) + } + defer session.Close() + elec := concurrency.NewElection(session, prefixKey) + resp, err := elec.Leader(goctx.Background()) + if err != nil { + return errors.Trace(err) + } + _, err = cli.Delete(goctx.Background(), string(resp.Kvs[0].Key)) + return errors.Trace(err) +} diff --git a/owner/mock.go b/owner/mock.go new file mode 100644 index 0000000..60f2515 --- /dev/null +++ b/owner/mock.go @@ -0,0 +1,86 @@ +// Copyright 2017 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package owner + +import ( + "context" + "sync/atomic" + + "github.com/pingcap/errors" +) + +var _ Manager = &mockManager{} + +// mockManager represents the structure which is used for electing owner. +// It's used for local store and testing. +// So this worker will always be the owner. 
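+//
+// Illustrative use in a test:
+//
+//	ctx, cancel := context.WithCancel(context.Background())
+//	m := NewMockManager("mock-owner-id", cancel)
+//	_ = m.CampaignOwner(ctx) // the mock becomes the owner immediately
+//	_ = m.IsOwner()          // true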
+type mockManager struct { + owner int32 + id string // id is the ID of manager. + cancel context.CancelFunc +} + +// NewMockManager creates a new mock Manager. +func NewMockManager(id string, cancel context.CancelFunc) Manager { + return &mockManager{ + id: id, + cancel: cancel, + } +} + +// ID implements Manager.ID interface. +func (m *mockManager) ID() string { + return m.id +} + +// IsOwner implements Manager.IsOwner interface. +func (m *mockManager) IsOwner() bool { + return atomic.LoadInt32(&m.owner) == 1 +} + +func (m *mockManager) toBeOwner() { + atomic.StoreInt32(&m.owner, 1) +} + +// RetireOwner implements Manager.RetireOwner interface. +func (m *mockManager) RetireOwner() { + atomic.StoreInt32(&m.owner, 0) +} + +// Cancel implements Manager.Cancel interface. +func (m *mockManager) Cancel() { + m.cancel() +} + +// GetOwnerID implements Manager.GetOwnerID interface. +func (m *mockManager) GetOwnerID(ctx context.Context) (string, error) { + if m.IsOwner() { + return m.ID(), nil + } + return "", errors.New("no owner") +} + +// CampaignOwner implements Manager.CampaignOwner interface. +func (m *mockManager) CampaignOwner(_ context.Context) error { + m.toBeOwner() + return nil +} + +// ResignOwner lets the owner start a new election. +func (m *mockManager) ResignOwner(ctx context.Context) error { + if m.IsOwner() { + m.RetireOwner() + } + return nil +} diff --git a/parser/Makefile b/parser/Makefile new file mode 100644 index 0000000..4ae4486 --- /dev/null +++ b/parser/Makefile @@ -0,0 +1,37 @@ +.PHONY: all parser clean + +ARCH:="`uname -s`" +MAC:="Darwin" +LINUX:="Linux" + +all: parser.go + +test: parser.go + sh test.sh + +parser.go: parser.y + make parser + +parser: bin/goyacc + bin/goyacc -o /dev/null parser.y + bin/goyacc -o parser.go parser.y 2>&1 | egrep "(shift|reduce)/reduce" | awk '{print} END {if (NR > 0) {print "Find conflict in parser.y. Please check y.output for more information."; exit 1;}}' + rm -f y.output + + @if [ $(ARCH) = $(LINUX) ]; \ + then \ + sed -i -e 's|//line.*||' -e 's/yyEofCode/yyEOFCode/' parser.go; \ + elif [ $(ARCH) = $(MAC) ]; \ + then \ + /usr/bin/sed -i "" 's|//line.*||' parser.go; \ + /usr/bin/sed -i "" 's/yyEofCode/yyEOFCode/' parser.go; \ + fi + + @awk 'BEGIN{print "// Code generated by goyacc DO NOT EDIT."} {print $0}' parser.go > tmp_parser.go && mv tmp_parser.go parser.go; + +bin/goyacc: goyacc/main.go + GO111MODULE=on go build -o bin/goyacc goyacc/main.go + +clean: + go clean -i ./... + rm -rf *.out + rm parser.go diff --git a/parser/ast/ast.go b/parser/ast/ast.go new file mode 100644 index 0000000..7b41919 --- /dev/null +++ b/parser/ast/ast.go @@ -0,0 +1,153 @@ +// Copyright 2015 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package ast is the abstract syntax tree parsed from a SQL statement by parser. +// It can be analysed and transformed by optimizer. +package ast + +import ( + "io" + + "github.com/pingcap/tidb/parser/model" + "github.com/pingcap/tidb/parser/types" +) + +// Node is the basic element of the AST. +// Interfaces embed Node should have 'Node' name suffix. 
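+//
+// A Node is traversed by passing a Visitor to Accept; for example (illustrative):
+//
+//	newNode, ok := stmt.Accept(v)
+//	// ok is false when the visitor stopped the traversal.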
+type Node interface { + // Accept accepts Visitor to visit itself. + // The returned node should replace original node. + // ok returns false to stop visiting. + // + // Implementation of this method should first call visitor.Enter, + // assign the returned node to its method receiver, if skipChildren returns true, + // children should be skipped. Otherwise, call its children in particular order that + // later elements depends on former elements. Finally, return visitor.Leave. + Accept(v Visitor) (node Node, ok bool) + // Text returns the original text of the element. + Text() string + // SetText sets original text to the Node. + SetText(text string) +} + +// Flags indicates whether an expression contains certain types of expression. +const ( + FlagConstant uint64 = 0 + FlagHasFunc uint64 = 1 << iota + FlagHasReference + FlagHasAggregateFunc + FlagHasVariable + FlagHasDefault +) + +// ExprNode is a node that can be evaluated. +// Name of implementations should have 'Expr' suffix. +type ExprNode interface { + // Node is embedded in ExprNode. + Node + // SetType sets evaluation type to the expression. + SetType(tp *types.FieldType) + // GetType gets the evaluation type of the expression. + GetType() *types.FieldType + // SetFlag sets flag to the expression. + // Flag indicates whether the expression contains + // parameter marker, reference, aggregate function... + SetFlag(flag uint64) + // GetFlag returns the flag of the expression. + GetFlag() uint64 + + // Format formats the AST into a writer. + Format(w io.Writer) +} + +// OptBinary is used for parser. +type OptBinary struct { + IsBinary bool + Charset string +} + +// FuncNode represents function call expression node. +type FuncNode interface { + ExprNode + functionExpression() +} + +// StmtNode represents statement node. +// Name of implementations should have 'Stmt' suffix. +type StmtNode interface { + Node + statement() +} + +// DDLNode represents DDL statement node. +type DDLNode interface { + StmtNode + ddlStatement() +} + +// DMLNode represents DML statement node. +type DMLNode interface { + StmtNode + dmlStatement() +} + +// ResultField represents a result field which can be a column from a table, +// or an expression in select field. It is a generated property during +// binding process. ResultField is the key element to evaluate a ColumnNameExpr. +// After resolving process, every ColumnNameExpr will be resolved to a ResultField. +// During execution, every row retrieved from table will set the row value to +// ResultFields of that table, so ColumnNameExpr resolved to that ResultField can be +// easily evaluated. +type ResultField struct { + Column *model.ColumnInfo + ColumnAsName model.CIStr + Table *model.TableInfo + TableAsName model.CIStr + DBName model.CIStr + + // Expr represents the expression for the result field. If it is generated from a select field, it would + // be the expression of that select field, otherwise the type would be ValueExpr and value + // will be set for every retrieved row. + Expr ExprNode + TableName *TableName + // Referenced indicates the result field has been referenced or not. + // If not, we don't need to get the values. + Referenced bool +} + +// ResultSetNode interface has a ResultFields property, represents a Node that returns result set. +// Implementations include SelectStmt, SubqueryExpr, TableSource, TableName and Join. +type ResultSetNode interface { + Node +} + +// SensitiveStmtNode overloads StmtNode and provides a SecureText method. 
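+// Statements that may carry credentials (for example CREATE USER or GRANT with
+// IDENTIFIED BY) can implement it so that logs print the redacted SecureText
+// instead of the raw statement text.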
+type SensitiveStmtNode interface {
+	StmtNode
+	// SecureText is different from Text in that it hides password information.
+	SecureText() string
+}
+
+// Visitor visits a Node.
+type Visitor interface {
+	// Enter is called before children nodes are visited.
+	// The returned node must be the same type as the input node n.
+	// If skipChildren returns true, children nodes are skipped;
+	// this is useful when the work is done in Enter and there is no need to visit children.
+	Enter(n Node) (node Node, skipChildren bool)
+	// Leave is called after children nodes have been visited.
+	// The returned node's type can be different from the input node if it is an ExprNode;
+	// a non-expression node must be the same type as the input node n.
+	// ok returns false to stop visiting.
+	Leave(n Node) (node Node, ok bool)
+}
diff --git a/parser/ast/base.go b/parser/ast/base.go
new file mode 100644
index 0000000..80087f4
--- /dev/null
+++ b/parser/ast/base.go
@@ -0,0 +1,99 @@
+// Copyright 2015 PingCAP, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package ast
+
+import (
+	"github.com/pingcap/tidb/parser/types"
+)
+
+// node is the struct that implements the Node interface except for the Accept method.
+// Node implementations should embed it.
+type node struct {
+	text string
+}
+
+// SetText implements Node interface.
+func (n *node) SetText(text string) {
+	n.text = text
+}
+
+// Text implements Node interface.
+func (n *node) Text() string {
+	return n.text
+}
+
+// stmtNode implements StmtNode interface.
+// Statement implementations should embed it.
+type stmtNode struct {
+	node
+}
+
+// statement implements StmtNode interface.
+func (sn *stmtNode) statement() {}
+
+// ddlNode implements DDLNode interface.
+// DDL implementations should embed it.
+type ddlNode struct {
+	stmtNode
+}
+
+// ddlStatement implements DDLNode interface.
+func (dn *ddlNode) ddlStatement() {}
+
+// dmlNode is the struct that implements DMLNode interface.
+// DML implementations should embed it.
+type dmlNode struct {
+	stmtNode
+}
+
+// dmlStatement implements DMLNode interface.
+func (dn *dmlNode) dmlStatement() {}
+
+// exprNode is the struct that implements the ExprNode interface except for the Accept and Format methods.
+// Expression implementations should embed it.
+type exprNode struct {
+	node
+	Type types.FieldType
+	flag uint64
+}
+
+// TexprNode is exported for parser driver.
+type TexprNode = exprNode
+
+// SetType implements ExprNode interface.
+func (en *exprNode) SetType(tp *types.FieldType) {
+	en.Type = *tp
+}
+
+// GetType implements ExprNode interface.
+func (en *exprNode) GetType() *types.FieldType {
+	return &en.Type
+}
+
+// SetFlag implements ExprNode interface.
+func (en *exprNode) SetFlag(flag uint64) {
+	en.flag = flag
+}
+
+// GetFlag implements ExprNode interface.
+func (en *exprNode) GetFlag() uint64 {
+	return en.flag
+}
+
+type funcNode struct {
+	exprNode
+}
+
+// functionExpression implements the FuncNode interface.
+func (fn *funcNode) functionExpression() {} diff --git a/parser/ast/ddl.go b/parser/ast/ddl.go new file mode 100644 index 0000000..43a901b --- /dev/null +++ b/parser/ast/ddl.go @@ -0,0 +1,666 @@ +// Copyright 2015 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package ast + +import ( + "github.com/pingcap/tidb/parser/model" + "github.com/pingcap/tidb/parser/types" +) + +var ( + _ DDLNode = &AlterTableStmt{} + _ DDLNode = &CreateDatabaseStmt{} + _ DDLNode = &CreateIndexStmt{} + _ DDLNode = &CreateTableStmt{} + _ DDLNode = &DropDatabaseStmt{} + _ DDLNode = &DropIndexStmt{} + _ DDLNode = &DropTableStmt{} + _ DDLNode = &TruncateTableStmt{} + + _ Node = &AlterTableSpec{} + _ Node = &ColumnDef{} + _ Node = &ColumnOption{} + _ Node = &Constraint{} + _ Node = &IndexPartSpecification{} +) + +// CharsetOpt is used for parsing charset option from SQL. +type CharsetOpt struct { + Chs string + Col string +} + +// DatabaseOptionType is the type for database options. +type DatabaseOptionType int + +// Database option types. +const ( + DatabaseOptionNone DatabaseOptionType = iota + DatabaseOptionCharset + DatabaseOptionCollate + DatabaseOptionEncryption +) + +// DatabaseOption represents database option. +type DatabaseOption struct { + Tp DatabaseOptionType + Value string +} + +// CreateDatabaseStmt is a statement to create a database. +// See https://dev.mysql.com/doc/refman/5.7/en/create-database.html +type CreateDatabaseStmt struct { + ddlNode + + IfNotExists bool + Name string + Options []*DatabaseOption +} + +// Accept implements Node Accept interface. +func (n *CreateDatabaseStmt) Accept(v Visitor) (Node, bool) { + newNode, skipChildren := v.Enter(n) + if skipChildren { + return v.Leave(newNode) + } + n = newNode.(*CreateDatabaseStmt) + return v.Leave(n) +} + +// DropDatabaseStmt is a statement to drop a database and all tables in the database. +// See https://dev.mysql.com/doc/refman/5.7/en/drop-database.html +type DropDatabaseStmt struct { + ddlNode + + IfExists bool + Name string +} + +// Accept implements Node Accept interface. +func (n *DropDatabaseStmt) Accept(v Visitor) (Node, bool) { + newNode, skipChildren := v.Enter(n) + if skipChildren { + return v.Leave(newNode) + } + n = newNode.(*DropDatabaseStmt) + return v.Leave(n) +} + +// IndexPartSpecifications is used for parsing index column name or index expression from SQL. +type IndexPartSpecification struct { + node + + Column *ColumnName + Length int + Expr ExprNode +} + +// Accept implements Node Accept interface. +func (n *IndexPartSpecification) Accept(v Visitor) (Node, bool) { + newNode, skipChildren := v.Enter(n) + if skipChildren { + return v.Leave(newNode) + } + n = newNode.(*IndexPartSpecification) + if n.Expr != nil { + node, ok := n.Expr.Accept(v) + if !ok { + return n, false + } + n.Expr = node.(ExprNode) + return v.Leave(n) + } + node, ok := n.Column.Accept(v) + if !ok { + return n, false + } + n.Column = node.(*ColumnName) + return v.Leave(n) +} + +// ColumnOptionType is the type for ColumnOption. +type ColumnOptionType int + +// ColumnOption types. 
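+// A column definition such as `id BIGINT PRIMARY KEY AUTO_INCREMENT COMMENT 'id'`
+// is parsed into one ColumnOption per clause: here ColumnOptionPrimaryKey,
+// ColumnOptionAutoIncrement and ColumnOptionComment.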
+const ( + ColumnOptionNoOption ColumnOptionType = iota + ColumnOptionPrimaryKey + ColumnOptionNotNull + ColumnOptionAutoIncrement + ColumnOptionDefaultValue + ColumnOptionUniqKey + ColumnOptionNull + ColumnOptionOnUpdate // For Timestamp and Datetime only. + ColumnOptionFulltext + ColumnOptionComment + ColumnOptionGenerated + ColumnOptionReference + ColumnOptionCollate + ColumnOptionCheck + ColumnOptionColumnFormat + ColumnOptionStorage + ColumnOptionAutoRandom +) + +var ( + invalidOptionForGeneratedColumn = map[ColumnOptionType]struct{}{ + ColumnOptionAutoIncrement: {}, + ColumnOptionOnUpdate: {}, + ColumnOptionDefaultValue: {}, + } +) + +// ColumnOption is used for parsing column constraint info from SQL. +type ColumnOption struct { + node + + Tp ColumnOptionType + // Expr is used for ColumnOptionDefaultValue/ColumnOptionOnUpdateColumnOptionGenerated. + // For ColumnOptionDefaultValue or ColumnOptionOnUpdate, it's the target value. + // For ColumnOptionGenerated, it's the target expression. + Expr ExprNode + // Stored is only for ColumnOptionGenerated, default is false. + Stored bool + StrValue string + AutoRandomBitLength int + // Enforced is only for Check, default is true. + Enforced bool +} + +// Accept implements Node Accept interface. +func (n *ColumnOption) Accept(v Visitor) (Node, bool) { + newNode, skipChildren := v.Enter(n) + if skipChildren { + return v.Leave(newNode) + } + n = newNode.(*ColumnOption) + if n.Expr != nil { + node, ok := n.Expr.Accept(v) + if !ok { + return n, false + } + n.Expr = node.(ExprNode) + } + return v.Leave(n) +} + +// IndexVisibility is the option for index visibility. +type IndexVisibility int + +// IndexVisibility options. +const ( + IndexVisibilityDefault IndexVisibility = iota + IndexVisibilityVisible + IndexVisibilityInvisible +) + +// IndexOption is the index options. +// KEY_BLOCK_SIZE [=] value +// | index_type +// | WITH PARSER parser_name +// | COMMENT 'string' +// See http://dev.mysql.com/doc/refman/5.7/en/create-table.html +type IndexOption struct { + node + + KeyBlockSize uint64 + Tp model.IndexType + Comment string + ParserName model.CIStr + Visibility IndexVisibility +} + +// Accept implements Node Accept interface. +func (n *IndexOption) Accept(v Visitor) (Node, bool) { + newNode, skipChildren := v.Enter(n) + if skipChildren { + return v.Leave(newNode) + } + n = newNode.(*IndexOption) + return v.Leave(n) +} + +// ConstraintType is the type for Constraint. +type ConstraintType int + +// ConstraintTypes +const ( + ConstraintNoConstraint ConstraintType = iota + ConstraintPrimaryKey + ConstraintKey + ConstraintIndex + ConstraintUniq + ConstraintUniqKey + ConstraintUniqIndex + ConstraintForeignKey + ConstraintFulltext + ConstraintCheck +) + +// Constraint is constraint for table definition. +type Constraint struct { + node + + // only supported by MariaDB 10.0.2+ (ADD {INDEX|KEY}, ADD FOREIGN KEY), + // see https://mariadb.com/kb/en/library/alter-table/ + IfNotExists bool + + Tp ConstraintType + Name string + + Keys []*IndexPartSpecification // Used for PRIMARY KEY, UNIQUE, ...... + + Option *IndexOption // Index Options + + Expr ExprNode // Used for Check + + Enforced bool // Used for Check +} + +// Accept implements Node Accept interface. 
+func (n *Constraint) Accept(v Visitor) (Node, bool) { + newNode, skipChildren := v.Enter(n) + if skipChildren { + return v.Leave(newNode) + } + n = newNode.(*Constraint) + for i, val := range n.Keys { + node, ok := val.Accept(v) + if !ok { + return n, false + } + n.Keys[i] = node.(*IndexPartSpecification) + } + if n.Option != nil { + node, ok := n.Option.Accept(v) + if !ok { + return n, false + } + n.Option = node.(*IndexOption) + } + return v.Leave(n) +} + +// ColumnDef is used for parsing column definition from SQL. +type ColumnDef struct { + node + + Name *ColumnName + Tp *types.FieldType + Options []*ColumnOption +} + +// Accept implements Node Accept interface. +func (n *ColumnDef) Accept(v Visitor) (Node, bool) { + newNode, skipChildren := v.Enter(n) + if skipChildren { + return v.Leave(newNode) + } + n = newNode.(*ColumnDef) + node, ok := n.Name.Accept(v) + if !ok { + return n, false + } + n.Name = node.(*ColumnName) + for i, val := range n.Options { + node, ok := val.Accept(v) + if !ok { + return n, false + } + n.Options[i] = node.(*ColumnOption) + } + return v.Leave(n) +} + +// Validate checks if a column definition is legal. +// For example, generated column definitions that contain such +// column options as `ON UPDATE`, `AUTO_INCREMENT`, `DEFAULT` +// are illegal. +func (n *ColumnDef) Validate() bool { + generatedCol := false + illegalOpt4gc := false + for _, opt := range n.Options { + if opt.Tp == ColumnOptionGenerated { + generatedCol = true + } + _, found := invalidOptionForGeneratedColumn[opt.Tp] + illegalOpt4gc = illegalOpt4gc || found + } + return !(generatedCol && illegalOpt4gc) +} + +// CreateTableStmt is a statement to create a table. +// See https://dev.mysql.com/doc/refman/5.7/en/create-table.html +type CreateTableStmt struct { + ddlNode + + IfNotExists bool + IsTemporary bool + Table *TableName + ReferTable *TableName + Cols []*ColumnDef + Constraints []*Constraint +} + +// Accept implements Node Accept interface. +func (n *CreateTableStmt) Accept(v Visitor) (Node, bool) { + newNode, skipChildren := v.Enter(n) + if skipChildren { + return v.Leave(newNode) + } + n = newNode.(*CreateTableStmt) + node, ok := n.Table.Accept(v) + if !ok { + return n, false + } + n.Table = node.(*TableName) + if n.ReferTable != nil { + node, ok = n.ReferTable.Accept(v) + if !ok { + return n, false + } + n.ReferTable = node.(*TableName) + } + for i, val := range n.Cols { + node, ok = val.Accept(v) + if !ok { + return n, false + } + n.Cols[i] = node.(*ColumnDef) + } + for i, val := range n.Constraints { + node, ok = val.Accept(v) + if !ok { + return n, false + } + n.Constraints[i] = node.(*Constraint) + } + + return v.Leave(n) +} + +// DropTableStmt is a statement to drop one or more tables. +// See https://dev.mysql.com/doc/refman/5.7/en/drop-table.html +type DropTableStmt struct { + ddlNode + + IfExists bool + Tables []*TableName + IsView bool + IsTemporary bool // make sense ONLY if/when IsView == false +} + +// Accept implements Node Accept interface. +func (n *DropTableStmt) Accept(v Visitor) (Node, bool) { + newNode, skipChildren := v.Enter(n) + if skipChildren { + return v.Leave(newNode) + } + n = newNode.(*DropTableStmt) + for i, val := range n.Tables { + node, ok := val.Accept(v) + if !ok { + return n, false + } + n.Tables[i] = node.(*TableName) + } + return v.Leave(n) +} + +// IndexKeyType is the type for index key. +type IndexKeyType int + +// Index key types. 
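+// For example, `CREATE UNIQUE INDEX ...` is parsed with IndexKeyTypeUnique,
+// `CREATE FULLTEXT INDEX ...` with IndexKeyTypeFullText, and a plain
+// `CREATE INDEX ...` with IndexKeyTypeNone.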
+const ( + IndexKeyTypeNone IndexKeyType = iota + IndexKeyTypeUnique + IndexKeyTypeSpatial + IndexKeyTypeFullText +) + +// CreateIndexStmt is a statement to create an index. +// See https://dev.mysql.com/doc/refman/5.7/en/create-index.html +type CreateIndexStmt struct { + ddlNode + + // only supported by MariaDB 10.0.2+, + // see https://mariadb.com/kb/en/library/create-index/ + IfNotExists bool + + IndexName string + Table *TableName + IndexPartSpecifications []*IndexPartSpecification + IndexOption *IndexOption + KeyType IndexKeyType +} + +// Accept implements Node Accept interface. +func (n *CreateIndexStmt) Accept(v Visitor) (Node, bool) { + newNode, skipChildren := v.Enter(n) + if skipChildren { + return v.Leave(newNode) + } + n = newNode.(*CreateIndexStmt) + node, ok := n.Table.Accept(v) + if !ok { + return n, false + } + n.Table = node.(*TableName) + for i, val := range n.IndexPartSpecifications { + node, ok = val.Accept(v) + if !ok { + return n, false + } + n.IndexPartSpecifications[i] = node.(*IndexPartSpecification) + } + if n.IndexOption != nil { + node, ok := n.IndexOption.Accept(v) + if !ok { + return n, false + } + n.IndexOption = node.(*IndexOption) + } + return v.Leave(n) +} + +// DropIndexStmt is a statement to drop the index. +// See https://dev.mysql.com/doc/refman/5.7/en/drop-index.html +type DropIndexStmt struct { + ddlNode + + IfExists bool + IndexName string + Table *TableName +} + +// Accept implements Node Accept interface. +func (n *DropIndexStmt) Accept(v Visitor) (Node, bool) { + newNode, skipChildren := v.Enter(n) + if skipChildren { + return v.Leave(newNode) + } + n = newNode.(*DropIndexStmt) + node, ok := n.Table.Accept(v) + if !ok { + return n, false + } + n.Table = node.(*TableName) + return v.Leave(n) +} + +// AlterTableType is the type for AlterTableSpec. +type AlterTableType int + +// AlterTable types. +const ( + AlterTableOption AlterTableType = iota + 1 + AlterTableAddColumns + AlterTableAddConstraint + AlterTableDropColumn + AlterTableDropPrimaryKey + AlterTableDropIndex + AlterTableDropForeignKey + AlterTableModifyColumn + AlterTableChangeColumn + AlterTableRenameColumn + AlterTableAlterColumn + AlterTableLock + AlterTableAlgorithm + AlterTableRenameIndex + AlterTableForce + AlterTablePartition + AlterTableEnableKeys + AlterTableDisableKeys + AlterTableRemovePartitioning + AlterTableWithValidation + AlterTableWithoutValidation + AlterTableSecondaryLoad + AlterTableSecondaryUnload + AlterTableAlterCheck + AlterTableDropCheck + AlterTableImportTablespace + AlterTableDiscardTablespace + AlterTableIndexInvisible + // TODO: Add more actions + AlterTableOrderByColumns +) + +// AlterTableSpec represents alter table specification. +type AlterTableSpec struct { + node + + // only supported by MariaDB 10.0.2+ (DROP COLUMN, CHANGE COLUMN, MODIFY COLUMN, DROP INDEX, DROP FOREIGN KEY) + // see https://mariadb.com/kb/en/library/alter-table/ + IfExists bool + + // only supported by MariaDB 10.0.2+ (ADD COLUMN) + // see https://mariadb.com/kb/en/library/alter-table/ + IfNotExists bool + + NoWriteToBinlog bool + + Tp AlterTableType + Name string + Constraint *Constraint + NewTable *TableName + NewColumns []*ColumnDef + NewConstraints []*Constraint + OldColumnName *ColumnName + NewColumnName *ColumnName + Comment string + FromKey model.CIStr + ToKey model.CIStr + WithValidation bool + Num uint64 + Visibility IndexVisibility +} + +// Accept implements Node Accept interface. 
+func (n *AlterTableSpec) Accept(v Visitor) (Node, bool) {
+	newNode, skipChildren := v.Enter(n)
+	if skipChildren {
+		return v.Leave(newNode)
+	}
+	n = newNode.(*AlterTableSpec)
+	if n.Constraint != nil {
+		node, ok := n.Constraint.Accept(v)
+		if !ok {
+			return n, false
+		}
+		n.Constraint = node.(*Constraint)
+	}
+	if n.NewTable != nil {
+		node, ok := n.NewTable.Accept(v)
+		if !ok {
+			return n, false
+		}
+		n.NewTable = node.(*TableName)
+	}
+	for i, col := range n.NewColumns {
+		node, ok := col.Accept(v)
+		if !ok {
+			return n, false
+		}
+		n.NewColumns[i] = node.(*ColumnDef)
+	}
+	for i, constraint := range n.NewConstraints {
+		node, ok := constraint.Accept(v)
+		if !ok {
+			return n, false
+		}
+		n.NewConstraints[i] = node.(*Constraint)
+	}
+	if n.OldColumnName != nil {
+		node, ok := n.OldColumnName.Accept(v)
+		if !ok {
+			return n, false
+		}
+		n.OldColumnName = node.(*ColumnName)
+	}
+	return v.Leave(n)
+}
+
+// AlterTableStmt is a statement to change the structure of a table.
+// See https://dev.mysql.com/doc/refman/5.7/en/alter-table.html
+type AlterTableStmt struct {
+	ddlNode
+
+	Table *TableName
+	Specs []*AlterTableSpec
+}
+
+// Accept implements Node Accept interface.
+func (n *AlterTableStmt) Accept(v Visitor) (Node, bool) {
+	newNode, skipChildren := v.Enter(n)
+	if skipChildren {
+		return v.Leave(newNode)
+	}
+	n = newNode.(*AlterTableStmt)
+	node, ok := n.Table.Accept(v)
+	if !ok {
+		return n, false
+	}
+	n.Table = node.(*TableName)
+	for i, val := range n.Specs {
+		node, ok = val.Accept(v)
+		if !ok {
+			return n, false
+		}
+		n.Specs[i] = node.(*AlterTableSpec)
+	}
+	return v.Leave(n)
+}
+
+// TruncateTableStmt is a statement to empty a table completely.
+// See https://dev.mysql.com/doc/refman/5.7/en/truncate-table.html
+type TruncateTableStmt struct {
+	ddlNode
+
+	Table *TableName
+}
+
+// Accept implements Node Accept interface.
+func (n *TruncateTableStmt) Accept(v Visitor) (Node, bool) {
+	newNode, skipChildren := v.Enter(n)
+	if skipChildren {
+		return v.Leave(newNode)
+	}
+	n = newNode.(*TruncateTableStmt)
+	node, ok := n.Table.Accept(v)
+	if !ok {
+		return n, false
+	}
+	n.Table = node.(*TableName)
+	return v.Leave(n)
+}
diff --git a/parser/ast/ddl_test.go b/parser/ast/ddl_test.go
new file mode 100644
index 0000000..2b42ad3
--- /dev/null
+++ b/parser/ast/ddl_test.go
@@ -0,0 +1,62 @@
+// Copyright 2017 PingCAP, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package ast_test
+
+import (
+	. "github.com/pingcap/check"
+	. 
"github.com/pingcap/tidb/parser/ast" +) + +var _ = Suite(&testDDLSuite{}) + +type testDDLSuite struct { +} + +func (ts *testDDLSuite) TestDDLVisitorCover(c *C) { + ce := &checkExpr{} + constraint := &Constraint{Keys: []*IndexPartSpecification{{Column: &ColumnName{}}, {Column: &ColumnName{}}}, Option: &IndexOption{}} + + alterTableSpec := &AlterTableSpec{Constraint: constraint, NewTable: &TableName{}, NewColumns: []*ColumnDef{{Name: &ColumnName{}}}, OldColumnName: &ColumnName{}} + + stmts := []struct { + node Node + expectedEnterCnt int + expectedLeaveCnt int + }{ + {&CreateDatabaseStmt{}, 0, 0}, + {&DropDatabaseStmt{}, 0, 0}, + {&DropIndexStmt{Table: &TableName{}}, 0, 0}, + {&DropTableStmt{Tables: []*TableName{{}, {}}}, 0, 0}, + {&TruncateTableStmt{Table: &TableName{}}, 0, 0}, + + // TODO: cover children + {&AlterTableStmt{Table: &TableName{}, Specs: []*AlterTableSpec{alterTableSpec}}, 0, 0}, + {&CreateIndexStmt{Table: &TableName{}}, 0, 0}, + {&CreateTableStmt{Table: &TableName{}, ReferTable: &TableName{}}, 0, 0}, + {&AlterTableSpec{}, 0, 0}, + {&ColumnDef{Name: &ColumnName{}, Options: []*ColumnOption{{Expr: ce}}}, 1, 1}, + {&ColumnOption{Expr: ce}, 1, 1}, + {&IndexPartSpecification{Column: &ColumnName{}}, 0, 0}, + {&AlterTableSpec{NewConstraints: []*Constraint{constraint, constraint}}, 0, 0}, + {&AlterTableSpec{NewConstraints: []*Constraint{constraint}, NewColumns: []*ColumnDef{{Name: &ColumnName{}}}}, 0, 0}, + } + + for _, v := range stmts { + ce.reset() + v.node.Accept(checkVisitor{}) + c.Check(ce.enterCnt, Equals, v.expectedEnterCnt) + c.Check(ce.leaveCnt, Equals, v.expectedLeaveCnt) + v.node.Accept(visitor1{}) + } +} diff --git a/parser/ast/dml.go b/parser/ast/dml.go new file mode 100644 index 0000000..c09fa20 --- /dev/null +++ b/parser/ast/dml.go @@ -0,0 +1,746 @@ +// Copyright 2015 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package ast + +import ( + "github.com/pingcap/tidb/parser/model" + "github.com/pingcap/tidb/parser/mysql" +) + +var ( + _ DMLNode = &DeleteStmt{} + _ DMLNode = &InsertStmt{} + _ DMLNode = &SelectStmt{} + _ DMLNode = &ShowStmt{} + + _ Node = &Assignment{} + _ Node = &ByItem{} + _ Node = &FieldList{} + _ Node = &GroupByClause{} + _ Node = &HavingClause{} + _ Node = &Join{} + _ Node = &Limit{} + _ Node = &OnCondition{} + _ Node = &OrderByClause{} + _ Node = &SelectField{} + _ Node = &TableName{} + _ Node = &TableRefsClause{} + _ Node = &TableSource{} + _ Node = &WildCardField{} +) + +// JoinType is join type, including cross/left/right/full. +type JoinType int + +const ( + // CrossJoin is cross join type. + CrossJoin JoinType = iota + 1 + // LeftJoin is left Join type. + LeftJoin + // RightJoin is right Join type. + RightJoin +) + +// Join represents table join. +type Join struct { + node + + // Left table can be TableSource or JoinNode. + Left ResultSetNode + // Right table can be TableSource or JoinNode or nil. + Right ResultSetNode + // Tp represents join type. + Tp JoinType + // On represents join on condition. + On *OnCondition +} + +// Accept implements Node Accept interface. 
+func (n *Join) Accept(v Visitor) (Node, bool) { + newNode, skipChildren := v.Enter(n) + if skipChildren { + return v.Leave(newNode) + } + n = newNode.(*Join) + node, ok := n.Left.Accept(v) + if !ok { + return n, false + } + n.Left = node.(ResultSetNode) + if n.Right != nil { + node, ok = n.Right.Accept(v) + if !ok { + return n, false + } + n.Right = node.(ResultSetNode) + } + if n.On != nil { + node, ok = n.On.Accept(v) + if !ok { + return n, false + } + n.On = node.(*OnCondition) + } + return v.Leave(n) +} + +// TableName represents a table name. +type TableName struct { + node + + Schema model.CIStr + Name model.CIStr + + DBInfo *model.DBInfo + TableInfo *model.TableInfo + + IndexHints []*IndexHint + PartitionNames []model.CIStr +} + +// IndexHintType is the type for index hint use, ignore or force. +type IndexHintType int + +// IndexHintUseType values. +const ( + HintUse IndexHintType = 1 + HintIgnore IndexHintType = 2 + HintForce IndexHintType = 3 +) + +// IndexHintScope is the type for index hint for join, order by or group by. +type IndexHintScope int + +// Index hint scopes. +const ( + HintForScan IndexHintScope = 1 + HintForJoin IndexHintScope = 2 + HintForOrderBy IndexHintScope = 3 + HintForGroupBy IndexHintScope = 4 +) + +// IndexHint represents a hint for optimizer to use/ignore/force for join/order by/group by. +type IndexHint struct { + IndexNames []model.CIStr + HintType IndexHintType + HintScope IndexHintScope +} + +// Accept implements Node Accept interface. +func (n *TableName) Accept(v Visitor) (Node, bool) { + newNode, skipChildren := v.Enter(n) + if skipChildren { + return v.Leave(newNode) + } + n = newNode.(*TableName) + return v.Leave(n) +} + +// OnCondition represents JOIN on condition. +type OnCondition struct { + node + + Expr ExprNode +} + +// Accept implements Node Accept interface. +func (n *OnCondition) Accept(v Visitor) (Node, bool) { + newNode, skipChildren := v.Enter(n) + if skipChildren { + return v.Leave(newNode) + } + n = newNode.(*OnCondition) + node, ok := n.Expr.Accept(v) + if !ok { + return n, false + } + n.Expr = node.(ExprNode) + return v.Leave(n) +} + +// TableSource represents table source with a name. +type TableSource struct { + node + + // Source is the source of the data, can be a TableName, + // a SelectStmt, a UnionStmt, or a JoinNode. + Source ResultSetNode + + // AsName is the alias name of the table source. + AsName model.CIStr +} + +// Accept implements Node Accept interface. +func (n *TableSource) Accept(v Visitor) (Node, bool) { + newNode, skipChildren := v.Enter(n) + if skipChildren { + return v.Leave(newNode) + } + n = newNode.(*TableSource) + node, ok := n.Source.Accept(v) + if !ok { + return n, false + } + n.Source = node.(ResultSetNode) + return v.Leave(n) +} + +// WildCardField is a special type of select field content. +type WildCardField struct { + node + + Table model.CIStr + Schema model.CIStr +} + +// Accept implements Node Accept interface. +func (n *WildCardField) Accept(v Visitor) (Node, bool) { + newNode, skipChildren := v.Enter(n) + if skipChildren { + return v.Leave(newNode) + } + n = newNode.(*WildCardField) + return v.Leave(n) +} + +// SelectField represents fields in select statement. +// There are two type of select field: wildcard +// and expression with optional alias name. +type SelectField struct { + node + + // Offset is used to get original text. + Offset int + // WildCard is not nil, Expr will be nil. + WildCard *WildCardField + // Expr is not nil, WildCard will be nil. 
+ Expr ExprNode + // AsName is alias name for Expr. + AsName model.CIStr + // Auxiliary stands for if this field is auxiliary. + // When we add a Field into SelectField list which is used for having/orderby clause but the field is not in select clause, + // we should set its Auxiliary to true. Then the TrimExec will trim the field. + Auxiliary bool +} + +// Accept implements Node Accept interface. +func (n *SelectField) Accept(v Visitor) (Node, bool) { + newNode, skipChildren := v.Enter(n) + if skipChildren { + return v.Leave(newNode) + } + n = newNode.(*SelectField) + if n.Expr != nil { + node, ok := n.Expr.Accept(v) + if !ok { + return n, false + } + n.Expr = node.(ExprNode) + } + return v.Leave(n) +} + +// FieldList represents field list in select statement. +type FieldList struct { + node + + Fields []*SelectField +} + +// Accept implements Node Accept interface. +func (n *FieldList) Accept(v Visitor) (Node, bool) { + newNode, skipChildren := v.Enter(n) + if skipChildren { + return v.Leave(newNode) + } + n = newNode.(*FieldList) + for i, val := range n.Fields { + node, ok := val.Accept(v) + if !ok { + return n, false + } + n.Fields[i] = node.(*SelectField) + } + return v.Leave(n) +} + +// TableRefsClause represents table references clause in dml statement. +type TableRefsClause struct { + node + + TableRefs *Join +} + +// Accept implements Node Accept interface. +func (n *TableRefsClause) Accept(v Visitor) (Node, bool) { + newNode, skipChildren := v.Enter(n) + if skipChildren { + return v.Leave(newNode) + } + n = newNode.(*TableRefsClause) + node, ok := n.TableRefs.Accept(v) + if !ok { + return n, false + } + n.TableRefs = node.(*Join) + return v.Leave(n) +} + +// ByItem represents an item in order by or group by. +type ByItem struct { + node + + Expr ExprNode + Desc bool +} + +// Accept implements Node Accept interface. +func (n *ByItem) Accept(v Visitor) (Node, bool) { + newNode, skipChildren := v.Enter(n) + if skipChildren { + return v.Leave(newNode) + } + n = newNode.(*ByItem) + node, ok := n.Expr.Accept(v) + if !ok { + return n, false + } + n.Expr = node.(ExprNode) + return v.Leave(n) +} + +// GroupByClause represents group by clause. +type GroupByClause struct { + node + Items []*ByItem +} + +// Accept implements Node Accept interface. +func (n *GroupByClause) Accept(v Visitor) (Node, bool) { + newNode, skipChildren := v.Enter(n) + if skipChildren { + return v.Leave(newNode) + } + n = newNode.(*GroupByClause) + for i, val := range n.Items { + node, ok := val.Accept(v) + if !ok { + return n, false + } + n.Items[i] = node.(*ByItem) + } + return v.Leave(n) +} + +// HavingClause represents having clause. +type HavingClause struct { + node + Expr ExprNode +} + +// Accept implements Node Accept interface. +func (n *HavingClause) Accept(v Visitor) (Node, bool) { + newNode, skipChildren := v.Enter(n) + if skipChildren { + return v.Leave(newNode) + } + n = newNode.(*HavingClause) + node, ok := n.Expr.Accept(v) + if !ok { + return n, false + } + n.Expr = node.(ExprNode) + return v.Leave(n) +} + +// OrderByClause represents order by clause. +type OrderByClause struct { + node + Items []*ByItem + ForUnion bool +} + +// Accept implements Node Accept interface. 
+func (n *OrderByClause) Accept(v Visitor) (Node, bool) { + newNode, skipChildren := v.Enter(n) + if skipChildren { + return v.Leave(newNode) + } + n = newNode.(*OrderByClause) + for i, val := range n.Items { + node, ok := val.Accept(v) + if !ok { + return n, false + } + n.Items[i] = node.(*ByItem) + } + return v.Leave(n) +} + +// SelectStmt represents the select query node. +// See https://dev.mysql.com/doc/refman/5.7/en/select.html +type SelectStmt struct { + dmlNode + + // SelectStmtOpts wraps around select hints and switches. + *SelectStmtOpts + // Distinct represents whether the select has distinct option. + Distinct bool + // From is the from clause of the query. + From *TableRefsClause + // Where is the where clause in select statement. + Where ExprNode + // Fields is the select expression list. + Fields *FieldList + // GroupBy is the group by expression list. + GroupBy *GroupByClause + // Having is the having condition. + Having *HavingClause + // OrderBy is the ordering expression list. + OrderBy *OrderByClause + // Limit is the limit clause. + Limit *Limit + // TableHints represents the table level Optimizer Hint for join type + TableHints []*TableOptimizerHint + // IsInBraces indicates whether it's a stmt in brace. + IsInBraces bool +} + +// Accept implements Node Accept interface. +func (n *SelectStmt) Accept(v Visitor) (Node, bool) { + newNode, skipChildren := v.Enter(n) + if skipChildren { + return v.Leave(newNode) + } + + n = newNode.(*SelectStmt) + if n.TableHints != nil && len(n.TableHints) != 0 { + newHints := make([]*TableOptimizerHint, len(n.TableHints)) + for i, hint := range n.TableHints { + node, ok := hint.Accept(v) + if !ok { + return n, false + } + newHints[i] = node.(*TableOptimizerHint) + } + n.TableHints = newHints + } + + if n.Fields != nil { + node, ok := n.Fields.Accept(v) + if !ok { + return n, false + } + n.Fields = node.(*FieldList) + } + + if n.From != nil { + node, ok := n.From.Accept(v) + if !ok { + return n, false + } + n.From = node.(*TableRefsClause) + } + + if n.Where != nil { + node, ok := n.Where.Accept(v) + if !ok { + return n, false + } + n.Where = node.(ExprNode) + } + + if n.GroupBy != nil { + node, ok := n.GroupBy.Accept(v) + if !ok { + return n, false + } + n.GroupBy = node.(*GroupByClause) + } + + if n.Having != nil { + node, ok := n.Having.Accept(v) + if !ok { + return n, false + } + n.Having = node.(*HavingClause) + } + + if n.OrderBy != nil { + node, ok := n.OrderBy.Accept(v) + if !ok { + return n, false + } + n.OrderBy = node.(*OrderByClause) + } + + if n.Limit != nil { + node, ok := n.Limit.Accept(v) + if !ok { + return n, false + } + n.Limit = node.(*Limit) + } + + return v.Leave(n) +} + +// Assignment is the expression for assignment, like a = 1. +type Assignment struct { + node + // Column is the column name to be assigned. + Column *ColumnName + // Expr is the expression assigning to ColName. + Expr ExprNode +} + +// Accept implements Node Accept interface. +func (n *Assignment) Accept(v Visitor) (Node, bool) { + newNode, skipChildren := v.Enter(n) + if skipChildren { + return v.Leave(newNode) + } + n = newNode.(*Assignment) + node, ok := n.Column.Accept(v) + if !ok { + return n, false + } + n.Column = node.(*ColumnName) + node, ok = n.Expr.Accept(v) + if !ok { + return n, false + } + n.Expr = node.(ExprNode) + return v.Leave(n) +} + +type ColumnNameOrUserVar struct { + ColumnName *ColumnName + UserVar *VariableExpr +} + +// InsertStmt is a statement to insert new rows into an existing table. 
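+// For example, `INSERT INTO t (a, b) VALUES (1, 2), (3, 4)` fills Columns with
+// the two column names and Lists with one []ExprNode per row; `INSERT ... SET
+// a = 1` fills Setlist, and `INSERT ... SELECT ...` fills Select.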
+// See https://dev.mysql.com/doc/refman/5.7/en/insert.html +type InsertStmt struct { + dmlNode + + IsReplace bool + Table *TableRefsClause + Columns []*ColumnName + Lists [][]ExprNode + Setlist []*Assignment + Priority mysql.PriorityEnum + Select ResultSetNode +} + +// Accept implements Node Accept interface. +func (n *InsertStmt) Accept(v Visitor) (Node, bool) { + newNode, skipChildren := v.Enter(n) + if skipChildren { + return v.Leave(newNode) + } + + n = newNode.(*InsertStmt) + if n.Select != nil { + node, ok := n.Select.Accept(v) + if !ok { + return n, false + } + n.Select = node.(ResultSetNode) + } + + node, ok := n.Table.Accept(v) + if !ok { + return n, false + } + n.Table = node.(*TableRefsClause) + + for i, val := range n.Columns { + node, ok := val.Accept(v) + if !ok { + return n, false + } + n.Columns[i] = node.(*ColumnName) + } + for i, list := range n.Lists { + for j, val := range list { + node, ok := val.Accept(v) + if !ok { + return n, false + } + n.Lists[i][j] = node.(ExprNode) + } + } + for i, val := range n.Setlist { + node, ok := val.Accept(v) + if !ok { + return n, false + } + n.Setlist[i] = node.(*Assignment) + } + return v.Leave(n) +} + +// DeleteStmt is a statement to delete rows from table. +// See https://dev.mysql.com/doc/refman/5.7/en/delete.html +type DeleteStmt struct { + dmlNode + + // TableRefs is used in both single table and multiple table delete statement. + TableRefs *TableRefsClause + Where ExprNode + Order *OrderByClause + Limit *Limit + Priority mysql.PriorityEnum + Quick bool +} + +// Accept implements Node Accept interface. +func (n *DeleteStmt) Accept(v Visitor) (Node, bool) { + newNode, skipChildren := v.Enter(n) + if skipChildren { + return v.Leave(newNode) + } + + n = newNode.(*DeleteStmt) + node, ok := n.TableRefs.Accept(v) + if !ok { + return n, false + } + n.TableRefs = node.(*TableRefsClause) + + if n.Where != nil { + node, ok = n.Where.Accept(v) + if !ok { + return n, false + } + n.Where = node.(ExprNode) + } + if n.Order != nil { + node, ok = n.Order.Accept(v) + if !ok { + return n, false + } + n.Order = node.(*OrderByClause) + } + if n.Limit != nil { + node, ok = n.Limit.Accept(v) + if !ok { + return n, false + } + n.Limit = node.(*Limit) + } + return v.Leave(n) +} + +// Limit is the limit clause. +type Limit struct { + node + + Count ExprNode + Offset ExprNode +} + +// Accept implements Node Accept interface. +func (n *Limit) Accept(v Visitor) (Node, bool) { + newNode, skipChildren := v.Enter(n) + if skipChildren { + return v.Leave(newNode) + } + if n.Count != nil { + node, ok := n.Count.Accept(v) + if !ok { + return n, false + } + n.Count = node.(ExprNode) + } + if n.Offset != nil { + node, ok := n.Offset.Accept(v) + if !ok { + return n, false + } + n.Offset = node.(ExprNode) + } + + n = newNode.(*Limit) + return v.Leave(n) +} + +// ShowStmtType is the type for SHOW statement. +type ShowStmtType int + +// Show statement types. +const ( + ShowNone = iota + ShowDatabases + ShowTables + ShowWarnings + ShowVariables + ShowCreateTable + ShowProcessList + ShowCreateDatabase + ShowErrors +) + +// ShowStmt is a statement to provide information about databases, tables, columns and so on. +// See https://dev.mysql.com/doc/refman/5.7/en/show.html +type ShowStmt struct { + dmlNode + + Tp ShowStmtType // Databases/Tables/Columns/.... + DBName string + Table *TableName // Used for showing columns. + Column *ColumnName // Used for `desc table column`. + IndexName model.CIStr + Flag int // Some flag parsed from sql, such as FULL. 
+ Full bool + IfNotExists bool // Used for `show create database if not exists` + Extended bool // Used for `show extended columns from ...` + + // GlobalScope is used by `show variables` and `show bindings` + GlobalScope bool + Where ExprNode +} + +// Accept implements Node Accept interface. +func (n *ShowStmt) Accept(v Visitor) (Node, bool) { + newNode, skipChildren := v.Enter(n) + if skipChildren { + return v.Leave(newNode) + } + n = newNode.(*ShowStmt) + if n.Table != nil { + node, ok := n.Table.Accept(v) + if !ok { + return n, false + } + n.Table = node.(*TableName) + } + if n.Column != nil { + node, ok := n.Column.Accept(v) + if !ok { + return n, false + } + n.Column = node.(*ColumnName) + } + + if n.Where != nil { + node, ok := n.Where.Accept(v) + if !ok { + return n, false + } + n.Where = node.(ExprNode) + } + return v.Leave(n) +} diff --git a/parser/ast/dml_test.go b/parser/ast/dml_test.go new file mode 100644 index 0000000..0148c99 --- /dev/null +++ b/parser/ast/dml_test.go @@ -0,0 +1,63 @@ +// Copyright 2017 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package ast_test + +import ( + . "github.com/pingcap/check" + . "github.com/pingcap/tidb/parser/ast" +) + +var _ = Suite(&testDMLSuite{}) + +type testDMLSuite struct { +} + +func (ts *testDMLSuite) TestDMLVisitorCover(c *C) { + ce := &checkExpr{} + + tableRefsClause := &TableRefsClause{TableRefs: &Join{Left: &TableSource{Source: &TableName{}}, On: &OnCondition{Expr: ce}}} + + stmts := []struct { + node Node + expectedEnterCnt int + expectedLeaveCnt int + }{ + {&Assignment{Column: &ColumnName{}, Expr: ce}, 1, 1}, + {&ByItem{Expr: ce}, 1, 1}, + {&GroupByClause{Items: []*ByItem{{Expr: ce}, {Expr: ce}}}, 2, 2}, + {&HavingClause{Expr: ce}, 1, 1}, + {&Join{Left: &TableSource{Source: &TableName{}}}, 0, 0}, + {&Limit{Count: ce, Offset: ce}, 2, 2}, + {&OnCondition{Expr: ce}, 1, 1}, + {&OrderByClause{Items: []*ByItem{{Expr: ce}, {Expr: ce}}}, 2, 2}, + {&SelectField{Expr: ce, WildCard: &WildCardField{}}, 1, 1}, + {&TableName{}, 0, 0}, + {tableRefsClause, 1, 1}, + {&TableSource{Source: &TableName{}}, 0, 0}, + {&WildCardField{}, 0, 0}, + + // TODO: cover childrens + {&InsertStmt{Table: tableRefsClause}, 1, 1}, + {&SelectStmt{}, 0, 0}, + {&FieldList{}, 0, 0}, + } + + for _, v := range stmts { + ce.reset() + v.node.Accept(checkVisitor{}) + c.Check(ce.enterCnt, Equals, v.expectedEnterCnt) + c.Check(ce.leaveCnt, Equals, v.expectedLeaveCnt) + v.node.Accept(visitor1{}) + } +} diff --git a/parser/ast/expressions.go b/parser/ast/expressions.go new file mode 100644 index 0000000..b6e54b0 --- /dev/null +++ b/parser/ast/expressions.go @@ -0,0 +1,500 @@ +// Copyright 2015 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package ast + +import ( + "fmt" + "io" + "strings" + + "github.com/pingcap/tidb/parser/model" + "github.com/pingcap/tidb/parser/opcode" +) + +var ( + _ ExprNode = &BetweenExpr{} + _ ExprNode = &BinaryOperationExpr{} + _ ExprNode = &ColumnNameExpr{} + _ ExprNode = &DefaultExpr{} + _ ExprNode = &IsNullExpr{} + _ ExprNode = &ParenthesesExpr{} + _ ExprNode = &PatternInExpr{} + _ ExprNode = &RowExpr{} + _ ExprNode = &UnaryOperationExpr{} + _ ExprNode = &ValuesExpr{} + _ ExprNode = &VariableExpr{} + + _ Node = &ColumnName{} +) + +// ValueExpr define a interface for ValueExpr. +type ValueExpr interface { + ExprNode + SetValue(val interface{}) + GetValue() interface{} + GetDatumString() string + GetString() string + GetProjectionOffset() int + SetProjectionOffset(offset int) +} + +// NewValueExpr creates a ValueExpr with value, and sets default field type. +var NewValueExpr func(interface{}) ValueExpr + +// BetweenExpr is for "between and" or "not between and" expression. +type BetweenExpr struct { + exprNode + // Expr is the expression to be checked. + Expr ExprNode + // Left is the expression for minimal value in the range. + Left ExprNode + // Right is the expression for maximum value in the range. + Right ExprNode + // Not is true, the expression is "not between and". + Not bool +} + +// Format the ExprNode into a Writer. +func (n *BetweenExpr) Format(w io.Writer) { + n.Expr.Format(w) + if n.Not { + fmt.Fprint(w, " NOT BETWEEN ") + } else { + fmt.Fprint(w, " BETWEEN ") + } + n.Left.Format(w) + fmt.Fprint(w, " AND ") + n.Right.Format(w) +} + +// Accept implements Node interface. +func (n *BetweenExpr) Accept(v Visitor) (Node, bool) { + newNode, skipChildren := v.Enter(n) + if skipChildren { + return v.Leave(newNode) + } + + n = newNode.(*BetweenExpr) + node, ok := n.Expr.Accept(v) + if !ok { + return n, false + } + n.Expr = node.(ExprNode) + + node, ok = n.Left.Accept(v) + if !ok { + return n, false + } + n.Left = node.(ExprNode) + + node, ok = n.Right.Accept(v) + if !ok { + return n, false + } + n.Right = node.(ExprNode) + + return v.Leave(n) +} + +// BinaryOperationExpr is for binary operation like `1 + 1`, `1 - 1`, etc. +type BinaryOperationExpr struct { + exprNode + // Op is the operator code for BinaryOperation. + Op opcode.Op + // L is the left expression in BinaryOperation. + L ExprNode + // R is the right expression in BinaryOperation. + R ExprNode +} + +// Format the ExprNode into a Writer. +func (n *BinaryOperationExpr) Format(w io.Writer) { + n.L.Format(w) + fmt.Fprint(w, " ") + n.Op.Format(w) + fmt.Fprint(w, " ") + n.R.Format(w) +} + +// Accept implements Node interface. +func (n *BinaryOperationExpr) Accept(v Visitor) (Node, bool) { + newNode, skipChildren := v.Enter(n) + if skipChildren { + return v.Leave(newNode) + } + + n = newNode.(*BinaryOperationExpr) + node, ok := n.L.Accept(v) + if !ok { + return n, false + } + n.L = node.(ExprNode) + + node, ok = n.R.Accept(v) + if !ok { + return n, false + } + n.R = node.(ExprNode) + + return v.Leave(n) +} + +// ColumnName represents column name. +type ColumnName struct { + node + Schema model.CIStr + Table model.CIStr + Name model.CIStr +} + +// Accept implements Node Accept interface. 
+func (n *ColumnName) Accept(v Visitor) (Node, bool) { + newNode, skipChildren := v.Enter(n) + if skipChildren { + return v.Leave(newNode) + } + n = newNode.(*ColumnName) + return v.Leave(n) +} + +// String implements Stringer interface. +func (n *ColumnName) String() string { + result := n.Name.L + if n.Table.L != "" { + result = n.Table.L + "." + result + } + if n.Schema.L != "" { + result = n.Schema.L + "." + result + } + return result +} + +// OrigColName returns the full original column name. +func (n *ColumnName) OrigColName() (ret string) { + ret = n.Name.O + if n.Table.O == "" { + return + } + ret = n.Table.O + "." + ret + if n.Schema.O == "" { + return + } + ret = n.Schema.O + "." + ret + return +} + +// ColumnNameExpr represents a column name expression. +type ColumnNameExpr struct { + exprNode + + // Name is the referenced column name. + Name *ColumnName + + // Refer is the result field the column name refers to. + // The value of Refer.Expr is used as the value of the expression. + Refer *ResultField +} + +// Format the ExprNode into a Writer. +func (n *ColumnNameExpr) Format(w io.Writer) { + name := strings.Replace(n.Name.String(), ".", "`.`", -1) + fmt.Fprintf(w, "`%s`", name) +} + +// Accept implements Node Accept interface. +func (n *ColumnNameExpr) Accept(v Visitor) (Node, bool) { + newNode, skipChildren := v.Enter(n) + if skipChildren { + return v.Leave(newNode) + } + n = newNode.(*ColumnNameExpr) + node, ok := n.Name.Accept(v) + if !ok { + return n, false + } + n.Name = node.(*ColumnName) + return v.Leave(n) +} + +// DefaultExpr is the default expression using default value for a column. +type DefaultExpr struct { + exprNode + // Name is the column name. + Name *ColumnName +} + +// Format the ExprNode into a Writer. +func (n *DefaultExpr) Format(w io.Writer) { + panic("Not implemented") +} + +// Accept implements Node Accept interface. +func (n *DefaultExpr) Accept(v Visitor) (Node, bool) { + newNode, skipChildren := v.Enter(n) + if skipChildren { + return v.Leave(newNode) + } + n = newNode.(*DefaultExpr) + if n.Name != nil { + node, ok := n.Name.Accept(v) + if !ok { + return n, false + } + n.Name = node.(*ColumnName) + } + return v.Leave(n) +} + +// PatternInExpr is the expression for in operator, like "expr in (1, 2, 3)" or "expr in (select c from t)". +type PatternInExpr struct { + exprNode + // Expr is the value expression to be compared. + Expr ExprNode + // List is the list expression in compare list. + List []ExprNode + // Not is true, the expression is "not in". + Not bool +} + +// Format the ExprNode into a Writer. +func (n *PatternInExpr) Format(w io.Writer) { + n.Expr.Format(w) + if n.Not { + fmt.Fprint(w, " NOT IN (") + } else { + fmt.Fprint(w, " IN (") + } + for i, expr := range n.List { + if i != 0 { + fmt.Fprint(w, ",") + } + expr.Format(w) + } + fmt.Fprint(w, ")") +} + +// Accept implements Node Accept interface. +func (n *PatternInExpr) Accept(v Visitor) (Node, bool) { + newNode, skipChildren := v.Enter(n) + if skipChildren { + return v.Leave(newNode) + } + n = newNode.(*PatternInExpr) + node, ok := n.Expr.Accept(v) + if !ok { + return n, false + } + n.Expr = node.(ExprNode) + for i, val := range n.List { + node, ok = val.Accept(v) + if !ok { + return n, false + } + n.List[i] = node.(ExprNode) + } + return v.Leave(n) +} + +// IsNullExpr is the expression for null check. +type IsNullExpr struct { + exprNode + // Expr is the expression to be checked. + Expr ExprNode + // Not is true, the expression is "is not null". 
+ Not bool +} + +// Format the ExprNode into a Writer. +func (n *IsNullExpr) Format(w io.Writer) { + n.Expr.Format(w) + if n.Not { + fmt.Fprint(w, " IS NOT NULL") + return + } + fmt.Fprint(w, " IS NULL") +} + +// Accept implements Node Accept interface. +func (n *IsNullExpr) Accept(v Visitor) (Node, bool) { + newNode, skipChildren := v.Enter(n) + if skipChildren { + return v.Leave(newNode) + } + n = newNode.(*IsNullExpr) + node, ok := n.Expr.Accept(v) + if !ok { + return n, false + } + n.Expr = node.(ExprNode) + return v.Leave(n) +} + +// ParenthesesExpr is the parentheses expression. +type ParenthesesExpr struct { + exprNode + // Expr is the expression in parentheses. + Expr ExprNode +} + +// Format the ExprNode into a Writer. +func (n *ParenthesesExpr) Format(w io.Writer) { + fmt.Fprint(w, "(") + n.Expr.Format(w) + fmt.Fprint(w, ")") +} + +// Accept implements Node Accept interface. +func (n *ParenthesesExpr) Accept(v Visitor) (Node, bool) { + newNode, skipChildren := v.Enter(n) + if skipChildren { + return v.Leave(newNode) + } + n = newNode.(*ParenthesesExpr) + if n.Expr != nil { + node, ok := n.Expr.Accept(v) + if !ok { + return n, false + } + n.Expr = node.(ExprNode) + } + return v.Leave(n) +} + +// RowExpr is the expression for row constructor. +// See https://dev.mysql.com/doc/refman/5.7/en/row-subqueries.html +type RowExpr struct { + exprNode + + Values []ExprNode +} + +// Format the ExprNode into a Writer. +func (n *RowExpr) Format(w io.Writer) { + panic("Not implemented") +} + +// Accept implements Node Accept interface. +func (n *RowExpr) Accept(v Visitor) (Node, bool) { + newNode, skipChildren := v.Enter(n) + if skipChildren { + return v.Leave(newNode) + } + n = newNode.(*RowExpr) + for i, val := range n.Values { + node, ok := val.Accept(v) + if !ok { + return n, false + } + n.Values[i] = node.(ExprNode) + } + return v.Leave(n) +} + +// UnaryOperationExpr is the expression for unary operator. +type UnaryOperationExpr struct { + exprNode + // Op is the operator opcode. + Op opcode.Op + // V is the unary expression. + V ExprNode +} + +// Format the ExprNode into a Writer. +func (n *UnaryOperationExpr) Format(w io.Writer) { + n.Op.Format(w) + n.V.Format(w) +} + +// Accept implements Node Accept interface. +func (n *UnaryOperationExpr) Accept(v Visitor) (Node, bool) { + newNode, skipChildren := v.Enter(n) + if skipChildren { + return v.Leave(newNode) + } + n = newNode.(*UnaryOperationExpr) + node, ok := n.V.Accept(v) + if !ok { + return n, false + } + n.V = node.(ExprNode) + return v.Leave(n) +} + +// ValuesExpr is the expression used in INSERT VALUES. +type ValuesExpr struct { + exprNode + // Column is column name. + Column *ColumnNameExpr +} + +// Format the ExprNode into a Writer. +func (n *ValuesExpr) Format(w io.Writer) { + panic("Not implemented") +} + +// Accept implements Node Accept interface. +func (n *ValuesExpr) Accept(v Visitor) (Node, bool) { + newNode, skipChildren := v.Enter(n) + if skipChildren { + return v.Leave(newNode) + } + n = newNode.(*ValuesExpr) + node, ok := n.Column.Accept(v) + if !ok { + return n, false + } + // `node` may be *ast.ValueExpr, to avoid panic, we write `ok` but do not use + // it. + n.Column, _ = node.(*ColumnNameExpr) + return v.Leave(n) +} + +// VariableExpr is the expression for variable. +type VariableExpr struct { + exprNode + // Name is the variable name. + Name string + // IsGlobal indicates whether this variable is global. + IsGlobal bool + // IsSystem indicates whether this variable is a system variable in current session. 
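+	// For example, "@@autocommit" refers to a system variable (IsSystem is true),
+	// while a user variable such as "@a" leaves IsSystem false.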
+ IsSystem bool + // ExplicitScope indicates whether this variable scope is set explicitly. + ExplicitScope bool + // Value is the variable value. + Value ExprNode +} + +// Format the ExprNode into a Writer. +func (n *VariableExpr) Format(w io.Writer) { + panic("Not implemented") +} + +// Accept implements Node Accept interface. +func (n *VariableExpr) Accept(v Visitor) (Node, bool) { + newNode, skipChildren := v.Enter(n) + if skipChildren { + return v.Leave(newNode) + } + n = newNode.(*VariableExpr) + if n.Value == nil { + return v.Leave(n) + } + + node, ok := n.Value.Accept(v) + if !ok { + return n, false + } + n.Value = node.(ExprNode) + return v.Leave(n) +} diff --git a/parser/ast/expressions_test.go b/parser/ast/expressions_test.go new file mode 100644 index 0000000..338e7ff --- /dev/null +++ b/parser/ast/expressions_test.go @@ -0,0 +1,93 @@ +// Copyright 2017 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package ast_test + +import ( + . "github.com/pingcap/check" + . "github.com/pingcap/tidb/parser/ast" + _ "github.com/pingcap/tidb/types/parser_driver" +) + +var _ = Suite(&testExpressionsSuite{}) + +type testExpressionsSuite struct { +} + +type checkVisitor struct{} + +func (v checkVisitor) Enter(in Node) (Node, bool) { + if e, ok := in.(*checkExpr); ok { + e.enterCnt++ + return in, true + } + return in, false +} + +func (v checkVisitor) Leave(in Node) (Node, bool) { + if e, ok := in.(*checkExpr); ok { + e.leaveCnt++ + } + return in, true +} + +type checkExpr struct { + ValueExpr + + enterCnt int + leaveCnt int +} + +func (n *checkExpr) Accept(v Visitor) (Node, bool) { + newNode, skipChildren := v.Enter(n) + if skipChildren { + return v.Leave(newNode) + } + n = newNode.(*checkExpr) + return v.Leave(n) +} + +func (n *checkExpr) reset() { + n.enterCnt = 0 + n.leaveCnt = 0 +} + +func (tc *testExpressionsSuite) TestExpresionsVisitorCover(c *C) { + ce := &checkExpr{} + stmts := + []struct { + node Node + expectedEnterCnt int + expectedLeaveCnt int + }{ + {&BetweenExpr{Expr: ce, Left: ce, Right: ce}, 3, 3}, + {&BinaryOperationExpr{L: ce, R: ce}, 2, 2}, + {&ColumnNameExpr{Name: &ColumnName{}}, 0, 0}, + {&DefaultExpr{Name: &ColumnName{}}, 0, 0}, + {&IsNullExpr{Expr: ce}, 1, 1}, + {&ParenthesesExpr{Expr: ce}, 1, 1}, + {&RowExpr{Values: []ExprNode{ce, ce}}, 2, 2}, + {&UnaryOperationExpr{V: ce}, 1, 1}, + {NewValueExpr(0), 0, 0}, + {&ValuesExpr{Column: &ColumnNameExpr{Name: &ColumnName{}}}, 0, 0}, + {&VariableExpr{Value: ce}, 1, 1}, + } + + for _, v := range stmts { + ce.reset() + v.node.Accept(checkVisitor{}) + c.Check(ce.enterCnt, Equals, v.expectedEnterCnt) + c.Check(ce.leaveCnt, Equals, v.expectedLeaveCnt) + v.node.Accept(visitor1{}) + } +} diff --git a/parser/ast/flag.go b/parser/ast/flag.go new file mode 100644 index 0000000..95fcf90 --- /dev/null +++ b/parser/ast/flag.go @@ -0,0 +1,101 @@ +// Copyright 2015 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package ast + +// HasAggFlag checks if the expr contains FlagHasAggregateFunc. +func HasAggFlag(expr ExprNode) bool { + return expr.GetFlag()&FlagHasAggregateFunc > 0 +} + +// SetFlag sets flag for expression. +func SetFlag(n Node) { + var setter flagSetter + n.Accept(&setter) +} + +type flagSetter struct { +} + +func (f *flagSetter) Enter(in Node) (Node, bool) { + return in, false +} + +func (f *flagSetter) Leave(in Node) (Node, bool) { + switch x := in.(type) { + case *AggregateFuncExpr: + f.aggregateFunc(x) + case *BetweenExpr: + x.SetFlag(x.Expr.GetFlag() | x.Left.GetFlag() | x.Right.GetFlag()) + case *BinaryOperationExpr: + x.SetFlag(x.L.GetFlag() | x.R.GetFlag()) + case *ColumnNameExpr: + x.SetFlag(FlagHasReference) + case *DefaultExpr: + x.SetFlag(FlagHasDefault) + case *FuncCallExpr: + f.funcCall(x) + case *IsNullExpr: + x.SetFlag(x.Expr.GetFlag()) + case *ParenthesesExpr: + x.SetFlag(x.Expr.GetFlag()) + case *PatternInExpr: + f.patternIn(x) + case *RowExpr: + f.row(x) + case *UnaryOperationExpr: + x.SetFlag(x.V.GetFlag()) + case *ValuesExpr: + x.SetFlag(FlagHasReference) + case *VariableExpr: + if x.Value == nil { + x.SetFlag(FlagHasVariable) + } else { + x.SetFlag(FlagHasVariable | x.Value.GetFlag()) + } + } + + return in, true +} + +func (f *flagSetter) patternIn(x *PatternInExpr) { + flag := x.Expr.GetFlag() + for _, val := range x.List { + flag |= val.GetFlag() + } + x.SetFlag(flag) +} + +func (f *flagSetter) row(x *RowExpr) { + var flag uint64 + for _, val := range x.Values { + flag |= val.GetFlag() + } + x.SetFlag(flag) +} + +func (f *flagSetter) funcCall(x *FuncCallExpr) { + flag := FlagHasFunc + for _, val := range x.Args { + flag |= val.GetFlag() + } + x.SetFlag(flag) +} + +func (f *flagSetter) aggregateFunc(x *AggregateFuncExpr) { + flag := FlagHasAggregateFunc + for _, val := range x.Args { + flag |= val.GetFlag() + } + x.SetFlag(flag) +} diff --git a/parser/ast/flag_test.go b/parser/ast/flag_test.go new file mode 100644 index 0000000..20217a8 --- /dev/null +++ b/parser/ast/flag_test.go @@ -0,0 +1,53 @@ +// Copyright 2016 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package ast_test + +import ( + "testing" + + . 
"github.com/pingcap/check" + "github.com/pingcap/tidb/parser" + "github.com/pingcap/tidb/parser/ast" +) + +func TestT(t *testing.T) { + CustomVerboseFlag = true + TestingT(t) +} + +var _ = Suite(&testFlagSuite{}) + +type testFlagSuite struct { + *parser.Parser +} + +func (ts *testFlagSuite) SetUpSuite(c *C) { + ts.Parser = parser.New() +} + +func (ts *testFlagSuite) TestHasAggFlag(c *C) { + expr := &ast.BetweenExpr{} + flagTests := []struct { + flag uint64 + hasAgg bool + }{ + {ast.FlagHasAggregateFunc, true}, + {ast.FlagHasAggregateFunc | ast.FlagHasVariable, true}, + {ast.FlagHasVariable, false}, + } + for _, tt := range flagTests { + expr.SetFlag(tt.flag) + c.Assert(ast.HasAggFlag(expr), Equals, tt.hasAgg) + } +} diff --git a/parser/ast/functions.go b/parser/ast/functions.go new file mode 100644 index 0000000..8bd639a --- /dev/null +++ b/parser/ast/functions.go @@ -0,0 +1,139 @@ +// Copyright 2015 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package ast + +import ( + "fmt" + "io" + + "github.com/pingcap/tidb/parser/model" +) + +var ( + _ FuncNode = &AggregateFuncExpr{} + _ FuncNode = &FuncCallExpr{} +) + +// List scalar function names. +const ( + IsNull = "isnull" + Length = "length" + Strcmp = "strcmp" + OctetLength = "octet_length" + If = "if" + Ifnull = "ifnull" + LogicAnd = "and" + LogicOr = "or" + GE = "ge" + LE = "le" + EQ = "eq" + NE = "ne" + LT = "lt" + GT = "gt" + Plus = "plus" + Minus = "minus" + Div = "div" + Mul = "mul" + UnaryNot = "not" + UnaryMinus = "unaryminus" + In = "in" + RowFunc = "row" + SetVar = "setvar" + GetVar = "getvar" + Values = "values" +) + +// FuncCallExpr is for function expression. +type FuncCallExpr struct { + funcNode + // FnName is the function name. + FnName model.CIStr + // Args is the function args. + Args []ExprNode +} + +// Format the ExprNode into a Writer. +func (n *FuncCallExpr) Format(w io.Writer) { + fmt.Fprintf(w, "%s(", n.FnName.L) + for i, arg := range n.Args { + arg.Format(w) + if i != len(n.Args)-1 { + fmt.Fprint(w, ", ") + } + } + fmt.Fprint(w, ")") +} + +// Accept implements Node interface. +func (n *FuncCallExpr) Accept(v Visitor) (Node, bool) { + newNode, skipChildren := v.Enter(n) + if skipChildren { + return v.Leave(newNode) + } + n = newNode.(*FuncCallExpr) + for i, val := range n.Args { + node, ok := val.Accept(v) + if !ok { + return n, false + } + n.Args[i] = node.(ExprNode) + } + return v.Leave(n) +} + +const ( + // AggFuncCount is the name of Count function. + AggFuncCount = "count" + // AggFuncSum is the name of Sum function. + AggFuncSum = "sum" + // AggFuncAvg is the name of Avg function. + AggFuncAvg = "avg" + // AggFuncFirstRow is the name of FirstRowColumn function. + AggFuncFirstRow = "firstrow" + // AggFuncMax is the name of max function. + AggFuncMax = "max" + // AggFuncMin is the name of min function. + AggFuncMin = "min" +) + +// AggregateFuncExpr represents aggregate function expression. +type AggregateFuncExpr struct { + funcNode + // F is the function name. + F string + // Args is the function args. 
+ Args []ExprNode +} + +// Format the ExprNode into a Writer. +func (n *AggregateFuncExpr) Format(w io.Writer) { + panic("Not implemented") +} + +// Accept implements Node Accept interface. +func (n *AggregateFuncExpr) Accept(v Visitor) (Node, bool) { + newNode, skipChildren := v.Enter(n) + if skipChildren { + return v.Leave(newNode) + } + n = newNode.(*AggregateFuncExpr) + for i, val := range n.Args { + node, ok := val.Accept(v) + if !ok { + return n, false + } + n.Args[i] = node.(ExprNode) + } + return v.Leave(n) +} diff --git a/parser/ast/functions_test.go b/parser/ast/functions_test.go new file mode 100644 index 0000000..a0697f8 --- /dev/null +++ b/parser/ast/functions_test.go @@ -0,0 +1,37 @@ +// Copyright 2017 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package ast_test + +import ( + . "github.com/pingcap/check" + . "github.com/pingcap/tidb/parser/ast" +) + +var _ = Suite(&testFunctionsSuite{}) + +type testFunctionsSuite struct { +} + +func (ts *testFunctionsSuite) TestFunctionsVisitorCover(c *C) { + valueExpr := NewValueExpr(42) + stmts := []Node{ + &AggregateFuncExpr{Args: []ExprNode{valueExpr}}, + &FuncCallExpr{Args: []ExprNode{valueExpr}}, + } + + for _, stmt := range stmts { + stmt.Accept(visitor{}) + stmt.Accept(visitor1{}) + } +} diff --git a/parser/ast/misc.go b/parser/ast/misc.go new file mode 100644 index 0000000..11ad36b --- /dev/null +++ b/parser/ast/misc.go @@ -0,0 +1,313 @@ +// Copyright 2015 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package ast + +import ( + "fmt" + + "github.com/pingcap/tidb/parser/model" + "github.com/pingcap/tidb/parser/mysql" +) + +var ( + _ StmtNode = &AdminStmt{} + _ StmtNode = &BeginStmt{} + _ StmtNode = &CommitStmt{} + _ StmtNode = &ExplainStmt{} + _ StmtNode = &RollbackStmt{} + _ StmtNode = &SetStmt{} + _ StmtNode = &UseStmt{} + + _ Node = &VariableAssignment{} +) + +const ( + // Valid formats for explain statement. + ExplainFormatROW = "row" + ExplainFormatDOT = "dot" +) + +var ( + // ExplainFormats stores the valid formats for explain statement, used by validator. + ExplainFormats = []string{ + ExplainFormatROW, + ExplainFormatDOT, + } +) + +// TypeOpt is used for parsing data type option from SQL. +type TypeOpt struct { + IsUnsigned bool + IsZerofill bool +} + +// FloatOpt is used for parsing floating-point type option from SQL. +// See http://dev.mysql.com/doc/refman/5.7/en/floating-point-types.html +type FloatOpt struct { + Flen int + Decimal int +} + +// ExplainStmt is a statement to provide information about how is SQL statement executed +// or get columns information in a table. 
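+// For example, "EXPLAIN SELECT * FROM t" is parsed into an ExplainStmt whose Stmt
+// field holds the inner SELECT statement; the DESC-style form that shows column
+// information is also represented by this node. Format normally defaults to
+// ExplainFormatROW unless an explicit FORMAT clause is given (the accepted syntax
+// is defined by the grammar, not here).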
+// See https://dev.mysql.com/doc/refman/5.7/en/explain.html +type ExplainStmt struct { + stmtNode + + Stmt StmtNode + Format string +} + +// Accept implements Node Accept interface. +func (n *ExplainStmt) Accept(v Visitor) (Node, bool) { + newNode, skipChildren := v.Enter(n) + if skipChildren { + return v.Leave(newNode) + } + n = newNode.(*ExplainStmt) + node, ok := n.Stmt.Accept(v) + if !ok { + return n, false + } + n.Stmt = node.(DMLNode) + return v.Leave(n) +} + +// BeginStmt is a statement to start a new transaction. +// See https://dev.mysql.com/doc/refman/5.7/en/commit.html +type BeginStmt struct { + stmtNode +} + +// Accept implements Node Accept interface. +func (n *BeginStmt) Accept(v Visitor) (Node, bool) { + newNode, skipChildren := v.Enter(n) + if skipChildren { + return v.Leave(newNode) + } + return v.Leave(n) +} + +// CommitStmt is a statement to commit the current transaction. +// See https://dev.mysql.com/doc/refman/5.7/en/commit.html +type CommitStmt struct { + stmtNode +} + +// Accept implements Node Accept interface. +func (n *CommitStmt) Accept(v Visitor) (Node, bool) { + newNode, skipChildren := v.Enter(n) + if skipChildren { + return v.Leave(newNode) + } + n = newNode.(*CommitStmt) + return v.Leave(n) +} + +// RollbackStmt is a statement to roll back the current transaction. +// See https://dev.mysql.com/doc/refman/5.7/en/commit.html +type RollbackStmt struct { + stmtNode +} + +// Accept implements Node Accept interface. +func (n *RollbackStmt) Accept(v Visitor) (Node, bool) { + newNode, skipChildren := v.Enter(n) + if skipChildren { + return v.Leave(newNode) + } + n = newNode.(*RollbackStmt) + return v.Leave(n) +} + +// UseStmt is a statement to use the DBName database as the current database. +// See https://dev.mysql.com/doc/refman/5.7/en/use.html +type UseStmt struct { + stmtNode + + DBName string +} + +// Accept implements Node Accept interface. +func (n *UseStmt) Accept(v Visitor) (Node, bool) { + newNode, skipChildren := v.Enter(n) + if skipChildren { + return v.Leave(newNode) + } + n = newNode.(*UseStmt) + return v.Leave(n) +} + +// VariableAssignment is a variable assignment struct. +type VariableAssignment struct { + node + Name string + Value ExprNode + IsGlobal bool + IsSystem bool +} + +// Accept implements Node interface. +func (n *VariableAssignment) Accept(v Visitor) (Node, bool) { + newNode, skipChildren := v.Enter(n) + if skipChildren { + return v.Leave(newNode) + } + n = newNode.(*VariableAssignment) + node, ok := n.Value.Accept(v) + if !ok { + return n, false + } + n.Value = node.(ExprNode) + return v.Leave(n) +} + +// SetStmt is the statement to set variables. +type SetStmt struct { + stmtNode + // Variables is the list of variable assignment. + Variables []*VariableAssignment +} + +// Accept implements Node Accept interface. +func (n *SetStmt) Accept(v Visitor) (Node, bool) { + newNode, skipChildren := v.Enter(n) + if skipChildren { + return v.Leave(newNode) + } + n = newNode.(*SetStmt) + for i, val := range n.Variables { + node, ok := val.Accept(v) + if !ok { + return n, false + } + n.Variables[i] = node.(*VariableAssignment) + } + return v.Leave(n) +} + +// AdminStmtType is the type for admin statement. +type AdminStmtType int + +// Admin statement types. +const ( + AdminShowDDL = iota + 1 + AdminShowDDLJobs +) + +// AdminStmt is the struct for Admin statement. +type AdminStmt struct { + stmtNode + + Tp AdminStmtType + Tables []*TableName + JobNumber int64 + Where ExprNode +} + +// Accept implements Node Accept interface. 
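+//
+// For example, "ADMIN SHOW DDL" is represented by an AdminStmt with Tp set to
+// AdminShowDDL, and "ADMIN SHOW DDL JOBS" uses AdminShowDDLJobs; Tables, JobNumber
+// and Where stay at their zero values unless the corresponding clauses are present
+// (the exact SQL accepted is defined by the grammar, not by this file).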
+func (n *AdminStmt) Accept(v Visitor) (Node, bool) { + newNode, skipChildren := v.Enter(n) + if skipChildren { + return v.Leave(newNode) + } + + n = newNode.(*AdminStmt) + for i, val := range n.Tables { + node, ok := val.Accept(v) + if !ok { + return n, false + } + n.Tables[i] = node.(*TableName) + } + + return v.Leave(n) +} + +// Ident is the table identifier composed of schema name and table name. +type Ident struct { + Schema model.CIStr + Name model.CIStr +} + +// String implements fmt.Stringer interface. +func (i Ident) String() string { + if i.Schema.O == "" { + return i.Name.O + } + return fmt.Sprintf("%s.%s", i.Schema, i.Name) +} + +// SelectStmtOpts wrap around select hints and switches +type SelectStmtOpts struct { + Distinct bool + SQLBigResult bool + SQLBufferResult bool + SQLCache bool + SQLSmallResult bool + CalcFoundRows bool + StraightJoin bool + Priority mysql.PriorityEnum + TableHints []*TableOptimizerHint +} + +// TableOptimizerHint is Table level optimizer hint +type TableOptimizerHint struct { + node + // HintName is the name or alias of the table(s) which the hint will affect. + // Table hints has no schema info + // It allows only table name or alias (if table has an alias) + HintName model.CIStr + // QBName is the default effective query block of this hint. + QBName model.CIStr + Tables []HintTable + Indexes []model.CIStr + StoreType model.CIStr + // Statement Execution Time Optimizer Hints + // See https://dev.mysql.com/doc/refman/5.7/en/optimizer-hints.html#optimizer-hints-execution-time + MaxExecutionTime uint64 + MemoryQuota int64 + QueryType model.CIStr + HintFlag bool +} + +// HintTable is table in the hint. It may have query block info. +type HintTable struct { + DBName model.CIStr + TableName model.CIStr + QBName model.CIStr +} + +// Accept implements Node Accept interface. +func (n *TableOptimizerHint) Accept(v Visitor) (Node, bool) { + newNode, skipChildren := v.Enter(n) + if skipChildren { + return v.Leave(newNode) + } + n = newNode.(*TableOptimizerHint) + return v.Leave(n) +} + +type BinaryLiteral interface { + ToString() string +} + +// NewDecimal creates a types.Decimal value, it's provided by parser driver. +var NewDecimal func(string) (interface{}, error) + +// NewHexLiteral creates a types.HexLiteral value, it's provided by parser driver. +var NewHexLiteral func(string) (interface{}, error) + +// NewBitLiteral creates a types.BitLiteral value, it's provided by parser driver. +var NewBitLiteral func(string) (interface{}, error) diff --git a/parser/ast/misc_test.go b/parser/ast/misc_test.go new file mode 100644 index 0000000..ae41211 --- /dev/null +++ b/parser/ast/misc_test.go @@ -0,0 +1,59 @@ +// Copyright 2016 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package ast_test + +import ( + . "github.com/pingcap/check" + . 
"github.com/pingcap/tidb/parser/ast" +) + +var _ = Suite(&testMiscSuite{}) + +type testMiscSuite struct { +} + +type visitor struct{} + +func (v visitor) Enter(in Node) (Node, bool) { + return in, false +} + +func (v visitor) Leave(in Node) (Node, bool) { + return in, true +} + +type visitor1 struct { + visitor +} + +func (visitor1) Enter(in Node) (Node, bool) { + return in, true +} + +func (ts *testMiscSuite) TestSensitiveStatement(c *C) { + negative := []StmtNode{ + &AlterTableStmt{}, + &CreateDatabaseStmt{}, + &CreateIndexStmt{}, + &CreateTableStmt{}, + &DropDatabaseStmt{}, + &DropIndexStmt{}, + &DropTableStmt{}, + &TruncateTableStmt{}, + } + for _, stmt := range negative { + _, ok := stmt.(SensitiveStmtNode) + c.Assert(ok, IsFalse) + } +} diff --git a/parser/ast/stats.go b/parser/ast/stats.go new file mode 100644 index 0000000..2392c9c --- /dev/null +++ b/parser/ast/stats.go @@ -0,0 +1,42 @@ +// Copyright 2017 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package ast + +var ( + _ StmtNode = &AnalyzeTableStmt{} +) + +// AnalyzeTableStmt is used to create table statistics. +type AnalyzeTableStmt struct { + stmtNode + + TableNames []*TableName +} + +// Accept implements Node Accept interface. +func (n *AnalyzeTableStmt) Accept(v Visitor) (Node, bool) { + newNode, skipChildren := v.Enter(n) + if skipChildren { + return v.Leave(newNode) + } + n = newNode.(*AnalyzeTableStmt) + for i, val := range n.TableNames { + node, ok := val.Accept(v) + if !ok { + return n, false + } + n.TableNames[i] = node.(*TableName) + } + return v.Leave(n) +} diff --git a/parser/ast/util.go b/parser/ast/util.go new file mode 100644 index 0000000..4501086 --- /dev/null +++ b/parser/ast/util.go @@ -0,0 +1,57 @@ +// Copyright 2018 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package ast + +// IsReadOnly checks whether the input ast is readOnly. +func IsReadOnly(node Node) bool { + switch st := node.(type) { + case *SelectStmt: + checker := readOnlyChecker{ + readOnly: true, + } + + node.Accept(&checker) + return checker.readOnly + case *ExplainStmt: + return IsReadOnly(st.Stmt) + default: + return false + } +} + +// readOnlyChecker checks whether a query's ast is readonly, if it satisfied +// 1. selectstmt; +// 2. need not to set var; +// it is readonly statement. +type readOnlyChecker struct { + readOnly bool +} + +// Enter implements Visitor interface. +func (checker *readOnlyChecker) Enter(in Node) (out Node, skipChildren bool) { + switch node := in.(type) { + case *VariableExpr: + // like func rewriteVariable(), this stands for SetVar. 
+ if !node.IsSystem && node.Value != nil { + checker.readOnly = false + return in, true + } + } + return in, false +} + +// Leave implements Visitor interface. +func (checker *readOnlyChecker) Leave(in Node) (out Node, ok bool) { + return in, checker.readOnly +} diff --git a/parser/bench_test.go b/parser/bench_test.go new file mode 100644 index 0000000..8057b9f --- /dev/null +++ b/parser/bench_test.go @@ -0,0 +1,66 @@ +// Copyright 2017 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package parser + +import ( + "testing" +) + +func BenchmarkSysbenchSelect(b *testing.B) { + parser := New() + sql := "SELECT pad FROM sbtest1 WHERE id=1;" + b.ResetTimer() + for i := 0; i < b.N; i++ { + _, _, err := parser.Parse(sql, "", "") + if err != nil { + b.Fatal(err) + } + } + b.ReportAllocs() +} + +func BenchmarkParseComplex(b *testing.B) { + var table = []string{ + `SELECT DISTINCT ca.l9_convergence_code AS atb2, cu.cust_sub_type AS account_type, cst.description AS account_type_desc, ss.prim_resource_val AS msisdn, ca.ban AS ban_key, To_char(mo.memo_date, 'YYYYMMDD') AS memo_date, cu.l9_identification AS thai_id, ss.subscriber_no AS subs_key, ss.dealer_code AS shop_code, cd.description AS shop_name, mot.short_desc, Regexp_substr(mo.attr1value, '[^ ;]+', 1, 3) staff_id, mo.operator_id AS user_id, mo.memo_system_text, co2.soc_name AS first_socname, co3.soc_name AS previous_socname, co.soc_name AS current_socname, Regexp_substr(mo.attr1value, '[^ ; ]+', 1, 1) NAME, co.soc_description AS current_pp_desc, co3.soc_description AS prev_pp_desc, co.soc_cd AS soc_cd, ( SELECT Sum(br.amount) FROM bl1_rc_rates BR, customer CU, subscriber SS WHERE br.service_receiver_id = ss.subscriber_no AND br.receiver_customer = ss.customer_id AND br.effective_date <= br.expiration_date AND (( ss. sub_status <> 'C' AND ss. sub_status <> 'T' AND br.expiration_date IS NULL) OR ( ss. 
sub_status = 'C' AND br.expiration_date LIKE ss.effective_date)) AND br.pp_ind = 'Y' AND br.cycle_code = cu.bill_cycle) AS pp_rate, cu.bill_cycle AS cycle_code, To_char(Nvl(ss.l9_tmv_act_date, ss.init_act_date),'YYYYMMDD') AS activated_date, To_char(cd.effective_date, 'YYYYMMDD') AS shop_effective_date, cd.expiration_date AS shop_expired_date, ca.l9_company_code AS company_code FROM service_details S, product CO, csm_pay_channel CPC, account CA, subscriber SS, customer CU, customer_sub_type CST, csm_dealer CD, service_details S2, product CO2, service_details S3, product CO3, memo MO , memo_type MOT, logical_date LO, charge_details CHD WHERE ss.subscriber_no = chd.agreement_no AND cpc.pym_channel_no = chd.target_pcn AND chd.chg_split_type = 'DR' AND chd.expiration_date IS NULL AND s.soc = co.soc_cd AND co.soc_type = 'P' AND s.agreement_no = ss.subscriber_no AND ss.prim_resource_tp = 'C' AND cpc.payment_category = 'POST' AND ca.ban = cpc.ban AND ( ca.l9_company_code = 'RF' OR ca.l9_company_code = 'RM' OR ca.l9_company_code = 'TM') AND ss.customer_id = cu.customer_id AND cu.cust_sub_type = cst.cust_sub_type AND cu.customer_type = cst.customer_type AND ss.dealer_code = cd.dealer AND s2.effective_date= ( SELECT Max(sa1.effective_date) FROM service_details SA1, product o1 WHERE sa1.agreement_no = ss.subscriber_no AND co.soc_cd = sa1.soc AND co.soc_type = 'P' ) AND s2.agreement_no = s.agreement_no AND s2.soc = co2.soc_cd AND co2.soc_type = 'P' AND s2.effective_date = ( SELECT Min(sa1.effective_date) FROM service_details SA1, product o1 WHERE sa1.agreement_no = ss.subscriber_no AND co2.soc_cd = sa1.soc AND co.soc_type = 'P' ) AND s3.agreement_no = s.agreement_no AND s3.soc = co3.soc_cd AND co3.soc_type = 'P' AND s3.effective_date = ( SELECT Max(sa1.effective_date) FROM service_details SA1, a product o1 WHERE sa1.agreement_no = ss.subscriber_no AND sa1.effective_date < ( SELECT Max(sa1.effective_date) FROM service_details SA1, product o1 WHERE sa1.agreement_no = ss.subscriber_no AND co3.soc_cd = sa1.soc AND co3.soc_type = 'P' ) AND co3.soc_cd = sa1.soc AND o1.soc_type = 'P' ) AND mo.entity_id = ss.subscriber_no AND mo.entity_type_id = 6 AND mo.memo_type_id = mot.memo_type_id AND Trunc(mo.sys_creation_date) = ( SELECT Trunc(lo.logical_date - 1) FROM lo) trunc(lo.logical_date - 1) AND lo.expiration_date IS NULL AND lo.logical_date_type = 'B' AND lo.expiration_date IS NULL AND ( mot.short_desc = 'BCN' OR mot.short_desc = 'BCNM' )`} + parser := New() + b.ResetTimer() + for i := 0; i < b.N; i++ { + for _, v := range table { + _, _, err := parser.Parse(v, "", "") + if err != nil { + b.Failed() + } + } + } + b.ReportAllocs() +} + +func BenchmarkParseSimple(b *testing.B) { + var table = []string{ + "insert into t values (1), (2), (3)", + "insert into t values (4), (5), (6), (7)", + "select c from t where c > 2", + } + parser := New() + b.ResetTimer() + for i := 0; i < b.N; i++ { + for _, v := range table { + _, _, err := parser.Parse(v, "", "") + if err != nil { + b.Failed() + } + } + } + b.ReportAllocs() +} diff --git a/parser/charset/charset.go b/parser/charset/charset.go new file mode 100644 index 0000000..ef947eb --- /dev/null +++ b/parser/charset/charset.go @@ -0,0 +1,458 @@ +// Copyright 2015 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package charset + +import ( + "strings" + + "github.com/pingcap/errors" + "github.com/pingcap/tidb/parser/mysql" + "github.com/pingcap/tidb/parser/terror" +) + +var ( + ErrUnknownCollation = terror.ClassDDL.New(mysql.ErrUnknownCollation, mysql.MySQLErrName[mysql.ErrUnknownCollation]) + ErrCollationCharsetMismatch = terror.ClassDDL.New(mysql.ErrCollationCharsetMismatch, mysql.MySQLErrName[mysql.ErrCollationCharsetMismatch]) +) + +// Charset is a charset. +// Now we only support MySQL. +type Charset struct { + Name string + DefaultCollation string + Collations map[string]*Collation + Desc string + Maxlen int +} + +// Collation is a collation. +// Now we only support MySQL. +type Collation struct { + ID int + CharsetName string + Name string + IsDefault bool +} + +var charsets = make(map[string]*Charset) +var collationsIDMap = make(map[int]*Collation) +var collationsNameMap = make(map[string]*Collation) +var descs = make([]*Desc, 0, len(charsetInfos)) +var supportedCollations = make([]*Collation, 0, len(supportedCollationNames)) + +// All the supported charsets should be in the following table. +var charsetInfos = []*Charset{ + {CharsetUTF8, CollationUTF8, make(map[string]*Collation), "UTF-8 Unicode", 3}, + {CharsetUTF8MB4, CollationUTF8MB4, make(map[string]*Collation), "UTF-8 Unicode", 4}, + {CharsetASCII, CollationASCII, make(map[string]*Collation), "US ASCII", 1}, + {CharsetLatin1, CollationLatin1, make(map[string]*Collation), "Latin1", 1}, + {CharsetBin, CollationBin, make(map[string]*Collation), "binary", 1}, +} + +// All the names supported collations should be in the following table. +var supportedCollationNames = map[string]struct{}{ + CollationUTF8: {}, + CollationUTF8MB4: {}, + CollationASCII: {}, + CollationLatin1: {}, + CollationBin: {}, +} + +// Desc is a charset description. +type Desc struct { + Name string + Desc string + DefaultCollation string + Maxlen int +} + +// GetSupportedCharsets gets descriptions for all charsets supported so far. +func GetSupportedCharsets() []*Desc { + return descs +} + +// GetSupportedCollations gets information for all collations supported so far. +func GetSupportedCollations() []*Collation { + return supportedCollations +} + +// ValidCharsetAndCollation checks the charset and the collation validity +// and returns a boolean. +func ValidCharsetAndCollation(cs string, co string) bool { + // We will use utf8 as a default charset. + if cs == "" { + cs = "utf8" + } + cs = strings.ToLower(cs) + c, ok := charsets[cs] + if !ok { + return false + } + + if co == "" { + return true + } + co = strings.ToLower(co) + _, ok = c.Collations[co] + if !ok { + return false + } + + return true +} + +// GetDefaultCollation returns the default collation for charset. +func GetDefaultCollation(charset string) (string, error) { + charset = strings.ToLower(charset) + if charset == CharsetBin { + return CollationBin, nil + } + c, ok := charsets[charset] + if !ok { + return "", errors.Errorf("Unknown charset %s", charset) + } + return c.DefaultCollation, nil +} + +// GetDefaultCharsetAndCollate returns the default charset and collation. 
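+//
+// A rough usage sketch of the lookup helpers in this package (the concrete strings
+// come from the mysql defaults and the tables in this file, so treat them as
+// illustrative only):
+//
+//	cs, co := GetDefaultCharsetAndCollate()    // e.g. "utf8mb4", "utf8mb4_bin"
+//	ok := ValidCharsetAndCollation("utf8", "") // true: an empty collation is accepted
+//	def, err := GetDefaultCollation("latin1")  // "latin1_bin", nil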
+func GetDefaultCharsetAndCollate() (string, string) { + return mysql.DefaultCharset, mysql.DefaultCollationName +} + +// GetCharsetInfo returns charset and collation for cs as name. +func GetCharsetInfo(cs string) (string, string, error) { + c, ok := charsets[strings.ToLower(cs)] + if !ok { + return "", "", errors.Errorf("Unknown charset %s", cs) + } + return c.Name, c.DefaultCollation, nil +} + +// GetCharsetDesc gets charset descriptions in the local charsets. +func GetCharsetDesc(cs string) (*Desc, error) { + switch strings.ToLower(cs) { + case CharsetUTF8: + return descs[0], nil + case CharsetUTF8MB4: + return descs[1], nil + case CharsetASCII: + return descs[2], nil + case CharsetLatin1: + return descs[3], nil + case CharsetBin: + return descs[4], nil + default: + return nil, errors.Errorf("Unknown charset %s", cs) + } +} + +// GetCharsetInfoByID returns charset and collation for id as cs_number. +func GetCharsetInfoByID(coID int) (string, string, error) { + if coID == mysql.DefaultCollationID { + return mysql.DefaultCharset, mysql.DefaultCollationName, nil + } + if collation, ok := collationsIDMap[coID]; ok { + return collation.CharsetName, collation.Name, nil + } + return "", "", errors.Errorf("Unknown charset id %d", coID) +} + +// GetCollations returns a list for all collations. +func GetCollations() []*Collation { + return collations +} + +func GetCollationByName(name string) (*Collation, error) { + collation, ok := collationsNameMap[strings.ToLower(name)] + if !ok { + return nil, ErrUnknownCollation.GenWithStackByArgs(name) + } + return collation, nil +} + +const ( + // CharsetBin is used for marking binary charset. + CharsetBin = "binary" + // CollationBin is the default collation for CharsetBin. + CollationBin = "binary" + // CharsetUTF8 is the default charset for string types. + CharsetUTF8 = "utf8" + // CollationUTF8 is the default collation for CharsetUTF8. + CollationUTF8 = "utf8_bin" + // CharsetUTF8MB4 represents 4 bytes utf8, which works the same way as utf8 in Go. + CharsetUTF8MB4 = "utf8mb4" + // CollationUTF8MB4 is the default collation for CharsetUTF8MB4. + CollationUTF8MB4 = "utf8mb4_bin" + // CharsetASCII is a subset of UTF8. + CharsetASCII = "ascii" + // CollationASCII is the default collation for CharsetACSII. + CollationASCII = "ascii_bin" + // CharsetLatin1 is a single byte charset. + CharsetLatin1 = "latin1" + // CollationLatin1 is the default collation for CharsetLatin1. 
+ CollationLatin1 = "latin1_bin" +) + +var collations = []*Collation{ + {1, "big5", "big5_chinese_ci", true}, + {2, "latin2", "latin2_czech_cs", false}, + {3, "dec8", "dec8_swedish_ci", true}, + {4, "cp850", "cp850_general_ci", true}, + {5, "latin1", "latin1_german1_ci", false}, + {6, "hp8", "hp8_english_ci", true}, + {7, "koi8r", "koi8r_general_ci", true}, + {8, "latin1", "latin1_swedish_ci", false}, + {9, "latin2", "latin2_general_ci", true}, + {10, "swe7", "swe7_swedish_ci", true}, + {11, "ascii", "ascii_general_ci", false}, + {12, "ujis", "ujis_japanese_ci", true}, + {13, "sjis", "sjis_japanese_ci", true}, + {14, "cp1251", "cp1251_bulgarian_ci", false}, + {15, "latin1", "latin1_danish_ci", false}, + {16, "hebrew", "hebrew_general_ci", true}, + {18, "tis620", "tis620_thai_ci", true}, + {19, "euckr", "euckr_korean_ci", true}, + {20, "latin7", "latin7_estonian_cs", false}, + {21, "latin2", "latin2_hungarian_ci", false}, + {22, "koi8u", "koi8u_general_ci", true}, + {23, "cp1251", "cp1251_ukrainian_ci", false}, + {24, "gb2312", "gb2312_chinese_ci", true}, + {25, "greek", "greek_general_ci", true}, + {26, "cp1250", "cp1250_general_ci", true}, + {27, "latin2", "latin2_croatian_ci", false}, + {28, "gbk", "gbk_chinese_ci", true}, + {29, "cp1257", "cp1257_lithuanian_ci", false}, + {30, "latin5", "latin5_turkish_ci", true}, + {31, "latin1", "latin1_german2_ci", false}, + {32, "armscii8", "armscii8_general_ci", true}, + {33, "utf8", "utf8_general_ci", false}, + {34, "cp1250", "cp1250_czech_cs", false}, + {35, "ucs2", "ucs2_general_ci", true}, + {36, "cp866", "cp866_general_ci", true}, + {37, "keybcs2", "keybcs2_general_ci", true}, + {38, "macce", "macce_general_ci", true}, + {39, "macroman", "macroman_general_ci", true}, + {40, "cp852", "cp852_general_ci", true}, + {41, "latin7", "latin7_general_ci", true}, + {42, "latin7", "latin7_general_cs", false}, + {43, "macce", "macce_bin", false}, + {44, "cp1250", "cp1250_croatian_ci", false}, + {45, "utf8mb4", "utf8mb4_general_ci", false}, + {46, "utf8mb4", "utf8mb4_bin", true}, + {47, "latin1", "latin1_bin", true}, + {48, "latin1", "latin1_general_ci", false}, + {49, "latin1", "latin1_general_cs", false}, + {50, "cp1251", "cp1251_bin", false}, + {51, "cp1251", "cp1251_general_ci", true}, + {52, "cp1251", "cp1251_general_cs", false}, + {53, "macroman", "macroman_bin", false}, + {54, "utf16", "utf16_general_ci", true}, + {55, "utf16", "utf16_bin", false}, + {56, "utf16le", "utf16le_general_ci", true}, + {57, "cp1256", "cp1256_general_ci", true}, + {58, "cp1257", "cp1257_bin", false}, + {59, "cp1257", "cp1257_general_ci", true}, + {60, "utf32", "utf32_general_ci", true}, + {61, "utf32", "utf32_bin", false}, + {62, "utf16le", "utf16le_bin", false}, + {63, "binary", "binary", true}, + {64, "armscii8", "armscii8_bin", false}, + {65, "ascii", "ascii_bin", true}, + {66, "cp1250", "cp1250_bin", false}, + {67, "cp1256", "cp1256_bin", false}, + {68, "cp866", "cp866_bin", false}, + {69, "dec8", "dec8_bin", false}, + {70, "greek", "greek_bin", false}, + {71, "hebrew", "hebrew_bin", false}, + {72, "hp8", "hp8_bin", false}, + {73, "keybcs2", "keybcs2_bin", false}, + {74, "koi8r", "koi8r_bin", false}, + {75, "koi8u", "koi8u_bin", false}, + {77, "latin2", "latin2_bin", false}, + {78, "latin5", "latin5_bin", false}, + {79, "latin7", "latin7_bin", false}, + {80, "cp850", "cp850_bin", false}, + {81, "cp852", "cp852_bin", false}, + {82, "swe7", "swe7_bin", false}, + {83, "utf8", "utf8_bin", true}, + {84, "big5", "big5_bin", false}, + {85, "euckr", "euckr_bin", false}, + {86, 
"gb2312", "gb2312_bin", false}, + {87, "gbk", "gbk_bin", false}, + {88, "sjis", "sjis_bin", false}, + {89, "tis620", "tis620_bin", false}, + {90, "ucs2", "ucs2_bin", false}, + {91, "ujis", "ujis_bin", false}, + {92, "geostd8", "geostd8_general_ci", true}, + {93, "geostd8", "geostd8_bin", false}, + {94, "latin1", "latin1_spanish_ci", false}, + {95, "cp932", "cp932_japanese_ci", true}, + {96, "cp932", "cp932_bin", false}, + {97, "eucjpms", "eucjpms_japanese_ci", true}, + {98, "eucjpms", "eucjpms_bin", false}, + {99, "cp1250", "cp1250_polish_ci", false}, + {101, "utf16", "utf16_unicode_ci", false}, + {102, "utf16", "utf16_icelandic_ci", false}, + {103, "utf16", "utf16_latvian_ci", false}, + {104, "utf16", "utf16_romanian_ci", false}, + {105, "utf16", "utf16_slovenian_ci", false}, + {106, "utf16", "utf16_polish_ci", false}, + {107, "utf16", "utf16_estonian_ci", false}, + {108, "utf16", "utf16_spanish_ci", false}, + {109, "utf16", "utf16_swedish_ci", false}, + {110, "utf16", "utf16_turkish_ci", false}, + {111, "utf16", "utf16_czech_ci", false}, + {112, "utf16", "utf16_danish_ci", false}, + {113, "utf16", "utf16_lithuanian_ci", false}, + {114, "utf16", "utf16_slovak_ci", false}, + {115, "utf16", "utf16_spanish2_ci", false}, + {116, "utf16", "utf16_roman_ci", false}, + {117, "utf16", "utf16_persian_ci", false}, + {118, "utf16", "utf16_esperanto_ci", false}, + {119, "utf16", "utf16_hungarian_ci", false}, + {120, "utf16", "utf16_sinhala_ci", false}, + {121, "utf16", "utf16_german2_ci", false}, + {122, "utf16", "utf16_croatian_ci", false}, + {123, "utf16", "utf16_unicode_520_ci", false}, + {124, "utf16", "utf16_vietnamese_ci", false}, + {128, "ucs2", "ucs2_unicode_ci", false}, + {129, "ucs2", "ucs2_icelandic_ci", false}, + {130, "ucs2", "ucs2_latvian_ci", false}, + {131, "ucs2", "ucs2_romanian_ci", false}, + {132, "ucs2", "ucs2_slovenian_ci", false}, + {133, "ucs2", "ucs2_polish_ci", false}, + {134, "ucs2", "ucs2_estonian_ci", false}, + {135, "ucs2", "ucs2_spanish_ci", false}, + {136, "ucs2", "ucs2_swedish_ci", false}, + {137, "ucs2", "ucs2_turkish_ci", false}, + {138, "ucs2", "ucs2_czech_ci", false}, + {139, "ucs2", "ucs2_danish_ci", false}, + {140, "ucs2", "ucs2_lithuanian_ci", false}, + {141, "ucs2", "ucs2_slovak_ci", false}, + {142, "ucs2", "ucs2_spanish2_ci", false}, + {143, "ucs2", "ucs2_roman_ci", false}, + {144, "ucs2", "ucs2_persian_ci", false}, + {145, "ucs2", "ucs2_esperanto_ci", false}, + {146, "ucs2", "ucs2_hungarian_ci", false}, + {147, "ucs2", "ucs2_sinhala_ci", false}, + {148, "ucs2", "ucs2_german2_ci", false}, + {149, "ucs2", "ucs2_croatian_ci", false}, + {150, "ucs2", "ucs2_unicode_520_ci", false}, + {151, "ucs2", "ucs2_vietnamese_ci", false}, + {159, "ucs2", "ucs2_general_mysql500_ci", false}, + {160, "utf32", "utf32_unicode_ci", false}, + {161, "utf32", "utf32_icelandic_ci", false}, + {162, "utf32", "utf32_latvian_ci", false}, + {163, "utf32", "utf32_romanian_ci", false}, + {164, "utf32", "utf32_slovenian_ci", false}, + {165, "utf32", "utf32_polish_ci", false}, + {166, "utf32", "utf32_estonian_ci", false}, + {167, "utf32", "utf32_spanish_ci", false}, + {168, "utf32", "utf32_swedish_ci", false}, + {169, "utf32", "utf32_turkish_ci", false}, + {170, "utf32", "utf32_czech_ci", false}, + {171, "utf32", "utf32_danish_ci", false}, + {172, "utf32", "utf32_lithuanian_ci", false}, + {173, "utf32", "utf32_slovak_ci", false}, + {174, "utf32", "utf32_spanish2_ci", false}, + {175, "utf32", "utf32_roman_ci", false}, + {176, "utf32", "utf32_persian_ci", false}, + {177, "utf32", 
"utf32_esperanto_ci", false}, + {178, "utf32", "utf32_hungarian_ci", false}, + {179, "utf32", "utf32_sinhala_ci", false}, + {180, "utf32", "utf32_german2_ci", false}, + {181, "utf32", "utf32_croatian_ci", false}, + {182, "utf32", "utf32_unicode_520_ci", false}, + {183, "utf32", "utf32_vietnamese_ci", false}, + {192, "utf8", "utf8_unicode_ci", false}, + {193, "utf8", "utf8_icelandic_ci", false}, + {194, "utf8", "utf8_latvian_ci", false}, + {195, "utf8", "utf8_romanian_ci", false}, + {196, "utf8", "utf8_slovenian_ci", false}, + {197, "utf8", "utf8_polish_ci", false}, + {198, "utf8", "utf8_estonian_ci", false}, + {199, "utf8", "utf8_spanish_ci", false}, + {200, "utf8", "utf8_swedish_ci", false}, + {201, "utf8", "utf8_turkish_ci", false}, + {202, "utf8", "utf8_czech_ci", false}, + {203, "utf8", "utf8_danish_ci", false}, + {204, "utf8", "utf8_lithuanian_ci", false}, + {205, "utf8", "utf8_slovak_ci", false}, + {206, "utf8", "utf8_spanish2_ci", false}, + {207, "utf8", "utf8_roman_ci", false}, + {208, "utf8", "utf8_persian_ci", false}, + {209, "utf8", "utf8_esperanto_ci", false}, + {210, "utf8", "utf8_hungarian_ci", false}, + {211, "utf8", "utf8_sinhala_ci", false}, + {212, "utf8", "utf8_german2_ci", false}, + {213, "utf8", "utf8_croatian_ci", false}, + {214, "utf8", "utf8_unicode_520_ci", false}, + {215, "utf8", "utf8_vietnamese_ci", false}, + {223, "utf8", "utf8_general_mysql500_ci", false}, + {224, "utf8mb4", "utf8mb4_unicode_ci", false}, + {225, "utf8mb4", "utf8mb4_icelandic_ci", false}, + {226, "utf8mb4", "utf8mb4_latvian_ci", false}, + {227, "utf8mb4", "utf8mb4_romanian_ci", false}, + {228, "utf8mb4", "utf8mb4_slovenian_ci", false}, + {229, "utf8mb4", "utf8mb4_polish_ci", false}, + {230, "utf8mb4", "utf8mb4_estonian_ci", false}, + {231, "utf8mb4", "utf8mb4_spanish_ci", false}, + {232, "utf8mb4", "utf8mb4_swedish_ci", false}, + {233, "utf8mb4", "utf8mb4_turkish_ci", false}, + {234, "utf8mb4", "utf8mb4_czech_ci", false}, + {235, "utf8mb4", "utf8mb4_danish_ci", false}, + {236, "utf8mb4", "utf8mb4_lithuanian_ci", false}, + {237, "utf8mb4", "utf8mb4_slovak_ci", false}, + {238, "utf8mb4", "utf8mb4_spanish2_ci", false}, + {239, "utf8mb4", "utf8mb4_roman_ci", false}, + {240, "utf8mb4", "utf8mb4_persian_ci", false}, + {241, "utf8mb4", "utf8mb4_esperanto_ci", false}, + {242, "utf8mb4", "utf8mb4_hungarian_ci", false}, + {243, "utf8mb4", "utf8mb4_sinhala_ci", false}, + {244, "utf8mb4", "utf8mb4_german2_ci", false}, + {245, "utf8mb4", "utf8mb4_croatian_ci", false}, + {246, "utf8mb4", "utf8mb4_unicode_520_ci", false}, + {247, "utf8mb4", "utf8mb4_vietnamese_ci", false}, + {255, "utf8mb4", "utf8mb4_0900_ai_ci", false}, +} + +// init method always puts to the end of file. +func init() { + for _, c := range charsetInfos { + charsets[c.Name] = c + desc := &Desc{ + Name: c.Name, + DefaultCollation: c.DefaultCollation, + Desc: c.Desc, + Maxlen: c.Maxlen, + } + descs = append(descs, desc) + } + + for _, c := range collations { + collationsIDMap[c.ID] = c + + if _, ok := supportedCollationNames[c.Name]; ok { + supportedCollations = append(supportedCollations, c) + } + + if charset, ok := charsets[c.CharsetName]; ok { + charset.Collations[c.Name] = c + } + } + + for id, name := range mysql.Collations { + collationsNameMap[name] = collationsIDMap[int(id)] + } +} diff --git a/parser/charset/charset_test.go b/parser/charset/charset_test.go new file mode 100644 index 0000000..79850df --- /dev/null +++ b/parser/charset/charset_test.go @@ -0,0 +1,175 @@ +// Copyright 2015 PingCAP, Inc. 
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package charset
+
+import (
+	"math/rand"
+	"testing"
+
+	. "github.com/pingcap/check"
+)
+
+func TestT(t *testing.T) {
+	CustomVerboseFlag = true
+	TestingT(t)
+}
+
+var _ = Suite(&testCharsetSuite{})
+
+type testCharsetSuite struct {
+}
+
+func testValidCharset(c *C, charset string, collation string, expect bool) {
+	b := ValidCharsetAndCollation(charset, collation)
+	c.Assert(b, Equals, expect)
+}
+
+func (s *testCharsetSuite) TestValidCharset(c *C) {
+	tests := []struct {
+		cs   string
+		co   string
+		succ bool
+	}{
+		{"utf8", "utf8_general_ci", true},
+		{"", "utf8_general_ci", true},
+		{"utf8mb4", "utf8mb4_bin", true},
+		{"latin1", "latin1_bin", true},
+		{"utf8", "utf8_invalid_ci", false},
+		{"utf16", "utf16_bin", false},
+		{"gb2312", "gb2312_chinese_ci", false},
+		{"UTF8", "UTF8_BIN", true},
+		{"UTF8", "utf8_bin", true},
+		{"UTF8MB4", "utf8mb4_bin", true},
+		{"UTF8MB4", "UTF8MB4_bin", true},
+		{"UTF8MB4", "UTF8MB4_general_ci", true},
+		{"Utf8", "uTf8_bIN", true},
+	}
+	for _, tt := range tests {
+		testValidCharset(c, tt.cs, tt.co, tt.succ)
+	}
+}
+
+func (s *testCharsetSuite) TestGetSupportedCharsets(c *C) {
+	charset := &Charset{"test", "test_bin", nil, "Test", 5}
+	charsetInfos = append(charsetInfos, charset)
+	descs := GetSupportedCharsets()
+	c.Assert(len(descs), Equals, len(charsetInfos)-1)
+}
+
+func testGetDefaultCollation(c *C, charset string, expectCollation string, succ bool) {
+	b, err := GetDefaultCollation(charset)
+	if !succ {
+		c.Assert(err, NotNil)
+		return
+	}
+	c.Assert(b, Equals, expectCollation)
+}
+
+func (s *testCharsetSuite) TestGetDefaultCollation(c *C) {
+	tests := []struct {
+		cs   string
+		co   string
+		succ bool
+	}{
+		{"utf8", "utf8_bin", true},
+		{"UTF8", "utf8_bin", true},
+		{"utf8mb4", "utf8mb4_bin", true},
+		{"ascii", "ascii_bin", true},
+		{"binary", "binary", true},
+		{"latin1", "latin1_bin", true},
+		{"invalid_cs", "", false},
+		{"", "utf8_bin", false},
+	}
+	for _, tt := range tests {
+		testGetDefaultCollation(c, tt.cs, tt.co, tt.succ)
+	}
+
+	// Test the consistency of the collations table and the charset desc table.
+	charset_num := 0
+	for _, collate := range collations {
+		if collate.IsDefault {
+			if desc, ok := charsets[collate.CharsetName]; ok {
+				c.Assert(collate.Name, Equals, desc.DefaultCollation)
+				charset_num += 1
+			}
+		}
+	}
+	c.Assert(charset_num, Equals, len(charsets))
+}
+
+func (s *testCharsetSuite) TestSupportedCollations(c *C) {
+	// Every supported collation name has a matching entry in supportedCollations.
+	c.Assert(len(supportedCollations), Equals, len(supportedCollationNames))
+
+	// The default collations of the supported charsets are a subset of the supported collations.
+	errMsg := "Charset [%v] is supported but its default collation [%v] is not."
+ for _, desc := range GetSupportedCharsets() { + found := false + for _, c := range GetSupportedCollations() { + if desc.DefaultCollation == c.Name { + found = true + break + } + } + c.Assert(found, IsTrue, Commentf(errMsg, desc.Name, desc.DefaultCollation)) + } +} + +func (s *testCharsetSuite) TestGetCharsetDesc(c *C) { + tests := []struct { + cs string + result string + succ bool + }{ + {"utf8", "utf8", true}, + {"UTF8", "utf8", true}, + {"utf8mb4", "utf8mb4", true}, + {"ascii", "ascii", true}, + {"binary", "binary", true}, + {"latin1", "latin1", true}, + {"invalid_cs", "", false}, + {"", "utf8_bin", false}, + } + for _, tt := range tests { + desc, err := GetCharsetDesc(tt.cs) + if !tt.succ { + c.Assert(err, NotNil) + } else { + c.Assert(desc.Name, Equals, tt.result) + } + } +} + +func (s *testCharsetSuite) TestGetCollationByName(c *C) { + + for _, collation := range collations { + coll, err := GetCollationByName(collation.Name) + c.Assert(err, IsNil) + c.Assert(coll, Equals, collation) + } + + _, err := GetCollationByName("non_exist") + c.Assert(err, ErrorMatches, "\\[ddl:1273\\]Unknown collation: 'non_exist'") +} + +func BenchmarkGetCharsetDesc(b *testing.B) { + b.ResetTimer() + charsets := []string{CharsetUTF8, CharsetUTF8MB4, CharsetASCII, CharsetLatin1, CharsetBin} + index := rand.Intn(len(charsets)) + cs := charsets[index] + + for i := 0; i < b.N; i++ { + GetCharsetDesc(cs) + } +} diff --git a/parser/charset/encoding_table.go b/parser/charset/encoding_table.go new file mode 100644 index 0000000..37a5550 --- /dev/null +++ b/parser/charset/encoding_table.go @@ -0,0 +1,260 @@ +// Copyright 2015 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package charset + +import ( + "strings" + + "golang.org/x/text/encoding" + "golang.org/x/text/encoding/charmap" + "golang.org/x/text/encoding/japanese" + "golang.org/x/text/encoding/korean" + "golang.org/x/text/encoding/simplifiedchinese" + "golang.org/x/text/encoding/traditionalchinese" + "golang.org/x/text/encoding/unicode" +) + +// Lookup returns the encoding with the specified label, and its canonical +// name. It returns nil and the empty string if label is not one of the +// standard encodings for HTML. Matching is case-insensitive and ignores +// leading and trailing whitespace. 
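+//
+// For example (based on the table below):
+//
+//	enc, name := Lookup("GBK")      // simplifiedchinese.GBK, "gbk"
+//	enc, name = Lookup(" latin1 ")  // charmap.Windows1252, "windows-1252"
+//	enc, name = Lookup("no-such")   // nil, ""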
+func Lookup(label string) (e encoding.Encoding, name string) { + label = strings.ToLower(strings.Trim(label, "\t\n\r\f ")) + enc := encodings[label] + return enc.e, enc.name +} + +var encodings = map[string]struct { + e encoding.Encoding + name string +}{ + "unicode-1-1-utf-8": {encoding.Nop, "utf-8"}, + "utf-8": {encoding.Nop, "utf-8"}, + "utf8": {encoding.Nop, "utf-8"}, + "utf8mb4": {encoding.Nop, "utf-8"}, + "binary": {encoding.Nop, "binary"}, + "866": {charmap.CodePage866, "ibm866"}, + "cp866": {charmap.CodePage866, "ibm866"}, + "csibm866": {charmap.CodePage866, "ibm866"}, + "ibm866": {charmap.CodePage866, "ibm866"}, + "csisolatin2": {charmap.ISO8859_2, "iso-8859-2"}, + "iso-8859-2": {charmap.ISO8859_2, "iso-8859-2"}, + "iso-ir-101": {charmap.ISO8859_2, "iso-8859-2"}, + "iso8859-2": {charmap.ISO8859_2, "iso-8859-2"}, + "iso88592": {charmap.ISO8859_2, "iso-8859-2"}, + "iso_8859-2": {charmap.ISO8859_2, "iso-8859-2"}, + "iso_8859-2:1987": {charmap.ISO8859_2, "iso-8859-2"}, + "l2": {charmap.ISO8859_2, "iso-8859-2"}, + "latin2": {charmap.ISO8859_2, "iso-8859-2"}, + "csisolatin3": {charmap.ISO8859_3, "iso-8859-3"}, + "iso-8859-3": {charmap.ISO8859_3, "iso-8859-3"}, + "iso-ir-109": {charmap.ISO8859_3, "iso-8859-3"}, + "iso8859-3": {charmap.ISO8859_3, "iso-8859-3"}, + "iso88593": {charmap.ISO8859_3, "iso-8859-3"}, + "iso_8859-3": {charmap.ISO8859_3, "iso-8859-3"}, + "iso_8859-3:1988": {charmap.ISO8859_3, "iso-8859-3"}, + "l3": {charmap.ISO8859_3, "iso-8859-3"}, + "latin3": {charmap.ISO8859_3, "iso-8859-3"}, + "csisolatin4": {charmap.ISO8859_4, "iso-8859-4"}, + "iso-8859-4": {charmap.ISO8859_4, "iso-8859-4"}, + "iso-ir-110": {charmap.ISO8859_4, "iso-8859-4"}, + "iso8859-4": {charmap.ISO8859_4, "iso-8859-4"}, + "iso88594": {charmap.ISO8859_4, "iso-8859-4"}, + "iso_8859-4": {charmap.ISO8859_4, "iso-8859-4"}, + "iso_8859-4:1988": {charmap.ISO8859_4, "iso-8859-4"}, + "l4": {charmap.ISO8859_4, "iso-8859-4"}, + "latin4": {charmap.ISO8859_4, "iso-8859-4"}, + "csisolatincyrillic": {charmap.ISO8859_5, "iso-8859-5"}, + "cyrillic": {charmap.ISO8859_5, "iso-8859-5"}, + "iso-8859-5": {charmap.ISO8859_5, "iso-8859-5"}, + "iso-ir-144": {charmap.ISO8859_5, "iso-8859-5"}, + "iso8859-5": {charmap.ISO8859_5, "iso-8859-5"}, + "iso88595": {charmap.ISO8859_5, "iso-8859-5"}, + "iso_8859-5": {charmap.ISO8859_5, "iso-8859-5"}, + "iso_8859-5:1988": {charmap.ISO8859_5, "iso-8859-5"}, + "arabic": {charmap.ISO8859_6, "iso-8859-6"}, + "asmo-708": {charmap.ISO8859_6, "iso-8859-6"}, + "csiso88596e": {charmap.ISO8859_6, "iso-8859-6"}, + "csiso88596i": {charmap.ISO8859_6, "iso-8859-6"}, + "csisolatinarabic": {charmap.ISO8859_6, "iso-8859-6"}, + "ecma-114": {charmap.ISO8859_6, "iso-8859-6"}, + "iso-8859-6": {charmap.ISO8859_6, "iso-8859-6"}, + "iso-8859-6-e": {charmap.ISO8859_6, "iso-8859-6"}, + "iso-8859-6-i": {charmap.ISO8859_6, "iso-8859-6"}, + "iso-ir-127": {charmap.ISO8859_6, "iso-8859-6"}, + "iso8859-6": {charmap.ISO8859_6, "iso-8859-6"}, + "iso88596": {charmap.ISO8859_6, "iso-8859-6"}, + "iso_8859-6": {charmap.ISO8859_6, "iso-8859-6"}, + "iso_8859-6:1987": {charmap.ISO8859_6, "iso-8859-6"}, + "csisolatingreek": {charmap.ISO8859_7, "iso-8859-7"}, + "ecma-118": {charmap.ISO8859_7, "iso-8859-7"}, + "elot_928": {charmap.ISO8859_7, "iso-8859-7"}, + "greek": {charmap.ISO8859_7, "iso-8859-7"}, + "greek8": {charmap.ISO8859_7, "iso-8859-7"}, + "iso-8859-7": {charmap.ISO8859_7, "iso-8859-7"}, + "iso-ir-126": {charmap.ISO8859_7, "iso-8859-7"}, + "iso8859-7": {charmap.ISO8859_7, "iso-8859-7"}, + "iso88597": {charmap.ISO8859_7, 
"iso-8859-7"}, + "iso_8859-7": {charmap.ISO8859_7, "iso-8859-7"}, + "iso_8859-7:1987": {charmap.ISO8859_7, "iso-8859-7"}, + "sun_eu_greek": {charmap.ISO8859_7, "iso-8859-7"}, + "csiso88598e": {charmap.ISO8859_8, "iso-8859-8"}, + "csisolatinhebrew": {charmap.ISO8859_8, "iso-8859-8"}, + "hebrew": {charmap.ISO8859_8, "iso-8859-8"}, + "iso-8859-8": {charmap.ISO8859_8, "iso-8859-8"}, + "iso-8859-8-e": {charmap.ISO8859_8, "iso-8859-8"}, + "iso-ir-138": {charmap.ISO8859_8, "iso-8859-8"}, + "iso8859-8": {charmap.ISO8859_8, "iso-8859-8"}, + "iso88598": {charmap.ISO8859_8, "iso-8859-8"}, + "iso_8859-8": {charmap.ISO8859_8, "iso-8859-8"}, + "iso_8859-8:1988": {charmap.ISO8859_8, "iso-8859-8"}, + "visual": {charmap.ISO8859_8, "iso-8859-8"}, + "csiso88598i": {charmap.ISO8859_8, "iso-8859-8-i"}, + "iso-8859-8-i": {charmap.ISO8859_8, "iso-8859-8-i"}, + "logical": {charmap.ISO8859_8, "iso-8859-8-i"}, + "csisolatin6": {charmap.ISO8859_10, "iso-8859-10"}, + "iso-8859-10": {charmap.ISO8859_10, "iso-8859-10"}, + "iso-ir-157": {charmap.ISO8859_10, "iso-8859-10"}, + "iso8859-10": {charmap.ISO8859_10, "iso-8859-10"}, + "iso885910": {charmap.ISO8859_10, "iso-8859-10"}, + "l6": {charmap.ISO8859_10, "iso-8859-10"}, + "latin6": {charmap.ISO8859_10, "iso-8859-10"}, + "iso-8859-13": {charmap.ISO8859_13, "iso-8859-13"}, + "iso8859-13": {charmap.ISO8859_13, "iso-8859-13"}, + "iso885913": {charmap.ISO8859_13, "iso-8859-13"}, + "iso-8859-14": {charmap.ISO8859_14, "iso-8859-14"}, + "iso8859-14": {charmap.ISO8859_14, "iso-8859-14"}, + "iso885914": {charmap.ISO8859_14, "iso-8859-14"}, + "csisolatin9": {charmap.ISO8859_15, "iso-8859-15"}, + "iso-8859-15": {charmap.ISO8859_15, "iso-8859-15"}, + "iso8859-15": {charmap.ISO8859_15, "iso-8859-15"}, + "iso885915": {charmap.ISO8859_15, "iso-8859-15"}, + "iso_8859-15": {charmap.ISO8859_15, "iso-8859-15"}, + "l9": {charmap.ISO8859_15, "iso-8859-15"}, + "iso-8859-16": {charmap.ISO8859_16, "iso-8859-16"}, + "cskoi8r": {charmap.KOI8R, "koi8-r"}, + "koi": {charmap.KOI8R, "koi8-r"}, + "koi8": {charmap.KOI8R, "koi8-r"}, + "koi8-r": {charmap.KOI8R, "koi8-r"}, + "koi8_r": {charmap.KOI8R, "koi8-r"}, + "koi8-u": {charmap.KOI8U, "koi8-u"}, + "csmacintosh": {charmap.Macintosh, "macintosh"}, + "mac": {charmap.Macintosh, "macintosh"}, + "macintosh": {charmap.Macintosh, "macintosh"}, + "x-mac-roman": {charmap.Macintosh, "macintosh"}, + "dos-874": {charmap.Windows874, "windows-874"}, + "iso-8859-11": {charmap.Windows874, "windows-874"}, + "iso8859-11": {charmap.Windows874, "windows-874"}, + "iso885911": {charmap.Windows874, "windows-874"}, + "tis-620": {charmap.Windows874, "windows-874"}, + "windows-874": {charmap.Windows874, "windows-874"}, + "cp1250": {charmap.Windows1250, "windows-1250"}, + "windows-1250": {charmap.Windows1250, "windows-1250"}, + "x-cp1250": {charmap.Windows1250, "windows-1250"}, + "cp1251": {charmap.Windows1251, "windows-1251"}, + "windows-1251": {charmap.Windows1251, "windows-1251"}, + "x-cp1251": {charmap.Windows1251, "windows-1251"}, + "ansi_x3.4-1968": {charmap.Windows1252, "windows-1252"}, + "ascii": {charmap.Windows1252, "windows-1252"}, + "cp1252": {charmap.Windows1252, "windows-1252"}, + "cp819": {charmap.Windows1252, "windows-1252"}, + "csisolatin1": {charmap.Windows1252, "windows-1252"}, + "ibm819": {charmap.Windows1252, "windows-1252"}, + "iso-8859-1": {charmap.Windows1252, "windows-1252"}, + "iso-ir-100": {charmap.Windows1252, "windows-1252"}, + "iso8859-1": {charmap.Windows1252, "windows-1252"}, + "iso88591": {charmap.Windows1252, "windows-1252"}, + "iso_8859-1": 
{charmap.Windows1252, "windows-1252"}, + "iso_8859-1:1987": {charmap.Windows1252, "windows-1252"}, + "l1": {charmap.Windows1252, "windows-1252"}, + "latin1": {charmap.Windows1252, "windows-1252"}, + "us-ascii": {charmap.Windows1252, "windows-1252"}, + "windows-1252": {charmap.Windows1252, "windows-1252"}, + "x-cp1252": {charmap.Windows1252, "windows-1252"}, + "cp1253": {charmap.Windows1253, "windows-1253"}, + "windows-1253": {charmap.Windows1253, "windows-1253"}, + "x-cp1253": {charmap.Windows1253, "windows-1253"}, + "cp1254": {charmap.Windows1254, "windows-1254"}, + "csisolatin5": {charmap.Windows1254, "windows-1254"}, + "iso-8859-9": {charmap.Windows1254, "windows-1254"}, + "iso-ir-148": {charmap.Windows1254, "windows-1254"}, + "iso8859-9": {charmap.Windows1254, "windows-1254"}, + "iso88599": {charmap.Windows1254, "windows-1254"}, + "iso_8859-9": {charmap.Windows1254, "windows-1254"}, + "iso_8859-9:1989": {charmap.Windows1254, "windows-1254"}, + "l5": {charmap.Windows1254, "windows-1254"}, + "latin5": {charmap.Windows1254, "windows-1254"}, + "windows-1254": {charmap.Windows1254, "windows-1254"}, + "x-cp1254": {charmap.Windows1254, "windows-1254"}, + "cp1255": {charmap.Windows1255, "windows-1255"}, + "windows-1255": {charmap.Windows1255, "windows-1255"}, + "x-cp1255": {charmap.Windows1255, "windows-1255"}, + "cp1256": {charmap.Windows1256, "windows-1256"}, + "windows-1256": {charmap.Windows1256, "windows-1256"}, + "x-cp1256": {charmap.Windows1256, "windows-1256"}, + "cp1257": {charmap.Windows1257, "windows-1257"}, + "windows-1257": {charmap.Windows1257, "windows-1257"}, + "x-cp1257": {charmap.Windows1257, "windows-1257"}, + "cp1258": {charmap.Windows1258, "windows-1258"}, + "windows-1258": {charmap.Windows1258, "windows-1258"}, + "x-cp1258": {charmap.Windows1258, "windows-1258"}, + "x-mac-cyrillic": {charmap.MacintoshCyrillic, "x-mac-cyrillic"}, + "x-mac-ukrainian": {charmap.MacintoshCyrillic, "x-mac-cyrillic"}, + "chinese": {simplifiedchinese.GBK, "gbk"}, + "csgb2312": {simplifiedchinese.GBK, "gbk"}, + "csiso58gb231280": {simplifiedchinese.GBK, "gbk"}, + "gb2312": {simplifiedchinese.GBK, "gbk"}, + "gb_2312": {simplifiedchinese.GBK, "gbk"}, + "gb_2312-80": {simplifiedchinese.GBK, "gbk"}, + "gbk": {simplifiedchinese.GBK, "gbk"}, + "iso-ir-58": {simplifiedchinese.GBK, "gbk"}, + "x-gbk": {simplifiedchinese.GBK, "gbk"}, + "gb18030": {simplifiedchinese.GB18030, "gb18030"}, + "hz-gb-2312": {simplifiedchinese.HZGB2312, "hz-gb-2312"}, + "big5": {traditionalchinese.Big5, "big5"}, + "big5-hkscs": {traditionalchinese.Big5, "big5"}, + "cn-big5": {traditionalchinese.Big5, "big5"}, + "csbig5": {traditionalchinese.Big5, "big5"}, + "x-x-big5": {traditionalchinese.Big5, "big5"}, + "cseucpkdfmtjapanese": {japanese.EUCJP, "euc-jp"}, + "euc-jp": {japanese.EUCJP, "euc-jp"}, + "x-euc-jp": {japanese.EUCJP, "euc-jp"}, + "csiso2022jp": {japanese.ISO2022JP, "iso-2022-jp"}, + "iso-2022-jp": {japanese.ISO2022JP, "iso-2022-jp"}, + "csshiftjis": {japanese.ShiftJIS, "shift_jis"}, + "ms_kanji": {japanese.ShiftJIS, "shift_jis"}, + "shift-jis": {japanese.ShiftJIS, "shift_jis"}, + "shift_jis": {japanese.ShiftJIS, "shift_jis"}, + "sjis": {japanese.ShiftJIS, "shift_jis"}, + "windows-31j": {japanese.ShiftJIS, "shift_jis"}, + "x-sjis": {japanese.ShiftJIS, "shift_jis"}, + "cseuckr": {korean.EUCKR, "euc-kr"}, + "csksc56011987": {korean.EUCKR, "euc-kr"}, + "euc-kr": {korean.EUCKR, "euc-kr"}, + "iso-ir-149": {korean.EUCKR, "euc-kr"}, + "korean": {korean.EUCKR, "euc-kr"}, + "ks_c_5601-1987": {korean.EUCKR, "euc-kr"}, + 
"ks_c_5601-1989": {korean.EUCKR, "euc-kr"}, + "ksc5601": {korean.EUCKR, "euc-kr"}, + "ksc_5601": {korean.EUCKR, "euc-kr"}, + "windows-949": {korean.EUCKR, "euc-kr"}, + "csiso2022kr": {encoding.Replacement, "replacement"}, + "iso-2022-kr": {encoding.Replacement, "replacement"}, + "iso-2022-cn": {encoding.Replacement, "replacement"}, + "iso-2022-cn-ext": {encoding.Replacement, "replacement"}, + "utf-16be": {unicode.UTF16(unicode.BigEndian, unicode.IgnoreBOM), "utf-16be"}, + "utf-16": {unicode.UTF16(unicode.LittleEndian, unicode.IgnoreBOM), "utf-16le"}, + "utf-16le": {unicode.UTF16(unicode.LittleEndian, unicode.IgnoreBOM), "utf-16le"}, + "x-user-defined": {charmap.XUserDefined, "x-user-defined"}, +} diff --git a/parser/consistent_test.go b/parser/consistent_test.go new file mode 100644 index 0000000..fc49a5e --- /dev/null +++ b/parser/consistent_test.go @@ -0,0 +1,108 @@ +// Copyright 2017 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package parser + +import ( + "io/ioutil" + "os" + "path" + "runtime" + "sort" + "strings" + + . "github.com/pingcap/check" +) + +var _ = Suite(&testConsistentSuite{}) + +type testConsistentSuite struct { +} + +func (s *testConsistentSuite) TestKeywordConsistent(c *C) { + _, filename, _, _ := runtime.Caller(0) + parserFilename := path.Join(path.Dir(filename), "parser.y") + parserFile, err := os.Open(parserFilename) + c.Assert(err, IsNil) + data, err := ioutil.ReadAll(parserFile) + c.Assert(err, IsNil) + content := string(data) + + reservedKeywordStartMarker := "\t/* The following tokens belong to ReservedKeyword. Notice: make sure these tokens are contained in ReservedKeyword. */" + unreservedKeywordStartMarker := "\t/* The following tokens belong to UnReservedKeyword. Notice: make sure these tokens are contained in UnReservedKeyword. */" + notKeywordTokenStartMarker := "\t/* The following tokens belong to NotKeywordToken. Notice: make sure these tokens are contained in NotKeywordToken. */" + tidbKeywordStartMarker := "\t/* The following tokens belong to TiDBKeyword. Notice: make sure these tokens are contained in TiDBKeyword. 
*/" + identTokenEndMarker := "%token\t" + + reservedKeywords := extractKeywords(content, reservedKeywordStartMarker, unreservedKeywordStartMarker) + + unreservedKeywords := extractKeywords(content, unreservedKeywordStartMarker, notKeywordTokenStartMarker) + + notKeywordTokens := extractKeywords(content, notKeywordTokenStartMarker, tidbKeywordStartMarker) + + tidbKeywords := extractKeywords(content, tidbKeywordStartMarker, identTokenEndMarker) + + for k, v := range aliases { + c.Assert(k != v, IsTrue) + c.Assert(tokenMap[k], Equals, tokenMap[v]) + } + keywordCount := len(reservedKeywords) + len(unreservedKeywords) + len(notKeywordTokens) + len(tidbKeywords) + c.Assert(len(tokenMap)-len(aliases), Equals, keywordCount) + + unreservedCollectionDef := extractKeywordsFromCollectionDef(content, "\nUnReservedKeyword:") + c.Assert(unreservedKeywords, DeepEquals, unreservedCollectionDef) + + notKeywordTokensCollectionDef := extractKeywordsFromCollectionDef(content, "\nNotKeywordToken:") + c.Assert(notKeywordTokens, DeepEquals, notKeywordTokensCollectionDef) + + tidbKeywordsCollectionDef := extractKeywordsFromCollectionDef(content, "\nTiDBKeyword:") + c.Assert(tidbKeywords, DeepEquals, tidbKeywordsCollectionDef) +} + +func extractMiddle(str, startMarker, endMarker string) string { + startIdx := strings.Index(str, startMarker) + if startIdx == -1 { + return "" + } + str = str[startIdx+len(startMarker):] + endIdx := strings.Index(str, endMarker) + if endIdx == -1 { + return "" + } + return str[:endIdx] +} + +func extractQuotedWords(strs []string) []string { + var words []string + for _, str := range strs { + word := extractMiddle(str, "\"", "\"") + if word == "" { + continue + } + words = append(words, word) + } + sort.Strings(words) + return words +} + +func extractKeywords(content, startMarker, endMarker string) []string { + keywordSection := extractMiddle(content, startMarker, endMarker) + lines := strings.Split(keywordSection, "\n") + return extractQuotedWords(lines) +} + +func extractKeywordsFromCollectionDef(content, startMarker string) []string { + keywordSection := extractMiddle(content, startMarker, "\n\n") + words := strings.Split(keywordSection, "|") + return extractQuotedWords(words) +} diff --git a/parser/format/format.go b/parser/format/format.go new file mode 100644 index 0000000..0a14a6d --- /dev/null +++ b/parser/format/format.go @@ -0,0 +1,195 @@ +// Copyright (c) 2014 The sortutil Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSES/STRUTIL-LICENSE file. + +// Copyright 2015 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package format + +import ( + "bytes" + "fmt" + "io" +) + +const ( + st0 = iota + stBOL + stPERC + stBOLPERC +) + +// Formatter is an io.Writer extended formatter by a fmt.Printf like function Format. 
+type Formatter interface { + io.Writer + Format(format string, args ...interface{}) (n int, errno error) +} + +type indentFormatter struct { + io.Writer + indent []byte + indentLevel int + state int +} + +var replace = map[rune]string{ + '\000': "\\0", + '\'': "''", + '\n': "\\n", + '\r': "\\r", +} + +// IndentFormatter returns a new Formatter which interprets %i and %u in the +// Format() formats string as indent and unindent commands. The commands can +// nest. The Formatter writes to io.Writer 'w' and inserts one 'indent' +// string per current indent level value. +// Behaviour of commands reaching negative indent levels is undefined. +// IndentFormatter(os.Stdout, "\t").Format("abc%d%%e%i\nx\ny\n%uz\n", 3) +// output: +// abc3%e +// x +// y +// z +// The Go quoted string literal form of the above is: +// "abc%%e\n\tx\n\tx\nz\n" +// The commands can be scattered between separate invocations of Format(), +// i.e. the formatter keeps track of the indent level and knows if it is +// positioned on start of a line and should emit indentation(s). +// The same output as above can be produced by e.g.: +// f := IndentFormatter(os.Stdout, " ") +// f.Format("abc%d%%e%i\nx\n", 3) +// f.Format("y\n%uz\n") +func IndentFormatter(w io.Writer, indent string) Formatter { + return &indentFormatter{w, []byte(indent), 0, stBOL} +} + +func (f *indentFormatter) format(flat bool, format string, args ...interface{}) (n int, errno error) { + var buf = make([]byte, 0) + for i := 0; i < len(format); i++ { + c := format[i] + switch f.state { + case st0: + switch c { + case '\n': + cc := c + if flat && f.indentLevel != 0 { + cc = ' ' + } + buf = append(buf, cc) + f.state = stBOL + case '%': + f.state = stPERC + default: + buf = append(buf, c) + } + case stBOL: + switch c { + case '\n': + cc := c + if flat && f.indentLevel != 0 { + cc = ' ' + } + buf = append(buf, cc) + case '%': + f.state = stBOLPERC + default: + if !flat { + for i := 0; i < f.indentLevel; i++ { + buf = append(buf, f.indent...) + } + } + buf = append(buf, c) + f.state = st0 + } + case stBOLPERC: + switch c { + case 'i': + f.indentLevel++ + f.state = stBOL + case 'u': + f.indentLevel-- + f.state = stBOL + default: + if !flat { + for i := 0; i < f.indentLevel; i++ { + buf = append(buf, f.indent...) + } + } + buf = append(buf, '%', c) + f.state = st0 + } + case stPERC: + switch c { + case 'i': + f.indentLevel++ + f.state = st0 + case 'u': + f.indentLevel-- + f.state = st0 + default: + buf = append(buf, '%', c) + f.state = st0 + } + default: + panic("unexpected state") + } + } + switch f.state { + case stPERC, stBOLPERC: + buf = append(buf, '%') + } + return f.Write([]byte(fmt.Sprintf(string(buf), args...))) +} + +// Format implements Format interface. +func (f *indentFormatter) Format(format string, args ...interface{}) (n int, errno error) { + return f.format(false, format, args...) +} + +type flatFormatter indentFormatter + +// FlatFormatter returns a newly created Formatter with the same functionality as the one returned +// by IndentFormatter except it allows a newline in the 'format' string argument of Format +// to pass through if the indent level is current zero. +// +// If the indent level is non-zero then such new lines are changed to a space character. +// There is no indent string, the %i and %u format verbs are used solely to determine the indent level. +// +// The FlatFormatter is intended for flattening of normally nested structure textual representation to +// a one top level structure per line form. 
+// FlatFormatter(os.Stdout, " ").Format("abc%d%%e%i\nx\ny\n%uz\n", 3) +// output in the form of a Go quoted string literal: +// "abc3%%e x y z\n" +func FlatFormatter(w io.Writer) Formatter { + return (*flatFormatter)(IndentFormatter(w, "").(*indentFormatter)) +} + +// Format implements Format interface. +func (f *flatFormatter) Format(format string, args ...interface{}) (n int, errno error) { + return (*indentFormatter)(f).format(true, format, args...) +} + +// OutputFormat output escape character with backslash. +func OutputFormat(s string) string { + var buf bytes.Buffer + for _, old := range s { + if newVal, ok := replace[old]; ok { + buf.WriteString(newVal) + continue + } + buf.WriteRune(old) + } + + return buf.String() +} diff --git a/parser/format/format_test.go b/parser/format/format_test.go new file mode 100644 index 0000000..b97ae6b --- /dev/null +++ b/parser/format/format_test.go @@ -0,0 +1,58 @@ +// Copyright 2015 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package format + +import ( + "bytes" + "io/ioutil" + "testing" + + . "github.com/pingcap/check" +) + +func TestT(t *testing.T) { + CustomVerboseFlag = true + TestingT(t) +} + +var _ = Suite(&testFormatSuite{}) + +type testFormatSuite struct { +} + +func checkFormat(c *C, f Formatter, buf *bytes.Buffer, str, expect string) { + _, err := f.Format(str, 3) + c.Assert(err, IsNil) + b, err := ioutil.ReadAll(buf) + c.Assert(err, IsNil) + c.Assert(string(b), Equals, expect) +} + +func (s *testFormatSuite) TestFormat(c *C) { + str := "abc%d%%e%i\nx\ny\n%uz\n" + buf := &bytes.Buffer{} + f := IndentFormatter(buf, "\t") + expect := `abc3%e + x + y +z +` + checkFormat(c, f, buf, str, expect) + + str = "abc%d%%e%i\nx\ny\n%uz\n%i\n" + buf = &bytes.Buffer{} + f = FlatFormatter(buf) + expect = "abc3%e x y z\n " + checkFormat(c, f, buf, str, expect) +} diff --git a/parser/goyacc/main.go b/parser/goyacc/main.go new file mode 100644 index 0000000..772f9cc --- /dev/null +++ b/parser/goyacc/main.go @@ -0,0 +1,827 @@ +// Copyright 2016 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +// Copyright 2014 The goyacc Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// This source code uses portions of code previously published in the Go tool +// yacc[0] program, the respective license can be found in the LICENSE-GO-YACC +// file. + +// Goyacc is a version of yacc generating Go parsers. +// +// Usage +// +// Note: If no non flag arguments are given, goyacc reads standard input. 
+// +// goyacc [options] [input] +// +// options and (defaults) +// -c Report state closures. (false) +// -cr Check all states are reducible. (false) +// -dlval Debug value when runtime yyDebug >= 3. ("lval") +// -dlvalf Debug format of -dlval. ("%+v") +// -ex Explain how were conflicts resolved. (false) +// -l Disable line directives, for compatibility only - ignored. (false) +// -la Report all lookahead sets. (false) +// -o outputFile Parser output. ("y.go") +// -p prefix Name prefix to use in generated code. ("yy") +// -v reportFile Create grammar report. ("y.output") +// -xe examplesFile Generate error messages by examples. ("") +// -xegen examplesFile Generate a file suitable for -xe automatically from the grammar. +// The file must not exist. ("") +// +// +// +// Changelog +// +// 2015-03-24: The search for a custom error message is now extended to include +// also the last state that was shifted into, if any. This change resolves a +// problem in which a lookahead symbol is valid for a reduce action in state A, +// but the same symbol is later never accepted by any shift action in some +// state B which is popped from the state stack after the reduction is +// performed. The computed from example state is A but when the error is +// actually detected, the state is now B and the custom error was thus not +// used. +// +// 2015-02-23: Added -xegen flag. It can be used to automagically generate a +// skeleton errors by example file which can be, for example, edited and/or +// submited later as an argument of the -xe option. +// +// 2014-12-18: Support %precedence for better bison compatibility[3]. The +// actual changes are in packages goyacc is dependent on. Goyacc users should +// rebuild the binary: +// +// $ go get -u github.com/cznic/goyacc +// +// 2014-12-02: Added support for the optional yyLexerEx interface. The Reduced +// method can be useful for debugging and/or automatically producing examples +// by parsing code fragments. If it returns true the parser exits immediately +// with return value -1. +// +// Overview +// +// The generated parser is reentrant and mostly backwards compatible with +// parsers generated by go tool yacc[0]. yyParse expects to be given an +// argument that conforms to the following interface: +// +// type yyLexer interface { +// Lex(lval *yySymType) int +// Errorf(format string, a ...interface{}) +// Errors() (warns []error, errs []error) +// } +// +// Optionally the argument to yyParse may implement the following interface: +// +// type yyLexerEx interface { +// yyLexer +// // Hook for recording a reduction. +// Reduced(rule, state int, lval *yySymType) (stop bool) // Client should copy *lval. +// } +// +// Lex should return the token identifier, and place other token information in +// lval (which replaces the usual yylval). Error is equivalent to yyerror in +// the original yacc. +// +// Code inside the parser may refer to the variable yylex, which holds the +// yyLexer passed to Parse. +// +// Multiple grammars compiled into a single program should be placed in +// distinct packages. If that is impossible, the "-p prefix" flag to yacc sets +// the prefix, by default yy, that begins the names of symbols, including +// types, the parser, and the lexer, generated and referenced by yacc's +// generated code. Setting it to distinct values allows multiple grammars to be +// placed in a single package. +// +// Differences wrt go tool yacc +// +// - goyacc implements ideas from "Generating LR Syntax Error Messages from +// Examples"[1]. 
Use the -xe flag to pass a name of the example file. For more +// details about the example format please see [2]. +// +// - The grammar report includes example token sequences leading to the +// particular state. Can help understanding conflicts. +// +// - Minor changes in parser debug output. +// +// Links +// +// Referenced from elsewhere: +// +// [0]: http://golang.org/cmd/yacc/ +// [1]: http://people.via.ecp.fr/~stilgar/doc/compilo/parser/Generating%20LR%20Syntax%20Error%20Messages.pdf +// [2]: http://godoc.org/github.com/cznic/y#hdr-Error_Examples +// [3]: http://www.gnu.org/software/bison/manual/html_node/Precedence-Only.html#Precedence-Only +package main + +import ( + "bufio" + "bytes" + "flag" + "fmt" + "go/format" + "go/scanner" + "go/token" + "io" + "io/ioutil" + "log" + "os" + "runtime" + "sort" + "strings" + + "github.com/cznic/mathutil" + parser "github.com/cznic/parser/yacc" + "github.com/cznic/sortutil" + "github.com/cznic/strutil" + "github.com/cznic/y" +) + +var ( + //oNoDefault = flag.Bool("nodefault", false, "disable generating $default actions") + oClosures = flag.Bool("c", false, "report state closures") + oReducible = flag.Bool("cr", false, "check all states are reducible") + oDlval = flag.String("dlval", "lval", "debug value (runtime yyDebug >= 3)") + oDlvalf = flag.String("dlvalf", "%+v", "debug format of -dlval (runtime yyDebug >= 3)") + oLA = flag.Bool("la", false, "report all lookahead sets") + oNoLines = flag.Bool("l", false, "disable line directives (for compatibility ony - ignored)") + oOut = flag.String("o", "y.go", "parser output") + oPref = flag.String("p", "yy", "name prefix to use in generated code") + oReport = flag.String("v", "y.output", "create grammar report") + oResolved = flag.Bool("ex", false, "explain how were conflicts resolved") + oXErrors = flag.String("xe", "", "generate eXtra errors from examples source file") + oXErrorsGen = flag.String("xegen", "", "generate error from examples source file automatically from the grammar") +) + +func main() { + log.SetFlags(0) + + defer func() { + _, file, line, ok := runtime.Caller(2) + if e := recover(); e != nil { + switch { + case ok: + log.Fatalf("%s:%d: panic: %v", file, line, e) + default: + log.Fatalf("panic: %v", e) + } + } + }() + + flag.Parse() + var in string + switch flag.NArg() { + case 0: + in = os.Stdin.Name() + case 1: + in = flag.Arg(0) + default: + log.Fatal("expected at most one non flag argument") + } + + if err := main1(in); err != nil { + switch x := err.(type) { + case scanner.ErrorList: + for _, v := range x { + fmt.Fprintf(os.Stderr, "%v\n", v) + } + os.Exit(1) + default: + log.Fatal(err) + } + } +} + +type symUsed struct { + sym *y.Symbol + used int +} + +type symsUsed []symUsed + +func (s symsUsed) Len() int { return len(s) } +func (s symsUsed) Swap(i, j int) { s[i], s[j] = s[j], s[i] } + +func (s symsUsed) Less(i, j int) bool { + if s[i].used > s[j].used { + return true + } + + if s[i].used < s[j].used { + return false + } + + caseFoldedCompare := strings.Compare(strings.ToLower(s[i].sym.Name), strings.ToLower(s[j].sym.Name)) + if caseFoldedCompare < 0 { + return true + } + if caseFoldedCompare > 0 { + return false + } + + return s[i].sym.Name < s[j].sym.Name +} + +func main1(in string) (err error) { + var out io.Writer + if nm := *oOut; nm != "" { + var f *os.File + var e error + if f, err = os.Create(nm); err != nil { + return err + } + + defer func() { + if e1 := f.Close(); e1 != nil && err == nil { + err = e1 + } + }() + w := bufio.NewWriter(f) + defer func() { + if e1 := 
w.Flush(); e1 != nil && err == nil { + err = e1 + } + }() + buf := bytes.NewBuffer(nil) + out = buf + defer func() { + var dest []byte + if dest, e = format.Source(buf.Bytes()); e != nil { + dest = buf.Bytes() + } + + if _, e = w.Write(dest); e != nil && err == nil { + err = e + } + }() + } + + var rep io.Writer + if nm := *oReport; nm != "" { + f, err1 := os.Create(nm) + if err1 != nil { + return err1 + } + + defer func() { + if e := f.Close(); e != nil && err == nil { + err = e + } + }() + w := bufio.NewWriter(f) + defer func() { + if e := w.Flush(); e != nil && err == nil { + err = e + } + }() + rep = w + } + + var xerrors []byte + if nm := *oXErrors; nm != "" { + b, err1 := ioutil.ReadFile(nm) + if err1 != nil { + return err1 + } + + xerrors = b + } + + p, err := y.ProcessFile(token.NewFileSet(), in, &y.Options{ + //NoDefault: *oNoDefault, + AllowConflicts: true, + Closures: *oClosures, + LA: *oLA, + Reducible: *oReducible, + Report: rep, + Resolved: *oResolved, + XErrorsName: *oXErrors, + XErrorsSrc: xerrors, + }) + if err != nil { + return err + } + + if fn := *oXErrorsGen; fn != "" { + f, err := os.OpenFile(fn, os.O_RDWR|os.O_CREATE, 0666) + if err != nil { + return err + } + + b := bufio.NewWriter(f) + if err := p.SkeletonXErrors(b); err != nil { + return err + } + + if err := b.Flush(); err != nil { + return err + } + + if err := f.Close(); err != nil { + return err + } + } + + msu := make(map[*y.Symbol]int, len(p.Syms)) // sym -> usage + for nm, sym := range p.Syms { + if nm == "" || nm == "ε" || nm == "$accept" || nm == "#" { + continue + } + + msu[sym] = 0 + } + var minArg, maxArg int + for _, state := range p.Table { + for _, act := range state { + msu[act.Sym]++ + k, arg := act.Kind() + if k == 'a' { + continue + } + + if k == 'r' { + arg = -arg + } + minArg, maxArg = mathutil.Min(minArg, arg), mathutil.Max(maxArg, arg) + } + } + su := make(symsUsed, 0, len(msu)) + for sym, used := range msu { + su = append(su, symUsed{sym, used}) + } + sort.Sort(su) + + // ----------------------------------------------------------- Prologue + f := strutil.IndentFormatter(out, "\t") + mustFormat(f, "// CAUTION: Generated file - DO NOT EDIT.\n\n") + mustFormat(f, "%s", injectImport(p.Prologue)) + mustFormat(f, ` +type %[1]sSymType %i%s%u + +type %[1]sXError struct { + state, xsym int +} +`, *oPref, p.UnionSrc) + + // ---------------------------------------------------------- Constants + nsyms := map[string]*y.Symbol{} + a := make([]string, 0, len(msu)) + maxTokName := 0 + for sym := range msu { + nm := sym.Name + if nm == "$default" || nm == "$end" || sym.IsTerminal && nm[0] != '\'' && sym.Value > 0 { + maxTokName = mathutil.Max(maxTokName, len(nm)) + a = append(a, nm) + } + nsyms[nm] = sym + } + sort.Strings(a) + mustFormat(f, "\nconst (%i\n") + for _, v := range a { + nm := v + switch nm { + case "error": + nm = *oPref + "ErrCode" + case "$default": + nm = *oPref + "Default" + case "$end": + nm = *oPref + "EOFCode" + } + mustFormat(f, "%s%s = %d\n", nm, strings.Repeat(" ", maxTokName-len(nm)+1), nsyms[v].Value) + } + minArg-- // eg: [-13, 42], minArg -14 maps -13 to 1 so zero cell values -> empty. 
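+	// Continuing the example: TabOfs is emitted as -14, so a reduce by rule 13
+	// (arg -13) is stored as -13-(-14) = 1, a shift to state 42 as 42-(-14) = 56,
+	// and 0 still means "empty cell"; the generated parser adds TabOfs back.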
+ mustFormat(f, "\n%sMaxDepth = 200\n", *oPref) + mustFormat(f, "%sTabOfs = %d\n", *oPref, minArg) + mustFormat(f, "%u)") + + // ---------------------------------------------------------- Variables + mustFormat(f, "\n\nvar (%i\n") + + // Lex translation table + mustFormat(f, "%sXLAT = map[int]int{%i\n", *oPref) + xlat := make(map[int]int, len(su)) + var errSym int + for i, v := range su { + if v.sym.Name == "error" { + errSym = i + } + xlat[v.sym.Value] = i + mustFormat(f, "%6d: %3d, // %s (%dx)\n", v.sym.Value, i, v.sym.Name, msu[v.sym]) + } + mustFormat(f, "%u}\n") + + // Symbol names + mustFormat(f, "\n%sSymNames = []string{%i\n", *oPref) + for _, v := range su { + mustFormat(f, "%q,\n", v.sym.Name) + } + mustFormat(f, "%u}\n") + + // Reduction table + mustFormat(f, "\n%sReductions = []struct{xsym, components int}{%i\n", *oPref) + for _, rule := range p.Rules { + mustFormat(f, "{%d, %d},\n", xlat[rule.Sym.Value], len(rule.Components)) + } + mustFormat(f, "%u}\n") + + // XError table + mustFormat(f, "\n%[1]sXErrors = map[%[1]sXError]string{%i\n", *oPref) + for _, xerr := range p.XErrors { + state := xerr.Stack[len(xerr.Stack)-1] + xsym := -1 + if xerr.Lookahead != nil { + xsym = xlat[xerr.Lookahead.Value] + } + mustFormat(f, "%[1]sXError{%d, %d}: \"%s\",\n", *oPref, state, xsym, xerr.Msg) + } + mustFormat(f, "%u}\n\n") + + // Parse table + tbits := 32 + switch n := mathutil.BitLen(maxArg - minArg + 1); { + case n < 8: + tbits = 8 + case n < 16: + tbits = 16 + } + mustFormat(f, "%sParseTab = [%d][]uint%d{%i\n", *oPref, len(p.Table), tbits) + nCells := 0 + var tabRow sortutil.Uint64Slice + for si, state := range p.Table { + tabRow = tabRow[:0] + max := 0 + for _, act := range state { + sym := act.Sym + xsym, ok := xlat[sym.Value] + if !ok { + panic("internal error 001") + } + + max = mathutil.Max(max, xsym) + kind, arg := act.Kind() + switch kind { + case 'a': + arg = 0 + case 'r': + arg *= -1 + } + tabRow = append(tabRow, uint64(xsym)<<32|uint64(arg-minArg)) + } + nCells += max + tabRow.Sort() + col := -1 + if si%5 == 0 { + mustFormat(f, "// %d\n", si) + } + mustFormat(f, "{") + for i, v := range tabRow { + xsym := int(uint32(v >> 32)) + arg := int(uint32(v)) + if col+1 != xsym { + mustFormat(f, "%d: ", xsym) + } + switch { + case i == len(tabRow)-1: + mustFormat(f, "%d", arg) + default: + mustFormat(f, "%d, ", arg) + } + col = xsym + } + mustFormat(f, "},\n") + } + mustFormat(f, "%u}\n") + fmt.Fprintf(os.Stderr, "Parse table entries: %d of %d, x %d bits == %d bytes\n", nCells, len(p.Table)*len(msu), tbits, nCells*tbits/8) + if n := p.ConflictsSR; n != 0 { + fmt.Fprintf(os.Stderr, "conflicts: %d shift/reduce\n", n) + } + if n := p.ConflictsRR; n != 0 { + fmt.Fprintf(os.Stderr, "conflicts: %d reduce/reduce\n", n) + } + + mustFormat(f, `%u) + +var %[1]sDebug = 0 + +type %[1]sLexer interface { + Lex(lval *%[1]sSymType) int + Errorf(format string, a ...interface{}) error + AppendError(err error) + Errors() (warns []error, errs []error) +} + +type %[1]sLexerEx interface { + %[1]sLexer + Reduced(rule, state int, lval *%[1]sSymType) bool +} + +func %[1]sSymName(c int) (s string) { + x, ok := %[1]sXLAT[c] + if ok { + return %[1]sSymNames[x] + } + + return __yyfmt__.Sprintf("%%d", c) +} + +func %[1]slex1(yylex %[1]sLexer, lval *%[1]sSymType) (n int) { + n = yylex.Lex(lval) + if n <= 0 { + n = %[1]sEOFCode + } + if %[1]sDebug >= 3 { + __yyfmt__.Printf("\nlex %%s(%%#x %%d), %[4]s: %[3]s\n", %[1]sSymName(n), n, n, %[4]s) + } + return n +} + +func %[1]sParse(yylex %[1]sLexer, parser *Parser) int { + 
const yyError = %[2]d + + yyEx, _ := yylex.(%[1]sLexerEx) + var yyn int + parser.yylval = %[1]sSymType{} + yyS := parser.cache + + Nerrs := 0 /* number of errors */ + Errflag := 0 /* error recovery flag */ + yyerrok := func() { + if %[1]sDebug >= 2 { + __yyfmt__.Printf("yyerrok()\n") + } + Errflag = 0 + } + _ = yyerrok + yystate := 0 + yychar := -1 + var yyxchar int + var yyshift int + yyp := -1 + goto yystack + +ret0: + return 0 + +ret1: + return 1 + +yystack: + /* put a state and value onto the stack */ + yyp++ + if yyp+1 >= len(yyS) { + nyys := make([]%[1]sSymType, len(yyS)*2) + copy(nyys, yyS) + yyS = nyys + parser.cache = yyS + } + parser.yyVAL = &yyS[yyp+1] + yyS[yyp].yys = yystate + +yynewstate: + if yychar < 0 { + yychar = %[1]slex1(yylex, &parser.yylval) + var ok bool + if yyxchar, ok = %[1]sXLAT[yychar]; !ok { + yyxchar = len(%[1]sSymNames) // > tab width + } + } + if %[1]sDebug >= 4 { + var a []int + for _, v := range yyS[:yyp+1] { + a = append(a, v.yys) + } + __yyfmt__.Printf("state stack %%v\n", a) + } + row := %[1]sParseTab[yystate] + yyn = 0 + if yyxchar < len(row) { + if yyn = int(row[yyxchar]); yyn != 0 { + yyn += %[1]sTabOfs + } + } + switch { + case yyn > 0: // shift + yychar = -1 + *parser.yyVAL = parser.yylval + yystate = yyn + yyshift = yyn + if %[1]sDebug >= 2 { + __yyfmt__.Printf("shift, and goto state %%d\n", yystate) + } + if Errflag > 0 { + Errflag-- + } + goto yystack + case yyn < 0: // reduce + case yystate == 1: // accept + if %[1]sDebug >= 2 { + __yyfmt__.Println("accept") + } + goto ret0 + } + + if yyn == 0 { + /* error ... attempt to resume parsing */ + switch Errflag { + case 0: /* brand new error */ + if %[1]sDebug >= 1 { + __yyfmt__.Printf("no action for %%s in state %%d\n", %[1]sSymName(yychar), yystate) + } + msg, ok := %[1]sXErrors[%[1]sXError{yystate, yyxchar}] + if !ok { + msg, ok = %[1]sXErrors[%[1]sXError{yystate, -1}] + } + if !ok && yyshift != 0 { + msg, ok = %[1]sXErrors[%[1]sXError{yyshift, yyxchar}] + } + if !ok { + msg, ok = %[1]sXErrors[%[1]sXError{yyshift, -1}] + } + if !ok || msg == "" { + msg = "syntax error" + } + // ignore goyacc error message + yylex.AppendError(yylex.Errorf("")) + Nerrs++ + fallthrough + + case 1, 2: /* incompletely recovered error ... try again */ + Errflag = 3 + + /* find a state where "error" is a legal shift action */ + for yyp >= 0 { + row := %[1]sParseTab[yyS[yyp].yys] + if yyError < len(row) { + yyn = int(row[yyError])+%[1]sTabOfs + if yyn > 0 { // hit + if %[1]sDebug >= 2 { + __yyfmt__.Printf("error recovery found error shift in state %%d\n", yyS[yyp].yys) + } + yystate = yyn /* simulate a shift of "error" */ + goto yystack + } + } + + /* the current p has no shift on "error", pop stack */ + if %[1]sDebug >= 2 { + __yyfmt__.Printf("error recovery pops state %%d\n", yyS[yyp].yys) + } + yyp-- + } + /* there is no state on the stack with an error shift ... 
abort */ + if %[1]sDebug >= 2 { + __yyfmt__.Printf("error recovery failed\n") + } + goto ret1 + + case 3: /* no shift yet; clobber input char */ + if %[1]sDebug >= 2 { + __yyfmt__.Printf("error recovery discards %%s\n", %[1]sSymName(yychar)) + } + if yychar == %[1]sEOFCode { + goto ret1 + } + + yychar = -1 + goto yynewstate /* try again in the same state */ + } + } + + r := -yyn + x0 := %[1]sReductions[r] + x, n := x0.xsym, x0.components + yypt := yyp + _ = yypt // guard against "declared and not used" + + yyp -= n + if yyp+1 >= len(yyS) { + nyys := make([]%[1]sSymType, len(yyS)*2) + copy(nyys, yyS) + yyS = nyys + parser.cache = yyS + } + parser.yyVAL = &yyS[yyp+1] + + /* consult goto table to find next state */ + exState := yystate + yystate = int(%[1]sParseTab[yyS[yyp].yys][x])+%[1]sTabOfs + /* reduction by production r */ + if %[1]sDebug >= 2 { + __yyfmt__.Printf("reduce using rule %%v (%%s), and goto state %%d\n", r, %[1]sSymNames[x], yystate) + } + + switch r {%i +`, + *oPref, errSym, *oDlvalf, *oDlval) + for r, rule := range p.Rules { + if rule.Action == nil { + continue + } + + action := rule.Action.Values + if len(action) == 0 { + continue + } + + if len(action) == 1 { + part := action[0] + if part.Type == parser.ActionValueGo { + src := part.Src + src = src[1 : len(src)-1] // Remove lead '{' and trail '}' + if strings.TrimSpace(src) == "" { + continue + } + } + } + + components := rule.Components + typ := rule.Sym.Type + max := len(components) + if p1 := rule.Parent; p1 != nil { + max = rule.MaxParentDlr + components = p1.Components + } + mustFormat(f, "case %d: ", r) + for _, part := range action { + num := part.Num + switch part.Type { + case parser.ActionValueGo: + mustFormat(f, "%s", part.Src) + case parser.ActionValueDlrDlr: + mustFormat(f, "parser.yyVAL.%s", typ) + if typ == "" { + panic("internal error 002") + } + case parser.ActionValueDlrNum: + typ := p.Syms[components[num-1]].Type + if typ == "" { + panic("internal error 003") + } + mustFormat(f, "yyS[yypt-%d].%s", max-num, typ) + case parser.ActionValueDlrTagDlr: + mustFormat(f, "parser.yyVAL.%s", part.Tag) + case parser.ActionValueDlrTagNum: + mustFormat(f, "yyS[yypt-%d].%s", max-num, part.Tag) + } + } + mustFormat(f, "\n") + } + mustFormat(f, `%u + } + + if yyEx != nil && yyEx.Reduced(r, exState, parser.yyVAL) { + return -1 + } + goto yystack /* stack new state and value */ +} + +%[2]s +`, *oPref, p.Tail) + _ = oNoLines //TODO Ignored for now + return nil +} + +func injectImport(src string) string { + const inj = ` + +import __yyfmt__ "fmt" +` + fset := token.NewFileSet() + file := fset.AddFile("", -1, len(src)) + var s scanner.Scanner + s.Init( + file, + []byte(src), + nil, + scanner.ScanComments, + ) + for { + switch _, tok, _ := s.Scan(); tok { + case token.EOF: + return inj + src + case token.PACKAGE: + s.Scan() // ident + pos, _, _ := s.Scan() + ofs := file.Offset(pos) + return src[:ofs] + inj + src[ofs:] + } + } +} + +func mustFormat(f strutil.Formatter, format string, args ...interface{}) { + _, err := f.Format(format, args...) + if err != nil { + log.Fatalf("format error %v", err) + } +} diff --git a/parser/lexer.go b/parser/lexer.go new file mode 100644 index 0000000..b30ffb8 --- /dev/null +++ b/parser/lexer.go @@ -0,0 +1,873 @@ +// Copyright 2016 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package parser + +import ( + "bytes" + "fmt" + "strconv" + "strings" + "unicode" + "unicode/utf8" + + "github.com/pingcap/tidb/parser/mysql" +) + +var _ = yyLexer(&Scanner{}) + +// Pos represents the position of a token. +type Pos struct { + Line int + Col int + Offset int +} + +// Scanner implements the yyLexer interface. +type Scanner struct { + r reader + buf bytes.Buffer + + errs []error + warns []error + stmtStartPos int + + // For scanning such kind of comment: /*! MySQL-specific code */ or /*+ optimizer hint */ + specialComment specialCommentScanner + + sqlMode mysql.SQLMode + + // lastScanOffset indicates last offset returned by scan(). + // It's used to substring sql in syntax error message. + lastScanOffset int +} + +type specialCommentScanner interface { + stmtTexter + scan() (tok int, pos Pos, lit string) +} + +type mysqlSpecificCodeScanner struct { + *Scanner + Pos +} + +func (s *mysqlSpecificCodeScanner) scan() (tok int, pos Pos, lit string) { + tok, pos, lit = s.Scanner.scan() + pos.Line += s.Pos.Line + pos.Col += s.Pos.Col + pos.Offset += s.Pos.Offset + return +} + +type optimizerHintScanner struct { + *Scanner + Pos + end bool +} + +func (s *optimizerHintScanner) scan() (tok int, pos Pos, lit string) { + tok, pos, lit = s.Scanner.scan() + pos.Line += s.Pos.Line + pos.Col += s.Pos.Col + pos.Offset += s.Pos.Offset + switch tok { + case 0: + if !s.end { + tok = hintEnd + s.end = true + } + case invalid: + // an optimizer hint is allowed to contain invalid characters, the + // remaining hints are just ignored. + // force advance the lexer even when encountering an invalid character + // to prevent infinite parser loop. (see issue #336) + s.r.inc() + } + return +} + +// Errors returns the errors and warns during a scan. +func (s *Scanner) Errors() (warns []error, errs []error) { + return s.warns, s.errs +} + +// reset resets the sql string to be scanned. +func (s *Scanner) reset(sql string) { + s.r = reader{s: sql, p: Pos{Line: 1}} + s.buf.Reset() + s.errs = s.errs[:0] + s.warns = s.warns[:0] + s.stmtStartPos = 0 + s.specialComment = nil +} + +func (s *Scanner) stmtText() string { + if s.specialComment != nil { + return s.specialComment.stmtText() + } + + endPos := s.r.pos().Offset + if s.r.s[endPos-1] == '\n' { + endPos = endPos - 1 // trim new line + } + if s.r.s[s.stmtStartPos] == '\n' { + s.stmtStartPos++ + } + + text := s.r.s[s.stmtStartPos:endPos] + + s.stmtStartPos = endPos + return text +} + +// Errorf tells scanner something is wrong. +// Scanner satisfies yyLexer interface which need this function. +func (s *Scanner) Errorf(format string, a ...interface{}) (err error) { + str := fmt.Sprintf(format, a...) + val := s.r.s[s.lastScanOffset:] + var lenStr = "" + if len(val) > 2048 { + lenStr = "(total length " + strconv.Itoa(len(val)) + ")" + val = val[:2048] + } + err = fmt.Errorf("line %d column %d near \"%s\"%s %s", + s.r.p.Line, s.r.p.Col, val, str, lenStr) + return +} + +// AppendError sets error into scanner. +// Scanner satisfies yyLexer interface which need this function. 
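+// A nil err is ignored; non-nil errors accumulate and are returned by Errors().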
+func (s *Scanner) AppendError(err error) { + if err == nil { + return + } + s.errs = append(s.errs, err) +} + +// Lex returns a token and store the token value in v. +// Scanner satisfies yyLexer interface. +// 0 and invalid are special token id this function would return: +// return 0 tells parser that scanner meets EOF, +// return invalid tells parser that scanner meets illegal character. +func (s *Scanner) Lex(v *yySymType) int { + tok, pos, lit := s.scan() + s.lastScanOffset = pos.Offset + v.offset = pos.Offset + v.ident = lit + if tok == identifier { + tok = handleIdent(v) + } + if tok == identifier { + if tok1 := s.isTokenIdentifier(lit, pos.Offset); tok1 != 0 { + tok = tok1 + } + } + if s.sqlMode.HasANSIQuotesMode() && + tok == stringLit && + s.r.s[v.offset] == '"' { + tok = identifier + } + + if tok == pipes && !(s.sqlMode.HasPipesAsConcatMode()) { + return pipesAsOr + } + + if tok == not && s.sqlMode.HasHighNotPrecedenceMode() { + return not2 + } + + switch tok { + case intLit: + return toInt(s, v, lit) + case floatLit: + return toFloat(s, v, lit) + case decLit: + return toDecimal(s, v, lit) + case hexLit: + return toHex(s, v, lit) + case bitLit: + return toBit(s, v, lit) + case singleAtIdentifier, doubleAtIdentifier, cast, extract: + v.item = lit + return tok + case null: + v.item = nil + case quotedIdentifier: + tok = identifier + } + + if tok == unicode.ReplacementChar { + return invalid + } + + return tok +} + +// SetSQLMode sets the SQL mode for scanner. +func (s *Scanner) SetSQLMode(mode mysql.SQLMode) { + s.sqlMode = mode +} + +// GetSQLMode return the SQL mode of scanner. +func (s *Scanner) GetSQLMode() mysql.SQLMode { + return s.sqlMode +} + +// InheritScanner returns a new scanner object which inherits configurations from the parent scanner. +func (s *Scanner) InheritScanner(sql string) *Scanner { + return &Scanner{ + r: reader{s: sql}, + sqlMode: s.sqlMode, + } +} + +// NewScanner returns a new scanner object. +func NewScanner(s string) *Scanner { + return &Scanner{r: reader{s: s}} +} + +func (s *Scanner) skipWhitespace() rune { + return s.r.incAsLongAs(unicode.IsSpace) +} + +func (s *Scanner) scan() (tok int, pos Pos, lit string) { + if s.specialComment != nil { + // Enter specialComment scan mode. + // for scanning such kind of comment: /*! MySQL-specific code */ + specialComment := s.specialComment + tok, pos, lit = specialComment.scan() + if tok != 0 { + // return the specialComment scan result as the result + return + } + // leave specialComment scan mode after all stream consumed. + s.specialComment = nil + } + + ch0 := s.r.peek() + if unicode.IsSpace(ch0) { + ch0 = s.skipWhitespace() + } + pos = s.r.pos() + if s.r.eof() { + // when scanner meets EOF, the returned token should be 0, + // because 0 is a special token id to remind the parser that stream is end. + return 0, pos, "" + } + + if !s.r.eof() && isIdentExtend(ch0) { + return scanIdentifier(s) + } + + // search a trie to get a token. 
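+	// Each input byte descends one level in ruleTable; if the node reached has
+	// a scan function it takes over, otherwise the last node's token is used.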
+ node := &ruleTable + for ch0 >= 0 && ch0 <= 255 { + if node.childs[ch0] == nil || s.r.eof() { + break + } + node = node.childs[ch0] + if node.fn != nil { + return node.fn(s) + } + s.r.inc() + ch0 = s.r.peek() + } + + tok, lit = node.token, s.r.data(&pos) + return +} + +func startWithXx(s *Scanner) (tok int, pos Pos, lit string) { + pos = s.r.pos() + s.r.inc() + if s.r.peek() == '\'' { + s.r.inc() + s.scanHex() + if s.r.peek() == '\'' { + s.r.inc() + tok, lit = hexLit, s.r.data(&pos) + } else { + tok = unicode.ReplacementChar + } + return + } + s.r.incAsLongAs(isIdentChar) + tok, lit = identifier, s.r.data(&pos) + return +} + +func startWithNn(s *Scanner) (tok int, pos Pos, lit string) { + tok, pos, lit = scanIdentifier(s) + // The National Character Set, N'some text' or n'some test'. + // See https://dev.mysql.com/doc/refman/5.7/en/string-literals.html + // and https://dev.mysql.com/doc/refman/5.7/en/charset-national.html + if lit == "N" || lit == "n" { + if s.r.peek() == '\'' { + tok = underscoreCS + lit = "utf8" + } + } + return +} + +func startWithBb(s *Scanner) (tok int, pos Pos, lit string) { + pos = s.r.pos() + s.r.inc() + if s.r.peek() == '\'' { + s.r.inc() + s.scanBit() + if s.r.peek() == '\'' { + s.r.inc() + tok, lit = bitLit, s.r.data(&pos) + } else { + tok = unicode.ReplacementChar + } + return + } + s.r.incAsLongAs(isIdentChar) + tok, lit = identifier, s.r.data(&pos) + return +} + +func startWithSharp(s *Scanner) (tok int, pos Pos, lit string) { + s.r.incAsLongAs(func(ch rune) bool { + return ch != '\n' + }) + return s.scan() +} + +func startWithDash(s *Scanner) (tok int, pos Pos, lit string) { + pos = s.r.pos() + if strings.HasPrefix(s.r.s[pos.Offset:], "--") { + remainLen := len(s.r.s[pos.Offset:]) + if remainLen == 2 || (remainLen > 2 && unicode.IsSpace(rune(s.r.s[pos.Offset+2]))) { + s.r.incAsLongAs(func(ch rune) bool { + return ch != '\n' + }) + return s.scan() + } + } + if strings.HasPrefix(s.r.s[pos.Offset:], "->>") { + tok = juss + s.r.incN(3) + return + } + if strings.HasPrefix(s.r.s[pos.Offset:], "->") { + tok = jss + s.r.incN(2) + return + } + tok = int('-') + lit = "-" + s.r.inc() + return +} + +func startWithSlash(s *Scanner) (tok int, pos Pos, lit string) { + pos = s.r.pos() + s.r.inc() + ch0 := s.r.peek() + if ch0 == '*' { + s.r.inc() + startWithAsterisk := false + for { + ch0 = s.r.readByte() + if startWithAsterisk && ch0 == '/' { + // Meets */, means comment end. + break + } else if ch0 == '*' { + startWithAsterisk = true + } else { + startWithAsterisk = false + } + + if ch0 == unicode.ReplacementChar && s.r.eof() { + // unclosed comment + s.errs = append(s.errs, ParseErrorWith(s.r.data(&pos), s.r.p.Line)) + return + } + + } + + comment := s.r.data(&pos) + + // See https://dev.mysql.com/doc/refman/5.7/en/optimizer-hints.html + if strings.HasPrefix(comment, "/*+") { + begin := sqlOffsetInComment(comment) + end := len(comment) - 2 + sql := comment[begin:end] + s.specialComment = &optimizerHintScanner{ + Scanner: s.InheritScanner(sql), + Pos: Pos{ + pos.Line, + pos.Col, + pos.Offset + begin, + }, + } + + tok = hintBegin + return + } + + // See http://dev.mysql.com/doc/refman/5.7/en/comments.html + // Convert "/*!VersionNumber MySQL-specific-code */" to "MySQL-specific-code". 
+ if strings.HasPrefix(comment, "/*!") { + sql := specCodePattern.ReplaceAllStringFunc(comment, TrimComment) + s.specialComment = &mysqlSpecificCodeScanner{ + Scanner: s.InheritScanner(sql), + Pos: Pos{ + pos.Line, + pos.Col, + pos.Offset + sqlOffsetInComment(comment), + }, + } + } + + if strings.HasPrefix(comment, "/*T!") { + commentVersion := extractVersionCodeInComment(comment) + if commentVersion != CommentCodeNoVersion && commentVersion <= CommentCodeCurrentVersion { + sql := SpecVersionCodePattern.ReplaceAllStringFunc(comment, TrimCodeVersionComment) + s.specialComment = &mysqlSpecificCodeScanner{ + Scanner: s.InheritScanner(sql), + Pos: Pos{ + pos.Line, + pos.Col, + pos.Offset + sqlOffsetInComment(comment), + }, + } + } + } + + return s.scan() + } + tok = int('/') + return +} + +func sqlOffsetInComment(comment string) int { + // find the first SQL token offset in pattern like "/*!40101 mysql specific code */" + offset := 0 + for i := 0; i < len(comment); i++ { + if unicode.IsSpace(rune(comment[i])) { + offset = i + break + } + } + for offset < len(comment) { + offset++ + if !unicode.IsSpace(rune(comment[offset])) { + break + } + } + return offset +} + +func startWithAt(s *Scanner) (tok int, pos Pos, lit string) { + pos = s.r.pos() + s.r.inc() + + tok, lit = scanIdentifierOrString(s) + switch tok { + case '@': + s.r.inc() + stream := s.r.s[pos.Offset+2:] + var prefix string + for _, v := range []string{"global.", "session.", "local."} { + if len(v) > len(stream) { + continue + } + if strings.EqualFold(stream[:len(v)], v) { + prefix = v + s.r.incN(len(v)) + break + } + } + tok, lit = scanIdentifierOrString(s) + switch tok { + case stringLit, quotedIdentifier: + tok, lit = doubleAtIdentifier, "@@"+prefix+lit + case identifier: + tok, lit = doubleAtIdentifier, s.r.data(&pos) + } + case unicode.ReplacementChar: + break + default: + tok = singleAtIdentifier + } + + return +} + +func scanIdentifier(s *Scanner) (int, Pos, string) { + pos := s.r.pos() + s.r.inc() + s.r.incAsLongAs(isIdentChar) + return identifier, pos, s.r.data(&pos) +} + +func scanIdentifierOrString(s *Scanner) (tok int, lit string) { + ch1 := s.r.peek() + switch ch1 { + case '\'', '"': + tok, _, lit = startString(s) + case '`': + tok, _, lit = scanQuotedIdent(s) + default: + if isUserVarChar(ch1) { + pos := s.r.pos() + s.r.incAsLongAs(isUserVarChar) + tok, lit = identifier, s.r.data(&pos) + } else { + tok = int(ch1) + } + } + return +} + +var ( + quotedIdentifier = -identifier +) + +func scanQuotedIdent(s *Scanner) (tok int, pos Pos, lit string) { + pos = s.r.pos() + s.r.inc() + s.buf.Reset() + for { + ch := s.r.readByte() + if ch == unicode.ReplacementChar && s.r.eof() { + tok = unicode.ReplacementChar + return + } + if ch == '`' { + if s.r.peek() != '`' { + // don't return identifier in case that it's interpreted as keyword token later. + tok, lit = quotedIdentifier, s.buf.String() + return + } + s.r.inc() + } + s.buf.WriteRune(ch) + } +} + +func startString(s *Scanner) (tok int, pos Pos, lit string) { + return s.scanString() +} + +// lazyBuf is used to avoid allocation if possible. +// it has a useBuf field indicates whether bytes.Buffer is necessary. if +// useBuf is false, we can avoid calling bytes.Buffer.String(), which +// make a copy of data and cause allocation. 
+type lazyBuf struct { + useBuf bool + r *reader + b *bytes.Buffer + p *Pos +} + +func (mb *lazyBuf) setUseBuf(str string) { + if !mb.useBuf { + mb.useBuf = true + mb.b.Reset() + mb.b.WriteString(str) + } +} + +func (mb *lazyBuf) writeRune(r rune, w int) { + if mb.useBuf { + if w > 1 { + mb.b.WriteRune(r) + } else { + mb.b.WriteByte(byte(r)) + } + } +} + +func (mb *lazyBuf) data() string { + var lit string + if mb.useBuf { + lit = mb.b.String() + } else { + lit = mb.r.data(mb.p) + lit = lit[1 : len(lit)-1] + } + return lit +} + +func (s *Scanner) scanString() (tok int, pos Pos, lit string) { + tok, pos = stringLit, s.r.pos() + mb := lazyBuf{false, &s.r, &s.buf, &pos} + ending := s.r.readByte() + ch0 := s.r.peek() + for !s.r.eof() { + if ch0 == ending { + s.r.inc() + if s.r.peek() != ending { + lit = mb.data() + return + } + str := mb.r.data(&pos) + mb.setUseBuf(str[1 : len(str)-1]) + } else if ch0 == '\\' && !s.sqlMode.HasNoBackslashEscapesMode() { + mb.setUseBuf(mb.r.data(&pos)[1:]) + ch0 = handleEscape(s) + } + mb.writeRune(ch0, s.r.w) + if !s.r.eof() { + s.r.inc() + ch0 = s.r.peek() + } + } + + tok = unicode.ReplacementChar + return +} + +// handleEscape handles the case in scanString when previous char is '\'. +func handleEscape(s *Scanner) rune { + s.r.inc() + ch0 := s.r.peek() + /* + \" \' \\ \n \0 \b \Z \r \t ==> escape to one char + \% \_ ==> preserve both char + other ==> remove \ + */ + switch ch0 { + case 'n': + ch0 = '\n' + case '0': + ch0 = 0 + case 'b': + ch0 = 8 + case 'Z': + ch0 = 26 + case 'r': + ch0 = '\r' + case 't': + ch0 = '\t' + case '%', '_': + s.buf.WriteByte('\\') + } + return ch0 +} + +func startWithNumber(s *Scanner) (tok int, pos Pos, lit string) { + pos = s.r.pos() + tok = intLit + ch0 := s.r.readByte() + if ch0 == '0' { + tok = intLit + ch1 := s.r.peek() + switch { + case ch1 >= '0' && ch1 <= '7': + s.r.inc() + s.scanOct() + case ch1 == 'x' || ch1 == 'X': + s.r.inc() + p1 := s.r.pos() + s.scanHex() + p2 := s.r.pos() + // 0x, 0x7fz3 are identifier + if p1 == p2 || isDigit(s.r.peek()) { + s.r.incAsLongAs(isIdentChar) + return identifier, pos, s.r.data(&pos) + } + tok = hexLit + case ch1 == 'b': + s.r.inc() + p1 := s.r.pos() + s.scanBit() + p2 := s.r.pos() + // 0b, 0b123, 0b1ab are identifier + if p1 == p2 || isDigit(s.r.peek()) { + s.r.incAsLongAs(isIdentChar) + return identifier, pos, s.r.data(&pos) + } + tok = bitLit + case ch1 == '.': + return s.scanFloat(&pos) + case ch1 == 'B': + s.r.incAsLongAs(isIdentChar) + return identifier, pos, s.r.data(&pos) + } + } + + s.scanDigits() + ch0 = s.r.peek() + if ch0 == '.' || ch0 == 'e' || ch0 == 'E' { + return s.scanFloat(&pos) + } + + // Identifiers may begin with a digit but unless quoted may not consist solely of digits. + if !s.r.eof() && isIdentChar(ch0) { + s.r.incAsLongAs(isIdentChar) + return identifier, pos, s.r.data(&pos) + } + lit = s.r.data(&pos) + return +} + +func startWithDot(s *Scanner) (tok int, pos Pos, lit string) { + pos = s.r.pos() + s.r.inc() + save := s.r.pos() + if isDigit(s.r.peek()) { + tok, _, lit = s.scanFloat(&pos) + if s.r.eof() || !isIdentChar(s.r.peek()) { + return + } + // Fail to parse a float, reset to dot. + s.r.p = save + } + tok, lit = int('.'), "." 
+ return +} + +func (s *Scanner) scanOct() { + s.r.incAsLongAs(func(ch rune) bool { + return ch >= '0' && ch <= '7' + }) +} + +func (s *Scanner) scanHex() { + s.r.incAsLongAs(func(ch rune) bool { + return ch >= '0' && ch <= '9' || + ch >= 'a' && ch <= 'f' || + ch >= 'A' && ch <= 'F' + }) +} + +func (s *Scanner) scanBit() { + s.r.incAsLongAs(func(ch rune) bool { + return ch == '0' || ch == '1' + }) +} + +func (s *Scanner) scanFloat(beg *Pos) (tok int, pos Pos, lit string) { + s.r.p = *beg + // float = D1 . D2 e D3 + s.scanDigits() + ch0 := s.r.peek() + if ch0 == '.' { + s.r.inc() + s.scanDigits() + ch0 = s.r.peek() + } + if ch0 == 'e' || ch0 == 'E' { + s.r.inc() + ch0 = s.r.peek() + if ch0 == '-' || ch0 == '+' || isDigit(ch0) { + s.r.inc() + s.scanDigits() + tok = floatLit + } else { + // D1 . D2 e XX when XX is not D3, parse the result to an identifier. + // 9e9e = 9e9(float) + e(identifier) + // 9est = 9est(identifier) + s.r.incAsLongAs(isIdentChar) + tok = identifier + } + } else { + tok = decLit + } + pos, lit = *beg, s.r.data(beg) + return +} + +func (s *Scanner) scanDigits() string { + pos := s.r.pos() + s.r.incAsLongAs(isDigit) + return s.r.data(&pos) +} + +type reader struct { + s string + p Pos + w int +} + +var eof = Pos{-1, -1, -1} + +func (r *reader) eof() bool { + return r.p.Offset >= len(r.s) +} + +// peek() peeks a rune from underlying reader. +// if reader meets EOF, it will return unicode.ReplacementChar. to distinguish from +// the real unicode.ReplacementChar, the caller should call r.eof() again to check. +func (r *reader) peek() rune { + if r.eof() { + return unicode.ReplacementChar + } + v, w := rune(r.s[r.p.Offset]), 1 + switch { + case v == 0: + r.w = w + return v // illegal UTF-8 encoding + case v >= 0x80: + v, w = utf8.DecodeRuneInString(r.s[r.p.Offset:]) + if v == utf8.RuneError && w == 1 { + v = rune(r.s[r.p.Offset]) // illegal UTF-8 encoding + } + } + r.w = w + return v +} + +// inc increase the position offset of the reader. +// peek must be called before calling inc! +func (r *reader) inc() { + if r.s[r.p.Offset] == '\n' { + r.p.Line++ + r.p.Col = 0 + } + r.p.Offset += r.w + r.p.Col++ +} + +func (r *reader) incN(n int) { + for i := 0; i < n; i++ { + r.inc() + } +} + +func (r *reader) readByte() (ch rune) { + ch = r.peek() + if ch == unicode.ReplacementChar && r.eof() { + return + } + r.inc() + return +} + +func (r *reader) pos() Pos { + return r.p +} + +func (r *reader) data(from *Pos) string { + return r.s[from.Offset:r.p.Offset] +} + +func (r *reader) incAsLongAs(fn func(rune) bool) rune { + for { + ch := r.peek() + if !fn(ch) { + return ch + } + if ch == unicode.ReplacementChar && r.eof() { + return 0 + } + r.inc() + } +} diff --git a/parser/lexer_test.go b/parser/lexer_test.go new file mode 100644 index 0000000..af122f1 --- /dev/null +++ b/parser/lexer_test.go @@ -0,0 +1,372 @@ +// Copyright 2016 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package parser + +import ( + "fmt" + "unicode" + + . 
"github.com/pingcap/check" + "github.com/pingcap/tidb/parser/mysql" +) + +var _ = Suite(&testLexerSuite{}) + +type testLexerSuite struct { +} + +func (s *testLexerSuite) TestTokenID(c *C) { + for str, tok := range tokenMap { + l := NewScanner(str) + var v yySymType + tok1 := l.Lex(&v) + c.Check(tok, Equals, tok1) + } +} + +func (s *testLexerSuite) TestSingleChar(c *C) { + table := []byte{'|', '&', '-', '+', '*', '/', '%', '^', '~', '(', ',', ')'} + for _, tok := range table { + l := NewScanner(string(tok)) + var v yySymType + tok1 := l.Lex(&v) + c.Check(int(tok), Equals, tok1) + } +} + +type testCaseItem struct { + str string + tok int +} + +func (s *testLexerSuite) TestSingleCharOther(c *C) { + table := []testCaseItem{ + {"AT", identifier}, + {"PLACEHOLDER", identifier}, + {"=", eq}, + {".", int('.')}, + } + runTest(c, table) +} + +func (s *testLexerSuite) TestAtLeadingIdentifier(c *C) { + table := []testCaseItem{ + {"@", singleAtIdentifier}, + {"@''", singleAtIdentifier}, + {"@1", singleAtIdentifier}, + {"@.1_", singleAtIdentifier}, + {"@-1.", singleAtIdentifier}, + {"@~", singleAtIdentifier}, + {"@$", singleAtIdentifier}, + {"@a_3cbbc", singleAtIdentifier}, + {"@`a_3cbbc`", singleAtIdentifier}, + {"@-3cbbc", singleAtIdentifier}, + {"@!3cbbc", singleAtIdentifier}, + {"@@global.test", doubleAtIdentifier}, + {"@@session.test", doubleAtIdentifier}, + {"@@local.test", doubleAtIdentifier}, + {"@@test", doubleAtIdentifier}, + {"@@global.`test`", doubleAtIdentifier}, + {"@@session.`test`", doubleAtIdentifier}, + {"@@local.`test`", doubleAtIdentifier}, + {"@@`test`", doubleAtIdentifier}, + } + runTest(c, table) +} + +func (s *testLexerSuite) TestUnderscoreCS(c *C) { + var v yySymType + scanner := NewScanner(`_utf8"string"`) + tok := scanner.Lex(&v) + c.Check(tok, Equals, underscoreCS) + tok = scanner.Lex(&v) + c.Check(tok, Equals, stringLit) + + scanner.reset("N'string'") + tok = scanner.Lex(&v) + c.Check(tok, Equals, underscoreCS) + tok = scanner.Lex(&v) + c.Check(tok, Equals, stringLit) +} + +func (s *testLexerSuite) TestLiteral(c *C) { + table := []testCaseItem{ + {`'''a'''`, stringLit}, + {`''a''`, stringLit}, + {`""a""`, stringLit}, + {`\'a\'`, int('\\')}, + {`\"a\"`, int('\\')}, + {"0.2314", decLit}, + {"1234567890123456789012345678901234567890", decLit}, + {"132.313", decLit}, + {"132.3e231", floatLit}, + {"132.3e-231", floatLit}, + {"001e-12", floatLit}, + {"23416", intLit}, + {"123test", identifier}, + {"123" + string(unicode.ReplacementChar) + "xxx", identifier}, + {"0", intLit}, + {"0x3c26", hexLit}, + {"x'13181C76734725455A'", hexLit}, + {"0b01", bitLit}, + {fmt.Sprintf("t1%c", 0), identifier}, + {"N'some text'", underscoreCS}, + {"n'some text'", underscoreCS}, + {"\\N", null}, + {".*", int('.')}, // `.`, `*` + {".1_t_1_x", int('.')}, // `.`, `1_t_1_x` + {"9e9e", floatLit}, // 9e9e = 9e9 + e + // Issue #3954 + {".1e23", floatLit}, // `.1e23` + {".123", decLit}, // `.123` + {".1*23", decLit}, // `.1`, `*`, `23` + {".1,23", decLit}, // `.1`, `,`, `23` + {".1 23", decLit}, // `.1`, `23` + // TODO: See #3963. The following test cases do not test the ambiguity. 
+ {".1$23", int('.')}, // `.`, `1$23` + {".1a23", int('.')}, // `.`, `1a23` + {".1e23$23", int('.')}, // `.`, `1e23$23` + {".1e23a23", int('.')}, // `.`, `1e23a23` + {".1C23", int('.')}, // `.`, `1C23` + {".1\u0081", int('.')}, // `.`, `1\u0081` + {".1\uff34", int('.')}, // `.`, `1\uff34` + {`b''`, bitLit}, + {`b'0101'`, bitLit}, + {`0b0101`, bitLit}, + } + runTest(c, table) +} + +func runTest(c *C, table []testCaseItem) { + var val yySymType + for _, v := range table { + l := NewScanner(v.str) + tok := l.Lex(&val) + c.Check(tok, Equals, v.tok, Commentf(v.str)) + } +} + +func (s *testLexerSuite) TestComment(c *C) { + + table := []testCaseItem{ + {"-- select --\n1", intLit}, + {"/*!40101 SET character_set_client = utf8 */;", set}, + {"/*+ BKA(t1) */", hintBegin}, + {"/* SET character_set_client = utf8 */;", int(';')}, + {"/* some comments */ SELECT ", selectKwd}, + {`-- comment continues to the end of line +SELECT`, selectKwd}, + {`# comment continues to the end of line +SELECT`, selectKwd}, + {"#comment\n123", intLit}, + {"--5", int('-')}, + {"--\nSELECT", selectKwd}, + {"--\tSELECT", 0}, + {"--\r\nSELECT", selectKwd}, + {"--", 0}, + } + runTest(c, table) +} + +func (s *testLexerSuite) TestscanQuotedIdent(c *C) { + l := NewScanner("`fk`") + l.r.peek() + tok, pos, lit := scanQuotedIdent(l) + c.Assert(pos.Offset, Equals, 0) + c.Assert(tok, Equals, quotedIdentifier) + c.Assert(lit, Equals, "fk") +} + +func (s *testLexerSuite) TestscanString(c *C) { + table := []struct { + raw string + expect string + }{ + {`' \n\tTest String'`, " \n\tTest String"}, + {`'\x\B'`, "xB"}, + {`'\0\'\"\b\n\r\t\\'`, "\000'\"\b\n\r\t\\"}, + {`'\Z'`, string(26)}, + {`'\%\_'`, `\%\_`}, + {`'hello'`, "hello"}, + {`'"hello"'`, `"hello"`}, + {`'""hello""'`, `""hello""`}, + {`'hel''lo'`, "hel'lo"}, + {`'\'hello'`, "'hello"}, + {`"hello"`, "hello"}, + {`"'hello'"`, "'hello'"}, + {`"''hello''"`, "''hello''"}, + {`"hel""lo"`, `hel"lo`}, + {`"\"hello"`, `"hello`}, + {`'disappearing\ backslash'`, "disappearing backslash"}, + {"'한국의中文UTF8およびテキストトラック'", "한국의中文UTF8およびテキストトラック"}, + {"'\\a\x90'", "a\x90"}, + {`"\aèàø»"`, `aèàø»`}, + } + + for _, v := range table { + l := NewScanner(v.raw) + tok, pos, lit := l.scan() + c.Assert(tok, Equals, stringLit) + c.Assert(pos.Offset, Equals, 0) + c.Assert(lit, Equals, v.expect) + } +} + +func (s *testLexerSuite) TestIdentifier(c *C) { + replacementString := string(unicode.ReplacementChar) + "xxx" + table := [][2]string{ + {`哈哈`, "哈哈"}, + {"`numeric`", "numeric"}, + {"\r\n \r \n \tthere\t \n", "there"}, + {`5number`, `5number`}, + {"1_x", "1_x"}, + {"0_x", "0_x"}, + {replacementString, replacementString}, + {"9e", "9e"}, + {"0b", "0b"}, + {"0b123", "0b123"}, + {"0b1ab", "0b1ab"}, + {"0B01", "0B01"}, + {"0x", "0x"}, + {"0x7fz3", "0x7fz3"}, + {"023a4", "023a4"}, + {"9eTSs", "9eTSs"}, + {fmt.Sprintf("t1%cxxx", 0), "t1"}, + } + l := &Scanner{} + for _, item := range table { + l.reset(item[0]) + var v yySymType + tok := l.Lex(&v) + c.Assert(tok, Equals, identifier) + c.Assert(v.ident, Equals, item[1]) + } +} + +func (s *testLexerSuite) TestSpecialComment(c *C) { + l := NewScanner("/*!40101 select\n5*/") + tok, pos, lit := l.scan() + c.Assert(tok, Equals, identifier) + c.Assert(lit, Equals, "select") + c.Assert(pos, Equals, Pos{0, 0, 9}) + + tok, pos, lit = l.scan() + c.Assert(tok, Equals, intLit) + c.Assert(lit, Equals, "5") + c.Assert(pos, Equals, Pos{1, 1, 16}) +} + +func (s *testLexerSuite) TestOptimizerHint(c *C) { + l := NewScanner(" /*+ BKA(t1) */") + tokens := []struct { + tok int + lit 
string + pos int + }{ + {hintBegin, "", 2}, + {identifier, "BKA", 6}, + {int('('), "(", 9}, + {identifier, "t1", 10}, + {int(')'), ")", 12}, + {hintEnd, "", 14}, + } + for i := 0; ; i++ { + tok, pos, lit := l.scan() + if tok == 0 { + return + } + c.Assert(tok, Equals, tokens[i].tok, Commentf("%d", i)) + c.Assert(lit, Equals, tokens[i].lit, Commentf("%d", i)) + c.Assert(pos.Offset, Equals, tokens[i].pos, Commentf("%d", i)) + } +} + +func (s *testLexerSuite) TestInt(c *C) { + tests := []struct { + input string + expect uint64 + }{ + {"01000001783", 1000001783}, + {"00001783", 1783}, + {"0", 0}, + {"0000", 0}, + {"01", 1}, + {"10", 10}, + } + scanner := NewScanner("") + for _, t := range tests { + var v yySymType + scanner.reset(t.input) + tok := scanner.Lex(&v) + c.Assert(tok, Equals, intLit) + switch i := v.item.(type) { + case int64: + c.Assert(uint64(i), Equals, t.expect) + case uint64: + c.Assert(i, Equals, t.expect) + default: + c.Fail() + } + } +} + +func (s *testLexerSuite) TestSQLModeANSIQuotes(c *C) { + tests := []struct { + input string + tok int + ident string + }{ + {`"identifier"`, identifier, "identifier"}, + {"`identifier`", identifier, "identifier"}, + {`"identifier""and"`, identifier, `identifier"and`}, + {`'string''string'`, stringLit, "string'string"}, + {`"identifier"'and'`, identifier, "identifier"}, + {`'string'"identifier"`, stringLit, "string"}, + } + scanner := NewScanner("") + scanner.SetSQLMode(mysql.ModeANSIQuotes) + for _, t := range tests { + var v yySymType + scanner.reset(t.input) + tok := scanner.Lex(&v) + c.Assert(tok, Equals, t.tok) + c.Assert(v.ident, Equals, t.ident) + } + scanner.reset(`'string' 'string'`) + var v yySymType + tok := scanner.Lex(&v) + c.Assert(tok, Equals, stringLit) + c.Assert(v.ident, Equals, "string") + tok = scanner.Lex(&v) + c.Assert(tok, Equals, stringLit) + c.Assert(v.ident, Equals, "string") +} + +func (s *testLexerSuite) TestIllegal(c *C) { + table := []testCaseItem{ + {"'", invalid}, + {"'fu", invalid}, + {"'\\n", invalid}, + {"'\\", invalid}, + {fmt.Sprintf("%c", 0), invalid}, + {"`", invalid}, + {`"`, invalid}, + {"@`", invalid}, + {"@'", invalid}, + {`@"`, invalid}, + {"@@`", invalid}, + {"@@global.`", invalid}, + } + runTest(c, table) +} diff --git a/parser/misc.go b/parser/misc.go new file mode 100644 index 0000000..a6df2cb --- /dev/null +++ b/parser/misc.go @@ -0,0 +1,838 @@ +// Copyright 2016 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. 
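+
+// misc.go wires the scanner to its lookup tables: the character trie
+// (ruleTable) built in init, the keyword map tokenMap, the builtin-function
+// map btFuncTokenMap, and helpers such as isTokenIdentifier and handleIdent.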
+ +package parser + +import ( + "fmt" + "strconv" + "strings" + + "github.com/pingcap/tidb/parser/charset" +) + +// CommentCodeVersion is used to track the highest version can be parsed in the comment with pattern /*T!00001 xxx */ +type CommentCodeVersion int + +const ( + CommentCodeNoVersion CommentCodeVersion = iota + CommentCodeAutoRandom CommentCodeVersion = 40000 + + CommentCodeCurrentVersion +) + +func (ccv CommentCodeVersion) String() string { + return fmt.Sprintf("%05d", ccv) +} + +func extractVersionCodeInComment(comment string) CommentCodeVersion { + code, err := strconv.Atoi(specVersionCodeValue.FindString(comment)) + if err != nil { + return CommentCodeNoVersion + } + return CommentCodeVersion(code) +} + +// WrapStringWithCodeVersion convert a string `str` to `/*T!xxxxx str */`, where `xxxxx` is determined by CommentCodeVersion. +func WrapStringWithCodeVersion(str string, ccv CommentCodeVersion) string { + return fmt.Sprintf("/*T!%05d %s */", ccv, str) +} + +func isLetter(ch rune) bool { + return (ch >= 'a' && ch <= 'z') || (ch >= 'A' && ch <= 'Z') +} + +func isDigit(ch rune) bool { + return ch >= '0' && ch <= '9' +} + +func isIdentChar(ch rune) bool { + return isLetter(ch) || isDigit(ch) || ch == '_' || ch == '$' || isIdentExtend(ch) +} + +func isIdentExtend(ch rune) bool { + return ch >= 0x80 && ch <= '\uffff' +} + +func isUserVarChar(ch rune) bool { + return isLetter(ch) || isDigit(ch) || ch == '_' || ch == '$' || ch == '.' || isIdentExtend(ch) +} + +type trieNode struct { + childs [256]*trieNode + token int + fn func(s *Scanner) (int, Pos, string) +} + +var ruleTable trieNode + +func initTokenByte(c byte, tok int) { + if ruleTable.childs[c] == nil { + ruleTable.childs[c] = &trieNode{} + } + ruleTable.childs[c].token = tok +} + +func initTokenString(str string, tok int) { + node := &ruleTable + for _, c := range str { + if node.childs[c] == nil { + node.childs[c] = &trieNode{} + } + node = node.childs[c] + } + node.token = tok +} + +func initTokenFunc(str string, fn func(s *Scanner) (int, Pos, string)) { + for i := 0; i < len(str); i++ { + c := str[i] + if ruleTable.childs[c] == nil { + ruleTable.childs[c] = &trieNode{} + } + ruleTable.childs[c].fn = fn + } + return +} + +func init() { + // invalid is a special token defined in parser.y, when parser meet + // this token, it will throw an error. + // set root trie node's token to invalid, so when input match nothing + // in the trie, invalid will be the default return token. 
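+ // Multi-character operators are stored as chains of child nodes. For
+ // example, initTokenString("<=>", nulleq) below creates the path
+ // '<' -> '=' -> '>' and stores nulleq on the last node, while the
+ // intermediate nodes keep their own tokens (int('<') and le), so the
+ // shorter operators still match.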
+ ruleTable.token = invalid + initTokenByte('*', int('*')) + initTokenByte('/', int('/')) + initTokenByte('+', int('+')) + initTokenByte('>', int('>')) + initTokenByte('<', int('<')) + initTokenByte('(', int('(')) + initTokenByte(')', int(')')) + initTokenByte('[', int('[')) + initTokenByte(']', int(']')) + initTokenByte(';', int(';')) + initTokenByte(',', int(',')) + initTokenByte('&', int('&')) + initTokenByte('%', int('%')) + initTokenByte(':', int(':')) + initTokenByte('|', int('|')) + initTokenByte('!', int('!')) + initTokenByte('^', int('^')) + initTokenByte('~', int('~')) + initTokenByte('\\', int('\\')) + initTokenByte('=', eq) + initTokenByte('{', int('{')) + initTokenByte('}', int('}')) + + initTokenString("||", pipes) + initTokenString("&&", andand) + initTokenString("&^", andnot) + initTokenString(":=", assignmentEq) + initTokenString("<=>", nulleq) + initTokenString(">=", ge) + initTokenString("<=", le) + initTokenString("!=", neq) + initTokenString("<>", neqSynonym) + initTokenString("<<", lsh) + initTokenString(">>", rsh) + initTokenString("\\N", null) + + initTokenFunc("@", startWithAt) + initTokenFunc("/", startWithSlash) + initTokenFunc("-", startWithDash) + initTokenFunc("#", startWithSharp) + initTokenFunc("Xx", startWithXx) + initTokenFunc("Nn", startWithNn) + initTokenFunc("Bb", startWithBb) + initTokenFunc(".", startWithDot) + initTokenFunc("_$ACDEFGHIJKLMOPQRSTUVWYZacdefghijklmopqrstuvwyz", scanIdentifier) + initTokenFunc("`", scanQuotedIdent) + initTokenFunc("0123456789", startWithNumber) + initTokenFunc("'\"", startString) +} + +var tokenMap = map[string]int{ + "ACCOUNT": account, + "ACTION": action, + "ADD": add, + "ADDDATE": addDate, + "ADVISE": advise, + "ADMIN": admin, + "AFTER": after, + "AGAINST": against, + "AGG_TO_COP": hintAggToCop, + "ALL": all, + "ALGORITHM": algorithm, + "ALTER": alter, + "ALWAYS": always, + "ANALYZE": analyze, + "AND": and, + "ANY": any, + "AS": as, + "ASC": asc, + "ASCII": ascii, + "AUTO_INCREMENT": autoIncrement, + "AUTO_RANDOM": autoRandom, + "AVG": avg, + "AVG_ROW_LENGTH": avgRowLength, + "BEGIN": begin, + "BETWEEN": between, + "BIGINT": bigIntType, + "BINARY": binaryType, + "BINLOG": binlog, + "BIT": bitType, + "BIT_AND": bitAnd, + "BIT_OR": bitOr, + "BIT_XOR": bitXor, + "BLOB": blobType, + "BLOCK": block, + "BOOL": boolType, + "BOOLEAN": booleanType, + "BOTH": both, + "BOUND": bound, + "BTREE": btree, + "BUCKETS": buckets, + "BUILTINS": builtins, + "BY": by, + "BYTE": byteType, + "CACHE": cache, + "CANCEL": cancel, + "CASCADE": cascade, + "CASCADED": cascaded, + "CASE": caseKwd, + "CAST": cast, + "CAPTURE": capture, + "CHANGE": change, + "CHAR": charType, + "CHARACTER": character, + "CHARSET": charsetKwd, + "CHECK": check, + "CHECKSUM": checksum, + "CIPHER": cipher, + "CLEANUP": cleanup, + "CLIENT": client, + "CMSKETCH": cmSketch, + "COALESCE": coalesce, + "COLLATE": collate, + "COLLATION": collation, + "COLUMN": column, + "COLUMN_FORMAT": columnFormat, + "COLUMNS": columns, + "COMMENT": comment, + "COMMIT": commit, + "COMMITTED": committed, + "COMPACT": compact, + "COMPRESSED": compressed, + "COMPRESSION": compression, + "CONNECTION": connection, + "CONSISTENT": consistent, + "CONSTRAINT": constraint, + "CONTEXT": context, + "CONVERT": convert, + "COPY": copyKwd, + "COUNT": count, + "CPU": cpu, + "CREATE": create, + "CROSS": cross, + "CURRENT": current, + "CURRENT_DATE": currentDate, + "CURRENT_TIME": currentTime, + "CURRENT_TIMESTAMP": currentTs, + "CURRENT_USER": currentUser, + "CURRENT_ROLE": currentRole, + "CURTIME": 
curTime, + "CYCLE": cycle, + "DATA": data, + "DATABASE": database, + "DATABASES": databases, + "DATE": dateType, + "DATE_ADD": dateAdd, + "DATE_SUB": dateSub, + "DATETIME": datetimeType, + "DAY": day, + "DAY_HOUR": dayHour, + "DAY_MICROSECOND": dayMicrosecond, + "DAY_MINUTE": dayMinute, + "DAY_SECOND": daySecond, + "DDL": ddl, + "DEALLOCATE": deallocate, + "DEC": decimalType, + "DECIMAL": decimalType, + "DEFAULT": defaultKwd, + "DEFINER": definer, + "DELAY_KEY_WRITE": delayKeyWrite, + "DELAYED": delayed, + "DELETE": deleteKwd, + "DEPTH": depth, + "DESC": desc, + "DESCRIBE": describe, + "DIRECTORY": directory, + "DISABLE": disable, + "DISCARD": discard, + "DISK": disk, + "DISTINCT": distinct, + "DISTINCTROW": distinct, + "DIV": div, + "DO": do, + "DOUBLE": doubleType, + "DRAINER": drainer, + "DROP": drop, + "DUAL": dual, + "DUPLICATE": duplicate, + "DYNAMIC": dynamic, + "ELSE": elseKwd, + "ENABLE": enable, + "ENABLE_PLAN_CACHE": hintEnablePlanCache, + "ENCLOSED": enclosed, + "ENCRYPTION": encryption, + "END": end, + "ENFORCED": enforced, + "ENGINE": engine, + "ENGINES": engines, + "ENUM": enum, + "ESCAPE": escape, + "ESCAPED": escaped, + "EVENT": event, + "EVENTS": events, + "EVOLVE": evolve, + "EXACT": exact, + "EXCLUSIVE": exclusive, + "EXCEPT": except, + "EXCHANGE": exchange, + "EXECUTE": execute, + "EXISTS": exists, + "EXPANSION": expansion, + "EXPIRE": expire, + "EXPLAIN": explain, + "EXTENDED": extended, + "EXTRACT": extract, + "FALSE": falseKwd, + "FAULTS": faultsSym, + "FIELDS": fields, + "FIRST": first, + "FIXED": fixed, + "FLOAT": floatType, + "FLUSH": flush, + "FLASHBACK": flashback, + "FOLLOWING": following, + "FOR": forKwd, + "FORCE": force, + "FOREIGN": foreign, + "FORMAT": format, + "FROM": from, + "FULL": full, + "FULLTEXT": fulltext, + "FUNCTION": function, + "GENERATED": generated, + "GET_FORMAT": getFormat, + "GLOBAL": global, + "GRANT": grant, + "GRANTS": grants, + "GROUP": group, + "GROUP_CONCAT": groupConcat, + "HASH": hash, + "HASH_AGG": hintHASHAGG, + "HASH_JOIN": hintHJ, + "HAVING": having, + "HIGH_PRIORITY": highPriority, + "HISTORY": history, + "HOSTS": hosts, + "HOUR": hour, + "HOUR_MICROSECOND": hourMicrosecond, + "HOUR_MINUTE": hourMinute, + "HOUR_SECOND": hourSecond, + "IDENTIFIED": identified, + "IF": ifKwd, + "IGNORE": ignore, + "IGNORE_INDEX": hintIgnoreIndex, + "IMPORT": importKwd, + "IN": in, + "INCREMENT": increment, + "INCREMENTAL": incremental, + "INDEX": index, + "INDEXES": indexes, + "INFILE": infile, + "INL_JOIN": hintINLJ, + "INL_HASH_JOIN": hintINLHJ, + "INL_MERGE_JOIN": hintINLMJ, + "INNER": inner, + "INPLACE": inplace, + "INSTANT": instant, + "INSERT": insert, + "INSERT_METHOD": insertMethod, + "INT": intType, + "INT1": int1Type, + "INT2": int2Type, + "INT3": int3Type, + "INT4": int4Type, + "INT8": int8Type, + "IO": io, + "IPC": ipc, + "INTEGER": integerType, + "INTERVAL": interval, + "INTERNAL": internal, + "INTO": into, + "INVISIBLE": invisible, + "INVOKER": invoker, + "IS": is, + "ISSUER": issuer, + "ISOLATION": isolation, + "JOBS": jobs, + "JOB": job, + "JOIN": join, + "JSON": jsonType, + "KEY": key, + "KEY_BLOCK_SIZE": keyBlockSize, + "KEYS": keys, + "KILL": kill, + "LABELS": labels, + "LANGUAGE": language, + "LAST": last, + "LEADING": leading, + "LEFT": left, + "LESS": less, + "LEVEL": level, + "LIKE": like, + "LIMIT": limit, + "LINES": lines, + "LINEAR": linear, + "LIST": list, + "LOAD": load, + "LOCAL": local, + "LOCALTIME": localTime, + "LOCALTIMESTAMP": localTs, + "LOCATION": location, + "LOCK": lock, + "LOGS": logs, + "LONG": long, + 
"LONGBLOB": longblobType, + "LONGTEXT": longtextType, + "LOW_PRIORITY": lowPriority, + "MASTER": master, + "MATCH": match, + "MAX": max, + "MAX_CONNECTIONS_PER_HOUR": maxConnectionsPerHour, + "MAX_EXECUTION_TIME": maxExecutionTime, + "MAX_IDXNUM": max_idxnum, + "MAX_MINUTES": max_minutes, + "MAX_QUERIES_PER_HOUR": maxQueriesPerHour, + "MAX_ROWS": maxRows, + "MAX_UPDATES_PER_HOUR": maxUpdatesPerHour, + "MAX_USER_CONNECTIONS": maxUserConnections, + "MAXVALUE": maxValue, + "MEDIUMBLOB": mediumblobType, + "MEDIUMINT": mediumIntType, + "MEDIUMTEXT": mediumtextType, + "MEMORY": memory, + "MEMORY_QUOTA": hintMemoryQuota, + "MERGE": merge, + "MICROSECOND": microsecond, + "MIN": min, + "MIN_ROWS": minRows, + "MINUTE": minute, + "MINUTE_MICROSECOND": minuteMicrosecond, + "MINUTE_SECOND": minuteSecond, + "MINVALUE": minValue, + "MOD": mod, + "MODE": mode, + "MODIFY": modify, + "MONTH": month, + "NAMES": names, + "NATIONAL": national, + "NATURAL": natural, + "NEVER": never, + "NEXT_ROW_ID": next_row_id, + "NO": no, + "NO_INDEX_MERGE": hintNoIndexMerge, + "NO_SWAP_JOIN_INPUTS": hintNSJI, + "NO_WRITE_TO_BINLOG": noWriteToBinLog, + "NOCACHE": nocache, + "NOCYCLE": nocycle, + "NODE_ID": nodeID, + "NODE_STATE": nodeState, + "NODEGROUP": nodegroup, + "NOMAXVALUE": nomaxvalue, + "NOMINVALUE": nominvalue, + "NONE": none, + "NOORDER": noorder, + "NOT": not, + "NOW": now, + "NULL": null, + "NULLS": nulls, + "NUMERIC": numericType, + "NCHAR": ncharType, + "NVARCHAR": nvarcharType, + "OFFSET": offset, + "OLAP": hintOLAP, + "OLTP": hintOLTP, + "ON": on, + "ONLY": only, + "OPTIMISTIC": optimistic, + "OPTIMIZE": optimize, + "OPTION": option, + "OPTIONALLY": optionally, + "OR": or, + "ORDER": order, + "OUTER": outer, + "PACK_KEYS": packKeys, + "PAGE": pageSym, + "PARSER": parser, + "PARTIAL": partial, + "PARTITION": partition, + "PARTITIONING": partitioning, + "PARTITIONS": partitions, + "PASSWORD": password, + "PESSIMISTIC": pessimistic, + "PER_TABLE": per_table, + "PER_DB": per_db, + "PLUGINS": plugins, + "POSITION": position, + "PRECEDING": preceding, + "PRECISION": precisionType, + "PREPARE": prepare, + "PRIMARY": primary, + "PRIVILEGES": privileges, + "PROCEDURE": procedure, + "PROCESS": process, + "PROCESSLIST": processlist, + "PROFILE": profile, + "PROFILES": profiles, + "PUMP": pump, + "QB_NAME": hintQBName, + "QUARTER": quarter, + "QUERY": query, + "QUERY_TYPE": hintQueryType, + "QUERIES": queries, + "QUICK": quick, + "SHARD_ROW_ID_BITS": shardRowIDBits, + "PRE_SPLIT_REGIONS": preSplitRegions, + "RANGE": rangeKwd, + "RECOVER": recover, + "REBUILD": rebuild, + "READ": read, + "READ_CONSISTENT_REPLICA": hintReadConsistentReplica, + "READ_FROM_STORAGE": hintReadFromStorage, + "REAL": realType, + "RECENT": recent, + "REDUNDANT": redundant, + "REFERENCES": references, + "REGEXP": regexpKwd, + "REGIONS": regions, + "REGION": region, + "RELOAD": reload, + "REMOVE": remove, + "RENAME": rename, + "REORGANIZE": reorganize, + "REPAIR": repair, + "REPEAT": repeat, + "REPEATABLE": repeatable, + "REPLACE": replace, + "RESPECT": respect, + "REPLICA": replica, + "REPLICATION": replication, + "REQUIRE": require, + "RESTRICT": restrict, + "REVERSE": reverse, + "REVOKE": revoke, + "RIGHT": right, + "RLIKE": rlike, + "ROLE": role, + "ROLLBACK": rollback, + "ROUTINE": routine, + "ROW": row, + "ROW_COUNT": rowCount, + "ROW_FORMAT": rowFormat, + "RTREE": rtree, + "SAMPLES": samples, + "SWAP_JOIN_INPUTS": hintSJI, + "SCHEMA": database, + "SCHEMAS": databases, + "SECOND": second, + "SECONDARY_ENGINE": secondaryEngine, + 
"SECONDARY_LOAD": secondaryLoad, + "SECONDARY_UNLOAD": secondaryUnload, + "SECOND_MICROSECOND": secondMicrosecond, + "SECURITY": security, + "SELECT": selectKwd, + "SEQUENCE": sequence, + "SERIAL": serial, + "SERIALIZABLE": serializable, + "SESSION": session, + "SET": set, + "SEPARATOR": separator, + "SHARE": share, + "SHARED": shared, + "SHOW": show, + "SHUTDOWN": shutdown, + "SIGNED": signed, + "SIMPLE": simple, + "SLAVE": slave, + "SLOW": slow, + "SM_JOIN": hintSMJ, + "SMALLINT": smallIntType, + "SNAPSHOT": snapshot, + "SOME": some, + "SPATIAL": spatial, + "SPLIT": split, + "SQL": sql, + "SQL_BIG_RESULT": sqlBigResult, + "SQL_BUFFER_RESULT": sqlBufferResult, + "SQL_CACHE": sqlCache, + "SQL_CALC_FOUND_ROWS": sqlCalcFoundRows, + "SQL_NO_CACHE": sqlNoCache, + "SQL_SMALL_RESULT": sqlSmallResult, + "SQL_TSI_DAY": sqlTsiDay, + "SQL_TSI_HOUR": sqlTsiHour, + "SQL_TSI_MINUTE": sqlTsiMinute, + "SQL_TSI_MONTH": sqlTsiMonth, + "SQL_TSI_QUARTER": sqlTsiQuarter, + "SQL_TSI_SECOND": sqlTsiSecond, + "SQL_TSI_WEEK": sqlTsiWeek, + "SQL_TSI_YEAR": sqlTsiYear, + "SOURCE": source, + "SSL": ssl, + "STALENESS": staleness, + "START": start, + "STARTING": starting, + "STATS": stats, + "STATS_BUCKETS": statsBuckets, + "STATS_HISTOGRAMS": statsHistograms, + "STATS_HEALTHY": statsHealthy, + "STATS_META": statsMeta, + "STATS_AUTO_RECALC": statsAutoRecalc, + "STATS_PERSISTENT": statsPersistent, + "STATS_SAMPLE_PAGES": statsSamplePages, + "STATUS": status, + "STORAGE": storage, + "SWAPS": swaps, + "SWITCHES": switchesSym, + "SYSTEM_TIME": systemTime, + "OPEN": open, + "STD": stddevPop, + "STDDEV": stddevPop, + "STDDEV_POP": stddevPop, + "STDDEV_SAMP": stddevSamp, + "STORED": stored, + "STRAIGHT_JOIN": straightJoin, + "STREAM_AGG": hintSTREAMAGG, + "STRONG": strong, + "SUBDATE": subDate, + "SUBJECT": subject, + "SUBPARTITION": subpartition, + "SUBPARTITIONS": subpartitions, + "SUBSTR": substring, + "SUBSTRING": substring, + "SUM": sum, + "SUPER": super, + "TABLE": tableKwd, + "TABLE_CHECKSUM": tableChecksum, + "TABLES": tables, + "TABLESPACE": tablespace, + "TEMPORARY": temporary, + "TEMPTABLE": temptable, + "TERMINATED": terminated, + "TEXT": textType, + "THAN": than, + "THEN": then, + "TIDB": tidb, + "TIDB_HJ": hintHJ, + "TIDB_INLJ": hintINLJ, + "TIDB_SMJ": hintSMJ, + "TIKV": hintTiKV, + "TIFLASH": hintTiFlash, + "TIME": timeType, + "TIMESTAMP": timestampType, + "TIMESTAMPADD": timestampAdd, + "TIMESTAMPDIFF": timestampDiff, + "TINYBLOB": tinyblobType, + "TINYINT": tinyIntType, + "TINYTEXT": tinytextType, + "TO": to, + "TOKUDB_DEFAULT": tokudbDefault, + "TOKUDB_FAST": tokudbFast, + "TOKUDB_LZMA": tokudbLzma, + "TOKUDB_QUICKLZ": tokudbQuickLZ, + "TOKUDB_SNAPPY": tokudbSnappy, + "TOKUDB_SMALL": tokudbSmall, + "TOKUDB_UNCOMPRESSED": tokudbUncompressed, + "TOKUDB_ZLIB": tokudbZlib, + "TOP": top, + "TOPN": topn, + "TRACE": trace, + "TRADITIONAL": traditional, + "TRAILING": trailing, + "TRANSACTION": transaction, + "TRIGGER": trigger, + "TRIGGERS": triggers, + "TRIM": trim, + "TRUE": trueKwd, + "TRUNCATE": truncate, + "TYPE": tp, + "UNBOUNDED": unbounded, + "UNCOMMITTED": uncommitted, + "UNICODE": unicodeSym, + "UNDEFINED": undefined, + "UNION": union, + "UNIQUE": unique, + "UNKNOWN": unknown, + "UNLOCK": unlock, + "UNSIGNED": unsigned, + "UNTIL": until, + "UPDATE": update, + "USAGE": usage, + "USE": use, + "USE_INDEX": hintUseIndex, + "USE_INDEX_MERGE": hintUseIndexMerge, + "USE_PLAN_CACHE": hintUsePlanCache, + "USE_TOJA": hintUseToja, + "USER": user, + "USING": using, + "UTC_DATE": utcDate, + "UTC_TIME": utcTime, + 
"UTC_TIMESTAMP": utcTimestamp, + "VALIDATION": validation, + "VALUE": value, + "VALUES": values, + "VARBINARY": varbinaryType, + "VARCHAR": varcharType, + "VARCHARACTER": varcharacter, + "VARIABLES": variables, + "VARIANCE": varPop, + "VARYING": varying, + "VAR_POP": varPop, + "VAR_SAMP": varSamp, + "VIEW": view, + "VIRTUAL": virtual, + "VISIBLE": visible, + "WARNINGS": warnings, + "ERRORS": identSQLErrors, + "WEEK": week, + "WHEN": when, + "WHERE": where, + "WIDTH": width, + "WITH": with, + "WITHOUT": without, + "WRITE": write, + "XOR": xor, + "X509": x509, + "YEAR": yearType, + "YEAR_MONTH": yearMonth, + "ZEROFILL": zerofill, + "BINDING": binding, + "BINDINGS": bindings, + "EXPR_PUSHDOWN_BLACKLIST": exprPushdownBlacklist, + "OPT_RULE_BLACKLIST": optRuleBlacklist, + "NOWAIT": nowait, +} + +// See https://dev.mysql.com/doc/refman/5.7/en/function-resolution.html for details +var btFuncTokenMap = map[string]int{ + "ADDDATE": builtinAddDate, + "BIT_AND": builtinBitAnd, + "BIT_OR": builtinBitOr, + "BIT_XOR": builtinBitXor, + "CAST": builtinCast, + "COUNT": builtinCount, + "CURDATE": builtinCurDate, + "CURTIME": builtinCurTime, + "DATE_ADD": builtinDateAdd, + "DATE_SUB": builtinDateSub, + "EXTRACT": builtinExtract, + "GROUP_CONCAT": builtinGroupConcat, + "MAX": builtinMax, + "MID": builtinSubstring, + "MIN": builtinMin, + "NOW": builtinNow, + "POSITION": builtinPosition, + "SESSION_USER": builtinUser, + "STD": builtinStddevPop, + "STDDEV": builtinStddevPop, + "STDDEV_POP": builtinStddevPop, + "STDDEV_SAMP": builtinStddevSamp, + "SUBDATE": builtinSubDate, + "SUBSTR": builtinSubstring, + "SUBSTRING": builtinSubstring, + "SUM": builtinSum, + "SYSDATE": builtinSysDate, + "SYSTEM_USER": builtinUser, + "TRIM": builtinTrim, + "VARIANCE": builtinVarPop, + "VAR_POP": builtinVarPop, + "VAR_SAMP": builtinVarSamp, +} + +// aliases are strings directly map to another string and use the same token. +var aliases = map[string]string{ + "SCHEMA": "DATABASE", + "SCHEMAS": "DATABASES", + "DEC": "DECIMAL", + "SUBSTR": "SUBSTRING", + "TIDB_HJ": "HASH_JOIN", + "TIDB_INLJ": "INL_JOIN", + "TIDB_SMJ": "SM_JOIN", +} + +func (s *Scanner) isTokenIdentifier(lit string, offset int) int { + // An identifier before or after '.' means it is part of a qualified identifier. + // We do not parse it as keyword. + if s.r.peek() == '.' { + return 0 + } + if offset > 0 && s.r.s[offset-1] == '.' 
{ + return 0 + } + buf := &s.buf + buf.Reset() + buf.Grow(len(lit)) + data := buf.Bytes()[:len(lit)] + for i := 0; i < len(lit); i++ { + if lit[i] >= 'a' && lit[i] <= 'z' { + data[i] = lit[i] + 'A' - 'a' + } else { + data[i] = lit[i] + } + } + + checkBtFuncToken := false + if s.r.peek() == '(' { + checkBtFuncToken = true + } else if s.sqlMode.HasIgnoreSpaceMode() { + s.skipWhitespace() + if s.r.peek() == '(' { + checkBtFuncToken = true + } + } + if checkBtFuncToken { + if tok := btFuncTokenMap[string(data)]; tok != 0 { + return tok + } + } + tok, _ := tokenMap[string(data)] + return tok +} + +func handleIdent(lval *yySymType) int { + s := lval.ident + // A character string literal may have an optional character set introducer and COLLATE clause: + // [_charset_name]'string' [COLLATE collation_name] + // See https://dev.mysql.com/doc/refman/5.7/en/charset-literal.html + if !strings.HasPrefix(s, "_") { + return identifier + } + cs, _, err := charset.GetCharsetInfo(s[1:]) + if err != nil { + return identifier + } + lval.ident = cs + return underscoreCS +} diff --git a/parser/model/ddl.go b/parser/model/ddl.go new file mode 100644 index 0000000..0930149 --- /dev/null +++ b/parser/model/ddl.go @@ -0,0 +1,414 @@ +// Copyright 2015 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package model + +import ( + "encoding/json" + "fmt" + "math" + "sync" + "time" + + "github.com/pingcap/errors" + "github.com/pingcap/tidb/parser/terror" +) + +// ActionType is the type for DDL action. +type ActionType byte + +// List DDL actions. +const ( + ActionNone ActionType = 0 + ActionCreateSchema ActionType = 1 + ActionDropSchema ActionType = 2 + ActionCreateTable ActionType = 3 + ActionDropTable ActionType = 4 + ActionAddColumn ActionType = 5 + ActionDropColumn ActionType = 6 + ActionAddIndex ActionType = 7 + ActionDropIndex ActionType = 8 + ActionAddForeignKey ActionType = 9 + ActionDropForeignKey ActionType = 10 + ActionTruncateTable ActionType = 11 + ActionModifyColumn ActionType = 12 + ActionRebaseAutoID ActionType = 13 + ActionRenameTable ActionType = 14 + ActionSetDefaultValue ActionType = 15 + ActionShardRowID ActionType = 16 + ActionModifyTableComment ActionType = 17 + ActionRenameIndex ActionType = 18 + ActionAddTablePartition ActionType = 19 + ActionDropTablePartition ActionType = 20 + ActionCreateView ActionType = 21 + ActionModifyTableCharsetAndCollate ActionType = 22 + ActionTruncateTablePartition ActionType = 23 + ActionDropView ActionType = 24 + ActionRecoverTable ActionType = 25 + ActionModifySchemaCharsetAndCollate ActionType = 26 + ActionLockTable ActionType = 27 + ActionUnlockTable ActionType = 28 + ActionRepairTable ActionType = 29 + ActionSetTiFlashReplica ActionType = 30 + ActionUpdateTiFlashReplicaStatus ActionType = 31 + ActionAddPrimaryKey ActionType = 32 + ActionDropPrimaryKey ActionType = 33 +) + +const ( + // AddIndexStr is a string related to the operation of "add index". 
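+ // AddPrimaryKeyStr is the equivalent string for "add primary key".
+ // Both constants are reused as values in actionMap below.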
+ AddIndexStr = "add index" + AddPrimaryKeyStr = "add primary key" +) + +var actionMap = map[ActionType]string{ + ActionCreateSchema: "create schema", + ActionDropSchema: "drop schema", + ActionCreateTable: "create table", + ActionDropTable: "drop table", + ActionAddColumn: "add column", + ActionDropColumn: "drop column", + ActionAddIndex: AddIndexStr, + ActionDropIndex: "drop index", + ActionAddForeignKey: "add foreign key", + ActionDropForeignKey: "drop foreign key", + ActionTruncateTable: "truncate table", + ActionModifyColumn: "modify column", + ActionRebaseAutoID: "rebase auto_increment ID", + ActionRenameTable: "rename table", + ActionSetDefaultValue: "set default value", + ActionShardRowID: "shard row ID", + ActionModifyTableComment: "modify table comment", + ActionRenameIndex: "rename index", + ActionAddTablePartition: "add partition", + ActionDropTablePartition: "drop partition", + ActionCreateView: "create view", + ActionModifyTableCharsetAndCollate: "modify table charset and collate", + ActionTruncateTablePartition: "truncate partition", + ActionDropView: "drop view", + ActionRecoverTable: "recover table", + ActionModifySchemaCharsetAndCollate: "modify schema charset and collate", + ActionLockTable: "lock table", + ActionUnlockTable: "unlock table", + ActionRepairTable: "repair table", + ActionSetTiFlashReplica: "set tiflash replica", + ActionUpdateTiFlashReplicaStatus: "update tiflash replica status", + ActionAddPrimaryKey: AddPrimaryKeyStr, + ActionDropPrimaryKey: "drop primary key", +} + +// String return current ddl action in string +func (action ActionType) String() string { + if v, ok := actionMap[action]; ok { + return v + } + return "none" +} + +// HistoryInfo is used for binlog. +type HistoryInfo struct { + SchemaVersion int64 + DBInfo *DBInfo + TableInfo *TableInfo + FinishedTS uint64 +} + +// AddDBInfo adds schema version and schema information that are used for binlog. +// dbInfo is added in the following operations: create database, drop database. +func (h *HistoryInfo) AddDBInfo(schemaVer int64, dbInfo *DBInfo) { + h.SchemaVersion = schemaVer + h.DBInfo = dbInfo +} + +// AddTableInfo adds schema version and table information that are used for binlog. +// tblInfo is added except for the following operations: create database, drop database. +func (h *HistoryInfo) AddTableInfo(schemaVer int64, tblInfo *TableInfo) { + h.SchemaVersion = schemaVer + h.TableInfo = tblInfo +} + +// Clean cleans history information. +func (h *HistoryInfo) Clean() { + h.SchemaVersion = 0 + h.DBInfo = nil + h.TableInfo = nil +} + +// DDLReorgMeta is meta info of DDL reorganization. +type DDLReorgMeta struct { + // EndHandle is the last handle of the adding indices table. + // We should only backfill indices in the range [startHandle, EndHandle]. + EndHandle int64 `json:"end_handle"` +} + +// NewDDLReorgMeta new a DDLReorgMeta. +func NewDDLReorgMeta() *DDLReorgMeta { + return &DDLReorgMeta{ + EndHandle: math.MaxInt64, + } +} + +// Job is for a DDL operation. +type Job struct { + ID int64 `json:"id"` + Type ActionType `json:"type"` + SchemaID int64 `json:"schema_id"` + TableID int64 `json:"table_id"` + SchemaName string `json:"schema_name"` + State JobState `json:"state"` + Error *terror.Error `json:"err"` + // ErrorCount will be increased, every time we meet an error when running job. + ErrorCount int64 `json:"err_count"` + // RowCount means the number of rows that are processed. 
+ RowCount int64 `json:"row_count"` + Mu sync.Mutex `json:"-"` + Args []interface{} `json:"-"` + // RawArgs : We must use json raw message to delay parsing special args. + RawArgs json.RawMessage `json:"raw_args"` + SchemaState SchemaState `json:"schema_state"` + // SnapshotVer means snapshot version for this job. + SnapshotVer uint64 `json:"snapshot_ver"` + // StartTS uses timestamp allocated by TSO. + // Now it's the TS when we put the job to TiKV queue. + StartTS uint64 `json:"start_ts"` + // DependencyID is the job's ID that the current job depends on. + DependencyID int64 `json:"dependency_id"` + // Query string of the ddl job. + Query string `json:"query"` + BinlogInfo *HistoryInfo `json:"binlog"` + + // Version indicates the DDL job version. For old jobs, it will be 0. + Version int64 `json:"version"` + + // ReorgMeta is meta info of ddl reorganization. + // This field is depreciated. + ReorgMeta *DDLReorgMeta `json:"reorg_meta"` + + // Priority is only used to set the operation priority of adding indices. + Priority int `json:"priority"` +} + +// FinishTableJob is called when a job is finished. +// It updates the job's state information and adds tblInfo to the binlog. +func (job *Job) FinishTableJob(jobState JobState, schemaState SchemaState, ver int64, tblInfo *TableInfo) { + job.State = jobState + job.SchemaState = schemaState + job.BinlogInfo.AddTableInfo(ver, tblInfo) +} + +// FinishDBJob is called when a job is finished. +// It updates the job's state information and adds dbInfo the binlog. +func (job *Job) FinishDBJob(jobState JobState, schemaState SchemaState, ver int64, dbInfo *DBInfo) { + job.State = jobState + job.SchemaState = schemaState + job.BinlogInfo.AddDBInfo(ver, dbInfo) +} + +// TSConvert2Time converts timestamp to time. +func TSConvert2Time(ts uint64) time.Time { + t := int64(ts >> 18) // 18 is for the logical time. + return time.Unix(t/1e3, (t%1e3)*1e6) +} + +// SetRowCount sets the number of rows. Make sure it can pass `make race`. +func (job *Job) SetRowCount(count int64) { + job.Mu.Lock() + defer job.Mu.Unlock() + + job.RowCount = count +} + +// GetRowCount gets the number of rows. Make sure it can pass `make race`. +func (job *Job) GetRowCount() int64 { + job.Mu.Lock() + defer job.Mu.Unlock() + + return job.RowCount +} + +// Encode encodes job with json format. +// updateRawArgs is used to determine whether to update the raw args. +func (job *Job) Encode(updateRawArgs bool) ([]byte, error) { + var err error + if updateRawArgs { + job.RawArgs, err = json.Marshal(job.Args) + if err != nil { + return nil, errors.Trace(err) + } + } + + var b []byte + job.Mu.Lock() + defer job.Mu.Unlock() + b, err = json.Marshal(job) + + return b, errors.Trace(err) +} + +// Decode decodes job from the json buffer, we must use DecodeArgs later to +// decode special args for this job. +func (job *Job) Decode(b []byte) error { + err := json.Unmarshal(b, job) + return errors.Trace(err) +} + +// DecodeArgs decodes job args. +func (job *Job) DecodeArgs(args ...interface{}) error { + job.Args = args + err := json.Unmarshal(job.RawArgs, &job.Args) + return errors.Trace(err) +} + +// String implements fmt.Stringer interface. 
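+// A formatted job reads roughly like (illustrative values):
+// "ID:53, Type:add index, State:running, SchemaState:write reorganization,
+// SchemaID:1, TableID:49, RowCount:1000, ArgLen:0, start time: ..., Err:<nil>,
+// ErrCount:0, SnapshotVersion:0".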
+func (job *Job) String() string { + rowCount := job.GetRowCount() + return fmt.Sprintf("ID:%d, Type:%s, State:%s, SchemaState:%s, SchemaID:%d, TableID:%d, RowCount:%d, ArgLen:%d, start time: %v, Err:%v, ErrCount:%d, SnapshotVersion:%v", + job.ID, job.Type, job.State, job.SchemaState, job.SchemaID, job.TableID, rowCount, len(job.Args), TSConvert2Time(job.StartTS), job.Error, job.ErrorCount, job.SnapshotVer) +} + +func (job *Job) hasDependentSchema(other *Job) (bool, error) { + if other.Type == ActionDropSchema || other.Type == ActionCreateSchema { + if other.SchemaID == job.SchemaID { + return true, nil + } + if job.Type == ActionRenameTable { + var oldSchemaID int64 + if err := job.DecodeArgs(&oldSchemaID); err != nil { + return false, errors.Trace(err) + } + if other.SchemaID == oldSchemaID { + return true, nil + } + } + } + return false, nil +} + +// IsDependentOn returns whether the job depends on "other". +// How to check the job depends on "other"? +// 1. The two jobs handle the same database when one of the two jobs is an ActionDropSchema or ActionCreateSchema type. +// 2. Or the two jobs handle the same table. +func (job *Job) IsDependentOn(other *Job) (bool, error) { + isDependent, err := job.hasDependentSchema(other) + if err != nil || isDependent { + return isDependent, errors.Trace(err) + } + isDependent, err = other.hasDependentSchema(job) + if err != nil || isDependent { + return isDependent, errors.Trace(err) + } + + // TODO: If a job is ActionRenameTable, we need to check table name. + if other.TableID == job.TableID { + return true, nil + } + return false, nil +} + +// IsFinished returns whether job is finished or not. +// If the job state is Done or Cancelled, it is finished. +func (job *Job) IsFinished() bool { + return job.State == JobStateDone || job.State == JobStateRollbackDone || job.State == JobStateCancelled +} + +// IsCancelled returns whether the job is cancelled or not. +func (job *Job) IsCancelled() bool { + return job.State == JobStateCancelled +} + +// IsRollbackDone returns whether the job is rolled back or not. +func (job *Job) IsRollbackDone() bool { + return job.State == JobStateRollbackDone +} + +// IsRollingback returns whether the job is rolling back or not. +func (job *Job) IsRollingback() bool { + return job.State == JobStateRollingback +} + +// IsCancelling returns whether the job is cancelling or not. +func (job *Job) IsCancelling() bool { + return job.State == JobStateCancelling +} + +// IsSynced returns whether the DDL modification is synced among all TiDB servers. +func (job *Job) IsSynced() bool { + return job.State == JobStateSynced +} + +// IsDone returns whether job is done. +func (job *Job) IsDone() bool { + return job.State == JobStateDone +} + +// IsRunning returns whether job is still running or not. +func (job *Job) IsRunning() bool { + return job.State == JobStateRunning +} + +// JobState is for job state. +type JobState byte + +// List job states. +const ( + JobStateNone JobState = 0 + JobStateRunning JobState = 1 + // When DDL encountered an unrecoverable error at reorganization state, + // some keys has been added already, we need to remove them. + // JobStateRollingback is the state to do the rolling back job. + JobStateRollingback JobState = 2 + JobStateRollbackDone JobState = 3 + JobStateDone JobState = 4 + JobStateCancelled JobState = 5 + // JobStateSynced is used to mark the information about the completion of this job + // has been synchronized to all servers. 
+ JobStateSynced JobState = 6 + // JobStateCancelling is used to mark the DDL job is cancelled by the client, but the DDL work hasn't handle it. + JobStateCancelling JobState = 7 +) + +// String implements fmt.Stringer interface. +func (s JobState) String() string { + switch s { + case JobStateRunning: + return "running" + case JobStateRollingback: + return "rollingback" + case JobStateRollbackDone: + return "rollback done" + case JobStateDone: + return "done" + case JobStateCancelled: + return "cancelled" + case JobStateCancelling: + return "cancelling" + case JobStateSynced: + return "synced" + default: + return "none" + } +} + +// SchemaDiff contains the schema modification at a particular schema version. +// It is used to reduce schema reload cost. +type SchemaDiff struct { + Version int64 `json:"version"` + Type ActionType `json:"type"` + SchemaID int64 `json:"schema_id"` + TableID int64 `json:"table_id"` + + // OldTableID is the table ID before truncate, only used by truncate table DDL. + OldTableID int64 `json:"old_table_id"` + // OldSchemaID is the schema ID before rename table, only used by rename table DDL. + OldSchemaID int64 `json:"old_schema_id"` +} diff --git a/parser/model/flags.go b/parser/model/flags.go new file mode 100644 index 0000000..428ac16 --- /dev/null +++ b/parser/model/flags.go @@ -0,0 +1,47 @@ +// Copyright 2018 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package model + +// Flags are used by tipb.SelectRequest.Flags to handle execution mode, like how to handle truncate error. +const ( + // FlagIgnoreTruncate indicates if truncate error should be ignored. + // Read-only statements should ignore truncate error, write statements should not ignore truncate error. + FlagIgnoreTruncate uint64 = 1 + // FlagTruncateAsWarning indicates if truncate error should be returned as warning. + // This flag only matters if FlagIgnoreTruncate is not set, in strict sql mode, truncate error should + // be returned as error, in non-strict sql mode, truncate error should be saved as warning. + FlagTruncateAsWarning = 1 << 1 + // FlagPadCharToFullLength indicates if sql_mode 'PAD_CHAR_TO_FULL_LENGTH' is set. + FlagPadCharToFullLength = 1 << 2 + // FlagInInsertStmt indicates if this is a INSERT statement. + FlagInInsertStmt = 1 << 3 + // FlagInUpdateOrDeleteStmt indicates if this is a UPDATE statement or a DELETE statement. + FlagInUpdateOrDeleteStmt = 1 << 4 + // FlagInSelectStmt indicates if this is a SELECT statement. + FlagInSelectStmt = 1 << 5 + // FlagOverflowAsWarning indicates if overflow error should be returned as warning. + // In strict sql mode, overflow error should be returned as error, + // in non-strict sql mode, overflow error should be saved as warning. + FlagOverflowAsWarning = 1 << 6 + // FlagIgnoreZeroInDate indicates if ZeroInDate error should be ignored. + // Read-only statements should ignore ZeroInDate error. + // Write statements should not ignore ZeroInDate error in strict sql mode. + FlagIgnoreZeroInDate = 1 << 7 + // FlagDividedByZeroAsWarning indicates if DividedByZero should be returned as warning. 
+ FlagDividedByZeroAsWarning = 1 << 8 + // FlagInUnionStmt indicates if this is a UNION statement. + FlagInUnionStmt = 1 << 9 + // FlagInLoadDataStmt indicates if this is a LOAD DATA statement. + FlagInLoadDataStmt = 1 << 10 +) diff --git a/parser/model/model.go b/parser/model/model.go new file mode 100644 index 0000000..acf0115 --- /dev/null +++ b/parser/model/model.go @@ -0,0 +1,701 @@ +// Copyright 2015 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package model + +import ( + "encoding/json" + "strconv" + "strings" + "time" + + "github.com/pingcap/errors" + "github.com/pingcap/tidb/parser/mysql" + "github.com/pingcap/tidb/parser/types" + "github.com/pingcap/tipb/go-tipb" +) + +// SchemaState is the state for schema elements. +type SchemaState byte + +const ( + // StateNone means this schema element is absent and can't be used. + StateNone SchemaState = iota + // StateDeleteOnly means we can only delete items for this schema element. + StateDeleteOnly + // StateWriteOnly means we can use any write operation on this schema element, + // but outer can't read the changed data. + StateWriteOnly + // StateWriteReorganization means we are re-organizing whole data after write only state. + StateWriteReorganization + // StateDeleteReorganization means we are re-organizing whole data after delete only state. + StateDeleteReorganization + // StatePublic means this schema element is ok for all write and read operations. + StatePublic +) + +// String implements fmt.Stringer interface. +func (s SchemaState) String() string { + switch s { + case StateDeleteOnly: + return "delete only" + case StateWriteOnly: + return "write only" + case StateWriteReorganization: + return "write reorganization" + case StateDeleteReorganization: + return "delete reorganization" + case StatePublic: + return "public" + default: + return "none" + } +} + +const ( + // ColumnInfoVersion0 means the column info version is 0. + ColumnInfoVersion0 = uint64(0) + // ColumnInfoVersion1 means the column info version is 1. + ColumnInfoVersion1 = uint64(1) + // ColumnInfoVersion2 means the column info version is 2. + // This is for v2.1.7 to Compatible with older versions charset problem. + // Old version such as v2.0.8 treat utf8 as utf8mb4, because there is no UTF8 check in v2.0.8. + // After version V2.1.2 (PR#8738) , TiDB add UTF8 check, then the user upgrade from v2.0.8 insert some UTF8MB4 characters will got error. + // This is not compatibility for user. Then we try to fix this in PR #9820, and increase the version number. + ColumnInfoVersion2 = uint64(2) + + // CurrLatestColumnInfoVersion means the latest column info in the current TiDB. + CurrLatestColumnInfoVersion = ColumnInfoVersion2 +) + +// ColumnInfo provides meta data describing of a table column. 
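+// The json tags below define how the column meta is marshaled with
+// encoding/json, so renaming a field or changing a tag changes the serialized
+// form that older metadata was written in.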
+type ColumnInfo struct { + ID int64 `json:"id"` + Name CIStr `json:"name"` + Offset int `json:"offset"` + OriginDefaultValue interface{} `json:"origin_default"` + DefaultValue interface{} `json:"default"` + types.FieldType `json:"type"` + State SchemaState `json:"state"` + Comment string `json:"comment"` + // A hidden column is used internally(expression index) and are not accessible by users. + Hidden bool `json:"hidden"` + // Version means the version of the column info. + // Version = 0: For OriginDefaultValue and DefaultValue of timestamp column will stores the default time in system time zone. + // That is a bug if multiple TiDB servers in different system time zone. + // Version = 1: For OriginDefaultValue and DefaultValue of timestamp column will stores the default time in UTC time zone. + // This will fix bug in version 0. For compatibility with version 0, we add version field in column info struct. + Version uint64 `json:"version"` +} + +// Clone clones ColumnInfo. +func (c *ColumnInfo) Clone() *ColumnInfo { + nc := *c + return &nc +} + +// SetDefaultValue sets the default value. +func (c *ColumnInfo) SetDefaultValue(value interface{}) error { + c.DefaultValue = value + return nil +} + +// GetDefaultValue gets the default value of the column. +func (c *ColumnInfo) GetDefaultValue() interface{} { + return c.DefaultValue +} + +// GetTypeDesc gets the description for column type. +func (c *ColumnInfo) GetTypeDesc() string { + desc := c.FieldType.CompactStr() + if mysql.HasUnsignedFlag(c.Flag) && c.Tp != mysql.TypeBit && c.Tp != mysql.TypeYear { + desc += " unsigned" + } + if mysql.HasZerofillFlag(c.Flag) && c.Tp != mysql.TypeYear { + desc += " zerofill" + } + return desc +} + +// FindColumnInfo finds ColumnInfo in cols by name. +func FindColumnInfo(cols []*ColumnInfo, name string) *ColumnInfo { + name = strings.ToLower(name) + for _, col := range cols { + if col.Name.L == name { + return col + } + } + + return nil +} + +// ExtraHandleID is the column ID of column which we need to append to schema to occupy the handle's position +// for use of execution phase. +const ExtraHandleID = -1 + +const ( + // TableInfoVersion0 means the table info version is 0. + // Upgrade from v2.1.1 or v2.1.2 to v2.1.3 and later, and then execute a "change/modify column" statement + // that does not specify a charset value for column. Then the following error may be reported: + // ERROR 1105 (HY000): unsupported modify charset from utf8mb4 to utf8. + // To eliminate this error, we will not modify the charset of this column + // when executing a change/modify column statement that does not specify a charset value for column. + // This behavior is not compatible with MySQL. + TableInfoVersion0 = uint16(0) + // TableInfoVersion1 means the table info version is 1. + // When we execute a change/modify column statement that does not specify a charset value for column, + // we set the charset of this column to the charset of table. This behavior is compatible with MySQL. + TableInfoVersion1 = uint16(1) + // TableInfoVersion2 means the table info version is 2. + // This is for v2.1.7 to Compatible with older versions charset problem. + // Old version such as v2.0.8 treat utf8 as utf8mb4, because there is no UTF8 check in v2.0.8. + // After version V2.1.2 (PR#8738) , TiDB add UTF8 check, then the user upgrade from v2.0.8 insert some UTF8MB4 characters will got error. + // This is not compatibility for user. Then we try to fix this in PR #9820, and increase the version number. 
+ TableInfoVersion2 = uint16(2) + // TableInfoVersion3 means the table info version is 3. + // This version aims to deal with upper-cased charset name in TableInfo stored by versions prior to TiDB v2.1.9: + // TiDB always suppose all charsets / collations as lower-cased and try to convert them if they're not. + // However, the convert is missed in some scenarios before v2.1.9, so for all those tables prior to TableInfoVersion3, their + // charsets / collations will be converted to lower-case while loading from the storage. + TableInfoVersion3 = uint16(3) + + // CurrLatestTableInfoVersion means the latest table info in the current TiDB. + CurrLatestTableInfoVersion = TableInfoVersion3 +) + +// ExtraHandleName is the name of ExtraHandle Column. +var ExtraHandleName = NewCIStr("_tidb_rowid") + +// TableInfo provides meta data describing a DB table. +type TableInfo struct { + ID int64 `json:"id"` + Name CIStr `json:"name"` + Charset string `json:"charset"` + Collate string `json:"collate"` + // Columns are listed in the order in which they appear in the schema. + Columns []*ColumnInfo `json:"cols"` + Indices []*IndexInfo `json:"index_info"` + ForeignKeys []*FKInfo `json:"fk_info"` + State SchemaState `json:"state"` + PKIsHandle bool `json:"pk_is_handle"` + Comment string `json:"comment"` + AutoIncID int64 `json:"auto_inc_id"` + MaxColumnID int64 `json:"max_col_id"` + MaxIndexID int64 `json:"max_idx_id"` + // UpdateTS is used to record the timestamp of updating the table's schema information. + // These changing schema operations don't include 'truncate table' and 'rename table'. + UpdateTS uint64 `json:"update_timestamp"` + // OldSchemaID : + // Because auto increment ID has schemaID as prefix, + // We need to save original schemaID to keep autoID unchanged + // while renaming a table from one database to another. + // TODO: Remove it. + // Now it only uses for compatibility with the old version that already uses this field. + OldSchemaID int64 `json:"old_schema_id,omitempty"` + + // ShardRowIDBits specify if the implicit row ID is sharded. + ShardRowIDBits uint64 + // MaxShardRowIDBits uses to record the max ShardRowIDBits be used so far. + MaxShardRowIDBits uint64 `json:"max_shard_row_id_bits"` + // AutoRandomBits is used to set the bit number to shard automatically when PKIsHandle. + AutoRandomBits uint64 `json:"auto_shard_bits"` + // PreSplitRegions specify the pre-split region when create table. + // The pre-split region num is 2^(PreSplitRegions-1). + // And the PreSplitRegions should less than or equal to ShardRowIDBits. + PreSplitRegions uint64 `json:"pre_split_regions"` + + Compression string `json:"compression"` + + // Lock represent the table lock info. + Lock *TableLockInfo `json:"Lock"` + + // Version means the version of the table info. + Version uint16 `json:"version"` + + // TiFlashReplica means the TiFlash replica info. + TiFlashReplica *TiFlashReplicaInfo `json:"tiflash_replica"` +} + +// TableLockInfo provides meta data describing a table lock. +type TableLockInfo struct { + Tp TableLockType + // Use array because there may be multiple sessions holding the same read lock. + Sessions []SessionInfo + State TableLockState + // TS is used to record the timestamp this table lock been locked. + TS uint64 +} + +// SessionInfo contain the session ID and the server ID. 
+type SessionInfo struct { + ServerID string + SessionID uint64 +} + +func (s SessionInfo) String() string { + return "server: " + s.ServerID + "_session: " + strconv.FormatUint(s.SessionID, 10) +} + +// TableLockTpInfo is composed by schema ID, table ID and table lock type. +type TableLockTpInfo struct { + SchemaID int64 + TableID int64 + Tp TableLockType +} + +// TableLockState is the state for table lock. +type TableLockState byte + +const ( + // TableLockStateNone means this table lock is absent. + TableLockStateNone TableLockState = iota + // TableLockStatePreLock means this table lock is pre-lock state. Other session doesn't hold this lock should't do corresponding operation according to the lock type. + TableLockStatePreLock + // TableLockStatePublic means this table lock is public state. + TableLockStatePublic +) + +// String implements fmt.Stringer interface. +func (t TableLockState) String() string { + switch t { + case TableLockStatePreLock: + return "pre-lock" + case TableLockStatePublic: + return "public" + default: + return "none" + } +} + +// TableLockType is the type of the table lock. +type TableLockType byte + +const ( + TableLockNone TableLockType = iota + // TableLockRead means the session with this lock can read the table (but not write it). + // Multiple sessions can acquire a READ lock for the table at the same time. + // Other sessions can read the table without explicitly acquiring a READ lock. + TableLockRead + // TableLockReadLocal is not supported. + TableLockReadLocal + // TableLockWrite means only the session with this lock has write/read permission. + // Only the session that holds the lock can access the table. No other session can access it until the lock is released. + TableLockWrite + // TableLockWriteLocal means the session with this lock has write/read permission, and the other session still has read permission. + TableLockWriteLocal +) + +func (t TableLockType) String() string { + switch t { + case TableLockNone: + return "NONE" + case TableLockRead: + return "READ" + case TableLockReadLocal: + return "READ LOCAL" + case TableLockWriteLocal: + return "WRITE LOCAL" + case TableLockWrite: + return "WRITE" + } + return "" +} + +// TiFlashReplicaInfo means the flash replica info. +type TiFlashReplicaInfo struct { + Count uint64 + LocationLabels []string + Available bool +} + +// GetUpdateTime gets the table's updating time. +func (t *TableInfo) GetUpdateTime() time.Time { + return TSConvert2Time(t.UpdateTS) +} + +// GetDBID returns the schema ID that is used to create an allocator. +// TODO: Remove it after removing OldSchemaID. +func (t *TableInfo) GetDBID(dbID int64) int64 { + if t.OldSchemaID != 0 { + return t.OldSchemaID + } + return dbID +} + +// Clone clones TableInfo. +func (t *TableInfo) Clone() *TableInfo { + nt := *t + nt.Columns = make([]*ColumnInfo, len(t.Columns)) + nt.Indices = make([]*IndexInfo, len(t.Indices)) + nt.ForeignKeys = make([]*FKInfo, len(t.ForeignKeys)) + + for i := range t.Columns { + nt.Columns[i] = t.Columns[i].Clone() + } + + for i := range t.Indices { + nt.Indices[i] = t.Indices[i].Clone() + } + + for i := range t.ForeignKeys { + nt.ForeignKeys[i] = t.ForeignKeys[i].Clone() + } + + return &nt +} + +// GetPkName will return the pk name if pk exists. +func (t *TableInfo) GetPkName() CIStr { + for _, colInfo := range t.Columns { + if mysql.HasPriKeyFlag(colInfo.Flag) { + return colInfo.Name + } + } + return CIStr{} +} + +// GetPkColInfo gets the ColumnInfo of pk if exists. 
+// Make sure PKIsHandle is checked before calling this method.
+func (t *TableInfo) GetPkColInfo() *ColumnInfo {
+	for _, colInfo := range t.Columns {
+		if mysql.HasPriKeyFlag(colInfo.Flag) {
+			return colInfo
+		}
+	}
+	return nil
+}
+
+// GetAutoIncrementColInfo gets the ColumnInfo of the auto-increment column if it exists.
+func (t *TableInfo) GetAutoIncrementColInfo() *ColumnInfo {
+	for _, colInfo := range t.Columns {
+		if mysql.HasAutoIncrementFlag(colInfo.Flag) {
+			return colInfo
+		}
+	}
+	return nil
+}
+
+// IsAutoIncColUnsigned checks whether the auto-increment column is unsigned.
+func (t *TableInfo) IsAutoIncColUnsigned() bool {
+	col := t.GetAutoIncrementColInfo()
+	if col == nil {
+		return false
+	}
+	return mysql.HasUnsignedFlag(col.Flag)
+}
+
+// ContainsAutoRandomBits indicates whether a table contains an auto_random column.
+func (t *TableInfo) ContainsAutoRandomBits() bool {
+	return t.AutoRandomBits != 0
+}
+
+// IsAutoRandomBitColUnsigned indicates whether the auto_random column is unsigned.
+// Make sure the table contains an auto_random column before calling this method.
+func (t *TableInfo) IsAutoRandomBitColUnsigned() bool {
+	if !t.PKIsHandle || t.AutoRandomBits == 0 {
+		return false
+	}
+	return mysql.HasUnsignedFlag(t.GetPkColInfo().Flag)
+}
+
+// Cols returns the columns of the table in public state.
+func (t *TableInfo) Cols() []*ColumnInfo {
+	publicColumns := make([]*ColumnInfo, len(t.Columns))
+	maxOffset := -1
+	for _, col := range t.Columns {
+		if col.State != StatePublic {
+			continue
+		}
+		publicColumns[col.Offset] = col
+		if maxOffset < col.Offset {
+			maxOffset = col.Offset
+		}
+	}
+	return publicColumns[0 : maxOffset+1]
+}
+
+// FindIndexByName finds an index by name.
+func (t *TableInfo) FindIndexByName(idxName string) *IndexInfo {
+	for _, idx := range t.Indices {
+		if idx.Name.L == idxName {
+			return idx
+		}
+	}
+	return nil
+}
+
+// IsLocked checks whether the table is locked.
+func (t *TableInfo) IsLocked() bool {
+	return t.Lock != nil && len(t.Lock.Sessions) > 0
+}
+
+// NewExtraHandleColInfo mocks a column info for the extra handle column.
+func NewExtraHandleColInfo() *ColumnInfo {
+	colInfo := &ColumnInfo{
+		ID:   ExtraHandleID,
+		Name: ExtraHandleName,
+	}
+	colInfo.Flag = mysql.PriKeyFlag
+	colInfo.Tp = mysql.TypeLonglong
+	colInfo.Flen, colInfo.Decimal = mysql.GetDefaultFieldLengthAndDecimal(mysql.TypeLonglong)
+	return colInfo
+}
+
+// ColumnIsInIndex checks whether c is included in any indices of t.
+func (t *TableInfo) ColumnIsInIndex(c *ColumnInfo) bool {
+	for _, index := range t.Indices {
+		for _, column := range index.Columns {
+			if column.Name.L == c.Name.L {
+				return true
+			}
+		}
+	}
+	return false
+}
+
+// IndexColumn provides index column info.
+type IndexColumn struct {
+	Name   CIStr `json:"name"`   // Index name
+	Offset int   `json:"offset"` // Index offset
+	// Length of prefix when using column prefix
+	// for indexing;
+	// UnspecifiedLength if not using prefix indexing
+	Length int `json:"length"`
+}
+
+// Clone clones IndexColumn.
+func (i *IndexColumn) Clone() *IndexColumn {
+	ni := *i
+	return &ni
+}
+
+// IndexType is the type of index.
+type IndexType int
+
+// String implements Stringer interface.
+func (t IndexType) String() string {
+	switch t {
+	case IndexTypeBtree:
+		return "BTREE"
+	case IndexTypeHash:
+		return "HASH"
+	case IndexTypeRtree:
+		return "RTREE"
+	default:
+		return ""
+	}
+}
+
+// IndexTypes
+const (
+	IndexTypeInvalid IndexType = iota
+	IndexTypeBtree
+	IndexTypeHash
+	IndexTypeRtree
+)
+
+// IndexInfo provides meta data describing a DB index.
+// It corresponds to the statement `CREATE INDEX Name ON Table (Column);` +// See https://dev.mysql.com/doc/refman/5.7/en/create-index.html +type IndexInfo struct { + ID int64 `json:"id"` + Name CIStr `json:"idx_name"` // Index name. + Table CIStr `json:"tbl_name"` // Table name. + Columns []*IndexColumn `json:"idx_cols"` // Index columns. + Unique bool `json:"is_unique"` // Whether the index is unique. + Primary bool `json:"is_primary"` // Whether the index is primary key. + State SchemaState `json:"state"` + Comment string `json:"comment"` // Comment + Tp IndexType `json:"index_type"` // Index type: Btree, Hash or Rtree +} + +// Clone clones IndexInfo. +func (index *IndexInfo) Clone() *IndexInfo { + ni := *index + ni.Columns = make([]*IndexColumn, len(index.Columns)) + for i := range index.Columns { + ni.Columns[i] = index.Columns[i].Clone() + } + return &ni +} + +// HasPrefixIndex returns whether any columns of this index uses prefix length. +func (index *IndexInfo) HasPrefixIndex() bool { + for _, ic := range index.Columns { + if ic.Length != types.UnspecifiedLength { + return true + } + } + return false +} + +// FKInfo provides meta data describing a foreign key constraint. +type FKInfo struct { + ID int64 `json:"id"` + Name CIStr `json:"fk_name"` + RefTable CIStr `json:"ref_table"` + RefCols []CIStr `json:"ref_cols"` + Cols []CIStr `json:"cols"` + OnDelete int `json:"on_delete"` + OnUpdate int `json:"on_update"` + State SchemaState `json:"state"` +} + +// Clone clones FKInfo. +func (fk *FKInfo) Clone() *FKInfo { + nfk := *fk + + nfk.RefCols = make([]CIStr, len(fk.RefCols)) + nfk.Cols = make([]CIStr, len(fk.Cols)) + copy(nfk.RefCols, fk.RefCols) + copy(nfk.Cols, fk.Cols) + + return &nfk +} + +// DBInfo provides meta data describing a DB. +type DBInfo struct { + ID int64 `json:"id"` // Database ID + Name CIStr `json:"db_name"` // DB name. + Charset string `json:"charset"` + Collate string `json:"collate"` + Tables []*TableInfo `json:"-"` // Tables in the DB. + State SchemaState `json:"state"` +} + +// Clone clones DBInfo. +func (db *DBInfo) Clone() *DBInfo { + newInfo := *db + newInfo.Tables = make([]*TableInfo, len(db.Tables)) + for i := range db.Tables { + newInfo.Tables[i] = db.Tables[i].Clone() + } + return &newInfo +} + +// Copy shallow copies DBInfo. +func (db *DBInfo) Copy() *DBInfo { + newInfo := *db + newInfo.Tables = make([]*TableInfo, len(db.Tables)) + copy(newInfo.Tables, db.Tables) + return &newInfo +} + +// CIStr is case insensitive string. +type CIStr struct { + O string `json:"O"` // Original string. + L string `json:"L"` // Lower case string. +} + +// String implements fmt.Stringer interface. +func (cis CIStr) String() string { + return cis.O +} + +// NewCIStr creates a new CIStr. +func NewCIStr(s string) (cs CIStr) { + cs.O = s + cs.L = strings.ToLower(s) + return +} + +// UnmarshalJSON implements the user defined unmarshal method. +// CIStr can be unmarshaled from a single string, so PartitionDefinition.Name +// in this change https://github.com/pingcap/tidb/pull/6460/files would be +// compatible during TiDB upgrading. +func (cis *CIStr) UnmarshalJSON(b []byte) error { + type T CIStr + if err := json.Unmarshal(b, (*T)(cis)); err == nil { + return nil + } + + // Unmarshal CIStr from a single string. + err := json.Unmarshal(b, &cis.O) + if err != nil { + return errors.Trace(err) + } + cis.L = strings.ToLower(cis.O) + return nil +} + +// ColumnsToProto converts a slice of model.ColumnInfo to a slice of tipb.ColumnInfo. 
+func ColumnsToProto(columns []*ColumnInfo, pkIsHandle bool) []*tipb.ColumnInfo { + cols := make([]*tipb.ColumnInfo, 0, len(columns)) + for _, c := range columns { + col := ColumnToProto(c) + // TODO: Here `PkHandle`'s meaning is changed, we will change it to `IsHandle` when tikv's old select logic + // is abandoned. + if (pkIsHandle && mysql.HasPriKeyFlag(c.Flag)) || c.ID == ExtraHandleID { + col.PkHandle = true + } else { + col.PkHandle = false + } + cols = append(cols, col) + } + return cols +} + +// IndexToProto converts a model.IndexInfo to a tipb.IndexInfo. +func IndexToProto(t *TableInfo, idx *IndexInfo) *tipb.IndexInfo { + pi := &tipb.IndexInfo{ + TableId: t.ID, + IndexId: idx.ID, + Unique: idx.Unique, + } + cols := make([]*tipb.ColumnInfo, 0, len(idx.Columns)+1) + for _, c := range idx.Columns { + cols = append(cols, ColumnToProto(t.Columns[c.Offset])) + } + if t.PKIsHandle { + // Coprocessor needs to know PKHandle column info, so we need to append it. + for _, col := range t.Columns { + if mysql.HasPriKeyFlag(col.Flag) { + colPB := ColumnToProto(col) + colPB.PkHandle = true + cols = append(cols, colPB) + break + } + } + } + pi.Columns = cols + return pi +} + +// ColumnToProto converts model.ColumnInfo to tipb.ColumnInfo. +func ColumnToProto(c *ColumnInfo) *tipb.ColumnInfo { + pc := &tipb.ColumnInfo{ + ColumnId: c.ID, + Collation: collationToProto(c.FieldType.Collate), + ColumnLen: int32(c.FieldType.Flen), + Decimal: int32(c.FieldType.Decimal), + Flag: int32(c.Flag), + Elems: c.Elems, + } + pc.Tp = int32(c.FieldType.Tp) + return pc +} + +// TODO: update it when more collate is supported. +func collationToProto(c string) int32 { + v := mysql.CollationNames[c] + if v == mysql.BinaryDefaultCollationID { + return int32(mysql.BinaryDefaultCollationID) + } + // We only support binary and utf8_bin collation. + // Setting other collations to utf8_bin for old data compatibility. + // For the data created when we didn't enforce utf8_bin collation in create table. + return int32(mysql.DefaultCollationID) +} + +// TableColumnID is composed by table ID and column ID. +type TableColumnID struct { + TableID int64 + ColumnID int64 +} diff --git a/parser/model/model_test.go b/parser/model/model_test.go new file mode 100644 index 0000000..5f15031 --- /dev/null +++ b/parser/model/model_test.go @@ -0,0 +1,292 @@ +// Copyright 2015 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package model + +import ( + "encoding/json" + "fmt" + "testing" + "time" + + . 
"github.com/pingcap/check" + "github.com/pingcap/tidb/parser/mysql" + "github.com/pingcap/tidb/parser/types" +) + +func TestT(t *testing.T) { + CustomVerboseFlag = true + TestingT(t) +} + +var _ = Suite(&testModelSuite{}) + +type testModelSuite struct { +} + +func (*testModelSuite) TestT(c *C) { + abc := NewCIStr("aBC") + c.Assert(abc.O, Equals, "aBC") + c.Assert(abc.L, Equals, "abc") + c.Assert(abc.String(), Equals, "aBC") +} + +func (*testModelSuite) TestModelBasic(c *C) { + column := &ColumnInfo{ + ID: 1, + Name: NewCIStr("c"), + Offset: 0, + DefaultValue: 0, + FieldType: *types.NewFieldType(0), + Hidden: true, + } + column.Flag |= mysql.PriKeyFlag + + index := &IndexInfo{ + Name: NewCIStr("key"), + Table: NewCIStr("t"), + Columns: []*IndexColumn{ + { + Name: NewCIStr("c"), + Offset: 0, + Length: 10, + }}, + Unique: true, + Primary: true, + } + + fk := &FKInfo{ + RefCols: []CIStr{NewCIStr("a")}, + Cols: []CIStr{NewCIStr("a")}, + } + + table := &TableInfo{ + ID: 1, + Name: NewCIStr("t"), + Charset: "utf8", + Collate: "utf8_bin", + Columns: []*ColumnInfo{column}, + Indices: []*IndexInfo{index}, + ForeignKeys: []*FKInfo{fk}, + PKIsHandle: true, + } + + dbInfo := &DBInfo{ + ID: 1, + Name: NewCIStr("test"), + Charset: "utf8", + Collate: "utf8_bin", + Tables: []*TableInfo{table}, + } + + n := dbInfo.Clone() + c.Assert(n, DeepEquals, dbInfo) + + pkName := table.GetPkName() + c.Assert(pkName, Equals, NewCIStr("c")) + newColumn := table.GetPkColInfo() + c.Assert(newColumn.Hidden, Equals, true) + c.Assert(newColumn, DeepEquals, column) + inIdx := table.ColumnIsInIndex(column) + c.Assert(inIdx, Equals, true) + tp := IndexTypeBtree + c.Assert(tp.String(), Equals, "BTREE") + tp = IndexTypeHash + c.Assert(tp.String(), Equals, "HASH") + tp = 1e5 + c.Assert(tp.String(), Equals, "") + has := index.HasPrefixIndex() + c.Assert(has, Equals, true) + t := table.GetUpdateTime() + c.Assert(t, Equals, TSConvert2Time(table.UpdateTS)) + + // Corner cases + column.Flag ^= mysql.PriKeyFlag + pkName = table.GetPkName() + c.Assert(pkName, Equals, NewCIStr("")) + newColumn = table.GetPkColInfo() + c.Assert(newColumn, IsNil) + anCol := &ColumnInfo{ + Name: NewCIStr("d"), + } + exIdx := table.ColumnIsInIndex(anCol) + c.Assert(exIdx, Equals, false) + anIndex := &IndexInfo{ + Columns: []*IndexColumn{}, + } + no := anIndex.HasPrefixIndex() + c.Assert(no, Equals, false) +} + +func (*testModelSuite) TestJobStartTime(c *C) { + job := &Job{ + ID: 123, + BinlogInfo: &HistoryInfo{}, + } + t := time.Unix(0, 0) + c.Assert(t, Equals, TSConvert2Time(job.StartTS)) + ret := fmt.Sprintf("%s", job) + c.Assert(job.String(), Equals, ret) +} + +func (*testModelSuite) TestJobCodec(c *C) { + type A struct { + Name string + } + job := &Job{ + ID: 1, + TableID: 2, + SchemaID: 1, + BinlogInfo: &HistoryInfo{}, + Args: []interface{}{NewCIStr("a"), A{Name: "abc"}}, + } + job.BinlogInfo.AddDBInfo(123, &DBInfo{ID: 1, Name: NewCIStr("test_history_db")}) + job.BinlogInfo.AddTableInfo(123, &TableInfo{ID: 1, Name: NewCIStr("test_history_tbl")}) + + // Test IsDependentOn. 
+ // job: table ID is 2 + // job1: table ID is 2 + var err error + job1 := &Job{ + ID: 2, + TableID: 2, + SchemaID: 1, + Type: ActionRenameTable, + BinlogInfo: &HistoryInfo{}, + Args: []interface{}{int64(3), NewCIStr("new_table_name")}, + } + job1.RawArgs, err = json.Marshal(job1.Args) + c.Assert(err, IsNil) + isDependent, err := job.IsDependentOn(job1) + c.Assert(err, IsNil) + c.Assert(isDependent, IsTrue) + // job1: rename table, old schema ID is 3 + // job2: create schema, schema ID is 3 + job2 := &Job{ + ID: 3, + TableID: 3, + SchemaID: 3, + Type: ActionCreateSchema, + BinlogInfo: &HistoryInfo{}, + } + isDependent, err = job2.IsDependentOn(job1) + c.Assert(err, IsNil) + c.Assert(isDependent, IsTrue) + + c.Assert(job.IsCancelled(), Equals, false) + b, err := job.Encode(false) + c.Assert(err, IsNil) + newJob := &Job{} + err = newJob.Decode(b) + c.Assert(err, IsNil) + c.Assert(newJob.BinlogInfo, DeepEquals, job.BinlogInfo) + name := CIStr{} + a := A{} + err = newJob.DecodeArgs(&name, &a) + c.Assert(err, IsNil) + c.Assert(name, DeepEquals, NewCIStr("")) + c.Assert(a, DeepEquals, A{Name: ""}) + c.Assert(len(newJob.String()), Greater, 0) + + job.BinlogInfo.Clean() + b1, err := job.Encode(true) + c.Assert(err, IsNil) + newJob = &Job{} + err = newJob.Decode(b1) + c.Assert(err, IsNil) + c.Assert(newJob.BinlogInfo, DeepEquals, &HistoryInfo{}) + name = CIStr{} + a = A{} + err = newJob.DecodeArgs(&name, &a) + c.Assert(err, IsNil) + c.Assert(name, DeepEquals, NewCIStr("a")) + c.Assert(a, DeepEquals, A{Name: "abc"}) + c.Assert(len(newJob.String()), Greater, 0) + + b2, err := job.Encode(true) + c.Assert(err, IsNil) + newJob = &Job{} + err = newJob.Decode(b2) + c.Assert(err, IsNil) + name = CIStr{} + // Don't decode to a here. + err = newJob.DecodeArgs(&name) + c.Assert(err, IsNil) + c.Assert(name, DeepEquals, NewCIStr("a")) + c.Assert(len(newJob.String()), Greater, 0) + + job.State = JobStateDone + c.Assert(job.IsDone(), IsTrue) + c.Assert(job.IsFinished(), IsTrue) + c.Assert(job.IsRunning(), IsFalse) + c.Assert(job.IsSynced(), IsFalse) + c.Assert(job.IsRollbackDone(), IsFalse) + job.SetRowCount(3) + c.Assert(job.GetRowCount(), Equals, int64(3)) +} + +func (testModelSuite) TestState(c *C) { + schemaTbl := []SchemaState{ + StateDeleteOnly, + StateWriteOnly, + StateWriteReorganization, + StateDeleteReorganization, + StatePublic, + } + + for _, state := range schemaTbl { + c.Assert(len(state.String()), Greater, 0) + } + + jobTbl := []JobState{ + JobStateRunning, + JobStateDone, + JobStateCancelled, + JobStateRollingback, + JobStateRollbackDone, + JobStateSynced, + } + + for _, state := range jobTbl { + c.Assert(len(state.String()), Greater, 0) + } +} + +func (testModelSuite) TestString(c *C) { + acts := []struct { + act ActionType + result string + }{ + {ActionNone, "none"}, + {ActionAddForeignKey, "add foreign key"}, + {ActionDropForeignKey, "drop foreign key"}, + {ActionTruncateTable, "truncate table"}, + {ActionModifyColumn, "modify column"}, + {ActionRenameTable, "rename table"}, + {ActionSetDefaultValue, "set default value"}, + {ActionCreateSchema, "create schema"}, + {ActionDropSchema, "drop schema"}, + {ActionCreateTable, "create table"}, + {ActionDropTable, "drop table"}, + {ActionAddIndex, "add index"}, + {ActionDropIndex, "drop index"}, + {ActionAddColumn, "add column"}, + {ActionDropColumn, "drop column"}, + {ActionModifySchemaCharsetAndCollate, "modify schema charset and collate"}, + } + + for _, v := range acts { + str := v.act.String() + c.Assert(str, Equals, v.result) + } +} diff --git 
a/parser/mysql/charset.go b/parser/mysql/charset.go new file mode 100644 index 0000000..d3115df --- /dev/null +++ b/parser/mysql/charset.go @@ -0,0 +1,635 @@ +// Copyright 2015 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package mysql + +import "unicode" + +// CharsetNameToID maps charset name to its default collation ID. +func CharsetNameToID(charset string) uint8 { + // Use quick path for TiDB to avoid access CharsetIDs map + // "SHOW CHARACTER SET;" to see all the supported character sets. + if charset == "utf8mb4" { + return UTF8MB4DefaultCollationID + } else if charset == "binary" { + return BinaryDefaultCollationID + } else if charset == "utf8" { + return UTF8DefaultCollationID + } else if charset == "ascii" { + return ASCIIDefaultCollationID + } else if charset == "latin1" { + return Latin1DefaultCollationID + } else { + return CharsetIDs[charset] + } +} + +// CharsetIDs maps charset name to its default collation ID. +var CharsetIDs = map[string]uint8{ + "big5": 1, + "dec8": 3, + "cp850": 4, + "hp8": 6, + "koi8r": 7, + "latin1": Latin1DefaultCollationID, + "latin2": 9, + "swe7": 10, + "ascii": ASCIIDefaultCollationID, + "ujis": 12, + "sjis": 13, + "hebrew": 16, + "tis620": 18, + "euckr": 19, + "koi8u": 22, + "gb2312": 24, + "greek": 25, + "cp1250": 26, + "gbk": 28, + "latin5": 30, + "armscii8": 32, + "utf8": UTF8DefaultCollationID, + "ucs2": 35, + "cp866": 36, + "keybcs2": 37, + "macce": 38, + "macroman": 39, + "cp852": 40, + "latin7": 41, + "utf8mb4": UTF8MB4DefaultCollationID, + "cp1251": 51, + "utf16": 54, + "utf16le": 56, + "cp1256": 57, + "cp1257": 59, + "utf32": 60, + "binary": BinaryDefaultCollationID, + "geostd8": 92, + "cp932": 95, + "eucjpms": 97, +} + +// Charsets maps charset name to its default collation name. +var Charsets = map[string]string{ + "big5": "big5_chinese_ci", + "dec8": "dec8_swedish_ci", + "cp850": "cp850_general_ci", + "hp8": "hp8_english_ci", + "koi8r": "koi8r_general_ci", + "latin1": "latin1_bin", + "latin2": "latin2_general_ci", + "swe7": "swe7_swedish_ci", + "ascii": "ascii_bin", + "ujis": "ujis_japanese_ci", + "sjis": "sjis_japanese_ci", + "hebrew": "hebrew_general_ci", + "tis620": "tis620_thai_ci", + "euckr": "euckr_korean_ci", + "koi8u": "koi8u_general_ci", + "gb2312": "gb2312_chinese_ci", + "greek": "greek_general_ci", + "cp1250": "cp1250_general_ci", + "gbk": "gbk_chinese_ci", + "latin5": "latin5_turkish_ci", + "armscii8": "armscii8_general_ci", + "utf8": "utf8_bin", + "ucs2": "ucs2_general_ci", + "cp866": "cp866_general_ci", + "keybcs2": "keybcs2_general_ci", + "macce": "macce_general_ci", + "macroman": "macroman_general_ci", + "cp852": "cp852_general_ci", + "latin7": "latin7_general_ci", + "utf8mb4": "utf8mb4_bin", + "cp1251": "cp1251_general_ci", + "utf16": "utf16_general_ci", + "utf16le": "utf16le_general_ci", + "cp1256": "cp1256_general_ci", + "cp1257": "cp1257_general_ci", + "utf32": "utf32_general_ci", + "binary": "binary", + "geostd8": "geostd8_general_ci", + "cp932": "cp932_japanese_ci", + "eucjpms": "eucjpms_japanese_ci", +} + +// Collations maps MySQL collation ID to its name. 
+var Collations = map[uint8]string{ + 1: "big5_chinese_ci", + 2: "latin2_czech_cs", + 3: "dec8_swedish_ci", + 4: "cp850_general_ci", + 5: "latin1_german1_ci", + 6: "hp8_english_ci", + 7: "koi8r_general_ci", + 8: "latin1_swedish_ci", + 9: "latin2_general_ci", + 10: "swe7_swedish_ci", + 11: "ascii_general_ci", + 12: "ujis_japanese_ci", + 13: "sjis_japanese_ci", + 14: "cp1251_bulgarian_ci", + 15: "latin1_danish_ci", + 16: "hebrew_general_ci", + 18: "tis620_thai_ci", + 19: "euckr_korean_ci", + 20: "latin7_estonian_cs", + 21: "latin2_hungarian_ci", + 22: "koi8u_general_ci", + 23: "cp1251_ukrainian_ci", + 24: "gb2312_chinese_ci", + 25: "greek_general_ci", + 26: "cp1250_general_ci", + 27: "latin2_croatian_ci", + 28: "gbk_chinese_ci", + 29: "cp1257_lithuanian_ci", + 30: "latin5_turkish_ci", + 31: "latin1_german2_ci", + 32: "armscii8_general_ci", + 33: "utf8_general_ci", + 34: "cp1250_czech_cs", + 35: "ucs2_general_ci", + 36: "cp866_general_ci", + 37: "keybcs2_general_ci", + 38: "macce_general_ci", + 39: "macroman_general_ci", + 40: "cp852_general_ci", + 41: "latin7_general_ci", + 42: "latin7_general_cs", + 43: "macce_bin", + 44: "cp1250_croatian_ci", + 45: "utf8mb4_general_ci", + 46: "utf8mb4_bin", + 47: "latin1_bin", + 48: "latin1_general_ci", + 49: "latin1_general_cs", + 50: "cp1251_bin", + 51: "cp1251_general_ci", + 52: "cp1251_general_cs", + 53: "macroman_bin", + 54: "utf16_general_ci", + 55: "utf16_bin", + 56: "utf16le_general_ci", + 57: "cp1256_general_ci", + 58: "cp1257_bin", + 59: "cp1257_general_ci", + 60: "utf32_general_ci", + 61: "utf32_bin", + 62: "utf16le_bin", + 63: "binary", + 64: "armscii8_bin", + 65: "ascii_bin", + 66: "cp1250_bin", + 67: "cp1256_bin", + 68: "cp866_bin", + 69: "dec8_bin", + 70: "greek_bin", + 71: "hebrew_bin", + 72: "hp8_bin", + 73: "keybcs2_bin", + 74: "koi8r_bin", + 75: "koi8u_bin", + 77: "latin2_bin", + 78: "latin5_bin", + 79: "latin7_bin", + 80: "cp850_bin", + 81: "cp852_bin", + 82: "swe7_bin", + 83: "utf8_bin", + 84: "big5_bin", + 85: "euckr_bin", + 86: "gb2312_bin", + 87: "gbk_bin", + 88: "sjis_bin", + 89: "tis620_bin", + 90: "ucs2_bin", + 91: "ujis_bin", + 92: "geostd8_general_ci", + 93: "geostd8_bin", + 94: "latin1_spanish_ci", + 95: "cp932_japanese_ci", + 96: "cp932_bin", + 97: "eucjpms_japanese_ci", + 98: "eucjpms_bin", + 99: "cp1250_polish_ci", + 101: "utf16_unicode_ci", + 102: "utf16_icelandic_ci", + 103: "utf16_latvian_ci", + 104: "utf16_romanian_ci", + 105: "utf16_slovenian_ci", + 106: "utf16_polish_ci", + 107: "utf16_estonian_ci", + 108: "utf16_spanish_ci", + 109: "utf16_swedish_ci", + 110: "utf16_turkish_ci", + 111: "utf16_czech_ci", + 112: "utf16_danish_ci", + 113: "utf16_lithuanian_ci", + 114: "utf16_slovak_ci", + 115: "utf16_spanish2_ci", + 116: "utf16_roman_ci", + 117: "utf16_persian_ci", + 118: "utf16_esperanto_ci", + 119: "utf16_hungarian_ci", + 120: "utf16_sinhala_ci", + 121: "utf16_german2_ci", + 122: "utf16_croatian_ci", + 123: "utf16_unicode_520_ci", + 124: "utf16_vietnamese_ci", + 128: "ucs2_unicode_ci", + 129: "ucs2_icelandic_ci", + 130: "ucs2_latvian_ci", + 131: "ucs2_romanian_ci", + 132: "ucs2_slovenian_ci", + 133: "ucs2_polish_ci", + 134: "ucs2_estonian_ci", + 135: "ucs2_spanish_ci", + 136: "ucs2_swedish_ci", + 137: "ucs2_turkish_ci", + 138: "ucs2_czech_ci", + 139: "ucs2_danish_ci", + 140: "ucs2_lithuanian_ci", + 141: "ucs2_slovak_ci", + 142: "ucs2_spanish2_ci", + 143: "ucs2_roman_ci", + 144: "ucs2_persian_ci", + 145: "ucs2_esperanto_ci", + 146: "ucs2_hungarian_ci", + 147: "ucs2_sinhala_ci", + 148: "ucs2_german2_ci", + 149: 
"ucs2_croatian_ci", + 150: "ucs2_unicode_520_ci", + 151: "ucs2_vietnamese_ci", + 159: "ucs2_general_mysql500_ci", + 160: "utf32_unicode_ci", + 161: "utf32_icelandic_ci", + 162: "utf32_latvian_ci", + 163: "utf32_romanian_ci", + 164: "utf32_slovenian_ci", + 165: "utf32_polish_ci", + 166: "utf32_estonian_ci", + 167: "utf32_spanish_ci", + 168: "utf32_swedish_ci", + 169: "utf32_turkish_ci", + 170: "utf32_czech_ci", + 171: "utf32_danish_ci", + 172: "utf32_lithuanian_ci", + 173: "utf32_slovak_ci", + 174: "utf32_spanish2_ci", + 175: "utf32_roman_ci", + 176: "utf32_persian_ci", + 177: "utf32_esperanto_ci", + 178: "utf32_hungarian_ci", + 179: "utf32_sinhala_ci", + 180: "utf32_german2_ci", + 181: "utf32_croatian_ci", + 182: "utf32_unicode_520_ci", + 183: "utf32_vietnamese_ci", + 192: "utf8_unicode_ci", + 193: "utf8_icelandic_ci", + 194: "utf8_latvian_ci", + 195: "utf8_romanian_ci", + 196: "utf8_slovenian_ci", + 197: "utf8_polish_ci", + 198: "utf8_estonian_ci", + 199: "utf8_spanish_ci", + 200: "utf8_swedish_ci", + 201: "utf8_turkish_ci", + 202: "utf8_czech_ci", + 203: "utf8_danish_ci", + 204: "utf8_lithuanian_ci", + 205: "utf8_slovak_ci", + 206: "utf8_spanish2_ci", + 207: "utf8_roman_ci", + 208: "utf8_persian_ci", + 209: "utf8_esperanto_ci", + 210: "utf8_hungarian_ci", + 211: "utf8_sinhala_ci", + 212: "utf8_german2_ci", + 213: "utf8_croatian_ci", + 214: "utf8_unicode_520_ci", + 215: "utf8_vietnamese_ci", + 223: "utf8_general_mysql500_ci", + 224: "utf8mb4_unicode_ci", + 225: "utf8mb4_icelandic_ci", + 226: "utf8mb4_latvian_ci", + 227: "utf8mb4_romanian_ci", + 228: "utf8mb4_slovenian_ci", + 229: "utf8mb4_polish_ci", + 230: "utf8mb4_estonian_ci", + 231: "utf8mb4_spanish_ci", + 232: "utf8mb4_swedish_ci", + 233: "utf8mb4_turkish_ci", + 234: "utf8mb4_czech_ci", + 235: "utf8mb4_danish_ci", + 236: "utf8mb4_lithuanian_ci", + 237: "utf8mb4_slovak_ci", + 238: "utf8mb4_spanish2_ci", + 239: "utf8mb4_roman_ci", + 240: "utf8mb4_persian_ci", + 241: "utf8mb4_esperanto_ci", + 242: "utf8mb4_hungarian_ci", + 243: "utf8mb4_sinhala_ci", + 244: "utf8mb4_german2_ci", + 245: "utf8mb4_croatian_ci", + 246: "utf8mb4_unicode_520_ci", + 247: "utf8mb4_vietnamese_ci", + 255: "utf8mb4_0900_ai_ci", +} + +// CollationNames maps MySQL collation name to its ID +var CollationNames = map[string]uint8{ + "big5_chinese_ci": 1, + "latin2_czech_cs": 2, + "dec8_swedish_ci": 3, + "cp850_general_ci": 4, + "latin1_german1_ci": 5, + "hp8_english_ci": 6, + "koi8r_general_ci": 7, + "latin1_swedish_ci": 8, + "latin2_general_ci": 9, + "swe7_swedish_ci": 10, + "ascii_general_ci": 11, + "ujis_japanese_ci": 12, + "sjis_japanese_ci": 13, + "cp1251_bulgarian_ci": 14, + "latin1_danish_ci": 15, + "hebrew_general_ci": 16, + "tis620_thai_ci": 18, + "euckr_korean_ci": 19, + "latin7_estonian_cs": 20, + "latin2_hungarian_ci": 21, + "koi8u_general_ci": 22, + "cp1251_ukrainian_ci": 23, + "gb2312_chinese_ci": 24, + "greek_general_ci": 25, + "cp1250_general_ci": 26, + "latin2_croatian_ci": 27, + "gbk_chinese_ci": 28, + "cp1257_lithuanian_ci": 29, + "latin5_turkish_ci": 30, + "latin1_german2_ci": 31, + "armscii8_general_ci": 32, + "utf8_general_ci": 33, + "cp1250_czech_cs": 34, + "ucs2_general_ci": 35, + "cp866_general_ci": 36, + "keybcs2_general_ci": 37, + "macce_general_ci": 38, + "macroman_general_ci": 39, + "cp852_general_ci": 40, + "latin7_general_ci": 41, + "latin7_general_cs": 42, + "macce_bin": 43, + "cp1250_croatian_ci": 44, + "utf8mb4_general_ci": 45, + "utf8mb4_bin": 46, + "latin1_bin": 47, + "latin1_general_ci": 48, + "latin1_general_cs": 49, + "cp1251_bin": 
50, + "cp1251_general_ci": 51, + "cp1251_general_cs": 52, + "macroman_bin": 53, + "utf16_general_ci": 54, + "utf16_bin": 55, + "utf16le_general_ci": 56, + "cp1256_general_ci": 57, + "cp1257_bin": 58, + "cp1257_general_ci": 59, + "utf32_general_ci": 60, + "utf32_bin": 61, + "utf16le_bin": 62, + "binary": 63, + "armscii8_bin": 64, + "ascii_bin": 65, + "cp1250_bin": 66, + "cp1256_bin": 67, + "cp866_bin": 68, + "dec8_bin": 69, + "greek_bin": 70, + "hebrew_bin": 71, + "hp8_bin": 72, + "keybcs2_bin": 73, + "koi8r_bin": 74, + "koi8u_bin": 75, + "latin2_bin": 77, + "latin5_bin": 78, + "latin7_bin": 79, + "cp850_bin": 80, + "cp852_bin": 81, + "swe7_bin": 82, + "utf8_bin": 83, + "big5_bin": 84, + "euckr_bin": 85, + "gb2312_bin": 86, + "gbk_bin": 87, + "sjis_bin": 88, + "tis620_bin": 89, + "ucs2_bin": 90, + "ujis_bin": 91, + "geostd8_general_ci": 92, + "geostd8_bin": 93, + "latin1_spanish_ci": 94, + "cp932_japanese_ci": 95, + "cp932_bin": 96, + "eucjpms_japanese_ci": 97, + "eucjpms_bin": 98, + "cp1250_polish_ci": 99, + "utf16_unicode_ci": 101, + "utf16_icelandic_ci": 102, + "utf16_latvian_ci": 103, + "utf16_romanian_ci": 104, + "utf16_slovenian_ci": 105, + "utf16_polish_ci": 106, + "utf16_estonian_ci": 107, + "utf16_spanish_ci": 108, + "utf16_swedish_ci": 109, + "utf16_turkish_ci": 110, + "utf16_czech_ci": 111, + "utf16_danish_ci": 112, + "utf16_lithuanian_ci": 113, + "utf16_slovak_ci": 114, + "utf16_spanish2_ci": 115, + "utf16_roman_ci": 116, + "utf16_persian_ci": 117, + "utf16_esperanto_ci": 118, + "utf16_hungarian_ci": 119, + "utf16_sinhala_ci": 120, + "utf16_german2_ci": 121, + "utf16_croatian_ci": 122, + "utf16_unicode_520_ci": 123, + "utf16_vietnamese_ci": 124, + "ucs2_unicode_ci": 128, + "ucs2_icelandic_ci": 129, + "ucs2_latvian_ci": 130, + "ucs2_romanian_ci": 131, + "ucs2_slovenian_ci": 132, + "ucs2_polish_ci": 133, + "ucs2_estonian_ci": 134, + "ucs2_spanish_ci": 135, + "ucs2_swedish_ci": 136, + "ucs2_turkish_ci": 137, + "ucs2_czech_ci": 138, + "ucs2_danish_ci": 139, + "ucs2_lithuanian_ci": 140, + "ucs2_slovak_ci": 141, + "ucs2_spanish2_ci": 142, + "ucs2_roman_ci": 143, + "ucs2_persian_ci": 144, + "ucs2_esperanto_ci": 145, + "ucs2_hungarian_ci": 146, + "ucs2_sinhala_ci": 147, + "ucs2_german2_ci": 148, + "ucs2_croatian_ci": 149, + "ucs2_unicode_520_ci": 150, + "ucs2_vietnamese_ci": 151, + "ucs2_general_mysql500_ci": 159, + "utf32_unicode_ci": 160, + "utf32_icelandic_ci": 161, + "utf32_latvian_ci": 162, + "utf32_romanian_ci": 163, + "utf32_slovenian_ci": 164, + "utf32_polish_ci": 165, + "utf32_estonian_ci": 166, + "utf32_spanish_ci": 167, + "utf32_swedish_ci": 168, + "utf32_turkish_ci": 169, + "utf32_czech_ci": 170, + "utf32_danish_ci": 171, + "utf32_lithuanian_ci": 172, + "utf32_slovak_ci": 173, + "utf32_spanish2_ci": 174, + "utf32_roman_ci": 175, + "utf32_persian_ci": 176, + "utf32_esperanto_ci": 177, + "utf32_hungarian_ci": 178, + "utf32_sinhala_ci": 179, + "utf32_german2_ci": 180, + "utf32_croatian_ci": 181, + "utf32_unicode_520_ci": 182, + "utf32_vietnamese_ci": 183, + "utf8_unicode_ci": 192, + "utf8_icelandic_ci": 193, + "utf8_latvian_ci": 194, + "utf8_romanian_ci": 195, + "utf8_slovenian_ci": 196, + "utf8_polish_ci": 197, + "utf8_estonian_ci": 198, + "utf8_spanish_ci": 199, + "utf8_swedish_ci": 200, + "utf8_turkish_ci": 201, + "utf8_czech_ci": 202, + "utf8_danish_ci": 203, + "utf8_lithuanian_ci": 204, + "utf8_slovak_ci": 205, + "utf8_spanish2_ci": 206, + "utf8_roman_ci": 207, + "utf8_persian_ci": 208, + "utf8_esperanto_ci": 209, + "utf8_hungarian_ci": 210, + "utf8_sinhala_ci": 211, + 
"utf8_german2_ci": 212, + "utf8_croatian_ci": 213, + "utf8_unicode_520_ci": 214, + "utf8_vietnamese_ci": 215, + "utf8_general_mysql500_ci": 223, + "utf8mb4_unicode_ci": 224, + "utf8mb4_icelandic_ci": 225, + "utf8mb4_latvian_ci": 226, + "utf8mb4_romanian_ci": 227, + "utf8mb4_slovenian_ci": 228, + "utf8mb4_polish_ci": 229, + "utf8mb4_estonian_ci": 230, + "utf8mb4_spanish_ci": 231, + "utf8mb4_swedish_ci": 232, + "utf8mb4_turkish_ci": 233, + "utf8mb4_czech_ci": 234, + "utf8mb4_danish_ci": 235, + "utf8mb4_lithuanian_ci": 236, + "utf8mb4_slovak_ci": 237, + "utf8mb4_spanish2_ci": 238, + "utf8mb4_roman_ci": 239, + "utf8mb4_persian_ci": 240, + "utf8mb4_esperanto_ci": 241, + "utf8mb4_hungarian_ci": 242, + "utf8mb4_sinhala_ci": 243, + "utf8mb4_german2_ci": 244, + "utf8mb4_croatian_ci": 245, + "utf8mb4_unicode_520_ci": 246, + "utf8mb4_vietnamese_ci": 247, + "utf8mb4_0900_ai_ci": 255, +} + +// MySQL collation information. +const ( + UTF8Charset = "utf8" + UTF8MB4Charset = "utf8mb4" + DefaultCharset = UTF8MB4Charset + // DefaultCollationID is utf8mb4_bin(46) + DefaultCollationID = 46 + Latin1DefaultCollationID = 47 + ASCIIDefaultCollationID = 65 + UTF8DefaultCollationID = 83 + UTF8MB4DefaultCollationID = 46 + BinaryDefaultCollationID = 63 + UTF8DefaultCollation = "utf8_bin" + UTF8MB4DefaultCollation = "utf8mb4_bin" + DefaultCollationName = UTF8MB4DefaultCollation + + // MaxBytesOfCharacter, is the max bytes length of a character, + // refer to RFC3629, in UTF-8, characters from the U+0000..U+10FFFF range + // (the UTF-16 accessible range) are encoded using sequences of 1 to 4 octets. + MaxBytesOfCharacter = 4 +) + +// IsUTF8Charset checks if charset is utf8 or utf8mb4 +func IsUTF8Charset(charset string) bool { + return charset == UTF8Charset || charset == UTF8MB4Charset +} + +// RangeGraph defines valid unicode characters to use in column names. It strictly follows MySQL's definition. +// See #3994. +var RangeGraph = []*unicode.RangeTable{ + // _MY_PNT + unicode.No, + unicode.Mn, + unicode.Me, + unicode.Pc, + unicode.Pd, + unicode.Pd, + unicode.Ps, + unicode.Pe, + unicode.Pi, + unicode.Pf, + unicode.Po, + unicode.Sm, + unicode.Sc, + unicode.Sk, + unicode.So, + // _MY_U + unicode.Lu, + unicode.Lt, + unicode.Nl, + // _MY_L + unicode.Ll, + unicode.Lm, + unicode.Lo, + unicode.Nl, + unicode.Mn, + unicode.Mc, + unicode.Me, + // _MY_NMR + unicode.Nd, + unicode.Nl, + unicode.No, +} diff --git a/parser/mysql/const.go b/parser/mysql/const.go new file mode 100644 index 0000000..c12475c --- /dev/null +++ b/parser/mysql/const.go @@ -0,0 +1,754 @@ +// Copyright 2017 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package mysql + +import ( + "fmt" + "strings" +) + +func newInvalidModeErr(s string) error { + return NewErr(ErrWrongValueForVar, "sql_mode", s) +} + +// Version information. +var ( + // TiDBReleaseVersion is initialized by (git describe --tags) in Makefile. + TiDBReleaseVersion = "None" + + // ServerVersion is the version information of this tidb-server in MySQL's format. 
+ ServerVersion = fmt.Sprintf("5.7.25-TiDB-%s", TiDBReleaseVersion) +) + +// Header information. +const ( + OKHeader byte = 0x00 + ErrHeader byte = 0xff + EOFHeader byte = 0xfe + LocalInFileHeader byte = 0xfb +) + +// Server information. +const ( + ServerStatusInTrans uint16 = 0x0001 + ServerStatusAutocommit uint16 = 0x0002 + ServerMoreResultsExists uint16 = 0x0008 + ServerStatusNoGoodIndexUsed uint16 = 0x0010 + ServerStatusNoIndexUsed uint16 = 0x0020 + ServerStatusCursorExists uint16 = 0x0040 + ServerStatusLastRowSend uint16 = 0x0080 + ServerStatusDBDropped uint16 = 0x0100 + ServerStatusNoBackslashEscaped uint16 = 0x0200 + ServerStatusMetadataChanged uint16 = 0x0400 + ServerStatusWasSlow uint16 = 0x0800 + ServerPSOutParams uint16 = 0x1000 +) + +// HasCursorExistsFlag return true if cursor exists indicated by server status. +func HasCursorExistsFlag(serverStatus uint16) bool { + return serverStatus&ServerStatusCursorExists > 0 +} + +// Identifier length limitations. +// See https://dev.mysql.com/doc/refman/5.7/en/identifiers.html +const ( + // MaxPayloadLen is the max packet payload length. + MaxPayloadLen = 1<<24 - 1 + // MaxTableNameLength is max length of table name identifier. + MaxTableNameLength = 64 + // MaxDatabaseNameLength is max length of database name identifier. + MaxDatabaseNameLength = 64 + // MaxColumnNameLength is max length of column name identifier. + MaxColumnNameLength = 64 + // MaxKeyParts is max length of key parts. + MaxKeyParts = 16 + // MaxIndexIdentifierLen is max length of index identifier. + MaxIndexIdentifierLen = 64 + // MaxConstraintIdentifierLen is max length of constrain identifier. + MaxConstraintIdentifierLen = 64 + // MaxViewIdentifierLen is max length of view identifier. + MaxViewIdentifierLen = 64 + // MaxAliasIdentifierLen is max length of alias identifier. + MaxAliasIdentifierLen = 256 + // MaxUserDefinedVariableLen is max length of user-defined variable. + MaxUserDefinedVariableLen = 64 +) + +// ErrTextLength error text length limit. +const ErrTextLength = 80 + +// Command information. +const ( + ComSleep byte = iota + ComQuit + ComInitDB + ComQuery + ComFieldList + ComCreateDB + ComDropDB + ComRefresh + ComShutdown + ComStatistics + ComProcessInfo + ComConnect + ComProcessKill + ComDebug + ComPing + ComTime + ComDelayedInsert + ComChangeUser + ComBinlogDump + ComTableDump + ComConnectOut + ComRegisterSlave + ComStmtPrepare + ComStmtExecute + ComStmtSendLongData + ComStmtClose + ComStmtReset + ComSetOption + ComStmtFetch + ComDaemon + ComBinlogDumpGtid + ComResetConnection + ComEnd +) + +// Client information. +const ( + ClientLongPassword uint32 = 1 << iota + ClientFoundRows + ClientLongFlag + ClientConnectWithDB + ClientNoSchema + ClientCompress + ClientODBC + ClientLocalFiles + ClientIgnoreSpace + ClientProtocol41 + ClientInteractive + ClientSSL + ClientIgnoreSigpipe + ClientTransactions + ClientReserved + ClientSecureConnection + ClientMultiStatements + ClientMultiResults + ClientPSMultiResults + ClientPluginAuth + ClientConnectAtts + ClientPluginAuthLenencClientData +) + +// Cache type information. +const ( + TypeNoCache byte = 0xff +) + +// Auth name information. +const ( + AuthName = "mysql_native_password" +) + +// MySQL database and tables. +const ( + // SystemDB is the name of system database. + SystemDB = "mysql" + // GlobalPrivTable is the table in system db contains global scope privilege info. + GlobalPrivTable = "global_priv" + // UserTable is the table in system db contains user info. 
+ UserTable = "User" + // DBTable is the table in system db contains db scope privilege info. + DBTable = "DB" + // TablePrivTable is the table in system db contains table scope privilege info. + TablePrivTable = "Tables_priv" + // ColumnPrivTable is the table in system db contains column scope privilege info. + ColumnPrivTable = "Columns_priv" + // GlobalVariablesTable is the table contains global system variables. + GlobalVariablesTable = "GLOBAL_VARIABLES" + // GlobalStatusTable is the table contains global status variables. + GlobalStatusTable = "GLOBAL_STATUS" + // TiDBTable is the table contains tidb info. + TiDBTable = "tidb" + // RoleEdgesTable is the table contains role relation info + RoleEdgeTable = "role_edges" + // DefaultRoleTable is the table contain default active role info + DefaultRoleTable = "default_roles" +) + +// PrivilegeType privilege +type PrivilegeType uint32 + +const ( + _ PrivilegeType = 1 << iota + // CreatePriv is the privilege to create schema/table. + CreatePriv + // SelectPriv is the privilege to read from table. + SelectPriv + // InsertPriv is the privilege to insert data into table. + InsertPriv + // UpdatePriv is the privilege to update data in table. + UpdatePriv + // DeletePriv is the privilege to delete data from table. + DeletePriv + // ShowDBPriv is the privilege to run show databases statement. + ShowDBPriv + // SuperPriv enables many operations and server behaviors. + SuperPriv + // CreateUserPriv is the privilege to create user. + CreateUserPriv + // TriggerPriv is not checked yet. + TriggerPriv + // DropPriv is the privilege to drop schema/table. + DropPriv + // ProcessPriv pertains to display of information about the threads executing within the server. + ProcessPriv + // GrantPriv is the privilege to grant privilege to user. + GrantPriv + // ReferencesPriv is not checked yet. + ReferencesPriv + // AlterPriv is the privilege to run alter statement. + AlterPriv + // ExecutePriv is the privilege to run execute statement. + ExecutePriv + // IndexPriv is the privilege to create/drop index. + IndexPriv + // CreateViewPriv is the privilege to create view. + CreateViewPriv + // ShowViewPriv is the privilege to show create view. + ShowViewPriv + // CreateRolePriv the privilege to create a role. + CreateRolePriv + // DropRolePriv is the privilege to drop a role. + DropRolePriv + + CreateTMPTablePriv + LockTablesPriv + CreateRoutinePriv + AlterRoutinePriv + EventPriv + + // ShutdownPriv the privilege to shutdown a server. + ShutdownPriv + + // AllPriv is the privilege for all actions. + AllPriv +) + +// AllPrivMask is the mask for PrivilegeType with all bits set to 1. +// If it's passed to RequestVerification, it means any privilege would be OK. +const AllPrivMask = AllPriv - 1 + +// MySQL type maximum length. +const ( + // For arguments that have no fixed number of decimals, the decimals value is set to 31, + // which is 1 more than the maximum number of decimals permitted for the DECIMAL, FLOAT, and DOUBLE data types. + NotFixedDec = 31 + + MaxIntWidth = 20 + MaxRealWidth = 23 + MaxFloatingTypeScale = 30 + MaxFloatingTypeWidth = 255 + MaxDecimalScale = 30 + MaxDecimalWidth = 65 + MaxDateWidth = 10 // YYYY-MM-DD. 
+ MaxDatetimeWidthNoFsp = 19 // YYYY-MM-DD HH:MM:SS + MaxDatetimeWidthWithFsp = 26 // YYYY-MM-DD HH:MM:SS[.fraction] + MaxDatetimeFullWidth = 29 // YYYY-MM-DD HH:MM:SS.###### AM + MaxDurationWidthNoFsp = 10 // HH:MM:SS + MaxDurationWidthWithFsp = 15 // HH:MM:SS[.fraction] + MaxBlobWidth = 16777216 + MaxBitDisplayWidth = 64 + MaxFloatPrecisionLength = 24 + MaxDoublePrecisionLength = 53 +) + +// MySQL max type field length. +const ( + MaxFieldCharLength = 255 + MaxFieldVarCharLength = 65535 +) + +// MaxTypeSetMembers is the number of set members. +const MaxTypeSetMembers = 64 + +// PWDHashLen is the length of password's hash. +const PWDHashLen = 40 + +// Priv2UserCol is the privilege to mysql.user table column name. +var Priv2UserCol = map[PrivilegeType]string{ + CreatePriv: "Create_priv", + SelectPriv: "Select_priv", + InsertPriv: "Insert_priv", + UpdatePriv: "Update_priv", + DeletePriv: "Delete_priv", + ShowDBPriv: "Show_db_priv", + SuperPriv: "Super_priv", + CreateUserPriv: "Create_user_priv", + TriggerPriv: "Trigger_priv", + DropPriv: "Drop_priv", + ProcessPriv: "Process_priv", + GrantPriv: "Grant_priv", + ReferencesPriv: "References_priv", + AlterPriv: "Alter_priv", + ExecutePriv: "Execute_priv", + IndexPriv: "Index_priv", + CreateViewPriv: "Create_view_priv", + ShowViewPriv: "Show_view_priv", + CreateRolePriv: "Create_role_priv", + DropRolePriv: "Drop_role_priv", + CreateTMPTablePriv: "Create_tmp_table_priv", + LockTablesPriv: "Lock_tables_priv", + CreateRoutinePriv: "Create_routine_priv", + AlterRoutinePriv: "Alter_routine_priv", + EventPriv: "Event_priv", + ShutdownPriv: "Shutdown_priv", +} + +// Col2PrivType is the privilege tables column name to privilege type. +var Col2PrivType = map[string]PrivilegeType{ + "Create_priv": CreatePriv, + "Select_priv": SelectPriv, + "Insert_priv": InsertPriv, + "Update_priv": UpdatePriv, + "Delete_priv": DeletePriv, + "Show_db_priv": ShowDBPriv, + "Super_priv": SuperPriv, + "Create_user_priv": CreateUserPriv, + "Trigger_priv": TriggerPriv, + "Drop_priv": DropPriv, + "Process_priv": ProcessPriv, + "Grant_priv": GrantPriv, + "References_priv": ReferencesPriv, + "Alter_priv": AlterPriv, + "Execute_priv": ExecutePriv, + "Index_priv": IndexPriv, + "Create_view_priv": CreateViewPriv, + "Show_view_priv": ShowViewPriv, + "Create_role_priv": CreateRolePriv, + "Drop_role_priv": DropRolePriv, + "Create_tmp_table_priv": CreateTMPTablePriv, + "Lock_tables_priv": LockTablesPriv, + "Create_routine_priv": CreateRoutinePriv, + "Alter_routine_priv": AlterRoutinePriv, + "Event_priv": EventPriv, + "Shutdown_priv": ShutdownPriv, +} + +// Command2Str is the command information to command name. 
+var Command2Str = map[byte]string{ + ComSleep: "Sleep", + ComQuit: "Quit", + ComInitDB: "Init DB", + ComQuery: "Query", + ComFieldList: "Field List", + ComCreateDB: "Create DB", + ComDropDB: "Drop DB", + ComRefresh: "Refresh", + ComShutdown: "Shutdown", + ComStatistics: "Statistics", + ComProcessInfo: "Processlist", + ComConnect: "Connect", + ComProcessKill: "Kill", + ComDebug: "Debug", + ComPing: "Ping", + ComTime: "Time", + ComDelayedInsert: "Delayed Insert", + ComChangeUser: "Change User", + ComBinlogDump: "Binlog Dump", + ComTableDump: "Table Dump", + ComConnectOut: "Connect out", + ComRegisterSlave: "Register Slave", + ComStmtPrepare: "Prepare", + ComStmtExecute: "Execute", + ComStmtSendLongData: "Long Data", + ComStmtClose: "Close stmt", + ComStmtReset: "Reset stmt", + ComSetOption: "Set option", + ComStmtFetch: "Fetch", + ComDaemon: "Daemon", + ComBinlogDumpGtid: "Binlog Dump", + ComResetConnection: "Reset connect", +} + +// Priv2Str is the map for privilege to string. +var Priv2Str = map[PrivilegeType]string{ + CreatePriv: "Create", + SelectPriv: "Select", + InsertPriv: "Insert", + UpdatePriv: "Update", + DeletePriv: "Delete", + ShowDBPriv: "Show Databases", + SuperPriv: "Super", + CreateUserPriv: "Create User", + TriggerPriv: "Trigger", + DropPriv: "Drop", + ProcessPriv: "Process", + GrantPriv: "Grant Option", + ReferencesPriv: "References", + AlterPriv: "Alter", + ExecutePriv: "Execute", + IndexPriv: "Index", + CreateViewPriv: "Create View", + ShowViewPriv: "Show View", + CreateRolePriv: "Create Role", + DropRolePriv: "Drop Role", + CreateTMPTablePriv: "CREATE TEMPORARY TABLES", + LockTablesPriv: "LOCK TABLES", + CreateRoutinePriv: "CREATE ROUTINE", + AlterRoutinePriv: "ALTER ROUTINE", + EventPriv: "EVENT", + ShutdownPriv: "SHUTDOWN", +} + +// Priv2SetStr is the map for privilege to string. +var Priv2SetStr = map[PrivilegeType]string{ + CreatePriv: "Create", + SelectPriv: "Select", + InsertPriv: "Insert", + UpdatePriv: "Update", + DeletePriv: "Delete", + DropPriv: "Drop", + GrantPriv: "Grant", + AlterPriv: "Alter", + ExecutePriv: "Execute", + IndexPriv: "Index", + CreateViewPriv: "Create View", + ShowViewPriv: "Show View", + CreateRolePriv: "Create Role", + DropRolePriv: "Drop Role", + ShutdownPriv: "Shutdown Role", +} + +// SetStr2Priv is the map for privilege set string to privilege type. +var SetStr2Priv = map[string]PrivilegeType{ + "Create": CreatePriv, + "Select": SelectPriv, + "Insert": InsertPriv, + "Update": UpdatePriv, + "Delete": DeletePriv, + "Drop": DropPriv, + "Grant": GrantPriv, + "Alter": AlterPriv, + "Execute": ExecutePriv, + "Index": IndexPriv, + "Create View": CreateViewPriv, + "Show View": ShowViewPriv, +} + +// AllGlobalPrivs is all the privileges in global scope. +var AllGlobalPrivs = []PrivilegeType{SelectPriv, InsertPriv, UpdatePriv, DeletePriv, CreatePriv, DropPriv, ProcessPriv, ReferencesPriv, AlterPriv, ShowDBPriv, SuperPriv, ExecutePriv, IndexPriv, CreateUserPriv, TriggerPriv, CreateViewPriv, ShowViewPriv, CreateRolePriv, DropRolePriv, CreateTMPTablePriv, LockTablesPriv, CreateRoutinePriv, AlterRoutinePriv, EventPriv, ShutdownPriv} + +// AllDBPrivs is all the privileges in database scope. +var AllDBPrivs = []PrivilegeType{SelectPriv, InsertPriv, UpdatePriv, DeletePriv, CreatePriv, DropPriv, AlterPriv, ExecutePriv, IndexPriv, CreateViewPriv, ShowViewPriv} + +// AllTablePrivs is all the privileges in table scope. 
+var AllTablePrivs = []PrivilegeType{SelectPriv, InsertPriv, UpdatePriv, DeletePriv, CreatePriv, DropPriv, AlterPriv, IndexPriv} + +// AllColumnPrivs is all the privileges in column scope. +var AllColumnPrivs = []PrivilegeType{SelectPriv, InsertPriv, UpdatePriv} + +// AllPrivilegeLiteral is the string literal for All Privilege. +const AllPrivilegeLiteral = "ALL PRIVILEGES" + +// DefaultSQLMode for GLOBAL_VARIABLES +const DefaultSQLMode = "ONLY_FULL_GROUP_BY,STRICT_TRANS_TABLES,NO_ZERO_IN_DATE,NO_ZERO_DATE,ERROR_FOR_DIVISION_BY_ZERO,NO_AUTO_CREATE_USER,NO_ENGINE_SUBSTITUTION" + +// DefaultLengthOfMysqlTypes is the map for default physical length of MySQL data types. +// See http://dev.mysql.com/doc/refman/5.7/en/storage-requirements.html +var DefaultLengthOfMysqlTypes = map[byte]int{ + TypeYear: 1, + TypeDate: 3, + TypeDuration: 3, + TypeDatetime: 8, + TypeTimestamp: 4, + + TypeTiny: 1, + TypeShort: 2, + TypeInt24: 3, + TypeLong: 4, + TypeLonglong: 8, + TypeFloat: 4, + TypeDouble: 8, + + TypeEnum: 2, + TypeString: 1, + TypeSet: 8, +} + +// DefaultLengthOfTimeFraction is the map for default physical length of time fractions. +var DefaultLengthOfTimeFraction = map[int]int{ + 0: 0, + + 1: 1, + 2: 1, + + 3: 2, + 4: 2, + + 5: 3, + 6: 3, +} + +// SQLMode is the type for MySQL sql_mode. +// See https://dev.mysql.com/doc/refman/5.7/en/sql-mode.html +type SQLMode int + +// HasNoZeroDateMode detects if 'NO_ZERO_DATE' mode is set in SQLMode +func (m SQLMode) HasNoZeroDateMode() bool { + return m&ModeNoZeroDate == ModeNoZeroDate +} + +// HasNoZeroInDateMode detects if 'NO_ZERO_IN_DATE' mode is set in SQLMode +func (m SQLMode) HasNoZeroInDateMode() bool { + return m&ModeNoZeroInDate == ModeNoZeroInDate +} + +// HasErrorForDivisionByZeroMode detects if 'ERROR_FOR_DIVISION_BY_ZERO' mode is set in SQLMode +func (m SQLMode) HasErrorForDivisionByZeroMode() bool { + return m&ModeErrorForDivisionByZero == ModeErrorForDivisionByZero +} + +// HasOnlyFullGroupBy detects if 'ONLY_FULL_GROUP_BY' mode is set in SQLMode +func (m SQLMode) HasOnlyFullGroupBy() bool { + return m&ModeOnlyFullGroupBy == ModeOnlyFullGroupBy +} + +// HasStrictMode detects if 'STRICT_TRANS_TABLES' or 'STRICT_ALL_TABLES' mode is set in SQLMode +func (m SQLMode) HasStrictMode() bool { + return m&ModeStrictTransTables == ModeStrictTransTables || m&ModeStrictAllTables == ModeStrictAllTables +} + +// HasPipesAsConcatMode detects if 'PIPES_AS_CONCAT' mode is set in SQLMode +func (m SQLMode) HasPipesAsConcatMode() bool { + return m&ModePipesAsConcat == ModePipesAsConcat +} + +// HasNoUnsignedSubtractionMode detects if 'NO_UNSIGNED_SUBTRACTION' mode is set in SQLMode +func (m SQLMode) HasNoUnsignedSubtractionMode() bool { + return m&ModeNoUnsignedSubtraction == ModeNoUnsignedSubtraction +} + +// HasHighNotPrecedenceMode detects if 'HIGH_NOT_PRECEDENCE' mode is set in SQLMode +func (m SQLMode) HasHighNotPrecedenceMode() bool { + return m&ModeHighNotPrecedence == ModeHighNotPrecedence +} + +// HasANSIQuotesMode detects if 'ANSI_QUOTES' mode is set in SQLMode +func (m SQLMode) HasANSIQuotesMode() bool { + return m&ModeANSIQuotes == ModeANSIQuotes +} + +// HasRealAsFloatMode detects if 'REAL_AS_FLOAT' mode is set in SQLMode +func (m SQLMode) HasRealAsFloatMode() bool { + return m&ModeRealAsFloat == ModeRealAsFloat +} + +// HasPadCharToFullLengthMode detects if 'PAD_CHAR_TO_FULL_LENGTH' mode is set in SQLMode +func (m SQLMode) HasPadCharToFullLengthMode() bool { + return m&ModePadCharToFullLength == ModePadCharToFullLength +} + +// 
HasNoBackslashEscapesMode detects if 'NO_BACKSLASH_ESCAPES' mode is set in SQLMode +func (m SQLMode) HasNoBackslashEscapesMode() bool { + return m&ModeNoBackslashEscapes == ModeNoBackslashEscapes +} + +// HasIgnoreSpaceMode detects if 'IGNORE_SPACE' mode is set in SQLMode +func (m SQLMode) HasIgnoreSpaceMode() bool { + return m&ModeIgnoreSpace == ModeIgnoreSpace +} + +// HasNoAutoCreateUserMode detects if 'NO_AUTO_CREATE_USER' mode is set in SQLMode +func (m SQLMode) HasNoAutoCreateUserMode() bool { + return m&ModeNoAutoCreateUser == ModeNoAutoCreateUser +} + +// HasAllowInvalidDatesMode detects if 'ALLOW_INVALID_DATES' mode is set in SQLMode +func (m SQLMode) HasAllowInvalidDatesMode() bool { + return m&ModeAllowInvalidDates == ModeAllowInvalidDates +} + +// consts for sql modes. +const ( + ModeNone SQLMode = 0 + ModeRealAsFloat SQLMode = 1 << iota + ModePipesAsConcat + ModeANSIQuotes + ModeIgnoreSpace + ModeNotUsed + ModeOnlyFullGroupBy + ModeNoUnsignedSubtraction + ModeNoDirInCreate + ModePostgreSQL + ModeOracle + ModeMsSQL + ModeDb2 + ModeMaxdb + ModeNoKeyOptions + ModeNoTableOptions + ModeNoFieldOptions + ModeMySQL323 + ModeMySQL40 + ModeANSI + ModeNoAutoValueOnZero + ModeNoBackslashEscapes + ModeStrictTransTables + ModeStrictAllTables + ModeNoZeroInDate + ModeNoZeroDate + ModeInvalidDates + ModeErrorForDivisionByZero + ModeTraditional + ModeNoAutoCreateUser + ModeHighNotPrecedence + ModeNoEngineSubstitution + ModePadCharToFullLength + ModeAllowInvalidDates +) + +// FormatSQLModeStr re-format 'SQL_MODE' variable. +func FormatSQLModeStr(s string) string { + s = strings.ToUpper(strings.TrimRight(s, " ")) + parts := strings.Split(s, ",") + var nonEmptyParts []string + existParts := make(map[string]string) + for _, part := range parts { + if len(part) == 0 { + continue + } + if modeParts, ok := CombinationSQLMode[part]; ok { + for _, modePart := range modeParts { + if _, exist := existParts[modePart]; !exist { + nonEmptyParts = append(nonEmptyParts, modePart) + existParts[modePart] = modePart + } + } + } + if _, exist := existParts[part]; !exist { + nonEmptyParts = append(nonEmptyParts, part) + existParts[part] = part + } + } + return strings.Join(nonEmptyParts, ",") +} + +// GetSQLMode gets the sql mode for string literal. SQL_mode is a list of different modes separated by commas. +// The input string must be formatted by 'FormatSQLModeStr' +func GetSQLMode(s string) (SQLMode, error) { + strs := strings.Split(s, ",") + var sqlMode SQLMode + for i, length := 0, len(strs); i < length; i++ { + mode, ok := Str2SQLMode[strs[i]] + if !ok && strs[i] != "" { + return sqlMode, newInvalidModeErr(strs[i]) + } + sqlMode = sqlMode | mode + } + return sqlMode, nil +} + +// Str2SQLMode is the string represent of sql_mode to sql_mode map. 
+var Str2SQLMode = map[string]SQLMode{ + "REAL_AS_FLOAT": ModeRealAsFloat, + "PIPES_AS_CONCAT": ModePipesAsConcat, + "ANSI_QUOTES": ModeANSIQuotes, + "IGNORE_SPACE": ModeIgnoreSpace, + "NOT_USED": ModeNotUsed, + "ONLY_FULL_GROUP_BY": ModeOnlyFullGroupBy, + "NO_UNSIGNED_SUBTRACTION": ModeNoUnsignedSubtraction, + "NO_DIR_IN_CREATE": ModeNoDirInCreate, + "POSTGRESQL": ModePostgreSQL, + "ORACLE": ModeOracle, + "MSSQL": ModeMsSQL, + "DB2": ModeDb2, + "MAXDB": ModeMaxdb, + "NO_KEY_OPTIONS": ModeNoKeyOptions, + "NO_TABLE_OPTIONS": ModeNoTableOptions, + "NO_FIELD_OPTIONS": ModeNoFieldOptions, + "MYSQL323": ModeMySQL323, + "MYSQL40": ModeMySQL40, + "ANSI": ModeANSI, + "NO_AUTO_VALUE_ON_ZERO": ModeNoAutoValueOnZero, + "NO_BACKSLASH_ESCAPES": ModeNoBackslashEscapes, + "STRICT_TRANS_TABLES": ModeStrictTransTables, + "STRICT_ALL_TABLES": ModeStrictAllTables, + "NO_ZERO_IN_DATE": ModeNoZeroInDate, + "NO_ZERO_DATE": ModeNoZeroDate, + "INVALID_DATES": ModeInvalidDates, + "ERROR_FOR_DIVISION_BY_ZERO": ModeErrorForDivisionByZero, + "TRADITIONAL": ModeTraditional, + "NO_AUTO_CREATE_USER": ModeNoAutoCreateUser, + "HIGH_NOT_PRECEDENCE": ModeHighNotPrecedence, + "NO_ENGINE_SUBSTITUTION": ModeNoEngineSubstitution, + "PAD_CHAR_TO_FULL_LENGTH": ModePadCharToFullLength, + "ALLOW_INVALID_DATES": ModeAllowInvalidDates, +} + +// CombinationSQLMode is the special modes that provided as shorthand for combinations of mode values. +// See https://dev.mysql.com/doc/refman/5.7/en/sql-mode.html#sql-mode-combo. +var CombinationSQLMode = map[string][]string{ + "ANSI": {"REAL_AS_FLOAT", "PIPES_AS_CONCAT", "ANSI_QUOTES", "IGNORE_SPACE", "ONLY_FULL_GROUP_BY"}, + "DB2": {"PIPES_AS_CONCAT", "ANSI_QUOTES", "IGNORE_SPACE", "NO_KEY_OPTIONS", "NO_TABLE_OPTIONS", "NO_FIELD_OPTIONS"}, + "MAXDB": {"PIPES_AS_CONCAT", "ANSI_QUOTES", "IGNORE_SPACE", "NO_KEY_OPTIONS", "NO_TABLE_OPTIONS", "NO_FIELD_OPTIONS", "NO_AUTO_CREATE_USER"}, + "MSSQL": {"PIPES_AS_CONCAT", "ANSI_QUOTES", "IGNORE_SPACE", "NO_KEY_OPTIONS", "NO_TABLE_OPTIONS", "NO_FIELD_OPTIONS"}, + "MYSQL323": {"MYSQL323", "HIGH_NOT_PRECEDENCE"}, + "MYSQL40": {"MYSQL40", "HIGH_NOT_PRECEDENCE"}, + "ORACLE": {"PIPES_AS_CONCAT", "ANSI_QUOTES", "IGNORE_SPACE", "NO_KEY_OPTIONS", "NO_TABLE_OPTIONS", "NO_FIELD_OPTIONS", "NO_AUTO_CREATE_USER"}, + "POSTGRESQL": {"PIPES_AS_CONCAT", "ANSI_QUOTES", "IGNORE_SPACE", "NO_KEY_OPTIONS", "NO_TABLE_OPTIONS", "NO_FIELD_OPTIONS"}, + "TRADITIONAL": {"STRICT_TRANS_TABLES", "STRICT_ALL_TABLES", "NO_ZERO_IN_DATE", "NO_ZERO_DATE", "ERROR_FOR_DIVISION_BY_ZERO", "NO_AUTO_CREATE_USER", "NO_ENGINE_SUBSTITUTION"}, +} + +// FormatFunc is the locale format function signature. +type FormatFunc func(string, string) (string, error) + +// GetLocaleFormatFunction get the format function for sepcific locale. +func GetLocaleFormatFunction(loc string) FormatFunc { + locale, exist := locale2FormatFunction[loc] + if !exist { + return formatNotSupport + } + return locale +} + +// locale2FormatFunction is the string represent of locale format function. +var locale2FormatFunction = map[string]FormatFunc{ + "en_US": formatENUS, + "zh_CN": formatZHCN, +} + +// PriorityEnum is defined for Priority const values. +type PriorityEnum int + +// Priority const values. +// See https://dev.mysql.com/doc/refman/5.7/en/insert.html +const ( + NoPriority PriorityEnum = iota + LowPriority + HighPriority + DelayedPriority +) + +// Priority2Str is used to convert the statement priority to string. 
+var Priority2Str = map[PriorityEnum]string{ + NoPriority: "NO_PRIORITY", + LowPriority: "LOW_PRIORITY", + HighPriority: "HIGH_PRIORITY", + DelayedPriority: "DELAYED", +} + +// PrimaryKeyName defines primary key name. +const ( + PrimaryKeyName = "PRIMARY" +) diff --git a/parser/mysql/const_test.go b/parser/mysql/const_test.go new file mode 100644 index 0000000..7272634 --- /dev/null +++ b/parser/mysql/const_test.go @@ -0,0 +1,54 @@ +// Copyright 2017 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package mysql + +import ( + "testing" + + . "github.com/pingcap/check" +) + +var _ = Suite(&testConstSuite{}) + +type testConstSuite struct{} + +func TestT(t *testing.T) { + TestingT(t) +} + +func (s *testConstSuite) TestPrivAllConsistency(c *C) { + // AllPriv in mysql.user columns. + for priv := PrivilegeType(CreatePriv); priv != AllPriv; priv = priv << 1 { + _, ok := Priv2UserCol[priv] + c.Assert(ok, IsTrue, Commentf("priv fail %d", priv)) + } + + for _, v := range AllGlobalPrivs { + _, ok := Priv2UserCol[v] + c.Assert(ok, IsTrue) + } + + c.Assert(len(Priv2UserCol), Equals, len(AllGlobalPrivs)+1) + + for _, v := range Priv2UserCol { + _, ok := Col2PrivType[v] + c.Assert(ok, IsTrue) + } + for _, v := range Col2PrivType { + _, ok := Priv2UserCol[v] + c.Assert(ok, IsTrue) + } + + c.Assert(len(Priv2Str), Equals, len(Priv2UserCol)) +} diff --git a/parser/mysql/errcode.go b/parser/mysql/errcode.go new file mode 100644 index 0000000..445dc1d --- /dev/null +++ b/parser/mysql/errcode.go @@ -0,0 +1,1062 @@ +// Copyright 2015 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package mysql + +// MySQL error code. +// This value is numeric. It is not portable to other database systems. 
+const ( + ErrErrorFirst uint16 = 1000 + ErrHashchk = 1000 + ErrNisamchk = 1001 + ErrNo = 1002 + ErrYes = 1003 + ErrCantCreateFile = 1004 + ErrCantCreateTable = 1005 + ErrCantCreateDB = 1006 + ErrDBCreateExists = 1007 + ErrDBDropExists = 1008 + ErrDBDropDelete = 1009 + ErrDBDropRmdir = 1010 + ErrCantDeleteFile = 1011 + ErrCantFindSystemRec = 1012 + ErrCantGetStat = 1013 + ErrCantGetWd = 1014 + ErrCantLock = 1015 + ErrCantOpenFile = 1016 + ErrFileNotFound = 1017 + ErrCantReadDir = 1018 + ErrCantSetWd = 1019 + ErrCheckread = 1020 + ErrDiskFull = 1021 + ErrDupKey = 1022 + ErrErrorOnClose = 1023 + ErrErrorOnRead = 1024 + ErrErrorOnRename = 1025 + ErrErrorOnWrite = 1026 + ErrFileUsed = 1027 + ErrFilsortAbort = 1028 + ErrFormNotFound = 1029 + ErrGetErrno = 1030 + ErrIllegalHa = 1031 + ErrKeyNotFound = 1032 + ErrNotFormFile = 1033 + ErrNotKeyFile = 1034 + ErrOldKeyFile = 1035 + ErrOpenAsReadonly = 1036 + ErrOutofMemory = 1037 + ErrOutOfSortMemory = 1038 + ErrUnexpectedEOF = 1039 + ErrConCount = 1040 + ErrOutOfResources = 1041 + ErrBadHost = 1042 + ErrHandshake = 1043 + ErrDBaccessDenied = 1044 + ErrAccessDenied = 1045 + ErrNoDB = 1046 + ErrUnknownCom = 1047 + ErrBadNull = 1048 + ErrBadDB = 1049 + ErrTableExists = 1050 + ErrBadTable = 1051 + ErrNonUniq = 1052 + ErrServerShutdown = 1053 + ErrBadField = 1054 + ErrFieldNotInGroupBy = 1055 + ErrWrongGroupField = 1056 + ErrWrongSumSelect = 1057 + ErrWrongValueCount = 1058 + ErrTooLongIdent = 1059 + ErrDupFieldName = 1060 + ErrDupKeyName = 1061 + ErrDupEntry = 1062 + ErrWrongFieldSpec = 1063 + ErrParse = 1064 + ErrEmptyQuery = 1065 + ErrNonuniqTable = 1066 + ErrInvalidDefault = 1067 + ErrMultiplePriKey = 1068 + ErrTooManyKeys = 1069 + ErrTooManyKeyParts = 1070 + ErrTooLongKey = 1071 + ErrKeyColumnDoesNotExits = 1072 + ErrBlobUsedAsKey = 1073 + ErrTooBigFieldlength = 1074 + ErrWrongAutoKey = 1075 + ErrReady = 1076 + ErrNormalShutdown = 1077 + ErrGotSignal = 1078 + ErrShutdownComplete = 1079 + ErrForcingClose = 1080 + ErrIpsock = 1081 + ErrNoSuchIndex = 1082 + ErrWrongFieldTerminators = 1083 + ErrBlobsAndNoTerminated = 1084 + ErrTextFileNotReadable = 1085 + ErrFileExists = 1086 + ErrLoadInfo = 1087 + ErrAlterInfo = 1088 + ErrWrongSubKey = 1089 + ErrCantRemoveAllFields = 1090 + ErrCantDropFieldOrKey = 1091 + ErrInsertInfo = 1092 + ErrUpdateTableUsed = 1093 + ErrNoSuchThread = 1094 + ErrKillDenied = 1095 + ErrNoTablesUsed = 1096 + ErrTooBigSet = 1097 + ErrNoUniqueLogFile = 1098 + ErrTableNotLockedForWrite = 1099 + ErrTableNotLocked = 1100 + ErrBlobCantHaveDefault = 1101 + ErrWrongDBName = 1102 + ErrWrongTableName = 1103 + ErrTooBigSelect = 1104 + ErrUnknown = 1105 + ErrUnknownProcedure = 1106 + ErrWrongParamcountToProcedure = 1107 + ErrWrongParametersToProcedure = 1108 + ErrUnknownTable = 1109 + ErrFieldSpecifiedTwice = 1110 + ErrInvalidGroupFuncUse = 1111 + ErrUnsupportedExtension = 1112 + ErrTableMustHaveColumns = 1113 + ErrRecordFileFull = 1114 + ErrUnknownCharacterSet = 1115 + ErrTooManyTables = 1116 + ErrTooManyFields = 1117 + ErrTooBigRowsize = 1118 + ErrStackOverrun = 1119 + ErrWrongOuterJoin = 1120 + ErrNullColumnInIndex = 1121 + ErrCantFindUdf = 1122 + ErrCantInitializeUdf = 1123 + ErrUdfNoPaths = 1124 + ErrUdfExists = 1125 + ErrCantOpenLibrary = 1126 + ErrCantFindDlEntry = 1127 + ErrFunctionNotDefined = 1128 + ErrHostIsBlocked = 1129 + ErrHostNotPrivileged = 1130 + ErrPasswordAnonymousUser = 1131 + ErrPasswordNotAllowed = 1132 + ErrPasswordNoMatch = 1133 + ErrUpdateInfo = 1134 + ErrCantCreateThread = 1135 + ErrWrongValueCountOnRow = 1136 + 
ErrCantReopenTable = 1137 + ErrInvalidUseOfNull = 1138 + ErrRegexp = 1139 + ErrMixOfGroupFuncAndFields = 1140 + ErrNonexistingGrant = 1141 + ErrTableaccessDenied = 1142 + ErrColumnaccessDenied = 1143 + ErrIllegalGrantForTable = 1144 + ErrGrantWrongHostOrUser = 1145 + ErrNoSuchTable = 1146 + ErrNonexistingTableGrant = 1147 + ErrNotAllowedCommand = 1148 + ErrSyntax = 1149 + ErrDelayedCantChangeLock = 1150 + ErrTooManyDelayedThreads = 1151 + ErrAbortingConnection = 1152 + ErrNetPacketTooLarge = 1153 + ErrNetReadErrorFromPipe = 1154 + ErrNetFcntl = 1155 + ErrNetPacketsOutOfOrder = 1156 + ErrNetUncompress = 1157 + ErrNetRead = 1158 + ErrNetReadInterrupted = 1159 + ErrNetErrorOnWrite = 1160 + ErrNetWriteInterrupted = 1161 + ErrTooLongString = 1162 + ErrTableCantHandleBlob = 1163 + ErrTableCantHandleAutoIncrement = 1164 + ErrDelayedInsertTableLocked = 1165 + ErrWrongColumnName = 1166 + ErrWrongKeyColumn = 1167 + ErrWrongMrgTable = 1168 + ErrDupUnique = 1169 + ErrBlobKeyWithoutLength = 1170 + ErrPrimaryCantHaveNull = 1171 + ErrTooManyRows = 1172 + ErrRequiresPrimaryKey = 1173 + ErrNoRaidCompiled = 1174 + ErrUpdateWithoutKeyInSafeMode = 1175 + ErrKeyDoesNotExist = 1176 + ErrCheckNoSuchTable = 1177 + ErrCheckNotImplemented = 1178 + ErrCantDoThisDuringAnTransaction = 1179 + ErrErrorDuringCommit = 1180 + ErrErrorDuringRollback = 1181 + ErrErrorDuringFlushLogs = 1182 + ErrErrorDuringCheckpoint = 1183 + ErrNewAbortingConnection = 1184 + ErrDumpNotImplemented = 1185 + ErrFlushMasterBinlogClosed = 1186 + ErrIndexRebuild = 1187 + ErrMaster = 1188 + ErrMasterNetRead = 1189 + ErrMasterNetWrite = 1190 + ErrFtMatchingKeyNotFound = 1191 + ErrLockOrActiveTransaction = 1192 + ErrUnknownSystemVariable = 1193 + ErrCrashedOnUsage = 1194 + ErrCrashedOnRepair = 1195 + ErrWarningNotCompleteRollback = 1196 + ErrTransCacheFull = 1197 + ErrSlaveMustStop = 1198 + ErrSlaveNotRunning = 1199 + ErrBadSlave = 1200 + ErrMasterInfo = 1201 + ErrSlaveThread = 1202 + ErrTooManyUserConnections = 1203 + ErrSetConstantsOnly = 1204 + ErrLockWaitTimeout = 1205 + ErrLockTableFull = 1206 + ErrReadOnlyTransaction = 1207 + ErrDropDBWithReadLock = 1208 + ErrCreateDBWithReadLock = 1209 + ErrWrongArguments = 1210 + ErrNoPermissionToCreateUser = 1211 + ErrUnionTablesInDifferentDir = 1212 + ErrLockDeadlock = 1213 + ErrTableCantHandleFt = 1214 + ErrCannotAddForeign = 1215 + ErrNoReferencedRow = 1216 + ErrRowIsReferenced = 1217 + ErrConnectToMaster = 1218 + ErrQueryOnMaster = 1219 + ErrErrorWhenExecutingCommand = 1220 + ErrWrongUsage = 1221 + ErrWrongNumberOfColumnsInSelect = 1222 + ErrCantUpdateWithReadlock = 1223 + ErrMixingNotAllowed = 1224 + ErrDupArgument = 1225 + ErrUserLimitReached = 1226 + ErrSpecificAccessDenied = 1227 + ErrLocalVariable = 1228 + ErrGlobalVariable = 1229 + ErrNoDefault = 1230 + ErrWrongValueForVar = 1231 + ErrWrongTypeForVar = 1232 + ErrVarCantBeRead = 1233 + ErrCantUseOptionHere = 1234 + ErrNotSupportedYet = 1235 + ErrMasterFatalErrorReadingBinlog = 1236 + ErrSlaveIgnoredTable = 1237 + ErrIncorrectGlobalLocalVar = 1238 + ErrWrongFkDef = 1239 + ErrKeyRefDoNotMatchTableRef = 1240 + ErrOperandColumns = 1241 + ErrSubqueryNo1Row = 1242 + ErrUnknownStmtHandler = 1243 + ErrCorruptHelpDB = 1244 + ErrCyclicReference = 1245 + ErrAutoConvert = 1246 + ErrIllegalReference = 1247 + ErrDerivedMustHaveAlias = 1248 + ErrSelectReduced = 1249 + ErrTablenameNotAllowedHere = 1250 + ErrNotSupportedAuthMode = 1251 + ErrSpatialCantHaveNull = 1252 + ErrCollationCharsetMismatch = 1253 + ErrSlaveWasRunning = 1254 + ErrSlaveWasNotRunning = 1255 + 
ErrTooBigForUncompress = 1256 + ErrZlibZMem = 1257 + ErrZlibZBuf = 1258 + ErrZlibZData = 1259 + ErrCutValueGroupConcat = 1260 + ErrWarnTooFewRecords = 1261 + ErrWarnTooManyRecords = 1262 + ErrWarnNullToNotnull = 1263 + ErrWarnDataOutOfRange = 1264 + WarnDataTruncated = 1265 + ErrWarnUsingOtherHandler = 1266 + ErrCantAggregate2collations = 1267 + ErrDropUser = 1268 + ErrRevokeGrants = 1269 + ErrCantAggregate3collations = 1270 + ErrCantAggregateNcollations = 1271 + ErrVariableIsNotStruct = 1272 + ErrUnknownCollation = 1273 + ErrSlaveIgnoredSslParams = 1274 + ErrServerIsInSecureAuthMode = 1275 + ErrWarnFieldResolved = 1276 + ErrBadSlaveUntilCond = 1277 + ErrMissingSkipSlave = 1278 + ErrUntilCondIgnored = 1279 + ErrWrongNameForIndex = 1280 + ErrWrongNameForCatalog = 1281 + ErrWarnQcResize = 1282 + ErrBadFtColumn = 1283 + ErrUnknownKeyCache = 1284 + ErrWarnHostnameWontWork = 1285 + ErrUnknownStorageEngine = 1286 + ErrWarnDeprecatedSyntax = 1287 + ErrNonUpdatableTable = 1288 + ErrFeatureDisabled = 1289 + ErrOptionPreventsStatement = 1290 + ErrDuplicatedValueInType = 1291 + ErrTruncatedWrongValue = 1292 + ErrTooMuchAutoTimestampCols = 1293 + ErrInvalidOnUpdate = 1294 + ErrUnsupportedPs = 1295 + ErrGetErrmsg = 1296 + ErrGetTemporaryErrmsg = 1297 + ErrUnknownTimeZone = 1298 + ErrWarnInvalidTimestamp = 1299 + ErrInvalidCharacterString = 1300 + ErrWarnAllowedPacketOverflowed = 1301 + ErrConflictingDeclarations = 1302 + ErrSpNoRecursiveCreate = 1303 + ErrSpAlreadyExists = 1304 + ErrSpDoesNotExist = 1305 + ErrSpDropFailed = 1306 + ErrSpStoreFailed = 1307 + ErrSpLilabelMismatch = 1308 + ErrSpLabelRedefine = 1309 + ErrSpLabelMismatch = 1310 + ErrSpUninitVar = 1311 + ErrSpBadselect = 1312 + ErrSpBadreturn = 1313 + ErrSpBadstatement = 1314 + ErrUpdateLogDeprecatedIgnored = 1315 + ErrUpdateLogDeprecatedTranslated = 1316 + ErrQueryInterrupted = 1317 + ErrSpWrongNoOfArgs = 1318 + ErrSpCondMismatch = 1319 + ErrSpNoreturn = 1320 + ErrSpNoreturnend = 1321 + ErrSpBadCursorQuery = 1322 + ErrSpBadCursorSelect = 1323 + ErrSpCursorMismatch = 1324 + ErrSpCursorAlreadyOpen = 1325 + ErrSpCursorNotOpen = 1326 + ErrSpUndeclaredVar = 1327 + ErrSpWrongNoOfFetchArgs = 1328 + ErrSpFetchNoData = 1329 + ErrSpDupParam = 1330 + ErrSpDupVar = 1331 + ErrSpDupCond = 1332 + ErrSpDupCurs = 1333 + ErrSpCantAlter = 1334 + ErrSpSubselectNyi = 1335 + ErrStmtNotAllowedInSfOrTrg = 1336 + ErrSpVarcondAfterCurshndlr = 1337 + ErrSpCursorAfterHandler = 1338 + ErrSpCaseNotFound = 1339 + ErrFparserTooBigFile = 1340 + ErrFparserBadHeader = 1341 + ErrFparserEOFInComment = 1342 + ErrFparserErrorInParameter = 1343 + ErrFparserEOFInUnknownParameter = 1344 + ErrViewNoExplain = 1345 + ErrFrmUnknownType = 1346 + ErrWrongObject = 1347 + ErrNonupdateableColumn = 1348 + ErrViewSelectDerived = 1349 + ErrViewSelectClause = 1350 + ErrViewSelectVariable = 1351 + ErrViewSelectTmptable = 1352 + ErrViewWrongList = 1353 + ErrWarnViewMerge = 1354 + ErrWarnViewWithoutKey = 1355 + ErrViewInvalid = 1356 + ErrSpNoDropSp = 1357 + ErrSpGotoInHndlr = 1358 + ErrTrgAlreadyExists = 1359 + ErrTrgDoesNotExist = 1360 + ErrTrgOnViewOrTempTable = 1361 + ErrTrgCantChangeRow = 1362 + ErrTrgNoSuchRowInTrg = 1363 + ErrNoDefaultForField = 1364 + ErrDivisionByZero = 1365 + ErrTruncatedWrongValueForField = 1366 + ErrIllegalValueForType = 1367 + ErrViewNonupdCheck = 1368 + ErrViewCheckFailed = 1369 + ErrProcaccessDenied = 1370 + ErrRelayLogFail = 1371 + ErrPasswdLength = 1372 + ErrUnknownTargetBinlog = 1373 + ErrIoErrLogIndexRead = 1374 + ErrBinlogPurgeProhibited = 1375 + ErrFseekFail = 
1376 + ErrBinlogPurgeFatalErr = 1377 + ErrLogInUse = 1378 + ErrLogPurgeUnknownErr = 1379 + ErrRelayLogInit = 1380 + ErrNoBinaryLogging = 1381 + ErrReservedSyntax = 1382 + ErrWsasFailed = 1383 + ErrDiffGroupsProc = 1384 + ErrNoGroupForProc = 1385 + ErrOrderWithProc = 1386 + ErrLoggingProhibitChangingOf = 1387 + ErrNoFileMapping = 1388 + ErrWrongMagic = 1389 + ErrPsManyParam = 1390 + ErrKeyPart0 = 1391 + ErrViewChecksum = 1392 + ErrViewMultiupdate = 1393 + ErrViewNoInsertFieldList = 1394 + ErrViewDeleteMergeView = 1395 + ErrCannotUser = 1396 + ErrXaerNota = 1397 + ErrXaerInval = 1398 + ErrXaerRmfail = 1399 + ErrXaerOutside = 1400 + ErrXaerRmerr = 1401 + ErrXaRbrollback = 1402 + ErrNonexistingProcGrant = 1403 + ErrProcAutoGrantFail = 1404 + ErrProcAutoRevokeFail = 1405 + ErrDataTooLong = 1406 + ErrSpBadSQLstate = 1407 + ErrStartup = 1408 + ErrLoadFromFixedSizeRowsToVar = 1409 + ErrCantCreateUserWithGrant = 1410 + ErrWrongValueForType = 1411 + ErrTableDefChanged = 1412 + ErrSpDupHandler = 1413 + ErrSpNotVarArg = 1414 + ErrSpNoRetset = 1415 + ErrCantCreateGeometryObject = 1416 + ErrFailedRoutineBreakBinlog = 1417 + ErrBinlogUnsafeRoutine = 1418 + ErrBinlogCreateRoutineNeedSuper = 1419 + ErrExecStmtWithOpenCursor = 1420 + ErrStmtHasNoOpenCursor = 1421 + ErrCommitNotAllowedInSfOrTrg = 1422 + ErrNoDefaultForViewField = 1423 + ErrSpNoRecursion = 1424 + ErrTooBigScale = 1425 + ErrTooBigPrecision = 1426 + ErrMBiggerThanD = 1427 + ErrWrongLockOfSystemTable = 1428 + ErrConnectToForeignDataSource = 1429 + ErrQueryOnForeignDataSource = 1430 + ErrForeignDataSourceDoesntExist = 1431 + ErrForeignDataStringInvalidCantCreate = 1432 + ErrForeignDataStringInvalid = 1433 + ErrCantCreateFederatedTable = 1434 + ErrTrgInWrongSchema = 1435 + ErrStackOverrunNeedMore = 1436 + ErrTooLongBody = 1437 + ErrWarnCantDropDefaultKeycache = 1438 + ErrTooBigDisplaywidth = 1439 + ErrXaerDupid = 1440 + ErrDatetimeFunctionOverflow = 1441 + ErrCantUpdateUsedTableInSfOrTrg = 1442 + ErrViewPreventUpdate = 1443 + ErrPsNoRecursion = 1444 + ErrSpCantSetAutocommit = 1445 + ErrMalformedDefiner = 1446 + ErrViewFrmNoUser = 1447 + ErrViewOtherUser = 1448 + ErrNoSuchUser = 1449 + ErrForbidSchemaChange = 1450 + ErrRowIsReferenced2 = 1451 + ErrNoReferencedRow2 = 1452 + ErrSpBadVarShadow = 1453 + ErrTrgNoDefiner = 1454 + ErrOldFileFormat = 1455 + ErrSpRecursionLimit = 1456 + ErrSpProcTableCorrupt = 1457 + ErrSpWrongName = 1458 + ErrTableNeedsUpgrade = 1459 + ErrSpNoAggregate = 1460 + ErrMaxPreparedStmtCountReached = 1461 + ErrViewRecursive = 1462 + ErrNonGroupingFieldUsed = 1463 + ErrTableCantHandleSpkeys = 1464 + ErrNoTriggersOnSystemSchema = 1465 + ErrRemovedSpaces = 1466 + ErrAutoincReadFailed = 1467 + ErrUsername = 1468 + ErrHostname = 1469 + ErrWrongStringLength = 1470 + ErrNonInsertableTable = 1471 + ErrAdminWrongMrgTable = 1472 + ErrTooHighLevelOfNestingForSelect = 1473 + ErrNameBecomesEmpty = 1474 + ErrAmbiguousFieldTerm = 1475 + ErrForeignServerExists = 1476 + ErrForeignServerDoesntExist = 1477 + ErrIllegalHaCreateOption = 1478 + ErrPartitionRequiresValues = 1479 + ErrPartitionWrongValues = 1480 + ErrPartitionMaxvalue = 1481 + ErrPartitionSubpartition = 1482 + ErrPartitionSubpartMix = 1483 + ErrPartitionWrongNoPart = 1484 + ErrPartitionWrongNoSubpart = 1485 + ErrWrongExprInPartitionFunc = 1486 + ErrNoConstExprInRangeOrList = 1487 + ErrFieldNotFoundPart = 1488 + ErrListOfFieldsOnlyInHash = 1489 + ErrInconsistentPartitionInfo = 1490 + ErrPartitionFuncNotAllowed = 1491 + ErrPartitionsMustBeDefined = 1492 + ErrRangeNotIncreasing = 1493 + 
ErrInconsistentTypeOfFunctions = 1494 + ErrMultipleDefConstInListPart = 1495 + ErrPartitionEntry = 1496 + ErrMixHandler = 1497 + ErrPartitionNotDefined = 1498 + ErrTooManyPartitions = 1499 + ErrSubpartition = 1500 + ErrCantCreateHandlerFile = 1501 + ErrBlobFieldInPartFunc = 1502 + ErrUniqueKeyNeedAllFieldsInPf = 1503 + ErrNoParts = 1504 + ErrPartitionMgmtOnNonpartitioned = 1505 + ErrForeignKeyOnPartitioned = 1506 + ErrDropPartitionNonExistent = 1507 + ErrDropLastPartition = 1508 + ErrCoalesceOnlyOnHashPartition = 1509 + ErrReorgHashOnlyOnSameNo = 1510 + ErrReorgNoParam = 1511 + ErrOnlyOnRangeListPartition = 1512 + ErrAddPartitionSubpart = 1513 + ErrAddPartitionNoNewPartition = 1514 + ErrCoalescePartitionNoPartition = 1515 + ErrReorgPartitionNotExist = 1516 + ErrSameNamePartition = 1517 + ErrNoBinlog = 1518 + ErrConsecutiveReorgPartitions = 1519 + ErrReorgOutsideRange = 1520 + ErrPartitionFunctionFailure = 1521 + ErrPartState = 1522 + ErrLimitedPartRange = 1523 + ErrPluginIsNotLoaded = 1524 + ErrWrongValue = 1525 + ErrNoPartitionForGivenValue = 1526 + ErrFilegroupOptionOnlyOnce = 1527 + ErrCreateFilegroupFailed = 1528 + ErrDropFilegroupFailed = 1529 + ErrTablespaceAutoExtend = 1530 + ErrWrongSizeNumber = 1531 + ErrSizeOverflow = 1532 + ErrAlterFilegroupFailed = 1533 + ErrBinlogRowLoggingFailed = 1534 + ErrBinlogRowWrongTableDef = 1535 + ErrBinlogRowRbrToSbr = 1536 + ErrEventAlreadyExists = 1537 + ErrEventStoreFailed = 1538 + ErrEventDoesNotExist = 1539 + ErrEventCantAlter = 1540 + ErrEventDropFailed = 1541 + ErrEventIntervalNotPositiveOrTooBig = 1542 + ErrEventEndsBeforeStarts = 1543 + ErrEventExecTimeInThePast = 1544 + ErrEventOpenTableFailed = 1545 + ErrEventNeitherMExprNorMAt = 1546 + ErrObsoleteColCountDoesntMatchCorrupted = 1547 + ErrObsoleteCannotLoadFromTable = 1548 + ErrEventCannotDelete = 1549 + ErrEventCompile = 1550 + ErrEventSameName = 1551 + ErrEventDataTooLong = 1552 + ErrDropIndexFk = 1553 + ErrWarnDeprecatedSyntaxWithVer = 1554 + ErrCantWriteLockLogTable = 1555 + ErrCantLockLogTable = 1556 + ErrForeignDuplicateKeyOldUnused = 1557 + ErrColCountDoesntMatchPleaseUpdate = 1558 + ErrTempTablePreventsSwitchOutOfRbr = 1559 + ErrStoredFunctionPreventsSwitchBinlogFormat = 1560 + ErrNdbCantSwitchBinlogFormat = 1561 + ErrPartitionNoTemporary = 1562 + ErrPartitionConstDomain = 1563 + ErrPartitionFunctionIsNotAllowed = 1564 + ErrDdlLog = 1565 + ErrNullInValuesLessThan = 1566 + ErrWrongPartitionName = 1567 + ErrCantChangeTxCharacteristics = 1568 + ErrDupEntryAutoincrementCase = 1569 + ErrEventModifyQueue = 1570 + ErrEventSetVar = 1571 + ErrPartitionMerge = 1572 + ErrCantActivateLog = 1573 + ErrRbrNotAvailable = 1574 + ErrBase64Decode = 1575 + ErrEventRecursionForbidden = 1576 + ErrEventsDB = 1577 + ErrOnlyIntegersAllowed = 1578 + ErrUnsuportedLogEngine = 1579 + ErrBadLogStatement = 1580 + ErrCantRenameLogTable = 1581 + ErrWrongParamcountToNativeFct = 1582 + ErrWrongParametersToNativeFct = 1583 + ErrWrongParametersToStoredFct = 1584 + ErrNativeFctNameCollision = 1585 + ErrDupEntryWithKeyName = 1586 + ErrBinlogPurgeEmFile = 1587 + ErrEventCannotCreateInThePast = 1588 + ErrEventCannotAlterInThePast = 1589 + ErrSlaveIncident = 1590 + ErrNoPartitionForGivenValueSilent = 1591 + ErrBinlogUnsafeStatement = 1592 + ErrSlaveFatal = 1593 + ErrSlaveRelayLogReadFailure = 1594 + ErrSlaveRelayLogWriteFailure = 1595 + ErrSlaveCreateEventFailure = 1596 + ErrSlaveMasterComFailure = 1597 + ErrBinlogLoggingImpossible = 1598 + ErrViewNoCreationCtx = 1599 + ErrViewInvalidCreationCtx = 1600 + 
ErrSrInvalidCreationCtx = 1601 + ErrTrgCorruptedFile = 1602 + ErrTrgNoCreationCtx = 1603 + ErrTrgInvalidCreationCtx = 1604 + ErrEventInvalidCreationCtx = 1605 + ErrTrgCantOpenTable = 1606 + ErrCantCreateSroutine = 1607 + ErrNeverUsed = 1608 + ErrNoFormatDescriptionEventBeforeBinlogStatement = 1609 + ErrSlaveCorruptEvent = 1610 + ErrLoadDataInvalidColumn = 1611 + ErrLogPurgeNoFile = 1612 + ErrXaRbtimeout = 1613 + ErrXaRbdeadlock = 1614 + ErrNeedReprepare = 1615 + ErrDelayedNotSupported = 1616 + WarnNoMasterInfo = 1617 + WarnOptionIgnored = 1618 + WarnPluginDeleteBuiltin = 1619 + WarnPluginBusy = 1620 + ErrVariableIsReadonly = 1621 + ErrWarnEngineTransactionRollback = 1622 + ErrSlaveHeartbeatFailure = 1623 + ErrSlaveHeartbeatValueOutOfRange = 1624 + ErrNdbReplicationSchema = 1625 + ErrConflictFnParse = 1626 + ErrExceptionsWrite = 1627 + ErrTooLongTableComment = 1628 + ErrTooLongFieldComment = 1629 + ErrFuncInexistentNameCollision = 1630 + ErrDatabaseName = 1631 + ErrTableName = 1632 + ErrPartitionName = 1633 + ErrSubpartitionName = 1634 + ErrTemporaryName = 1635 + ErrRenamedName = 1636 + ErrTooManyConcurrentTrxs = 1637 + WarnNonASCIISeparatorNotImplemented = 1638 + ErrDebugSyncTimeout = 1639 + ErrDebugSyncHitLimit = 1640 + ErrDupSignalSet = 1641 + ErrSignalWarn = 1642 + ErrSignalNotFound = 1643 + ErrSignalException = 1644 + ErrResignalWithoutActiveHandler = 1645 + ErrSignalBadConditionType = 1646 + WarnCondItemTruncated = 1647 + ErrCondItemTooLong = 1648 + ErrUnknownLocale = 1649 + ErrSlaveIgnoreServerIds = 1650 + ErrQueryCacheDisabled = 1651 + ErrSameNamePartitionField = 1652 + ErrPartitionColumnList = 1653 + ErrWrongTypeColumnValue = 1654 + ErrTooManyPartitionFuncFields = 1655 + ErrMaxvalueInValuesIn = 1656 + ErrTooManyValues = 1657 + ErrRowSinglePartitionField = 1658 + ErrFieldTypeNotAllowedAsPartitionField = 1659 + ErrPartitionFieldsTooLong = 1660 + ErrBinlogRowEngineAndStmtEngine = 1661 + ErrBinlogRowModeAndStmtEngine = 1662 + ErrBinlogUnsafeAndStmtEngine = 1663 + ErrBinlogRowInjectionAndStmtEngine = 1664 + ErrBinlogStmtModeAndRowEngine = 1665 + ErrBinlogRowInjectionAndStmtMode = 1666 + ErrBinlogMultipleEnginesAndSelfLoggingEngine = 1667 + ErrBinlogUnsafeLimit = 1668 + ErrBinlogUnsafeInsertDelayed = 1669 + ErrBinlogUnsafeSystemTable = 1670 + ErrBinlogUnsafeAutoincColumns = 1671 + ErrBinlogUnsafeUdf = 1672 + ErrBinlogUnsafeSystemVariable = 1673 + ErrBinlogUnsafeSystemFunction = 1674 + ErrBinlogUnsafeNontransAfterTrans = 1675 + ErrMessageAndStatement = 1676 + ErrSlaveConversionFailed = 1677 + ErrSlaveCantCreateConversion = 1678 + ErrInsideTransactionPreventsSwitchBinlogFormat = 1679 + ErrPathLength = 1680 + ErrWarnDeprecatedSyntaxNoReplacement = 1681 + ErrWrongNativeTableStructure = 1682 + ErrWrongPerfSchemaUsage = 1683 + ErrWarnISSkippedTable = 1684 + ErrInsideTransactionPreventsSwitchBinlogDirect = 1685 + ErrStoredFunctionPreventsSwitchBinlogDirect = 1686 + ErrSpatialMustHaveGeomCol = 1687 + ErrTooLongIndexComment = 1688 + ErrLockAborted = 1689 + ErrDataOutOfRange = 1690 + ErrWrongSpvarTypeInLimit = 1691 + ErrBinlogUnsafeMultipleEnginesAndSelfLoggingEngine = 1692 + ErrBinlogUnsafeMixedStatement = 1693 + ErrInsideTransactionPreventsSwitchSQLLogBin = 1694 + ErrStoredFunctionPreventsSwitchSQLLogBin = 1695 + ErrFailedReadFromParFile = 1696 + ErrValuesIsNotIntType = 1697 + ErrAccessDeniedNoPassword = 1698 + ErrSetPasswordAuthPlugin = 1699 + ErrGrantPluginUserExists = 1700 + ErrTruncateIllegalFk = 1701 + ErrPluginIsPermanent = 1702 + ErrSlaveHeartbeatValueOutOfRangeMin = 1703 + 
ErrSlaveHeartbeatValueOutOfRangeMax = 1704 + ErrStmtCacheFull = 1705 + ErrMultiUpdateKeyConflict = 1706 + ErrTableNeedsRebuild = 1707 + WarnOptionBelowLimit = 1708 + ErrIndexColumnTooLong = 1709 + ErrErrorInTriggerBody = 1710 + ErrErrorInUnknownTriggerBody = 1711 + ErrIndexCorrupt = 1712 + ErrUndoRecordTooBig = 1713 + ErrBinlogUnsafeInsertIgnoreSelect = 1714 + ErrBinlogUnsafeInsertSelectUpdate = 1715 + ErrBinlogUnsafeReplaceSelect = 1716 + ErrBinlogUnsafeCreateIgnoreSelect = 1717 + ErrBinlogUnsafeCreateReplaceSelect = 1718 + ErrBinlogUnsafeUpdateIgnore = 1719 + ErrPluginNoUninstall = 1720 + ErrPluginNoInstall = 1721 + ErrBinlogUnsafeWriteAutoincSelect = 1722 + ErrBinlogUnsafeCreateSelectAutoinc = 1723 + ErrBinlogUnsafeInsertTwoKeys = 1724 + ErrTableInFkCheck = 1725 + ErrUnsupportedEngine = 1726 + ErrBinlogUnsafeAutoincNotFirst = 1727 + ErrCannotLoadFromTableV2 = 1728 + ErrMasterDelayValueOutOfRange = 1729 + ErrOnlyFdAndRbrEventsAllowedInBinlogStatement = 1730 + ErrPartitionExchangeDifferentOption = 1731 + ErrPartitionExchangePartTable = 1732 + ErrPartitionExchangeTempTable = 1733 + ErrPartitionInsteadOfSubpartition = 1734 + ErrUnknownPartition = 1735 + ErrTablesDifferentMetadata = 1736 + ErrRowDoesNotMatchPartition = 1737 + ErrBinlogCacheSizeGreaterThanMax = 1738 + ErrWarnIndexNotApplicable = 1739 + ErrPartitionExchangeForeignKey = 1740 + ErrNoSuchKeyValue = 1741 + ErrRplInfoDataTooLong = 1742 + ErrNetworkReadEventChecksumFailure = 1743 + ErrBinlogReadEventChecksumFailure = 1744 + ErrBinlogStmtCacheSizeGreaterThanMax = 1745 + ErrCantUpdateTableInCreateTableSelect = 1746 + ErrPartitionClauseOnNonpartitioned = 1747 + ErrRowDoesNotMatchGivenPartitionSet = 1748 + ErrNoSuchPartitionunused = 1749 + ErrChangeRplInfoRepositoryFailure = 1750 + ErrWarningNotCompleteRollbackWithCreatedTempTable = 1751 + ErrWarningNotCompleteRollbackWithDroppedTempTable = 1752 + ErrMtsFeatureIsNotSupported = 1753 + ErrMtsUpdatedDBsGreaterMax = 1754 + ErrMtsCantParallel = 1755 + ErrMtsInconsistentData = 1756 + ErrFulltextNotSupportedWithPartitioning = 1757 + ErrDaInvalidConditionNumber = 1758 + ErrInsecurePlainText = 1759 + ErrInsecureChangeMaster = 1760 + ErrForeignDuplicateKeyWithChildInfo = 1761 + ErrForeignDuplicateKeyWithoutChildInfo = 1762 + ErrSQLthreadWithSecureSlave = 1763 + ErrTableHasNoFt = 1764 + ErrVariableNotSettableInSfOrTrigger = 1765 + ErrVariableNotSettableInTransaction = 1766 + ErrGtidNextIsNotInGtidNextList = 1767 + ErrCantChangeGtidNextInTransactionWhenGtidNextListIsNull = 1768 + ErrSetStatementCannotInvokeFunction = 1769 + ErrGtidNextCantBeAutomaticIfGtidNextListIsNonNull = 1770 + ErrSkippingLoggedTransaction = 1771 + ErrMalformedGtidSetSpecification = 1772 + ErrMalformedGtidSetEncoding = 1773 + ErrMalformedGtidSpecification = 1774 + ErrGnoExhausted = 1775 + ErrBadSlaveAutoPosition = 1776 + ErrAutoPositionRequiresGtidModeOn = 1777 + ErrCantDoImplicitCommitInTrxWhenGtidNextIsSet = 1778 + ErrGtidMode2Or3RequiresEnforceGtidConsistencyOn = 1779 + ErrGtidModeRequiresBinlog = 1780 + ErrCantSetGtidNextToGtidWhenGtidModeIsOff = 1781 + ErrCantSetGtidNextToAnonymousWhenGtidModeIsOn = 1782 + ErrCantSetGtidNextListToNonNullWhenGtidModeIsOff = 1783 + ErrFoundGtidEventWhenGtidModeIsOff = 1784 + ErrGtidUnsafeNonTransactionalTable = 1785 + ErrGtidUnsafeCreateSelect = 1786 + ErrGtidUnsafeCreateDropTemporaryTableInTransaction = 1787 + ErrGtidModeCanOnlyChangeOneStepAtATime = 1788 + ErrMasterHasPurgedRequiredGtids = 1789 + ErrCantSetGtidNextWhenOwningGtid = 1790 + ErrUnknownExplainFormat = 1791 + 
ErrCantExecuteInReadOnlyTransaction = 1792 + ErrTooLongTablePartitionComment = 1793 + ErrSlaveConfiguration = 1794 + ErrInnodbFtLimit = 1795 + ErrInnodbNoFtTempTable = 1796 + ErrInnodbFtWrongDocidColumn = 1797 + ErrInnodbFtWrongDocidIndex = 1798 + ErrInnodbOnlineLogTooBig = 1799 + ErrUnknownAlterAlgorithm = 1800 + ErrUnknownAlterLock = 1801 + ErrMtsChangeMasterCantRunWithGaps = 1802 + ErrMtsRecoveryFailure = 1803 + ErrMtsResetWorkers = 1804 + ErrColCountDoesntMatchCorruptedV2 = 1805 + ErrSlaveSilentRetryTransaction = 1806 + ErrDiscardFkChecksRunning = 1807 + ErrTableSchemaMismatch = 1808 + ErrTableInSystemTablespace = 1809 + ErrIoRead = 1810 + ErrIoWrite = 1811 + ErrTablespaceMissing = 1812 + ErrTablespaceExists = 1813 + ErrTablespaceDiscarded = 1814 + ErrInternal = 1815 + ErrInnodbImport = 1816 + ErrInnodbIndexCorrupt = 1817 + ErrInvalidYearColumnLength = 1818 + ErrNotValidPassword = 1819 + ErrMustChangePassword = 1820 + ErrFkNoIndexChild = 1821 + ErrFkNoIndexParent = 1822 + ErrFkFailAddSystem = 1823 + ErrFkCannotOpenParent = 1824 + ErrFkIncorrectOption = 1825 + ErrFkDupName = 1826 + ErrPasswordFormat = 1827 + ErrFkColumnCannotDrop = 1828 + ErrFkColumnCannotDropChild = 1829 + ErrFkColumnNotNull = 1830 + ErrDupIndex = 1831 + ErrFkColumnCannotChange = 1832 + ErrFkColumnCannotChangeChild = 1833 + ErrFkCannotDeleteParent = 1834 + ErrMalformedPacket = 1835 + ErrReadOnlyMode = 1836 + ErrGtidNextTypeUndefinedGroup = 1837 + ErrVariableNotSettableInSp = 1838 + ErrCantSetGtidPurgedWhenGtidModeIsOff = 1839 + ErrCantSetGtidPurgedWhenGtidExecutedIsNotEmpty = 1840 + ErrCantSetGtidPurgedWhenOwnedGtidsIsNotEmpty = 1841 + ErrGtidPurgedWasChanged = 1842 + ErrGtidExecutedWasChanged = 1843 + ErrBinlogStmtModeAndNoReplTables = 1844 + ErrAlterOperationNotSupported = 1845 + ErrAlterOperationNotSupportedReason = 1846 + ErrAlterOperationNotSupportedReasonCopy = 1847 + ErrAlterOperationNotSupportedReasonPartition = 1848 + ErrAlterOperationNotSupportedReasonFkRename = 1849 + ErrAlterOperationNotSupportedReasonColumnType = 1850 + ErrAlterOperationNotSupportedReasonFkCheck = 1851 + ErrAlterOperationNotSupportedReasonIgnore = 1852 + ErrAlterOperationNotSupportedReasonNopk = 1853 + ErrAlterOperationNotSupportedReasonAutoinc = 1854 + ErrAlterOperationNotSupportedReasonHiddenFts = 1855 + ErrAlterOperationNotSupportedReasonChangeFts = 1856 + ErrAlterOperationNotSupportedReasonFts = 1857 + ErrSQLSlaveSkipCounterNotSettableInGtidMode = 1858 + ErrDupUnknownInIndex = 1859 + ErrIdentCausesTooLongPath = 1860 + ErrAlterOperationNotSupportedReasonNotNull = 1861 + ErrMustChangePasswordLogin = 1862 + ErrRowInWrongPartition = 1863 + ErrErrorLast = 1863 + ErrMaxExecTimeExceeded = 1907 + ErrInvalidFieldSize = 3013 + ErrIncorrectType = 3064 + ErrInvalidJSONData = 3069 + ErrGeneratedColumnFunctionIsNotAllowed = 3102 + ErrUnsupportedAlterInplaceOnVirtualColumn = 3103 + ErrWrongFKOptionForGeneratedColumn = 3104 + ErrBadGeneratedColumn = 3105 + ErrUnsupportedOnGeneratedColumn = 3106 + ErrGeneratedColumnNonPrior = 3107 + ErrDependentByGeneratedColumn = 3108 + ErrGeneratedColumnRefAutoInc = 3109 + ErrInvalidJSONText = 3140 + ErrInvalidJSONPath = 3143 + ErrInvalidTypeForJSON = 3146 + ErrInvalidJSONPathWildcard = 3149 + ErrInvalidJSONContainsPathType = 3150 + ErrJSONUsedAsKey = 3152 + ErrBadUser = 3162 + ErrUserAlreadyExists = 3163 + ErrInvalidJSONPathArrayCell = 3165 + ErrInvalidEncryptionOption = 3184 + ErrRoleNotGranted = 3530 + ErrLockAcquireFailAndNoWaitSet = 3572 + ErrWindowNoSuchWindow = 3579 + ErrWindowCircularityInWindowGraph = 3580 + 
ErrWindowNoChildPartitioning = 3581 + ErrWindowNoInherentFrame = 3582 + ErrWindowNoRedefineOrderBy = 3583 + ErrWindowFrameStartIllegal = 3584 + ErrWindowFrameEndIllegal = 3585 + ErrWindowFrameIllegal = 3586 + ErrWindowRangeFrameOrderType = 3587 + ErrWindowRangeFrameTemporalType = 3588 + ErrWindowRangeFrameNumericType = 3589 + ErrWindowRangeBoundNotConstant = 3590 + ErrWindowDuplicateName = 3591 + ErrWindowIllegalOrderBy = 3592 + ErrWindowInvalidWindowFuncUse = 3593 + ErrWindowInvalidWindowFuncAliasUse = 3594 + ErrWindowNestedWindowFuncUseInWindowSpec = 3595 + ErrWindowRowsIntervalUse = 3596 + ErrWindowNoGroupOrderUnused = 3597 + ErrWindowExplainJson = 3598 + ErrWindowFunctionIgnoresFrame = 3599 + ErrDataTruncatedFunctionalIndex = 3751 + ErrDataOutOfRangeFunctionalIndex = 3752 + ErrFunctionalIndexOnJsonOrGeometryFunction = 3753 + ErrFunctionalIndexRefAutoIncrement = 3754 + ErrCannotDropColumnFunctionalIndex = 3755 + ErrFunctionalIndexPrimaryKey = 3756 + ErrFunctionalIndexOnLob = 3757 + ErrFunctionalIndexFunctionIsNotAllowed = 3758 + ErrFulltextFunctionalIndex = 3759 + ErrSpatialFunctionalIndex = 3760 + ErrWrongKeyColumnFunctionalIndex = 3761 + ErrFunctionalIndexOnField = 3762 + ErrFKIncompatibleColumns = 3780 + ErrFunctionalIndexRowValueIsNotAllowed = 3800 + ErrDependentByFunctionalIndex = 3837 + ErrInvalidJsonValueForFuncIndex = 3903 + ErrJsonValueOutOfRangeForFuncIndex = 3904 + ErrFunctionalIndexDataIsTooLong = 3907 + ErrFunctionalIndexNotApplicable = 3909 + + // MariaDB errors. + ErrOnlyOneDefaultPartionAllowed = 4030 + ErrWrongPartitionTypeExpectedSystemTime = 4113 + ErrSystemVersioningWrongPartitions = 4128 + + // TiDB self-defined errors. + ErrMemExceedThreshold = 8001 + ErrForUpdateCantRetry = 8002 + ErrAdminCheckTable = 8003 + ErrTxnTooLarge = 8004 + ErrWriteConflictInTiDB = 8005 + ErrInvalidPluginID = 8101 + ErrInvalidPluginManifest = 8102 + ErrInvalidPluginName = 8103 + ErrInvalidPluginVersion = 8104 + ErrDuplicatePlugin = 8105 + ErrInvalidPluginSysVarName = 8106 + ErrRequireVersionCheckFail = 8107 + ErrUnsupportedType = 8108 + ErrAnalyzeMissIndex = 8109 + ErrCartesianProductUnsupported = 8110 + ErrPreparedStmtNotFound = 8111 + ErrWrongParamCount = 8112 + ErrSchemaChanged = 8113 + ErrUnknownPlan = 8114 + ErrPrepareMulti = 8115 + ErrPrepareDDL = 8116 + ErrResultIsEmpty = 8117 + ErrBuildExecutor = 8118 + ErrBatchInsertFail = 8119 + ErrGetStartTS = 8120 + ErrPrivilegeCheckFail = 8121 + ErrInvalidWildCard = 8122 + ErrMixOfGroupFuncAndFieldsIncompatible = 8123 + ErrUnsupportedReloadPlugin = 8018 + ErrUnsupportedReloadPluginVar = 8019 + ErrTableLocked = 8020 + ErrNotExist = 8021 + ErrTxnRetryable = 8022 + ErrCannotSetNilValue = 8023 + ErrInvalidTxn = 8024 + ErrEntryTooLarge = 8025 + ErrNotImplemented = 8026 + ErrInfoSchemaExpired = 8027 + ErrInfoSchemaChanged = 8028 + ErrBadNumber = 8029 + ErrCastAsSignedOverflow = 8030 + ErrCastNegIntAsUnsigned = 8031 + ErrInvalidYearFormat = 8032 + ErrInvalidYear = 8033 + ErrIncorrectDatetimeValue = 8034 + ErrInvalidTimeFormat = 8036 + ErrInvalidWeekModeFormat = 8037 + ErrFieldGetDefaultFailed = 8038 + ErrIndexOutBound = 8039 + ErrUnsupportedOp = 8040 + ErrRowNotFound = 8041 + ErrTableStateCantNone = 8042 + ErrColumnStateNonPublic = 8043 + ErrIndexStateCantNone = 8044 + ErrInvalidRecordKey = 8045 + ErrColumnStateCantNone = 8046 + ErrUnsupportedValueForVar = 8047 + ErrUnsupportedIsolationLevel = 8048 + ErrLoadPrivilege = 8049 + ErrInvalidPrivilegeType = 8050 + ErrUnknownFieldType = 8051 + ErrInvalidSequence = 8052 + ErrCantGetValidID = 8053 + 
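// ---------------------------------------------------------------------------
// Editor's aside (not part of the patch): a minimal sketch of how one of the
// error codes from the const block above might be paired with its format
// string in MySQLErrName (added by errname.go later in this patch) to render a
// client-facing message. The import path is assumed from the repository
// layout; TiDB's real error construction lives outside these files.
// ---------------------------------------------------------------------------
package main

import (
	"fmt"

	"github.com/pingcap/tidb/parser/mysql"
)

func main() {
	// ErrBadDB (1049) is defined above; its message template in MySQLErrName
	// is "Unknown database '%-.192s'".
	msg := fmt.Sprintf(mysql.MySQLErrName[mysql.ErrBadDB], "test")
	fmt.Printf("ERROR %d: %s\n", mysql.ErrBadDB, msg)
}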
ErrCantSetToNull = 8054 + ErrSnapshotTooOld = 8055 + ErrInvalidTableID = 8056 + ErrInvalidType = 8057 + + // Error codes used by TiDB ddl package + ErrUnsupportedDDLOperation = 8200 + ErrNotOwner = 8201 + ErrCantDecodeIndex = 8202 + ErrInvalidDDLWorker = 8203 + ErrInvalidDDLJob = 8204 + ErrInvalidDDLJobFlag = 8205 + ErrWaitReorgTimeout = 8206 + ErrInvalidStoreVersion = 8207 + ErrUnknownTypeLength = 8208 + ErrUnknownFractionLength = 8209 + ErrInvalidDDLState = 8210 + ErrReorgPanic = 8211 + ErrInvalidSplitRegionRanges = 8212 + ErrInvalidDDLJobVersion = 8213 + ErrCancelledDDLJob = 8214 + ErrRepairTable = 8215 + ErrInvalidAutoRandom = 8216 + ErrInvalidHashKeyFlag = 8217 + ErrInvalidListIndex = 8218 + ErrInvalidListMetaData = 8219 + ErrWriteOnSnapshot = 8220 + ErrInvalidKey = 8221 + ErrInvalidIndexKey = 8222 + ErrDataInConsistent = 8223 + ErrDDLJobNotFound = 8224 + ErrCancelFinishedDDLJob = 8225 + ErrCannotCancelDDLJob = 8226 + + // TiKV/PD errors. + ErrPDServerTimeout = 9001 + ErrTiKVServerTimeout = 9002 + ErrTiKVServerBusy = 9003 + ErrResolveLockTimeout = 9004 + ErrRegionUnavailable = 9005 + ErrGCTooEarly = 9006 + ErrWriteConflict = 9007 + ErrTiKVStoreLimit = 9008 +) diff --git a/parser/mysql/errname.go b/parser/mysql/errname.go new file mode 100644 index 0000000..b26dfb0 --- /dev/null +++ b/parser/mysql/errname.go @@ -0,0 +1,1058 @@ +// Copyright 2015 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package mysql + +// MySQLErrName maps error code to MySQL error messages. +var MySQLErrName = map[uint16]string{ + ErrHashchk: "hashchk", + ErrNisamchk: "isamchk", + ErrNo: "NO", + ErrYes: "YES", + ErrCantCreateFile: "Can't create file '%-.200s' (errno: %d - %s)", + ErrCantCreateTable: "Can't create table '%-.200s' (errno: %d)", + ErrCantCreateDB: "Can't create database '%-.192s' (errno: %d)", + ErrDBCreateExists: "Can't create database '%-.192s'; database exists", + ErrDBDropExists: "Can't drop database '%-.192s'; database doesn't exist", + ErrDBDropDelete: "Error dropping database (can't delete '%-.192s', errno: %d)", + ErrDBDropRmdir: "Error dropping database (can't rmdir '%-.192s', errno: %d)", + ErrCantDeleteFile: "Error on delete of '%-.192s' (errno: %d - %s)", + ErrCantFindSystemRec: "Can't read record in system table", + ErrCantGetStat: "Can't get status of '%-.200s' (errno: %d - %s)", + ErrCantGetWd: "Can't get working directory (errno: %d - %s)", + ErrCantLock: "Can't lock file (errno: %d - %s)", + ErrCantOpenFile: "Can't open file: '%-.200s' (errno: %d - %s)", + ErrFileNotFound: "Can't find file: '%-.200s' (errno: %d - %s)", + ErrCantReadDir: "Can't read dir of '%-.192s' (errno: %d - %s)", + ErrCantSetWd: "Can't change dir to '%-.192s' (errno: %d - %s)", + ErrCheckread: "Record has changed since last read in table '%-.192s'", + ErrDiskFull: "Disk full (%s); waiting for someone to free some space... 
(errno: %d - %s)", + ErrDupKey: "Can't write; duplicate key in table '%-.192s'", + ErrErrorOnClose: "Error on close of '%-.192s' (errno: %d - %s)", + ErrErrorOnRead: "Error reading file '%-.200s' (errno: %d - %s)", + ErrErrorOnRename: "Error on rename of '%-.210s' to '%-.210s' (errno: %d - %s)", + ErrErrorOnWrite: "Error writing file '%-.200s' (errno: %d - %s)", + ErrFileUsed: "'%-.192s' is locked against change", + ErrFilsortAbort: "Sort aborted", + ErrFormNotFound: "View '%-.192s' doesn't exist for '%-.192s'", + ErrGetErrno: "Got error %d from storage engine", + ErrIllegalHa: "Table storage engine for '%-.192s' doesn't have this option", + ErrKeyNotFound: "Can't find record in '%-.192s'", + ErrNotFormFile: "Incorrect information in file: '%-.200s'", + ErrNotKeyFile: "Incorrect key file for table '%-.200s'; try to repair it", + ErrOldKeyFile: "Old key file for table '%-.192s'; repair it!", + ErrOpenAsReadonly: "Table '%-.192s' is read only", + ErrOutofMemory: "Out of memory; restart server and try again (needed %d bytes)", + ErrOutOfSortMemory: "Out of sort memory, consider increasing server sort buffer size", + ErrUnexpectedEOF: "Unexpected EOF found when reading file '%-.192s' (errno: %d - %s)", + ErrConCount: "Too many connections", + ErrOutOfResources: "Out of memory; check if mysqld or some other process uses all available memory; if not, you may have to use 'ulimit' to allow mysqld to use more memory or you can add more swap space", + ErrBadHost: "Can't get hostname for your address", + ErrHandshake: "Bad handshake", + ErrDBaccessDenied: "Access denied for user '%-.48s'@'%-.64s' to database '%-.192s'", + ErrAccessDenied: "Access denied for user '%-.48s'@'%-.64s' (using password: %s)", + ErrNoDB: "No database selected", + ErrUnknownCom: "Unknown command", + ErrBadNull: "Column '%-.192s' cannot be null", + ErrBadDB: "Unknown database '%-.192s'", + ErrTableExists: "Table '%-.192s' already exists", + ErrBadTable: "Unknown table '%-.100s'", + ErrNonUniq: "Column '%-.192s' in %-.192s is ambiguous", + ErrServerShutdown: "Server shutdown in progress", + ErrBadField: "Unknown column '%-.192s' in '%-.192s'", + ErrFieldNotInGroupBy: "Expression #%d of %s is not in GROUP BY clause and contains nonaggregated column '%s' which is not functionally dependent on columns in GROUP BY clause; this is incompatible with sql_mode=only_full_group_by", + ErrWrongGroupField: "Can't group on '%-.192s'", + ErrWrongSumSelect: "Statement has sum functions and columns in same statement", + ErrWrongValueCount: "Column count doesn't match value count", + ErrTooLongIdent: "Identifier name '%-.100s' is too long", + ErrDupFieldName: "Duplicate column name '%-.192s'", + ErrDupKeyName: "Duplicate key name '%-.192s'", + ErrDupEntry: "Duplicate entry '%-.64s' for key '%-.192s'", + ErrWrongFieldSpec: "Incorrect column specifier for column '%-.192s'", + ErrParse: "%s %s", + ErrEmptyQuery: "Query was empty", + ErrNonuniqTable: "Not unique table/alias: '%-.192s'", + ErrInvalidDefault: "Invalid default value for '%-.192s'", + ErrMultiplePriKey: "Multiple primary key defined", + ErrTooManyKeys: "Too many keys specified; max %d keys allowed", + ErrTooManyKeyParts: "Too many key parts specified; max %d parts allowed", + ErrTooLongKey: "Specified key was too long; max key length is %d bytes", + ErrKeyColumnDoesNotExits: "Key column '%-.192s' doesn't exist in table", + ErrBlobUsedAsKey: "BLOB column '%-.192s' can't be used in key specification with the used table type", + ErrTooBigFieldlength: "Column length too big for column 
'%-.192s' (max = %d); use BLOB or TEXT instead", + ErrWrongAutoKey: "Incorrect table definition; there can be only one auto column and it must be defined as a key", + ErrReady: "%s: ready for connections.\nVersion: '%s' socket: '%s' port: %d", + ErrNormalShutdown: "%s: Normal shutdown\n", + ErrGotSignal: "%s: Got signal %d. Aborting!\n", + ErrShutdownComplete: "%s: Shutdown complete\n", + ErrForcingClose: "%s: Forcing close of thread %d user: '%-.48s'\n", + ErrIpsock: "Can't create IP socket", + ErrNoSuchIndex: "Table '%-.192s' has no index like the one used in CREATE INDEX; recreate the table", + ErrWrongFieldTerminators: "Field separator argument is not what is expected; check the manual", + ErrBlobsAndNoTerminated: "You can't use fixed rowlength with BLOBs; please use 'fields terminated by'", + ErrTextFileNotReadable: "The file '%-.128s' must be in the database directory or be readable by all", + ErrFileExists: "File '%-.200s' already exists", + ErrLoadInfo: "Records: %d Deleted: %d Skipped: %d Warnings: %d", + ErrAlterInfo: "Records: %d Duplicates: %d", + ErrWrongSubKey: "Incorrect prefix key; the used key part isn't a string, the used length is longer than the key part, or the storage engine doesn't support unique prefix keys", + ErrCantRemoveAllFields: "You can't delete all columns with ALTER TABLE; use DROP TABLE instead", + ErrCantDropFieldOrKey: "Can't DROP '%-.192s'; check that column/key exists", + ErrInsertInfo: "Records: %d Duplicates: %d Warnings: %d", + ErrUpdateTableUsed: "You can't specify target table '%-.192s' for update in FROM clause", + ErrNoSuchThread: "Unknown thread id: %d", + ErrKillDenied: "You are not owner of thread %d", + ErrNoTablesUsed: "No tables used", + ErrTooBigSet: "Too many strings for column %-.192s and SET", + ErrNoUniqueLogFile: "Can't generate a unique log-filename %-.200s.(1-999)\n", + ErrTableNotLockedForWrite: "Table '%-.192s' was locked with a READ lock and can't be updated", + ErrTableNotLocked: "Table '%-.192s' was not locked with LOCK TABLES", + ErrBlobCantHaveDefault: "BLOB/TEXT/JSON column '%-.192s' can't have a default value", + ErrWrongDBName: "Incorrect database name '%-.100s'", + ErrWrongTableName: "Incorrect table name '%-.100s'", + ErrTooBigSelect: "The SELECT would examine more than MAXJOINSIZE rows; check your WHERE and use SET SQLBIGSELECTS=1 or SET MAXJOINSIZE=# if the SELECT is okay", + ErrUnknown: "Unknown error", + ErrUnknownProcedure: "Unknown procedure '%-.192s'", + ErrWrongParamcountToProcedure: "Incorrect parameter count to procedure '%-.192s'", + ErrWrongParametersToProcedure: "Incorrect parameters to procedure '%-.192s'", + ErrUnknownTable: "Unknown table '%-.192s' in %-.32s", + ErrFieldSpecifiedTwice: "Column '%-.192s' specified twice", + ErrInvalidGroupFuncUse: "Invalid use of group function", + ErrUnsupportedExtension: "Table '%-.192s' uses an extension that doesn't exist in this MySQL version", + ErrTableMustHaveColumns: "A table must have at least 1 column", + ErrRecordFileFull: "The table '%-.192s' is full", + ErrUnknownCharacterSet: "Unknown character set: '%-.64s'", + ErrTooManyTables: "Too many tables; MySQL can only use %d tables in a join", + ErrTooManyFields: "Too many columns", + ErrTooBigRowsize: "Row size too large. The maximum row size for the used table type, not counting BLOBs, is %d. This includes storage overhead, check the manual. You have to change some columns to TEXT or BLOBs", + ErrStackOverrun: "Thread stack overrun: Used: %d of a %d stack. 
Use 'mysqld --threadStack=#' to specify a bigger stack if needed", + ErrWrongOuterJoin: "Cross dependency found in OUTER JOIN; examine your ON conditions", + ErrNullColumnInIndex: "Table handler doesn't support NULL in given index. Please change column '%-.192s' to be NOT NULL or use another handler", + ErrCantFindUdf: "Can't load function '%-.192s'", + ErrCantInitializeUdf: "Can't initialize function '%-.192s'; %-.80s", + ErrUdfNoPaths: "No paths allowed for shared library", + ErrUdfExists: "Function '%-.192s' already exists", + ErrCantOpenLibrary: "Can't open shared library '%-.192s' (errno: %d %-.128s)", + ErrCantFindDlEntry: "Can't find symbol '%-.128s' in library", + ErrFunctionNotDefined: "Function '%-.192s' is not defined", + ErrHostIsBlocked: "Host '%-.64s' is blocked because of many connection errors; unblock with 'mysqladmin flush-hosts'", + ErrHostNotPrivileged: "Host '%-.64s' is not allowed to connect to this MySQL server", + ErrPasswordAnonymousUser: "You are using MySQL as an anonymous user and anonymous users are not allowed to change passwords", + ErrPasswordNotAllowed: "You must have privileges to update tables in the mysql database to be able to change passwords for others", + ErrPasswordNoMatch: "Can't find any matching row in the user table", + ErrUpdateInfo: "Rows matched: %d Changed: %d Warnings: %d", + ErrCantCreateThread: "Can't create a new thread (errno %d); if you are not out of available memory, you can consult the manual for a possible OS-dependent bug", + ErrWrongValueCountOnRow: "Column count doesn't match value count at row %d", + ErrCantReopenTable: "Can't reopen table: '%-.192s'", + ErrInvalidUseOfNull: "Invalid use of NULL value", + ErrRegexp: "Got error '%-.64s' from regexp", + ErrMixOfGroupFuncAndFields: "Mixing of GROUP columns (MIN(),MAX(),COUNT(),...) 
with no GROUP columns is illegal if there is no GROUP BY clause", + ErrNonexistingGrant: "There is no such grant defined for user '%-.48s' on host '%-.64s'", + ErrTableaccessDenied: "%-.128s command denied to user '%-.48s'@'%-.64s' for table '%-.64s'", + ErrColumnaccessDenied: "%-.16s command denied to user '%-.48s'@'%-.64s' for column '%-.192s' in table '%-.192s'", + ErrIllegalGrantForTable: "Illegal GRANT/REVOKE command; please consult the manual to see which privileges can be used", + ErrGrantWrongHostOrUser: "The host or user argument to GRANT is too long", + ErrNoSuchTable: "Table '%-.192s.%-.192s' doesn't exist", + ErrNonexistingTableGrant: "There is no such grant defined for user '%-.48s' on host '%-.64s' on table '%-.192s'", + ErrNotAllowedCommand: "The used command is not allowed with this MySQL version", + ErrSyntax: "You have an error in your SQL syntax; check the manual that corresponds to your MySQL server version for the right syntax to use", + ErrDelayedCantChangeLock: "Delayed insert thread couldn't get requested lock for table %-.192s", + ErrTooManyDelayedThreads: "Too many delayed threads in use", + ErrAbortingConnection: "Aborted connection %d to db: '%-.192s' user: '%-.48s' (%-.64s)", + ErrNetPacketTooLarge: "Got a packet bigger than 'maxAllowedPacket' bytes", + ErrNetReadErrorFromPipe: "Got a read error from the connection pipe", + ErrNetFcntl: "Got an error from fcntl()", + ErrNetPacketsOutOfOrder: "Got packets out of order", + ErrNetUncompress: "Couldn't uncompress communication packet", + ErrNetRead: "Got an error reading communication packets", + ErrNetReadInterrupted: "Got timeout reading communication packets", + ErrNetErrorOnWrite: "Got an error writing communication packets", + ErrNetWriteInterrupted: "Got timeout writing communication packets", + ErrTooLongString: "Result string is longer than 'maxAllowedPacket' bytes", + ErrTableCantHandleBlob: "The used table type doesn't support BLOB/TEXT columns", + ErrTableCantHandleAutoIncrement: "The used table type doesn't support AUTOINCREMENT columns", + ErrDelayedInsertTableLocked: "INSERT DELAYED can't be used with table '%-.192s' because it is locked with LOCK TABLES", + ErrWrongColumnName: "Incorrect column name '%-.100s'", + ErrWrongKeyColumn: "The used storage engine can't index column '%-.192s'", + ErrWrongMrgTable: "Unable to open underlying table which is differently defined or of non-MyISAM type or doesn't exist", + ErrDupUnique: "Can't write, because of unique constraint, to table '%-.192s'", + ErrBlobKeyWithoutLength: "BLOB/TEXT column '%-.192s' used in key specification without a key length", + ErrPrimaryCantHaveNull: "All parts of a PRIMARY KEY must be NOT NULL; if you need NULL in a key, use UNIQUE instead", + ErrTooManyRows: "Result consisted of more than one row", + ErrRequiresPrimaryKey: "This table type requires a primary key", + ErrNoRaidCompiled: "This version of MySQL is not compiled with RAID support", + ErrUpdateWithoutKeyInSafeMode: "You are using safe update mode and you tried to update a table without a WHERE that uses a KEY column", + ErrKeyDoesNotExist: "Key '%-.192s' doesn't exist in table '%-.192s'", + ErrCheckNoSuchTable: "Can't open table", + ErrCheckNotImplemented: "The storage engine for the table doesn't support %s", + ErrCantDoThisDuringAnTransaction: "You are not allowed to execute this command in a transaction", + ErrErrorDuringCommit: "Got error %d during COMMIT", + ErrErrorDuringRollback: "Got error %d during ROLLBACK", + ErrErrorDuringFlushLogs: "Got error %d during 
FLUSHLOGS", + ErrErrorDuringCheckpoint: "Got error %d during CHECKPOINT", + ErrNewAbortingConnection: "Aborted connection %d to db: '%-.192s' user: '%-.48s' host: '%-.64s' (%-.64s)", + ErrDumpNotImplemented: "The storage engine for the table does not support binary table dump", + ErrFlushMasterBinlogClosed: "Binlog closed, cannot RESET MASTER", + ErrIndexRebuild: "Failed rebuilding the index of dumped table '%-.192s'", + ErrMaster: "Error from master: '%-.64s'", + ErrMasterNetRead: "Net error reading from master", + ErrMasterNetWrite: "Net error writing to master", + ErrFtMatchingKeyNotFound: "Can't find FULLTEXT index matching the column list", + ErrLockOrActiveTransaction: "Can't execute the given command because you have active locked tables or an active transaction", + ErrUnknownSystemVariable: "Unknown system variable '%-.64s'", + ErrCrashedOnUsage: "Table '%-.192s' is marked as crashed and should be repaired", + ErrCrashedOnRepair: "Table '%-.192s' is marked as crashed and last (automatic?) repair failed", + ErrWarningNotCompleteRollback: "Some non-transactional changed tables couldn't be rolled back", + ErrTransCacheFull: "Multi-statement transaction required more than 'maxBinlogCacheSize' bytes of storage; increase this mysqld variable and try again", + ErrSlaveMustStop: "This operation cannot be performed with a running slave; run STOP SLAVE first", + ErrSlaveNotRunning: "This operation requires a running slave; configure slave and do START SLAVE", + ErrBadSlave: "The server is not configured as slave; fix in config file or with CHANGE MASTER TO", + ErrMasterInfo: "Could not initialize master info structure; more error messages can be found in the MySQL error log", + ErrSlaveThread: "Could not create slave thread; check system resources", + ErrTooManyUserConnections: "User %-.64s already has more than 'maxUserConnections' active connections", + ErrSetConstantsOnly: "You may only use constant expressions with SET", + ErrLockWaitTimeout: "Lock wait timeout exceeded; try restarting transaction", + ErrLockTableFull: "The total number of locks exceeds the lock table size", + ErrReadOnlyTransaction: "Update locks cannot be acquired during a READ UNCOMMITTED transaction", + ErrDropDBWithReadLock: "DROP DATABASE not allowed while thread is holding global read lock", + ErrCreateDBWithReadLock: "CREATE DATABASE not allowed while thread is holding global read lock", + ErrWrongArguments: "Incorrect arguments to %s", + ErrNoPermissionToCreateUser: "'%-.48s'@'%-.64s' is not allowed to create new users", + ErrUnionTablesInDifferentDir: "Incorrect table definition; all MERGE tables must be in the same database", + ErrLockDeadlock: "Deadlock found when trying to get lock; try restarting transaction", + ErrTableCantHandleFt: "The used table type doesn't support FULLTEXT indexes", + ErrCannotAddForeign: "Cannot add foreign key constraint", + ErrNoReferencedRow: "Cannot add or update a child row: a foreign key constraint fails", + ErrRowIsReferenced: "Cannot delete or update a parent row: a foreign key constraint fails", + ErrConnectToMaster: "Error connecting to master: %-.128s", + ErrQueryOnMaster: "Error running query on master: %-.128s", + ErrErrorWhenExecutingCommand: "Error when executing command %s: %-.128s", + ErrWrongUsage: "Incorrect usage of %s and %s", + ErrWrongNumberOfColumnsInSelect: "The used SELECT statements have a different number of columns", + ErrCantUpdateWithReadlock: "Can't execute the query because you have a conflicting read lock", + ErrMixingNotAllowed: "Mixing of 
transactional and non-transactional tables is disabled", + ErrDupArgument: "Option '%s' used twice in statement", + ErrUserLimitReached: "User '%-.64s' has exceeded the '%s' resource (current value: %d)", + ErrSpecificAccessDenied: "Access denied; you need (at least one of) the %-.128s privilege(s) for this operation", + ErrLocalVariable: "Variable '%-.64s' is a SESSION variable and can't be used with SET GLOBAL", + ErrGlobalVariable: "Variable '%-.64s' is a GLOBAL variable and should be set with SET GLOBAL", + ErrNoDefault: "Variable '%-.64s' doesn't have a default value", + ErrWrongValueForVar: "Variable '%-.64s' can't be set to the value of '%-.200s'", + ErrWrongTypeForVar: "Incorrect argument type to variable '%-.64s'", + ErrVarCantBeRead: "Variable '%-.64s' can only be set, not read", + ErrCantUseOptionHere: "Incorrect usage/placement of '%s'", + ErrNotSupportedYet: "This version of TiDB doesn't yet support '%s'", + ErrMasterFatalErrorReadingBinlog: "Got fatal error %d from master when reading data from binary log: '%-.320s'", + ErrSlaveIgnoredTable: "Slave SQL thread ignored the query because of replicate-*-table rules", + ErrIncorrectGlobalLocalVar: "Variable '%-.192s' is a %s variable", + ErrWrongFkDef: "Incorrect foreign key definition for '%-.192s': %s", + ErrKeyRefDoNotMatchTableRef: "Key reference and table reference don't match", + ErrOperandColumns: "Operand should contain %d column(s)", + ErrSubqueryNo1Row: "Subquery returns more than 1 row", + ErrUnknownStmtHandler: "Unknown prepared statement handler (%.*s) given to %s", + ErrCorruptHelpDB: "Help database is corrupt or does not exist", + ErrCyclicReference: "Cyclic reference on subqueries", + ErrAutoConvert: "Converting column '%s' from %s to %s", + ErrIllegalReference: "Reference '%-.64s' not supported (%s)", + ErrDerivedMustHaveAlias: "Every derived table must have its own alias", + ErrSelectReduced: "Select %d was reduced during optimization", + ErrTablenameNotAllowedHere: "Table '%s' from one of the %ss cannot be used in %s", + ErrNotSupportedAuthMode: "Client does not support authentication protocol requested by server; consider upgrading MySQL client", + ErrSpatialCantHaveNull: "All parts of a SPATIAL index must be NOT NULL", + ErrCollationCharsetMismatch: "COLLATION '%s' is not valid for CHARACTER SET '%s'", + ErrSlaveWasRunning: "Slave is already running", + ErrSlaveWasNotRunning: "Slave already has been stopped", + ErrTooBigForUncompress: "Uncompressed data size too large; the maximum size is %d (probably, length of uncompressed data was corrupted)", + ErrZlibZMem: "ZLIB: Not enough memory", + ErrZlibZBuf: "ZLIB: Not enough room in the output buffer (probably, length of uncompressed data was corrupted)", + ErrZlibZData: "ZLIB: Input data corrupted", + ErrCutValueGroupConcat: "Some rows were cut by GROUPCONCAT(%s)", + ErrWarnTooFewRecords: "Row %d doesn't contain data for all columns", + ErrWarnTooManyRecords: "Row %d was truncated; it contained more data than there were input columns", + ErrWarnNullToNotnull: "Column set to default value; NULL supplied to NOT NULL column '%s' at row %d", + ErrWarnDataOutOfRange: "Out of range value for column '%s' at row %d", + WarnDataTruncated: "Data truncated for column '%s' at row %d", + ErrWarnUsingOtherHandler: "Using storage engine %s for table '%s'", + ErrCantAggregate2collations: "Illegal mix of collations (%s,%s) and (%s,%s) for operation '%s'", + ErrDropUser: "Cannot drop one or more of the requested users", + ErrRevokeGrants: "Can't revoke all privileges for one or 
more of the requested users", + ErrCantAggregate3collations: "Illegal mix of collations (%s,%s), (%s,%s), (%s,%s) for operation '%s'", + ErrCantAggregateNcollations: "Illegal mix of collations for operation '%s'", + ErrVariableIsNotStruct: "Variable '%-.64s' is not a variable component (can't be used as XXXX.variableName)", + ErrUnknownCollation: "Unknown collation: '%-.64s'", + ErrSlaveIgnoredSslParams: "SSL parameters in CHANGE MASTER are ignored because this MySQL slave was compiled without SSL support; they can be used later if MySQL slave with SSL is started", + ErrServerIsInSecureAuthMode: "Server is running in --secure-auth mode, but '%s'@'%s' has a password in the old format; please change the password to the new format", + ErrWarnFieldResolved: "Field or reference '%-.192s%s%-.192s%s%-.192s' of SELECT #%d was resolved in SELECT #%d", + ErrBadSlaveUntilCond: "Incorrect parameter or combination of parameters for START SLAVE UNTIL", + ErrMissingSkipSlave: "It is recommended to use --skip-slave-start when doing step-by-step replication with START SLAVE UNTIL; otherwise, you will get problems if you get an unexpected slave's mysqld restart", + ErrUntilCondIgnored: "SQL thread is not to be started so UNTIL options are ignored", + ErrWrongNameForIndex: "Incorrect index name '%-.100s'", + ErrWrongNameForCatalog: "Incorrect catalog name '%-.100s'", + ErrWarnQcResize: "Query cache failed to set size %d; new query cache size is %d", + ErrBadFtColumn: "Column '%-.192s' cannot be part of FULLTEXT index", + ErrUnknownKeyCache: "Unknown key cache '%-.100s'", + ErrWarnHostnameWontWork: "MySQL is started in --skip-name-resolve mode; you must restart it without this switch for this grant to work", + ErrUnknownStorageEngine: "Unknown storage engine '%s'", + ErrWarnDeprecatedSyntax: "'%s' is deprecated and will be removed in a future release. 
Please use %s instead", + ErrNonUpdatableTable: "The target table %-.100s of the %s is not updatable", + ErrFeatureDisabled: "The '%s' feature is disabled; you need MySQL built with '%s' to have it working", + ErrOptionPreventsStatement: "The MySQL server is running with the %s option so it cannot execute this statement", + ErrDuplicatedValueInType: "Column '%-.100s' has duplicated value '%-.64s' in %s", + ErrTruncatedWrongValue: "Truncated incorrect %-.64s value: '%-.128s'", + ErrTooMuchAutoTimestampCols: "Incorrect table definition; there can be only one TIMESTAMP column with CURRENTTIMESTAMP in DEFAULT or ON UPDATE clause", + ErrInvalidOnUpdate: "Invalid ON UPDATE clause for '%-.192s' column", + ErrUnsupportedPs: "This command is not supported in the prepared statement protocol yet", + ErrGetErrmsg: "Got error %d '%-.100s' from %s", + ErrGetTemporaryErrmsg: "Got temporary error %d '%-.100s' from %s", + ErrUnknownTimeZone: "Unknown or incorrect time zone: '%-.64s'", + ErrWarnInvalidTimestamp: "Invalid TIMESTAMP value in column '%s' at row %d", + ErrInvalidCharacterString: "Invalid %s character string: '%.64s'", + ErrWarnAllowedPacketOverflowed: "Result of %s() was larger than max_allowed_packet (%d) - truncated", + ErrConflictingDeclarations: "Conflicting declarations: '%s%s' and '%s%s'", + ErrSpNoRecursiveCreate: "Can't create a %s from within another stored routine", + ErrSpAlreadyExists: "%s %s already exists", + ErrSpDoesNotExist: "%s %s does not exist", + ErrSpDropFailed: "Failed to DROP %s %s", + ErrSpStoreFailed: "Failed to CREATE %s %s", + ErrSpLilabelMismatch: "%s with no matching label: %s", + ErrSpLabelRedefine: "Redefining label %s", + ErrSpLabelMismatch: "End-label %s without match", + ErrSpUninitVar: "Referring to uninitialized variable %s", + ErrSpBadselect: "PROCEDURE %s can't return a result set in the given context", + ErrSpBadreturn: "RETURN is only allowed in a FUNCTION", + ErrSpBadstatement: "%s is not allowed in stored procedures", + ErrUpdateLogDeprecatedIgnored: "The update log is deprecated and replaced by the binary log; SET SQLLOGUPDATE has been ignored.", + ErrUpdateLogDeprecatedTranslated: "The update log is deprecated and replaced by the binary log; SET SQLLOGUPDATE has been translated to SET SQLLOGBIN.", + ErrQueryInterrupted: "Query execution was interrupted", + ErrSpWrongNoOfArgs: "Incorrect number of arguments for %s %s; expected %d, got %d", + ErrSpCondMismatch: "Undefined CONDITION: %s", + ErrSpNoreturn: "No RETURN found in FUNCTION %s", + ErrSpNoreturnend: "FUNCTION %s ended without RETURN", + ErrSpBadCursorQuery: "Cursor statement must be a SELECT", + ErrSpBadCursorSelect: "Cursor SELECT must not have INTO", + ErrSpCursorMismatch: "Undefined CURSOR: %s", + ErrSpCursorAlreadyOpen: "Cursor is already open", + ErrSpCursorNotOpen: "Cursor is not open", + ErrSpUndeclaredVar: "Undeclared variable: %s", + ErrSpWrongNoOfFetchArgs: "Incorrect number of FETCH variables", + ErrSpFetchNoData: "No data - zero rows fetched, selected, or processed", + ErrSpDupParam: "Duplicate parameter: %s", + ErrSpDupVar: "Duplicate variable: %s", + ErrSpDupCond: "Duplicate condition: %s", + ErrSpDupCurs: "Duplicate cursor: %s", + ErrSpCantAlter: "Failed to ALTER %s %s", + ErrSpSubselectNyi: "Subquery value not supported", + ErrStmtNotAllowedInSfOrTrg: "%s is not allowed in stored function or trigger", + ErrSpVarcondAfterCurshndlr: "Variable or condition declaration after cursor or handler declaration", + ErrSpCursorAfterHandler: "Cursor declaration after handler declaration", + 
ErrSpCaseNotFound: "Case not found for CASE statement", + ErrFparserTooBigFile: "Configuration file '%-.192s' is too big", + ErrFparserBadHeader: "Malformed file type header in file '%-.192s'", + ErrFparserEOFInComment: "Unexpected end of file while parsing comment '%-.200s'", + ErrFparserErrorInParameter: "Error while parsing parameter '%-.192s' (line: '%-.192s')", + ErrFparserEOFInUnknownParameter: "Unexpected end of file while skipping unknown parameter '%-.192s'", + ErrViewNoExplain: "EXPLAIN/SHOW can not be issued; lacking privileges for underlying table", + ErrFrmUnknownType: "File '%-.192s' has unknown type '%-.64s' in its header", + ErrWrongObject: "'%-.192s.%-.192s' is not %s", + ErrNonupdateableColumn: "Column '%-.192s' is not updatable", + ErrViewSelectDerived: "View's SELECT contains a subquery in the FROM clause", + ErrViewSelectClause: "View's SELECT contains a '%s' clause", + ErrViewSelectVariable: "View's SELECT contains a variable or parameter", + ErrViewSelectTmptable: "View's SELECT refers to a temporary table '%-.192s'", + ErrViewWrongList: "View's SELECT and view's field list have different column counts", + ErrWarnViewMerge: "View merge algorithm can't be used here for now (assumed undefined algorithm)", + ErrWarnViewWithoutKey: "View being updated does not have complete key of underlying table in it", + ErrViewInvalid: "View '%-.192s.%-.192s' references invalid table(s) or column(s) or function(s) or definer/invoker of view lack rights to use them", + ErrSpNoDropSp: "Can't drop or alter a %s from within another stored routine", + ErrSpGotoInHndlr: "GOTO is not allowed in a stored procedure handler", + ErrTrgAlreadyExists: "Trigger already exists", + ErrTrgDoesNotExist: "Trigger does not exist", + ErrTrgOnViewOrTempTable: "Trigger's '%-.192s' is view or temporary table", + ErrTrgCantChangeRow: "Updating of %s row is not allowed in %strigger", + ErrTrgNoSuchRowInTrg: "There is no %s row in %s trigger", + ErrNoDefaultForField: "Field '%-.192s' doesn't have a default value", + ErrDivisionByZero: "Division by 0", + ErrTruncatedWrongValueForField: "Incorrect %-.32s value: '%-.128s' for column '%.192s' at row %d", + ErrIllegalValueForType: "Illegal %s '%-.192s' value found during parsing", + ErrViewNonupdCheck: "CHECK OPTION on non-updatable view '%-.192s.%-.192s'", + ErrViewCheckFailed: "CHECK OPTION failed '%-.192s.%-.192s'", + ErrProcaccessDenied: "%-.16s command denied to user '%-.48s'@'%-.64s' for routine '%-.192s'", + ErrRelayLogFail: "Failed purging old relay logs: %s", + ErrPasswdLength: "Password hash should be a %d-digit hexadecimal number", + ErrUnknownTargetBinlog: "Target log not found in binlog index", + ErrIoErrLogIndexRead: "I/O error reading log index file", + ErrBinlogPurgeProhibited: "Server configuration does not permit binlog purge", + ErrFseekFail: "Failed on fseek()", + ErrBinlogPurgeFatalErr: "Fatal error during log purge", + ErrLogInUse: "A purgeable log is in use, will not purge", + ErrLogPurgeUnknownErr: "Unknown error during log purge", + ErrRelayLogInit: "Failed initializing relay log position: %s", + ErrNoBinaryLogging: "You are not using binary logging", + ErrReservedSyntax: "The '%-.64s' syntax is reserved for purposes internal to the MySQL server", + ErrWsasFailed: "WSAStartup Failed", + ErrDiffGroupsProc: "Can't handle procedures with different groups yet", + ErrNoGroupForProc: "Select must have a group with this procedure", + ErrOrderWithProc: "Can't use ORDER clause with this procedure", + ErrLoggingProhibitChangingOf: "Binary logging and 
replication forbid changing the global server %s", + ErrNoFileMapping: "Can't map file: %-.200s, errno: %d", + ErrWrongMagic: "Wrong magic in %-.64s", + ErrPsManyParam: "Prepared statement contains too many placeholders", + ErrKeyPart0: "Key part '%-.192s' length cannot be 0", + ErrViewChecksum: "View text checksum failed", + ErrViewMultiupdate: "Can not modify more than one base table through a join view '%-.192s.%-.192s'", + ErrViewNoInsertFieldList: "Can not insert into join view '%-.192s.%-.192s' without fields list", + ErrViewDeleteMergeView: "Can not delete from join view '%-.192s.%-.192s'", + ErrCannotUser: "Operation %s failed for %.256s", + ErrXaerNota: "XAERNOTA: Unknown XID", + ErrXaerInval: "XAERINVAL: Invalid arguments (or unsupported command)", + ErrXaerRmfail: "XAERRMFAIL: The command cannot be executed when global transaction is in the %.64s state", + ErrXaerOutside: "XAEROUTSIDE: Some work is done outside global transaction", + ErrXaerRmerr: "XAERRMERR: Fatal error occurred in the transaction branch - check your data for consistency", + ErrXaRbrollback: "XARBROLLBACK: Transaction branch was rolled back", + ErrNonexistingProcGrant: "There is no such grant defined for user '%-.48s' on host '%-.64s' on routine '%-.192s'", + ErrProcAutoGrantFail: "Failed to grant EXECUTE and ALTER ROUTINE privileges", + ErrProcAutoRevokeFail: "Failed to revoke all privileges to dropped routine", + ErrDataTooLong: "Data too long for column '%s' at row %d", + ErrSpBadSQLstate: "Bad SQLSTATE: '%s'", + ErrStartup: "%s: ready for connections.\nVersion: '%s' socket: '%s' port: %d %s", + ErrLoadFromFixedSizeRowsToVar: "Can't load value from file with fixed size rows to variable", + ErrCantCreateUserWithGrant: "You are not allowed to create a user with GRANT", + ErrWrongValueForType: "Incorrect %-.32s value: '%-.128s' for function %-.32s", + ErrTableDefChanged: "Table definition has changed, please retry transaction", + ErrSpDupHandler: "Duplicate handler declared in the same block", + ErrSpNotVarArg: "OUT or INOUT argument %d for routine %s is not a variable or NEW pseudo-variable in BEFORE trigger", + ErrSpNoRetset: "Not allowed to return a result set from a %s", + ErrCantCreateGeometryObject: "Cannot get geometry object from data you send to the GEOMETRY field", + ErrFailedRoutineBreakBinlog: "A routine failed and has neither NO SQL nor READS SQL DATA in its declaration and binary logging is enabled; if non-transactional tables were updated, the binary log will miss their changes", + ErrBinlogUnsafeRoutine: "This function has none of DETERMINISTIC, NO SQL, or READS SQL DATA in its declaration and binary logging is enabled (you *might* want to use the less safe logBinTrustFunctionCreators variable)", + ErrBinlogCreateRoutineNeedSuper: "You do not have the SUPER privilege and binary logging is enabled (you *might* want to use the less safe logBinTrustFunctionCreators variable)", + ErrExecStmtWithOpenCursor: "You can't execute a prepared statement which has an open cursor associated with it. Reset the statement to re-execute it.", + ErrStmtHasNoOpenCursor: "The statement (%d) has no open cursor.", + ErrCommitNotAllowedInSfOrTrg: "Explicit or implicit commit is not allowed in stored function or trigger.", + ErrNoDefaultForViewField: "Field of view '%-.192s.%-.192s' underlying table doesn't have a default value", + ErrSpNoRecursion: "Recursive stored functions and triggers are not allowed.", + ErrTooBigScale: "Too big scale %d specified for column '%-.192s'. 
Maximum is %d.", + ErrTooBigPrecision: "Too big precision %d specified for column '%-.192s'. Maximum is %d.", + ErrMBiggerThanD: "For float(M,D), double(M,D) or decimal(M,D), M must be >= D (column '%-.192s').", + ErrWrongLockOfSystemTable: "You can't combine write-locking of system tables with other tables or lock types", + ErrConnectToForeignDataSource: "Unable to connect to foreign data source: %.64s", + ErrQueryOnForeignDataSource: "There was a problem processing the query on the foreign data source. Data source : %-.64s", + ErrForeignDataSourceDoesntExist: "The foreign data source you are trying to reference does not exist. Data source : %-.64s", + ErrForeignDataStringInvalidCantCreate: "Can't create federated table. The data source connection string '%-.64s' is not in the correct format", + ErrForeignDataStringInvalid: "The data source connection string '%-.64s' is not in the correct format", + ErrCantCreateFederatedTable: "Can't create federated table. Foreign data src : %-.64s", + ErrTrgInWrongSchema: "Trigger in wrong schema", + ErrStackOverrunNeedMore: "Thread stack overrun: %d bytes used of a %d byte stack, and %d bytes needed. Use 'mysqld --threadStack=#' to specify a bigger stack.", + ErrTooLongBody: "Routine body for '%-.100s' is too long", + ErrWarnCantDropDefaultKeycache: "Cannot drop default keycache", + ErrTooBigDisplaywidth: "Display width out of range for column '%-.192s' (max = %d)", + ErrXaerDupid: "XAERDUPID: The XID already exists", + ErrDatetimeFunctionOverflow: "Datetime function: %-.32s field overflow", + ErrCantUpdateUsedTableInSfOrTrg: "Can't update table '%-.192s' in stored function/trigger because it is already used by statement which invoked this stored function/trigger.", + ErrViewPreventUpdate: "The definition of table '%-.192s' prevents operation %.192s on table '%-.192s'.", + ErrPsNoRecursion: "The prepared statement contains a stored routine call that refers to that same statement. It's not allowed to execute a prepared statement in such a recursive manner", + ErrSpCantSetAutocommit: "Not allowed to set autocommit from a stored function or trigger", + ErrMalformedDefiner: "Definer is not fully qualified", + ErrViewFrmNoUser: "View '%-.192s'.'%-.192s' has no definer information (old table format). Current user is used as definer. Please recreate the view!", + ErrViewOtherUser: "You need the SUPER privilege for creation view with '%-.192s'@'%-.192s' definer", + ErrNoSuchUser: "The user specified as a definer ('%-.64s'@'%-.64s') does not exist", + ErrForbidSchemaChange: "Changing schema from '%-.192s' to '%-.192s' is not allowed.", + ErrRowIsReferenced2: "Cannot delete or update a parent row: a foreign key constraint fails (%.192s)", + ErrNoReferencedRow2: "Cannot add or update a child row: a foreign key constraint fails (%.192s)", + ErrSpBadVarShadow: "Variable '%-.64s' must be quoted with `...`, or renamed", + ErrTrgNoDefiner: "No definer attribute for trigger '%-.192s'.'%-.192s'. The trigger will be activated under the authorization of the invoker, which may have insufficient privileges. Please recreate the trigger.", + ErrOldFileFormat: "'%-.192s' has an old format, you should re-create the '%s' object(s)", + ErrSpRecursionLimit: "Recursive limit %d (as set by the maxSpRecursionDepth variable) was exceeded for routine %.192s", + ErrSpProcTableCorrupt: "Failed to load routine %-.192s. 
The table mysql.proc is missing, corrupt, or contains bad data (internal code %d)", + ErrSpWrongName: "Incorrect routine name '%-.192s'", + ErrTableNeedsUpgrade: "Table upgrade required. Please do \"REPAIR TABLE `%-.32s`\"", + ErrSpNoAggregate: "AGGREGATE is not supported for stored functions", + ErrMaxPreparedStmtCountReached: "Can't create more than maxPreparedStmtCount statements (current value: %d)", + ErrViewRecursive: "`%-.192s`.`%-.192s` contains view recursion", + ErrNonGroupingFieldUsed: "Non-grouping field '%-.192s' is used in %-.64s clause", + ErrTableCantHandleSpkeys: "The used table type doesn't support SPATIAL indexes", + ErrNoTriggersOnSystemSchema: "Triggers can not be created on system tables", + ErrRemovedSpaces: "Leading spaces are removed from name '%s'", + ErrAutoincReadFailed: "Failed to read auto-increment value from storage engine", + ErrUsername: "user name", + ErrHostname: "host name", + ErrWrongStringLength: "String '%-.70s' is too long for %s (should be no longer than %d)", + ErrNonInsertableTable: "The target table %-.100s of the %s is not insertable-into", + ErrAdminWrongMrgTable: "Table '%-.64s' is differently defined or of non-MyISAM type or doesn't exist", + ErrTooHighLevelOfNestingForSelect: "Too high level of nesting for select", + ErrNameBecomesEmpty: "Name '%-.64s' has become ''", + ErrAmbiguousFieldTerm: "First character of the FIELDS TERMINATED string is ambiguous; please use non-optional and non-empty FIELDS ENCLOSED BY", + ErrForeignServerExists: "The foreign server, %s, you are trying to create already exists.", + ErrForeignServerDoesntExist: "The foreign server name you are trying to reference does not exist. Data source : %-.64s", + ErrIllegalHaCreateOption: "Table storage engine '%-.64s' does not support the create option '%.64s'", + ErrPartitionRequiresValues: "Syntax : %-.64s PARTITIONING requires definition of VALUES %-.64s for each partition", + ErrPartitionWrongValues: "Only %-.64s PARTITIONING can use VALUES %-.64s in partition definition", + ErrPartitionMaxvalue: "MAXVALUE can only be used in last partition definition", + ErrPartitionSubpartition: "Subpartitions can only be hash partitions and by key", + ErrPartitionSubpartMix: "Must define subpartitions on all partitions if on one partition", + ErrPartitionWrongNoPart: "Wrong number of partitions defined, mismatch with previous setting", + ErrPartitionWrongNoSubpart: "Wrong number of subpartitions defined, mismatch with previous setting", + ErrWrongExprInPartitionFunc: "Constant, random or timezone-dependent expressions in (sub)partitioning function are not allowed", + ErrNoConstExprInRangeOrList: "Expression in RANGE/LIST VALUES must be constant", + ErrFieldNotFoundPart: "Field in list of fields for partition function not found in table", + ErrListOfFieldsOnlyInHash: "List of fields is only allowed in KEY partitions", + ErrInconsistentPartitionInfo: "The partition info in the frm file is not consistent with what can be written into the frm file", + ErrPartitionFuncNotAllowed: "The %-.192s function returns the wrong type", + ErrPartitionsMustBeDefined: "For %-.64s partitions each partition must be defined", + ErrRangeNotIncreasing: "VALUES LESS THAN value must be strictly increasing for each partition", + ErrInconsistentTypeOfFunctions: "VALUES value must be of same type as partition function", + ErrMultipleDefConstInListPart: "Multiple definition of same constant in list partitioning", + ErrPartitionEntry: "Partitioning can not be used stand-alone in query", + ErrMixHandler: "The mix of 
handlers in the partitions is not allowed in this version of MySQL", + ErrPartitionNotDefined: "For the partitioned engine it is necessary to define all %-.64s", + ErrTooManyPartitions: "Too many partitions (including subpartitions) were defined", + ErrSubpartition: "It is only possible to mix RANGE/LIST partitioning with HASH/KEY partitioning for subpartitioning", + ErrCantCreateHandlerFile: "Failed to create specific handler file", + ErrBlobFieldInPartFunc: "A BLOB field is not allowed in partition function", + ErrUniqueKeyNeedAllFieldsInPf: "A %-.192s must include all columns in the table's partitioning function", + ErrNoParts: "Number of %-.64s = 0 is not an allowed value", + ErrPartitionMgmtOnNonpartitioned: "Partition management on a not partitioned table is not possible", + ErrForeignKeyOnPartitioned: "Foreign key clause is not yet supported in conjunction with partitioning", + ErrDropPartitionNonExistent: "Error in list of partitions to %-.64s", + ErrDropLastPartition: "Cannot remove all partitions, use DROP TABLE instead", + ErrCoalesceOnlyOnHashPartition: "COALESCE PARTITION can only be used on HASH/KEY partitions", + ErrReorgHashOnlyOnSameNo: "REORGANIZE PARTITION can only be used to reorganize partitions not to change their numbers", + ErrReorgNoParam: "REORGANIZE PARTITION without parameters can only be used on auto-partitioned tables using HASH PARTITIONs", + ErrOnlyOnRangeListPartition: "%-.64s PARTITION can only be used on RANGE/LIST partitions", + ErrAddPartitionSubpart: "Trying to Add partition(s) with wrong number of subpartitions", + ErrAddPartitionNoNewPartition: "At least one partition must be added", + ErrCoalescePartitionNoPartition: "At least one partition must be coalesced", + ErrReorgPartitionNotExist: "More partitions to reorganize than there are partitions", + ErrSameNamePartition: "Duplicate partition name %-.192s", + ErrNoBinlog: "It is not allowed to shut off binlog on this command", + ErrConsecutiveReorgPartitions: "When reorganizing a set of partitions they must be in consecutive order", + ErrReorgOutsideRange: "Reorganize of range partitions cannot change total ranges except for last partition where it can extend the range", + ErrPartitionFunctionFailure: "Partition function not supported in this version for this handler", + ErrPartState: "Partition state cannot be defined from CREATE/ALTER TABLE", + ErrLimitedPartRange: "The %-.64s handler only supports 32 bit integers in VALUES", + ErrPluginIsNotLoaded: "Plugin '%-.192s' is not loaded", + ErrWrongValue: "Incorrect %-.32s value: '%-.128s'", + ErrNoPartitionForGivenValue: "Table has no partition for value %-.64s", + ErrFilegroupOptionOnlyOnce: "It is not allowed to specify %s more than once", + ErrCreateFilegroupFailed: "Failed to create %s", + ErrDropFilegroupFailed: "Failed to drop %s", + ErrTablespaceAutoExtend: "The handler doesn't support autoextend of tablespaces", + ErrWrongSizeNumber: "A size parameter was incorrectly specified, either number or on the form 10M", + ErrSizeOverflow: "The size number was correct but we don't allow the digit part to be more than 2 billion", + ErrAlterFilegroupFailed: "Failed to alter: %s", + ErrBinlogRowLoggingFailed: "Writing one row to the row-based binary log failed", + ErrBinlogRowWrongTableDef: "Table definition on master and slave does not match: %s", + ErrBinlogRowRbrToSbr: "Slave running with --log-slave-updates must use row-based binary logging to be able to replicate row-based binary log events", + ErrEventAlreadyExists: "Event '%-.192s' already exists", + 
ErrEventStoreFailed: "Failed to store event %s. Error code %d from storage engine.", + ErrEventDoesNotExist: "Unknown event '%-.192s'", + ErrEventCantAlter: "Failed to alter event '%-.192s'", + ErrEventDropFailed: "Failed to drop %s", + ErrEventIntervalNotPositiveOrTooBig: "INTERVAL is either not positive or too big", + ErrEventEndsBeforeStarts: "ENDS is either invalid or before STARTS", + ErrEventExecTimeInThePast: "Event execution time is in the past. Event has been disabled", + ErrEventOpenTableFailed: "Failed to open mysql.event", + ErrEventNeitherMExprNorMAt: "No datetime expression provided", + ErrObsoleteColCountDoesntMatchCorrupted: "Column count of mysql.%s is wrong. Expected %d, found %d. The table is probably corrupted", + ErrObsoleteCannotLoadFromTable: "Cannot load from mysql.%s. The table is probably corrupted", + ErrEventCannotDelete: "Failed to delete the event from mysql.event", + ErrEventCompile: "Error during compilation of event's body", + ErrEventSameName: "Same old and new event name", + ErrEventDataTooLong: "Data for column '%s' too long", + ErrDropIndexFk: "Cannot drop index '%-.192s': needed in a foreign key constraint", + ErrWarnDeprecatedSyntaxWithVer: "The syntax '%s' is deprecated and will be removed in MySQL %s. Please use %s instead", + ErrCantWriteLockLogTable: "You can't write-lock a log table. Only read access is possible", + ErrCantLockLogTable: "You can't use locks with log tables.", + ErrForeignDuplicateKeyOldUnused: "Upholding foreign key constraints for table '%.192s', entry '%-.192s', key %d would lead to a duplicate entry", + ErrColCountDoesntMatchPleaseUpdate: "Column count of mysql.%s is wrong. Expected %d, found %d. Created with MySQL %d, now running %d. Please use mysqlUpgrade to fix this error.", + ErrTempTablePreventsSwitchOutOfRbr: "Cannot switch out of the row-based binary log format when the session has open temporary tables", + ErrStoredFunctionPreventsSwitchBinlogFormat: "Cannot change the binary logging format inside a stored function or trigger", + ErrNdbCantSwitchBinlogFormat: "The NDB cluster engine does not support changing the binlog format on the fly yet", + ErrPartitionNoTemporary: "Cannot create temporary table with partitions", + ErrPartitionConstDomain: "Partition constant is out of partition function domain", + ErrPartitionFunctionIsNotAllowed: "This partition function is not allowed", + ErrDdlLog: "Error in DDL log", + ErrNullInValuesLessThan: "Not allowed to use NULL value in VALUES LESS THAN", + ErrWrongPartitionName: "Incorrect partition name", + ErrCantChangeTxCharacteristics: "Transaction characteristics can't be changed while a transaction is in progress", + ErrDupEntryAutoincrementCase: "ALTER TABLE causes autoIncrement resequencing, resulting in duplicate entry '%-.192s' for key '%-.192s'", + ErrEventModifyQueue: "Internal scheduler error %d", + ErrEventSetVar: "Error during starting/stopping of the scheduler. 
Error code %d", + ErrPartitionMerge: "Engine cannot be used in partitioned tables", + ErrCantActivateLog: "Cannot activate '%-.64s' log", + ErrRbrNotAvailable: "The server was not built with row-based replication", + ErrBase64Decode: "Decoding of base64 string failed", + ErrEventRecursionForbidden: "Recursion of EVENT DDL statements is forbidden when body is present", + ErrEventsDB: "Cannot proceed because system tables used by Event Scheduler were found damaged at server start", + ErrOnlyIntegersAllowed: "Only integers allowed as number here", + ErrUnsuportedLogEngine: "This storage engine cannot be used for log tables\"", + ErrBadLogStatement: "You cannot '%s' a log table if logging is enabled", + ErrCantRenameLogTable: "Cannot rename '%s'. When logging enabled, rename to/from log table must rename two tables: the log table to an archive table and another table back to '%s'", + ErrWrongParamcountToNativeFct: "Incorrect parameter count in the call to native function '%-.192s'", + ErrWrongParametersToNativeFct: "Incorrect parameters in the call to native function '%-.192s'", + ErrWrongParametersToStoredFct: "Incorrect parameters in the call to stored function '%-.192s'", + ErrNativeFctNameCollision: "This function '%-.192s' has the same name as a native function", + ErrDupEntryWithKeyName: "Duplicate entry '%-.64s' for key '%-.192s'", + ErrBinlogPurgeEmFile: "Too many files opened, please execute the command again", + ErrEventCannotCreateInThePast: "Event execution time is in the past and ON COMPLETION NOT PRESERVE is set. The event was dropped immediately after creation.", + ErrEventCannotAlterInThePast: "Event execution time is in the past and ON COMPLETION NOT PRESERVE is set. The event was not changed. Specify a time in the future.", + ErrSlaveIncident: "The incident %s occurred on the master. Message: %-.64s", + ErrNoPartitionForGivenValueSilent: "Table has no partition for some existing values", + ErrBinlogUnsafeStatement: "Unsafe statement written to the binary log using statement format since BINLOGFORMAT = STATEMENT. %s", + ErrSlaveFatal: "Fatal : %s", + ErrSlaveRelayLogReadFailure: "Relay log read failure: %s", + ErrSlaveRelayLogWriteFailure: "Relay log write failure: %s", + ErrSlaveCreateEventFailure: "Failed to create %s", + ErrSlaveMasterComFailure: "Master command %s failed: %s", + ErrBinlogLoggingImpossible: "Binary logging not possible. Message: %s", + ErrViewNoCreationCtx: "View `%-.64s`.`%-.64s` has no creation context", + ErrViewInvalidCreationCtx: "Creation context of view `%-.64s`.`%-.64s' is invalid", + ErrSrInvalidCreationCtx: "Creation context of stored routine `%-.64s`.`%-.64s` is invalid", + ErrTrgCorruptedFile: "Corrupted TRG file for table `%-.64s`.`%-.64s`", + ErrTrgNoCreationCtx: "Triggers for table `%-.64s`.`%-.64s` have no creation context", + ErrTrgInvalidCreationCtx: "Trigger creation context of table `%-.64s`.`%-.64s` is invalid", + ErrEventInvalidCreationCtx: "Creation context of event `%-.64s`.`%-.64s` is invalid", + ErrTrgCantOpenTable: "Cannot open table for trigger `%-.64s`.`%-.64s`", + ErrCantCreateSroutine: "Cannot create stored routine `%-.64s`. Check warnings", + ErrNeverUsed: "Ambiguous slave modes combination. 
%s", + ErrNoFormatDescriptionEventBeforeBinlogStatement: "The BINLOG statement of type `%s` was not preceded by a format description BINLOG statement.", + ErrSlaveCorruptEvent: "Corrupted replication event was detected", + ErrLoadDataInvalidColumn: "Invalid column reference (%-.64s) in LOAD DATA", + ErrLogPurgeNoFile: "Being purged log %s was not found", + ErrXaRbtimeout: "XARBTIMEOUT: Transaction branch was rolled back: took too long", + ErrXaRbdeadlock: "XARBDEADLOCK: Transaction branch was rolled back: deadlock was detected", + ErrNeedReprepare: "Prepared statement needs to be re-prepared", + ErrDelayedNotSupported: "DELAYED option not supported for table '%-.192s'", + WarnNoMasterInfo: "The master info structure does not exist", + WarnOptionIgnored: "<%-.64s> option ignored", + WarnPluginDeleteBuiltin: "Built-in plugins cannot be deleted", + WarnPluginBusy: "Plugin is busy and will be uninstalled on shutdown", + ErrVariableIsReadonly: "%s variable '%s' is read-only. Use SET %s to assign the value", + ErrWarnEngineTransactionRollback: "Storage engine %s does not support rollback for this statement. Transaction rolled back and must be restarted", + ErrSlaveHeartbeatFailure: "Unexpected master's heartbeat data: %s", + ErrSlaveHeartbeatValueOutOfRange: "The requested value for the heartbeat period is either negative or exceeds the maximum allowed (%s seconds).", + ErrNdbReplicationSchema: "Bad schema for mysql.ndbReplication table. Message: %-.64s", + ErrConflictFnParse: "Error in parsing conflict function. Message: %-.64s", + ErrExceptionsWrite: "Write to exceptions table failed. Message: %-.128s\"", + ErrTooLongTableComment: "Comment for table '%-.64s' is too long (max = %d)", + ErrTooLongFieldComment: "Comment for field '%-.64s' is too long (max = %d)", + ErrFuncInexistentNameCollision: "FUNCTION %s does not exist. 
Check the 'Function Name Parsing and Resolution' section in the Reference Manual", + ErrDatabaseName: "Database", + ErrTableName: "Table", + ErrPartitionName: "Partition", + ErrSubpartitionName: "Subpartition", + ErrTemporaryName: "Temporary", + ErrRenamedName: "Renamed", + ErrTooManyConcurrentTrxs: "Too many active concurrent transactions", + WarnNonASCIISeparatorNotImplemented: "Non-ASCII separator arguments are not fully supported", + ErrDebugSyncTimeout: "debug sync point wait timed out", + ErrDebugSyncHitLimit: "debug sync point hit limit reached", + ErrDupSignalSet: "Duplicate condition information item '%s'", + ErrSignalWarn: "Unhandled user-defined warning condition", + ErrSignalNotFound: "Unhandled user-defined not found condition", + ErrSignalException: "Unhandled user-defined exception condition", + ErrResignalWithoutActiveHandler: "RESIGNAL when handler not active", + ErrSignalBadConditionType: "SIGNAL/RESIGNAL can only use a CONDITION defined with SQLSTATE", + WarnCondItemTruncated: "Data truncated for condition item '%s'", + ErrCondItemTooLong: "Data too long for condition item '%s'", + ErrUnknownLocale: "Unknown locale: '%-.64s'", + ErrSlaveIgnoreServerIds: "The requested server id %d clashes with the slave startup option --replicate-same-server-id", + ErrQueryCacheDisabled: "Query cache is disabled; restart the server with queryCacheType=1 to enable it", + ErrSameNamePartitionField: "Duplicate partition field name '%-.192s'", + ErrPartitionColumnList: "Inconsistency in usage of column lists for partitioning", + ErrWrongTypeColumnValue: "Partition column values of incorrect type", + ErrTooManyPartitionFuncFields: "Too many fields in '%-.192s'", + ErrMaxvalueInValuesIn: "Cannot use MAXVALUE as value in VALUES IN", + ErrTooManyValues: "Cannot have more than one value for this type of %-.64s partitioning", + ErrRowSinglePartitionField: "Row expressions in VALUES IN only allowed for multi-field column partitioning", + ErrFieldTypeNotAllowedAsPartitionField: "Field '%-.192s' is of a not allowed type for this type of partitioning", + ErrPartitionFieldsTooLong: "The total length of the partitioning fields is too large", + ErrBinlogRowEngineAndStmtEngine: "Cannot execute statement: impossible to write to binary log since both row-incapable engines and statement-incapable engines are involved.", + ErrBinlogRowModeAndStmtEngine: "Cannot execute statement: impossible to write to binary log since BINLOGFORMAT = ROW and at least one table uses a storage engine limited to statement-based logging.", + ErrBinlogUnsafeAndStmtEngine: "Cannot execute statement: impossible to write to binary log since statement is unsafe, storage engine is limited to statement-based logging, and BINLOGFORMAT = MIXED. 
%s", + ErrBinlogRowInjectionAndStmtEngine: "Cannot execute statement: impossible to write to binary log since statement is in row format and at least one table uses a storage engine limited to statement-based logging.", + ErrBinlogStmtModeAndRowEngine: "Cannot execute statement: impossible to write to binary log since BINLOGFORMAT = STATEMENT and at least one table uses a storage engine limited to row-based logging.%s", + ErrBinlogRowInjectionAndStmtMode: "Cannot execute statement: impossible to write to binary log since statement is in row format and BINLOGFORMAT = STATEMENT.", + ErrBinlogMultipleEnginesAndSelfLoggingEngine: "Cannot execute statement: impossible to write to binary log since more than one engine is involved and at least one engine is self-logging.", + ErrBinlogUnsafeLimit: "The statement is unsafe because it uses a LIMIT clause. This is unsafe because the set of rows included cannot be predicted.", + ErrBinlogUnsafeInsertDelayed: "The statement is unsafe because it uses INSERT DELAYED. This is unsafe because the times when rows are inserted cannot be predicted.", + ErrBinlogUnsafeSystemTable: "The statement is unsafe because it uses the general log, slow query log, or performanceSchema table(s). This is unsafe because system tables may differ on slaves.", + ErrBinlogUnsafeAutoincColumns: "Statement is unsafe because it invokes a trigger or a stored function that inserts into an AUTOINCREMENT column. Inserted values cannot be logged correctly.", + ErrBinlogUnsafeUdf: "Statement is unsafe because it uses a UDF which may not return the same value on the slave.", + ErrBinlogUnsafeSystemVariable: "Statement is unsafe because it uses a system variable that may have a different value on the slave.", + ErrBinlogUnsafeSystemFunction: "Statement is unsafe because it uses a system function that may return a different value on the slave.", + ErrBinlogUnsafeNontransAfterTrans: "Statement is unsafe because it accesses a non-transactional table after accessing a transactional table within the same transaction.", + ErrMessageAndStatement: "%s Statement: %s", + ErrSlaveConversionFailed: "Column %d of table '%-.192s.%-.192s' cannot be converted from type '%-.32s' to type '%-.32s'", + ErrSlaveCantCreateConversion: "Can't create conversion table for table '%-.192s.%-.192s'", + ErrInsideTransactionPreventsSwitchBinlogFormat: "Cannot modify @@session.binlogFormat inside a transaction", + ErrPathLength: "The path specified for %.64s is too long.", + ErrWarnDeprecatedSyntaxNoReplacement: "'%s' is deprecated and will be removed in a future release.", + ErrWrongNativeTableStructure: "Native table '%-.64s'.'%-.64s' has the wrong structure", + ErrWrongPerfSchemaUsage: "Invalid performanceSchema usage.", + ErrWarnISSkippedTable: "Table '%s'.'%s' was skipped since its definition is being modified by concurrent DDL statement", + ErrInsideTransactionPreventsSwitchBinlogDirect: "Cannot modify @@session.binlogDirectNonTransactionalUpdates inside a transaction", + ErrStoredFunctionPreventsSwitchBinlogDirect: "Cannot change the binlog direct flag inside a stored function or trigger", + ErrSpatialMustHaveGeomCol: "A SPATIAL index may only contain a geometrical type column", + ErrTooLongIndexComment: "Comment for index '%-.64s' is too long (max = %d)", + ErrLockAborted: "Wait on a lock was aborted due to a pending exclusive lock", + ErrDataOutOfRange: "%s value is out of range in '%s'", + ErrWrongSpvarTypeInLimit: "A variable of a non-integer based type in LIMIT clause", + 
ErrBinlogUnsafeMultipleEnginesAndSelfLoggingEngine: "Mixing self-logging and non-self-logging engines in a statement is unsafe.", + ErrBinlogUnsafeMixedStatement: "Statement accesses nontransactional table as well as transactional or temporary table, and writes to any of them.", + ErrInsideTransactionPreventsSwitchSQLLogBin: "Cannot modify @@session.sqlLogBin inside a transaction", + ErrStoredFunctionPreventsSwitchSQLLogBin: "Cannot change the sqlLogBin inside a stored function or trigger", + ErrFailedReadFromParFile: "Failed to read from the .par file", + ErrValuesIsNotIntType: "VALUES value for partition '%-.64s' must have type INT", + ErrAccessDeniedNoPassword: "Access denied for user '%-.48s'@'%-.64s'", + ErrSetPasswordAuthPlugin: "SET PASSWORD has no significance for users authenticating via plugins", + ErrGrantPluginUserExists: "GRANT with IDENTIFIED WITH is illegal because the user %-.*s already exists", + ErrTruncateIllegalFk: "Cannot truncate a table referenced in a foreign key constraint (%.192s)", + ErrPluginIsPermanent: "Plugin '%s' is forcePlusPermanent and can not be unloaded", + ErrSlaveHeartbeatValueOutOfRangeMin: "The requested value for the heartbeat period is less than 1 millisecond. The value is reset to 0, meaning that heartbeating will effectively be disabled.", + ErrSlaveHeartbeatValueOutOfRangeMax: "The requested value for the heartbeat period exceeds the value of `slaveNetTimeout' seconds. A sensible value for the period should be less than the timeout.", + ErrStmtCacheFull: "Multi-row statements required more than 'maxBinlogStmtCacheSize' bytes of storage; increase this mysqld variable and try again", + ErrMultiUpdateKeyConflict: "Primary key/partition key update is not allowed since the table is updated both as '%-.192s' and '%-.192s'.", + ErrTableNeedsRebuild: "Table rebuild required. Please do \"ALTER TABLE `%-.32s` FORCE\" or dump/reload to fix it!", + WarnOptionBelowLimit: "The value of '%s' should be no less than the value of '%s'", + ErrIndexColumnTooLong: "Index column size too large. The maximum column size is %d bytes.", + ErrErrorInTriggerBody: "Trigger '%-.64s' has an error in its body: '%-.256s'", + ErrErrorInUnknownTriggerBody: "Unknown trigger has an error in its body: '%-.256s'", + ErrIndexCorrupt: "Index %s is corrupted", + ErrUndoRecordTooBig: "Undo log record is too big.", + ErrBinlogUnsafeInsertIgnoreSelect: "INSERT IGNORE... SELECT is unsafe because the order in which rows are retrieved by the SELECT determines which (if any) rows are ignored. This order cannot be predicted and may differ on master and the slave.", + ErrBinlogUnsafeInsertSelectUpdate: "INSERT... SELECT... ON DUPLICATE KEY UPDATE is unsafe because the order in which rows are retrieved by the SELECT determines which (if any) rows are updated. This order cannot be predicted and may differ on master and the slave.", + ErrBinlogUnsafeReplaceSelect: "REPLACE... SELECT is unsafe because the order in which rows are retrieved by the SELECT determines which (if any) rows are replaced. This order cannot be predicted and may differ on master and the slave.", + ErrBinlogUnsafeCreateIgnoreSelect: "CREATE... IGNORE SELECT is unsafe because the order in which rows are retrieved by the SELECT determines which (if any) rows are ignored. This order cannot be predicted and may differ on master and the slave.", + ErrBinlogUnsafeCreateReplaceSelect: "CREATE... REPLACE SELECT is unsafe because the order in which rows are retrieved by the SELECT determines which (if any) rows are replaced. 
This order cannot be predicted and may differ on master and the slave.", + ErrBinlogUnsafeUpdateIgnore: "UPDATE IGNORE is unsafe because the order in which rows are updated determines which (if any) rows are ignored. This order cannot be predicted and may differ on master and the slave.", + ErrPluginNoUninstall: "Plugin '%s' is marked as not dynamically uninstallable. You have to stop the server to uninstall it.", + ErrPluginNoInstall: "Plugin '%s' is marked as not dynamically installable. You have to stop the server to install it.", + ErrBinlogUnsafeWriteAutoincSelect: "Statements writing to a table with an auto-increment column after selecting from another table are unsafe because the order in which rows are retrieved determines what (if any) rows will be written. This order cannot be predicted and may differ on master and the slave.", + ErrBinlogUnsafeCreateSelectAutoinc: "CREATE TABLE... SELECT... on a table with an auto-increment column is unsafe because the order in which rows are retrieved by the SELECT determines which (if any) rows are inserted. This order cannot be predicted and may differ on master and the slave.", + ErrBinlogUnsafeInsertTwoKeys: "INSERT... ON DUPLICATE KEY UPDATE on a table with more than one UNIQUE KEY is unsafe", + ErrTableInFkCheck: "Table is being used in foreign key check.", + ErrUnsupportedEngine: "Storage engine '%s' does not support system tables. [%s.%s]", + ErrBinlogUnsafeAutoincNotFirst: "INSERT into autoincrement field which is not the first part in the composed primary key is unsafe.", + ErrCannotLoadFromTableV2: "Cannot load from %s.%s. The table is probably corrupted", + ErrMasterDelayValueOutOfRange: "The requested value %d for the master delay exceeds the maximum %d", + ErrOnlyFdAndRbrEventsAllowedInBinlogStatement: "Only FormatDescriptionLogEvent and row events are allowed in BINLOG statements (but %s was provided)", + ErrPartitionExchangeDifferentOption: "Non matching attribute '%-.64s' between partition and table", + ErrPartitionExchangePartTable: "Table to exchange with partition is partitioned: '%-.64s'", + ErrPartitionExchangeTempTable: "Table to exchange with partition is temporary: '%-.64s'", + ErrPartitionInsteadOfSubpartition: "Subpartitioned table, use subpartition instead of partition", + ErrUnknownPartition: "Unknown partition '%-.64s' in table '%-.64s'", + ErrTablesDifferentMetadata: "Tables have different definitions", + ErrRowDoesNotMatchPartition: "Found a row that does not match the partition", + ErrBinlogCacheSizeGreaterThanMax: "Option binlogCacheSize (%d) is greater than maxBinlogCacheSize (%d); setting binlogCacheSize equal to maxBinlogCacheSize.", + ErrWarnIndexNotApplicable: "Cannot use %-.64s access on index '%-.64s' due to type or collation conversion on field '%-.64s'", + ErrPartitionExchangeForeignKey: "Table to exchange with partition has foreign key references: '%-.64s'", + ErrNoSuchKeyValue: "Key value '%-.192s' was not found in table '%-.192s.%-.192s'", + ErrRplInfoDataTooLong: "Data for column '%s' too long", + ErrNetworkReadEventChecksumFailure: "Replication event checksum verification failed while reading from network.", + ErrBinlogReadEventChecksumFailure: "Replication event checksum verification failed while reading from a log file.", + ErrBinlogStmtCacheSizeGreaterThanMax: "Option binlogStmtCacheSize (%d) is greater than maxBinlogStmtCacheSize (%d); setting binlogStmtCacheSize equal to maxBinlogStmtCacheSize.", + ErrCantUpdateTableInCreateTableSelect: "Can't update table '%-.192s' while '%-.192s' is being 
created.", + ErrPartitionClauseOnNonpartitioned: "PARTITION () clause on non partitioned table", + ErrRowDoesNotMatchGivenPartitionSet: "Found a row not matching the given partition set", + ErrNoSuchPartitionunused: "partition '%-.64s' doesn't exist", + ErrChangeRplInfoRepositoryFailure: "Failure while changing the type of replication repository: %s.", + ErrWarningNotCompleteRollbackWithCreatedTempTable: "The creation of some temporary tables could not be rolled back.", + ErrWarningNotCompleteRollbackWithDroppedTempTable: "Some temporary tables were dropped, but these operations could not be rolled back.", + ErrMtsFeatureIsNotSupported: "%s is not supported in multi-threaded slave mode. %s", + ErrMtsUpdatedDBsGreaterMax: "The number of modified databases exceeds the maximum %d; the database names will not be included in the replication event metadata.", + ErrMtsCantParallel: "Cannot execute the current event group in the parallel mode. Encountered event %s, relay-log name %s, position %s which prevents execution of this event group in parallel mode. Reason: %s.", + ErrMtsInconsistentData: "%s", + ErrFulltextNotSupportedWithPartitioning: "FULLTEXT index is not supported for partitioned tables.", + ErrDaInvalidConditionNumber: "Invalid condition number", + ErrInsecurePlainText: "Sending passwords in plain text without SSL/TLS is extremely insecure.", + ErrInsecureChangeMaster: "Storing MySQL user name or password information in the master.info repository is not secure and is therefore not recommended. Please see the MySQL Manual for more about this issue and possible alternatives.", + ErrForeignDuplicateKeyWithChildInfo: "Foreign key constraint for table '%.192s', record '%-.192s' would lead to a duplicate entry in table '%.192s', key '%.192s'", + ErrForeignDuplicateKeyWithoutChildInfo: "Foreign key constraint for table '%.192s', record '%-.192s' would lead to a duplicate entry in a child table", + ErrSQLthreadWithSecureSlave: "Setting authentication options is not possible when only the Slave SQL Thread is being started.", + ErrTableHasNoFt: "The table does not have FULLTEXT index to support this query", + ErrVariableNotSettableInSfOrTrigger: "The system variable %.200s cannot be set in stored functions or triggers.", + ErrVariableNotSettableInTransaction: "The system variable %.200s cannot be set when there is an ongoing transaction.", + ErrGtidNextIsNotInGtidNextList: "The system variable @@SESSION.GTIDNEXT has the value %.200s, which is not listed in @@SESSION.GTIDNEXTLIST.", + ErrCantChangeGtidNextInTransactionWhenGtidNextListIsNull: "When @@SESSION.GTIDNEXTLIST == NULL, the system variable @@SESSION.GTIDNEXT cannot change inside a transaction.", + ErrSetStatementCannotInvokeFunction: "The statement 'SET %.200s' cannot invoke a stored function.", + ErrGtidNextCantBeAutomaticIfGtidNextListIsNonNull: "The system variable @@SESSION.GTIDNEXT cannot be 'AUTOMATIC' when @@SESSION.GTIDNEXTLIST is non-NULL.", + ErrSkippingLoggedTransaction: "Skipping transaction %.200s because it has already been executed and logged.", + ErrMalformedGtidSetSpecification: "Malformed GTID set specification '%.200s'.", + ErrMalformedGtidSetEncoding: "Malformed GTID set encoding.", + ErrMalformedGtidSpecification: "Malformed GTID specification '%.200s'.", + ErrGnoExhausted: "Impossible to generate Global Transaction Identifier: the integer component reached the maximal value. 
Restart the server with a new serverUuid.", + ErrBadSlaveAutoPosition: "Parameters MASTERLOGFILE, MASTERLOGPOS, RELAYLOGFILE and RELAYLOGPOS cannot be set when MASTERAUTOPOSITION is active.", + ErrAutoPositionRequiresGtidModeOn: "CHANGE MASTER TO MASTERAUTOPOSITION = 1 can only be executed when @@GLOBAL.GTIDMODE = ON.", + ErrCantDoImplicitCommitInTrxWhenGtidNextIsSet: "Cannot execute statements with implicit commit inside a transaction when @@SESSION.GTIDNEXT != AUTOMATIC or @@SESSION.GTIDNEXTLIST != NULL.", + ErrGtidMode2Or3RequiresEnforceGtidConsistencyOn: "@@GLOBAL.GTIDMODE = ON or UPGRADESTEP2 requires @@GLOBAL.ENFORCEGTIDCONSISTENCY = 1.", + ErrGtidModeRequiresBinlog: "@@GLOBAL.GTIDMODE = ON or UPGRADESTEP1 or UPGRADESTEP2 requires --log-bin and --log-slave-updates.", + ErrCantSetGtidNextToGtidWhenGtidModeIsOff: "@@SESSION.GTIDNEXT cannot be set to UUID:NUMBER when @@GLOBAL.GTIDMODE = OFF.", + ErrCantSetGtidNextToAnonymousWhenGtidModeIsOn: "@@SESSION.GTIDNEXT cannot be set to ANONYMOUS when @@GLOBAL.GTIDMODE = ON.", + ErrCantSetGtidNextListToNonNullWhenGtidModeIsOff: "@@SESSION.GTIDNEXTLIST cannot be set to a non-NULL value when @@GLOBAL.GTIDMODE = OFF.", + ErrFoundGtidEventWhenGtidModeIsOff: "Found a GtidLogEvent or PreviousGtidsLogEvent when @@GLOBAL.GTIDMODE = OFF.", + ErrGtidUnsafeNonTransactionalTable: "When @@GLOBAL.ENFORCEGTIDCONSISTENCY = 1, updates to non-transactional tables can only be done in either autocommitted statements or single-statement transactions, and never in the same statement as updates to transactional tables.", + ErrGtidUnsafeCreateSelect: "CREATE TABLE ... SELECT is forbidden when @@GLOBAL.ENFORCEGTIDCONSISTENCY = 1.", + ErrGtidUnsafeCreateDropTemporaryTableInTransaction: "When @@GLOBAL.ENFORCEGTIDCONSISTENCY = 1, the statements CREATE TEMPORARY TABLE and DROP TEMPORARY TABLE can be executed in a non-transactional context only, and require that AUTOCOMMIT = 1.", + ErrGtidModeCanOnlyChangeOneStepAtATime: "The value of @@GLOBAL.GTIDMODE can only change one step at a time: OFF <-> UPGRADESTEP1 <-> UPGRADESTEP2 <-> ON. Also note that this value must be stepped up or down simultaneously on all servers; see the Manual for instructions.", + ErrMasterHasPurgedRequiredGtids: "The slave is connecting using CHANGE MASTER TO MASTERAUTOPOSITION = 1, but the master has purged binary logs containing GTIDs that the slave requires.", + ErrCantSetGtidNextWhenOwningGtid: "@@SESSION.GTIDNEXT cannot be changed by a client that owns a GTID. The client owns %s. Ownership is released on COMMIT or ROLLBACK.", + ErrUnknownExplainFormat: "Unknown EXPLAIN format name: '%s'", + ErrCantExecuteInReadOnlyTransaction: "Cannot execute statement in a READ ONLY transaction.", + ErrTooLongTablePartitionComment: "Comment for table partition '%-.64s' is too long (max = %d)", + ErrSlaveConfiguration: "Slave is not configured or failed to initialize properly. You must at least set --server-id to enable either a master or a slave. Additional error messages can be found in the MySQL error log.", + ErrInnodbFtLimit: "InnoDB presently supports one FULLTEXT index creation at a time", + ErrInnodbNoFtTempTable: "Cannot create FULLTEXT index on temporary InnoDB table", + ErrInnodbFtWrongDocidColumn: "Column '%-.192s' is of wrong type for an InnoDB FULLTEXT index", + ErrInnodbFtWrongDocidIndex: "Index '%-.192s' is of wrong type for an InnoDB FULLTEXT index", + ErrInnodbOnlineLogTooBig: "Creating index '%-.192s' required more than 'innodbOnlineAlterLogMaxSize' bytes of modification log. 
Please try again.", + ErrUnknownAlterAlgorithm: "Unknown ALGORITHM '%s'", + ErrUnknownAlterLock: "Unknown LOCK type '%s'", + ErrMtsChangeMasterCantRunWithGaps: "CHANGE MASTER cannot be executed when the slave was stopped with an error or killed in MTS mode. Consider using RESET SLAVE or START SLAVE UNTIL.", + ErrMtsRecoveryFailure: "Cannot recover after SLAVE errored out in parallel execution mode. Additional error messages can be found in the MySQL error log.", + ErrMtsResetWorkers: "Cannot clean up worker info tables. Additional error messages can be found in the MySQL error log.", + ErrColCountDoesntMatchCorruptedV2: "Column count of %s.%s is wrong. Expected %d, found %d. The table is probably corrupted", + ErrSlaveSilentRetryTransaction: "Slave must silently retry current transaction", + ErrDiscardFkChecksRunning: "There is a foreign key check running on table '%-.192s'. Cannot discard the table.", + ErrTableSchemaMismatch: "Schema mismatch (%s)", + ErrTableInSystemTablespace: "Table '%-.192s' in system tablespace", + ErrIoRead: "IO Read : (%d, %s) %s", + ErrIoWrite: "IO Write : (%d, %s) %s", + ErrTablespaceMissing: "Tablespace is missing for table '%-.192s'", + ErrTablespaceExists: "Tablespace for table '%-.192s' exists. Please DISCARD the tablespace before IMPORT.", + ErrTablespaceDiscarded: "Tablespace has been discarded for table '%-.192s'", + ErrInternal: "Internal : %s", + ErrInnodbImport: "ALTER TABLE '%-.192s' IMPORT TABLESPACE failed with error %d : '%s'", + ErrInnodbIndexCorrupt: "Index corrupt: %s", + ErrInvalidYearColumnLength: "Supports only YEAR or YEAR(4) column", + ErrNotValidPassword: "Your password does not satisfy the current policy requirements", + ErrMustChangePassword: "You must SET PASSWORD before executing this statement", + ErrFkNoIndexChild: "Failed to add the foreign key constaint. Missing index for constraint '%s' in the foreign table '%s'", + ErrFkNoIndexParent: "Failed to add the foreign key constaint. Missing index for constraint '%s' in the referenced table '%s'", + ErrFkFailAddSystem: "Failed to add the foreign key constraint '%s' to system tables", + ErrFkCannotOpenParent: "Failed to open the referenced table '%s'", + ErrFkIncorrectOption: "Failed to add the foreign key constraint on table '%s'. Incorrect options in FOREIGN KEY constraint '%s'", + ErrFkDupName: "Duplicate foreign key constraint name '%s'", + ErrPasswordFormat: "The password hash doesn't have the expected format. Check if the correct password algorithm is being used with the PASSWORD() function.", + ErrFkColumnCannotDrop: "Cannot drop column '%-.192s': needed in a foreign key constraint '%-.192s'", + ErrFkColumnCannotDropChild: "Cannot drop column '%-.192s': needed in a foreign key constraint '%-.192s' of table '%-.192s'", + ErrFkColumnNotNull: "Column '%-.192s' cannot be NOT NULL: needed in a foreign key constraint '%-.192s' SET NULL", + ErrDupIndex: "Duplicate index '%-.64s' defined on the table '%-.64s.%-.64s'. 
This is deprecated and will be disallowed in a future release.", + ErrFkColumnCannotChange: "Cannot change column '%-.192s': used in a foreign key constraint '%-.192s'", + ErrFkColumnCannotChangeChild: "Cannot change column '%-.192s': used in a foreign key constraint '%-.192s' of table '%-.192s'", + ErrFkCannotDeleteParent: "Cannot delete rows from table which is parent in a foreign key constraint '%-.192s' of table '%-.192s'", + ErrMalformedPacket: "Malformed communication packet.", + ErrReadOnlyMode: "Running in read-only mode", + ErrGtidNextTypeUndefinedGroup: "When @@SESSION.GTIDNEXT is set to a GTID, you must explicitly set it again after a COMMIT or ROLLBACK. If you see this error message in the slave SQL thread, it means that a table in the current transaction is transactional on the master and non-transactional on the slave. In a client connection, it means that you executed SET @@SESSION.GTIDNEXT before a transaction and forgot to set @@SESSION.GTIDNEXT to a different identifier or to 'AUTOMATIC' after COMMIT or ROLLBACK. Current @@SESSION.GTIDNEXT is '%s'.", + ErrVariableNotSettableInSp: "The system variable %.200s cannot be set in stored procedures.", + ErrCantSetGtidPurgedWhenGtidModeIsOff: "@@GLOBAL.GTIDPURGED can only be set when @@GLOBAL.GTIDMODE = ON.", + ErrCantSetGtidPurgedWhenGtidExecutedIsNotEmpty: "@@GLOBAL.GTIDPURGED can only be set when @@GLOBAL.GTIDEXECUTED is empty.", + ErrCantSetGtidPurgedWhenOwnedGtidsIsNotEmpty: "@@GLOBAL.GTIDPURGED can only be set when there are no ongoing transactions (not even in other clients).", + ErrGtidPurgedWasChanged: "@@GLOBAL.GTIDPURGED was changed from '%s' to '%s'.", + ErrGtidExecutedWasChanged: "@@GLOBAL.GTIDEXECUTED was changed from '%s' to '%s'.", + ErrBinlogStmtModeAndNoReplTables: "Cannot execute statement: impossible to write to binary log since BINLOGFORMAT = STATEMENT, and both replicated and non replicated tables are written to.", + ErrAlterOperationNotSupported: "%s is not supported for this operation. Try %s.", + ErrAlterOperationNotSupportedReason: "%s is not supported. Reason: %s. Try %s.", + ErrAlterOperationNotSupportedReasonCopy: "COPY algorithm requires a lock", + ErrAlterOperationNotSupportedReasonPartition: "Partition specific operations do not yet support LOCK/ALGORITHM", + ErrAlterOperationNotSupportedReasonFkRename: "Columns participating in a foreign key are renamed", + ErrAlterOperationNotSupportedReasonColumnType: "Cannot change column type INPLACE", + ErrAlterOperationNotSupportedReasonFkCheck: "Adding foreign keys needs foreignKeyChecks=OFF", + ErrAlterOperationNotSupportedReasonIgnore: "Creating unique indexes with IGNORE requires COPY algorithm to remove duplicate rows", + ErrAlterOperationNotSupportedReasonNopk: "Dropping a primary key is not allowed without also adding a new primary key", + ErrAlterOperationNotSupportedReasonAutoinc: "Adding an auto-increment column requires a lock", + ErrAlterOperationNotSupportedReasonHiddenFts: "Cannot replace hidden FTSDOCID with a user-visible one", + ErrAlterOperationNotSupportedReasonChangeFts: "Cannot drop or rename FTSDOCID", + ErrAlterOperationNotSupportedReasonFts: "Fulltext index creation requires a lock", + ErrSQLSlaveSkipCounterNotSettableInGtidMode: "sqlSlaveSkipCounter can not be set when the server is running with @@GLOBAL.GTIDMODE = ON. 
Instead, for each transaction that you want to skip, generate an empty transaction with the same GTID as the transaction", + ErrDupUnknownInIndex: "Duplicate entry for key '%-.192s'", + ErrIdentCausesTooLongPath: "Long database name and identifier for object resulted in path length exceeding %d characters. Path: '%s'.", + ErrAlterOperationNotSupportedReasonNotNull: "cannot silently convert NULL values, as required in this SQLMODE", + ErrMustChangePasswordLogin: "Your password has expired. To log in you must change it using a client that supports expired passwords.", + ErrRowInWrongPartition: "Found a row in wrong partition %s", + ErrGeneratedColumnFunctionIsNotAllowed: "Expression of generated column '%s' contains a disallowed function.", + ErrUnsupportedAlterInplaceOnVirtualColumn: "INPLACE ADD or DROP of virtual columns cannot be combined with other ALTER TABLE actions.", + ErrWrongFKOptionForGeneratedColumn: "Cannot define foreign key with %s clause on a generated column.", + ErrBadGeneratedColumn: "The value specified for generated column '%s' in table '%s' is not allowed.", + ErrUnsupportedOnGeneratedColumn: "'%s' is not supported for generated columns.", + ErrGeneratedColumnNonPrior: "Generated column can refer only to generated columns defined prior to it.", + ErrDependentByGeneratedColumn: "Column '%s' has a generated column dependency.", + ErrGeneratedColumnRefAutoInc: "Generated column '%s' cannot refer to auto-increment column.", + ErrInvalidFieldSize: "Invalid size for column '%s'.", + ErrIncorrectType: "Incorrect type for argument %s in function %s.", + ErrInvalidJSONData: "Invalid JSON data provided to function %s: %s", + ErrInvalidJSONText: "Invalid JSON text: %-.192s", + ErrInvalidJSONPath: "Invalid JSON path expression %s.", + ErrInvalidTypeForJSON: "Invalid data type for JSON data in argument %d to function %s; a JSON string or JSON type is required.", + ErrInvalidJSONPathWildcard: "In this situation, path expressions may not contain the * and ** tokens.", + ErrInvalidJSONContainsPathType: "The second argument can only be either 'one' or 'all'.", + ErrJSONUsedAsKey: "JSON column '%-.192s' cannot be used in key specification.", + ErrBadUser: "User %s does not exist.", + ErrUserAlreadyExists: "User %s already exists.", + ErrInvalidJSONPathArrayCell: "A path expression is not a path to a cell in an array.", + ErrInvalidEncryptionOption: "Invalid encryption option.", + ErrWindowNoSuchWindow: "Window name '%s' is not defined.", + ErrWindowCircularityInWindowGraph: "There is a circularity in the window dependency graph.", + ErrWindowNoChildPartitioning: "A window which depends on another cannot define partitioning.", + ErrWindowNoInherentFrame: "Window '%s' has a frame definition, so cannot be referenced by another window.", + ErrWindowNoRedefineOrderBy: "Window '%s' cannot inherit '%s' since both contain an ORDER BY clause.", + ErrWindowFrameStartIllegal: "Window '%s': frame start cannot be UNBOUNDED FOLLOWING.", + ErrWindowFrameEndIllegal: "Window '%s': frame end cannot be UNBOUNDED PRECEDING.", + ErrWindowFrameIllegal: "Window '%s': frame start or end is negative, NULL or of non-integral type", + ErrWindowRangeFrameOrderType: "Window '%s' with RANGE N PRECEDING/FOLLOWING frame requires exactly one ORDER BY expression, of numeric or temporal type", + ErrWindowRangeFrameTemporalType: "Window '%s' with RANGE frame has ORDER BY expression of datetime type. 
Only INTERVAL bound value allowed.", + ErrWindowRangeFrameNumericType: "Window '%s' with RANGE frame has ORDER BY expression of numeric type, INTERVAL bound value not allowed.", + ErrWindowRangeBoundNotConstant: "Window '%s' has a non-constant frame bound.", + ErrWindowDuplicateName: "Window '%s' is defined twice.", + ErrWindowIllegalOrderBy: "Window '%s': ORDER BY or PARTITION BY uses legacy position indication which is not supported, use expression.", + ErrWindowInvalidWindowFuncUse: "You cannot use the window function '%s' in this context.'", + ErrWindowInvalidWindowFuncAliasUse: "You cannot use the alias '%s' of an expression containing a window function in this context.'", + ErrWindowNestedWindowFuncUseInWindowSpec: "You cannot nest a window function in the specification of window '%s'.", + ErrWindowRowsIntervalUse: "Window '%s': INTERVAL can only be used with RANGE frames.", + ErrWindowNoGroupOrderUnused: "ASC or DESC with GROUP BY isn't allowed with window functions; put ASC or DESC in ORDER BY", + ErrWindowExplainJson: "To get information about window functions use EXPLAIN FORMAT=JSON", + ErrWindowFunctionIgnoresFrame: "Window function '%s' ignores the frame clause of window '%s' and aggregates over the whole partition", + ErrRoleNotGranted: "%s is is not granted to %s", + ErrMaxExecTimeExceeded: "Query execution was interrupted, max_execution_time exceeded.", + ErrLockAcquireFailAndNoWaitSet: "Statement aborted because lock(s) could not be acquired immediately and NOWAIT is set.", + ErrDataTruncatedFunctionalIndex: "Data truncated for functional index '%s' at row %d", + ErrDataOutOfRangeFunctionalIndex: "Value is out of range for functional index '%s' at row %d", + ErrFunctionalIndexOnJsonOrGeometryFunction: "Cannot create a functional index on a function that returns a JSON or GEOMETRY value", + ErrFunctionalIndexRefAutoIncrement: "Functional index '%s' cannot refer to an auto-increment column", + ErrCannotDropColumnFunctionalIndex: "Cannot drop column '%s' because it is used by a functional index. In order to drop the column, you must remove the functional index", + ErrFunctionalIndexPrimaryKey: "The primary key cannot be a functional index", + ErrFunctionalIndexOnLob: "Cannot create a functional index on an expression that returns a BLOB or TEXT. Please consider using CAST", + ErrFunctionalIndexFunctionIsNotAllowed: "Expression of functional index '%s' contains a disallowed function", + ErrFulltextFunctionalIndex: "Fulltext functional index is not supported", + ErrSpatialFunctionalIndex: "Spatial functional index is not supported", + ErrWrongKeyColumnFunctionalIndex: "The used storage engine cannot index the expression '%s'", + ErrFunctionalIndexOnField: "Functional index on a column is not supported. Consider using a regular index instead", + ErrFKIncompatibleColumns: "Referencing column '%s' in foreign key constraint '%s' are incompatible", + ErrFunctionalIndexRowValueIsNotAllowed: "Expression of functional index '%s' cannot refer to a row value", + ErrDependentByFunctionalIndex: "Column '%s' has a functional index dependency and cannot be dropped or renamed", + ErrInvalidJsonValueForFuncIndex: "Invalid JSON value for CAST for functional index '%s'", + ErrJsonValueOutOfRangeForFuncIndex: "Out of range JSON value for CAST for functional index '%s'", + ErrFunctionalIndexDataIsTooLong: "Data too long for functional index '%s'", + ErrFunctionalIndexNotApplicable: "Cannot use functional index '%s' due to type or collation conversion", + + // MariaDB errors. 
+ ErrOnlyOneDefaultPartionAllowed: "Only one DEFAULT partition allowed",
+ ErrWrongPartitionTypeExpectedSystemTime: "Wrong partitioning type, expected type: `SYSTEM_TIME`",
+ ErrSystemVersioningWrongPartitions: "Wrong Partitions: must have at least one HISTORY and exactly one last CURRENT",
+
+ // TiDB errors.
+ ErrMemExceedThreshold: "%s holds %dB memory, exceeds threshold %dB.%s",
+ ErrForUpdateCantRetry: "[%d] can not retry select for update statement",
+ ErrAdminCheckTable: "TiDB admin check table failed.",
+ ErrTxnTooLarge: "Transaction is too large, size: %d",
+ ErrWriteConflictInTiDB: "Write conflict, txnStartTS %d is stale",
+ ErrInvalidPluginID: "Wrong plugin id: %s, valid plugin id is [name]-[version], both name and version should not contain '-'",
+ ErrInvalidPluginManifest: "Cannot read plugin %s's manifest",
+ ErrInvalidPluginName: "Plugin load with %s but got wrong name %s",
+ ErrInvalidPluginVersion: "Plugin load with %s but got %s",
+ ErrDuplicatePlugin: "Plugin [%s] is redeclared",
+ ErrInvalidPluginSysVarName: "Plugin %s's sysVar %s must start with its plugin name %s",
+ ErrRequireVersionCheckFail: "Plugin %s require %s be %v but got %v",
+ ErrUnsupportedReloadPlugin: "Plugin %s isn't loaded so cannot be reloaded",
+ ErrUnsupportedReloadPluginVar: "Reload plugin with different sysVar is unsupported %v",
+ ErrTableLocked: "Table '%s' was locked in %s by %v",
+ ErrNotExist: "Error: key not exist",
+ ErrTxnRetryable: "Error: KV error safe to retry %s ",
+ ErrCannotSetNilValue: "can not set nil value",
+ ErrInvalidTxn: "invalid transaction",
+ ErrEntryTooLarge: "entry too large, the max entry size is %d, the size of data is %d",
+ ErrNotImplemented: "not implemented",
+ ErrInfoSchemaExpired: "Information schema is out of date: schema failed to update in 1 lease, please make sure TiDB can connect to TiKV",
+ ErrInfoSchemaChanged: "Information schema is changed during the execution of the statement (for example, table definition may be updated by other DDL ran in parallel). If you see this error often, try increasing `tidb_max_delta_schema_count`",
+ ErrBadNumber: "Bad Number",
+ ErrCastAsSignedOverflow: "Cast to signed converted positive out-of-range integer to its negative complement",
+ ErrCastNegIntAsUnsigned: "Cast to unsigned converted negative integer to its positive complement",
+ ErrInvalidYearFormat: "invalid year format",
+ ErrInvalidYear: "invalid year",
+ ErrIncorrectDatetimeValue: "Incorrect datetime value: '%s'",
+ ErrInvalidTimeFormat: "invalid time format: '%v'",
+ ErrInvalidWeekModeFormat: "invalid week mode format: '%v'",
+ ErrFieldGetDefaultFailed: "Field '%s' get default value fail",
+ ErrIndexOutBound: "Index column %s offset out of bound, offset: %d, row: %v",
+ ErrUnsupportedOp: "operation not supported",
+ ErrRowNotFound: "can not find the row: %s",
+ ErrTableStateCantNone: "table %s can't be in none state",
+ ErrColumnStateCantNone: "column %s can't be in none state",
+ ErrColumnStateNonPublic: "can not use non-public column",
+ ErrIndexStateCantNone: "index %s can't be in none state",
+ ErrInvalidRecordKey: "invalid record key",
+ ErrUnsupportedValueForVar: "variable '%s' does not yet support value: %s",
+ ErrUnsupportedIsolationLevel: "The isolation level '%s' is not supported. 
Set tidb_skip_isolation_level_check=1 to skip this error", + ErrInvalidDDLWorker: "Invalid DDL worker", + ErrUnsupportedDDLOperation: "Unsupported %s", + ErrNotOwner: "TiDB server is not a DDL owner", + ErrCantDecodeIndex: "Cannot decode index value, because %s", + ErrInvalidDDLJob: "Invalid DDL job", + ErrInvalidDDLJobFlag: "Invalid DDL job flag", + ErrWaitReorgTimeout: "Timeout waiting for data reorganization", + ErrInvalidStoreVersion: "Invalid storage current version: %d", + ErrUnknownTypeLength: "Unknown length for type %d", + ErrUnknownFractionLength: "Unknown length for type %d and fraction %d", + ErrInvalidDDLJobVersion: "Version %d of DDL job is greater than current one: %d", + ErrInvalidSplitRegionRanges: "Failed to split region ranges", + ErrReorgPanic: "Reorg worker panic", + ErrInvalidDDLState: "Invalid %s state: %v", + ErrCancelledDDLJob: "Cancelled DDL job", + ErrRepairTable: "Failed to repair table: %s", + ErrLoadPrivilege: "Load privilege table fail: %s", + ErrInvalidPrivilegeType: "unknown privilege type %s", + ErrUnknownFieldType: "unknown field type", + ErrInvalidSequence: "invalid sequence", + ErrInvalidType: "invalid type", + ErrCantGetValidID: "cannot get valid auto-increment id in retry", + ErrCantSetToNull: "cannot set variable to null", + ErrSnapshotTooOld: "snapshot is older than GC safe point %s", + ErrInvalidTableID: "invalid TableID", + ErrInvalidAutoRandom: "Invalid auto random: %s", + ErrInvalidHashKeyFlag: "invalid encoded hash key flag", + ErrInvalidListIndex: "invalid list index", + ErrInvalidListMetaData: "invalid list meta data", + ErrWriteOnSnapshot: "write on snapshot", + ErrInvalidKey: "invalid key", + ErrInvalidIndexKey: "invalid index key", + ErrDataInConsistent: "data isn't equal", + ErrDDLJobNotFound: "DDL Job:%v not found", + ErrCancelFinishedDDLJob: "This job:%v is finished, so can't be cancelled", + ErrCannotCancelDDLJob: "This job:%v is almost finished, can't be cancelled now", + + ErrUnsupportedType: "Unsupported type %T", + ErrAnalyzeMissIndex: "Index '%s' in field list does not exist in table '%s'", + ErrCartesianProductUnsupported: "Cartesian product is unsupported", + ErrPreparedStmtNotFound: "Prepared statement not found", + ErrWrongParamCount: "Wrong parameter count", + ErrSchemaChanged: "Schema has changed", + ErrUnknownPlan: "Unknown plan", + ErrPrepareMulti: "Can not prepare multiple statements", + ErrPrepareDDL: "Can not prepare DDL statements with parameters", + ErrResultIsEmpty: "Result is empty", + ErrBuildExecutor: "Failed to build executor", + ErrBatchInsertFail: "Batch insert failed, please clean the table and try again.", + ErrGetStartTS: "Can not get start ts", + ErrPrivilegeCheckFail: "privilege check fail", // this error message should begin lowercased to be compatible with the test + ErrInvalidWildCard: "Wildcard fields without any table name appears in wrong place", + ErrMixOfGroupFuncAndFieldsIncompatible: "In aggregated query without GROUP BY, expression #%d of SELECT list contains nonaggregated column '%s'; this is incompatible with sql_mode=only_full_group_by", + + // TiKV/PD errors. 
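// [Editor's note] Illustrative sketch, not part of this patch: the format strings in this
// map appear to be the MySQLErrName entries that NewErr/NewErrf in parser/mysql/error.go
// (added later in this diff) look up when building a client-facing SQLError, with the
// SQLSTATE coming from MySQLState in parser/mysql/state.go. The import path below matches
// the one used by parser/parser.go in this patch; the printed code/state values are only
// indicative.
//
//	import (
//		"fmt"
//
//		"github.com/pingcap/tidb/parser/mysql"
//	)
//
//	func errExample() {
//		e := mysql.NewErr(mysql.ErrNoDB)                               // message from MySQLErrName, state from MySQLState
//		fmt.Println(e.Error())                                         // e.g. "ERROR 1046 (3D000): No database selected"
//		e = mysql.NewErrf(mysql.ErrNoDB, "custom text: %s", "details") // keeps the code/state, overrides the message
//		fmt.Println(e.Error())
//	}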
+ ErrPDServerTimeout: "PD server timeout", + ErrTiKVServerTimeout: "TiKV server timeout", + ErrTiKVServerBusy: "TiKV server is busy", + ErrResolveLockTimeout: "Resolve lock timeout", + ErrRegionUnavailable: "Region is unavailable", + ErrGCTooEarly: "GC life time is shorter than transaction duration, transaction starts at %v, GC safe point is %v", + ErrWriteConflict: "Write conflict, txnStartTS=%d, conflictStartTS=%d, conflictCommitTS=%d, key=%s", + ErrTiKVStoreLimit: "Store token is up to the limit, store id = %d", +} diff --git a/parser/mysql/error.go b/parser/mysql/error.go new file mode 100644 index 0000000..fd6316c --- /dev/null +++ b/parser/mysql/error.go @@ -0,0 +1,71 @@ +// Copyright 2015 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package mysql + +import ( + "errors" + "fmt" +) + +// Portable analogs of some common call errors. +var ( + ErrBadConn = errors.New("connection was bad") + ErrMalformPacket = errors.New("malform packet error") +) + +// SQLError records an error information, from executing SQL. +type SQLError struct { + Code uint16 + Message string + State string +} + +// Error prints errors, with a formatted string. +func (e *SQLError) Error() string { + return fmt.Sprintf("ERROR %d (%s): %s", e.Code, e.State, e.Message) +} + +// NewErr generates a SQL error, with an error code and default format specifier defined in MySQLErrName. +func NewErr(errCode uint16, args ...interface{}) *SQLError { + e := &SQLError{Code: errCode} + + if s, ok := MySQLState[errCode]; ok { + e.State = s + } else { + e.State = DefaultMySQLState + } + + if format, ok := MySQLErrName[errCode]; ok { + e.Message = fmt.Sprintf(format, args...) + } else { + e.Message = fmt.Sprint(args...) + } + + return e +} + +// NewErrf creates a SQL error, with an error code and a format specifier. +func NewErrf(errCode uint16, format string, args ...interface{}) *SQLError { + e := &SQLError{Code: errCode} + + if s, ok := MySQLState[errCode]; ok { + e.State = s + } else { + e.State = DefaultMySQLState + } + + e.Message = fmt.Sprintf(format, args...) + + return e +} diff --git a/parser/mysql/error_test.go b/parser/mysql/error_test.go new file mode 100644 index 0000000..a57c548 --- /dev/null +++ b/parser/mysql/error_test.go @@ -0,0 +1,37 @@ +// Copyright 2015 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package mysql + +import ( + . 
"github.com/pingcap/check" +) + +var _ = Suite(&testSQLErrorSuite{}) + +type testSQLErrorSuite struct { +} + +func (s *testSQLErrorSuite) TestSQLError(c *C) { + e := NewErrf(ErrNoDB, "no db error") + c.Assert(len(e.Error()), Greater, 0) + + e = NewErrf(0, "customized error") + c.Assert(len(e.Error()), Greater, 0) + + e = NewErr(ErrNoDB) + c.Assert(len(e.Error()), Greater, 0) + + e = NewErr(0, "customized error") + c.Assert(len(e.Error()), Greater, 0) +} diff --git a/parser/mysql/locale_format.go b/parser/mysql/locale_format.go new file mode 100644 index 0000000..b483f94 --- /dev/null +++ b/parser/mysql/locale_format.go @@ -0,0 +1,98 @@ +package mysql + +import ( + "bytes" + "strconv" + "strings" + "unicode" + + "github.com/pingcap/errors" +) + +func formatENUS(number string, precision string) (string, error) { + var buffer bytes.Buffer + if unicode.IsDigit(rune(precision[0])) { + for i, v := range precision { + if unicode.IsDigit(v) { + continue + } + precision = precision[:i] + break + } + } else { + precision = "0" + } + if number[0] == '-' && number[1] == '.' { + number = strings.Replace(number, "-", "-0", 1) + } else if number[0] == '.' { + number = strings.Replace(number, ".", "0.", 1) + } + + if (number[:1] == "-" && !unicode.IsDigit(rune(number[1]))) || + (!unicode.IsDigit(rune(number[0])) && number[:1] != "-") { + buffer.Write([]byte{'0'}) + position, err := strconv.ParseUint(precision, 10, 64) + if err == nil && position > 0 { + buffer.Write([]byte{'.'}) + buffer.WriteString(strings.Repeat("0", int(position))) + } + return buffer.String(), nil + } else if number[:1] == "-" { + buffer.Write([]byte{'-'}) + number = number[1:] + } + + for i, v := range number { + if unicode.IsDigit(v) { + continue + } else if i == 1 && number[1] == '.' { + continue + } else if v == '.' && number[1] != '.' { + continue + } else { + number = number[:i] + break + } + } + + comma := []byte{','} + parts := strings.Split(number, ".") + pos := 0 + if len(parts[0])%3 != 0 { + pos += len(parts[0]) % 3 + buffer.WriteString(parts[0][:pos]) + buffer.Write(comma) + } + for ; pos < len(parts[0]); pos += 3 { + buffer.WriteString(parts[0][pos : pos+3]) + buffer.Write(comma) + } + buffer.Truncate(buffer.Len() - 1) + + position, err := strconv.ParseUint(precision, 10, 64) + if err == nil { + if position > 0 { + buffer.Write([]byte{'.'}) + if len(parts) == 2 { + if uint64(len(parts[1])) >= position { + buffer.WriteString(parts[1][:position]) + } else { + buffer.WriteString(parts[1]) + buffer.WriteString(strings.Repeat("0", int(position)-len(parts[1]))) + } + } else { + buffer.WriteString(strings.Repeat("0", int(position))) + } + } + } + + return buffer.String(), nil +} + +func formatZHCN(number string, precision string) (string, error) { + return "", errors.New("not implemented") +} + +func formatNotSupport(number string, precision string) (string, error) { + return "", errors.New("not support for the specific locale") +} diff --git a/parser/mysql/state.go b/parser/mysql/state.go new file mode 100644 index 0000000..b3e8252 --- /dev/null +++ b/parser/mysql/state.go @@ -0,0 +1,259 @@ +// Copyright 2015 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package mysql + +const ( + // DefaultMySQLState is default state of the mySQL + DefaultMySQLState = "HY000" +) + +// MySQLState maps error code to MySQL SQLSTATE value. +// The values are taken from ANSI SQL and ODBC and are more standardized. +var MySQLState = map[uint16]string{ + ErrDupKey: "23000", + ErrOutofMemory: "HY001", + ErrOutOfSortMemory: "HY001", + ErrConCount: "08004", + ErrBadHost: "08S01", + ErrHandshake: "08S01", + ErrDBaccessDenied: "42000", + ErrAccessDenied: "28000", + ErrNoDB: "3D000", + ErrUnknownCom: "08S01", + ErrBadNull: "23000", + ErrBadDB: "42000", + ErrTableExists: "42S01", + ErrBadTable: "42S02", + ErrNonUniq: "23000", + ErrServerShutdown: "08S01", + ErrBadField: "42S22", + ErrFieldNotInGroupBy: "42000", + ErrWrongSumSelect: "42000", + ErrWrongGroupField: "42000", + ErrWrongValueCount: "21S01", + ErrTooLongIdent: "42000", + ErrDupFieldName: "42S21", + ErrDupKeyName: "42000", + ErrDupEntry: "23000", + ErrWrongFieldSpec: "42000", + ErrParse: "42000", + ErrEmptyQuery: "42000", + ErrNonuniqTable: "42000", + ErrInvalidDefault: "42000", + ErrMultiplePriKey: "42000", + ErrTooManyKeys: "42000", + ErrTooManyKeyParts: "42000", + ErrTooLongKey: "42000", + ErrKeyColumnDoesNotExits: "42000", + ErrBlobUsedAsKey: "42000", + ErrTooBigFieldlength: "42000", + ErrWrongAutoKey: "42000", + ErrForcingClose: "08S01", + ErrIpsock: "08S01", + ErrNoSuchIndex: "42S12", + ErrWrongFieldTerminators: "42000", + ErrBlobsAndNoTerminated: "42000", + ErrCantRemoveAllFields: "42000", + ErrCantDropFieldOrKey: "42000", + ErrBlobCantHaveDefault: "42000", + ErrWrongDBName: "42000", + ErrWrongTableName: "42000", + ErrTooBigSelect: "42000", + ErrUnknownProcedure: "42000", + ErrWrongParamcountToProcedure: "42000", + ErrUnknownTable: "42S02", + ErrFieldSpecifiedTwice: "42000", + ErrUnsupportedExtension: "42000", + ErrTableMustHaveColumns: "42000", + ErrUnknownCharacterSet: "42000", + ErrTooBigRowsize: "42000", + ErrWrongOuterJoin: "42000", + ErrNullColumnInIndex: "42000", + ErrPasswordAnonymousUser: "42000", + ErrPasswordNotAllowed: "42000", + ErrPasswordNoMatch: "42000", + ErrWrongValueCountOnRow: "21S01", + ErrInvalidUseOfNull: "22004", + ErrRegexp: "42000", + ErrMixOfGroupFuncAndFields: "42000", + ErrNonexistingGrant: "42000", + ErrTableaccessDenied: "42000", + ErrColumnaccessDenied: "42000", + ErrIllegalGrantForTable: "42000", + ErrGrantWrongHostOrUser: "42000", + ErrNoSuchTable: "42S02", + ErrNonexistingTableGrant: "42000", + ErrNotAllowedCommand: "42000", + ErrSyntax: "42000", + ErrAbortingConnection: "08S01", + ErrNetPacketTooLarge: "08S01", + ErrNetReadErrorFromPipe: "08S01", + ErrNetFcntl: "08S01", + ErrNetPacketsOutOfOrder: "08S01", + ErrNetUncompress: "08S01", + ErrNetRead: "08S01", + ErrNetReadInterrupted: "08S01", + ErrNetErrorOnWrite: "08S01", + ErrNetWriteInterrupted: "08S01", + ErrTooLongString: "42000", + ErrTableCantHandleBlob: "42000", + ErrTableCantHandleAutoIncrement: "42000", + ErrWrongColumnName: "42000", + ErrWrongKeyColumn: "42000", + ErrDupUnique: "23000", + ErrBlobKeyWithoutLength: "42000", + ErrPrimaryCantHaveNull: "42000", + ErrTooManyRows: "42000", + ErrRequiresPrimaryKey: "42000", + ErrKeyDoesNotExist: "42000", + 
ErrCheckNoSuchTable: "42000", + ErrCheckNotImplemented: "42000", + ErrCantDoThisDuringAnTransaction: "25000", + ErrNewAbortingConnection: "08S01", + ErrMasterNetRead: "08S01", + ErrMasterNetWrite: "08S01", + ErrTooManyUserConnections: "42000", + ErrReadOnlyTransaction: "25000", + ErrNoPermissionToCreateUser: "42000", + ErrLockDeadlock: "40001", + ErrNoReferencedRow: "23000", + ErrRowIsReferenced: "23000", + ErrConnectToMaster: "08S01", + ErrWrongNumberOfColumnsInSelect: "21000", + ErrUserLimitReached: "42000", + ErrSpecificAccessDenied: "42000", + ErrNoDefault: "42000", + ErrWrongValueForVar: "42000", + ErrWrongTypeForVar: "42000", + ErrCantUseOptionHere: "42000", + ErrNotSupportedYet: "42000", + ErrWrongFkDef: "42000", + ErrOperandColumns: "21000", + ErrSubqueryNo1Row: "21000", + ErrIllegalReference: "42S22", + ErrDerivedMustHaveAlias: "42000", + ErrSelectReduced: "01000", + ErrTablenameNotAllowedHere: "42000", + ErrNotSupportedAuthMode: "08004", + ErrSpatialCantHaveNull: "42000", + ErrCollationCharsetMismatch: "42000", + ErrWarnTooFewRecords: "01000", + ErrWarnTooManyRecords: "01000", + ErrWarnNullToNotnull: "22004", + ErrWarnDataOutOfRange: "22003", + WarnDataTruncated: "01000", + ErrWrongNameForIndex: "42000", + ErrWrongNameForCatalog: "42000", + ErrUnknownStorageEngine: "42000", + ErrTruncatedWrongValue: "22007", + ErrSpNoRecursiveCreate: "2F003", + ErrSpAlreadyExists: "42000", + ErrSpDoesNotExist: "42000", + ErrSpLilabelMismatch: "42000", + ErrSpLabelRedefine: "42000", + ErrSpLabelMismatch: "42000", + ErrSpUninitVar: "01000", + ErrSpBadselect: "0A000", + ErrSpBadreturn: "42000", + ErrSpBadstatement: "0A000", + ErrUpdateLogDeprecatedIgnored: "42000", + ErrUpdateLogDeprecatedTranslated: "42000", + ErrQueryInterrupted: "70100", + ErrSpWrongNoOfArgs: "42000", + ErrSpCondMismatch: "42000", + ErrSpNoreturn: "42000", + ErrSpNoreturnend: "2F005", + ErrSpBadCursorQuery: "42000", + ErrSpBadCursorSelect: "42000", + ErrSpCursorMismatch: "42000", + ErrSpCursorAlreadyOpen: "24000", + ErrSpCursorNotOpen: "24000", + ErrSpUndeclaredVar: "42000", + ErrSpFetchNoData: "02000", + ErrSpDupParam: "42000", + ErrSpDupVar: "42000", + ErrSpDupCond: "42000", + ErrSpDupCurs: "42000", + ErrSpSubselectNyi: "0A000", + ErrStmtNotAllowedInSfOrTrg: "0A000", + ErrSpVarcondAfterCurshndlr: "42000", + ErrSpCursorAfterHandler: "42000", + ErrSpCaseNotFound: "20000", + ErrDivisionByZero: "22012", + ErrIllegalValueForType: "22007", + ErrProcaccessDenied: "42000", + ErrXaerNota: "XAE04", + ErrXaerInval: "XAE05", + ErrXaerRmfail: "XAE07", + ErrXaerOutside: "XAE09", + ErrXaerRmerr: "XAE03", + ErrXaRbrollback: "XA100", + ErrNonexistingProcGrant: "42000", + ErrDataTooLong: "22001", + ErrSpBadSQLstate: "42000", + ErrCantCreateUserWithGrant: "42000", + ErrSpDupHandler: "42000", + ErrSpNotVarArg: "42000", + ErrSpNoRetset: "0A000", + ErrCantCreateGeometryObject: "22003", + ErrTooBigScale: "42000", + ErrTooBigPrecision: "42000", + ErrMBiggerThanD: "42000", + ErrTooLongBody: "42000", + ErrTooBigDisplaywidth: "42000", + ErrXaerDupid: "XAE08", + ErrDatetimeFunctionOverflow: "22008", + ErrRowIsReferenced2: "23000", + ErrNoReferencedRow2: "23000", + ErrSpBadVarShadow: "42000", + ErrSpWrongName: "42000", + ErrSpNoAggregate: "42000", + ErrMaxPreparedStmtCountReached: "42000", + ErrNonGroupingFieldUsed: "42000", + ErrForeignDuplicateKeyOldUnused: "23000", + ErrCantChangeTxCharacteristics: "25001", + ErrWrongParamcountToNativeFct: "42000", + ErrWrongParametersToNativeFct: "42000", + ErrWrongParametersToStoredFct: "42000", + 
ErrDupEntryWithKeyName: "23000", + ErrXaRbtimeout: "XA106", + ErrXaRbdeadlock: "XA102", + ErrFuncInexistentNameCollision: "42000", + ErrDupSignalSet: "42000", + ErrSignalWarn: "01000", + ErrSignalNotFound: "02000", + ErrSignalException: "HY000", + ErrResignalWithoutActiveHandler: "0K000", + ErrSpatialMustHaveGeomCol: "42000", + ErrDataOutOfRange: "22003", + ErrAccessDeniedNoPassword: "28000", + ErrTruncateIllegalFk: "42000", + ErrDaInvalidConditionNumber: "35000", + ErrForeignDuplicateKeyWithChildInfo: "23000", + ErrForeignDuplicateKeyWithoutChildInfo: "23000", + ErrCantExecuteInReadOnlyTransaction: "25006", + ErrAlterOperationNotSupported: "0A000", + ErrAlterOperationNotSupportedReason: "0A000", + ErrDupUnknownInIndex: "23000", + ErrBadGeneratedColumn: "HY000", + ErrUnsupportedOnGeneratedColumn: "HY000", + ErrGeneratedColumnNonPrior: "HY000", + ErrDependentByGeneratedColumn: "HY000", + ErrInvalidJSONText: "22032", + ErrInvalidJSONPath: "42000", + ErrInvalidJSONData: "22032", + ErrInvalidJSONPathWildcard: "42000", + ErrJSONUsedAsKey: "42000", + ErrInvalidJSONPathArrayCell: "42000", +} diff --git a/parser/mysql/type.go b/parser/mysql/type.go new file mode 100644 index 0000000..9ea32f9 --- /dev/null +++ b/parser/mysql/type.go @@ -0,0 +1,155 @@ +// Copyright 2015 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package mysql + +// MySQL type information. +const ( + TypeDecimal byte = 0 + TypeTiny byte = 1 + TypeShort byte = 2 + TypeLong byte = 3 + TypeFloat byte = 4 + TypeDouble byte = 5 + TypeNull byte = 6 + TypeTimestamp byte = 7 + TypeLonglong byte = 8 + TypeInt24 byte = 9 + TypeDate byte = 10 + /* TypeDuration original name was TypeTime, renamed to TypeDuration to resolve the conflict with Go type Time.*/ + TypeDuration byte = 11 + TypeDatetime byte = 12 + TypeYear byte = 13 + TypeNewDate byte = 14 + TypeVarchar byte = 15 + TypeBit byte = 16 + + TypeJSON byte = 0xf5 + TypeNewDecimal byte = 0xf6 + TypeEnum byte = 0xf7 + TypeSet byte = 0xf8 + TypeTinyBlob byte = 0xf9 + TypeMediumBlob byte = 0xfa + TypeLongBlob byte = 0xfb + TypeBlob byte = 0xfc + TypeVarString byte = 0xfd + TypeString byte = 0xfe + TypeGeometry byte = 0xff +) + +// TypeUnspecified is an uninitialized type. TypeDecimal is not used in MySQL. +const TypeUnspecified = TypeDecimal + +// Flag information. 
+const ( + NotNullFlag uint = 1 << 0 /* Field can't be NULL */ + PriKeyFlag uint = 1 << 1 /* Field is part of a primary key */ + UniqueKeyFlag uint = 1 << 2 /* Field is part of a unique key */ + MultipleKeyFlag uint = 1 << 3 /* Field is part of a key */ + BlobFlag uint = 1 << 4 /* Field is a blob */ + UnsignedFlag uint = 1 << 5 /* Field is unsigned */ + ZerofillFlag uint = 1 << 6 /* Field is zerofill */ + BinaryFlag uint = 1 << 7 /* Field is binary */ + EnumFlag uint = 1 << 8 /* Field is an enum */ + AutoIncrementFlag uint = 1 << 9 /* Field is an auto increment field */ + TimestampFlag uint = 1 << 10 /* Field is a timestamp */ + SetFlag uint = 1 << 11 /* Field is a set */ + NoDefaultValueFlag uint = 1 << 12 /* Field doesn't have a default value */ + OnUpdateNowFlag uint = 1 << 13 /* Field is set to NOW on UPDATE */ + PartKeyFlag uint = 1 << 14 /* Intern: Part of some keys */ + NumFlag uint = 1 << 15 /* Field is a num (for clients) */ + + GroupFlag uint = 1 << 15 /* Internal: Group field */ + UniqueFlag uint = 1 << 16 /* Internal: Used by sql_yacc */ + BinCmpFlag uint = 1 << 17 /* Internal: Used by sql_yacc */ + ParseToJSONFlag uint = 1 << 18 /* Internal: Used when we want to parse string to JSON in CAST */ + IsBooleanFlag uint = 1 << 19 /* Internal: Used for telling boolean literal from integer */ + PreventNullInsertFlag uint = 1 << 20 /* Prevent this Field from inserting NULL values */ +) + +// TypeInt24 bounds. +const ( + MaxUint24 = 1<<24 - 1 + MaxInt24 = 1<<23 - 1 + MinInt24 = -1 << 23 +) + +// HasNotNullFlag checks if NotNullFlag is set. +func HasNotNullFlag(flag uint) bool { + return (flag & NotNullFlag) > 0 +} + +// HasNoDefaultValueFlag checks if NoDefaultValueFlag is set. +func HasNoDefaultValueFlag(flag uint) bool { + return (flag & NoDefaultValueFlag) > 0 +} + +// HasAutoIncrementFlag checks if AutoIncrementFlag is set. +func HasAutoIncrementFlag(flag uint) bool { + return (flag & AutoIncrementFlag) > 0 +} + +// HasUnsignedFlag checks if UnsignedFlag is set. +func HasUnsignedFlag(flag uint) bool { + return (flag & UnsignedFlag) > 0 +} + +// HasZerofillFlag checks if ZerofillFlag is set. +func HasZerofillFlag(flag uint) bool { + return (flag & ZerofillFlag) > 0 +} + +// HasBinaryFlag checks if BinaryFlag is set. +func HasBinaryFlag(flag uint) bool { + return (flag & BinaryFlag) > 0 +} + +// HasPriKeyFlag checks if PriKeyFlag is set. +func HasPriKeyFlag(flag uint) bool { + return (flag & PriKeyFlag) > 0 +} + +// HasUniKeyFlag checks if UniqueKeyFlag is set. +func HasUniKeyFlag(flag uint) bool { + return (flag & UniqueKeyFlag) > 0 +} + +// HasMultipleKeyFlag checks if MultipleKeyFlag is set. +func HasMultipleKeyFlag(flag uint) bool { + return (flag & MultipleKeyFlag) > 0 +} + +// HasTimestampFlag checks if HasTimestampFlag is set. +func HasTimestampFlag(flag uint) bool { + return (flag & TimestampFlag) > 0 +} + +// HasOnUpdateNowFlag checks if OnUpdateNowFlag is set. +func HasOnUpdateNowFlag(flag uint) bool { + return (flag & OnUpdateNowFlag) > 0 +} + +// HasParseToJSONFlag checks if ParseToJSONFlag is set. +func HasParseToJSONFlag(flag uint) bool { + return (flag & ParseToJSONFlag) > 0 +} + +// HasIsBooleanFlag checks if IsBooleanFlag is set. +func HasIsBooleanFlag(flag uint) bool { + return (flag & IsBooleanFlag) > 0 +} + +// HasPreventNullInsertFlag checks if PreventNullInsertFlag is set. 
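// [Editor's note] A minimal sketch, not part of this patch, of how the flag bits above are
// typically combined for a column and then tested with the Has*Flag helpers; the import
// path matches the one used by parser/parser.go in this diff:
//
//	import "github.com/pingcap/tidb/parser/mysql"
//
//	func flagExample() bool {
//		flag := mysql.NotNullFlag | mysql.UnsignedFlag | mysql.PriKeyFlag // NOT NULL UNSIGNED PRIMARY KEY
//		return mysql.HasNotNullFlag(flag) && mysql.HasUnsignedFlag(flag) &&
//			mysql.HasPriKeyFlag(flag) && !mysql.HasZerofillFlag(flag) // true
//	}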
+func HasPreventNullInsertFlag(flag uint) bool { + return (flag & PreventNullInsertFlag) > 0 +} diff --git a/parser/mysql/type_test.go b/parser/mysql/type_test.go new file mode 100644 index 0000000..a5139b1 --- /dev/null +++ b/parser/mysql/type_test.go @@ -0,0 +1,37 @@ +// Copyright 2015 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package mysql + +import ( + . "github.com/pingcap/check" +) + +var _ = Suite(&testTypeSuite{}) + +type testTypeSuite struct{} + +func (s *testTypeSuite) TestFlags(c *C) { + c.Assert(HasNotNullFlag(NotNullFlag), IsTrue) + c.Assert(HasUniKeyFlag(UniqueKeyFlag), IsTrue) + c.Assert(HasNotNullFlag(NotNullFlag), IsTrue) + c.Assert(HasNoDefaultValueFlag(NoDefaultValueFlag), IsTrue) + c.Assert(HasAutoIncrementFlag(AutoIncrementFlag), IsTrue) + c.Assert(HasUnsignedFlag(UnsignedFlag), IsTrue) + c.Assert(HasZerofillFlag(ZerofillFlag), IsTrue) + c.Assert(HasBinaryFlag(BinaryFlag), IsTrue) + c.Assert(HasPriKeyFlag(PriKeyFlag), IsTrue) + c.Assert(HasMultipleKeyFlag(MultipleKeyFlag), IsTrue) + c.Assert(HasTimestampFlag(TimestampFlag), IsTrue) + c.Assert(HasOnUpdateNowFlag(OnUpdateNowFlag), IsTrue) +} diff --git a/parser/mysql/util.go b/parser/mysql/util.go new file mode 100644 index 0000000..859519e --- /dev/null +++ b/parser/mysql/util.go @@ -0,0 +1,95 @@ +// Copyright 2015 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package mysql + +type lengthAndDecimal struct { + length int + decimal int +} + +// defaultLengthAndDecimal provides default Flen and Decimal for fields +// from CREATE TABLE when they are unspecified. +var defaultLengthAndDecimal = map[byte]lengthAndDecimal{ + TypeBit: {1, 0}, + TypeTiny: {4, 0}, + TypeShort: {6, 0}, + TypeInt24: {9, 0}, + TypeLong: {11, 0}, + TypeLonglong: {20, 0}, + TypeDouble: {22, -1}, + TypeFloat: {12, -1}, + TypeNewDecimal: {11, 0}, + TypeDuration: {10, 0}, + TypeDate: {10, 0}, + TypeTimestamp: {19, 0}, + TypeDatetime: {19, 0}, + TypeYear: {4, 0}, + TypeString: {1, 0}, + TypeVarchar: {5, 0}, + TypeVarString: {5, 0}, + TypeTinyBlob: {255, 0}, + TypeBlob: {65535, 0}, + TypeMediumBlob: {16777215, 0}, + TypeLongBlob: {4294967295, 0}, + TypeJSON: {4294967295, 0}, + TypeNull: {0, 0}, + TypeSet: {-1, 0}, + TypeEnum: {-1, 0}, +} + +// IsIntegerType indicate whether tp is an integer type. +func IsIntegerType(tp byte) bool { + switch tp { + case TypeTiny, TypeShort, TypeInt24, TypeLong, TypeLonglong: + return true + } + return false +} + +// GetDefaultFieldLengthAndDecimal returns the default display length (flen) and decimal length for column. +// Call this when no Flen assigned in ddl. +// or column value is calculated from an expression. 
+// For example: "select count(*) from t;", the column type is int64 and Flen in ResultField will be 21. +// See https://dev.mysql.com/doc/refman/5.7/en/storage-requirements.html +func GetDefaultFieldLengthAndDecimal(tp byte) (flen int, decimal int) { + val, ok := defaultLengthAndDecimal[tp] + if ok { + return val.length, val.decimal + } + return -1, -1 +} + +// defaultLengthAndDecimal provides default Flen and Decimal for fields +// from CAST when they are unspecified. +var defaultLengthAndDecimalForCast = map[byte]lengthAndDecimal{ + TypeString: {0, -1}, // Flen & Decimal differs. + TypeDate: {10, 0}, + TypeDatetime: {19, 0}, + TypeNewDecimal: {11, 0}, + TypeDuration: {10, 0}, + TypeLonglong: {22, 0}, + TypeDouble: {22, -1}, + TypeFloat: {12, -1}, + TypeJSON: {4194304, 0}, // Flen differs. +} + +// GetDefaultFieldLengthAndDecimalForCast returns the default display length (flen) and decimal length for casted column +// when flen or decimal is not specified. +func GetDefaultFieldLengthAndDecimalForCast(tp byte) (flen int, decimal int) { + val, ok := defaultLengthAndDecimalForCast[tp] + if ok { + return val.length, val.decimal + } + return -1, -1 +} diff --git a/parser/opcode/opcode.go b/parser/opcode/opcode.go new file mode 100644 index 0000000..7e85781 --- /dev/null +++ b/parser/opcode/opcode.go @@ -0,0 +1,138 @@ +// Copyright 2015 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package opcode + +import ( + "fmt" + "io" +) + +// Op is opcode type. +type Op int + +// List operators. +const ( + LogicAnd Op = iota + 1 + LeftShift + RightShift + LogicOr + GE + LE + EQ + NE + LT + GT + Plus + Minus + And + Or + Mod + Xor + Div + Mul + Not + BitNeg + IntDiv + LogicXor + NullEQ + In + Like + Case + Regexp + IsNull + IsTruth + IsFalsity +) + +// Ops maps opcode to string. +var Ops = map[Op]string{ + LogicAnd: "and", + LogicOr: "or", + LogicXor: "xor", + LeftShift: "leftshift", + RightShift: "rightshift", + GE: "ge", + LE: "le", + EQ: "eq", + NE: "ne", + LT: "lt", + GT: "gt", + Plus: "plus", + Minus: "minus", + And: "bitand", + Or: "bitor", + Mod: "mod", + Xor: "bitxor", + Div: "div", + Mul: "mul", + Not: "not", + BitNeg: "bitneg", + IntDiv: "intdiv", + NullEQ: "nulleq", + In: "in", + Like: "like", + Case: "case", + Regexp: "regexp", + IsNull: "isnull", + IsTruth: "istrue", + IsFalsity: "isfalse", +} + +// String implements Stringer interface. +func (o Op) String() string { + str, ok := Ops[o] + if !ok { + panic(fmt.Sprintf("%d", o)) + } + + return str +} + +var opsLiteral = map[Op]string{ + LogicAnd: " AND ", + LogicOr: " OR ", + LogicXor: " XOR ", + LeftShift: "<<", + RightShift: ">>", + GE: ">=", + LE: "<=", + EQ: "=", + NE: "!=", + LT: "<", + GT: ">", + Plus: "+", + Minus: "-", + And: "&", + Or: "|", + Mod: "%", + Xor: "^", + Div: "/", + Mul: "*", + Not: "!", + BitNeg: "~", + IntDiv: "DIV", + NullEQ: "<=>", + In: "IN", + Like: "LIKE", + Case: "CASE", + Regexp: "REGEXP", + IsNull: "IS NULL", + IsTruth: "IS TRUE", + IsFalsity: "IS FALSE", +} + +// Format the ExprNode into a Writer. 
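// [Editor's note] A short usage sketch, not part of this patch, for the opcode package
// above: String returns the symbolic name from Ops, while Format writes the SQL literal
// from opsLiteral. Import path as used by parser/parser.go in this diff.
//
//	import (
//		"os"
//
//		"github.com/pingcap/tidb/parser/opcode"
//	)
//
//	func opExample() {
//		op := opcode.GE
//		_ = op.String()      // "ge"
//		op.Format(os.Stdout) // writes ">="
//	}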
+func (o Op) Format(w io.Writer) { + fmt.Fprintf(w, "%s", opsLiteral[o]) +} diff --git a/parser/parser.go b/parser/parser.go new file mode 100644 index 0000000..05b9a84 --- /dev/null +++ b/parser/parser.go @@ -0,0 +1,9363 @@ +// Code generated by goyacc DO NOT EDIT. +// CAUTION: Generated file - DO NOT EDIT. + +// Copyright 2013 The ql Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSES/QL-LICENSE file. + +// Copyright 2015 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +// Initial yacc source generated by ebnf2y[1] +// at 2013-10-04 23:10:47.861401015 +0200 CEST +// +// $ ebnf2y -o ql.y -oe ql.ebnf -start StatementList -pkg ql -p _ +// +// [1]: http://github.com/cznic/ebnf2y + +package parser + +import __yyfmt__ "fmt" + +import ( + "strings" + + "github.com/pingcap/tidb/parser/ast" + "github.com/pingcap/tidb/parser/charset" + "github.com/pingcap/tidb/parser/model" + "github.com/pingcap/tidb/parser/mysql" + "github.com/pingcap/tidb/parser/opcode" + "github.com/pingcap/tidb/parser/types" +) + +type yySymType struct { + yys int + offset int // offset + item interface{} + ident string + expr ast.ExprNode + statement ast.StmtNode +} + +type yyXError struct { + state, xsym int +} + +const ( + yyDefault = 57988 + yyEOFCode = 57344 + account = 57556 + action = 57557 + add = 57359 + addDate = 57819 + admin = 57871 + advise = 57558 + after = 57559 + against = 57560 + algorithm = 57562 + all = 57360 + alter = 57361 + always = 57561 + analyze = 57362 + and = 57363 + andand = 57354 + andnot = 57955 + any = 57563 + as = 57364 + asc = 57365 + ascii = 57564 + assignmentEq = 57956 + autoIncrement = 57565 + autoRandom = 57566 + avg = 57568 + avgRowLength = 57567 + begin = 57569 + between = 57366 + bigIntType = 57367 + binaryType = 57368 + binding = 57809 + bindings = 57810 + binlog = 57570 + bitAnd = 57820 + bitLit = 57954 + bitOr = 57821 + bitType = 57571 + bitXor = 57822 + blobType = 57369 + block = 57572 + boolType = 57574 + booleanType = 57573 + both = 57370 + bound = 57823 + btree = 57575 + buckets = 57872 + builtinAddDate = 57924 + builtinBitAnd = 57925 + builtinBitOr = 57926 + builtinBitXor = 57927 + builtinCast = 57928 + builtinCount = 57929 + builtinCurDate = 57930 + builtinCurTime = 57931 + builtinDateAdd = 57932 + builtinDateSub = 57933 + builtinExtract = 57934 + builtinGroupConcat = 57935 + builtinMax = 57936 + builtinMin = 57937 + builtinNow = 57938 + builtinPosition = 57939 + builtinStddevPop = 57944 + builtinStddevSamp = 57945 + builtinSubDate = 57940 + builtinSubstring = 57941 + builtinSum = 57942 + builtinSysDate = 57943 + builtinTrim = 57946 + builtinUser = 57947 + builtinVarPop = 57948 + builtinVarSamp = 57949 + builtins = 57873 + by = 57371 + byteType = 57576 + cache = 57577 + cancel = 57874 + capture = 57579 + cascade = 57372 + cascaded = 57578 + caseKwd = 57373 + cast = 57824 + change = 57374 + charType = 57376 + character = 57375 + charsetKwd = 57580 + check = 57377 + checksum = 57581 + cipher = 57582 + cleanup = 57583 + client = 57584 + cmSketch = 57875 + 
coalesce = 57585 + collate = 57378 + collation = 57586 + column = 57379 + columnFormat = 57587 + columns = 57588 + comment = 57589 + commit = 57590 + committed = 57591 + compact = 57592 + compressed = 57593 + compression = 57594 + connection = 57595 + consistent = 57596 + constraint = 57380 + context = 57597 + convert = 57381 + copyKwd = 57825 + count = 57826 + cpu = 57598 + create = 57382 + createTableSelect = 57975 + cross = 57383 + curTime = 57827 + current = 57599 + currentDate = 57384 + currentRole = 57388 + currentTime = 57385 + currentTs = 57386 + currentUser = 57387 + cycle = 57600 + data = 57602 + database = 57389 + databases = 57390 + dateAdd = 57828 + dateSub = 57829 + dateType = 57603 + datetimeType = 57604 + day = 57601 + dayHour = 57391 + dayMicrosecond = 57392 + dayMinute = 57393 + daySecond = 57394 + ddl = 57876 + deallocate = 57605 + decLit = 57951 + decimalType = 57395 + defaultKwd = 57396 + definer = 57606 + delayKeyWrite = 57607 + delayed = 57397 + deleteKwd = 57398 + depth = 57877 + desc = 57399 + describe = 57400 + directory = 57608 + disable = 57609 + discard = 57610 + disk = 57611 + distinct = 57401 + distinctRow = 57402 + div = 57403 + do = 57612 + doubleAtIdentifier = 57350 + doubleType = 57404 + drainer = 57878 + drop = 57405 + dual = 57406 + duplicate = 57613 + dynamic = 57614 + elseKwd = 57407 + empty = 57968 + enable = 57615 + enclosed = 57408 + encryption = 57616 + end = 57617 + enforced = 57817 + engine = 57618 + engines = 57619 + enum = 57620 + eq = 57957 + yyErrCode = 57345 + escape = 57624 + escaped = 57409 + event = 57621 + events = 57622 + evolve = 57623 + exact = 57830 + except = 57412 + exchange = 57625 + exclusive = 57626 + execute = 57627 + exists = 57410 + expansion = 57628 + expire = 57629 + explain = 57411 + exprPushdownBlacklist = 57869 + extended = 57630 + extract = 57831 + falseKwd = 57413 + faultsSym = 57631 + fields = 57632 + first = 57633 + fixed = 57634 + flashback = 57832 + floatLit = 57950 + floatType = 57414 + flush = 57635 + following = 57636 + forKwd = 57415 + force = 57416 + foreign = 57417 + format = 57637 + from = 57418 + full = 57638 + fulltext = 57419 + function = 57639 + ge = 57958 + generated = 57420 + getFormat = 57833 + global = 57782 + grant = 57421 + grants = 57640 + group = 57422 + groupConcat = 57834 + hash = 57641 + having = 57423 + hexLit = 57953 + highPriority = 57424 + higherThanComma = 57987 + hintAggToCop = 57893 + hintBegin = 57352 + hintEnablePlanCache = 57908 + hintEnd = 57353 + hintHASHAGG = 57901 + hintHJ = 57894 + hintINLHJ = 57897 + hintINLJ = 57896 + hintINLMJ = 57898 + hintIgnoreIndex = 57904 + hintMemoryQuota = 57914 + hintNSJI = 57900 + hintNoIndexMerge = 57906 + hintOLAP = 57915 + hintOLTP = 57916 + hintQBName = 57912 + hintQueryType = 57913 + hintReadConsistentReplica = 57910 + hintReadFromStorage = 57911 + hintSJI = 57899 + hintSMJ = 57895 + hintSTREAMAGG = 57902 + hintTiFlash = 57918 + hintTiKV = 57917 + hintUseIndex = 57903 + hintUseIndexMerge = 57905 + hintUsePlanCache = 57909 + hintUseToja = 57907 + history = 57642 + hosts = 57643 + hour = 57644 + hourMicrosecond = 57425 + hourMinute = 57426 + hourSecond = 57427 + identSQLErrors = 57813 + identified = 57645 + identifier = 57346 + ifKwd = 57428 + ignore = 57429 + importKwd = 57646 + in = 57430 + increment = 57650 + incremental = 57651 + index = 57431 + indexes = 57652 + infile = 57432 + inner = 57433 + inplace = 57836 + insert = 57438 + insertMethod = 57647 + insertValues = 57973 + instant = 57837 + int1Type = 57440 + int2Type = 57441 + int3Type = 
57442 + int4Type = 57443 + int8Type = 57444 + intLit = 57952 + intType = 57439 + integerType = 57434 + internal = 57838 + interval = 57435 + into = 57436 + invalid = 57351 + invisible = 57653 + invoker = 57654 + io = 57655 + ipc = 57656 + is = 57437 + isolation = 57648 + issuer = 57649 + job = 57880 + jobs = 57879 + join = 57445 + jsonType = 57657 + jss = 57960 + juss = 57961 + key = 57446 + keyBlockSize = 57658 + keys = 57447 + kill = 57448 + labels = 57659 + language = 57449 + last = 57660 + le = 57959 + leading = 57450 + left = 57451 + less = 57661 + level = 57662 + like = 57452 + limit = 57453 + linear = 57455 + lines = 57454 + list = 57663 + load = 57456 + local = 57664 + localTime = 57457 + localTs = 57458 + location = 57665 + lock = 57459 + logs = 57666 + long = 57542 + longblobType = 57460 + longtextType = 57461 + lowPriority = 57462 + lowerThanCharsetKwd = 57976 + lowerThanComma = 57986 + lowerThanCreateTableSelect = 57974 + lowerThanEq = 57983 + lowerThanInsertValues = 57972 + lowerThanIntervalKeyword = 57969 + lowerThanKey = 57977 + lowerThanLocal = 57978 + lowerThanNot = 57985 + lowerThanOn = 57982 + lowerThanRemove = 57979 + lowerThanSetKeyword = 57971 + lowerThanStringLitToken = 57970 + lowerThenOrder = 57980 + lsh = 57962 + master = 57667 + match = 57463 + max = 57840 + maxConnectionsPerHour = 57674 + maxExecutionTime = 57841 + maxQueriesPerHour = 57675 + maxRows = 57673 + maxUpdatesPerHour = 57676 + maxUserConnections = 57677 + maxValue = 57464 + max_idxnum = 57683 + max_minutes = 57682 + mediumIntType = 57466 + mediumblobType = 57465 + mediumtextType = 57467 + memory = 57678 + merge = 57679 + microsecond = 57668 + min = 57839 + minRows = 57680 + minValue = 57681 + minute = 57669 + minuteMicrosecond = 57468 + minuteSecond = 57469 + mod = 57470 + mode = 57670 + modify = 57671 + month = 57672 + names = 57684 + national = 57685 + natural = 57555 + ncharType = 57686 + neg = 57984 + neq = 57963 + neqSynonym = 57964 + never = 57687 + next_row_id = 57835 + no = 57688 + noWriteToBinLog = 57472 + nocache = 57689 + nocycle = 57690 + nodeID = 57881 + nodeState = 57882 + nodegroup = 57691 + nomaxvalue = 57692 + nominvalue = 57693 + none = 57694 + noorder = 57695 + not = 57471 + not2 = 57967 + now = 57842 + nowait = 57818 + null = 57473 + nulleq = 57965 + nulls = 57696 + numericType = 57474 + nvarcharType = 57475 + odbcDateType = 57356 + odbcTimeType = 57357 + odbcTimestampType = 57358 + offset = 57697 + on = 57476 + only = 57698 + open = 57775 + optRuleBlacklist = 57870 + optimistic = 57883 + optimize = 57477 + option = 57478 + optionally = 57479 + or = 57480 + order = 57481 + outer = 57482 + packKeys = 57483 + pageSym = 57699 + parser = 57485 + partial = 57701 + partition = 57484 + partitioning = 57702 + partitions = 57703 + password = 57700 + per_db = 57714 + per_table = 57713 + pessimistic = 57884 + pipes = 57355 + pipesAsOr = 57704 + plugins = 57705 + position = 57843 + preSplitRegions = 57490 + preceding = 57706 + precisionType = 57486 + prepare = 57707 + primary = 57487 + privileges = 57708 + procedure = 57488 + process = 57709 + processlist = 57710 + profile = 57711 + profiles = 57712 + pump = 57885 + quarter = 57715 + queries = 57717 + query = 57716 + quick = 57718 + rangeKwd = 57491 + read = 57492 + realType = 57493 + rebuild = 57719 + recent = 57844 + recover = 57720 + redundant = 57721 + references = 57494 + regexpKwd = 57495 + region = 57923 + regions = 57922 + reload = 57722 + remove = 57723 + rename = 57496 + reorganize = 57724 + repair = 57725 + repeat = 57497 + 
repeatable = 57726 + replace = 57498 + replica = 57728 + replication = 57729 + require = 57499 + respect = 57727 + restrict = 57500 + reverse = 57730 + revoke = 57501 + right = 57502 + rlike = 57503 + role = 57731 + rollback = 57732 + routine = 57733 + row = 57504 + rowCount = 57734 + rowFormat = 57735 + rsh = 57966 + rtree = 57736 + samples = 57886 + second = 57737 + secondMicrosecond = 57505 + secondaryEngine = 57738 + secondaryLoad = 57739 + secondaryUnload = 57740 + security = 57741 + selectKwd = 57506 + separator = 57742 + sequence = 57743 + serial = 57744 + serializable = 57745 + session = 57746 + set = 57507 + shardRowIDBits = 57489 + share = 57747 + shared = 57748 + show = 57508 + shutdown = 57749 + signed = 57750 + simple = 57751 + singleAtIdentifier = 57349 + slave = 57752 + slow = 57753 + smallIntType = 57509 + snapshot = 57754 + some = 57781 + source = 57776 + spatial = 57510 + split = 57920 + sql = 57511 + sqlBigResult = 57512 + sqlBufferResult = 57755 + sqlCache = 57756 + sqlCalcFoundRows = 57513 + sqlNoCache = 57757 + sqlSmallResult = 57514 + sqlTsiDay = 57758 + sqlTsiHour = 57759 + sqlTsiMinute = 57760 + sqlTsiMonth = 57761 + sqlTsiQuarter = 57762 + sqlTsiSecond = 57763 + sqlTsiWeek = 57764 + sqlTsiYear = 57765 + ssl = 57515 + staleness = 57845 + start = 57766 + starting = 57516 + stats = 57887 + statsAutoRecalc = 57767 + statsBuckets = 57890 + statsHealthy = 57891 + statsHistograms = 57889 + statsMeta = 57888 + statsPersistent = 57768 + statsSamplePages = 57769 + status = 57770 + std = 57846 + stddev = 57847 + stddevPop = 57848 + stddevSamp = 57849 + storage = 57771 + stored = 57519 + straightJoin = 57517 + stringLit = 57348 + strong = 57850 + subDate = 57851 + subject = 57777 + subpartition = 57778 + subpartitions = 57779 + substring = 57853 + sum = 57852 + super = 57780 + swaps = 57772 + switchesSym = 57773 + systemTime = 57774 + tableChecksum = 57783 + tableKwd = 57518 + tableRefPriority = 57981 + tables = 57784 + tablespace = 57785 + temporary = 57786 + temptable = 57787 + terminated = 57520 + textType = 57788 + than = 57789 + then = 57521 + tidb = 57892 + timeType = 57790 + timestampAdd = 57854 + timestampDiff = 57855 + timestampType = 57791 + tinyIntType = 57523 + tinyblobType = 57522 + tinytextType = 57524 + to = 57525 + tokudbDefault = 57856 + tokudbFast = 57857 + tokudbLzma = 57858 + tokudbQuickLZ = 57859 + tokudbSmall = 57861 + tokudbSnappy = 57860 + tokudbUncompressed = 57862 + tokudbZlib = 57863 + top = 57864 + topn = 57919 + tp = 57797 + trace = 57792 + traditional = 57793 + trailing = 57526 + transaction = 57794 + trigger = 57527 + triggers = 57795 + trim = 57865 + trueKwd = 57528 + truncate = 57796 + unbounded = 57798 + uncommitted = 57799 + undefined = 57803 + underscoreCS = 57347 + unicodeSym = 57800 + union = 57530 + unique = 57529 + unknown = 57801 + unlock = 57531 + unsigned = 57532 + until = 57533 + update = 57534 + usage = 57535 + use = 57536 + user = 57802 + using = 57537 + utcDate = 57538 + utcTime = 57540 + utcTimestamp = 57539 + validation = 57804 + value = 57805 + values = 57541 + varPop = 57867 + varSamp = 57868 + varbinaryType = 57545 + varcharType = 57543 + varcharacter = 57544 + variables = 57806 + variance = 57866 + varying = 57546 + view = 57807 + virtual = 57547 + visible = 57808 + warnings = 57811 + week = 57814 + when = 57548 + where = 57549 + width = 57921 + with = 57551 + without = 57812 + write = 57550 + x509 = 57816 + xor = 57552 + yearMonth = 57553 + yearType = 57815 + zerofill = 57554 + + yyMaxDepth = 200 + yyTabOfs = -1159 +) + 
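// [Editor's note] The yyXLAT map below is goyacc output: it appears to translate the token
// codes defined above into the compact column indices used by the generated parse tables,
// and the "(Nx)" comments record how many table entries reference each symbol. Illustrative
// lookup only (the concrete values are whatever goyacc emitted for this grammar):
//
//	col := yyXLAT[57589] // the `comment` token (57589) maps to column 0
//	_ = col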
+var ( + yyXLAT = map[int]int{ + 57589: 0, // comment (995x) + 57744: 1, // serial (972x) + 57565: 2, // autoIncrement (971x) + 57566: 3, // autoRandom (971x) + 57587: 4, // columnFormat (971x) + 57771: 5, // storage (971x) + 57344: 6, // $end (931x) + 59: 7, // ';' (930x) + 41: 8, // ')' (916x) + 44: 9, // ',' (914x) + 57750: 10, // signed (847x) + 57580: 11, // charsetKwd (843x) + 57893: 12, // hintAggToCop (834x) + 57908: 13, // hintEnablePlanCache (834x) + 57901: 14, // hintHASHAGG (834x) + 57894: 15, // hintHJ (834x) + 57904: 16, // hintIgnoreIndex (834x) + 57897: 17, // hintINLHJ (834x) + 57896: 18, // hintINLJ (834x) + 57898: 19, // hintINLMJ (834x) + 57914: 20, // hintMemoryQuota (834x) + 57906: 21, // hintNoIndexMerge (834x) + 57900: 22, // hintNSJI (834x) + 57912: 23, // hintQBName (834x) + 57913: 24, // hintQueryType (834x) + 57910: 25, // hintReadConsistentReplica (834x) + 57911: 26, // hintReadFromStorage (834x) + 57899: 27, // hintSJI (834x) + 57895: 28, // hintSMJ (834x) + 57902: 29, // hintSTREAMAGG (834x) + 57903: 30, // hintUseIndex (834x) + 57905: 31, // hintUseIndexMerge (834x) + 57909: 32, // hintUsePlanCache (834x) + 57907: 33, // hintUseToja (834x) + 57841: 34, // maxExecutionTime (834x) + 57797: 35, // tp (828x) + 57653: 36, // invisible (827x) + 57808: 37, // visible (827x) + 57658: 38, // keyBlockSize (826x) + 57564: 39, // ascii (816x) + 57576: 40, // byteType (816x) + 57800: 41, // unicodeSym (816x) + 57616: 42, // encryption (815x) + 57784: 43, // tables (808x) + 57817: 44, // enforced (807x) + 57575: 45, // btree (806x) + 57637: 46, // format (806x) + 57641: 47, // hash (806x) + 57736: 48, // rtree (806x) + 57805: 49, // value (806x) + 57806: 50, // variables (806x) + 57918: 51, // hintTiFlash (805x) + 57917: 52, // hintTiKV (805x) + 57697: 53, // offset (805x) + 57710: 54, // processlist (805x) + 57801: 55, // unknown (805x) + 57871: 56, // admin (804x) + 57569: 57, // begin (804x) + 57590: 58, // commit (804x) + 57609: 59, // disable (804x) + 57610: 60, // discard (804x) + 57615: 61, // enable (804x) + 57634: 62, // fixed (804x) + 57915: 63, // hintOLAP (804x) + 57916: 64, // hintOLTP (804x) + 57646: 65, // importKwd (804x) + 57657: 66, // jsonType (804x) + 57671: 67, // modify (804x) + 57718: 68, // quick (804x) + 57732: 69, // rollback (804x) + 57739: 70, // secondaryLoad (804x) + 57740: 71, // secondaryUnload (804x) + 57766: 72, // start (804x) + 57785: 73, // tablespace (804x) + 57786: 74, // temporary (804x) + 57796: 75, // truncate (804x) + 57804: 76, // validation (804x) + 57812: 77, // without (804x) + 57561: 78, // always (803x) + 57571: 79, // bitType (803x) + 57573: 80, // booleanType (803x) + 57574: 81, // boolType (803x) + 57604: 82, // datetimeType (803x) + 57603: 83, // dateType (803x) + 57876: 84, // ddl (803x) + 57611: 85, // disk (803x) + 57614: 86, // dynamic (803x) + 57620: 87, // enum (803x) + 57638: 88, // full (803x) + 57782: 89, // global (803x) + 57813: 90, // identSQLErrors (803x) + 57879: 91, // jobs (803x) + 57678: 92, // memory (803x) + 57685: 93, // national (803x) + 57686: 94, // ncharType (803x) + 57746: 95, // session (803x) + 57765: 96, // sqlTsiYear (803x) + 57788: 97, // textType (803x) + 57791: 98, // timestampType (803x) + 57790: 99, // timeType (803x) + 57793: 100, // traditional (803x) + 57794: 101, // transaction (803x) + 57811: 102, // warnings (803x) + 57815: 103, // yearType (803x) + 57556: 104, // account (802x) + 57557: 105, // action (802x) + 57819: 106, // addDate (802x) + 57558: 107, // advise (802x) + 57559: 
108, // after (802x) + 57560: 109, // against (802x) + 57562: 110, // algorithm (802x) + 57563: 111, // any (802x) + 57568: 112, // avg (802x) + 57567: 113, // avgRowLength (802x) + 57809: 114, // binding (802x) + 57810: 115, // bindings (802x) + 57570: 116, // binlog (802x) + 57820: 117, // bitAnd (802x) + 57821: 118, // bitOr (802x) + 57822: 119, // bitXor (802x) + 57572: 120, // block (802x) + 57823: 121, // bound (802x) + 57872: 122, // buckets (802x) + 57873: 123, // builtins (802x) + 57577: 124, // cache (802x) + 57874: 125, // cancel (802x) + 57579: 126, // capture (802x) + 57578: 127, // cascaded (802x) + 57824: 128, // cast (802x) + 57581: 129, // checksum (802x) + 57582: 130, // cipher (802x) + 57583: 131, // cleanup (802x) + 57584: 132, // client (802x) + 57875: 133, // cmSketch (802x) + 57585: 134, // coalesce (802x) + 57586: 135, // collation (802x) + 57588: 136, // columns (802x) + 57591: 137, // committed (802x) + 57592: 138, // compact (802x) + 57593: 139, // compressed (802x) + 57594: 140, // compression (802x) + 57595: 141, // connection (802x) + 57596: 142, // consistent (802x) + 57597: 143, // context (802x) + 57825: 144, // copyKwd (802x) + 57826: 145, // count (802x) + 57598: 146, // cpu (802x) + 57599: 147, // current (802x) + 57827: 148, // curTime (802x) + 57600: 149, // cycle (802x) + 57602: 150, // data (802x) + 57828: 151, // dateAdd (802x) + 57829: 152, // dateSub (802x) + 57601: 153, // day (802x) + 57605: 154, // deallocate (802x) + 57606: 155, // definer (802x) + 57607: 156, // delayKeyWrite (802x) + 57877: 157, // depth (802x) + 57608: 158, // directory (802x) + 57612: 159, // do (802x) + 57878: 160, // drainer (802x) + 57613: 161, // duplicate (802x) + 57617: 162, // end (802x) + 57618: 163, // engine (802x) + 57619: 164, // engines (802x) + 57624: 165, // escape (802x) + 57621: 166, // event (802x) + 57622: 167, // events (802x) + 57623: 168, // evolve (802x) + 57830: 169, // exact (802x) + 57625: 170, // exchange (802x) + 57626: 171, // exclusive (802x) + 57627: 172, // execute (802x) + 57628: 173, // expansion (802x) + 57629: 174, // expire (802x) + 57869: 175, // exprPushdownBlacklist (802x) + 57630: 176, // extended (802x) + 57831: 177, // extract (802x) + 57631: 178, // faultsSym (802x) + 57632: 179, // fields (802x) + 57633: 180, // first (802x) + 57832: 181, // flashback (802x) + 57635: 182, // flush (802x) + 57636: 183, // following (802x) + 57639: 184, // function (802x) + 57833: 185, // getFormat (802x) + 57640: 186, // grants (802x) + 57834: 187, // groupConcat (802x) + 57642: 188, // history (802x) + 57643: 189, // hosts (802x) + 57644: 190, // hour (802x) + 57645: 191, // identified (802x) + 57346: 192, // identifier (802x) + 57650: 193, // increment (802x) + 57651: 194, // incremental (802x) + 57652: 195, // indexes (802x) + 57836: 196, // inplace (802x) + 57647: 197, // insertMethod (802x) + 57837: 198, // instant (802x) + 57838: 199, // internal (802x) + 57654: 200, // invoker (802x) + 57655: 201, // io (802x) + 57656: 202, // ipc (802x) + 57648: 203, // isolation (802x) + 57649: 204, // issuer (802x) + 57880: 205, // job (802x) + 57659: 206, // labels (802x) + 57660: 207, // last (802x) + 57661: 208, // less (802x) + 57662: 209, // level (802x) + 57663: 210, // list (802x) + 57664: 211, // local (802x) + 57665: 212, // location (802x) + 57666: 213, // logs (802x) + 57667: 214, // master (802x) + 57840: 215, // max (802x) + 57683: 216, // max_idxnum (802x) + 57682: 217, // max_minutes (802x) + 57674: 218, // maxConnectionsPerHour (802x) + 
57675: 219, // maxQueriesPerHour (802x) + 57673: 220, // maxRows (802x) + 57676: 221, // maxUpdatesPerHour (802x) + 57677: 222, // maxUserConnections (802x) + 57679: 223, // merge (802x) + 57668: 224, // microsecond (802x) + 57839: 225, // min (802x) + 57680: 226, // minRows (802x) + 57669: 227, // minute (802x) + 57681: 228, // minValue (802x) + 57670: 229, // mode (802x) + 57672: 230, // month (802x) + 57684: 231, // names (802x) + 57687: 232, // never (802x) + 57835: 233, // next_row_id (802x) + 57688: 234, // no (802x) + 57689: 235, // nocache (802x) + 57690: 236, // nocycle (802x) + 57691: 237, // nodegroup (802x) + 57881: 238, // nodeID (802x) + 57882: 239, // nodeState (802x) + 57692: 240, // nomaxvalue (802x) + 57693: 241, // nominvalue (802x) + 57694: 242, // none (802x) + 57695: 243, // noorder (802x) + 57842: 244, // now (802x) + 57818: 245, // nowait (802x) + 57696: 246, // nulls (802x) + 57698: 247, // only (802x) + 57775: 248, // open (802x) + 57883: 249, // optimistic (802x) + 57870: 250, // optRuleBlacklist (802x) + 57699: 251, // pageSym (802x) + 57701: 252, // partial (802x) + 57702: 253, // partitioning (802x) + 57703: 254, // partitions (802x) + 57700: 255, // password (802x) + 57714: 256, // per_db (802x) + 57713: 257, // per_table (802x) + 57884: 258, // pessimistic (802x) + 57705: 259, // plugins (802x) + 57843: 260, // position (802x) + 57706: 261, // preceding (802x) + 57707: 262, // prepare (802x) + 57708: 263, // privileges (802x) + 57709: 264, // process (802x) + 57711: 265, // profile (802x) + 57712: 266, // profiles (802x) + 57885: 267, // pump (802x) + 57715: 268, // quarter (802x) + 57717: 269, // queries (802x) + 57716: 270, // query (802x) + 57719: 271, // rebuild (802x) + 57844: 272, // recent (802x) + 57720: 273, // recover (802x) + 57721: 274, // redundant (802x) + 57923: 275, // region (802x) + 57922: 276, // regions (802x) + 57722: 277, // reload (802x) + 57723: 278, // remove (802x) + 57724: 279, // reorganize (802x) + 57725: 280, // repair (802x) + 57726: 281, // repeatable (802x) + 57728: 282, // replica (802x) + 57729: 283, // replication (802x) + 57727: 284, // respect (802x) + 57730: 285, // reverse (802x) + 57731: 286, // role (802x) + 57733: 287, // routine (802x) + 57734: 288, // rowCount (802x) + 57735: 289, // rowFormat (802x) + 57886: 290, // samples (802x) + 57737: 291, // second (802x) + 57738: 292, // secondaryEngine (802x) + 57741: 293, // security (802x) + 57742: 294, // separator (802x) + 57743: 295, // sequence (802x) + 57745: 296, // serializable (802x) + 57747: 297, // share (802x) + 57748: 298, // shared (802x) + 57749: 299, // shutdown (802x) + 57751: 300, // simple (802x) + 57752: 301, // slave (802x) + 57753: 302, // slow (802x) + 57754: 303, // snapshot (802x) + 57781: 304, // some (802x) + 57776: 305, // source (802x) + 57920: 306, // split (802x) + 57755: 307, // sqlBufferResult (802x) + 57756: 308, // sqlCache (802x) + 57757: 309, // sqlNoCache (802x) + 57758: 310, // sqlTsiDay (802x) + 57759: 311, // sqlTsiHour (802x) + 57760: 312, // sqlTsiMinute (802x) + 57761: 313, // sqlTsiMonth (802x) + 57762: 314, // sqlTsiQuarter (802x) + 57763: 315, // sqlTsiSecond (802x) + 57764: 316, // sqlTsiWeek (802x) + 57845: 317, // staleness (802x) + 57887: 318, // stats (802x) + 57767: 319, // statsAutoRecalc (802x) + 57890: 320, // statsBuckets (802x) + 57891: 321, // statsHealthy (802x) + 57889: 322, // statsHistograms (802x) + 57888: 323, // statsMeta (802x) + 57768: 324, // statsPersistent (802x) + 57769: 325, // statsSamplePages 
(802x) + 57770: 326, // status (802x) + 57846: 327, // std (802x) + 57847: 328, // stddev (802x) + 57848: 329, // stddevPop (802x) + 57849: 330, // stddevSamp (802x) + 57850: 331, // strong (802x) + 57851: 332, // subDate (802x) + 57777: 333, // subject (802x) + 57778: 334, // subpartition (802x) + 57779: 335, // subpartitions (802x) + 57853: 336, // substring (802x) + 57852: 337, // sum (802x) + 57780: 338, // super (802x) + 57772: 339, // swaps (802x) + 57773: 340, // switchesSym (802x) + 57774: 341, // systemTime (802x) + 57783: 342, // tableChecksum (802x) + 57787: 343, // temptable (802x) + 57789: 344, // than (802x) + 57892: 345, // tidb (802x) + 57854: 346, // timestampAdd (802x) + 57855: 347, // timestampDiff (802x) + 57856: 348, // tokudbDefault (802x) + 57857: 349, // tokudbFast (802x) + 57858: 350, // tokudbLzma (802x) + 57859: 351, // tokudbQuickLZ (802x) + 57861: 352, // tokudbSmall (802x) + 57860: 353, // tokudbSnappy (802x) + 57862: 354, // tokudbUncompressed (802x) + 57863: 355, // tokudbZlib (802x) + 57864: 356, // top (802x) + 57919: 357, // topn (802x) + 57792: 358, // trace (802x) + 57795: 359, // triggers (802x) + 57865: 360, // trim (802x) + 57798: 361, // unbounded (802x) + 57799: 362, // uncommitted (802x) + 57803: 363, // undefined (802x) + 57802: 364, // user (802x) + 57866: 365, // variance (802x) + 57867: 366, // varPop (802x) + 57868: 367, // varSamp (802x) + 57807: 368, // view (802x) + 57814: 369, // week (802x) + 57921: 370, // width (802x) + 57816: 371, // x509 (802x) + 57471: 372, // not (747x) + 40: 373, // '(' (706x) + 57396: 374, // defaultKwd (685x) + 57364: 375, // as (684x) + 57473: 376, // null (679x) + 57378: 377, // collate (656x) + 57348: 378, // stringLit (648x) + 43: 379, // '+' (614x) + 45: 380, // '-' (614x) + 57470: 381, // mod (612x) + 57446: 382, // key (574x) + 57487: 383, // primary (573x) + 57453: 384, // limit (572x) + 57476: 385, // on (569x) + 57481: 386, // order (567x) + 57377: 387, // check (565x) + 57529: 388, // unique (563x) + 57380: 389, // constraint (558x) + 57420: 390, // generated (554x) + 57537: 391, // using (539x) + 57363: 392, // and (537x) + 57354: 393, // andand (536x) + 57423: 394, // having (536x) + 57480: 395, // or (536x) + 57704: 396, // pipesAsOr (536x) + 57552: 397, // xor (536x) + 57418: 398, // from (530x) + 57422: 399, // group (528x) + 46: 400, // '.' 
(527x) + 42: 401, // '*' (526x) + 125: 402, // '}' (520x) + 57957: 403, // eq (520x) + 57349: 404, // singleAtIdentifier (515x) + 57428: 405, // ifKwd (513x) + 57952: 406, // intLit (513x) + 57399: 407, // desc (512x) + 57365: 408, // asc (510x) + 57415: 409, // forKwd (508x) + 57498: 410, // replace (499x) + 60: 411, // '<' (497x) + 62: 412, // '>' (497x) + 57958: 413, // ge (497x) + 57437: 414, // is (497x) + 57959: 415, // le (497x) + 57963: 416, // neq (497x) + 57964: 417, // neqSynonym (497x) + 57965: 418, // nulleq (497x) + 57413: 419, // falseKwd (496x) + 57528: 420, // trueKwd (496x) + 37: 421, // '%' (494x) + 38: 422, // '&' (494x) + 47: 423, // '/' (494x) + 94: 424, // '^' (494x) + 124: 425, // '|' (494x) + 57403: 426, // div (494x) + 57962: 427, // lsh (494x) + 57966: 428, // rsh (494x) + 57541: 429, // values (494x) + 57951: 430, // decLit (493x) + 57950: 431, // floatLit (493x) + 57430: 432, // in (493x) + 57389: 433, // database (492x) + 57366: 434, // between (491x) + 57954: 435, // bitLit (491x) + 57938: 436, // builtinNow (491x) + 57386: 437, // currentTs (491x) + 57350: 438, // doubleAtIdentifier (491x) + 57953: 439, // hexLit (491x) + 57457: 440, // localTime (491x) + 57458: 441, // localTs (491x) + 57347: 442, // underscoreCS (491x) + 33: 443, // '!' (489x) + 126: 444, // '~' (489x) + 57929: 445, // builtinCount (489x) + 57930: 446, // builtinCurDate (489x) + 57931: 447, // builtinCurTime (489x) + 57936: 448, // builtinMax (489x) + 57937: 449, // builtinMin (489x) + 57939: 450, // builtinPosition (489x) + 57941: 451, // builtinSubstring (489x) + 57942: 452, // builtinSum (489x) + 57943: 453, // builtinSysDate (489x) + 57946: 454, // builtinTrim (489x) + 57947: 455, // builtinUser (489x) + 57381: 456, // convert (489x) + 57384: 457, // currentDate (489x) + 57388: 458, // currentRole (489x) + 57385: 459, // currentTime (489x) + 57387: 460, // currentUser (489x) + 57435: 461, // interval (489x) + 57451: 462, // left (489x) + 57967: 463, // not2 (489x) + 57497: 464, // repeat (489x) + 57502: 465, // right (489x) + 57504: 466, // row (489x) + 57538: 467, // utcDate (489x) + 57540: 468, // utcTime (489x) + 57539: 469, // utcTimestamp (489x) + 57375: 470, // character (419x) + 57376: 471, // charType (419x) + 57368: 472, // binaryType (414x) + 57549: 473, // where (411x) + 57551: 474, // with (400x) + 57431: 475, // index (393x) + 57445: 476, // join (392x) + 57433: 477, // inner (390x) + 57506: 478, // selectKwd (389x) + 57416: 479, // force (386x) + 57507: 480, // set (386x) + 57536: 481, // use (386x) + 57956: 482, // assignmentEq (384x) + 57429: 483, // ignore (384x) + 57405: 484, // drop (381x) + 57372: 485, // cascade (380x) + 57419: 486, // fulltext (380x) + 57500: 487, // restrict (380x) + 93: 488, // ']' (379x) + 57544: 489, // varcharacter (378x) + 57543: 490, // varcharType (378x) + 57361: 491, // alter (377x) + 57525: 492, // to (376x) + 57545: 493, // varbinaryType (376x) + 57359: 494, // add (375x) + 57367: 495, // bigIntType (375x) + 57369: 496, // blobType (375x) + 57374: 497, // change (375x) + 57395: 498, // decimalType (375x) + 57404: 499, // doubleType (375x) + 57414: 500, // floatType (375x) + 57440: 501, // int1Type (375x) + 57441: 502, // int2Type (375x) + 57442: 503, // int3Type (375x) + 57443: 504, // int4Type (375x) + 57444: 505, // int8Type (375x) + 57434: 506, // integerType (375x) + 57439: 507, // intType (375x) + 57452: 508, // like (375x) + 57542: 509, // long (375x) + 57460: 510, // longblobType (375x) + 57461: 511, // longtextType (375x) + 
57465: 512, // mediumblobType (375x) + 57466: 513, // mediumIntType (375x) + 57467: 514, // mediumtextType (375x) + 57474: 515, // numericType (375x) + 57475: 516, // nvarcharType (375x) + 57493: 517, // realType (375x) + 57496: 518, // rename (375x) + 57509: 519, // smallIntType (375x) + 57522: 520, // tinyblobType (375x) + 57523: 521, // tinyIntType (375x) + 57524: 522, // tinytextType (375x) + 58104: 523, // Identifier (188x) + 58145: 524, // NotKeywordToken (188x) + 58234: 525, // TiDBKeyword (188x) + 58237: 526, // UnReservedKeyword (188x) + 58140: 527, // Literal (77x) + 58203: 528, // SimpleIdent (77x) + 58210: 529, // StringLiteral (77x) + 58084: 530, // FunctionCallGeneric (75x) + 58085: 531, // FunctionCallKeyword (75x) + 58086: 532, // FunctionCallNonKeyword (75x) + 58087: 533, // FunctionNameConflict (75x) + 58090: 534, // FunctionNameDatetimePrecision (75x) + 58091: 535, // FunctionNameOptionalBraces (75x) + 58202: 536, // SimpleExpr (75x) + 58213: 537, // SumExpr (75x) + 58215: 538, // SystemVariable (75x) + 58239: 539, // UserVariable (75x) + 58245: 540, // Variable (75x) + 58002: 541, // BitExpr (70x) + 58170: 542, // PredicateExpr (54x) + 58005: 543, // BoolPri (51x) + 58065: 544, // Expression (51x) + 57532: 545, // unsigned (45x) + 57554: 546, // zerofill (45x) + 58255: 547, // logAnd (38x) + 58256: 548, // logOr (38x) + 123: 549, // '{' (32x) + 57353: 550, // hintEnd (31x) + 57517: 551, // straightJoin (25x) + 58173: 552, // QueryBlockOpt (24x) + 57513: 553, // sqlCalcFoundRows (23x) + 58019: 554, // ColumnName (21x) + 58223: 555, // TableName (19x) + 58072: 556, // FieldLen (18x) + 57512: 557, // sqlBigResult (16x) + 57514: 558, // sqlSmallResult (14x) + 58011: 559, // CharsetKw (13x) + 57397: 560, // delayed (13x) + 57424: 561, // highPriority (13x) + 57462: 562, // lowPriority (13x) + 58101: 563, // HintTable (12x) + 58143: 564, // NUM (12x) + 58156: 565, // OptFieldLen (11x) + 58179: 566, // SelectStmt (11x) + 58180: 567, // SelectStmtBasic (11x) + 58183: 568, // SelectStmtFromDualTable (11x) + 58184: 569, // SelectStmtFromTable (11x) + 57398: 570, // deleteKwd (10x) + 57438: 571, // insert (10x) + 58152: 572, // OptBinary (9x) + 57518: 573, // tableKwd (9x) + 58102: 574, // HintTableList (8x) + 58105: 575, // IfExists (8x) + 58133: 576, // KeyOrIndex (8x) + 58135: 577, // LengthNum (8x) + 58032: 578, // ConstraintKeywordOpt (7x) + 58064: 579, // ExprOrDefault (7x) + 57436: 580, // into (7x) + 58211: 581, // StringName (7x) + 57546: 582, // varying (7x) + 57379: 583, // column (6x) + 58015: 584, // ColumnDef (6x) + 58058: 585, // EqOrAssignmentEq (6x) + 58066: 586, // ExpressionList (6x) + 58106: 587, // IfNotExists (6x) + 58113: 588, // IndexInvisible (6x) + 58120: 589, // IndexPartSpecification (6x) + 58123: 590, // IndexType (6x) + 58018: 591, // ColumnKeywordOpt (5x) + 58037: 592, // DBName (5x) + 58047: 593, // DeleteFromStmt (5x) + 58074: 594, // FieldOpt (5x) + 58075: 595, // FieldOpts (5x) + 58118: 596, // IndexOption (5x) + 58119: 597, // IndexOptionList (5x) + 58121: 598, // IndexPartSpecificationList (5x) + 58126: 599, // InsertIntoStmt (5x) + 58131: 600, // JoinTable (5x) + 58175: 601, // ReplaceIntoStmt (5x) + 58222: 602, // TableFactor (5x) + 58230: 603, // TableRef (5x) + 58248: 604, // VariableName (5x) + 58250: 605, // WhereClause (5x) + 58251: 606, // WhereClauseOptional (5x) + 57360: 607, // all (4x) + 57371: 608, // by (4x) + 58012: 609, // CharsetName (4x) + 58030: 610, // Constraint (4x) + 57401: 611, // distinct (4x) + 57402: 612, // 
distinctRow (4x) + 58057: 613, // EqOpt (4x) + 58115: 614, // IndexName (4x) + 58117: 615, // IndexNameList (4x) + 58124: 616, // IndexTypeName (4x) + 58139: 617, // LimitOption (4x) + 58166: 618, // OrderBy (4x) + 58167: 619, // OrderByOptional (4x) + 58172: 620, // PriorityOpt (4x) + 58193: 621, // SetExpr (4x) + 91: 622, // '[' (3x) + 58007: 623, // ByItem (3x) + 58022: 624, // ColumnOption (3x) + 57382: 625, // create (3x) + 58036: 626, // CrossOpt (3x) + 58054: 627, // EnforcedOrNot (3x) + 58059: 628, // EscapedTableRef (3x) + 58063: 629, // ExplainableStmt (3x) + 58067: 630, // ExpressionListOpt (3x) + 58092: 631, // GeneratedAlways (3x) + 58108: 632, // IndexHint (3x) + 58112: 633, // IndexHintType (3x) + 58116: 634, // IndexNameAndTypeOpt (3x) + 58153: 635, // OptCharset (3x) + 58154: 636, // OptCharsetWithOptBinary (3x) + 58165: 637, // Order (3x) + 58171: 638, // PrimaryOpt (3x) + 58178: 639, // RowValue (3x) + 58186: 640, // SelectStmtLimit (3x) + 57508: 641, // show (3x) + 58208: 642, // StorageOptimizerHintOpt (3x) + 58217: 643, // TableAsName (3x) + 58219: 644, // TableElement (3x) + 58227: 645, // TableOptimizerHintOpt (3x) + 58240: 646, // ValueSym (3x) + 57989: 647, // AdminStmt (2x) + 57990: 648, // AlterTableSpec (2x) + 57993: 649, // AlterTableStmt (2x) + 57362: 650, // analyze (2x) + 57994: 651, // AnalyzeTableStmt (2x) + 58000: 652, // BeginTransactionStmt (2x) + 58008: 653, // ByList (2x) + 58014: 654, // CollationName (2x) + 58023: 655, // ColumnOptionList (2x) + 58024: 656, // ColumnOptionListOpt (2x) + 58025: 657, // ColumnSetValue (2x) + 58028: 658, // CommitStmt (2x) + 58033: 659, // CreateDatabaseStmt (2x) + 58034: 660, // CreateIndexStmt (2x) + 58035: 661, // CreateTableStmt (2x) + 58038: 662, // DatabaseOption (2x) + 58041: 663, // DatabaseSym (2x) + 58044: 664, // DefaultKwdOpt (2x) + 57400: 665, // describe (2x) + 58050: 666, // DropDatabaseStmt (2x) + 58051: 667, // DropIndexStmt (2x) + 58052: 668, // DropTableStmt (2x) + 58053: 669, // EmptyStmt (2x) + 58055: 670, // EnforcedOrNotOpt (2x) + 57410: 671, // exists (2x) + 57411: 672, // explain (2x) + 58061: 673, // ExplainStmt (2x) + 58062: 674, // ExplainSym (2x) + 58069: 675, // Field (2x) + 58070: 676, // FieldAsName (2x) + 58071: 677, // FieldAsNameOpt (2x) + 58077: 678, // FloatOpt (2x) + 58082: 679, // FuncDatetimePrecList (2x) + 58083: 680, // FuncDatetimePrecListOpt (2x) + 58098: 681, // HintStorageType (2x) + 58099: 682, // HintStorageTypeAndTable (2x) + 58103: 683, // HintTrueOrFalse (2x) + 58109: 684, // IndexHintList (2x) + 58110: 685, // IndexHintListOpt (2x) + 58127: 686, // InsertValues (2x) + 58129: 687, // IntoOpt (2x) + 58134: 688, // KeyOrIndexOpt (2x) + 57447: 689, // keys (2x) + 58146: 690, // NowSym (2x) + 58147: 691, // NowSymFunc (2x) + 58148: 692, // NowSymOptionFraction (2x) + 58149: 693, // NumLiteral (2x) + 58161: 694, // OptTemporary (2x) + 58169: 695, // Precision (2x) + 58176: 696, // RestrictOrCascadeOpt (2x) + 58177: 697, // RollbackStmt (2x) + 58194: 698, // SetStmt (2x) + 58198: 699, // ShowStmt (2x) + 58201: 700, // SignedLiteral (2x) + 58205: 701, // Statement (2x) + 58209: 702, // StringList (2x) + 58214: 703, // Symbol (2x) + 58218: 704, // TableAsNameOpt (2x) + 58220: 705, // TableElementList (2x) + 58224: 706, // TableNameList (2x) + 58231: 707, // TableRefs (2x) + 58235: 708, // TruncateTableStmt (2x) + 58238: 709, // UseStmt (2x) + 58242: 710, // ValuesList (2x) + 58244: 711, // Varchar (2x) + 58246: 712, // VariableAssignment (2x) + 57991: 713, // 
AlterTableSpecList (1x) + 57992: 714, // AlterTableSpecListOpt (1x) + 57996: 715, // AsOpt (1x) + 58001: 716, // BetweenOrNotOp (1x) + 58003: 717, // BitValueType (1x) + 58004: 718, // BlobType (1x) + 58006: 719, // BooleanType (1x) + 58010: 720, // Char (1x) + 58017: 721, // ColumnFormat (1x) + 58020: 722, // ColumnNameList (1x) + 58021: 723, // ColumnNameListOpt (1x) + 58026: 724, // ColumnSetValueList (1x) + 58029: 725, // CompareOp (1x) + 58031: 726, // ConstraintElem (1x) + 58039: 727, // DatabaseOptionList (1x) + 58040: 728, // DatabaseOptionListOpt (1x) + 57390: 729, // databases (1x) + 58042: 730, // DateAndTimeType (1x) + 58043: 731, // DefaultFalseDistinctOpt (1x) + 58046: 732, // DefaultValueExpr (1x) + 58048: 733, // DistinctKwd (1x) + 58049: 734, // DistinctOpt (1x) + 57406: 735, // dual (1x) + 58056: 736, // EnforcedOrNotOrNotNullOpt (1x) + 57345: 737, // error (1x) + 58060: 738, // ExplainFormatType (1x) + 58073: 739, // FieldList (1x) + 58076: 740, // FixedPointType (1x) + 58078: 741, // FloatingPointType (1x) + 57417: 742, // foreign (1x) + 58079: 743, // FromDual (1x) + 58080: 744, // FromOrIn (1x) + 58081: 745, // FuncDatetimePrec (1x) + 58093: 746, // GlobalScope (1x) + 58094: 747, // GroupByClause (1x) + 58095: 748, // HavingClause (1x) + 57352: 749, // hintBegin (1x) + 58096: 750, // HintMemoryQuota (1x) + 58097: 751, // HintQueryType (1x) + 58100: 752, // HintStorageTypeAndTableList (1x) + 58111: 753, // IndexHintScope (1x) + 58114: 754, // IndexKeyTypeOpt (1x) + 58125: 755, // IndexTypeOpt (1x) + 58107: 756, // InOrNotOp (1x) + 58128: 757, // IntegerType (1x) + 58130: 758, // IsOrNotOp (1x) + 58137: 759, // LikeTableWithOrWithoutParen (1x) + 58138: 760, // LimitClause (1x) + 58142: 761, // NChar (1x) + 58150: 762, // NumericType (1x) + 58144: 763, // NVarchar (1x) + 58151: 764, // OptBinMod (1x) + 58157: 765, // OptFull (1x) + 58163: 766, // OptimizerHintList (1x) + 58164: 767, // OptionalBraces (1x) + 58160: 768, // OptTable (1x) + 57485: 769, // parser (1x) + 57486: 770, // precisionType (1x) + 58174: 771, // QuickOptional (1x) + 58181: 772, // SelectStmtCalcFoundRows (1x) + 58182: 773, // SelectStmtFieldList (1x) + 58185: 774, // SelectStmtGroup (1x) + 58187: 775, // SelectStmtOpts (1x) + 58188: 776, // SelectStmtSQLBigResult (1x) + 58189: 777, // SelectStmtSQLBufferResult (1x) + 58190: 778, // SelectStmtSQLCache (1x) + 58191: 779, // SelectStmtSQLSmallResult (1x) + 58192: 780, // SelectStmtStraightJoin (1x) + 58195: 781, // ShowDatabaseNameOpt (1x) + 58197: 782, // ShowLikeOrWhereOpt (1x) + 58200: 783, // ShowTargetFilterable (1x) + 57510: 784, // spatial (1x) + 58204: 785, // Start (1x) + 58206: 786, // StatementList (1x) + 58207: 787, // StorageMedia (1x) + 57519: 788, // stored (1x) + 58212: 789, // StringType (1x) + 58221: 790, // TableElementListOpt (1x) + 58228: 791, // TableOptimizerHints (1x) + 58229: 792, // TableOrTables (1x) + 58232: 793, // TableRefsClause (1x) + 58233: 794, // TextType (1x) + 58236: 795, // Type (1x) + 57534: 796, // update (1x) + 58241: 797, // Values (1x) + 58243: 798, // ValuesOpt (1x) + 58247: 799, // VariableAssignmentList (1x) + 57547: 800, // virtual (1x) + 58249: 801, // VirtualOrStored (1x) + 58254: 802, // Year (1x) + 57988: 803, // $default (0x) + 57955: 804, // andnot (0x) + 57995: 805, // AnyOrAll (0x) + 57997: 806, // Assignment (0x) + 57998: 807, // AssignmentList (0x) + 57999: 808, // AssignmentListOpt (0x) + 57370: 809, // both (0x) + 57924: 810, // builtinAddDate (0x) + 57925: 811, // builtinBitAnd (0x) + 57926: 
812, // builtinBitOr (0x) + 57927: 813, // builtinBitXor (0x) + 57928: 814, // builtinCast (0x) + 57932: 815, // builtinDateAdd (0x) + 57933: 816, // builtinDateSub (0x) + 57934: 817, // builtinExtract (0x) + 57935: 818, // builtinGroupConcat (0x) + 57944: 819, // builtinStddevPop (0x) + 57945: 820, // builtinStddevSamp (0x) + 57940: 821, // builtinSubDate (0x) + 57948: 822, // builtinVarPop (0x) + 57949: 823, // builtinVarSamp (0x) + 57373: 824, // caseKwd (0x) + 58009: 825, // CastType (0x) + 58013: 826, // CharsetNameOrDefault (0x) + 58016: 827, // ColumnDefList (0x) + 58027: 828, // CommaOpt (0x) + 57975: 829, // createTableSelect (0x) + 57383: 830, // cross (0x) + 57391: 831, // dayHour (0x) + 57392: 832, // dayMicrosecond (0x) + 57393: 833, // dayMinute (0x) + 57394: 834, // daySecond (0x) + 58045: 835, // DefaultTrueDistinctOpt (0x) + 57407: 836, // elseKwd (0x) + 57968: 837, // empty (0x) + 57408: 838, // enclosed (0x) + 57409: 839, // escaped (0x) + 57412: 840, // except (0x) + 58068: 841, // ExpressionOpt (0x) + 58088: 842, // FunctionNameDateArith (0x) + 58089: 843, // FunctionNameDateArithMultiForms (0x) + 57421: 844, // grant (0x) + 57987: 845, // higherThanComma (0x) + 57425: 846, // hourMicrosecond (0x) + 57426: 847, // hourMinute (0x) + 57427: 848, // hourSecond (0x) + 58122: 849, // IndexPartSpecificationListOpt (0x) + 57432: 850, // infile (0x) + 57973: 851, // insertValues (0x) + 57351: 852, // invalid (0x) + 58132: 853, // JoinType (0x) + 57960: 854, // jss (0x) + 57961: 855, // juss (0x) + 57448: 856, // kill (0x) + 57449: 857, // language (0x) + 57450: 858, // leading (0x) + 58136: 859, // LikeEscapeOpt (0x) + 57455: 860, // linear (0x) + 57454: 861, // lines (0x) + 57456: 862, // load (0x) + 58141: 863, // LocationLabelList (0x) + 57459: 864, // lock (0x) + 57976: 865, // lowerThanCharsetKwd (0x) + 57986: 866, // lowerThanComma (0x) + 57974: 867, // lowerThanCreateTableSelect (0x) + 57983: 868, // lowerThanEq (0x) + 57972: 869, // lowerThanInsertValues (0x) + 57969: 870, // lowerThanIntervalKeyword (0x) + 57977: 871, // lowerThanKey (0x) + 57978: 872, // lowerThanLocal (0x) + 57985: 873, // lowerThanNot (0x) + 57982: 874, // lowerThanOn (0x) + 57979: 875, // lowerThanRemove (0x) + 57971: 876, // lowerThanSetKeyword (0x) + 57970: 877, // lowerThanStringLitToken (0x) + 57980: 878, // lowerThenOrder (0x) + 57463: 879, // match (0x) + 57464: 880, // maxValue (0x) + 57468: 881, // minuteMicrosecond (0x) + 57469: 882, // minuteSecond (0x) + 57555: 883, // natural (0x) + 57984: 884, // neg (0x) + 57472: 885, // noWriteToBinLog (0x) + 57356: 886, // odbcDateType (0x) + 57358: 887, // odbcTimestampType (0x) + 57357: 888, // odbcTimeType (0x) + 58155: 889, // OptCollate (0x) + 58158: 890, // OptGConcatSeparator (0x) + 57477: 891, // optimize (0x) + 58159: 892, // OptInteger (0x) + 57478: 893, // option (0x) + 57479: 894, // optionally (0x) + 58162: 895, // OptWild (0x) + 57482: 896, // outer (0x) + 58168: 897, // OuterOpt (0x) + 57483: 898, // packKeys (0x) + 57484: 899, // partition (0x) + 57355: 900, // pipes (0x) + 57490: 901, // preSplitRegions (0x) + 57488: 902, // procedure (0x) + 57491: 903, // rangeKwd (0x) + 57492: 904, // read (0x) + 57494: 905, // references (0x) + 57495: 906, // regexpKwd (0x) + 57499: 907, // require (0x) + 57501: 908, // revoke (0x) + 57503: 909, // rlike (0x) + 57505: 910, // secondMicrosecond (0x) + 57489: 911, // shardRowIDBits (0x) + 58196: 912, // ShowIndexKwd (0x) + 58199: 913, // ShowTableAliasOpt (0x) + 57511: 914, // sql (0x) + 57515: 
915, // ssl (0x) + 57516: 916, // starting (0x) + 58216: 917, // TableAliasRefList (0x) + 58225: 918, // TableNameListOpt (0x) + 58226: 919, // TableNameOptWild (0x) + 57981: 920, // tableRefPriority (0x) + 57520: 921, // terminated (0x) + 57521: 922, // then (0x) + 57526: 923, // trailing (0x) + 57527: 924, // trigger (0x) + 57530: 925, // union (0x) + 57531: 926, // unlock (0x) + 57533: 927, // until (0x) + 57535: 928, // usage (0x) + 57548: 929, // when (0x) + 58252: 930, // WithValidation (0x) + 58253: 931, // WithValidationOpt (0x) + 57550: 932, // write (0x) + 57553: 933, // yearMonth (0x) + } + + yySymNames = []string{ + "comment", + "serial", + "autoIncrement", + "autoRandom", + "columnFormat", + "storage", + "$end", + "';'", + "')'", + "','", + "signed", + "charsetKwd", + "hintAggToCop", + "hintEnablePlanCache", + "hintHASHAGG", + "hintHJ", + "hintIgnoreIndex", + "hintINLHJ", + "hintINLJ", + "hintINLMJ", + "hintMemoryQuota", + "hintNoIndexMerge", + "hintNSJI", + "hintQBName", + "hintQueryType", + "hintReadConsistentReplica", + "hintReadFromStorage", + "hintSJI", + "hintSMJ", + "hintSTREAMAGG", + "hintUseIndex", + "hintUseIndexMerge", + "hintUsePlanCache", + "hintUseToja", + "maxExecutionTime", + "tp", + "invisible", + "visible", + "keyBlockSize", + "ascii", + "byteType", + "unicodeSym", + "encryption", + "tables", + "enforced", + "btree", + "format", + "hash", + "rtree", + "value", + "variables", + "hintTiFlash", + "hintTiKV", + "offset", + "processlist", + "unknown", + "admin", + "begin", + "commit", + "disable", + "discard", + "enable", + "fixed", + "hintOLAP", + "hintOLTP", + "importKwd", + "jsonType", + "modify", + "quick", + "rollback", + "secondaryLoad", + "secondaryUnload", + "start", + "tablespace", + "temporary", + "truncate", + "validation", + "without", + "always", + "bitType", + "booleanType", + "boolType", + "datetimeType", + "dateType", + "ddl", + "disk", + "dynamic", + "enum", + "full", + "global", + "identSQLErrors", + "jobs", + "memory", + "national", + "ncharType", + "session", + "sqlTsiYear", + "textType", + "timestampType", + "timeType", + "traditional", + "transaction", + "warnings", + "yearType", + "account", + "action", + "addDate", + "advise", + "after", + "against", + "algorithm", + "any", + "avg", + "avgRowLength", + "binding", + "bindings", + "binlog", + "bitAnd", + "bitOr", + "bitXor", + "block", + "bound", + "buckets", + "builtins", + "cache", + "cancel", + "capture", + "cascaded", + "cast", + "checksum", + "cipher", + "cleanup", + "client", + "cmSketch", + "coalesce", + "collation", + "columns", + "committed", + "compact", + "compressed", + "compression", + "connection", + "consistent", + "context", + "copyKwd", + "count", + "cpu", + "current", + "curTime", + "cycle", + "data", + "dateAdd", + "dateSub", + "day", + "deallocate", + "definer", + "delayKeyWrite", + "depth", + "directory", + "do", + "drainer", + "duplicate", + "end", + "engine", + "engines", + "escape", + "event", + "events", + "evolve", + "exact", + "exchange", + "exclusive", + "execute", + "expansion", + "expire", + "exprPushdownBlacklist", + "extended", + "extract", + "faultsSym", + "fields", + "first", + "flashback", + "flush", + "following", + "function", + "getFormat", + "grants", + "groupConcat", + "history", + "hosts", + "hour", + "identified", + "identifier", + "increment", + "incremental", + "indexes", + "inplace", + "insertMethod", + "instant", + "internal", + "invoker", + "io", + "ipc", + "isolation", + "issuer", + "job", + "labels", + "last", + "less", + "level", + "list", + 
"local", + "location", + "logs", + "master", + "max", + "max_idxnum", + "max_minutes", + "maxConnectionsPerHour", + "maxQueriesPerHour", + "maxRows", + "maxUpdatesPerHour", + "maxUserConnections", + "merge", + "microsecond", + "min", + "minRows", + "minute", + "minValue", + "mode", + "month", + "names", + "never", + "next_row_id", + "no", + "nocache", + "nocycle", + "nodegroup", + "nodeID", + "nodeState", + "nomaxvalue", + "nominvalue", + "none", + "noorder", + "now", + "nowait", + "nulls", + "only", + "open", + "optimistic", + "optRuleBlacklist", + "pageSym", + "partial", + "partitioning", + "partitions", + "password", + "per_db", + "per_table", + "pessimistic", + "plugins", + "position", + "preceding", + "prepare", + "privileges", + "process", + "profile", + "profiles", + "pump", + "quarter", + "queries", + "query", + "rebuild", + "recent", + "recover", + "redundant", + "region", + "regions", + "reload", + "remove", + "reorganize", + "repair", + "repeatable", + "replica", + "replication", + "respect", + "reverse", + "role", + "routine", + "rowCount", + "rowFormat", + "samples", + "second", + "secondaryEngine", + "security", + "separator", + "sequence", + "serializable", + "share", + "shared", + "shutdown", + "simple", + "slave", + "slow", + "snapshot", + "some", + "source", + "split", + "sqlBufferResult", + "sqlCache", + "sqlNoCache", + "sqlTsiDay", + "sqlTsiHour", + "sqlTsiMinute", + "sqlTsiMonth", + "sqlTsiQuarter", + "sqlTsiSecond", + "sqlTsiWeek", + "staleness", + "stats", + "statsAutoRecalc", + "statsBuckets", + "statsHealthy", + "statsHistograms", + "statsMeta", + "statsPersistent", + "statsSamplePages", + "status", + "std", + "stddev", + "stddevPop", + "stddevSamp", + "strong", + "subDate", + "subject", + "subpartition", + "subpartitions", + "substring", + "sum", + "super", + "swaps", + "switchesSym", + "systemTime", + "tableChecksum", + "temptable", + "than", + "tidb", + "timestampAdd", + "timestampDiff", + "tokudbDefault", + "tokudbFast", + "tokudbLzma", + "tokudbQuickLZ", + "tokudbSmall", + "tokudbSnappy", + "tokudbUncompressed", + "tokudbZlib", + "top", + "topn", + "trace", + "triggers", + "trim", + "unbounded", + "uncommitted", + "undefined", + "user", + "variance", + "varPop", + "varSamp", + "view", + "week", + "width", + "x509", + "not", + "'('", + "defaultKwd", + "as", + "null", + "collate", + "stringLit", + "'+'", + "'-'", + "mod", + "key", + "primary", + "limit", + "on", + "order", + "check", + "unique", + "constraint", + "generated", + "using", + "and", + "andand", + "having", + "or", + "pipesAsOr", + "xor", + "from", + "group", + "'.'", + "'*'", + "'}'", + "eq", + "singleAtIdentifier", + "ifKwd", + "intLit", + "desc", + "asc", + "forKwd", + "replace", + "'<'", + "'>'", + "ge", + "is", + "le", + "neq", + "neqSynonym", + "nulleq", + "falseKwd", + "trueKwd", + "'%'", + "'&'", + "'/'", + "'^'", + "'|'", + "div", + "lsh", + "rsh", + "values", + "decLit", + "floatLit", + "in", + "database", + "between", + "bitLit", + "builtinNow", + "currentTs", + "doubleAtIdentifier", + "hexLit", + "localTime", + "localTs", + "underscoreCS", + "'!'", + "'~'", + "builtinCount", + "builtinCurDate", + "builtinCurTime", + "builtinMax", + "builtinMin", + "builtinPosition", + "builtinSubstring", + "builtinSum", + "builtinSysDate", + "builtinTrim", + "builtinUser", + "convert", + "currentDate", + "currentRole", + "currentTime", + "currentUser", + "interval", + "left", + "not2", + "repeat", + "right", + "row", + "utcDate", + "utcTime", + "utcTimestamp", + "character", + "charType", + "binaryType", 
+ "where", + "with", + "index", + "join", + "inner", + "selectKwd", + "force", + "set", + "use", + "assignmentEq", + "ignore", + "drop", + "cascade", + "fulltext", + "restrict", + "']'", + "varcharacter", + "varcharType", + "alter", + "to", + "varbinaryType", + "add", + "bigIntType", + "blobType", + "change", + "decimalType", + "doubleType", + "floatType", + "int1Type", + "int2Type", + "int3Type", + "int4Type", + "int8Type", + "integerType", + "intType", + "like", + "long", + "longblobType", + "longtextType", + "mediumblobType", + "mediumIntType", + "mediumtextType", + "numericType", + "nvarcharType", + "realType", + "rename", + "smallIntType", + "tinyblobType", + "tinyIntType", + "tinytextType", + "Identifier", + "NotKeywordToken", + "TiDBKeyword", + "UnReservedKeyword", + "Literal", + "SimpleIdent", + "StringLiteral", + "FunctionCallGeneric", + "FunctionCallKeyword", + "FunctionCallNonKeyword", + "FunctionNameConflict", + "FunctionNameDatetimePrecision", + "FunctionNameOptionalBraces", + "SimpleExpr", + "SumExpr", + "SystemVariable", + "UserVariable", + "Variable", + "BitExpr", + "PredicateExpr", + "BoolPri", + "Expression", + "unsigned", + "zerofill", + "logAnd", + "logOr", + "'{'", + "hintEnd", + "straightJoin", + "QueryBlockOpt", + "sqlCalcFoundRows", + "ColumnName", + "TableName", + "FieldLen", + "sqlBigResult", + "sqlSmallResult", + "CharsetKw", + "delayed", + "highPriority", + "lowPriority", + "HintTable", + "NUM", + "OptFieldLen", + "SelectStmt", + "SelectStmtBasic", + "SelectStmtFromDualTable", + "SelectStmtFromTable", + "deleteKwd", + "insert", + "OptBinary", + "tableKwd", + "HintTableList", + "IfExists", + "KeyOrIndex", + "LengthNum", + "ConstraintKeywordOpt", + "ExprOrDefault", + "into", + "StringName", + "varying", + "column", + "ColumnDef", + "EqOrAssignmentEq", + "ExpressionList", + "IfNotExists", + "IndexInvisible", + "IndexPartSpecification", + "IndexType", + "ColumnKeywordOpt", + "DBName", + "DeleteFromStmt", + "FieldOpt", + "FieldOpts", + "IndexOption", + "IndexOptionList", + "IndexPartSpecificationList", + "InsertIntoStmt", + "JoinTable", + "ReplaceIntoStmt", + "TableFactor", + "TableRef", + "VariableName", + "WhereClause", + "WhereClauseOptional", + "all", + "by", + "CharsetName", + "Constraint", + "distinct", + "distinctRow", + "EqOpt", + "IndexName", + "IndexNameList", + "IndexTypeName", + "LimitOption", + "OrderBy", + "OrderByOptional", + "PriorityOpt", + "SetExpr", + "'['", + "ByItem", + "ColumnOption", + "create", + "CrossOpt", + "EnforcedOrNot", + "EscapedTableRef", + "ExplainableStmt", + "ExpressionListOpt", + "GeneratedAlways", + "IndexHint", + "IndexHintType", + "IndexNameAndTypeOpt", + "OptCharset", + "OptCharsetWithOptBinary", + "Order", + "PrimaryOpt", + "RowValue", + "SelectStmtLimit", + "show", + "StorageOptimizerHintOpt", + "TableAsName", + "TableElement", + "TableOptimizerHintOpt", + "ValueSym", + "AdminStmt", + "AlterTableSpec", + "AlterTableStmt", + "analyze", + "AnalyzeTableStmt", + "BeginTransactionStmt", + "ByList", + "CollationName", + "ColumnOptionList", + "ColumnOptionListOpt", + "ColumnSetValue", + "CommitStmt", + "CreateDatabaseStmt", + "CreateIndexStmt", + "CreateTableStmt", + "DatabaseOption", + "DatabaseSym", + "DefaultKwdOpt", + "describe", + "DropDatabaseStmt", + "DropIndexStmt", + "DropTableStmt", + "EmptyStmt", + "EnforcedOrNotOpt", + "exists", + "explain", + "ExplainStmt", + "ExplainSym", + "Field", + "FieldAsName", + "FieldAsNameOpt", + "FloatOpt", + "FuncDatetimePrecList", + "FuncDatetimePrecListOpt", + "HintStorageType", + 
"HintStorageTypeAndTable", + "HintTrueOrFalse", + "IndexHintList", + "IndexHintListOpt", + "InsertValues", + "IntoOpt", + "KeyOrIndexOpt", + "keys", + "NowSym", + "NowSymFunc", + "NowSymOptionFraction", + "NumLiteral", + "OptTemporary", + "Precision", + "RestrictOrCascadeOpt", + "RollbackStmt", + "SetStmt", + "ShowStmt", + "SignedLiteral", + "Statement", + "StringList", + "Symbol", + "TableAsNameOpt", + "TableElementList", + "TableNameList", + "TableRefs", + "TruncateTableStmt", + "UseStmt", + "ValuesList", + "Varchar", + "VariableAssignment", + "AlterTableSpecList", + "AlterTableSpecListOpt", + "AsOpt", + "BetweenOrNotOp", + "BitValueType", + "BlobType", + "BooleanType", + "Char", + "ColumnFormat", + "ColumnNameList", + "ColumnNameListOpt", + "ColumnSetValueList", + "CompareOp", + "ConstraintElem", + "DatabaseOptionList", + "DatabaseOptionListOpt", + "databases", + "DateAndTimeType", + "DefaultFalseDistinctOpt", + "DefaultValueExpr", + "DistinctKwd", + "DistinctOpt", + "dual", + "EnforcedOrNotOrNotNullOpt", + "error", + "ExplainFormatType", + "FieldList", + "FixedPointType", + "FloatingPointType", + "foreign", + "FromDual", + "FromOrIn", + "FuncDatetimePrec", + "GlobalScope", + "GroupByClause", + "HavingClause", + "hintBegin", + "HintMemoryQuota", + "HintQueryType", + "HintStorageTypeAndTableList", + "IndexHintScope", + "IndexKeyTypeOpt", + "IndexTypeOpt", + "InOrNotOp", + "IntegerType", + "IsOrNotOp", + "LikeTableWithOrWithoutParen", + "LimitClause", + "NChar", + "NumericType", + "NVarchar", + "OptBinMod", + "OptFull", + "OptimizerHintList", + "OptionalBraces", + "OptTable", + "parser", + "precisionType", + "QuickOptional", + "SelectStmtCalcFoundRows", + "SelectStmtFieldList", + "SelectStmtGroup", + "SelectStmtOpts", + "SelectStmtSQLBigResult", + "SelectStmtSQLBufferResult", + "SelectStmtSQLCache", + "SelectStmtSQLSmallResult", + "SelectStmtStraightJoin", + "ShowDatabaseNameOpt", + "ShowLikeOrWhereOpt", + "ShowTargetFilterable", + "spatial", + "Start", + "StatementList", + "StorageMedia", + "stored", + "StringType", + "TableElementListOpt", + "TableOptimizerHints", + "TableOrTables", + "TableRefsClause", + "TextType", + "Type", + "update", + "Values", + "ValuesOpt", + "VariableAssignmentList", + "virtual", + "VirtualOrStored", + "Year", + "$default", + "andnot", + "AnyOrAll", + "Assignment", + "AssignmentList", + "AssignmentListOpt", + "both", + "builtinAddDate", + "builtinBitAnd", + "builtinBitOr", + "builtinBitXor", + "builtinCast", + "builtinDateAdd", + "builtinDateSub", + "builtinExtract", + "builtinGroupConcat", + "builtinStddevPop", + "builtinStddevSamp", + "builtinSubDate", + "builtinVarPop", + "builtinVarSamp", + "caseKwd", + "CastType", + "CharsetNameOrDefault", + "ColumnDefList", + "CommaOpt", + "createTableSelect", + "cross", + "dayHour", + "dayMicrosecond", + "dayMinute", + "daySecond", + "DefaultTrueDistinctOpt", + "elseKwd", + "empty", + "enclosed", + "escaped", + "except", + "ExpressionOpt", + "FunctionNameDateArith", + "FunctionNameDateArithMultiForms", + "grant", + "higherThanComma", + "hourMicrosecond", + "hourMinute", + "hourSecond", + "IndexPartSpecificationListOpt", + "infile", + "insertValues", + "invalid", + "JoinType", + "jss", + "juss", + "kill", + "language", + "leading", + "LikeEscapeOpt", + "linear", + "lines", + "load", + "LocationLabelList", + "lock", + "lowerThanCharsetKwd", + "lowerThanComma", + "lowerThanCreateTableSelect", + "lowerThanEq", + "lowerThanInsertValues", + "lowerThanIntervalKeyword", + "lowerThanKey", + "lowerThanLocal", + "lowerThanNot", + 
"lowerThanOn", + "lowerThanRemove", + "lowerThanSetKeyword", + "lowerThanStringLitToken", + "lowerThenOrder", + "match", + "maxValue", + "minuteMicrosecond", + "minuteSecond", + "natural", + "neg", + "noWriteToBinLog", + "odbcDateType", + "odbcTimestampType", + "odbcTimeType", + "OptCollate", + "OptGConcatSeparator", + "optimize", + "OptInteger", + "option", + "optionally", + "OptWild", + "outer", + "OuterOpt", + "packKeys", + "partition", + "pipes", + "preSplitRegions", + "procedure", + "rangeKwd", + "read", + "references", + "regexpKwd", + "require", + "revoke", + "rlike", + "secondMicrosecond", + "shardRowIDBits", + "ShowIndexKwd", + "ShowTableAliasOpt", + "sql", + "ssl", + "starting", + "TableAliasRefList", + "TableNameListOpt", + "TableNameOptWild", + "tableRefPriority", + "terminated", + "then", + "trailing", + "trigger", + "union", + "unlock", + "until", + "usage", + "when", + "WithValidation", + "WithValidationOpt", + "write", + "yearMonth", + } + + yyReductions = []struct{ xsym, components int }{ + {0, 1}, + {785, 1}, + {649, 4}, + {863, 0}, + {863, 3}, + {648, 4}, + {648, 6}, + {648, 2}, + {648, 5}, + {648, 3}, + {648, 2}, + {648, 2}, + {648, 4}, + {648, 5}, + {648, 2}, + {648, 2}, + {648, 4}, + {648, 5}, + {648, 6}, + {648, 8}, + {648, 5}, + {648, 5}, + {648, 5}, + {648, 1}, + {648, 2}, + {648, 2}, + {648, 1}, + {648, 1}, + {648, 4}, + {648, 3}, + {648, 4}, + {931, 0}, + {931, 1}, + {930, 2}, + {930, 2}, + {576, 1}, + {576, 1}, + {688, 0}, + {688, 1}, + {591, 0}, + {591, 1}, + {714, 0}, + {714, 1}, + {713, 1}, + {713, 3}, + {578, 0}, + {578, 1}, + {578, 2}, + {703, 1}, + {651, 3}, + {806, 3}, + {807, 1}, + {807, 3}, + {808, 0}, + {808, 1}, + {652, 1}, + {652, 2}, + {827, 1}, + {827, 3}, + {584, 3}, + {584, 3}, + {554, 1}, + {554, 3}, + {554, 5}, + {722, 1}, + {722, 3}, + {723, 0}, + {723, 1}, + {658, 1}, + {638, 0}, + {638, 1}, + {627, 1}, + {627, 2}, + {670, 0}, + {670, 1}, + {736, 2}, + {736, 1}, + {624, 2}, + {624, 1}, + {624, 1}, + {624, 2}, + {624, 1}, + {624, 2}, + {624, 2}, + {624, 3}, + {624, 3}, + {624, 2}, + {624, 6}, + {624, 6}, + {624, 2}, + {624, 2}, + {624, 2}, + {624, 2}, + {787, 1}, + {787, 1}, + {787, 1}, + {721, 1}, + {721, 1}, + {721, 1}, + {631, 0}, + {631, 2}, + {801, 0}, + {801, 1}, + {801, 1}, + {655, 1}, + {655, 2}, + {656, 0}, + {656, 1}, + {726, 7}, + {726, 7}, + {726, 7}, + {726, 7}, + {726, 5}, + {732, 1}, + {732, 1}, + {692, 1}, + {692, 3}, + {692, 4}, + {691, 1}, + {691, 1}, + {691, 1}, + {691, 1}, + {690, 1}, + {690, 1}, + {690, 1}, + {700, 1}, + {700, 2}, + {700, 2}, + {693, 1}, + {693, 1}, + {693, 1}, + {660, 12}, + {849, 0}, + {849, 3}, + {598, 1}, + {598, 3}, + {589, 3}, + {589, 4}, + {754, 0}, + {754, 1}, + {754, 1}, + {754, 1}, + {659, 5}, + {592, 1}, + {662, 4}, + {662, 4}, + {662, 4}, + {728, 0}, + {728, 1}, + {727, 1}, + {727, 2}, + {661, 7}, + {661, 6}, + {664, 0}, + {664, 1}, + {715, 0}, + {715, 1}, + {759, 2}, + {759, 4}, + {593, 10}, + {663, 1}, + {666, 4}, + {667, 6}, + {668, 6}, + {694, 0}, + {694, 1}, + {696, 0}, + {696, 1}, + {696, 1}, + {792, 1}, + {792, 1}, + {613, 0}, + {613, 1}, + {669, 0}, + {674, 1}, + {674, 1}, + {674, 1}, + {673, 2}, + {673, 5}, + {673, 5}, + {738, 1}, + {738, 1}, + {577, 1}, + {564, 1}, + {544, 3}, + {544, 3}, + {544, 3}, + {544, 3}, + {544, 2}, + {544, 3}, + {544, 1}, + {548, 1}, + {548, 1}, + {547, 1}, + {547, 1}, + {586, 1}, + {586, 3}, + {630, 0}, + {630, 1}, + {680, 0}, + {680, 1}, + {679, 1}, + {543, 3}, + {543, 3}, + {543, 5}, + {543, 1}, + {725, 1}, + {725, 1}, + {725, 1}, + {725, 1}, + {725, 1}, + 
{725, 1}, + {725, 1}, + {725, 1}, + {716, 1}, + {716, 2}, + {758, 1}, + {758, 2}, + {756, 1}, + {756, 2}, + {805, 1}, + {805, 1}, + {805, 1}, + {542, 5}, + {542, 5}, + {542, 1}, + {859, 0}, + {859, 2}, + {675, 1}, + {675, 3}, + {675, 5}, + {675, 2}, + {675, 5}, + {677, 0}, + {677, 1}, + {676, 1}, + {676, 2}, + {676, 1}, + {676, 2}, + {739, 1}, + {739, 3}, + {747, 3}, + {748, 0}, + {748, 2}, + {575, 0}, + {575, 2}, + {587, 0}, + {587, 3}, + {614, 0}, + {614, 1}, + {597, 0}, + {597, 2}, + {596, 3}, + {596, 1}, + {596, 3}, + {596, 2}, + {596, 1}, + {634, 1}, + {634, 3}, + {634, 3}, + {755, 0}, + {755, 1}, + {590, 2}, + {590, 2}, + {616, 1}, + {616, 1}, + {616, 1}, + {588, 1}, + {588, 1}, + {523, 1}, + {523, 1}, + {523, 1}, + {523, 1}, + {526, 1}, + {526, 1}, + {526, 1}, + {526, 1}, + {526, 1}, + {526, 1}, + {526, 1}, + {526, 1}, + {526, 1}, + {526, 1}, + {526, 1}, + {526, 1}, + {526, 1}, + {526, 1}, + {526, 1}, + {526, 1}, + {526, 1}, + {526, 1}, + {526, 1}, + {526, 1}, + {526, 1}, + {526, 1}, + {526, 1}, + {526, 1}, + {526, 1}, + {526, 1}, + {526, 1}, + {526, 1}, + {526, 1}, + {526, 1}, + {526, 1}, + {526, 1}, + {526, 1}, + {526, 1}, + {526, 1}, + {526, 1}, + {526, 1}, + {526, 1}, + {526, 1}, + {526, 1}, + {526, 1}, + {526, 1}, + {526, 1}, + {526, 1}, + {526, 1}, + {526, 1}, + {526, 1}, + {526, 1}, + {526, 1}, + {526, 1}, + {526, 1}, + {526, 1}, + {526, 1}, + {526, 1}, + {526, 1}, + {526, 1}, + {526, 1}, + {526, 1}, + {526, 1}, + {526, 1}, + {526, 1}, + {526, 1}, + {526, 1}, + {526, 1}, + {526, 1}, + {526, 1}, + {526, 1}, + {526, 1}, + {526, 1}, + {526, 1}, + {526, 1}, + {526, 1}, + {526, 1}, + {526, 1}, + {526, 1}, + {526, 1}, + {526, 1}, + {526, 1}, + {526, 1}, + {526, 1}, + {526, 1}, + {526, 1}, + {526, 1}, + {526, 1}, + {526, 1}, + {526, 1}, + {526, 1}, + {526, 1}, + {526, 1}, + {526, 1}, + {526, 1}, + {526, 1}, + {526, 1}, + {526, 1}, + {526, 1}, + {526, 1}, + {526, 1}, + {526, 1}, + {526, 1}, + {526, 1}, + {526, 1}, + {526, 1}, + {526, 1}, + {526, 1}, + {526, 1}, + {526, 1}, + {526, 1}, + {526, 1}, + {526, 1}, + {526, 1}, + {526, 1}, + {526, 1}, + {526, 1}, + {526, 1}, + {526, 1}, + {526, 1}, + {526, 1}, + {526, 1}, + {526, 1}, + {526, 1}, + {526, 1}, + {526, 1}, + {526, 1}, + {526, 1}, + {526, 1}, + {526, 1}, + {526, 1}, + {526, 1}, + {526, 1}, + {526, 1}, + {526, 1}, + {526, 1}, + {526, 1}, + {526, 1}, + {526, 1}, + {526, 1}, + {526, 1}, + {526, 1}, + {526, 1}, + {526, 1}, + {526, 1}, + {526, 1}, + {526, 1}, + {526, 1}, + {526, 1}, + {526, 1}, + {526, 1}, + {526, 1}, + {526, 1}, + {526, 1}, + {526, 1}, + {526, 1}, + {526, 1}, + {526, 1}, + {526, 1}, + {526, 1}, + {526, 1}, + {526, 1}, + {526, 1}, + {526, 1}, + {526, 1}, + {526, 1}, + {526, 1}, + {526, 1}, + {526, 1}, + {526, 1}, + {526, 1}, + {526, 1}, + {526, 1}, + {526, 1}, + {526, 1}, + {526, 1}, + {526, 1}, + {526, 1}, + {526, 1}, + {526, 1}, + {526, 1}, + {526, 1}, + {526, 1}, + {526, 1}, + {526, 1}, + {526, 1}, + {526, 1}, + {526, 1}, + {526, 1}, + {526, 1}, + {526, 1}, + {526, 1}, + {526, 1}, + {526, 1}, + {526, 1}, + {526, 1}, + {526, 1}, + {526, 1}, + {526, 1}, + {526, 1}, + {526, 1}, + {526, 1}, + {526, 1}, + {526, 1}, + {526, 1}, + {526, 1}, + {526, 1}, + {526, 1}, + {526, 1}, + {526, 1}, + {526, 1}, + {526, 1}, + {526, 1}, + {526, 1}, + {526, 1}, + {526, 1}, + {526, 1}, + {526, 1}, + {526, 1}, + {526, 1}, + {526, 1}, + {526, 1}, + {526, 1}, + {526, 1}, + {526, 1}, + {526, 1}, + {526, 1}, + {526, 1}, + {526, 1}, + {526, 1}, + {526, 1}, + {526, 1}, + {526, 1}, + {526, 1}, + {526, 1}, + {526, 1}, + {526, 1}, + {526, 1}, + 
{526, 1}, + {526, 1}, + {526, 1}, + {526, 1}, + {526, 1}, + {526, 1}, + {526, 1}, + {526, 1}, + {526, 1}, + {526, 1}, + {526, 1}, + {526, 1}, + {526, 1}, + {526, 1}, + {526, 1}, + {526, 1}, + {526, 1}, + {526, 1}, + {526, 1}, + {526, 1}, + {526, 1}, + {526, 1}, + {526, 1}, + {526, 1}, + {526, 1}, + {526, 1}, + {526, 1}, + {526, 1}, + {525, 1}, + {525, 1}, + {525, 1}, + {525, 1}, + {525, 1}, + {525, 1}, + {525, 1}, + {525, 1}, + {525, 1}, + {525, 1}, + {525, 1}, + {525, 1}, + {525, 1}, + {525, 1}, + {525, 1}, + {525, 1}, + {525, 1}, + {525, 1}, + {525, 1}, + {525, 1}, + {525, 1}, + {525, 1}, + {525, 1}, + {525, 1}, + {525, 1}, + {525, 1}, + {525, 1}, + {525, 1}, + {525, 1}, + {525, 1}, + {525, 1}, + {525, 1}, + {525, 1}, + {525, 1}, + {525, 1}, + {525, 1}, + {525, 1}, + {525, 1}, + {525, 1}, + {525, 1}, + {525, 1}, + {525, 1}, + {525, 1}, + {525, 1}, + {525, 1}, + {525, 1}, + {525, 1}, + {525, 1}, + {525, 1}, + {525, 1}, + {525, 1}, + {525, 1}, + {525, 1}, + {524, 1}, + {524, 1}, + {524, 1}, + {524, 1}, + {524, 1}, + {524, 1}, + {524, 1}, + {524, 1}, + {524, 1}, + {524, 1}, + {524, 1}, + {524, 1}, + {524, 1}, + {524, 1}, + {524, 1}, + {524, 1}, + {524, 1}, + {524, 1}, + {524, 1}, + {524, 1}, + {524, 1}, + {524, 1}, + {524, 1}, + {524, 1}, + {524, 1}, + {524, 1}, + {524, 1}, + {524, 1}, + {524, 1}, + {524, 1}, + {524, 1}, + {524, 1}, + {524, 1}, + {524, 1}, + {524, 1}, + {524, 1}, + {524, 1}, + {524, 1}, + {524, 1}, + {524, 1}, + {524, 1}, + {524, 1}, + {524, 1}, + {524, 1}, + {524, 1}, + {524, 1}, + {524, 1}, + {524, 1}, + {524, 1}, + {524, 1}, + {524, 1}, + {524, 1}, + {599, 5}, + {687, 0}, + {687, 1}, + {686, 5}, + {686, 4}, + {686, 6}, + {686, 2}, + {686, 3}, + {686, 1}, + {686, 2}, + {646, 1}, + {646, 1}, + {710, 1}, + {710, 3}, + {639, 3}, + {798, 0}, + {798, 1}, + {797, 3}, + {797, 1}, + {579, 1}, + {579, 1}, + {657, 3}, + {724, 0}, + {724, 1}, + {724, 3}, + {601, 5}, + {527, 1}, + {527, 1}, + {527, 1}, + {527, 1}, + {527, 1}, + {527, 1}, + {527, 1}, + {527, 2}, + {527, 1}, + {527, 1}, + {529, 1}, + {529, 2}, + {618, 3}, + {653, 1}, + {653, 3}, + {623, 2}, + {637, 0}, + {637, 1}, + {637, 1}, + {619, 0}, + {619, 1}, + {541, 3}, + {541, 3}, + {541, 3}, + {541, 3}, + {541, 3}, + {541, 3}, + {541, 3}, + {541, 3}, + {541, 3}, + {541, 3}, + {541, 3}, + {541, 3}, + {541, 1}, + {528, 1}, + {528, 3}, + {528, 4}, + {528, 5}, + {536, 1}, + {536, 1}, + {536, 1}, + {536, 1}, + {536, 3}, + {536, 1}, + {536, 1}, + {536, 1}, + {536, 2}, + {536, 2}, + {536, 2}, + {536, 2}, + {536, 2}, + {536, 3}, + {536, 5}, + {536, 6}, + {536, 6}, + {536, 4}, + {536, 4}, + {733, 1}, + {733, 1}, + {734, 1}, + {734, 1}, + {731, 0}, + {731, 1}, + {835, 0}, + {835, 1}, + {533, 1}, + {533, 1}, + {533, 1}, + {533, 1}, + {533, 1}, + {533, 1}, + {533, 1}, + {533, 1}, + {533, 1}, + {533, 1}, + {533, 1}, + {533, 1}, + {533, 1}, + {533, 1}, + {533, 1}, + {533, 1}, + {533, 1}, + {533, 1}, + {533, 1}, + {533, 1}, + {533, 1}, + {533, 1}, + {533, 1}, + {533, 1}, + {533, 1}, + {533, 1}, + {533, 1}, + {533, 1}, + {533, 1}, + {767, 0}, + {767, 2}, + {535, 1}, + {535, 1}, + {535, 1}, + {535, 1}, + {534, 1}, + {534, 1}, + {534, 1}, + {534, 1}, + {534, 1}, + {534, 1}, + {531, 4}, + {531, 4}, + {531, 2}, + {531, 3}, + {531, 2}, + {531, 6}, + {532, 4}, + {532, 4}, + {532, 6}, + {532, 6}, + {532, 6}, + {532, 8}, + {532, 8}, + {532, 4}, + {532, 6}, + {842, 1}, + {842, 1}, + {843, 1}, + {843, 1}, + {537, 4}, + {537, 4}, + {537, 4}, + {537, 4}, + {537, 4}, + {537, 4}, + {890, 0}, + {890, 2}, + {530, 4}, + {745, 0}, + {745, 2}, + {745, 3}, + 
{841, 0}, + {841, 1}, + {825, 2}, + {825, 3}, + {825, 1}, + {825, 2}, + {825, 2}, + {825, 2}, + {825, 2}, + {825, 2}, + {825, 1}, + {825, 1}, + {825, 2}, + {825, 1}, + {620, 0}, + {620, 1}, + {620, 1}, + {620, 1}, + {555, 1}, + {555, 3}, + {706, 1}, + {706, 3}, + {919, 2}, + {919, 4}, + {917, 1}, + {917, 3}, + {895, 0}, + {895, 2}, + {771, 0}, + {771, 1}, + {697, 1}, + {567, 3}, + {568, 3}, + {569, 6}, + {566, 3}, + {566, 3}, + {566, 3}, + {743, 2}, + {793, 1}, + {707, 1}, + {707, 3}, + {628, 1}, + {628, 4}, + {603, 1}, + {603, 1}, + {602, 3}, + {602, 4}, + {602, 3}, + {704, 0}, + {704, 1}, + {643, 1}, + {643, 2}, + {633, 2}, + {633, 2}, + {633, 2}, + {753, 0}, + {753, 2}, + {753, 3}, + {753, 3}, + {632, 5}, + {615, 0}, + {615, 1}, + {615, 3}, + {615, 1}, + {615, 3}, + {684, 1}, + {684, 2}, + {685, 0}, + {685, 1}, + {600, 3}, + {853, 1}, + {853, 1}, + {897, 0}, + {897, 1}, + {626, 1}, + {626, 2}, + {760, 0}, + {760, 2}, + {617, 1}, + {640, 0}, + {640, 2}, + {640, 4}, + {640, 4}, + {775, 9}, + {791, 0}, + {791, 3}, + {791, 3}, + {766, 1}, + {766, 1}, + {766, 2}, + {766, 3}, + {766, 2}, + {766, 3}, + {645, 6}, + {645, 6}, + {645, 5}, + {645, 5}, + {645, 5}, + {645, 5}, + {645, 5}, + {645, 5}, + {645, 5}, + {645, 6}, + {645, 5}, + {645, 5}, + {645, 5}, + {645, 4}, + {645, 5}, + {645, 5}, + {645, 4}, + {645, 4}, + {645, 4}, + {645, 4}, + {645, 4}, + {645, 4}, + {642, 5}, + {752, 1}, + {752, 3}, + {682, 4}, + {552, 0}, + {552, 1}, + {563, 2}, + {563, 4}, + {574, 1}, + {574, 3}, + {683, 1}, + {683, 1}, + {681, 1}, + {681, 1}, + {751, 1}, + {751, 1}, + {750, 2}, + {772, 0}, + {772, 1}, + {776, 0}, + {776, 1}, + {777, 0}, + {777, 1}, + {778, 0}, + {778, 1}, + {778, 1}, + {779, 0}, + {779, 1}, + {780, 0}, + {780, 1}, + {773, 1}, + {774, 0}, + {774, 1}, + {698, 2}, + {621, 1}, + {621, 1}, + {585, 1}, + {585, 1}, + {604, 1}, + {604, 3}, + {712, 3}, + {712, 4}, + {712, 4}, + {712, 4}, + {712, 3}, + {712, 3}, + {826, 1}, + {826, 1}, + {609, 1}, + {609, 1}, + {654, 1}, + {799, 0}, + {799, 1}, + {799, 3}, + {540, 1}, + {540, 1}, + {538, 1}, + {539, 1}, + {647, 3}, + {647, 5}, + {647, 6}, + {699, 3}, + {699, 4}, + {699, 5}, + {699, 3}, + {912, 1}, + {912, 1}, + {912, 1}, + {744, 1}, + {744, 1}, + {783, 1}, + {783, 3}, + {783, 1}, + {783, 1}, + {783, 2}, + {782, 0}, + {782, 2}, + {746, 0}, + {746, 1}, + {746, 1}, + {765, 0}, + {765, 1}, + {781, 0}, + {781, 2}, + {913, 2}, + {918, 0}, + {918, 1}, + {701, 1}, + {701, 1}, + {701, 1}, + {701, 1}, + {701, 1}, + {701, 1}, + {701, 1}, + {701, 1}, + {701, 1}, + {701, 1}, + {701, 1}, + {701, 1}, + {701, 1}, + {701, 1}, + {701, 1}, + {701, 1}, + {701, 1}, + {701, 1}, + {701, 1}, + {701, 1}, + {701, 1}, + {701, 1}, + {629, 1}, + {629, 1}, + {629, 1}, + {629, 1}, + {786, 1}, + {786, 3}, + {610, 2}, + {644, 1}, + {644, 1}, + {705, 1}, + {705, 3}, + {790, 0}, + {790, 3}, + {768, 0}, + {768, 1}, + {708, 3}, + {795, 1}, + {795, 1}, + {795, 1}, + {762, 3}, + {762, 2}, + {762, 3}, + {762, 3}, + {762, 2}, + {757, 1}, + {757, 1}, + {757, 1}, + {757, 1}, + {757, 1}, + {757, 1}, + {757, 1}, + {757, 1}, + {757, 1}, + {757, 1}, + {757, 1}, + {719, 1}, + {719, 1}, + {892, 0}, + {892, 1}, + {892, 1}, + {740, 1}, + {740, 1}, + {740, 1}, + {741, 1}, + {741, 1}, + {741, 1}, + {741, 2}, + {717, 1}, + {789, 3}, + {789, 2}, + {789, 3}, + {789, 2}, + {789, 3}, + {789, 3}, + {789, 2}, + {789, 2}, + {789, 1}, + {789, 2}, + {789, 5}, + {789, 5}, + {789, 1}, + {789, 3}, + {789, 2}, + {720, 1}, + {720, 1}, + {761, 1}, + {761, 2}, + {761, 2}, + {711, 2}, + {711, 2}, + {711, 1}, + {711, 1}, + 
{763, 2}, + {763, 2}, + {763, 1}, + {763, 2}, + {763, 2}, + {763, 3}, + {763, 3}, + {763, 2}, + {802, 1}, + {802, 1}, + {718, 1}, + {718, 2}, + {718, 1}, + {718, 1}, + {718, 2}, + {794, 1}, + {794, 2}, + {794, 1}, + {794, 1}, + {636, 1}, + {636, 1}, + {636, 1}, + {636, 1}, + {730, 1}, + {730, 2}, + {730, 2}, + {730, 2}, + {730, 3}, + {556, 3}, + {565, 0}, + {565, 1}, + {594, 1}, + {594, 1}, + {594, 1}, + {595, 0}, + {595, 2}, + {678, 0}, + {678, 1}, + {678, 1}, + {695, 5}, + {764, 0}, + {764, 1}, + {572, 0}, + {572, 2}, + {572, 3}, + {635, 0}, + {635, 2}, + {559, 2}, + {559, 1}, + {559, 2}, + {889, 0}, + {889, 2}, + {702, 1}, + {702, 3}, + {581, 1}, + {581, 1}, + {709, 2}, + {605, 2}, + {606, 0}, + {606, 1}, + {828, 0}, + {828, 1}, + } + + yyXErrors = map[yyXError]string{} + + yyParseTab = [1633][]uint16{ + // 0 + {6: 986, 986, 56: 1182, 1164, 1166, 69: 1176, 72: 1165, 75: 1207, 407: 1172, 410: 1175, 478: 1177, 480: 1181, 1208, 484: 1169, 491: 1162, 566: 1201, 1178, 1179, 1180, 1168, 1174, 593: 1190, 599: 1198, 601: 1200, 625: 1167, 641: 1183, 647: 1185, 649: 1186, 1163, 1187, 1188, 658: 1189, 1192, 1193, 1194, 665: 1171, 1195, 1196, 1197, 1184, 672: 1170, 1191, 1173, 697: 1199, 1202, 1203, 701: 1206, 708: 1204, 1205, 785: 1160, 1161}, + {6: 1159}, + {6: 1158, 2790}, + {573: 2708}, + {573: 2706}, + // 5 + {6: 1104, 1104}, + {101: 2705}, + {6: 1091, 1091}, + {74: 2306, 388: 2339, 433: 2302, 475: 1021, 486: 2341, 573: 995, 663: 2342, 694: 2343, 754: 2338, 784: 2340}, + {68: 342, 398: 342, 560: 2197, 2196, 2195, 620: 2326}, + // 10 + {43: 995, 74: 2306, 433: 2302, 475: 2304, 573: 995, 663: 2303, 694: 2305}, + {46: 985, 410: 985, 478: 985, 570: 985, 985}, + {46: 984, 410: 984, 478: 984, 570: 984, 984}, + {46: 983, 410: 983, 478: 983, 570: 983, 983}, + {46: 2290, 410: 1175, 478: 1177, 566: 2291, 1178, 1179, 1180, 1168, 1174, 593: 2292, 599: 2293, 601: 2294, 629: 2289}, + // 15 + {342, 342, 342, 342, 342, 342, 10: 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 
342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 560: 2197, 2196, 2195, 580: 342, 620: 2285}, + {342, 342, 342, 342, 342, 342, 10: 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 560: 2197, 2196, 2195, 580: 342, 620: 2237}, + {6: 326, 326}, + {272, 272, 272, 272, 272, 272, 10: 272, 272, 272, 272, 272, 272, 272, 272, 272, 272, 272, 272, 272, 272, 272, 272, 272, 272, 272, 272, 272, 272, 272, 272, 272, 272, 272, 272, 272, 272, 272, 272, 272, 272, 272, 272, 272, 272, 272, 272, 272, 272, 272, 272, 272, 272, 272, 272, 272, 272, 272, 272, 272, 272, 272, 272, 272, 272, 272, 272, 272, 272, 272, 272, 272, 272, 272, 272, 272, 272, 272, 272, 272, 272, 272, 272, 272, 272, 272, 272, 272, 272, 272, 272, 272, 272, 272, 272, 272, 272, 272, 272, 272, 272, 272, 272, 272, 272, 272, 272, 272, 272, 272, 272, 272, 272, 272, 272, 272, 272, 272, 272, 272, 272, 272, 272, 272, 272, 272, 272, 272, 272, 272, 272, 272, 272, 272, 272, 272, 272, 272, 272, 272, 272, 272, 272, 272, 272, 272, 272, 272, 272, 272, 272, 272, 272, 272, 272, 272, 272, 272, 272, 272, 272, 272, 272, 272, 272, 272, 272, 272, 272, 272, 272, 272, 272, 272, 272, 272, 272, 272, 272, 272, 272, 272, 272, 272, 272, 272, 272, 272, 272, 272, 272, 272, 272, 272, 272, 272, 272, 272, 272, 272, 272, 272, 272, 272, 272, 272, 272, 272, 272, 272, 272, 272, 272, 272, 272, 272, 272, 272, 272, 272, 272, 272, 272, 272, 272, 272, 272, 272, 272, 272, 272, 272, 272, 272, 272, 272, 272, 272, 272, 272, 272, 272, 272, 272, 272, 272, 272, 272, 272, 272, 272, 272, 272, 272, 272, 272, 272, 272, 272, 272, 272, 272, 272, 272, 272, 272, 272, 272, 272, 272, 272, 272, 272, 272, 272, 272, 272, 272, 272, 
272, 272, 272, 272, 272, 272, 272, 272, 272, 272, 272, 272, 272, 272, 272, 272, 272, 272, 272, 272, 272, 272, 272, 272, 272, 272, 272, 272, 272, 272, 272, 272, 272, 272, 272, 272, 272, 272, 272, 272, 272, 272, 272, 272, 272, 272, 272, 272, 272, 272, 272, 272, 272, 272, 272, 272, 272, 272, 272, 272, 272, 272, 272, 272, 272, 272, 272, 272, 272, 272, 272, 272, 272, 272, 272, 272, 272, 272, 272, 272, 272, 272, 272, 272, 272, 272, 272, 272, 272, 272, 272, 272, 272, 376: 272, 378: 272, 272, 272, 272, 400: 272, 272, 404: 272, 272, 272, 410: 272, 419: 272, 272, 429: 272, 272, 272, 433: 272, 435: 272, 272, 272, 272, 272, 272, 272, 272, 272, 272, 272, 272, 272, 272, 272, 272, 272, 272, 272, 272, 272, 272, 272, 272, 272, 272, 272, 272, 272, 272, 272, 272, 272, 272, 272, 549: 272, 551: 272, 553: 272, 557: 272, 272, 560: 272, 272, 272, 607: 272, 611: 272, 272, 749: 2042, 775: 2040, 791: 2041}, + {6: 474, 474, 474, 384: 474, 386: 1945, 398: 1969, 618: 1946, 1970, 743: 1968}, + // 20 + {6: 474, 474, 474, 384: 474, 386: 1945, 618: 1946, 1966}, + {6: 474, 474, 474, 384: 474, 386: 1945, 618: 1946, 1947}, + {1309, 1332, 1217, 1442, 1436, 1426, 190, 190, 9: 190, 1280, 1229, 1477, 1511, 1504, 1497, 1507, 1500, 1499, 1501, 1517, 1509, 1503, 1515, 1516, 1513, 1514, 1502, 1498, 1505, 1506, 1508, 1512, 1510, 1547, 1453, 1451, 1452, 1314, 1216, 1226, 1441, 1244, 1288, 1246, 1225, 1260, 1263, 1434, 1299, 1335, 1522, 1521, 1270, 1338, 1298, 1476, 1221, 1231, 1340, 1439, 1341, 1257, 1518, 1519, 1438, 1326, 1350, 1273, 1278, 1430, 1431, 1283, 1289, 1384, 1296, 1432, 1433, 1219, 1222, 1224, 1223, 1238, 1237, 1482, 1427, 1243, 1249, 1261, 1911, 1250, 1485, 1405, 1318, 1319, 1913, 1450, 1290, 1293, 1292, 1415, 1295, 1300, 1301, 1402, 1214, 1529, 1215, 1218, 1460, 1387, 1304, 1220, 1310, 1348, 1349, 1345, 1530, 1531, 1532, 1406, 1576, 1478, 1479, 1467, 1480, 1227, 1394, 1533, 1312, 1396, 1228, 1381, 1481, 1360, 1308, 1230, 1329, 1232, 1233, 1313, 1311, 1234, 1408, 1534, 1535, 1404, 1235, 1536, 1468, 1236, 1537, 1538, 1239, 1240, 1388, 1324, 1483, 1417, 1241, 1484, 1242, 1245, 1247, 1248, 1251, 1386, 1351, 1252, 1577, 1435, 1356, 1253, 1461, 1401, 1574, 1254, 1539, 1411, 1255, 1256, 1580, 1258, 1259, 1346, 1540, 1322, 1541, 1418, 1459, 1264, 1307, 1210, 1462, 1403, 1337, 1542, 1265, 1543, 1544, 1389, 1407, 1412, 1325, 1398, 1486, 1457, 1268, 1266, 1334, 1419, 1912, 1456, 1458, 1315, 1546, 1473, 1472, 1376, 1377, 1316, 1378, 1379, 1390, 1365, 1545, 1317, 1366, 1463, 1302, 1361, 1269, 1400, 1573, 1344, 1466, 1469, 1420, 1487, 1488, 1464, 1465, 1353, 1470, 1548, 1454, 1354, 1331, 1285, 1524, 1575, 1410, 1422, 1425, 1352, 1271, 1475, 1474, 1525, 1367, 1550, 1368, 1272, 1343, 1362, 1363, 1364, 1489, 1321, 1370, 1369, 1274, 1549, 1395, 1275, 1528, 1527, 1383, 1424, 1276, 1437, 1327, 1455, 1380, 1328, 1342, 1277, 1385, 1359, 1320, 1490, 1371, 1429, 1393, 1372, 1471, 1333, 1373, 1374, 1281, 1423, 1382, 1375, 1282, 1305, 1414, 1523, 1416, 1336, 1339, 1443, 1444, 1445, 1446, 1447, 1448, 1449, 1578, 1491, 1358, 1494, 1495, 1493, 1492, 1357, 1428, 1284, 1554, 1555, 1556, 1557, 1579, 1551, 1397, 1287, 1286, 1552, 1553, 1355, 1413, 1409, 1421, 1440, 1391, 1291, 1496, 1561, 1562, 1563, 1564, 1565, 1566, 1568, 1567, 1569, 1570, 1571, 1520, 1294, 1323, 1572, 1297, 1330, 1392, 1306, 1558, 1559, 1560, 1347, 1303, 1526, 1399, 404: 1918, 438: 1917, 523: 1915, 1212, 1213, 1211, 604: 1916, 712: 1919, 799: 1914}, + {641: 1901}, + {43: 161, 50: 164, 54: 161, 88: 1597, 1595, 1593, 95: 1596, 102: 1592, 625: 1589, 729: 1591, 746: 1594, 765: 1590, 783: 
1588}, + // 25 + {6: 154, 154}, + {6: 153, 153}, + {6: 152, 152}, + {6: 151, 151}, + {6: 150, 150}, + // 30 + {6: 149, 149}, + {6: 148, 148}, + {6: 147, 147}, + {6: 146, 146}, + {6: 145, 145}, + // 35 + {6: 144, 144}, + {6: 143, 143}, + {6: 142, 142}, + {6: 141, 141}, + {6: 140, 140}, + // 40 + {6: 139, 139}, + {6: 138, 138}, + {6: 137, 137}, + {6: 136, 136}, + {6: 135, 135}, + // 45 + {6: 134, 134}, + {6: 133, 133}, + {6: 128, 128}, + {119, 119, 119, 119, 119, 119, 10: 119, 119, 119, 119, 119, 119, 119, 119, 119, 119, 119, 119, 119, 119, 119, 119, 119, 119, 119, 119, 119, 119, 119, 119, 119, 119, 119, 119, 119, 119, 119, 119, 119, 119, 119, 119, 119, 119, 119, 119, 119, 119, 119, 119, 119, 119, 119, 119, 119, 119, 119, 119, 119, 119, 119, 119, 119, 119, 119, 119, 119, 119, 119, 119, 119, 119, 119, 119, 119, 119, 119, 119, 119, 119, 119, 119, 119, 119, 119, 119, 119, 119, 119, 119, 119, 119, 119, 119, 119, 119, 119, 119, 119, 119, 119, 119, 119, 119, 119, 119, 119, 119, 119, 119, 119, 119, 119, 119, 119, 119, 119, 119, 119, 119, 119, 119, 119, 119, 119, 119, 119, 119, 119, 119, 119, 119, 119, 119, 119, 119, 119, 119, 119, 119, 119, 119, 119, 119, 119, 119, 119, 119, 119, 119, 119, 119, 119, 119, 119, 119, 119, 119, 119, 119, 119, 119, 119, 119, 119, 119, 119, 119, 119, 119, 119, 119, 119, 119, 119, 119, 119, 119, 119, 119, 119, 119, 119, 119, 119, 119, 119, 119, 119, 119, 119, 119, 119, 119, 119, 119, 119, 119, 119, 119, 119, 119, 119, 119, 119, 119, 119, 119, 119, 119, 119, 119, 119, 119, 119, 119, 119, 119, 119, 119, 119, 119, 119, 119, 119, 119, 119, 119, 119, 119, 119, 119, 119, 119, 119, 119, 119, 119, 119, 119, 119, 119, 119, 119, 119, 119, 119, 119, 119, 119, 119, 119, 119, 119, 119, 119, 119, 119, 119, 119, 119, 119, 119, 119, 119, 119, 119, 119, 119, 119, 119, 119, 119, 119, 119, 119, 119, 119, 119, 119, 119, 119, 119, 119, 119, 119, 119, 119, 119, 119, 119, 119, 119, 119, 119, 119, 119, 119, 119, 119, 119, 119, 119, 119, 119, 119, 119, 119, 119, 119, 119, 119, 119, 119, 119, 119, 119, 119, 119, 119, 119, 119, 119, 119, 119, 119, 119, 119, 119, 119, 119, 119, 119, 119, 119, 119, 119, 119, 119, 119, 119, 119, 119, 119, 119, 119, 119, 119, 119, 119, 119, 119, 119, 119, 119, 119, 119, 119, 119, 119, 119, 119, 119, 119, 119, 119, 119, 119, 573: 1582, 768: 1583}, + {1309, 1332, 1217, 1442, 1436, 1426, 10: 1280, 1229, 1477, 1511, 1504, 1497, 1507, 1500, 1499, 1501, 1517, 1509, 1503, 1515, 1516, 1513, 1514, 1502, 1498, 1505, 1506, 1508, 1512, 1510, 1547, 1453, 1451, 1452, 1314, 1216, 1226, 1441, 1244, 1288, 1246, 1225, 1260, 1263, 1434, 1299, 1335, 1522, 1521, 1270, 1338, 1298, 1476, 1221, 1231, 1340, 1439, 1341, 1257, 1518, 1519, 1438, 1326, 1350, 1273, 1278, 1430, 1431, 1283, 1289, 1384, 1296, 1432, 1433, 1219, 1222, 1224, 1223, 1238, 1237, 1482, 1427, 1243, 1249, 1261, 1262, 1250, 1485, 1405, 1318, 1319, 1279, 1450, 1290, 1293, 1292, 1415, 1295, 1300, 1301, 1402, 1214, 1529, 1215, 1218, 1460, 1387, 1304, 1220, 1310, 1348, 1349, 1345, 1530, 1531, 1532, 1406, 1576, 1478, 1479, 1467, 1480, 1227, 1394, 1533, 1312, 1396, 1228, 1381, 1481, 1360, 1308, 1230, 1329, 1232, 1233, 1313, 1311, 1234, 1408, 1534, 1535, 1404, 1235, 1536, 1468, 1236, 1537, 1538, 1239, 1240, 1388, 1324, 1483, 1417, 1241, 1484, 1242, 1245, 1247, 1248, 1251, 1386, 1351, 1252, 1577, 1435, 1356, 1253, 1461, 1401, 1574, 1254, 1539, 1411, 1255, 1256, 1580, 1258, 1259, 1346, 1540, 1322, 1541, 1418, 1459, 1264, 1307, 1210, 1462, 1403, 1337, 1542, 1265, 1543, 1544, 1389, 1407, 1412, 1325, 1398, 1486, 1457, 1268, 1266, 1334, 
1419, 1267, 1456, 1458, 1315, 1546, 1473, 1472, 1376, 1377, 1316, 1378, 1379, 1390, 1365, 1545, 1317, 1366, 1463, 1302, 1361, 1269, 1400, 1573, 1344, 1466, 1469, 1420, 1487, 1488, 1464, 1465, 1353, 1470, 1548, 1454, 1354, 1331, 1285, 1524, 1575, 1410, 1422, 1425, 1352, 1271, 1475, 1474, 1525, 1367, 1550, 1368, 1272, 1343, 1362, 1363, 1364, 1489, 1321, 1370, 1369, 1274, 1549, 1395, 1275, 1528, 1527, 1383, 1424, 1276, 1437, 1327, 1455, 1380, 1328, 1342, 1277, 1385, 1359, 1320, 1490, 1371, 1429, 1393, 1372, 1471, 1333, 1373, 1374, 1281, 1423, 1382, 1375, 1282, 1305, 1414, 1523, 1416, 1336, 1339, 1443, 1444, 1445, 1446, 1447, 1448, 1449, 1578, 1491, 1358, 1494, 1495, 1493, 1492, 1357, 1428, 1284, 1554, 1555, 1556, 1557, 1579, 1551, 1397, 1287, 1286, 1552, 1553, 1355, 1413, 1409, 1421, 1440, 1391, 1291, 1496, 1561, 1562, 1563, 1564, 1565, 1566, 1568, 1567, 1569, 1570, 1571, 1520, 1294, 1323, 1572, 1297, 1330, 1392, 1306, 1558, 1559, 1560, 1347, 1303, 1526, 1399, 523: 1209, 1212, 1213, 1211, 592: 1581}, + // 50 + {6: 1016, 1016, 11: 1016, 42: 1016, 374: 1016, 377: 1016, 470: 1016, 1016, 473: 1016}, + {890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 
890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890}, + {889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889}, + {888, 888, 888, 888, 888, 888, 888, 888, 888, 888, 888, 888, 888, 888, 888, 888, 888, 888, 888, 888, 888, 888, 888, 888, 888, 888, 888, 888, 888, 888, 888, 888, 888, 888, 888, 888, 888, 888, 888, 888, 888, 888, 888, 888, 888, 888, 888, 888, 888, 888, 888, 888, 888, 888, 888, 888, 888, 888, 888, 888, 888, 888, 888, 888, 888, 888, 888, 888, 888, 888, 888, 888, 888, 888, 888, 888, 888, 888, 888, 888, 888, 888, 888, 888, 888, 888, 888, 888, 888, 888, 888, 888, 888, 888, 888, 888, 888, 888, 888, 888, 888, 888, 888, 888, 888, 888, 888, 888, 888, 888, 888, 888, 888, 888, 888, 888, 888, 888, 888, 888, 888, 888, 888, 888, 888, 888, 888, 888, 888, 888, 888, 888, 888, 888, 888, 888, 888, 888, 888, 888, 888, 888, 888, 888, 888, 888, 888, 888, 888, 888, 888, 
888, 888, 888, 888, 888, 888, 888, 888, 888, 888, 888, 888, 888, 888, 888, 888, 888, 888, 888, 888, 888, 888, 888, 888, 888, 888, 888, 888, 888, 888, 888, 888, 888, 888, 888, 888, 888, 888, 888, 888, 888, 888, 888, 888, 888, 888, 888, 888, 888, 888, 888, 888, 888, 888, 888, 888, 888, 888, 888, 888, 888, 888, 888, 888, 888, 888, 888, 888, 888, 888, 888, 888, 888, 888, 888, 888, 888, 888, 888, 888, 888, 888, 888, 888, 888, 888, 888, 888, 888, 888, 888, 888, 888, 888, 888, 888, 888, 888, 888, 888, 888, 888, 888, 888, 888, 888, 888, 888, 888, 888, 888, 888, 888, 888, 888, 888, 888, 888, 888, 888, 888, 888, 888, 888, 888, 888, 888, 888, 888, 888, 888, 888, 888, 888, 888, 888, 888, 888, 888, 888, 888, 888, 888, 888, 888, 888, 888, 888, 888, 888, 888, 888, 888, 888, 888, 888, 888, 888, 888, 888, 888, 888, 888, 888, 888, 888, 888, 888, 888, 888, 888, 888, 888, 888, 888, 888, 888, 888, 888, 888, 888, 888, 888, 888, 888, 888, 888, 888, 888, 888, 888, 888, 888, 888, 888, 888, 888, 888, 888, 888, 888, 888, 888, 888, 888, 888, 888, 888, 888, 888, 888, 888, 888, 888, 888, 888, 888, 888, 888, 888, 888, 888, 888, 888, 888, 888, 888, 888, 888, 888, 888, 888, 888, 888, 888, 888, 888, 888, 888, 888, 888, 888, 888, 888, 888, 888, 888, 888, 888, 888, 888, 888, 888, 888, 888, 888, 888, 888, 888, 888, 888, 888, 888, 888, 888, 888, 888, 888, 888, 888, 888, 888, 888, 888, 888, 888, 888, 888, 888, 888, 888, 888, 888, 888, 888, 888, 888, 888, 888, 888, 888, 888, 888, 888, 888, 888, 888, 888, 888, 888, 888, 888, 888, 888, 888, 888, 888, 888, 888, 888, 888, 888, 888, 888, 888, 888, 888, 888, 888, 888, 888, 888, 888, 888, 888, 888, 888, 888, 888, 888, 888, 888, 888, 888, 888, 888, 888, 888, 888, 888, 888, 888, 888, 888, 888, 888, 888, 888, 888, 888, 888, 888, 888, 888, 888, 888, 888, 888, 888, 888, 888, 888, 888, 888, 888, 888, 888, 888, 888, 888, 888, 888}, + {887, 887, 887, 887, 887, 887, 887, 887, 887, 887, 887, 887, 887, 887, 887, 887, 887, 887, 887, 887, 887, 887, 887, 887, 887, 887, 887, 887, 887, 887, 887, 887, 887, 887, 887, 887, 887, 887, 887, 887, 887, 887, 887, 887, 887, 887, 887, 887, 887, 887, 887, 887, 887, 887, 887, 887, 887, 887, 887, 887, 887, 887, 887, 887, 887, 887, 887, 887, 887, 887, 887, 887, 887, 887, 887, 887, 887, 887, 887, 887, 887, 887, 887, 887, 887, 887, 887, 887, 887, 887, 887, 887, 887, 887, 887, 887, 887, 887, 887, 887, 887, 887, 887, 887, 887, 887, 887, 887, 887, 887, 887, 887, 887, 887, 887, 887, 887, 887, 887, 887, 887, 887, 887, 887, 887, 887, 887, 887, 887, 887, 887, 887, 887, 887, 887, 887, 887, 887, 887, 887, 887, 887, 887, 887, 887, 887, 887, 887, 887, 887, 887, 887, 887, 887, 887, 887, 887, 887, 887, 887, 887, 887, 887, 887, 887, 887, 887, 887, 887, 887, 887, 887, 887, 887, 887, 887, 887, 887, 887, 887, 887, 887, 887, 887, 887, 887, 887, 887, 887, 887, 887, 887, 887, 887, 887, 887, 887, 887, 887, 887, 887, 887, 887, 887, 887, 887, 887, 887, 887, 887, 887, 887, 887, 887, 887, 887, 887, 887, 887, 887, 887, 887, 887, 887, 887, 887, 887, 887, 887, 887, 887, 887, 887, 887, 887, 887, 887, 887, 887, 887, 887, 887, 887, 887, 887, 887, 887, 887, 887, 887, 887, 887, 887, 887, 887, 887, 887, 887, 887, 887, 887, 887, 887, 887, 887, 887, 887, 887, 887, 887, 887, 887, 887, 887, 887, 887, 887, 887, 887, 887, 887, 887, 887, 887, 887, 887, 887, 887, 887, 887, 887, 887, 887, 887, 887, 887, 887, 887, 887, 887, 887, 887, 887, 887, 887, 887, 887, 887, 887, 887, 887, 887, 887, 887, 887, 887, 887, 887, 887, 887, 887, 887, 887, 887, 887, 887, 887, 887, 887, 887, 887, 887, 887, 887, 887, 887, 887, 887, 
887, 887, 887, 887, 887, 887, 887, 887, 887, 887, 887, 887, 887, 887, 887, 887, 887, 887, 887, 887, 887, 887, 887, 887, 887, 887, 887, 887, 887, 887, 887, 887, 887, 887, 887, 887, 887, 887, 887, 887, 887, 887, 887, 887, 887, 887, 887, 887, 887, 887, 887, 887, 887, 887, 887, 887, 887, 887, 887, 887, 887, 887, 887, 887, 887, 887, 887, 887, 887, 887, 887, 887, 887, 887, 887, 887, 887, 887, 887, 887, 887, 887, 887, 887, 887, 887, 887, 887, 887, 887, 887, 887, 887, 887, 887, 887, 887, 887, 887, 887, 887, 887, 887, 887, 887, 887, 887, 887, 887, 887, 887, 887, 887, 887, 887, 887, 887, 887, 887, 887, 887, 887, 887, 887, 887, 887, 887, 887, 887, 887, 887, 887, 887, 887, 887, 887, 887, 887, 887, 887, 887, 887, 887, 887, 887, 887, 887, 887, 887, 887, 887, 887, 887, 887, 887, 887, 887, 887, 887, 887, 887, 887, 887, 887, 887, 887, 887, 887, 887, 887, 887, 887, 887, 887, 887, 887, 887, 887, 887, 887, 887, 887, 887, 887, 887}, + // 55 + {886, 886, 886, 886, 886, 886, 886, 886, 886, 886, 886, 886, 886, 886, 886, 886, 886, 886, 886, 886, 886, 886, 886, 886, 886, 886, 886, 886, 886, 886, 886, 886, 886, 886, 886, 886, 886, 886, 886, 886, 886, 886, 886, 886, 886, 886, 886, 886, 886, 886, 886, 886, 886, 886, 886, 886, 886, 886, 886, 886, 886, 886, 886, 886, 886, 886, 886, 886, 886, 886, 886, 886, 886, 886, 886, 886, 886, 886, 886, 886, 886, 886, 886, 886, 886, 886, 886, 886, 886, 886, 886, 886, 886, 886, 886, 886, 886, 886, 886, 886, 886, 886, 886, 886, 886, 886, 886, 886, 886, 886, 886, 886, 886, 886, 886, 886, 886, 886, 886, 886, 886, 886, 886, 886, 886, 886, 886, 886, 886, 886, 886, 886, 886, 886, 886, 886, 886, 886, 886, 886, 886, 886, 886, 886, 886, 886, 886, 886, 886, 886, 886, 886, 886, 886, 886, 886, 886, 886, 886, 886, 886, 886, 886, 886, 886, 886, 886, 886, 886, 886, 886, 886, 886, 886, 886, 886, 886, 886, 886, 886, 886, 886, 886, 886, 886, 886, 886, 886, 886, 886, 886, 886, 886, 886, 886, 886, 886, 886, 886, 886, 886, 886, 886, 886, 886, 886, 886, 886, 886, 886, 886, 886, 886, 886, 886, 886, 886, 886, 886, 886, 886, 886, 886, 886, 886, 886, 886, 886, 886, 886, 886, 886, 886, 886, 886, 886, 886, 886, 886, 886, 886, 886, 886, 886, 886, 886, 886, 886, 886, 886, 886, 886, 886, 886, 886, 886, 886, 886, 886, 886, 886, 886, 886, 886, 886, 886, 886, 886, 886, 886, 886, 886, 886, 886, 886, 886, 886, 886, 886, 886, 886, 886, 886, 886, 886, 886, 886, 886, 886, 886, 886, 886, 886, 886, 886, 886, 886, 886, 886, 886, 886, 886, 886, 886, 886, 886, 886, 886, 886, 886, 886, 886, 886, 886, 886, 886, 886, 886, 886, 886, 886, 886, 886, 886, 886, 886, 886, 886, 886, 886, 886, 886, 886, 886, 886, 886, 886, 886, 886, 886, 886, 886, 886, 886, 886, 886, 886, 886, 886, 886, 886, 886, 886, 886, 886, 886, 886, 886, 886, 886, 886, 886, 886, 886, 886, 886, 886, 886, 886, 886, 886, 886, 886, 886, 886, 886, 886, 886, 886, 886, 886, 886, 886, 886, 886, 886, 886, 886, 886, 886, 886, 886, 886, 886, 886, 886, 886, 886, 886, 886, 886, 886, 886, 886, 886, 886, 886, 886, 886, 886, 886, 886, 886, 886, 886, 886, 886, 886, 886, 886, 886, 886, 886, 886, 886, 886, 886, 886, 886, 886, 886, 886, 886, 886, 886, 886, 886, 886, 886, 886, 886, 886, 886, 886, 886, 886, 886, 886, 886, 886, 886, 886, 886, 886, 886, 886, 886, 886, 886, 886, 886, 886, 886, 886, 886, 886, 886, 886, 886, 886, 886, 886, 886, 886, 886, 886, 886, 886, 886, 886, 886, 886, 886, 886, 886, 886, 886, 886, 886, 886, 886, 886, 886, 886, 886, 886, 886, 886, 886, 886, 886, 886, 886, 886, 886, 886, 886, 886, 886, 886, 886, 886, 886, 886, 886, 886, 886, 886, 886, 886, 886, 886, 886}, + 
{885, 885, 885, 885, 885, 885, 885, 885, 885, 885, 885, 885, 885, 885, 885, 885, 885, 885, 885, 885, 885, 885, 885, 885, 885, 885, 885, 885, 885, 885, 885, 885, 885, 885, 885, 885, 885, 885, 885, 885, 885, 885, 885, 885, 885, 885, 885, 885, 885, 885, 885, 885, 885, 885, 885, 885, 885, 885, 885, 885, 885, 885, 885, 885, 885, 885, 885, 885, 885, 885, 885, 885, 885, 885, 885, 885, 885, 885, 885, 885, 885, 885, 885, 885, 885, 885, 885, 885, 885, 885, 885, 885, 885, 885, 885, 885, 885, 885, 885, 885, 885, 885, 885, 885, 885, 885, 885, 885, 885, 885, 885, 885, 885, 885, 885, 885, 885, 885, 885, 885, 885, 885, 885, 885, 885, 885, 885, 885, 885, 885, 885, 885, 885, 885, 885, 885, 885, 885, 885, 885, 885, 885, 885, 885, 885, 885, 885, 885, 885, 885, 885, 885, 885, 885, 885, 885, 885, 885, 885, 885, 885, 885, 885, 885, 885, 885, 885, 885, 885, 885, 885, 885, 885, 885, 885, 885, 885, 885, 885, 885, 885, 885, 885, 885, 885, 885, 885, 885, 885, 885, 885, 885, 885, 885, 885, 885, 885, 885, 885, 885, 885, 885, 885, 885, 885, 885, 885, 885, 885, 885, 885, 885, 885, 885, 885, 885, 885, 885, 885, 885, 885, 885, 885, 885, 885, 885, 885, 885, 885, 885, 885, 885, 885, 885, 885, 885, 885, 885, 885, 885, 885, 885, 885, 885, 885, 885, 885, 885, 885, 885, 885, 885, 885, 885, 885, 885, 885, 885, 885, 885, 885, 885, 885, 885, 885, 885, 885, 885, 885, 885, 885, 885, 885, 885, 885, 885, 885, 885, 885, 885, 885, 885, 885, 885, 885, 885, 885, 885, 885, 885, 885, 885, 885, 885, 885, 885, 885, 885, 885, 885, 885, 885, 885, 885, 885, 885, 885, 885, 885, 885, 885, 885, 885, 885, 885, 885, 885, 885, 885, 885, 885, 885, 885, 885, 885, 885, 885, 885, 885, 885, 885, 885, 885, 885, 885, 885, 885, 885, 885, 885, 885, 885, 885, 885, 885, 885, 885, 885, 885, 885, 885, 885, 885, 885, 885, 885, 885, 885, 885, 885, 885, 885, 885, 885, 885, 885, 885, 885, 885, 885, 885, 885, 885, 885, 885, 885, 885, 885, 885, 885, 885, 885, 885, 885, 885, 885, 885, 885, 885, 885, 885, 885, 885, 885, 885, 885, 885, 885, 885, 885, 885, 885, 885, 885, 885, 885, 885, 885, 885, 885, 885, 885, 885, 885, 885, 885, 885, 885, 885, 885, 885, 885, 885, 885, 885, 885, 885, 885, 885, 885, 885, 885, 885, 885, 885, 885, 885, 885, 885, 885, 885, 885, 885, 885, 885, 885, 885, 885, 885, 885, 885, 885, 885, 885, 885, 885, 885, 885, 885, 885, 885, 885, 885, 885, 885, 885, 885, 885, 885, 885, 885, 885, 885, 885, 885, 885, 885, 885, 885, 885, 885, 885, 885, 885, 885, 885, 885, 885, 885, 885, 885, 885, 885, 885, 885, 885, 885, 885, 885, 885, 885, 885, 885, 885, 885, 885, 885, 885, 885, 885, 885, 885, 885, 885, 885, 885, 885, 885, 885, 885, 885, 885, 885}, + {884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 
884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884}, + {883, 883, 883, 883, 883, 883, 883, 883, 883, 883, 883, 883, 883, 883, 883, 883, 883, 883, 883, 883, 883, 883, 883, 883, 883, 883, 883, 883, 883, 883, 883, 883, 883, 883, 883, 883, 883, 883, 883, 883, 883, 883, 883, 883, 883, 883, 883, 883, 883, 883, 883, 883, 883, 883, 883, 883, 883, 883, 883, 883, 883, 883, 883, 883, 883, 883, 883, 883, 883, 883, 883, 883, 883, 883, 883, 883, 883, 883, 883, 883, 883, 883, 883, 883, 883, 883, 883, 883, 883, 883, 883, 883, 883, 883, 883, 883, 883, 883, 883, 883, 883, 883, 883, 883, 883, 883, 883, 883, 883, 883, 883, 883, 883, 883, 883, 883, 883, 883, 883, 883, 883, 883, 883, 883, 883, 883, 883, 883, 883, 883, 883, 883, 883, 883, 883, 883, 883, 883, 883, 883, 883, 883, 883, 883, 883, 883, 883, 883, 883, 883, 883, 883, 883, 883, 883, 883, 883, 883, 883, 883, 883, 883, 883, 883, 883, 883, 883, 883, 883, 883, 883, 883, 883, 883, 883, 883, 883, 883, 883, 883, 883, 883, 883, 883, 883, 883, 883, 883, 883, 883, 883, 883, 883, 883, 883, 883, 883, 883, 883, 883, 883, 883, 883, 883, 883, 883, 883, 883, 883, 883, 883, 883, 883, 883, 883, 883, 883, 883, 883, 883, 883, 883, 883, 883, 883, 883, 883, 883, 883, 883, 883, 883, 883, 883, 883, 883, 883, 883, 883, 883, 883, 883, 883, 883, 883, 883, 883, 883, 883, 883, 883, 883, 883, 883, 883, 883, 883, 883, 883, 883, 883, 883, 883, 883, 883, 883, 883, 883, 883, 883, 883, 883, 883, 883, 883, 883, 883, 883, 883, 883, 883, 883, 883, 883, 883, 883, 883, 883, 883, 883, 883, 883, 883, 883, 883, 883, 883, 883, 883, 883, 883, 883, 883, 883, 883, 883, 883, 883, 883, 883, 883, 883, 883, 883, 883, 883, 883, 883, 883, 883, 883, 883, 883, 883, 883, 883, 883, 883, 883, 883, 883, 883, 883, 883, 883, 883, 883, 883, 883, 883, 883, 883, 883, 883, 883, 883, 883, 883, 883, 883, 883, 883, 883, 883, 883, 883, 883, 883, 883, 883, 883, 883, 883, 883, 883, 883, 883, 883, 883, 883, 883, 883, 883, 883, 
883, 883, 883, 883, 883, 883, 883, 883, 883, 883, 883, 883, 883, 883, 883, 883, 883, 883, 883, 883, 883, 883, 883, 883, 883, 883, 883, 883, 883, 883, 883, 883, 883, 883, 883, 883, 883, 883, 883, 883, 883, 883, 883, 883, 883, 883, 883, 883, 883, 883, 883, 883, 883, 883, 883, 883, 883, 883, 883, 883, 883, 883, 883, 883, 883, 883, 883, 883, 883, 883, 883, 883, 883, 883, 883, 883, 883, 883, 883, 883, 883, 883, 883, 883, 883, 883, 883, 883, 883, 883, 883, 883, 883, 883, 883, 883, 883, 883, 883, 883, 883, 883, 883, 883, 883, 883, 883, 883, 883, 883, 883, 883, 883, 883, 883, 883, 883, 883, 883, 883, 883, 883, 883, 883, 883, 883, 883, 883, 883, 883, 883, 883, 883, 883, 883, 883, 883, 883, 883, 883, 883, 883, 883, 883, 883, 883, 883, 883, 883}, + {882, 882, 882, 882, 882, 882, 882, 882, 882, 882, 882, 882, 882, 882, 882, 882, 882, 882, 882, 882, 882, 882, 882, 882, 882, 882, 882, 882, 882, 882, 882, 882, 882, 882, 882, 882, 882, 882, 882, 882, 882, 882, 882, 882, 882, 882, 882, 882, 882, 882, 882, 882, 882, 882, 882, 882, 882, 882, 882, 882, 882, 882, 882, 882, 882, 882, 882, 882, 882, 882, 882, 882, 882, 882, 882, 882, 882, 882, 882, 882, 882, 882, 882, 882, 882, 882, 882, 882, 882, 882, 882, 882, 882, 882, 882, 882, 882, 882, 882, 882, 882, 882, 882, 882, 882, 882, 882, 882, 882, 882, 882, 882, 882, 882, 882, 882, 882, 882, 882, 882, 882, 882, 882, 882, 882, 882, 882, 882, 882, 882, 882, 882, 882, 882, 882, 882, 882, 882, 882, 882, 882, 882, 882, 882, 882, 882, 882, 882, 882, 882, 882, 882, 882, 882, 882, 882, 882, 882, 882, 882, 882, 882, 882, 882, 882, 882, 882, 882, 882, 882, 882, 882, 882, 882, 882, 882, 882, 882, 882, 882, 882, 882, 882, 882, 882, 882, 882, 882, 882, 882, 882, 882, 882, 882, 882, 882, 882, 882, 882, 882, 882, 882, 882, 882, 882, 882, 882, 882, 882, 882, 882, 882, 882, 882, 882, 882, 882, 882, 882, 882, 882, 882, 882, 882, 882, 882, 882, 882, 882, 882, 882, 882, 882, 882, 882, 882, 882, 882, 882, 882, 882, 882, 882, 882, 882, 882, 882, 882, 882, 882, 882, 882, 882, 882, 882, 882, 882, 882, 882, 882, 882, 882, 882, 882, 882, 882, 882, 882, 882, 882, 882, 882, 882, 882, 882, 882, 882, 882, 882, 882, 882, 882, 882, 882, 882, 882, 882, 882, 882, 882, 882, 882, 882, 882, 882, 882, 882, 882, 882, 882, 882, 882, 882, 882, 882, 882, 882, 882, 882, 882, 882, 882, 882, 882, 882, 882, 882, 882, 882, 882, 882, 882, 882, 882, 882, 882, 882, 882, 882, 882, 882, 882, 882, 882, 882, 882, 882, 882, 882, 882, 882, 882, 882, 882, 882, 882, 882, 882, 882, 882, 882, 882, 882, 882, 882, 882, 882, 882, 882, 882, 882, 882, 882, 882, 882, 882, 882, 882, 882, 882, 882, 882, 882, 882, 882, 882, 882, 882, 882, 882, 882, 882, 882, 882, 882, 882, 882, 882, 882, 882, 882, 882, 882, 882, 882, 882, 882, 882, 882, 882, 882, 882, 882, 882, 882, 882, 882, 882, 882, 882, 882, 882, 882, 882, 882, 882, 882, 882, 882, 882, 882, 882, 882, 882, 882, 882, 882, 882, 882, 882, 882, 882, 882, 882, 882, 882, 882, 882, 882, 882, 882, 882, 882, 882, 882, 882, 882, 882, 882, 882, 882, 882, 882, 882, 882, 882, 882, 882, 882, 882, 882, 882, 882, 882, 882, 882, 882, 882, 882, 882, 882, 882, 882, 882, 882, 882, 882, 882, 882, 882, 882, 882, 882, 882, 882, 882, 882, 882, 882, 882, 882, 882, 882, 882, 882, 882, 882, 882, 882, 882, 882, 882, 882, 882, 882, 882, 882, 882, 882, 882, 882, 882, 882, 882, 882, 882, 882, 882, 882, 882, 882, 882, 882}, + // 60 + {881, 881, 881, 881, 881, 881, 881, 881, 881, 881, 881, 881, 881, 881, 881, 881, 881, 881, 881, 881, 881, 881, 881, 881, 881, 881, 881, 881, 881, 881, 881, 881, 881, 881, 881, 
881, 881, 881, 881, 881, 881, 881, 881, 881, 881, 881, 881, 881, 881, 881, 881, 881, 881, 881, 881, 881, 881, 881, 881, 881, 881, 881, 881, 881, 881, 881, 881, 881, 881, 881, 881, 881, 881, 881, 881, 881, 881, 881, 881, 881, 881, 881, 881, 881, 881, 881, 881, 881, 881, 881, 881, 881, 881, 881, 881, 881, 881, 881, 881, 881, 881, 881, 881, 881, 881, 881, 881, 881, 881, 881, 881, 881, 881, 881, 881, 881, 881, 881, 881, 881, 881, 881, 881, 881, 881, 881, 881, 881, 881, 881, 881, 881, 881, 881, 881, 881, 881, 881, 881, 881, 881, 881, 881, 881, 881, 881, 881, 881, 881, 881, 881, 881, 881, 881, 881, 881, 881, 881, 881, 881, 881, 881, 881, 881, 881, 881, 881, 881, 881, 881, 881, 881, 881, 881, 881, 881, 881, 881, 881, 881, 881, 881, 881, 881, 881, 881, 881, 881, 881, 881, 881, 881, 881, 881, 881, 881, 881, 881, 881, 881, 881, 881, 881, 881, 881, 881, 881, 881, 881, 881, 881, 881, 881, 881, 881, 881, 881, 881, 881, 881, 881, 881, 881, 881, 881, 881, 881, 881, 881, 881, 881, 881, 881, 881, 881, 881, 881, 881, 881, 881, 881, 881, 881, 881, 881, 881, 881, 881, 881, 881, 881, 881, 881, 881, 881, 881, 881, 881, 881, 881, 881, 881, 881, 881, 881, 881, 881, 881, 881, 881, 881, 881, 881, 881, 881, 881, 881, 881, 881, 881, 881, 881, 881, 881, 881, 881, 881, 881, 881, 881, 881, 881, 881, 881, 881, 881, 881, 881, 881, 881, 881, 881, 881, 881, 881, 881, 881, 881, 881, 881, 881, 881, 881, 881, 881, 881, 881, 881, 881, 881, 881, 881, 881, 881, 881, 881, 881, 881, 881, 881, 881, 881, 881, 881, 881, 881, 881, 881, 881, 881, 881, 881, 881, 881, 881, 881, 881, 881, 881, 881, 881, 881, 881, 881, 881, 881, 881, 881, 881, 881, 881, 881, 881, 881, 881, 881, 881, 881, 881, 881, 881, 881, 881, 881, 881, 881, 881, 881, 881, 881, 881, 881, 881, 881, 881, 881, 881, 881, 881, 881, 881, 881, 881, 881, 881, 881, 881, 881, 881, 881, 881, 881, 881, 881, 881, 881, 881, 881, 881, 881, 881, 881, 881, 881, 881, 881, 881, 881, 881, 881, 881, 881, 881, 881, 881, 881, 881, 881, 881, 881, 881, 881, 881, 881, 881, 881, 881, 881, 881, 881, 881, 881, 881, 881, 881, 881, 881, 881, 881, 881, 881, 881, 881, 881, 881, 881, 881, 881, 881, 881, 881, 881, 881, 881, 881, 881, 881, 881, 881, 881, 881, 881, 881, 881, 881, 881, 881, 881, 881, 881, 881, 881, 881, 881, 881, 881, 881, 881, 881, 881, 881, 881, 881, 881, 881, 881, 881, 881, 881, 881, 881, 881, 881, 881, 881, 881, 881, 881, 881, 881, 881, 881, 881, 881, 881, 881, 881, 881, 881, 881, 881, 881, 881}, + {880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 
880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880}, + {879, 879, 879, 879, 879, 879, 879, 879, 879, 879, 879, 879, 879, 879, 879, 879, 879, 879, 879, 879, 879, 879, 879, 879, 879, 879, 879, 879, 879, 879, 879, 879, 879, 879, 879, 879, 879, 879, 879, 879, 879, 879, 879, 879, 879, 879, 879, 879, 879, 879, 879, 879, 879, 879, 879, 879, 879, 879, 879, 879, 879, 879, 879, 879, 879, 879, 879, 879, 879, 879, 879, 879, 879, 879, 879, 879, 879, 879, 879, 879, 879, 879, 879, 879, 879, 879, 879, 879, 879, 879, 879, 879, 879, 879, 879, 879, 879, 879, 879, 879, 879, 879, 879, 879, 879, 879, 879, 879, 879, 879, 879, 879, 879, 879, 879, 879, 879, 879, 879, 879, 879, 879, 879, 879, 879, 879, 879, 879, 879, 879, 879, 879, 879, 879, 879, 879, 879, 879, 879, 879, 879, 879, 879, 879, 879, 879, 879, 879, 879, 879, 879, 879, 879, 879, 879, 879, 879, 879, 879, 879, 879, 879, 879, 879, 879, 879, 879, 879, 879, 879, 879, 879, 879, 879, 879, 879, 879, 879, 879, 879, 879, 879, 879, 879, 879, 879, 879, 879, 879, 879, 879, 879, 879, 879, 879, 879, 879, 879, 879, 879, 879, 879, 879, 879, 879, 879, 879, 879, 879, 879, 879, 879, 879, 879, 879, 879, 879, 879, 879, 879, 879, 879, 879, 879, 879, 879, 879, 879, 879, 879, 879, 879, 879, 879, 879, 879, 879, 879, 879, 879, 879, 879, 879, 879, 879, 879, 879, 879, 879, 879, 879, 879, 879, 879, 879, 879, 879, 879, 879, 879, 879, 879, 879, 879, 879, 879, 879, 879, 879, 879, 879, 879, 879, 879, 879, 879, 879, 879, 879, 879, 879, 879, 879, 879, 879, 879, 879, 879, 879, 879, 879, 879, 879, 879, 879, 879, 879, 879, 879, 879, 879, 879, 879, 879, 879, 879, 879, 879, 879, 879, 879, 879, 879, 879, 879, 879, 879, 879, 879, 879, 879, 879, 879, 879, 879, 879, 879, 879, 879, 879, 879, 879, 879, 879, 879, 879, 879, 879, 879, 879, 879, 879, 879, 879, 879, 879, 879, 879, 879, 879, 879, 879, 879, 879, 879, 879, 879, 879, 879, 879, 879, 879, 879, 879, 879, 879, 879, 879, 879, 879, 879, 879, 879, 879, 879, 879, 879, 879, 879, 879, 879, 879, 879, 879, 879, 879, 879, 879, 879, 879, 879, 879, 879, 879, 879, 879, 879, 879, 879, 879, 879, 879, 879, 879, 879, 879, 879, 879, 879, 
879, 879, 879, 879, 879, 879, 879, 879, 879, 879, 879, 879, 879, 879, 879, 879, 879, 879, 879, 879, 879, 879, 879, 879, 879, 879, 879, 879, 879, 879, 879, 879, 879, 879, 879, 879, 879, 879, 879, 879, 879, 879, 879, 879, 879, 879, 879, 879, 879, 879, 879, 879, 879, 879, 879, 879, 879, 879, 879, 879, 879, 879, 879, 879, 879, 879, 879, 879, 879, 879, 879, 879, 879, 879, 879, 879, 879, 879, 879, 879, 879, 879, 879, 879, 879, 879, 879, 879, 879, 879, 879, 879, 879, 879, 879, 879, 879, 879, 879, 879, 879, 879, 879, 879, 879, 879, 879, 879, 879, 879, 879, 879, 879, 879}, + {878, 878, 878, 878, 878, 878, 878, 878, 878, 878, 878, 878, 878, 878, 878, 878, 878, 878, 878, 878, 878, 878, 878, 878, 878, 878, 878, 878, 878, 878, 878, 878, 878, 878, 878, 878, 878, 878, 878, 878, 878, 878, 878, 878, 878, 878, 878, 878, 878, 878, 878, 878, 878, 878, 878, 878, 878, 878, 878, 878, 878, 878, 878, 878, 878, 878, 878, 878, 878, 878, 878, 878, 878, 878, 878, 878, 878, 878, 878, 878, 878, 878, 878, 878, 878, 878, 878, 878, 878, 878, 878, 878, 878, 878, 878, 878, 878, 878, 878, 878, 878, 878, 878, 878, 878, 878, 878, 878, 878, 878, 878, 878, 878, 878, 878, 878, 878, 878, 878, 878, 878, 878, 878, 878, 878, 878, 878, 878, 878, 878, 878, 878, 878, 878, 878, 878, 878, 878, 878, 878, 878, 878, 878, 878, 878, 878, 878, 878, 878, 878, 878, 878, 878, 878, 878, 878, 878, 878, 878, 878, 878, 878, 878, 878, 878, 878, 878, 878, 878, 878, 878, 878, 878, 878, 878, 878, 878, 878, 878, 878, 878, 878, 878, 878, 878, 878, 878, 878, 878, 878, 878, 878, 878, 878, 878, 878, 878, 878, 878, 878, 878, 878, 878, 878, 878, 878, 878, 878, 878, 878, 878, 878, 878, 878, 878, 878, 878, 878, 878, 878, 878, 878, 878, 878, 878, 878, 878, 878, 878, 878, 878, 878, 878, 878, 878, 878, 878, 878, 878, 878, 878, 878, 878, 878, 878, 878, 878, 878, 878, 878, 878, 878, 878, 878, 878, 878, 878, 878, 878, 878, 878, 878, 878, 878, 878, 878, 878, 878, 878, 878, 878, 878, 878, 878, 878, 878, 878, 878, 878, 878, 878, 878, 878, 878, 878, 878, 878, 878, 878, 878, 878, 878, 878, 878, 878, 878, 878, 878, 878, 878, 878, 878, 878, 878, 878, 878, 878, 878, 878, 878, 878, 878, 878, 878, 878, 878, 878, 878, 878, 878, 878, 878, 878, 878, 878, 878, 878, 878, 878, 878, 878, 878, 878, 878, 878, 878, 878, 878, 878, 878, 878, 878, 878, 878, 878, 878, 878, 878, 878, 878, 878, 878, 878, 878, 878, 878, 878, 878, 878, 878, 878, 878, 878, 878, 878, 878, 878, 878, 878, 878, 878, 878, 878, 878, 878, 878, 878, 878, 878, 878, 878, 878, 878, 878, 878, 878, 878, 878, 878, 878, 878, 878, 878, 878, 878, 878, 878, 878, 878, 878, 878, 878, 878, 878, 878, 878, 878, 878, 878, 878, 878, 878, 878, 878, 878, 878, 878, 878, 878, 878, 878, 878, 878, 878, 878, 878, 878, 878, 878, 878, 878, 878, 878, 878, 878, 878, 878, 878, 878, 878, 878, 878, 878, 878, 878, 878, 878, 878, 878, 878, 878, 878, 878, 878, 878, 878, 878, 878, 878, 878, 878, 878, 878, 878, 878, 878, 878, 878, 878, 878, 878, 878, 878, 878, 878, 878, 878, 878, 878, 878, 878, 878, 878, 878, 878, 878, 878, 878, 878, 878, 878, 878, 878, 878, 878, 878, 878, 878, 878, 878, 878, 878, 878, 878, 878, 878, 878, 878, 878, 878, 878, 878, 878, 878, 878, 878, 878, 878, 878, 878, 878, 878, 878}, + {877, 877, 877, 877, 877, 877, 877, 877, 877, 877, 877, 877, 877, 877, 877, 877, 877, 877, 877, 877, 877, 877, 877, 877, 877, 877, 877, 877, 877, 877, 877, 877, 877, 877, 877, 877, 877, 877, 877, 877, 877, 877, 877, 877, 877, 877, 877, 877, 877, 877, 877, 877, 877, 877, 877, 877, 877, 877, 877, 877, 877, 877, 877, 877, 877, 877, 877, 877, 877, 877, 877, 877, 
877, 877, 877, 877, 877, 877, 877, 877, 877, 877, 877, 877, 877, 877, 877, 877, 877, 877, 877, 877, 877, 877, 877, 877, 877, 877, 877, 877, 877, 877, 877, 877, 877, 877, 877, 877, 877, 877, 877, 877, 877, 877, 877, 877, 877, 877, 877, 877, 877, 877, 877, 877, 877, 877, 877, 877, 877, 877, 877, 877, 877, 877, 877, 877, 877, 877, 877, 877, 877, 877, 877, 877, 877, 877, 877, 877, 877, 877, 877, 877, 877, 877, 877, 877, 877, 877, 877, 877, 877, 877, 877, 877, 877, 877, 877, 877, 877, 877, 877, 877, 877, 877, 877, 877, 877, 877, 877, 877, 877, 877, 877, 877, 877, 877, 877, 877, 877, 877, 877, 877, 877, 877, 877, 877, 877, 877, 877, 877, 877, 877, 877, 877, 877, 877, 877, 877, 877, 877, 877, 877, 877, 877, 877, 877, 877, 877, 877, 877, 877, 877, 877, 877, 877, 877, 877, 877, 877, 877, 877, 877, 877, 877, 877, 877, 877, 877, 877, 877, 877, 877, 877, 877, 877, 877, 877, 877, 877, 877, 877, 877, 877, 877, 877, 877, 877, 877, 877, 877, 877, 877, 877, 877, 877, 877, 877, 877, 877, 877, 877, 877, 877, 877, 877, 877, 877, 877, 877, 877, 877, 877, 877, 877, 877, 877, 877, 877, 877, 877, 877, 877, 877, 877, 877, 877, 877, 877, 877, 877, 877, 877, 877, 877, 877, 877, 877, 877, 877, 877, 877, 877, 877, 877, 877, 877, 877, 877, 877, 877, 877, 877, 877, 877, 877, 877, 877, 877, 877, 877, 877, 877, 877, 877, 877, 877, 877, 877, 877, 877, 877, 877, 877, 877, 877, 877, 877, 877, 877, 877, 877, 877, 877, 877, 877, 877, 877, 877, 877, 877, 877, 877, 877, 877, 877, 877, 877, 877, 877, 877, 877, 877, 877, 877, 877, 877, 877, 877, 877, 877, 877, 877, 877, 877, 877, 877, 877, 877, 877, 877, 877, 877, 877, 877, 877, 877, 877, 877, 877, 877, 877, 877, 877, 877, 877, 877, 877, 877, 877, 877, 877, 877, 877, 877, 877, 877, 877, 877, 877, 877, 877, 877, 877, 877, 877, 877, 877, 877, 877, 877, 877, 877, 877, 877, 877, 877, 877, 877, 877, 877, 877, 877, 877, 877, 877, 877, 877, 877, 877, 877, 877, 877, 877, 877, 877, 877, 877, 877, 877, 877, 877, 877, 877, 877, 877, 877, 877, 877, 877, 877, 877, 877, 877, 877, 877, 877, 877, 877, 877, 877, 877, 877, 877, 877, 877, 877, 877, 877, 877, 877, 877, 877, 877, 877, 877, 877, 877, 877, 877, 877, 877, 877, 877, 877, 877, 877, 877, 877, 877, 877, 877, 877, 877, 877, 877, 877, 877, 877, 877, 877, 877, 877, 877}, + // 65 + {876, 876, 876, 876, 876, 876, 876, 876, 876, 876, 876, 876, 876, 876, 876, 876, 876, 876, 876, 876, 876, 876, 876, 876, 876, 876, 876, 876, 876, 876, 876, 876, 876, 876, 876, 876, 876, 876, 876, 876, 876, 876, 876, 876, 876, 876, 876, 876, 876, 876, 876, 876, 876, 876, 876, 876, 876, 876, 876, 876, 876, 876, 876, 876, 876, 876, 876, 876, 876, 876, 876, 876, 876, 876, 876, 876, 876, 876, 876, 876, 876, 876, 876, 876, 876, 876, 876, 876, 876, 876, 876, 876, 876, 876, 876, 876, 876, 876, 876, 876, 876, 876, 876, 876, 876, 876, 876, 876, 876, 876, 876, 876, 876, 876, 876, 876, 876, 876, 876, 876, 876, 876, 876, 876, 876, 876, 876, 876, 876, 876, 876, 876, 876, 876, 876, 876, 876, 876, 876, 876, 876, 876, 876, 876, 876, 876, 876, 876, 876, 876, 876, 876, 876, 876, 876, 876, 876, 876, 876, 876, 876, 876, 876, 876, 876, 876, 876, 876, 876, 876, 876, 876, 876, 876, 876, 876, 876, 876, 876, 876, 876, 876, 876, 876, 876, 876, 876, 876, 876, 876, 876, 876, 876, 876, 876, 876, 876, 876, 876, 876, 876, 876, 876, 876, 876, 876, 876, 876, 876, 876, 876, 876, 876, 876, 876, 876, 876, 876, 876, 876, 876, 876, 876, 876, 876, 876, 876, 876, 876, 876, 876, 876, 876, 876, 876, 876, 876, 876, 876, 876, 876, 876, 876, 876, 876, 876, 876, 876, 876, 876, 876, 876, 876, 876, 876, 876, 876, 
876, 876, 876, 876, 876, 876, 876, 876, 876, 876, 876, 876, 876, 876, 876, 876, 876, 876, 876, 876, 876, 876, 876, 876, 876, 876, 876, 876, 876, 876, 876, 876, 876, 876, 876, 876, 876, 876, 876, 876, 876, 876, 876, 876, 876, 876, 876, 876, 876, 876, 876, 876, 876, 876, 876, 876, 876, 876, 876, 876, 876, 876, 876, 876, 876, 876, 876, 876, 876, 876, 876, 876, 876, 876, 876, 876, 876, 876, 876, 876, 876, 876, 876, 876, 876, 876, 876, 876, 876, 876, 876, 876, 876, 876, 876, 876, 876, 876, 876, 876, 876, 876, 876, 876, 876, 876, 876, 876, 876, 876, 876, 876, 876, 876, 876, 876, 876, 876, 876, 876, 876, 876, 876, 876, 876, 876, 876, 876, 876, 876, 876, 876, 876, 876, 876, 876, 876, 876, 876, 876, 876, 876, 876, 876, 876, 876, 876, 876, 876, 876, 876, 876, 876, 876, 876, 876, 876, 876, 876, 876, 876, 876, 876, 876, 876, 876, 876, 876, 876, 876, 876, 876, 876, 876, 876, 876, 876, 876, 876, 876, 876, 876, 876, 876, 876, 876, 876, 876, 876, 876, 876, 876, 876, 876, 876, 876, 876, 876, 876, 876, 876, 876, 876, 876, 876, 876, 876, 876, 876, 876, 876, 876, 876, 876, 876, 876, 876, 876, 876, 876, 876, 876, 876, 876, 876, 876, 876, 876, 876, 876, 876, 876, 876, 876, 876, 876, 876, 876, 876, 876, 876, 876, 876, 876, 876, 876, 876, 876, 876, 876, 876, 876, 876, 876, 876, 876, 876, 876, 876, 876, 876, 876, 876, 876, 876, 876}, + {875, 875, 875, 875, 875, 875, 875, 875, 875, 875, 875, 875, 875, 875, 875, 875, 875, 875, 875, 875, 875, 875, 875, 875, 875, 875, 875, 875, 875, 875, 875, 875, 875, 875, 875, 875, 875, 875, 875, 875, 875, 875, 875, 875, 875, 875, 875, 875, 875, 875, 875, 875, 875, 875, 875, 875, 875, 875, 875, 875, 875, 875, 875, 875, 875, 875, 875, 875, 875, 875, 875, 875, 875, 875, 875, 875, 875, 875, 875, 875, 875, 875, 875, 875, 875, 875, 875, 875, 875, 875, 875, 875, 875, 875, 875, 875, 875, 875, 875, 875, 875, 875, 875, 875, 875, 875, 875, 875, 875, 875, 875, 875, 875, 875, 875, 875, 875, 875, 875, 875, 875, 875, 875, 875, 875, 875, 875, 875, 875, 875, 875, 875, 875, 875, 875, 875, 875, 875, 875, 875, 875, 875, 875, 875, 875, 875, 875, 875, 875, 875, 875, 875, 875, 875, 875, 875, 875, 875, 875, 875, 875, 875, 875, 875, 875, 875, 875, 875, 875, 875, 875, 875, 875, 875, 875, 875, 875, 875, 875, 875, 875, 875, 875, 875, 875, 875, 875, 875, 875, 875, 875, 875, 875, 875, 875, 875, 875, 875, 875, 875, 875, 875, 875, 875, 875, 875, 875, 875, 875, 875, 875, 875, 875, 875, 875, 875, 875, 875, 875, 875, 875, 875, 875, 875, 875, 875, 875, 875, 875, 875, 875, 875, 875, 875, 875, 875, 875, 875, 875, 875, 875, 875, 875, 875, 875, 875, 875, 875, 875, 875, 875, 875, 875, 875, 875, 875, 875, 875, 875, 875, 875, 875, 875, 875, 875, 875, 875, 875, 875, 875, 875, 875, 875, 875, 875, 875, 875, 875, 875, 875, 875, 875, 875, 875, 875, 875, 875, 875, 875, 875, 875, 875, 875, 875, 875, 875, 875, 875, 875, 875, 875, 875, 875, 875, 875, 875, 875, 875, 875, 875, 875, 875, 875, 875, 875, 875, 875, 875, 875, 875, 875, 875, 875, 875, 875, 875, 875, 875, 875, 875, 875, 875, 875, 875, 875, 875, 875, 875, 875, 875, 875, 875, 875, 875, 875, 875, 875, 875, 875, 875, 875, 875, 875, 875, 875, 875, 875, 875, 875, 875, 875, 875, 875, 875, 875, 875, 875, 875, 875, 875, 875, 875, 875, 875, 875, 875, 875, 875, 875, 875, 875, 875, 875, 875, 875, 875, 875, 875, 875, 875, 875, 875, 875, 875, 875, 875, 875, 875, 875, 875, 875, 875, 875, 875, 875, 875, 875, 875, 875, 875, 875, 875, 875, 875, 875, 875, 875, 875, 875, 875, 875, 875, 875, 875, 875, 875, 875, 875, 875, 875, 875, 875, 875, 875, 875, 875, 875, 875, 875, 875, 875, 875, 875, 875, 
875, 875, 875, 875, 875, 875, 875, 875, 875, 875, 875, 875, 875, 875, 875, 875, 875, 875, 875, 875, 875, 875, 875, 875, 875, 875, 875, 875, 875, 875, 875, 875, 875, 875, 875, 875, 875, 875, 875, 875, 875, 875, 875, 875, 875, 875, 875, 875, 875, 875, 875, 875, 875, 875, 875, 875, 875, 875, 875, 875, 875, 875, 875, 875, 875, 875, 875, 875, 875, 875, 875, 875, 875, 875, 875, 875, 875, 875, 875}, + {874, 874, 874, 874, 874, 874, 874, 874, 874, 874, 874, 874, 874, 874, 874, 874, 874, 874, 874, 874, 874, 874, 874, 874, 874, 874, 874, 874, 874, 874, 874, 874, 874, 874, 874, 874, 874, 874, 874, 874, 874, 874, 874, 874, 874, 874, 874, 874, 874, 874, 874, 874, 874, 874, 874, 874, 874, 874, 874, 874, 874, 874, 874, 874, 874, 874, 874, 874, 874, 874, 874, 874, 874, 874, 874, 874, 874, 874, 874, 874, 874, 874, 874, 874, 874, 874, 874, 874, 874, 874, 874, 874, 874, 874, 874, 874, 874, 874, 874, 874, 874, 874, 874, 874, 874, 874, 874, 874, 874, 874, 874, 874, 874, 874, 874, 874, 874, 874, 874, 874, 874, 874, 874, 874, 874, 874, 874, 874, 874, 874, 874, 874, 874, 874, 874, 874, 874, 874, 874, 874, 874, 874, 874, 874, 874, 874, 874, 874, 874, 874, 874, 874, 874, 874, 874, 874, 874, 874, 874, 874, 874, 874, 874, 874, 874, 874, 874, 874, 874, 874, 874, 874, 874, 874, 874, 874, 874, 874, 874, 874, 874, 874, 874, 874, 874, 874, 874, 874, 874, 874, 874, 874, 874, 874, 874, 874, 874, 874, 874, 874, 874, 874, 874, 874, 874, 874, 874, 874, 874, 874, 874, 874, 874, 874, 874, 874, 874, 874, 874, 874, 874, 874, 874, 874, 874, 874, 874, 874, 874, 874, 874, 874, 874, 874, 874, 874, 874, 874, 874, 874, 874, 874, 874, 874, 874, 874, 874, 874, 874, 874, 874, 874, 874, 874, 874, 874, 874, 874, 874, 874, 874, 874, 874, 874, 874, 874, 874, 874, 874, 874, 874, 874, 874, 874, 874, 874, 874, 874, 874, 874, 874, 874, 874, 874, 874, 874, 874, 874, 874, 874, 874, 874, 874, 874, 874, 874, 874, 874, 874, 874, 874, 874, 874, 874, 874, 874, 874, 874, 874, 874, 874, 874, 874, 874, 874, 874, 874, 874, 874, 874, 874, 874, 874, 874, 874, 874, 874, 874, 874, 874, 874, 874, 874, 874, 874, 874, 874, 874, 874, 874, 874, 874, 874, 874, 874, 874, 874, 874, 874, 874, 874, 874, 874, 874, 874, 874, 874, 874, 874, 874, 874, 874, 874, 874, 874, 874, 874, 874, 874, 874, 874, 874, 874, 874, 874, 874, 874, 874, 874, 874, 874, 874, 874, 874, 874, 874, 874, 874, 874, 874, 874, 874, 874, 874, 874, 874, 874, 874, 874, 874, 874, 874, 874, 874, 874, 874, 874, 874, 874, 874, 874, 874, 874, 874, 874, 874, 874, 874, 874, 874, 874, 874, 874, 874, 874, 874, 874, 874, 874, 874, 874, 874, 874, 874, 874, 874, 874, 874, 874, 874, 874, 874, 874, 874, 874, 874, 874, 874, 874, 874, 874, 874, 874, 874, 874, 874, 874, 874, 874, 874, 874, 874, 874, 874, 874, 874, 874, 874, 874, 874, 874, 874, 874, 874, 874, 874, 874, 874, 874, 874, 874, 874, 874, 874, 874, 874, 874, 874, 874, 874, 874, 874, 874, 874, 874, 874, 874, 874, 874, 874, 874, 874, 874, 874, 874, 874, 874, 874, 874, 874, 874, 874, 874, 874, 874, 874, 874, 874, 874, 874, 874, 874, 874}, + {873, 873, 873, 873, 873, 873, 873, 873, 873, 873, 873, 873, 873, 873, 873, 873, 873, 873, 873, 873, 873, 873, 873, 873, 873, 873, 873, 873, 873, 873, 873, 873, 873, 873, 873, 873, 873, 873, 873, 873, 873, 873, 873, 873, 873, 873, 873, 873, 873, 873, 873, 873, 873, 873, 873, 873, 873, 873, 873, 873, 873, 873, 873, 873, 873, 873, 873, 873, 873, 873, 873, 873, 873, 873, 873, 873, 873, 873, 873, 873, 873, 873, 873, 873, 873, 873, 873, 873, 873, 873, 873, 873, 873, 873, 873, 873, 873, 873, 873, 873, 873, 873, 873, 873, 873, 873, 873, 
873, 873, 873, 873, 873, 873, 873, 873, 873, 873, 873, 873, 873, 873, 873, 873, 873, 873, 873, 873, 873, 873, 873, 873, 873, 873, 873, 873, 873, 873, 873, 873, 873, 873, 873, 873, 873, 873, 873, 873, 873, 873, 873, 873, 873, 873, 873, 873, 873, 873, 873, 873, 873, 873, 873, 873, 873, 873, 873, 873, 873, 873, 873, 873, 873, 873, 873, 873, 873, 873, 873, 873, 873, 873, 873, 873, 873, 873, 873, 873, 873, 873, 873, 873, 873, 873, 873, 873, 873, 873, 873, 873, 873, 873, 873, 873, 873, 873, 873, 873, 873, 873, 873, 873, 873, 873, 873, 873, 873, 873, 873, 873, 873, 873, 873, 873, 873, 873, 873, 873, 873, 873, 873, 873, 873, 873, 873, 873, 873, 873, 873, 873, 873, 873, 873, 873, 873, 873, 873, 873, 873, 873, 873, 873, 873, 873, 873, 873, 873, 873, 873, 873, 873, 873, 873, 873, 873, 873, 873, 873, 873, 873, 873, 873, 873, 873, 873, 873, 873, 873, 873, 873, 873, 873, 873, 873, 873, 873, 873, 873, 873, 873, 873, 873, 873, 873, 873, 873, 873, 873, 873, 873, 873, 873, 873, 873, 873, 873, 873, 873, 873, 873, 873, 873, 873, 873, 873, 873, 873, 873, 873, 873, 873, 873, 873, 873, 873, 873, 873, 873, 873, 873, 873, 873, 873, 873, 873, 873, 873, 873, 873, 873, 873, 873, 873, 873, 873, 873, 873, 873, 873, 873, 873, 873, 873, 873, 873, 873, 873, 873, 873, 873, 873, 873, 873, 873, 873, 873, 873, 873, 873, 873, 873, 873, 873, 873, 873, 873, 873, 873, 873, 873, 873, 873, 873, 873, 873, 873, 873, 873, 873, 873, 873, 873, 873, 873, 873, 873, 873, 873, 873, 873, 873, 873, 873, 873, 873, 873, 873, 873, 873, 873, 873, 873, 873, 873, 873, 873, 873, 873, 873, 873, 873, 873, 873, 873, 873, 873, 873, 873, 873, 873, 873, 873, 873, 873, 873, 873, 873, 873, 873, 873, 873, 873, 873, 873, 873, 873, 873, 873, 873, 873, 873, 873, 873, 873, 873, 873, 873, 873, 873, 873, 873, 873, 873, 873, 873, 873, 873, 873, 873, 873, 873, 873, 873, 873, 873, 873, 873, 873, 873, 873, 873, 873, 873, 873, 873, 873, 873, 873, 873, 873, 873, 873, 873, 873, 873, 873, 873, 873, 873, 873, 873, 873, 873, 873, 873, 873, 873, 873, 873, 873, 873, 873, 873, 873, 873, 873, 873, 873, 873, 873, 873, 873, 873, 873}, + {872, 872, 872, 872, 872, 872, 872, 872, 872, 872, 872, 872, 872, 872, 872, 872, 872, 872, 872, 872, 872, 872, 872, 872, 872, 872, 872, 872, 872, 872, 872, 872, 872, 872, 872, 872, 872, 872, 872, 872, 872, 872, 872, 872, 872, 872, 872, 872, 872, 872, 872, 872, 872, 872, 872, 872, 872, 872, 872, 872, 872, 872, 872, 872, 872, 872, 872, 872, 872, 872, 872, 872, 872, 872, 872, 872, 872, 872, 872, 872, 872, 872, 872, 872, 872, 872, 872, 872, 872, 872, 872, 872, 872, 872, 872, 872, 872, 872, 872, 872, 872, 872, 872, 872, 872, 872, 872, 872, 872, 872, 872, 872, 872, 872, 872, 872, 872, 872, 872, 872, 872, 872, 872, 872, 872, 872, 872, 872, 872, 872, 872, 872, 872, 872, 872, 872, 872, 872, 872, 872, 872, 872, 872, 872, 872, 872, 872, 872, 872, 872, 872, 872, 872, 872, 872, 872, 872, 872, 872, 872, 872, 872, 872, 872, 872, 872, 872, 872, 872, 872, 872, 872, 872, 872, 872, 872, 872, 872, 872, 872, 872, 872, 872, 872, 872, 872, 872, 872, 872, 872, 872, 872, 872, 872, 872, 872, 872, 872, 872, 872, 872, 872, 872, 872, 872, 872, 872, 872, 872, 872, 872, 872, 872, 872, 872, 872, 872, 872, 872, 872, 872, 872, 872, 872, 872, 872, 872, 872, 872, 872, 872, 872, 872, 872, 872, 872, 872, 872, 872, 872, 872, 872, 872, 872, 872, 872, 872, 872, 872, 872, 872, 872, 872, 872, 872, 872, 872, 872, 872, 872, 872, 872, 872, 872, 872, 872, 872, 872, 872, 872, 872, 872, 872, 872, 872, 872, 872, 872, 872, 872, 872, 872, 872, 872, 872, 872, 872, 872, 872, 872, 872, 872, 872, 872, 
872, 872, 872, 872, 872, 872, 872, 872, 872, 872, 872, 872, 872, 872, 872, 872, 872, 872, 872, 872, 872, 872, 872, 872, 872, 872, 872, 872, 872, 872, 872, 872, 872, 872, 872, 872, 872, 872, 872, 872, 872, 872, 872, 872, 872, 872, 872, 872, 872, 872, 872, 872, 872, 872, 872, 872, 872, 872, 872, 872, 872, 872, 872, 872, 872, 872, 872, 872, 872, 872, 872, 872, 872, 872, 872, 872, 872, 872, 872, 872, 872, 872, 872, 872, 872, 872, 872, 872, 872, 872, 872, 872, 872, 872, 872, 872, 872, 872, 872, 872, 872, 872, 872, 872, 872, 872, 872, 872, 872, 872, 872, 872, 872, 872, 872, 872, 872, 872, 872, 872, 872, 872, 872, 872, 872, 872, 872, 872, 872, 872, 872, 872, 872, 872, 872, 872, 872, 872, 872, 872, 872, 872, 872, 872, 872, 872, 872, 872, 872, 872, 872, 872, 872, 872, 872, 872, 872, 872, 872, 872, 872, 872, 872, 872, 872, 872, 872, 872, 872, 872, 872, 872, 872, 872, 872, 872, 872, 872, 872, 872, 872, 872, 872, 872, 872, 872, 872, 872, 872, 872, 872, 872, 872, 872, 872, 872, 872, 872, 872, 872, 872, 872, 872, 872, 872, 872, 872, 872, 872, 872, 872, 872, 872, 872, 872, 872, 872, 872, 872, 872, 872, 872, 872, 872, 872, 872, 872, 872, 872}, + // 70 + {871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 
871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871}, + {870, 870, 870, 870, 870, 870, 870, 870, 870, 870, 870, 870, 870, 870, 870, 870, 870, 870, 870, 870, 870, 870, 870, 870, 870, 870, 870, 870, 870, 870, 870, 870, 870, 870, 870, 870, 870, 870, 870, 870, 870, 870, 870, 870, 870, 870, 870, 870, 870, 870, 870, 870, 870, 870, 870, 870, 870, 870, 870, 870, 870, 870, 870, 870, 870, 870, 870, 870, 870, 870, 870, 870, 870, 870, 870, 870, 870, 870, 870, 870, 870, 870, 870, 870, 870, 870, 870, 870, 870, 870, 870, 870, 870, 870, 870, 870, 870, 870, 870, 870, 870, 870, 870, 870, 870, 870, 870, 870, 870, 870, 870, 870, 870, 870, 870, 870, 870, 870, 870, 870, 870, 870, 870, 870, 870, 870, 870, 870, 870, 870, 870, 870, 870, 870, 870, 870, 870, 870, 870, 870, 870, 870, 870, 870, 870, 870, 870, 870, 870, 870, 870, 870, 870, 870, 870, 870, 870, 870, 870, 870, 870, 870, 870, 870, 870, 870, 870, 870, 870, 870, 870, 870, 870, 870, 870, 870, 870, 870, 870, 870, 870, 870, 870, 870, 870, 870, 870, 870, 870, 870, 870, 870, 870, 870, 870, 870, 870, 870, 870, 870, 870, 870, 870, 870, 870, 870, 870, 870, 870, 870, 870, 870, 870, 870, 870, 870, 870, 870, 870, 870, 870, 870, 870, 870, 870, 870, 870, 870, 870, 870, 870, 870, 870, 870, 870, 870, 870, 870, 870, 870, 870, 870, 870, 870, 870, 870, 870, 870, 870, 870, 870, 870, 870, 870, 870, 870, 870, 870, 870, 870, 870, 870, 870, 870, 870, 870, 870, 870, 870, 870, 870, 870, 870, 870, 870, 870, 870, 870, 870, 870, 870, 870, 870, 870, 870, 870, 870, 870, 870, 870, 870, 870, 870, 870, 870, 870, 870, 870, 870, 870, 870, 870, 870, 870, 870, 870, 870, 870, 870, 870, 870, 870, 870, 870, 870, 870, 870, 870, 870, 870, 870, 870, 870, 870, 870, 870, 870, 870, 870, 870, 870, 870, 870, 870, 870, 870, 870, 870, 870, 870, 870, 870, 870, 870, 870, 870, 870, 870, 870, 870, 870, 870, 870, 870, 870, 870, 870, 870, 870, 870, 870, 870, 870, 870, 870, 870, 870, 870, 870, 870, 870, 870, 870, 870, 870, 870, 870, 870, 870, 870, 870, 870, 870, 870, 870, 870, 870, 870, 870, 870, 870, 870, 870, 870, 870, 870, 870, 870, 870, 870, 870, 870, 870, 870, 870, 870, 870, 870, 870, 870, 870, 870, 870, 870, 870, 870, 870, 870, 870, 870, 870, 870, 870, 870, 870, 870, 870, 870, 870, 870, 870, 870, 870, 870, 870, 870, 870, 870, 870, 870, 870, 870, 870, 870, 870, 870, 870, 870, 870, 870, 870, 870, 870, 870, 870, 870, 870, 870, 870, 870, 870, 870, 870, 870, 870, 870, 870, 870, 870, 870, 870, 870, 870, 870, 870, 870, 870, 870, 870, 870, 870, 870, 870, 870, 870, 870, 870, 870, 870, 870, 870, 870, 870, 870, 870, 870, 870, 870, 870, 870, 870, 870, 870, 870, 870, 870, 870, 870, 870, 870, 870, 870, 870, 870, 870, 870, 870, 870, 870, 870, 870, 870, 870}, + {869, 869, 869, 869, 869, 869, 869, 869, 869, 869, 869, 869, 869, 869, 869, 869, 869, 869, 869, 869, 869, 869, 869, 869, 869, 869, 869, 869, 869, 869, 869, 869, 869, 869, 869, 869, 869, 869, 869, 869, 869, 869, 869, 869, 869, 869, 869, 869, 869, 869, 869, 869, 869, 869, 869, 869, 869, 869, 869, 869, 869, 869, 869, 869, 869, 869, 869, 869, 869, 869, 869, 869, 869, 869, 869, 869, 869, 869, 869, 869, 869, 869, 869, 869, 869, 869, 869, 869, 869, 869, 869, 869, 869, 869, 869, 869, 869, 869, 869, 869, 869, 869, 869, 869, 869, 869, 869, 869, 869, 869, 869, 869, 869, 869, 869, 869, 869, 869, 869, 869, 869, 869, 869, 869, 869, 869, 869, 869, 869, 869, 869, 869, 869, 869, 869, 869, 869, 869, 869, 869, 869, 869, 
869, 869, 869, 869, 869, 869, 869, 869, 869, 869, 869, 869, 869, 869, 869, 869, 869, 869, 869, 869, 869, 869, 869, 869, 869, 869, 869, 869, 869, 869, 869, 869, 869, 869, 869, 869, 869, 869, 869, 869, 869, 869, 869, 869, 869, 869, 869, 869, 869, 869, 869, 869, 869, 869, 869, 869, 869, 869, 869, 869, 869, 869, 869, 869, 869, 869, 869, 869, 869, 869, 869, 869, 869, 869, 869, 869, 869, 869, 869, 869, 869, 869, 869, 869, 869, 869, 869, 869, 869, 869, 869, 869, 869, 869, 869, 869, 869, 869, 869, 869, 869, 869, 869, 869, 869, 869, 869, 869, 869, 869, 869, 869, 869, 869, 869, 869, 869, 869, 869, 869, 869, 869, 869, 869, 869, 869, 869, 869, 869, 869, 869, 869, 869, 869, 869, 869, 869, 869, 869, 869, 869, 869, 869, 869, 869, 869, 869, 869, 869, 869, 869, 869, 869, 869, 869, 869, 869, 869, 869, 869, 869, 869, 869, 869, 869, 869, 869, 869, 869, 869, 869, 869, 869, 869, 869, 869, 869, 869, 869, 869, 869, 869, 869, 869, 869, 869, 869, 869, 869, 869, 869, 869, 869, 869, 869, 869, 869, 869, 869, 869, 869, 869, 869, 869, 869, 869, 869, 869, 869, 869, 869, 869, 869, 869, 869, 869, 869, 869, 869, 869, 869, 869, 869, 869, 869, 869, 869, 869, 869, 869, 869, 869, 869, 869, 869, 869, 869, 869, 869, 869, 869, 869, 869, 869, 869, 869, 869, 869, 869, 869, 869, 869, 869, 869, 869, 869, 869, 869, 869, 869, 869, 869, 869, 869, 869, 869, 869, 869, 869, 869, 869, 869, 869, 869, 869, 869, 869, 869, 869, 869, 869, 869, 869, 869, 869, 869, 869, 869, 869, 869, 869, 869, 869, 869, 869, 869, 869, 869, 869, 869, 869, 869, 869, 869, 869, 869, 869, 869, 869, 869, 869, 869, 869, 869, 869, 869, 869, 869, 869, 869, 869, 869, 869, 869, 869, 869, 869, 869, 869, 869, 869, 869, 869, 869, 869, 869, 869, 869, 869, 869, 869, 869, 869, 869, 869, 869, 869, 869, 869, 869, 869, 869, 869, 869, 869, 869, 869, 869, 869, 869, 869, 869, 869, 869, 869, 869, 869, 869, 869, 869, 869, 869, 869, 869, 869, 869, 869, 869, 869, 869, 869}, + {868, 868, 868, 868, 868, 868, 868, 868, 868, 868, 868, 868, 868, 868, 868, 868, 868, 868, 868, 868, 868, 868, 868, 868, 868, 868, 868, 868, 868, 868, 868, 868, 868, 868, 868, 868, 868, 868, 868, 868, 868, 868, 868, 868, 868, 868, 868, 868, 868, 868, 868, 868, 868, 868, 868, 868, 868, 868, 868, 868, 868, 868, 868, 868, 868, 868, 868, 868, 868, 868, 868, 868, 868, 868, 868, 868, 868, 868, 868, 868, 868, 868, 868, 868, 868, 868, 868, 868, 868, 868, 868, 868, 868, 868, 868, 868, 868, 868, 868, 868, 868, 868, 868, 868, 868, 868, 868, 868, 868, 868, 868, 868, 868, 868, 868, 868, 868, 868, 868, 868, 868, 868, 868, 868, 868, 868, 868, 868, 868, 868, 868, 868, 868, 868, 868, 868, 868, 868, 868, 868, 868, 868, 868, 868, 868, 868, 868, 868, 868, 868, 868, 868, 868, 868, 868, 868, 868, 868, 868, 868, 868, 868, 868, 868, 868, 868, 868, 868, 868, 868, 868, 868, 868, 868, 868, 868, 868, 868, 868, 868, 868, 868, 868, 868, 868, 868, 868, 868, 868, 868, 868, 868, 868, 868, 868, 868, 868, 868, 868, 868, 868, 868, 868, 868, 868, 868, 868, 868, 868, 868, 868, 868, 868, 868, 868, 868, 868, 868, 868, 868, 868, 868, 868, 868, 868, 868, 868, 868, 868, 868, 868, 868, 868, 868, 868, 868, 868, 868, 868, 868, 868, 868, 868, 868, 868, 868, 868, 868, 868, 868, 868, 868, 868, 868, 868, 868, 868, 868, 868, 868, 868, 868, 868, 868, 868, 868, 868, 868, 868, 868, 868, 868, 868, 868, 868, 868, 868, 868, 868, 868, 868, 868, 868, 868, 868, 868, 868, 868, 868, 868, 868, 868, 868, 868, 868, 868, 868, 868, 868, 868, 868, 868, 868, 868, 868, 868, 868, 868, 868, 868, 868, 868, 868, 868, 868, 868, 868, 868, 868, 868, 868, 868, 868, 868, 868, 868, 868, 868, 868, 
868, 868, 868, 868, 868, 868, 868, 868, 868, 868, 868, 868, 868, 868, 868, 868, 868, 868, 868, 868, 868, 868, 868, 868, 868, 868, 868, 868, 868, 868, 868, 868, 868, 868, 868, 868, 868, 868, 868, 868, 868, 868, 868, 868, 868, 868, 868, 868, 868, 868, 868, 868, 868, 868, 868, 868, 868, 868, 868, 868, 868, 868, 868, 868, 868, 868, 868, 868, 868, 868, 868, 868, 868, 868, 868, 868, 868, 868, 868, 868, 868, 868, 868, 868, 868, 868, 868, 868, 868, 868, 868, 868, 868, 868, 868, 868, 868, 868, 868, 868, 868, 868, 868, 868, 868, 868, 868, 868, 868, 868, 868, 868, 868, 868, 868, 868, 868, 868, 868, 868, 868, 868, 868, 868, 868, 868, 868, 868, 868, 868, 868, 868, 868, 868, 868, 868, 868, 868, 868, 868, 868, 868, 868, 868, 868, 868, 868, 868, 868, 868, 868, 868, 868, 868, 868, 868, 868, 868, 868, 868, 868, 868, 868, 868, 868, 868, 868, 868, 868, 868, 868, 868, 868, 868, 868, 868, 868, 868, 868, 868, 868, 868, 868, 868, 868, 868, 868, 868, 868, 868, 868, 868, 868, 868}, + {867, 867, 867, 867, 867, 867, 867, 867, 867, 867, 867, 867, 867, 867, 867, 867, 867, 867, 867, 867, 867, 867, 867, 867, 867, 867, 867, 867, 867, 867, 867, 867, 867, 867, 867, 867, 867, 867, 867, 867, 867, 867, 867, 867, 867, 867, 867, 867, 867, 867, 867, 867, 867, 867, 867, 867, 867, 867, 867, 867, 867, 867, 867, 867, 867, 867, 867, 867, 867, 867, 867, 867, 867, 867, 867, 867, 867, 867, 867, 867, 867, 867, 867, 867, 867, 867, 867, 867, 867, 867, 867, 867, 867, 867, 867, 867, 867, 867, 867, 867, 867, 867, 867, 867, 867, 867, 867, 867, 867, 867, 867, 867, 867, 867, 867, 867, 867, 867, 867, 867, 867, 867, 867, 867, 867, 867, 867, 867, 867, 867, 867, 867, 867, 867, 867, 867, 867, 867, 867, 867, 867, 867, 867, 867, 867, 867, 867, 867, 867, 867, 867, 867, 867, 867, 867, 867, 867, 867, 867, 867, 867, 867, 867, 867, 867, 867, 867, 867, 867, 867, 867, 867, 867, 867, 867, 867, 867, 867, 867, 867, 867, 867, 867, 867, 867, 867, 867, 867, 867, 867, 867, 867, 867, 867, 867, 867, 867, 867, 867, 867, 867, 867, 867, 867, 867, 867, 867, 867, 867, 867, 867, 867, 867, 867, 867, 867, 867, 867, 867, 867, 867, 867, 867, 867, 867, 867, 867, 867, 867, 867, 867, 867, 867, 867, 867, 867, 867, 867, 867, 867, 867, 867, 867, 867, 867, 867, 867, 867, 867, 867, 867, 867, 867, 867, 867, 867, 867, 867, 867, 867, 867, 867, 867, 867, 867, 867, 867, 867, 867, 867, 867, 867, 867, 867, 867, 867, 867, 867, 867, 867, 867, 867, 867, 867, 867, 867, 867, 867, 867, 867, 867, 867, 867, 867, 867, 867, 867, 867, 867, 867, 867, 867, 867, 867, 867, 867, 867, 867, 867, 867, 867, 867, 867, 867, 867, 867, 867, 867, 867, 867, 867, 867, 867, 867, 867, 867, 867, 867, 867, 867, 867, 867, 867, 867, 867, 867, 867, 867, 867, 867, 867, 867, 867, 867, 867, 867, 867, 867, 867, 867, 867, 867, 867, 867, 867, 867, 867, 867, 867, 867, 867, 867, 867, 867, 867, 867, 867, 867, 867, 867, 867, 867, 867, 867, 867, 867, 867, 867, 867, 867, 867, 867, 867, 867, 867, 867, 867, 867, 867, 867, 867, 867, 867, 867, 867, 867, 867, 867, 867, 867, 867, 867, 867, 867, 867, 867, 867, 867, 867, 867, 867, 867, 867, 867, 867, 867, 867, 867, 867, 867, 867, 867, 867, 867, 867, 867, 867, 867, 867, 867, 867, 867, 867, 867, 867, 867, 867, 867, 867, 867, 867, 867, 867, 867, 867, 867, 867, 867, 867, 867, 867, 867, 867, 867, 867, 867, 867, 867, 867, 867, 867, 867, 867, 867, 867, 867, 867, 867, 867, 867, 867, 867, 867, 867, 867, 867, 867, 867, 867, 867, 867, 867, 867, 867, 867, 867, 867, 867, 867, 867, 867, 867, 867, 867, 867, 867, 867, 867, 867, 867, 867, 867, 867, 867, 867, 867, 867, 867, 867, 867, 867, 867, 867, 867, 867, 867, 
867, 867, 867, 867, 867, 867, 867}, + // 75 + {866, 866, 866, 866, 866, 866, 866, 866, 866, 866, 866, 866, 866, 866, 866, 866, 866, 866, 866, 866, 866, 866, 866, 866, 866, 866, 866, 866, 866, 866, 866, 866, 866, 866, 866, 866, 866, 866, 866, 866, 866, 866, 866, 866, 866, 866, 866, 866, 866, 866, 866, 866, 866, 866, 866, 866, 866, 866, 866, 866, 866, 866, 866, 866, 866, 866, 866, 866, 866, 866, 866, 866, 866, 866, 866, 866, 866, 866, 866, 866, 866, 866, 866, 866, 866, 866, 866, 866, 866, 866, 866, 866, 866, 866, 866, 866, 866, 866, 866, 866, 866, 866, 866, 866, 866, 866, 866, 866, 866, 866, 866, 866, 866, 866, 866, 866, 866, 866, 866, 866, 866, 866, 866, 866, 866, 866, 866, 866, 866, 866, 866, 866, 866, 866, 866, 866, 866, 866, 866, 866, 866, 866, 866, 866, 866, 866, 866, 866, 866, 866, 866, 866, 866, 866, 866, 866, 866, 866, 866, 866, 866, 866, 866, 866, 866, 866, 866, 866, 866, 866, 866, 866, 866, 866, 866, 866, 866, 866, 866, 866, 866, 866, 866, 866, 866, 866, 866, 866, 866, 866, 866, 866, 866, 866, 866, 866, 866, 866, 866, 866, 866, 866, 866, 866, 866, 866, 866, 866, 866, 866, 866, 866, 866, 866, 866, 866, 866, 866, 866, 866, 866, 866, 866, 866, 866, 866, 866, 866, 866, 866, 866, 866, 866, 866, 866, 866, 866, 866, 866, 866, 866, 866, 866, 866, 866, 866, 866, 866, 866, 866, 866, 866, 866, 866, 866, 866, 866, 866, 866, 866, 866, 866, 866, 866, 866, 866, 866, 866, 866, 866, 866, 866, 866, 866, 866, 866, 866, 866, 866, 866, 866, 866, 866, 866, 866, 866, 866, 866, 866, 866, 866, 866, 866, 866, 866, 866, 866, 866, 866, 866, 866, 866, 866, 866, 866, 866, 866, 866, 866, 866, 866, 866, 866, 866, 866, 866, 866, 866, 866, 866, 866, 866, 866, 866, 866, 866, 866, 866, 866, 866, 866, 866, 866, 866, 866, 866, 866, 866, 866, 866, 866, 866, 866, 866, 866, 866, 866, 866, 866, 866, 866, 866, 866, 866, 866, 866, 866, 866, 866, 866, 866, 866, 866, 866, 866, 866, 866, 866, 866, 866, 866, 866, 866, 866, 866, 866, 866, 866, 866, 866, 866, 866, 866, 866, 866, 866, 866, 866, 866, 866, 866, 866, 866, 866, 866, 866, 866, 866, 866, 866, 866, 866, 866, 866, 866, 866, 866, 866, 866, 866, 866, 866, 866, 866, 866, 866, 866, 866, 866, 866, 866, 866, 866, 866, 866, 866, 866, 866, 866, 866, 866, 866, 866, 866, 866, 866, 866, 866, 866, 866, 866, 866, 866, 866, 866, 866, 866, 866, 866, 866, 866, 866, 866, 866, 866, 866, 866, 866, 866, 866, 866, 866, 866, 866, 866, 866, 866, 866, 866, 866, 866, 866, 866, 866, 866, 866, 866, 866, 866, 866, 866, 866, 866, 866, 866, 866, 866, 866, 866, 866, 866, 866, 866, 866, 866, 866, 866, 866, 866, 866, 866, 866, 866, 866, 866, 866, 866, 866, 866, 866, 866, 866, 866, 866, 866, 866, 866, 866, 866, 866, 866, 866, 866}, + {865, 865, 865, 865, 865, 865, 865, 865, 865, 865, 865, 865, 865, 865, 865, 865, 865, 865, 865, 865, 865, 865, 865, 865, 865, 865, 865, 865, 865, 865, 865, 865, 865, 865, 865, 865, 865, 865, 865, 865, 865, 865, 865, 865, 865, 865, 865, 865, 865, 865, 865, 865, 865, 865, 865, 865, 865, 865, 865, 865, 865, 865, 865, 865, 865, 865, 865, 865, 865, 865, 865, 865, 865, 865, 865, 865, 865, 865, 865, 865, 865, 865, 865, 865, 865, 865, 865, 865, 865, 865, 865, 865, 865, 865, 865, 865, 865, 865, 865, 865, 865, 865, 865, 865, 865, 865, 865, 865, 865, 865, 865, 865, 865, 865, 865, 865, 865, 865, 865, 865, 865, 865, 865, 865, 865, 865, 865, 865, 865, 865, 865, 865, 865, 865, 865, 865, 865, 865, 865, 865, 865, 865, 865, 865, 865, 865, 865, 865, 865, 865, 865, 865, 865, 865, 865, 865, 865, 865, 865, 865, 865, 865, 865, 865, 865, 865, 865, 865, 865, 865, 865, 865, 865, 865, 865, 865, 865, 
865, 865, 865, 865, 865, 865, 865, 865, 865, 865, 865, 865, 865, 865, 865, 865, 865, 865, 865, 865, 865, 865, 865, 865, 865, 865, 865, 865, 865, 865, 865, 865, 865, 865, 865, 865, 865, 865, 865, 865, 865, 865, 865, 865, 865, 865, 865, 865, 865, 865, 865, 865, 865, 865, 865, 865, 865, 865, 865, 865, 865, 865, 865, 865, 865, 865, 865, 865, 865, 865, 865, 865, 865, 865, 865, 865, 865, 865, 865, 865, 865, 865, 865, 865, 865, 865, 865, 865, 865, 865, 865, 865, 865, 865, 865, 865, 865, 865, 865, 865, 865, 865, 865, 865, 865, 865, 865, 865, 865, 865, 865, 865, 865, 865, 865, 865, 865, 865, 865, 865, 865, 865, 865, 865, 865, 865, 865, 865, 865, 865, 865, 865, 865, 865, 865, 865, 865, 865, 865, 865, 865, 865, 865, 865, 865, 865, 865, 865, 865, 865, 865, 865, 865, 865, 865, 865, 865, 865, 865, 865, 865, 865, 865, 865, 865, 865, 865, 865, 865, 865, 865, 865, 865, 865, 865, 865, 865, 865, 865, 865, 865, 865, 865, 865, 865, 865, 865, 865, 865, 865, 865, 865, 865, 865, 865, 865, 865, 865, 865, 865, 865, 865, 865, 865, 865, 865, 865, 865, 865, 865, 865, 865, 865, 865, 865, 865, 865, 865, 865, 865, 865, 865, 865, 865, 865, 865, 865, 865, 865, 865, 865, 865, 865, 865, 865, 865, 865, 865, 865, 865, 865, 865, 865, 865, 865, 865, 865, 865, 865, 865, 865, 865, 865, 865, 865, 865, 865, 865, 865, 865, 865, 865, 865, 865, 865, 865, 865, 865, 865, 865, 865, 865, 865, 865, 865, 865, 865, 865, 865, 865, 865, 865, 865, 865, 865, 865, 865, 865, 865, 865, 865, 865, 865, 865, 865, 865, 865, 865, 865, 865, 865, 865, 865, 865, 865, 865, 865, 865, 865, 865, 865, 865, 865, 865, 865, 865, 865, 865, 865, 865, 865, 865, 865, 865, 865, 865, 865, 865, 865, 865, 865, 865, 865, 865, 865, 865, 865, 865, 865, 865, 865, 865, 865, 865, 865, 865}, + {864, 864, 864, 864, 864, 864, 864, 864, 864, 864, 864, 864, 864, 864, 864, 864, 864, 864, 864, 864, 864, 864, 864, 864, 864, 864, 864, 864, 864, 864, 864, 864, 864, 864, 864, 864, 864, 864, 864, 864, 864, 864, 864, 864, 864, 864, 864, 864, 864, 864, 864, 864, 864, 864, 864, 864, 864, 864, 864, 864, 864, 864, 864, 864, 864, 864, 864, 864, 864, 864, 864, 864, 864, 864, 864, 864, 864, 864, 864, 864, 864, 864, 864, 864, 864, 864, 864, 864, 864, 864, 864, 864, 864, 864, 864, 864, 864, 864, 864, 864, 864, 864, 864, 864, 864, 864, 864, 864, 864, 864, 864, 864, 864, 864, 864, 864, 864, 864, 864, 864, 864, 864, 864, 864, 864, 864, 864, 864, 864, 864, 864, 864, 864, 864, 864, 864, 864, 864, 864, 864, 864, 864, 864, 864, 864, 864, 864, 864, 864, 864, 864, 864, 864, 864, 864, 864, 864, 864, 864, 864, 864, 864, 864, 864, 864, 864, 864, 864, 864, 864, 864, 864, 864, 864, 864, 864, 864, 864, 864, 864, 864, 864, 864, 864, 864, 864, 864, 864, 864, 864, 864, 864, 864, 864, 864, 864, 864, 864, 864, 864, 864, 864, 864, 864, 864, 864, 864, 864, 864, 864, 864, 864, 864, 864, 864, 864, 864, 864, 864, 864, 864, 864, 864, 864, 864, 864, 864, 864, 864, 864, 864, 864, 864, 864, 864, 864, 864, 864, 864, 864, 864, 864, 864, 864, 864, 864, 864, 864, 864, 864, 864, 864, 864, 864, 864, 864, 864, 864, 864, 864, 864, 864, 864, 864, 864, 864, 864, 864, 864, 864, 864, 864, 864, 864, 864, 864, 864, 864, 864, 864, 864, 864, 864, 864, 864, 864, 864, 864, 864, 864, 864, 864, 864, 864, 864, 864, 864, 864, 864, 864, 864, 864, 864, 864, 864, 864, 864, 864, 864, 864, 864, 864, 864, 864, 864, 864, 864, 864, 864, 864, 864, 864, 864, 864, 864, 864, 864, 864, 864, 864, 864, 864, 864, 864, 864, 864, 864, 864, 864, 864, 864, 864, 864, 864, 864, 864, 864, 864, 864, 864, 864, 864, 864, 864, 864, 864, 864, 864, 864, 864, 864, 864, 864, 864, 
864, 864, 864, 864, 864, 864, 864, 864, 864, 864, 864, 864, 864, 864, 864, 864, 864, 864, 864, 864, 864, 864, 864, 864, 864, 864, 864, 864, 864, 864, 864, 864, 864, 864, 864, 864, 864, 864, 864, 864, 864, 864, 864, 864, 864, 864, 864, 864, 864, 864, 864, 864, 864, 864, 864, 864, 864, 864, 864, 864, 864, 864, 864, 864, 864, 864, 864, 864, 864, 864, 864, 864, 864, 864, 864, 864, 864, 864, 864, 864, 864, 864, 864, 864, 864, 864, 864, 864, 864, 864, 864, 864, 864, 864, 864, 864, 864, 864, 864, 864, 864, 864, 864, 864, 864, 864, 864, 864, 864, 864, 864, 864, 864, 864, 864, 864, 864, 864, 864, 864, 864, 864, 864, 864, 864, 864, 864, 864, 864, 864, 864, 864, 864, 864, 864, 864, 864, 864, 864, 864, 864, 864, 864, 864, 864, 864, 864, 864, 864, 864, 864, 864, 864, 864, 864, 864, 864, 864, 864}, + {863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863}, + {862, 862, 862, 862, 862, 862, 862, 862, 862, 862, 862, 862, 862, 862, 862, 862, 862, 862, 862, 862, 862, 862, 862, 862, 862, 862, 862, 
862, 862, 862, 862, 862, 862, 862, 862, 862, 862, 862, 862, 862, 862, 862, 862, 862, 862, 862, 862, 862, 862, 862, 862, 862, 862, 862, 862, 862, 862, 862, 862, 862, 862, 862, 862, 862, 862, 862, 862, 862, 862, 862, 862, 862, 862, 862, 862, 862, 862, 862, 862, 862, 862, 862, 862, 862, 862, 862, 862, 862, 862, 862, 862, 862, 862, 862, 862, 862, 862, 862, 862, 862, 862, 862, 862, 862, 862, 862, 862, 862, 862, 862, 862, 862, 862, 862, 862, 862, 862, 862, 862, 862, 862, 862, 862, 862, 862, 862, 862, 862, 862, 862, 862, 862, 862, 862, 862, 862, 862, 862, 862, 862, 862, 862, 862, 862, 862, 862, 862, 862, 862, 862, 862, 862, 862, 862, 862, 862, 862, 862, 862, 862, 862, 862, 862, 862, 862, 862, 862, 862, 862, 862, 862, 862, 862, 862, 862, 862, 862, 862, 862, 862, 862, 862, 862, 862, 862, 862, 862, 862, 862, 862, 862, 862, 862, 862, 862, 862, 862, 862, 862, 862, 862, 862, 862, 862, 862, 862, 862, 862, 862, 862, 862, 862, 862, 862, 862, 862, 862, 862, 862, 862, 862, 862, 862, 862, 862, 862, 862, 862, 862, 862, 862, 862, 862, 862, 862, 862, 862, 862, 862, 862, 862, 862, 862, 862, 862, 862, 862, 862, 862, 862, 862, 862, 862, 862, 862, 862, 862, 862, 862, 862, 862, 862, 862, 862, 862, 862, 862, 862, 862, 862, 862, 862, 862, 862, 862, 862, 862, 862, 862, 862, 862, 862, 862, 862, 862, 862, 862, 862, 862, 862, 862, 862, 862, 862, 862, 862, 862, 862, 862, 862, 862, 862, 862, 862, 862, 862, 862, 862, 862, 862, 862, 862, 862, 862, 862, 862, 862, 862, 862, 862, 862, 862, 862, 862, 862, 862, 862, 862, 862, 862, 862, 862, 862, 862, 862, 862, 862, 862, 862, 862, 862, 862, 862, 862, 862, 862, 862, 862, 862, 862, 862, 862, 862, 862, 862, 862, 862, 862, 862, 862, 862, 862, 862, 862, 862, 862, 862, 862, 862, 862, 862, 862, 862, 862, 862, 862, 862, 862, 862, 862, 862, 862, 862, 862, 862, 862, 862, 862, 862, 862, 862, 862, 862, 862, 862, 862, 862, 862, 862, 862, 862, 862, 862, 862, 862, 862, 862, 862, 862, 862, 862, 862, 862, 862, 862, 862, 862, 862, 862, 862, 862, 862, 862, 862, 862, 862, 862, 862, 862, 862, 862, 862, 862, 862, 862, 862, 862, 862, 862, 862, 862, 862, 862, 862, 862, 862, 862, 862, 862, 862, 862, 862, 862, 862, 862, 862, 862, 862, 862, 862, 862, 862, 862, 862, 862, 862, 862, 862, 862, 862, 862, 862, 862, 862, 862, 862, 862, 862, 862, 862, 862, 862, 862, 862, 862, 862, 862, 862, 862, 862, 862, 862, 862, 862, 862, 862, 862, 862, 862, 862, 862, 862, 862, 862, 862, 862, 862, 862, 862, 862, 862, 862, 862, 862, 862, 862, 862, 862, 862, 862, 862, 862, 862}, + // 80 + {861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 
861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861}, + {860, 860, 860, 860, 860, 860, 860, 860, 860, 860, 860, 860, 860, 860, 860, 860, 860, 860, 860, 860, 860, 860, 860, 860, 860, 860, 860, 860, 860, 860, 860, 860, 860, 860, 860, 860, 860, 860, 860, 860, 860, 860, 860, 860, 860, 860, 860, 860, 860, 860, 860, 860, 860, 860, 860, 860, 860, 860, 860, 860, 860, 860, 860, 860, 860, 860, 860, 860, 860, 860, 860, 860, 860, 860, 860, 860, 860, 860, 860, 860, 860, 860, 860, 860, 860, 860, 860, 860, 860, 860, 860, 860, 860, 860, 860, 860, 860, 860, 860, 860, 860, 860, 860, 860, 860, 860, 860, 860, 860, 860, 860, 860, 860, 860, 860, 860, 860, 860, 860, 860, 860, 860, 860, 860, 860, 860, 860, 860, 860, 860, 860, 860, 860, 860, 860, 860, 860, 860, 860, 860, 860, 860, 860, 860, 860, 860, 860, 860, 860, 860, 860, 860, 860, 860, 860, 860, 860, 860, 860, 860, 860, 860, 860, 860, 860, 860, 860, 860, 860, 860, 860, 860, 860, 860, 860, 860, 860, 860, 860, 860, 860, 860, 860, 860, 860, 860, 860, 860, 860, 860, 860, 860, 860, 860, 860, 860, 860, 860, 860, 860, 860, 860, 860, 860, 860, 860, 860, 860, 860, 860, 860, 860, 860, 860, 860, 860, 860, 860, 860, 860, 860, 860, 860, 860, 860, 860, 860, 860, 860, 860, 860, 860, 860, 860, 860, 860, 860, 860, 860, 860, 860, 860, 860, 860, 860, 860, 860, 860, 860, 860, 860, 860, 860, 860, 860, 860, 860, 860, 860, 860, 860, 860, 860, 860, 860, 860, 860, 860, 860, 860, 860, 860, 860, 860, 860, 860, 860, 860, 860, 860, 860, 860, 860, 860, 860, 860, 860, 860, 860, 860, 860, 860, 860, 860, 860, 860, 860, 860, 860, 860, 860, 860, 860, 860, 860, 860, 860, 860, 860, 860, 860, 860, 860, 860, 860, 860, 860, 860, 860, 860, 860, 860, 860, 860, 860, 860, 860, 860, 860, 860, 860, 860, 860, 860, 860, 860, 860, 860, 860, 860, 860, 860, 860, 860, 860, 860, 860, 860, 860, 860, 860, 860, 860, 860, 860, 860, 860, 860, 860, 860, 860, 860, 860, 860, 860, 860, 860, 860, 860, 860, 860, 860, 860, 860, 860, 860, 860, 860, 860, 860, 860, 860, 860, 860, 860, 860, 860, 860, 860, 860, 860, 860, 860, 860, 860, 860, 860, 860, 860, 
860, 860, 860, 860, 860, 860, 860, 860, 860, 860, 860, 860, 860, 860, 860, 860, 860, 860, 860, 860, 860, 860, 860, 860, 860, 860, 860, 860, 860, 860, 860, 860, 860, 860, 860, 860, 860, 860, 860, 860, 860, 860, 860, 860, 860, 860, 860, 860, 860, 860, 860, 860, 860, 860, 860, 860, 860, 860, 860, 860, 860, 860, 860, 860, 860, 860, 860, 860, 860, 860, 860, 860, 860, 860, 860, 860, 860, 860, 860, 860, 860, 860, 860, 860, 860, 860, 860, 860, 860, 860, 860, 860, 860, 860, 860, 860, 860, 860, 860, 860, 860, 860, 860, 860, 860, 860, 860, 860, 860, 860, 860, 860, 860, 860, 860, 860, 860, 860, 860, 860, 860, 860, 860, 860}, + {859, 859, 859, 859, 859, 859, 859, 859, 859, 859, 859, 859, 859, 859, 859, 859, 859, 859, 859, 859, 859, 859, 859, 859, 859, 859, 859, 859, 859, 859, 859, 859, 859, 859, 859, 859, 859, 859, 859, 859, 859, 859, 859, 859, 859, 859, 859, 859, 859, 859, 859, 859, 859, 859, 859, 859, 859, 859, 859, 859, 859, 859, 859, 859, 859, 859, 859, 859, 859, 859, 859, 859, 859, 859, 859, 859, 859, 859, 859, 859, 859, 859, 859, 859, 859, 859, 859, 859, 859, 859, 859, 859, 859, 859, 859, 859, 859, 859, 859, 859, 859, 859, 859, 859, 859, 859, 859, 859, 859, 859, 859, 859, 859, 859, 859, 859, 859, 859, 859, 859, 859, 859, 859, 859, 859, 859, 859, 859, 859, 859, 859, 859, 859, 859, 859, 859, 859, 859, 859, 859, 859, 859, 859, 859, 859, 859, 859, 859, 859, 859, 859, 859, 859, 859, 859, 859, 859, 859, 859, 859, 859, 859, 859, 859, 859, 859, 859, 859, 859, 859, 859, 859, 859, 859, 859, 859, 859, 859, 859, 859, 859, 859, 859, 859, 859, 859, 859, 859, 859, 859, 859, 859, 859, 859, 859, 859, 859, 859, 859, 859, 859, 859, 859, 859, 859, 859, 859, 859, 859, 859, 859, 859, 859, 859, 859, 859, 859, 859, 859, 859, 859, 859, 859, 859, 859, 859, 859, 859, 859, 859, 859, 859, 859, 859, 859, 859, 859, 859, 859, 859, 859, 859, 859, 859, 859, 859, 859, 859, 859, 859, 859, 859, 859, 859, 859, 859, 859, 859, 859, 859, 859, 859, 859, 859, 859, 859, 859, 859, 859, 859, 859, 859, 859, 859, 859, 859, 859, 859, 859, 859, 859, 859, 859, 859, 859, 859, 859, 859, 859, 859, 859, 859, 859, 859, 859, 859, 859, 859, 859, 859, 859, 859, 859, 859, 859, 859, 859, 859, 859, 859, 859, 859, 859, 859, 859, 859, 859, 859, 859, 859, 859, 859, 859, 859, 859, 859, 859, 859, 859, 859, 859, 859, 859, 859, 859, 859, 859, 859, 859, 859, 859, 859, 859, 859, 859, 859, 859, 859, 859, 859, 859, 859, 859, 859, 859, 859, 859, 859, 859, 859, 859, 859, 859, 859, 859, 859, 859, 859, 859, 859, 859, 859, 859, 859, 859, 859, 859, 859, 859, 859, 859, 859, 859, 859, 859, 859, 859, 859, 859, 859, 859, 859, 859, 859, 859, 859, 859, 859, 859, 859, 859, 859, 859, 859, 859, 859, 859, 859, 859, 859, 859, 859, 859, 859, 859, 859, 859, 859, 859, 859, 859, 859, 859, 859, 859, 859, 859, 859, 859, 859, 859, 859, 859, 859, 859, 859, 859, 859, 859, 859, 859, 859, 859, 859, 859, 859, 859, 859, 859, 859, 859, 859, 859, 859, 859, 859, 859, 859, 859, 859, 859, 859, 859, 859, 859, 859, 859, 859, 859, 859, 859, 859, 859, 859, 859, 859, 859, 859, 859, 859, 859, 859, 859, 859, 859, 859, 859, 859, 859, 859, 859, 859, 859, 859, 859, 859, 859, 859, 859, 859, 859, 859, 859, 859, 859, 859, 859, 859, 859, 859, 859, 859, 859, 859, 859, 859, 859, 859, 859, 859, 859, 859, 859}, + {858, 858, 858, 858, 858, 858, 858, 858, 858, 858, 858, 858, 858, 858, 858, 858, 858, 858, 858, 858, 858, 858, 858, 858, 858, 858, 858, 858, 858, 858, 858, 858, 858, 858, 858, 858, 858, 858, 858, 858, 858, 858, 858, 858, 858, 858, 858, 858, 858, 858, 858, 858, 858, 858, 858, 858, 858, 858, 858, 858, 858, 858, 
858, 858, 858, 858, 858, 858, 858, 858, 858, 858, 858, 858, 858, 858, 858, 858, 858, 858, 858, 858, 858, 858, 858, 858, 858, 858, 858, 858, 858, 858, 858, 858, 858, 858, 858, 858, 858, 858, 858, 858, 858, 858, 858, 858, 858, 858, 858, 858, 858, 858, 858, 858, 858, 858, 858, 858, 858, 858, 858, 858, 858, 858, 858, 858, 858, 858, 858, 858, 858, 858, 858, 858, 858, 858, 858, 858, 858, 858, 858, 858, 858, 858, 858, 858, 858, 858, 858, 858, 858, 858, 858, 858, 858, 858, 858, 858, 858, 858, 858, 858, 858, 858, 858, 858, 858, 858, 858, 858, 858, 858, 858, 858, 858, 858, 858, 858, 858, 858, 858, 858, 858, 858, 858, 858, 858, 858, 858, 858, 858, 858, 858, 858, 858, 858, 858, 858, 858, 858, 858, 858, 858, 858, 858, 858, 858, 858, 858, 858, 858, 858, 858, 858, 858, 858, 858, 858, 858, 858, 858, 858, 858, 858, 858, 858, 858, 858, 858, 858, 858, 858, 858, 858, 858, 858, 858, 858, 858, 858, 858, 858, 858, 858, 858, 858, 858, 858, 858, 858, 858, 858, 858, 858, 858, 858, 858, 858, 858, 858, 858, 858, 858, 858, 858, 858, 858, 858, 858, 858, 858, 858, 858, 858, 858, 858, 858, 858, 858, 858, 858, 858, 858, 858, 858, 858, 858, 858, 858, 858, 858, 858, 858, 858, 858, 858, 858, 858, 858, 858, 858, 858, 858, 858, 858, 858, 858, 858, 858, 858, 858, 858, 858, 858, 858, 858, 858, 858, 858, 858, 858, 858, 858, 858, 858, 858, 858, 858, 858, 858, 858, 858, 858, 858, 858, 858, 858, 858, 858, 858, 858, 858, 858, 858, 858, 858, 858, 858, 858, 858, 858, 858, 858, 858, 858, 858, 858, 858, 858, 858, 858, 858, 858, 858, 858, 858, 858, 858, 858, 858, 858, 858, 858, 858, 858, 858, 858, 858, 858, 858, 858, 858, 858, 858, 858, 858, 858, 858, 858, 858, 858, 858, 858, 858, 858, 858, 858, 858, 858, 858, 858, 858, 858, 858, 858, 858, 858, 858, 858, 858, 858, 858, 858, 858, 858, 858, 858, 858, 858, 858, 858, 858, 858, 858, 858, 858, 858, 858, 858, 858, 858, 858, 858, 858, 858, 858, 858, 858, 858, 858, 858, 858, 858, 858, 858, 858, 858, 858, 858, 858, 858, 858, 858, 858, 858, 858, 858, 858, 858, 858, 858, 858, 858, 858, 858, 858, 858, 858, 858, 858, 858, 858, 858, 858, 858, 858, 858, 858, 858, 858, 858, 858, 858, 858, 858, 858, 858, 858, 858, 858, 858, 858, 858, 858, 858, 858, 858, 858, 858, 858, 858, 858, 858, 858, 858, 858, 858, 858, 858, 858, 858, 858, 858, 858, 858, 858, 858, 858, 858, 858, 858, 858, 858}, + {857, 857, 857, 857, 857, 857, 857, 857, 857, 857, 857, 857, 857, 857, 857, 857, 857, 857, 857, 857, 857, 857, 857, 857, 857, 857, 857, 857, 857, 857, 857, 857, 857, 857, 857, 857, 857, 857, 857, 857, 857, 857, 857, 857, 857, 857, 857, 857, 857, 857, 857, 857, 857, 857, 857, 857, 857, 857, 857, 857, 857, 857, 857, 857, 857, 857, 857, 857, 857, 857, 857, 857, 857, 857, 857, 857, 857, 857, 857, 857, 857, 857, 857, 857, 857, 857, 857, 857, 857, 857, 857, 857, 857, 857, 857, 857, 857, 857, 857, 857, 857, 857, 857, 857, 857, 857, 857, 857, 857, 857, 857, 857, 857, 857, 857, 857, 857, 857, 857, 857, 857, 857, 857, 857, 857, 857, 857, 857, 857, 857, 857, 857, 857, 857, 857, 857, 857, 857, 857, 857, 857, 857, 857, 857, 857, 857, 857, 857, 857, 857, 857, 857, 857, 857, 857, 857, 857, 857, 857, 857, 857, 857, 857, 857, 857, 857, 857, 857, 857, 857, 857, 857, 857, 857, 857, 857, 857, 857, 857, 857, 857, 857, 857, 857, 857, 857, 857, 857, 857, 857, 857, 857, 857, 857, 857, 857, 857, 857, 857, 857, 857, 857, 857, 857, 857, 857, 857, 857, 857, 857, 857, 857, 857, 857, 857, 857, 857, 857, 857, 857, 857, 857, 857, 857, 857, 857, 857, 857, 857, 857, 857, 857, 857, 857, 857, 857, 857, 857, 857, 857, 857, 857, 857, 857, 857, 857, 857, 857, 857, 
857, 857, 857, 857, 857, 857, 857, 857, 857, 857, 857, 857, 857, 857, 857, 857, 857, 857, 857, 857, 857, 857, 857, 857, 857, 857, 857, 857, 857, 857, 857, 857, 857, 857, 857, 857, 857, 857, 857, 857, 857, 857, 857, 857, 857, 857, 857, 857, 857, 857, 857, 857, 857, 857, 857, 857, 857, 857, 857, 857, 857, 857, 857, 857, 857, 857, 857, 857, 857, 857, 857, 857, 857, 857, 857, 857, 857, 857, 857, 857, 857, 857, 857, 857, 857, 857, 857, 857, 857, 857, 857, 857, 857, 857, 857, 857, 857, 857, 857, 857, 857, 857, 857, 857, 857, 857, 857, 857, 857, 857, 857, 857, 857, 857, 857, 857, 857, 857, 857, 857, 857, 857, 857, 857, 857, 857, 857, 857, 857, 857, 857, 857, 857, 857, 857, 857, 857, 857, 857, 857, 857, 857, 857, 857, 857, 857, 857, 857, 857, 857, 857, 857, 857, 857, 857, 857, 857, 857, 857, 857, 857, 857, 857, 857, 857, 857, 857, 857, 857, 857, 857, 857, 857, 857, 857, 857, 857, 857, 857, 857, 857, 857, 857, 857, 857, 857, 857, 857, 857, 857, 857, 857, 857, 857, 857, 857, 857, 857, 857, 857, 857, 857, 857, 857, 857, 857, 857, 857, 857, 857, 857, 857, 857, 857, 857, 857, 857, 857, 857, 857, 857, 857, 857, 857, 857, 857, 857, 857, 857, 857, 857, 857, 857, 857, 857, 857, 857, 857, 857, 857, 857, 857, 857, 857, 857, 857, 857, 857, 857, 857, 857, 857, 857, 857, 857, 857, 857, 857, 857, 857, 857, 857, 857, 857, 857, 857, 857, 857, 857, 857, 857, 857, 857, 857}, + // 85 + {856, 856, 856, 856, 856, 856, 856, 856, 856, 856, 856, 856, 856, 856, 856, 856, 856, 856, 856, 856, 856, 856, 856, 856, 856, 856, 856, 856, 856, 856, 856, 856, 856, 856, 856, 856, 856, 856, 856, 856, 856, 856, 856, 856, 856, 856, 856, 856, 856, 856, 856, 856, 856, 856, 856, 856, 856, 856, 856, 856, 856, 856, 856, 856, 856, 856, 856, 856, 856, 856, 856, 856, 856, 856, 856, 856, 856, 856, 856, 856, 856, 856, 856, 856, 856, 856, 856, 856, 856, 856, 856, 856, 856, 856, 856, 856, 856, 856, 856, 856, 856, 856, 856, 856, 856, 856, 856, 856, 856, 856, 856, 856, 856, 856, 856, 856, 856, 856, 856, 856, 856, 856, 856, 856, 856, 856, 856, 856, 856, 856, 856, 856, 856, 856, 856, 856, 856, 856, 856, 856, 856, 856, 856, 856, 856, 856, 856, 856, 856, 856, 856, 856, 856, 856, 856, 856, 856, 856, 856, 856, 856, 856, 856, 856, 856, 856, 856, 856, 856, 856, 856, 856, 856, 856, 856, 856, 856, 856, 856, 856, 856, 856, 856, 856, 856, 856, 856, 856, 856, 856, 856, 856, 856, 856, 856, 856, 856, 856, 856, 856, 856, 856, 856, 856, 856, 856, 856, 856, 856, 856, 856, 856, 856, 856, 856, 856, 856, 856, 856, 856, 856, 856, 856, 856, 856, 856, 856, 856, 856, 856, 856, 856, 856, 856, 856, 856, 856, 856, 856, 856, 856, 856, 856, 856, 856, 856, 856, 856, 856, 856, 856, 856, 856, 856, 856, 856, 856, 856, 856, 856, 856, 856, 856, 856, 856, 856, 856, 856, 856, 856, 856, 856, 856, 856, 856, 856, 856, 856, 856, 856, 856, 856, 856, 856, 856, 856, 856, 856, 856, 856, 856, 856, 856, 856, 856, 856, 856, 856, 856, 856, 856, 856, 856, 856, 856, 856, 856, 856, 856, 856, 856, 856, 856, 856, 856, 856, 856, 856, 856, 856, 856, 856, 856, 856, 856, 856, 856, 856, 856, 856, 856, 856, 856, 856, 856, 856, 856, 856, 856, 856, 856, 856, 856, 856, 856, 856, 856, 856, 856, 856, 856, 856, 856, 856, 856, 856, 856, 856, 856, 856, 856, 856, 856, 856, 856, 856, 856, 856, 856, 856, 856, 856, 856, 856, 856, 856, 856, 856, 856, 856, 856, 856, 856, 856, 856, 856, 856, 856, 856, 856, 856, 856, 856, 856, 856, 856, 856, 856, 856, 856, 856, 856, 856, 856, 856, 856, 856, 856, 856, 856, 856, 856, 856, 856, 856, 856, 856, 856, 856, 856, 856, 856, 856, 856, 856, 856, 856, 856, 856, 856, 856, 856, 856, 856, 
856, 856, 856, 856, 856, 856, 856, 856, 856, 856, 856, 856, 856, 856, 856, 856, 856, 856, 856, 856, 856, 856, 856, 856, 856, 856, 856, 856, 856, 856, 856, 856, 856, 856, 856, 856, 856, 856, 856, 856, 856, 856, 856, 856, 856, 856, 856, 856, 856, 856, 856, 856, 856, 856, 856, 856, 856, 856, 856, 856, 856, 856, 856, 856, 856, 856, 856, 856, 856, 856, 856, 856, 856, 856, 856, 856, 856, 856, 856, 856, 856, 856, 856, 856, 856, 856, 856, 856, 856}, + {855, 855, 855, 855, 855, 855, 855, 855, 855, 855, 855, 855, 855, 855, 855, 855, 855, 855, 855, 855, 855, 855, 855, 855, 855, 855, 855, 855, 855, 855, 855, 855, 855, 855, 855, 855, 855, 855, 855, 855, 855, 855, 855, 855, 855, 855, 855, 855, 855, 855, 855, 855, 855, 855, 855, 855, 855, 855, 855, 855, 855, 855, 855, 855, 855, 855, 855, 855, 855, 855, 855, 855, 855, 855, 855, 855, 855, 855, 855, 855, 855, 855, 855, 855, 855, 855, 855, 855, 855, 855, 855, 855, 855, 855, 855, 855, 855, 855, 855, 855, 855, 855, 855, 855, 855, 855, 855, 855, 855, 855, 855, 855, 855, 855, 855, 855, 855, 855, 855, 855, 855, 855, 855, 855, 855, 855, 855, 855, 855, 855, 855, 855, 855, 855, 855, 855, 855, 855, 855, 855, 855, 855, 855, 855, 855, 855, 855, 855, 855, 855, 855, 855, 855, 855, 855, 855, 855, 855, 855, 855, 855, 855, 855, 855, 855, 855, 855, 855, 855, 855, 855, 855, 855, 855, 855, 855, 855, 855, 855, 855, 855, 855, 855, 855, 855, 855, 855, 855, 855, 855, 855, 855, 855, 855, 855, 855, 855, 855, 855, 855, 855, 855, 855, 855, 855, 855, 855, 855, 855, 855, 855, 855, 855, 855, 855, 855, 855, 855, 855, 855, 855, 855, 855, 855, 855, 855, 855, 855, 855, 855, 855, 855, 855, 855, 855, 855, 855, 855, 855, 855, 855, 855, 855, 855, 855, 855, 855, 855, 855, 855, 855, 855, 855, 855, 855, 855, 855, 855, 855, 855, 855, 855, 855, 855, 855, 855, 855, 855, 855, 855, 855, 855, 855, 855, 855, 855, 855, 855, 855, 855, 855, 855, 855, 855, 855, 855, 855, 855, 855, 855, 855, 855, 855, 855, 855, 855, 855, 855, 855, 855, 855, 855, 855, 855, 855, 855, 855, 855, 855, 855, 855, 855, 855, 855, 855, 855, 855, 855, 855, 855, 855, 855, 855, 855, 855, 855, 855, 855, 855, 855, 855, 855, 855, 855, 855, 855, 855, 855, 855, 855, 855, 855, 855, 855, 855, 855, 855, 855, 855, 855, 855, 855, 855, 855, 855, 855, 855, 855, 855, 855, 855, 855, 855, 855, 855, 855, 855, 855, 855, 855, 855, 855, 855, 855, 855, 855, 855, 855, 855, 855, 855, 855, 855, 855, 855, 855, 855, 855, 855, 855, 855, 855, 855, 855, 855, 855, 855, 855, 855, 855, 855, 855, 855, 855, 855, 855, 855, 855, 855, 855, 855, 855, 855, 855, 855, 855, 855, 855, 855, 855, 855, 855, 855, 855, 855, 855, 855, 855, 855, 855, 855, 855, 855, 855, 855, 855, 855, 855, 855, 855, 855, 855, 855, 855, 855, 855, 855, 855, 855, 855, 855, 855, 855, 855, 855, 855, 855, 855, 855, 855, 855, 855, 855, 855, 855, 855, 855, 855, 855, 855, 855, 855, 855, 855, 855, 855, 855, 855, 855, 855, 855, 855, 855, 855, 855, 855, 855, 855, 855, 855, 855, 855, 855, 855, 855, 855, 855, 855, 855, 855, 855, 855, 855, 855, 855, 855, 855, 855, 855, 855, 855, 855, 855, 855, 855, 855, 855, 855, 855, 855, 855, 855, 855}, + {854, 854, 854, 854, 854, 854, 854, 854, 854, 854, 854, 854, 854, 854, 854, 854, 854, 854, 854, 854, 854, 854, 854, 854, 854, 854, 854, 854, 854, 854, 854, 854, 854, 854, 854, 854, 854, 854, 854, 854, 854, 854, 854, 854, 854, 854, 854, 854, 854, 854, 854, 854, 854, 854, 854, 854, 854, 854, 854, 854, 854, 854, 854, 854, 854, 854, 854, 854, 854, 854, 854, 854, 854, 854, 854, 854, 854, 854, 854, 854, 854, 854, 854, 854, 854, 854, 854, 854, 854, 854, 854, 854, 854, 854, 854, 854, 854, 
854, 854, 854, 854, 854, 854, 854, 854, 854, 854, 854, 854, 854, 854, 854, 854, 854, 854, 854, 854, 854, 854, 854, 854, 854, 854, 854, 854, 854, 854, 854, 854, 854, 854, 854, 854, 854, 854, 854, 854, 854, 854, 854, 854, 854, 854, 854, 854, 854, 854, 854, 854, 854, 854, 854, 854, 854, 854, 854, 854, 854, 854, 854, 854, 854, 854, 854, 854, 854, 854, 854, 854, 854, 854, 854, 854, 854, 854, 854, 854, 854, 854, 854, 854, 854, 854, 854, 854, 854, 854, 854, 854, 854, 854, 854, 854, 854, 854, 854, 854, 854, 854, 854, 854, 854, 854, 854, 854, 854, 854, 854, 854, 854, 854, 854, 854, 854, 854, 854, 854, 854, 854, 854, 854, 854, 854, 854, 854, 854, 854, 854, 854, 854, 854, 854, 854, 854, 854, 854, 854, 854, 854, 854, 854, 854, 854, 854, 854, 854, 854, 854, 854, 854, 854, 854, 854, 854, 854, 854, 854, 854, 854, 854, 854, 854, 854, 854, 854, 854, 854, 854, 854, 854, 854, 854, 854, 854, 854, 854, 854, 854, 854, 854, 854, 854, 854, 854, 854, 854, 854, 854, 854, 854, 854, 854, 854, 854, 854, 854, 854, 854, 854, 854, 854, 854, 854, 854, 854, 854, 854, 854, 854, 854, 854, 854, 854, 854, 854, 854, 854, 854, 854, 854, 854, 854, 854, 854, 854, 854, 854, 854, 854, 854, 854, 854, 854, 854, 854, 854, 854, 854, 854, 854, 854, 854, 854, 854, 854, 854, 854, 854, 854, 854, 854, 854, 854, 854, 854, 854, 854, 854, 854, 854, 854, 854, 854, 854, 854, 854, 854, 854, 854, 854, 854, 854, 854, 854, 854, 854, 854, 854, 854, 854, 854, 854, 854, 854, 854, 854, 854, 854, 854, 854, 854, 854, 854, 854, 854, 854, 854, 854, 854, 854, 854, 854, 854, 854, 854, 854, 854, 854, 854, 854, 854, 854, 854, 854, 854, 854, 854, 854, 854, 854, 854, 854, 854, 854, 854, 854, 854, 854, 854, 854, 854, 854, 854, 854, 854, 854, 854, 854, 854, 854, 854, 854, 854, 854, 854, 854, 854, 854, 854, 854, 854, 854, 854, 854, 854, 854, 854, 854, 854, 854, 854, 854, 854, 854, 854, 854, 854, 854, 854, 854, 854, 854, 854, 854, 854, 854, 854, 854, 854, 854, 854, 854, 854, 854, 854, 854, 854, 854, 854, 854, 854, 854, 854, 854, 854, 854, 854, 854, 854, 854, 854, 854, 854, 854, 854, 854, 854, 854, 854, 854, 854, 854, 854, 854, 854, 854, 854, 854, 854, 854, 854, 854, 854}, + {853, 853, 853, 853, 853, 853, 853, 853, 853, 853, 853, 853, 853, 853, 853, 853, 853, 853, 853, 853, 853, 853, 853, 853, 853, 853, 853, 853, 853, 853, 853, 853, 853, 853, 853, 853, 853, 853, 853, 853, 853, 853, 853, 853, 853, 853, 853, 853, 853, 853, 853, 853, 853, 853, 853, 853, 853, 853, 853, 853, 853, 853, 853, 853, 853, 853, 853, 853, 853, 853, 853, 853, 853, 853, 853, 853, 853, 853, 853, 853, 853, 853, 853, 853, 853, 853, 853, 853, 853, 853, 853, 853, 853, 853, 853, 853, 853, 853, 853, 853, 853, 853, 853, 853, 853, 853, 853, 853, 853, 853, 853, 853, 853, 853, 853, 853, 853, 853, 853, 853, 853, 853, 853, 853, 853, 853, 853, 853, 853, 853, 853, 853, 853, 853, 853, 853, 853, 853, 853, 853, 853, 853, 853, 853, 853, 853, 853, 853, 853, 853, 853, 853, 853, 853, 853, 853, 853, 853, 853, 853, 853, 853, 853, 853, 853, 853, 853, 853, 853, 853, 853, 853, 853, 853, 853, 853, 853, 853, 853, 853, 853, 853, 853, 853, 853, 853, 853, 853, 853, 853, 853, 853, 853, 853, 853, 853, 853, 853, 853, 853, 853, 853, 853, 853, 853, 853, 853, 853, 853, 853, 853, 853, 853, 853, 853, 853, 853, 853, 853, 853, 853, 853, 853, 853, 853, 853, 853, 853, 853, 853, 853, 853, 853, 853, 853, 853, 853, 853, 853, 853, 853, 853, 853, 853, 853, 853, 853, 853, 853, 853, 853, 853, 853, 853, 853, 853, 853, 853, 853, 853, 853, 853, 853, 853, 853, 853, 853, 853, 853, 853, 853, 853, 853, 853, 853, 853, 853, 853, 853, 853, 853, 853, 853, 853, 
853, 853, 853, 853, 853, 853, 853, 853, 853, 853, 853, 853, 853, 853, 853, 853, 853, 853, 853, 853, 853, 853, 853, 853, 853, 853, 853, 853, 853, 853, 853, 853, 853, 853, 853, 853, 853, 853, 853, 853, 853, 853, 853, 853, 853, 853, 853, 853, 853, 853, 853, 853, 853, 853, 853, 853, 853, 853, 853, 853, 853, 853, 853, 853, 853, 853, 853, 853, 853, 853, 853, 853, 853, 853, 853, 853, 853, 853, 853, 853, 853, 853, 853, 853, 853, 853, 853, 853, 853, 853, 853, 853, 853, 853, 853, 853, 853, 853, 853, 853, 853, 853, 853, 853, 853, 853, 853, 853, 853, 853, 853, 853, 853, 853, 853, 853, 853, 853, 853, 853, 853, 853, 853, 853, 853, 853, 853, 853, 853, 853, 853, 853, 853, 853, 853, 853, 853, 853, 853, 853, 853, 853, 853, 853, 853, 853, 853, 853, 853, 853, 853, 853, 853, 853, 853, 853, 853, 853, 853, 853, 853, 853, 853, 853, 853, 853, 853, 853, 853, 853, 853, 853, 853, 853, 853, 853, 853, 853, 853, 853, 853, 853, 853, 853, 853, 853, 853, 853, 853, 853, 853, 853, 853, 853, 853, 853, 853, 853, 853, 853, 853, 853, 853, 853, 853, 853, 853, 853, 853, 853, 853, 853, 853, 853, 853, 853, 853, 853, 853, 853, 853, 853, 853, 853, 853, 853, 853, 853, 853, 853, 853, 853, 853, 853, 853, 853, 853, 853, 853}, + {852, 852, 852, 852, 852, 852, 852, 852, 852, 852, 852, 852, 852, 852, 852, 852, 852, 852, 852, 852, 852, 852, 852, 852, 852, 852, 852, 852, 852, 852, 852, 852, 852, 852, 852, 852, 852, 852, 852, 852, 852, 852, 852, 852, 852, 852, 852, 852, 852, 852, 852, 852, 852, 852, 852, 852, 852, 852, 852, 852, 852, 852, 852, 852, 852, 852, 852, 852, 852, 852, 852, 852, 852, 852, 852, 852, 852, 852, 852, 852, 852, 852, 852, 852, 852, 852, 852, 852, 852, 852, 852, 852, 852, 852, 852, 852, 852, 852, 852, 852, 852, 852, 852, 852, 852, 852, 852, 852, 852, 852, 852, 852, 852, 852, 852, 852, 852, 852, 852, 852, 852, 852, 852, 852, 852, 852, 852, 852, 852, 852, 852, 852, 852, 852, 852, 852, 852, 852, 852, 852, 852, 852, 852, 852, 852, 852, 852, 852, 852, 852, 852, 852, 852, 852, 852, 852, 852, 852, 852, 852, 852, 852, 852, 852, 852, 852, 852, 852, 852, 852, 852, 852, 852, 852, 852, 852, 852, 852, 852, 852, 852, 852, 852, 852, 852, 852, 852, 852, 852, 852, 852, 852, 852, 852, 852, 852, 852, 852, 852, 852, 852, 852, 852, 852, 852, 852, 852, 852, 852, 852, 852, 852, 852, 852, 852, 852, 852, 852, 852, 852, 852, 852, 852, 852, 852, 852, 852, 852, 852, 852, 852, 852, 852, 852, 852, 852, 852, 852, 852, 852, 852, 852, 852, 852, 852, 852, 852, 852, 852, 852, 852, 852, 852, 852, 852, 852, 852, 852, 852, 852, 852, 852, 852, 852, 852, 852, 852, 852, 852, 852, 852, 852, 852, 852, 852, 852, 852, 852, 852, 852, 852, 852, 852, 852, 852, 852, 852, 852, 852, 852, 852, 852, 852, 852, 852, 852, 852, 852, 852, 852, 852, 852, 852, 852, 852, 852, 852, 852, 852, 852, 852, 852, 852, 852, 852, 852, 852, 852, 852, 852, 852, 852, 852, 852, 852, 852, 852, 852, 852, 852, 852, 852, 852, 852, 852, 852, 852, 852, 852, 852, 852, 852, 852, 852, 852, 852, 852, 852, 852, 852, 852, 852, 852, 852, 852, 852, 852, 852, 852, 852, 852, 852, 852, 852, 852, 852, 852, 852, 852, 852, 852, 852, 852, 852, 852, 852, 852, 852, 852, 852, 852, 852, 852, 852, 852, 852, 852, 852, 852, 852, 852, 852, 852, 852, 852, 852, 852, 852, 852, 852, 852, 852, 852, 852, 852, 852, 852, 852, 852, 852, 852, 852, 852, 852, 852, 852, 852, 852, 852, 852, 852, 852, 852, 852, 852, 852, 852, 852, 852, 852, 852, 852, 852, 852, 852, 852, 852, 852, 852, 852, 852, 852, 852, 852, 852, 852, 852, 852, 852, 852, 852, 852, 852, 852, 852, 852, 852, 852, 852, 852, 852, 852, 852, 852, 852, 852, 852, 852, 852, 852, 852, 
852, 852, 852, 852, 852, 852, 852, 852, 852, 852, 852, 852, 852, 852, 852, 852, 852, 852, 852, 852, 852, 852, 852, 852, 852, 852, 852, 852, 852, 852, 852, 852, 852, 852, 852, 852, 852, 852, 852, 852, 852, 852, 852, 852, 852, 852, 852, 852, 852, 852, 852, 852}, + // 90 + {851, 851, 851, 851, 851, 851, 851, 851, 851, 851, 851, 851, 851, 851, 851, 851, 851, 851, 851, 851, 851, 851, 851, 851, 851, 851, 851, 851, 851, 851, 851, 851, 851, 851, 851, 851, 851, 851, 851, 851, 851, 851, 851, 851, 851, 851, 851, 851, 851, 851, 851, 851, 851, 851, 851, 851, 851, 851, 851, 851, 851, 851, 851, 851, 851, 851, 851, 851, 851, 851, 851, 851, 851, 851, 851, 851, 851, 851, 851, 851, 851, 851, 851, 851, 851, 851, 851, 851, 851, 851, 851, 851, 851, 851, 851, 851, 851, 851, 851, 851, 851, 851, 851, 851, 851, 851, 851, 851, 851, 851, 851, 851, 851, 851, 851, 851, 851, 851, 851, 851, 851, 851, 851, 851, 851, 851, 851, 851, 851, 851, 851, 851, 851, 851, 851, 851, 851, 851, 851, 851, 851, 851, 851, 851, 851, 851, 851, 851, 851, 851, 851, 851, 851, 851, 851, 851, 851, 851, 851, 851, 851, 851, 851, 851, 851, 851, 851, 851, 851, 851, 851, 851, 851, 851, 851, 851, 851, 851, 851, 851, 851, 851, 851, 851, 851, 851, 851, 851, 851, 851, 851, 851, 851, 851, 851, 851, 851, 851, 851, 851, 851, 851, 851, 851, 851, 851, 851, 851, 851, 851, 851, 851, 851, 851, 851, 851, 851, 851, 851, 851, 851, 851, 851, 851, 851, 851, 851, 851, 851, 851, 851, 851, 851, 851, 851, 851, 851, 851, 851, 851, 851, 851, 851, 851, 851, 851, 851, 851, 851, 851, 851, 851, 851, 851, 851, 851, 851, 851, 851, 851, 851, 851, 851, 851, 851, 851, 851, 851, 851, 851, 851, 851, 851, 851, 851, 851, 851, 851, 851, 851, 851, 851, 851, 851, 851, 851, 851, 851, 851, 851, 851, 851, 851, 851, 851, 851, 851, 851, 851, 851, 851, 851, 851, 851, 851, 851, 851, 851, 851, 851, 851, 851, 851, 851, 851, 851, 851, 851, 851, 851, 851, 851, 851, 851, 851, 851, 851, 851, 851, 851, 851, 851, 851, 851, 851, 851, 851, 851, 851, 851, 851, 851, 851, 851, 851, 851, 851, 851, 851, 851, 851, 851, 851, 851, 851, 851, 851, 851, 851, 851, 851, 851, 851, 851, 851, 851, 851, 851, 851, 851, 851, 851, 851, 851, 851, 851, 851, 851, 851, 851, 851, 851, 851, 851, 851, 851, 851, 851, 851, 851, 851, 851, 851, 851, 851, 851, 851, 851, 851, 851, 851, 851, 851, 851, 851, 851, 851, 851, 851, 851, 851, 851, 851, 851, 851, 851, 851, 851, 851, 851, 851, 851, 851, 851, 851, 851, 851, 851, 851, 851, 851, 851, 851, 851, 851, 851, 851, 851, 851, 851, 851, 851, 851, 851, 851, 851, 851, 851, 851, 851, 851, 851, 851, 851, 851, 851, 851, 851, 851, 851, 851, 851, 851, 851, 851, 851, 851, 851, 851, 851, 851, 851, 851, 851, 851, 851, 851, 851, 851, 851, 851, 851, 851, 851, 851, 851, 851, 851, 851, 851, 851, 851, 851, 851, 851, 851, 851, 851, 851, 851, 851, 851, 851, 851, 851, 851, 851, 851, 851, 851, 851, 851, 851, 851, 851, 851, 851, 851, 851, 851, 851, 851, 851}, + {850, 850, 850, 850, 850, 850, 850, 850, 850, 850, 850, 850, 850, 850, 850, 850, 850, 850, 850, 850, 850, 850, 850, 850, 850, 850, 850, 850, 850, 850, 850, 850, 850, 850, 850, 850, 850, 850, 850, 850, 850, 850, 850, 850, 850, 850, 850, 850, 850, 850, 850, 850, 850, 850, 850, 850, 850, 850, 850, 850, 850, 850, 850, 850, 850, 850, 850, 850, 850, 850, 850, 850, 850, 850, 850, 850, 850, 850, 850, 850, 850, 850, 850, 850, 850, 850, 850, 850, 850, 850, 850, 850, 850, 850, 850, 850, 850, 850, 850, 850, 850, 850, 850, 850, 850, 850, 850, 850, 850, 850, 850, 850, 850, 850, 850, 850, 850, 850, 850, 850, 850, 850, 850, 850, 850, 850, 850, 850, 850, 850, 850, 850, 
850, 850, 850, 850, 850, 850, 850, 850, 850, 850, 850, 850, 850, 850, 850, 850, 850, 850, 850, 850, 850, 850, 850, 850, 850, 850, 850, 850, 850, 850, 850, 850, 850, 850, 850, 850, 850, 850, 850, 850, 850, 850, 850, 850, 850, 850, 850, 850, 850, 850, 850, 850, 850, 850, 850, 850, 850, 850, 850, 850, 850, 850, 850, 850, 850, 850, 850, 850, 850, 850, 850, 850, 850, 850, 850, 850, 850, 850, 850, 850, 850, 850, 850, 850, 850, 850, 850, 850, 850, 850, 850, 850, 850, 850, 850, 850, 850, 850, 850, 850, 850, 850, 850, 850, 850, 850, 850, 850, 850, 850, 850, 850, 850, 850, 850, 850, 850, 850, 850, 850, 850, 850, 850, 850, 850, 850, 850, 850, 850, 850, 850, 850, 850, 850, 850, 850, 850, 850, 850, 850, 850, 850, 850, 850, 850, 850, 850, 850, 850, 850, 850, 850, 850, 850, 850, 850, 850, 850, 850, 850, 850, 850, 850, 850, 850, 850, 850, 850, 850, 850, 850, 850, 850, 850, 850, 850, 850, 850, 850, 850, 850, 850, 850, 850, 850, 850, 850, 850, 850, 850, 850, 850, 850, 850, 850, 850, 850, 850, 850, 850, 850, 850, 850, 850, 850, 850, 850, 850, 850, 850, 850, 850, 850, 850, 850, 850, 850, 850, 850, 850, 850, 850, 850, 850, 850, 850, 850, 850, 850, 850, 850, 850, 850, 850, 850, 850, 850, 850, 850, 850, 850, 850, 850, 850, 850, 850, 850, 850, 850, 850, 850, 850, 850, 850, 850, 850, 850, 850, 850, 850, 850, 850, 850, 850, 850, 850, 850, 850, 850, 850, 850, 850, 850, 850, 850, 850, 850, 850, 850, 850, 850, 850, 850, 850, 850, 850, 850, 850, 850, 850, 850, 850, 850, 850, 850, 850, 850, 850, 850, 850, 850, 850, 850, 850, 850, 850, 850, 850, 850, 850, 850, 850, 850, 850, 850, 850, 850, 850, 850, 850, 850, 850, 850, 850, 850, 850, 850, 850, 850, 850, 850, 850, 850, 850, 850, 850, 850, 850, 850, 850, 850, 850, 850, 850, 850, 850, 850, 850, 850, 850, 850, 850, 850, 850, 850, 850, 850, 850, 850, 850, 850, 850, 850, 850, 850, 850, 850, 850, 850, 850, 850, 850, 850, 850, 850, 850, 850, 850, 850, 850, 850, 850, 850, 850, 850, 850, 850, 850, 850, 850, 850}, + {849, 849, 849, 849, 849, 849, 849, 849, 849, 849, 849, 849, 849, 849, 849, 849, 849, 849, 849, 849, 849, 849, 849, 849, 849, 849, 849, 849, 849, 849, 849, 849, 849, 849, 849, 849, 849, 849, 849, 849, 849, 849, 849, 849, 849, 849, 849, 849, 849, 849, 849, 849, 849, 849, 849, 849, 849, 849, 849, 849, 849, 849, 849, 849, 849, 849, 849, 849, 849, 849, 849, 849, 849, 849, 849, 849, 849, 849, 849, 849, 849, 849, 849, 849, 849, 849, 849, 849, 849, 849, 849, 849, 849, 849, 849, 849, 849, 849, 849, 849, 849, 849, 849, 849, 849, 849, 849, 849, 849, 849, 849, 849, 849, 849, 849, 849, 849, 849, 849, 849, 849, 849, 849, 849, 849, 849, 849, 849, 849, 849, 849, 849, 849, 849, 849, 849, 849, 849, 849, 849, 849, 849, 849, 849, 849, 849, 849, 849, 849, 849, 849, 849, 849, 849, 849, 849, 849, 849, 849, 849, 849, 849, 849, 849, 849, 849, 849, 849, 849, 849, 849, 849, 849, 849, 849, 849, 849, 849, 849, 849, 849, 849, 849, 849, 849, 849, 849, 849, 849, 849, 849, 849, 849, 849, 849, 849, 849, 849, 849, 849, 849, 849, 849, 849, 849, 849, 849, 849, 849, 849, 849, 849, 849, 849, 849, 849, 849, 849, 849, 849, 849, 849, 849, 849, 849, 849, 849, 849, 849, 849, 849, 849, 849, 849, 849, 849, 849, 849, 849, 849, 849, 849, 849, 849, 849, 849, 849, 849, 849, 849, 849, 849, 849, 849, 849, 849, 849, 849, 849, 849, 849, 849, 849, 849, 849, 849, 849, 849, 849, 849, 849, 849, 849, 849, 849, 849, 849, 849, 849, 849, 849, 849, 849, 849, 849, 849, 849, 849, 849, 849, 849, 849, 849, 849, 849, 849, 849, 849, 849, 849, 849, 849, 849, 849, 849, 849, 849, 849, 849, 849, 849, 849, 849, 849, 849, 849, 849, 849, 849, 
849, 849, 849, 849, 849, 849, 849, 849, 849, 849, 849, 849, 849, 849, 849, 849, 849, 849, 849, 849, 849, 849, 849, 849, 849, 849, 849, 849, 849, 849, 849, 849, 849, 849, 849, 849, 849, 849, 849, 849, 849, 849, 849, 849, 849, 849, 849, 849, 849, 849, 849, 849, 849, 849, 849, 849, 849, 849, 849, 849, 849, 849, 849, 849, 849, 849, 849, 849, 849, 849, 849, 849, 849, 849, 849, 849, 849, 849, 849, 849, 849, 849, 849, 849, 849, 849, 849, 849, 849, 849, 849, 849, 849, 849, 849, 849, 849, 849, 849, 849, 849, 849, 849, 849, 849, 849, 849, 849, 849, 849, 849, 849, 849, 849, 849, 849, 849, 849, 849, 849, 849, 849, 849, 849, 849, 849, 849, 849, 849, 849, 849, 849, 849, 849, 849, 849, 849, 849, 849, 849, 849, 849, 849, 849, 849, 849, 849, 849, 849, 849, 849, 849, 849, 849, 849, 849, 849, 849, 849, 849, 849, 849, 849, 849, 849, 849, 849, 849, 849, 849, 849, 849, 849, 849, 849, 849, 849, 849, 849, 849, 849, 849, 849, 849, 849, 849, 849, 849, 849, 849, 849, 849, 849, 849, 849, 849, 849, 849, 849, 849, 849, 849, 849, 849}, + {848, 848, 848, 848, 848, 848, 848, 848, 848, 848, 848, 848, 848, 848, 848, 848, 848, 848, 848, 848, 848, 848, 848, 848, 848, 848, 848, 848, 848, 848, 848, 848, 848, 848, 848, 848, 848, 848, 848, 848, 848, 848, 848, 848, 848, 848, 848, 848, 848, 848, 848, 848, 848, 848, 848, 848, 848, 848, 848, 848, 848, 848, 848, 848, 848, 848, 848, 848, 848, 848, 848, 848, 848, 848, 848, 848, 848, 848, 848, 848, 848, 848, 848, 848, 848, 848, 848, 848, 848, 848, 848, 848, 848, 848, 848, 848, 848, 848, 848, 848, 848, 848, 848, 848, 848, 848, 848, 848, 848, 848, 848, 848, 848, 848, 848, 848, 848, 848, 848, 848, 848, 848, 848, 848, 848, 848, 848, 848, 848, 848, 848, 848, 848, 848, 848, 848, 848, 848, 848, 848, 848, 848, 848, 848, 848, 848, 848, 848, 848, 848, 848, 848, 848, 848, 848, 848, 848, 848, 848, 848, 848, 848, 848, 848, 848, 848, 848, 848, 848, 848, 848, 848, 848, 848, 848, 848, 848, 848, 848, 848, 848, 848, 848, 848, 848, 848, 848, 848, 848, 848, 848, 848, 848, 848, 848, 848, 848, 848, 848, 848, 848, 848, 848, 848, 848, 848, 848, 848, 848, 848, 848, 848, 848, 848, 848, 848, 848, 848, 848, 848, 848, 848, 848, 848, 848, 848, 848, 848, 848, 848, 848, 848, 848, 848, 848, 848, 848, 848, 848, 848, 848, 848, 848, 848, 848, 848, 848, 848, 848, 848, 848, 848, 848, 848, 848, 848, 848, 848, 848, 848, 848, 848, 848, 848, 848, 848, 848, 848, 848, 848, 848, 848, 848, 848, 848, 848, 848, 848, 848, 848, 848, 848, 848, 848, 848, 848, 848, 848, 848, 848, 848, 848, 848, 848, 848, 848, 848, 848, 848, 848, 848, 848, 848, 848, 848, 848, 848, 848, 848, 848, 848, 848, 848, 848, 848, 848, 848, 848, 848, 848, 848, 848, 848, 848, 848, 848, 848, 848, 848, 848, 848, 848, 848, 848, 848, 848, 848, 848, 848, 848, 848, 848, 848, 848, 848, 848, 848, 848, 848, 848, 848, 848, 848, 848, 848, 848, 848, 848, 848, 848, 848, 848, 848, 848, 848, 848, 848, 848, 848, 848, 848, 848, 848, 848, 848, 848, 848, 848, 848, 848, 848, 848, 848, 848, 848, 848, 848, 848, 848, 848, 848, 848, 848, 848, 848, 848, 848, 848, 848, 848, 848, 848, 848, 848, 848, 848, 848, 848, 848, 848, 848, 848, 848, 848, 848, 848, 848, 848, 848, 848, 848, 848, 848, 848, 848, 848, 848, 848, 848, 848, 848, 848, 848, 848, 848, 848, 848, 848, 848, 848, 848, 848, 848, 848, 848, 848, 848, 848, 848, 848, 848, 848, 848, 848, 848, 848, 848, 848, 848, 848, 848, 848, 848, 848, 848, 848, 848, 848, 848, 848, 848, 848, 848, 848, 848, 848, 848, 848, 848, 848, 848, 848, 848, 848, 848, 848, 848, 848, 848, 848, 848, 848, 848, 848, 848, 848, 848, 848, 848, 848, 848, 848, 848, 848, 848, 848, 
848, 848, 848, 848, 848, 848, 848, 848, 848, 848, 848, 848, 848, 848, 848, 848, 848}, + {847, 847, 847, 847, 847, 847, 847, 847, 847, 847, 847, 847, 847, 847, 847, 847, 847, 847, 847, 847, 847, 847, 847, 847, 847, 847, 847, 847, 847, 847, 847, 847, 847, 847, 847, 847, 847, 847, 847, 847, 847, 847, 847, 847, 847, 847, 847, 847, 847, 847, 847, 847, 847, 847, 847, 847, 847, 847, 847, 847, 847, 847, 847, 847, 847, 847, 847, 847, 847, 847, 847, 847, 847, 847, 847, 847, 847, 847, 847, 847, 847, 847, 847, 847, 847, 847, 847, 847, 847, 847, 847, 847, 847, 847, 847, 847, 847, 847, 847, 847, 847, 847, 847, 847, 847, 847, 847, 847, 847, 847, 847, 847, 847, 847, 847, 847, 847, 847, 847, 847, 847, 847, 847, 847, 847, 847, 847, 847, 847, 847, 847, 847, 847, 847, 847, 847, 847, 847, 847, 847, 847, 847, 847, 847, 847, 847, 847, 847, 847, 847, 847, 847, 847, 847, 847, 847, 847, 847, 847, 847, 847, 847, 847, 847, 847, 847, 847, 847, 847, 847, 847, 847, 847, 847, 847, 847, 847, 847, 847, 847, 847, 847, 847, 847, 847, 847, 847, 847, 847, 847, 847, 847, 847, 847, 847, 847, 847, 847, 847, 847, 847, 847, 847, 847, 847, 847, 847, 847, 847, 847, 847, 847, 847, 847, 847, 847, 847, 847, 847, 847, 847, 847, 847, 847, 847, 847, 847, 847, 847, 847, 847, 847, 847, 847, 847, 847, 847, 847, 847, 847, 847, 847, 847, 847, 847, 847, 847, 847, 847, 847, 847, 847, 847, 847, 847, 847, 847, 847, 847, 847, 847, 847, 847, 847, 847, 847, 847, 847, 847, 847, 847, 847, 847, 847, 847, 847, 847, 847, 847, 847, 847, 847, 847, 847, 847, 847, 847, 847, 847, 847, 847, 847, 847, 847, 847, 847, 847, 847, 847, 847, 847, 847, 847, 847, 847, 847, 847, 847, 847, 847, 847, 847, 847, 847, 847, 847, 847, 847, 847, 847, 847, 847, 847, 847, 847, 847, 847, 847, 847, 847, 847, 847, 847, 847, 847, 847, 847, 847, 847, 847, 847, 847, 847, 847, 847, 847, 847, 847, 847, 847, 847, 847, 847, 847, 847, 847, 847, 847, 847, 847, 847, 847, 847, 847, 847, 847, 847, 847, 847, 847, 847, 847, 847, 847, 847, 847, 847, 847, 847, 847, 847, 847, 847, 847, 847, 847, 847, 847, 847, 847, 847, 847, 847, 847, 847, 847, 847, 847, 847, 847, 847, 847, 847, 847, 847, 847, 847, 847, 847, 847, 847, 847, 847, 847, 847, 847, 847, 847, 847, 847, 847, 847, 847, 847, 847, 847, 847, 847, 847, 847, 847, 847, 847, 847, 847, 847, 847, 847, 847, 847, 847, 847, 847, 847, 847, 847, 847, 847, 847, 847, 847, 847, 847, 847, 847, 847, 847, 847, 847, 847, 847, 847, 847, 847, 847, 847, 847, 847, 847, 847, 847, 847, 847, 847, 847, 847, 847, 847, 847, 847, 847, 847, 847, 847, 847, 847, 847, 847, 847, 847, 847, 847, 847, 847, 847, 847, 847, 847, 847, 847, 847, 847, 847, 847, 847, 847, 847, 847, 847, 847, 847, 847, 847, 847, 847, 847, 847, 847, 847, 847, 847, 847, 847}, + // 95 + {846, 846, 846, 846, 846, 846, 846, 846, 846, 846, 846, 846, 846, 846, 846, 846, 846, 846, 846, 846, 846, 846, 846, 846, 846, 846, 846, 846, 846, 846, 846, 846, 846, 846, 846, 846, 846, 846, 846, 846, 846, 846, 846, 846, 846, 846, 846, 846, 846, 846, 846, 846, 846, 846, 846, 846, 846, 846, 846, 846, 846, 846, 846, 846, 846, 846, 846, 846, 846, 846, 846, 846, 846, 846, 846, 846, 846, 846, 846, 846, 846, 846, 846, 846, 846, 846, 846, 846, 846, 846, 846, 846, 846, 846, 846, 846, 846, 846, 846, 846, 846, 846, 846, 846, 846, 846, 846, 846, 846, 846, 846, 846, 846, 846, 846, 846, 846, 846, 846, 846, 846, 846, 846, 846, 846, 846, 846, 846, 846, 846, 846, 846, 846, 846, 846, 846, 846, 846, 846, 846, 846, 846, 846, 846, 846, 846, 846, 846, 846, 846, 846, 846, 846, 846, 846, 846, 846, 846, 846, 846, 846, 846, 846, 846, 846, 846, 846, 
846, 846, 846, 846, 846, 846, 846, 846, 846, 846, 846, 846, 846, 846, 846, 846, 846, 846, 846, 846, 846, 846, 846, 846, 846, 846, 846, 846, 846, 846, 846, 846, 846, 846, 846, 846, 846, 846, 846, 846, 846, 846, 846, 846, 846, 846, 846, 846, 846, 846, 846, 846, 846, 846, 846, 846, 846, 846, 846, 846, 846, 846, 846, 846, 846, 846, 846, 846, 846, 846, 846, 846, 846, 846, 846, 846, 846, 846, 846, 846, 846, 846, 846, 846, 846, 846, 846, 846, 846, 846, 846, 846, 846, 846, 846, 846, 846, 846, 846, 846, 846, 846, 846, 846, 846, 846, 846, 846, 846, 846, 846, 846, 846, 846, 846, 846, 846, 846, 846, 846, 846, 846, 846, 846, 846, 846, 846, 846, 846, 846, 846, 846, 846, 846, 846, 846, 846, 846, 846, 846, 846, 846, 846, 846, 846, 846, 846, 846, 846, 846, 846, 846, 846, 846, 846, 846, 846, 846, 846, 846, 846, 846, 846, 846, 846, 846, 846, 846, 846, 846, 846, 846, 846, 846, 846, 846, 846, 846, 846, 846, 846, 846, 846, 846, 846, 846, 846, 846, 846, 846, 846, 846, 846, 846, 846, 846, 846, 846, 846, 846, 846, 846, 846, 846, 846, 846, 846, 846, 846, 846, 846, 846, 846, 846, 846, 846, 846, 846, 846, 846, 846, 846, 846, 846, 846, 846, 846, 846, 846, 846, 846, 846, 846, 846, 846, 846, 846, 846, 846, 846, 846, 846, 846, 846, 846, 846, 846, 846, 846, 846, 846, 846, 846, 846, 846, 846, 846, 846, 846, 846, 846, 846, 846, 846, 846, 846, 846, 846, 846, 846, 846, 846, 846, 846, 846, 846, 846, 846, 846, 846, 846, 846, 846, 846, 846, 846, 846, 846, 846, 846, 846, 846, 846, 846, 846, 846, 846, 846, 846, 846, 846, 846, 846, 846, 846, 846, 846, 846, 846, 846, 846, 846, 846, 846, 846, 846, 846, 846, 846, 846, 846, 846, 846, 846, 846, 846, 846, 846, 846, 846, 846, 846, 846, 846, 846, 846, 846, 846, 846, 846, 846, 846, 846, 846, 846, 846, 846, 846, 846, 846, 846, 846, 846, 846, 846, 846}, + {845, 845, 845, 845, 845, 845, 845, 845, 845, 845, 845, 845, 845, 845, 845, 845, 845, 845, 845, 845, 845, 845, 845, 845, 845, 845, 845, 845, 845, 845, 845, 845, 845, 845, 845, 845, 845, 845, 845, 845, 845, 845, 845, 845, 845, 845, 845, 845, 845, 845, 845, 845, 845, 845, 845, 845, 845, 845, 845, 845, 845, 845, 845, 845, 845, 845, 845, 845, 845, 845, 845, 845, 845, 845, 845, 845, 845, 845, 845, 845, 845, 845, 845, 845, 845, 845, 845, 845, 845, 845, 845, 845, 845, 845, 845, 845, 845, 845, 845, 845, 845, 845, 845, 845, 845, 845, 845, 845, 845, 845, 845, 845, 845, 845, 845, 845, 845, 845, 845, 845, 845, 845, 845, 845, 845, 845, 845, 845, 845, 845, 845, 845, 845, 845, 845, 845, 845, 845, 845, 845, 845, 845, 845, 845, 845, 845, 845, 845, 845, 845, 845, 845, 845, 845, 845, 845, 845, 845, 845, 845, 845, 845, 845, 845, 845, 845, 845, 845, 845, 845, 845, 845, 845, 845, 845, 845, 845, 845, 845, 845, 845, 845, 845, 845, 845, 845, 845, 845, 845, 845, 845, 845, 845, 845, 845, 845, 845, 845, 845, 845, 845, 845, 845, 845, 845, 845, 845, 845, 845, 845, 845, 845, 845, 845, 845, 845, 845, 845, 845, 845, 845, 845, 845, 845, 845, 845, 845, 845, 845, 845, 845, 845, 845, 845, 845, 845, 845, 845, 845, 845, 845, 845, 845, 845, 845, 845, 845, 845, 845, 845, 845, 845, 845, 845, 845, 845, 845, 845, 845, 845, 845, 845, 845, 845, 845, 845, 845, 845, 845, 845, 845, 845, 845, 845, 845, 845, 845, 845, 845, 845, 845, 845, 845, 845, 845, 845, 845, 845, 845, 845, 845, 845, 845, 845, 845, 845, 845, 845, 845, 845, 845, 845, 845, 845, 845, 845, 845, 845, 845, 845, 845, 845, 845, 845, 845, 845, 845, 845, 845, 845, 845, 845, 845, 845, 845, 845, 845, 845, 845, 845, 845, 845, 845, 845, 845, 845, 845, 845, 845, 845, 845, 845, 845, 845, 845, 845, 845, 845, 845, 845, 845, 845, 845, 845, 
845, 845, 845, 845, 845, 845, 845, 845, 845, 845, 845, 845, 845, 845, 845, 845, 845, 845, 845, 845, 845, 845, 845, 845, 845, 845, 845, 845, 845, 845, 845, 845, 845, 845, 845, 845, 845, 845, 845, 845, 845, 845, 845, 845, 845, 845, 845, 845, 845, 845, 845, 845, 845, 845, 845, 845, 845, 845, 845, 845, 845, 845, 845, 845, 845, 845, 845, 845, 845, 845, 845, 845, 845, 845, 845, 845, 845, 845, 845, 845, 845, 845, 845, 845, 845, 845, 845, 845, 845, 845, 845, 845, 845, 845, 845, 845, 845, 845, 845, 845, 845, 845, 845, 845, 845, 845, 845, 845, 845, 845, 845, 845, 845, 845, 845, 845, 845, 845, 845, 845, 845, 845, 845, 845, 845, 845, 845, 845, 845, 845, 845, 845, 845, 845, 845, 845, 845, 845, 845, 845, 845, 845, 845, 845, 845, 845, 845, 845, 845, 845, 845, 845, 845, 845, 845, 845, 845, 845, 845, 845, 845, 845, 845, 845, 845, 845, 845, 845, 845}, + {844, 844, 844, 844, 844, 844, 844, 844, 844, 844, 844, 844, 844, 844, 844, 844, 844, 844, 844, 844, 844, 844, 844, 844, 844, 844, 844, 844, 844, 844, 844, 844, 844, 844, 844, 844, 844, 844, 844, 844, 844, 844, 844, 844, 844, 844, 844, 844, 844, 844, 844, 844, 844, 844, 844, 844, 844, 844, 844, 844, 844, 844, 844, 844, 844, 844, 844, 844, 844, 844, 844, 844, 844, 844, 844, 844, 844, 844, 844, 844, 844, 844, 844, 844, 844, 844, 844, 844, 844, 844, 844, 844, 844, 844, 844, 844, 844, 844, 844, 844, 844, 844, 844, 844, 844, 844, 844, 844, 844, 844, 844, 844, 844, 844, 844, 844, 844, 844, 844, 844, 844, 844, 844, 844, 844, 844, 844, 844, 844, 844, 844, 844, 844, 844, 844, 844, 844, 844, 844, 844, 844, 844, 844, 844, 844, 844, 844, 844, 844, 844, 844, 844, 844, 844, 844, 844, 844, 844, 844, 844, 844, 844, 844, 844, 844, 844, 844, 844, 844, 844, 844, 844, 844, 844, 844, 844, 844, 844, 844, 844, 844, 844, 844, 844, 844, 844, 844, 844, 844, 844, 844, 844, 844, 844, 844, 844, 844, 844, 844, 844, 844, 844, 844, 844, 844, 844, 844, 844, 844, 844, 844, 844, 844, 844, 844, 844, 844, 844, 844, 844, 844, 844, 844, 844, 844, 844, 844, 844, 844, 844, 844, 844, 844, 844, 844, 844, 844, 844, 844, 844, 844, 844, 844, 844, 844, 844, 844, 844, 844, 844, 844, 844, 844, 844, 844, 844, 844, 844, 844, 844, 844, 844, 844, 844, 844, 844, 844, 844, 844, 844, 844, 844, 844, 844, 844, 844, 844, 844, 844, 844, 844, 844, 844, 844, 844, 844, 844, 844, 844, 844, 844, 844, 844, 844, 844, 844, 844, 844, 844, 844, 844, 844, 844, 844, 844, 844, 844, 844, 844, 844, 844, 844, 844, 844, 844, 844, 844, 844, 844, 844, 844, 844, 844, 844, 844, 844, 844, 844, 844, 844, 844, 844, 844, 844, 844, 844, 844, 844, 844, 844, 844, 844, 844, 844, 844, 844, 844, 844, 844, 844, 844, 844, 844, 844, 844, 844, 844, 844, 844, 844, 844, 844, 844, 844, 844, 844, 844, 844, 844, 844, 844, 844, 844, 844, 844, 844, 844, 844, 844, 844, 844, 844, 844, 844, 844, 844, 844, 844, 844, 844, 844, 844, 844, 844, 844, 844, 844, 844, 844, 844, 844, 844, 844, 844, 844, 844, 844, 844, 844, 844, 844, 844, 844, 844, 844, 844, 844, 844, 844, 844, 844, 844, 844, 844, 844, 844, 844, 844, 844, 844, 844, 844, 844, 844, 844, 844, 844, 844, 844, 844, 844, 844, 844, 844, 844, 844, 844, 844, 844, 844, 844, 844, 844, 844, 844, 844, 844, 844, 844, 844, 844, 844, 844, 844, 844, 844, 844, 844, 844, 844, 844, 844, 844, 844, 844, 844, 844, 844, 844, 844, 844, 844, 844, 844, 844, 844, 844, 844, 844, 844, 844, 844, 844, 844, 844, 844, 844, 844, 844, 844, 844, 844, 844, 844, 844, 844, 844, 844, 844, 844, 844, 844, 844, 844, 844, 844, 844, 844, 844, 844, 844, 844, 844}, + {843, 843, 843, 843, 843, 843, 843, 843, 843, 843, 843, 843, 843, 843, 843, 843, 843, 
843, 843, 843, 843, 843, 843, 843, 843, 843, 843, 843, 843, 843, 843, 843, 843, 843, 843, 843, 843, 843, 843, 843, 843, 843, 843, 843, 843, 843, 843, 843, 843, 843, 843, 843, 843, 843, 843, 843, 843, 843, 843, 843, 843, 843, 843, 843, 843, 843, 843, 843, 843, 843, 843, 843, 843, 843, 843, 843, 843, 843, 843, 843, 843, 843, 843, 843, 843, 843, 843, 843, 843, 843, 843, 843, 843, 843, 843, 843, 843, 843, 843, 843, 843, 843, 843, 843, 843, 843, 843, 843, 843, 843, 843, 843, 843, 843, 843, 843, 843, 843, 843, 843, 843, 843, 843, 843, 843, 843, 843, 843, 843, 843, 843, 843, 843, 843, 843, 843, 843, 843, 843, 843, 843, 843, 843, 843, 843, 843, 843, 843, 843, 843, 843, 843, 843, 843, 843, 843, 843, 843, 843, 843, 843, 843, 843, 843, 843, 843, 843, 843, 843, 843, 843, 843, 843, 843, 843, 843, 843, 843, 843, 843, 843, 843, 843, 843, 843, 843, 843, 843, 843, 843, 843, 843, 843, 843, 843, 843, 843, 843, 843, 843, 843, 843, 843, 843, 843, 843, 843, 843, 843, 843, 843, 843, 843, 843, 843, 843, 843, 843, 843, 843, 843, 843, 843, 843, 843, 843, 843, 843, 843, 843, 843, 843, 843, 843, 843, 843, 843, 843, 843, 843, 843, 843, 843, 843, 843, 843, 843, 843, 843, 843, 843, 843, 843, 843, 843, 843, 843, 843, 843, 843, 843, 843, 843, 843, 843, 843, 843, 843, 843, 843, 843, 843, 843, 843, 843, 843, 843, 843, 843, 843, 843, 843, 843, 843, 843, 843, 843, 843, 843, 843, 843, 843, 843, 843, 843, 843, 843, 843, 843, 843, 843, 843, 843, 843, 843, 843, 843, 843, 843, 843, 843, 843, 843, 843, 843, 843, 843, 843, 843, 843, 843, 843, 843, 843, 843, 843, 843, 843, 843, 843, 843, 843, 843, 843, 843, 843, 843, 843, 843, 843, 843, 843, 843, 843, 843, 843, 843, 843, 843, 843, 843, 843, 843, 843, 843, 843, 843, 843, 843, 843, 843, 843, 843, 843, 843, 843, 843, 843, 843, 843, 843, 843, 843, 843, 843, 843, 843, 843, 843, 843, 843, 843, 843, 843, 843, 843, 843, 843, 843, 843, 843, 843, 843, 843, 843, 843, 843, 843, 843, 843, 843, 843, 843, 843, 843, 843, 843, 843, 843, 843, 843, 843, 843, 843, 843, 843, 843, 843, 843, 843, 843, 843, 843, 843, 843, 843, 843, 843, 843, 843, 843, 843, 843, 843, 843, 843, 843, 843, 843, 843, 843, 843, 843, 843, 843, 843, 843, 843, 843, 843, 843, 843, 843, 843, 843, 843, 843, 843, 843, 843, 843, 843, 843, 843, 843, 843, 843, 843, 843, 843, 843, 843, 843, 843, 843, 843, 843, 843, 843, 843, 843, 843, 843, 843, 843, 843, 843, 843, 843, 843, 843, 843, 843, 843, 843, 843, 843, 843, 843, 843, 843, 843, 843, 843, 843, 843, 843, 843, 843, 843, 843, 843, 843, 843, 843, 843, 843, 843, 843, 843, 843, 843, 843}, + {842, 842, 842, 842, 842, 842, 842, 842, 842, 842, 842, 842, 842, 842, 842, 842, 842, 842, 842, 842, 842, 842, 842, 842, 842, 842, 842, 842, 842, 842, 842, 842, 842, 842, 842, 842, 842, 842, 842, 842, 842, 842, 842, 842, 842, 842, 842, 842, 842, 842, 842, 842, 842, 842, 842, 842, 842, 842, 842, 842, 842, 842, 842, 842, 842, 842, 842, 842, 842, 842, 842, 842, 842, 842, 842, 842, 842, 842, 842, 842, 842, 842, 842, 842, 842, 842, 842, 842, 842, 842, 842, 842, 842, 842, 842, 842, 842, 842, 842, 842, 842, 842, 842, 842, 842, 842, 842, 842, 842, 842, 842, 842, 842, 842, 842, 842, 842, 842, 842, 842, 842, 842, 842, 842, 842, 842, 842, 842, 842, 842, 842, 842, 842, 842, 842, 842, 842, 842, 842, 842, 842, 842, 842, 842, 842, 842, 842, 842, 842, 842, 842, 842, 842, 842, 842, 842, 842, 842, 842, 842, 842, 842, 842, 842, 842, 842, 842, 842, 842, 842, 842, 842, 842, 842, 842, 842, 842, 842, 842, 842, 842, 842, 842, 842, 842, 842, 842, 842, 842, 842, 842, 842, 842, 842, 842, 842, 842, 842, 842, 842, 842, 842, 842, 842, 
842, 842, 842, 842, 842, 842, 842, 842, 842, 842, 842, 842, 842, 842, 842, 842, 842, 842, 842, 842, 842, 842, 842, 842, 842, 842, 842, 842, 842, 842, 842, 842, 842, 842, 842, 842, 842, 842, 842, 842, 842, 842, 842, 842, 842, 842, 842, 842, 842, 842, 842, 842, 842, 842, 842, 842, 842, 842, 842, 842, 842, 842, 842, 842, 842, 842, 842, 842, 842, 842, 842, 842, 842, 842, 842, 842, 842, 842, 842, 842, 842, 842, 842, 842, 842, 842, 842, 842, 842, 842, 842, 842, 842, 842, 842, 842, 842, 842, 842, 842, 842, 842, 842, 842, 842, 842, 842, 842, 842, 842, 842, 842, 842, 842, 842, 842, 842, 842, 842, 842, 842, 842, 842, 842, 842, 842, 842, 842, 842, 842, 842, 842, 842, 842, 842, 842, 842, 842, 842, 842, 842, 842, 842, 842, 842, 842, 842, 842, 842, 842, 842, 842, 842, 842, 842, 842, 842, 842, 842, 842, 842, 842, 842, 842, 842, 842, 842, 842, 842, 842, 842, 842, 842, 842, 842, 842, 842, 842, 842, 842, 842, 842, 842, 842, 842, 842, 842, 842, 842, 842, 842, 842, 842, 842, 842, 842, 842, 842, 842, 842, 842, 842, 842, 842, 842, 842, 842, 842, 842, 842, 842, 842, 842, 842, 842, 842, 842, 842, 842, 842, 842, 842, 842, 842, 842, 842, 842, 842, 842, 842, 842, 842, 842, 842, 842, 842, 842, 842, 842, 842, 842, 842, 842, 842, 842, 842, 842, 842, 842, 842, 842, 842, 842, 842, 842, 842, 842, 842, 842, 842, 842, 842, 842, 842, 842, 842, 842, 842, 842, 842, 842, 842, 842, 842, 842, 842, 842, 842, 842, 842, 842, 842, 842, 842, 842, 842, 842, 842, 842, 842, 842, 842, 842, 842, 842, 842, 842, 842, 842, 842, 842, 842, 842, 842, 842, 842, 842, 842, 842, 842, 842, 842, 842, 842, 842, 842, 842, 842, 842}, + // 100 + {841, 841, 841, 841, 841, 841, 841, 841, 841, 841, 841, 841, 841, 841, 841, 841, 841, 841, 841, 841, 841, 841, 841, 841, 841, 841, 841, 841, 841, 841, 841, 841, 841, 841, 841, 841, 841, 841, 841, 841, 841, 841, 841, 841, 841, 841, 841, 841, 841, 841, 841, 841, 841, 841, 841, 841, 841, 841, 841, 841, 841, 841, 841, 841, 841, 841, 841, 841, 841, 841, 841, 841, 841, 841, 841, 841, 841, 841, 841, 841, 841, 841, 841, 841, 841, 841, 841, 841, 841, 841, 841, 841, 841, 841, 841, 841, 841, 841, 841, 841, 841, 841, 841, 841, 841, 841, 841, 841, 841, 841, 841, 841, 841, 841, 841, 841, 841, 841, 841, 841, 841, 841, 841, 841, 841, 841, 841, 841, 841, 841, 841, 841, 841, 841, 841, 841, 841, 841, 841, 841, 841, 841, 841, 841, 841, 841, 841, 841, 841, 841, 841, 841, 841, 841, 841, 841, 841, 841, 841, 841, 841, 841, 841, 841, 841, 841, 841, 841, 841, 841, 841, 841, 841, 841, 841, 841, 841, 841, 841, 841, 841, 841, 841, 841, 841, 841, 841, 841, 841, 841, 841, 841, 841, 841, 841, 841, 841, 841, 841, 841, 841, 841, 841, 841, 841, 841, 841, 841, 841, 841, 841, 841, 841, 841, 841, 841, 841, 841, 841, 841, 841, 841, 841, 841, 841, 841, 841, 841, 841, 841, 841, 841, 841, 841, 841, 841, 841, 841, 841, 841, 841, 841, 841, 841, 841, 841, 841, 841, 841, 841, 841, 841, 841, 841, 841, 841, 841, 841, 841, 841, 841, 841, 841, 841, 841, 841, 841, 841, 841, 841, 841, 841, 841, 841, 841, 841, 841, 841, 841, 841, 841, 841, 841, 841, 841, 841, 841, 841, 841, 841, 841, 841, 841, 841, 841, 841, 841, 841, 841, 841, 841, 841, 841, 841, 841, 841, 841, 841, 841, 841, 841, 841, 841, 841, 841, 841, 841, 841, 841, 841, 841, 841, 841, 841, 841, 841, 841, 841, 841, 841, 841, 841, 841, 841, 841, 841, 841, 841, 841, 841, 841, 841, 841, 841, 841, 841, 841, 841, 841, 841, 841, 841, 841, 841, 841, 841, 841, 841, 841, 841, 841, 841, 841, 841, 841, 841, 841, 841, 841, 841, 841, 841, 841, 841, 841, 841, 841, 841, 841, 841, 841, 841, 841, 841, 841, 841, 841, 841, 841, 
841, 841, 841, 841, 841, 841, 841, 841, 841, 841, 841, 841, 841, 841, 841, 841, 841, 841, 841, 841, 841, 841, 841, 841, 841, 841, 841, 841, 841, 841, 841, 841, 841, 841, 841, 841, 841, 841, 841, 841, 841, 841, 841, 841, 841, 841, 841, 841, 841, 841, 841, 841, 841, 841, 841, 841, 841, 841, 841, 841, 841, 841, 841, 841, 841, 841, 841, 841, 841, 841, 841, 841, 841, 841, 841, 841, 841, 841, 841, 841, 841, 841, 841, 841, 841, 841, 841, 841, 841, 841, 841, 841, 841, 841, 841, 841, 841, 841, 841, 841, 841, 841, 841, 841, 841, 841, 841, 841, 841, 841, 841, 841, 841, 841, 841, 841, 841, 841, 841, 841, 841, 841, 841, 841, 841, 841, 841, 841, 841, 841, 841, 841, 841, 841}, + {840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840}, + {839, 839, 839, 839, 839, 839, 839, 839, 839, 839, 839, 839, 839, 839, 839, 839, 839, 839, 839, 839, 839, 839, 839, 839, 839, 839, 839, 839, 839, 839, 839, 839, 839, 839, 839, 839, 839, 839, 839, 839, 839, 839, 839, 839, 839, 839, 839, 839, 839, 839, 839, 839, 
839, 839, 839, 839, 839, 839, 839, 839, 839, 839, 839, 839, 839, 839, 839, 839, 839, 839, 839, 839, 839, 839, 839, 839, 839, 839, 839, 839, 839, 839, 839, 839, 839, 839, 839, 839, 839, 839, 839, 839, 839, 839, 839, 839, 839, 839, 839, 839, 839, 839, 839, 839, 839, 839, 839, 839, 839, 839, 839, 839, 839, 839, 839, 839, 839, 839, 839, 839, 839, 839, 839, 839, 839, 839, 839, 839, 839, 839, 839, 839, 839, 839, 839, 839, 839, 839, 839, 839, 839, 839, 839, 839, 839, 839, 839, 839, 839, 839, 839, 839, 839, 839, 839, 839, 839, 839, 839, 839, 839, 839, 839, 839, 839, 839, 839, 839, 839, 839, 839, 839, 839, 839, 839, 839, 839, 839, 839, 839, 839, 839, 839, 839, 839, 839, 839, 839, 839, 839, 839, 839, 839, 839, 839, 839, 839, 839, 839, 839, 839, 839, 839, 839, 839, 839, 839, 839, 839, 839, 839, 839, 839, 839, 839, 839, 839, 839, 839, 839, 839, 839, 839, 839, 839, 839, 839, 839, 839, 839, 839, 839, 839, 839, 839, 839, 839, 839, 839, 839, 839, 839, 839, 839, 839, 839, 839, 839, 839, 839, 839, 839, 839, 839, 839, 839, 839, 839, 839, 839, 839, 839, 839, 839, 839, 839, 839, 839, 839, 839, 839, 839, 839, 839, 839, 839, 839, 839, 839, 839, 839, 839, 839, 839, 839, 839, 839, 839, 839, 839, 839, 839, 839, 839, 839, 839, 839, 839, 839, 839, 839, 839, 839, 839, 839, 839, 839, 839, 839, 839, 839, 839, 839, 839, 839, 839, 839, 839, 839, 839, 839, 839, 839, 839, 839, 839, 839, 839, 839, 839, 839, 839, 839, 839, 839, 839, 839, 839, 839, 839, 839, 839, 839, 839, 839, 839, 839, 839, 839, 839, 839, 839, 839, 839, 839, 839, 839, 839, 839, 839, 839, 839, 839, 839, 839, 839, 839, 839, 839, 839, 839, 839, 839, 839, 839, 839, 839, 839, 839, 839, 839, 839, 839, 839, 839, 839, 839, 839, 839, 839, 839, 839, 839, 839, 839, 839, 839, 839, 839, 839, 839, 839, 839, 839, 839, 839, 839, 839, 839, 839, 839, 839, 839, 839, 839, 839, 839, 839, 839, 839, 839, 839, 839, 839, 839, 839, 839, 839, 839, 839, 839, 839, 839, 839, 839, 839, 839, 839, 839, 839, 839, 839, 839, 839, 839, 839, 839, 839, 839, 839, 839, 839, 839, 839, 839, 839, 839, 839, 839, 839, 839, 839, 839, 839, 839, 839, 839, 839, 839, 839, 839, 839, 839, 839, 839, 839, 839, 839, 839, 839, 839, 839, 839, 839, 839, 839, 839, 839, 839, 839, 839, 839, 839, 839, 839, 839, 839, 839, 839, 839, 839, 839, 839, 839, 839, 839, 839, 839, 839, 839, 839, 839, 839, 839, 839, 839, 839, 839, 839, 839, 839, 839, 839}, + {838, 838, 838, 838, 838, 838, 838, 838, 838, 838, 838, 838, 838, 838, 838, 838, 838, 838, 838, 838, 838, 838, 838, 838, 838, 838, 838, 838, 838, 838, 838, 838, 838, 838, 838, 838, 838, 838, 838, 838, 838, 838, 838, 838, 838, 838, 838, 838, 838, 838, 838, 838, 838, 838, 838, 838, 838, 838, 838, 838, 838, 838, 838, 838, 838, 838, 838, 838, 838, 838, 838, 838, 838, 838, 838, 838, 838, 838, 838, 838, 838, 838, 838, 838, 838, 838, 838, 838, 838, 838, 838, 838, 838, 838, 838, 838, 838, 838, 838, 838, 838, 838, 838, 838, 838, 838, 838, 838, 838, 838, 838, 838, 838, 838, 838, 838, 838, 838, 838, 838, 838, 838, 838, 838, 838, 838, 838, 838, 838, 838, 838, 838, 838, 838, 838, 838, 838, 838, 838, 838, 838, 838, 838, 838, 838, 838, 838, 838, 838, 838, 838, 838, 838, 838, 838, 838, 838, 838, 838, 838, 838, 838, 838, 838, 838, 838, 838, 838, 838, 838, 838, 838, 838, 838, 838, 838, 838, 838, 838, 838, 838, 838, 838, 838, 838, 838, 838, 838, 838, 838, 838, 838, 838, 838, 838, 838, 838, 838, 838, 838, 838, 838, 838, 838, 838, 838, 838, 838, 838, 838, 838, 838, 838, 838, 838, 838, 838, 838, 838, 838, 838, 838, 838, 838, 838, 838, 838, 838, 838, 838, 838, 838, 838, 838, 838, 838, 838, 838, 838, 
838, 838, 838, 838, 838, 838, 838, 838, 838, 838, 838, 838, 838, 838, 838, 838, 838, 838, 838, 838, 838, 838, 838, 838, 838, 838, 838, 838, 838, 838, 838, 838, 838, 838, 838, 838, 838, 838, 838, 838, 838, 838, 838, 838, 838, 838, 838, 838, 838, 838, 838, 838, 838, 838, 838, 838, 838, 838, 838, 838, 838, 838, 838, 838, 838, 838, 838, 838, 838, 838, 838, 838, 838, 838, 838, 838, 838, 838, 838, 838, 838, 838, 838, 838, 838, 838, 838, 838, 838, 838, 838, 838, 838, 838, 838, 838, 838, 838, 838, 838, 838, 838, 838, 838, 838, 838, 838, 838, 838, 838, 838, 838, 838, 838, 838, 838, 838, 838, 838, 838, 838, 838, 838, 838, 838, 838, 838, 838, 838, 838, 838, 838, 838, 838, 838, 838, 838, 838, 838, 838, 838, 838, 838, 838, 838, 838, 838, 838, 838, 838, 838, 838, 838, 838, 838, 838, 838, 838, 838, 838, 838, 838, 838, 838, 838, 838, 838, 838, 838, 838, 838, 838, 838, 838, 838, 838, 838, 838, 838, 838, 838, 838, 838, 838, 838, 838, 838, 838, 838, 838, 838, 838, 838, 838, 838, 838, 838, 838, 838, 838, 838, 838, 838, 838, 838, 838, 838, 838, 838, 838, 838, 838, 838, 838, 838, 838, 838, 838, 838, 838, 838, 838, 838, 838, 838, 838, 838, 838, 838, 838, 838, 838, 838, 838, 838, 838, 838, 838, 838, 838, 838, 838, 838, 838, 838, 838, 838, 838, 838, 838, 838, 838, 838, 838, 838, 838, 838, 838, 838, 838, 838, 838, 838, 838, 838, 838, 838, 838, 838, 838, 838, 838, 838, 838, 838, 838, 838, 838, 838, 838, 838, 838, 838, 838}, + {837, 837, 837, 837, 837, 837, 837, 837, 837, 837, 837, 837, 837, 837, 837, 837, 837, 837, 837, 837, 837, 837, 837, 837, 837, 837, 837, 837, 837, 837, 837, 837, 837, 837, 837, 837, 837, 837, 837, 837, 837, 837, 837, 837, 837, 837, 837, 837, 837, 837, 837, 837, 837, 837, 837, 837, 837, 837, 837, 837, 837, 837, 837, 837, 837, 837, 837, 837, 837, 837, 837, 837, 837, 837, 837, 837, 837, 837, 837, 837, 837, 837, 837, 837, 837, 837, 837, 837, 837, 837, 837, 837, 837, 837, 837, 837, 837, 837, 837, 837, 837, 837, 837, 837, 837, 837, 837, 837, 837, 837, 837, 837, 837, 837, 837, 837, 837, 837, 837, 837, 837, 837, 837, 837, 837, 837, 837, 837, 837, 837, 837, 837, 837, 837, 837, 837, 837, 837, 837, 837, 837, 837, 837, 837, 837, 837, 837, 837, 837, 837, 837, 837, 837, 837, 837, 837, 837, 837, 837, 837, 837, 837, 837, 837, 837, 837, 837, 837, 837, 837, 837, 837, 837, 837, 837, 837, 837, 837, 837, 837, 837, 837, 837, 837, 837, 837, 837, 837, 837, 837, 837, 837, 837, 837, 837, 837, 837, 837, 837, 837, 837, 837, 837, 837, 837, 837, 837, 837, 837, 837, 837, 837, 837, 837, 837, 837, 837, 837, 837, 837, 837, 837, 837, 837, 837, 837, 837, 837, 837, 837, 837, 837, 837, 837, 837, 837, 837, 837, 837, 837, 837, 837, 837, 837, 837, 837, 837, 837, 837, 837, 837, 837, 837, 837, 837, 837, 837, 837, 837, 837, 837, 837, 837, 837, 837, 837, 837, 837, 837, 837, 837, 837, 837, 837, 837, 837, 837, 837, 837, 837, 837, 837, 837, 837, 837, 837, 837, 837, 837, 837, 837, 837, 837, 837, 837, 837, 837, 837, 837, 837, 837, 837, 837, 837, 837, 837, 837, 837, 837, 837, 837, 837, 837, 837, 837, 837, 837, 837, 837, 837, 837, 837, 837, 837, 837, 837, 837, 837, 837, 837, 837, 837, 837, 837, 837, 837, 837, 837, 837, 837, 837, 837, 837, 837, 837, 837, 837, 837, 837, 837, 837, 837, 837, 837, 837, 837, 837, 837, 837, 837, 837, 837, 837, 837, 837, 837, 837, 837, 837, 837, 837, 837, 837, 837, 837, 837, 837, 837, 837, 837, 837, 837, 837, 837, 837, 837, 837, 837, 837, 837, 837, 837, 837, 837, 837, 837, 837, 837, 837, 837, 837, 837, 837, 837, 837, 837, 837, 837, 837, 837, 837, 837, 837, 837, 837, 837, 837, 837, 837, 837, 837, 837, 837, 837, 837, 837, 
837, 837, 837, 837, 837, 837, 837, 837, 837, 837, 837, 837, 837, 837, 837, 837, 837, 837, 837, 837, 837, 837, 837, 837, 837, 837, 837, 837, 837, 837, 837, 837, 837, 837, 837, 837, 837, 837, 837, 837, 837, 837, 837, 837, 837, 837, 837, 837, 837, 837, 837, 837, 837, 837, 837, 837, 837, 837, 837, 837, 837, 837, 837, 837, 837, 837, 837, 837, 837, 837, 837, 837, 837, 837, 837, 837, 837, 837, 837, 837, 837, 837, 837, 837, 837, 837, 837, 837, 837, 837, 837, 837, 837, 837, 837, 837, 837}, + // 105 + {836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836}, + {835, 835, 835, 835, 835, 835, 835, 835, 835, 835, 835, 835, 835, 835, 835, 835, 835, 835, 835, 835, 835, 835, 835, 835, 835, 835, 835, 835, 835, 835, 835, 835, 835, 835, 835, 835, 835, 835, 835, 835, 835, 835, 835, 835, 835, 835, 835, 835, 835, 835, 835, 835, 835, 835, 835, 835, 835, 835, 835, 835, 835, 835, 835, 835, 835, 835, 835, 835, 835, 835, 835, 835, 835, 835, 835, 835, 835, 835, 835, 835, 835, 835, 835, 835, 835, 835, 835, 
835, 835, 835, 835, 835, 835, 835, 835, 835, 835, 835, 835, 835, 835, 835, 835, 835, 835, 835, 835, 835, 835, 835, 835, 835, 835, 835, 835, 835, 835, 835, 835, 835, 835, 835, 835, 835, 835, 835, 835, 835, 835, 835, 835, 835, 835, 835, 835, 835, 835, 835, 835, 835, 835, 835, 835, 835, 835, 835, 835, 835, 835, 835, 835, 835, 835, 835, 835, 835, 835, 835, 835, 835, 835, 835, 835, 835, 835, 835, 835, 835, 835, 835, 835, 835, 835, 835, 835, 835, 835, 835, 835, 835, 835, 835, 835, 835, 835, 835, 835, 835, 835, 835, 835, 835, 835, 835, 835, 835, 835, 835, 835, 835, 835, 835, 835, 835, 835, 835, 835, 835, 835, 835, 835, 835, 835, 835, 835, 835, 835, 835, 835, 835, 835, 835, 835, 835, 835, 835, 835, 835, 835, 835, 835, 835, 835, 835, 835, 835, 835, 835, 835, 835, 835, 835, 835, 835, 835, 835, 835, 835, 835, 835, 835, 835, 835, 835, 835, 835, 835, 835, 835, 835, 835, 835, 835, 835, 835, 835, 835, 835, 835, 835, 835, 835, 835, 835, 835, 835, 835, 835, 835, 835, 835, 835, 835, 835, 835, 835, 835, 835, 835, 835, 835, 835, 835, 835, 835, 835, 835, 835, 835, 835, 835, 835, 835, 835, 835, 835, 835, 835, 835, 835, 835, 835, 835, 835, 835, 835, 835, 835, 835, 835, 835, 835, 835, 835, 835, 835, 835, 835, 835, 835, 835, 835, 835, 835, 835, 835, 835, 835, 835, 835, 835, 835, 835, 835, 835, 835, 835, 835, 835, 835, 835, 835, 835, 835, 835, 835, 835, 835, 835, 835, 835, 835, 835, 835, 835, 835, 835, 835, 835, 835, 835, 835, 835, 835, 835, 835, 835, 835, 835, 835, 835, 835, 835, 835, 835, 835, 835, 835, 835, 835, 835, 835, 835, 835, 835, 835, 835, 835, 835, 835, 835, 835, 835, 835, 835, 835, 835, 835, 835, 835, 835, 835, 835, 835, 835, 835, 835, 835, 835, 835, 835, 835, 835, 835, 835, 835, 835, 835, 835, 835, 835, 835, 835, 835, 835, 835, 835, 835, 835, 835, 835, 835, 835, 835, 835, 835, 835, 835, 835, 835, 835, 835, 835, 835, 835, 835, 835, 835, 835, 835, 835, 835, 835, 835, 835, 835, 835, 835, 835, 835, 835, 835, 835, 835, 835, 835, 835, 835, 835, 835, 835, 835, 835, 835, 835, 835, 835, 835, 835, 835, 835, 835, 835, 835, 835, 835, 835, 835, 835, 835, 835, 835, 835, 835, 835, 835, 835, 835, 835, 835, 835, 835, 835, 835, 835, 835, 835, 835, 835, 835, 835, 835, 835}, + {834, 834, 834, 834, 834, 834, 834, 834, 834, 834, 834, 834, 834, 834, 834, 834, 834, 834, 834, 834, 834, 834, 834, 834, 834, 834, 834, 834, 834, 834, 834, 834, 834, 834, 834, 834, 834, 834, 834, 834, 834, 834, 834, 834, 834, 834, 834, 834, 834, 834, 834, 834, 834, 834, 834, 834, 834, 834, 834, 834, 834, 834, 834, 834, 834, 834, 834, 834, 834, 834, 834, 834, 834, 834, 834, 834, 834, 834, 834, 834, 834, 834, 834, 834, 834, 834, 834, 834, 834, 834, 834, 834, 834, 834, 834, 834, 834, 834, 834, 834, 834, 834, 834, 834, 834, 834, 834, 834, 834, 834, 834, 834, 834, 834, 834, 834, 834, 834, 834, 834, 834, 834, 834, 834, 834, 834, 834, 834, 834, 834, 834, 834, 834, 834, 834, 834, 834, 834, 834, 834, 834, 834, 834, 834, 834, 834, 834, 834, 834, 834, 834, 834, 834, 834, 834, 834, 834, 834, 834, 834, 834, 834, 834, 834, 834, 834, 834, 834, 834, 834, 834, 834, 834, 834, 834, 834, 834, 834, 834, 834, 834, 834, 834, 834, 834, 834, 834, 834, 834, 834, 834, 834, 834, 834, 834, 834, 834, 834, 834, 834, 834, 834, 834, 834, 834, 834, 834, 834, 834, 834, 834, 834, 834, 834, 834, 834, 834, 834, 834, 834, 834, 834, 834, 834, 834, 834, 834, 834, 834, 834, 834, 834, 834, 834, 834, 834, 834, 834, 834, 834, 834, 834, 834, 834, 834, 834, 834, 834, 834, 834, 834, 834, 834, 834, 834, 834, 834, 834, 834, 834, 834, 834, 834, 834, 834, 834, 834, 834, 834, 834, 834, 834, 834, 834, 
834, 834, 834, 834, 834, 834, 834, 834, 834, 834, 834, 834, 834, 834, 834, 834, 834, 834, 834, 834, 834, 834, 834, 834, 834, 834, 834, 834, 834, 834, 834, 834, 834, 834, 834, 834, 834, 834, 834, 834, 834, 834, 834, 834, 834, 834, 834, 834, 834, 834, 834, 834, 834, 834, 834, 834, 834, 834, 834, 834, 834, 834, 834, 834, 834, 834, 834, 834, 834, 834, 834, 834, 834, 834, 834, 834, 834, 834, 834, 834, 834, 834, 834, 834, 834, 834, 834, 834, 834, 834, 834, 834, 834, 834, 834, 834, 834, 834, 834, 834, 834, 834, 834, 834, 834, 834, 834, 834, 834, 834, 834, 834, 834, 834, 834, 834, 834, 834, 834, 834, 834, 834, 834, 834, 834, 834, 834, 834, 834, 834, 834, 834, 834, 834, 834, 834, 834, 834, 834, 834, 834, 834, 834, 834, 834, 834, 834, 834, 834, 834, 834, 834, 834, 834, 834, 834, 834, 834, 834, 834, 834, 834, 834, 834, 834, 834, 834, 834, 834, 834, 834, 834, 834, 834, 834, 834, 834, 834, 834, 834, 834, 834, 834, 834, 834, 834, 834, 834, 834, 834, 834, 834, 834, 834, 834, 834, 834, 834, 834, 834, 834, 834, 834, 834, 834, 834, 834, 834, 834, 834, 834, 834, 834, 834, 834, 834, 834, 834, 834, 834, 834, 834, 834, 834, 834, 834, 834, 834, 834, 834, 834, 834, 834, 834, 834, 834, 834, 834, 834, 834, 834, 834, 834, 834, 834, 834, 834, 834, 834}, + {833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 
833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833}, + {832, 832, 832, 832, 832, 832, 832, 832, 832, 832, 832, 832, 832, 832, 832, 832, 832, 832, 832, 832, 832, 832, 832, 832, 832, 832, 832, 832, 832, 832, 832, 832, 832, 832, 832, 832, 832, 832, 832, 832, 832, 832, 832, 832, 832, 832, 832, 832, 832, 832, 832, 832, 832, 832, 832, 832, 832, 832, 832, 832, 832, 832, 832, 832, 832, 832, 832, 832, 832, 832, 832, 832, 832, 832, 832, 832, 832, 832, 832, 832, 832, 832, 832, 832, 832, 832, 832, 832, 832, 832, 832, 832, 832, 832, 832, 832, 832, 832, 832, 832, 832, 832, 832, 832, 832, 832, 832, 832, 832, 832, 832, 832, 832, 832, 832, 832, 832, 832, 832, 832, 832, 832, 832, 832, 832, 832, 832, 832, 832, 832, 832, 832, 832, 832, 832, 832, 832, 832, 832, 832, 832, 832, 832, 832, 832, 832, 832, 832, 832, 832, 832, 832, 832, 832, 832, 832, 832, 832, 832, 832, 832, 832, 832, 832, 832, 832, 832, 832, 832, 832, 832, 832, 832, 832, 832, 832, 832, 832, 832, 832, 832, 832, 832, 832, 832, 832, 832, 832, 832, 832, 832, 832, 832, 832, 832, 832, 832, 832, 832, 832, 832, 832, 832, 832, 832, 832, 832, 832, 832, 832, 832, 832, 832, 832, 832, 832, 832, 832, 832, 832, 832, 832, 832, 832, 832, 832, 832, 832, 832, 832, 832, 832, 832, 832, 832, 832, 832, 832, 832, 832, 832, 832, 832, 832, 832, 832, 832, 832, 832, 832, 832, 832, 832, 832, 832, 832, 832, 832, 832, 832, 832, 832, 832, 832, 832, 832, 832, 832, 832, 832, 832, 832, 832, 832, 832, 832, 832, 832, 832, 832, 832, 832, 832, 832, 832, 832, 832, 832, 832, 832, 832, 832, 832, 832, 832, 832, 832, 832, 832, 832, 832, 832, 832, 832, 832, 832, 832, 832, 832, 832, 832, 832, 832, 832, 832, 832, 832, 832, 832, 832, 832, 832, 832, 832, 832, 832, 832, 832, 832, 832, 832, 832, 832, 832, 832, 832, 832, 832, 832, 832, 832, 832, 832, 832, 832, 832, 832, 832, 832, 832, 832, 832, 832, 832, 832, 832, 832, 832, 832, 832, 832, 832, 832, 832, 832, 832, 832, 832, 832, 832, 832, 832, 832, 832, 832, 832, 832, 832, 832, 832, 832, 832, 832, 832, 832, 832, 832, 832, 832, 832, 832, 832, 832, 832, 832, 832, 832, 832, 832, 832, 832, 832, 832, 832, 832, 832, 832, 832, 832, 832, 832, 832, 832, 832, 832, 832, 832, 832, 832, 832, 832, 832, 832, 832, 832, 832, 832, 832, 832, 832, 832, 832, 832, 832, 832, 832, 832, 832, 832, 832, 832, 832, 832, 832, 832, 832, 832, 832, 832, 832, 832, 832, 832, 832, 832, 832, 832, 832, 832, 832, 832, 832, 832, 832, 832, 832, 832, 832, 832, 832, 832, 832, 832, 832, 832, 832, 832, 832, 832, 832, 832, 832, 832, 832, 832, 832, 832, 832, 832, 832, 832, 832, 832, 832, 832, 832, 832, 832, 832, 832, 832, 832, 832, 832, 832, 832, 832, 832, 832, 832, 832, 832, 832, 832, 832, 832, 832, 832, 832, 832, 832, 832, 832}, + // 110 + {831, 831, 831, 831, 831, 831, 831, 831, 831, 831, 831, 831, 831, 831, 831, 831, 831, 831, 831, 831, 831, 831, 831, 831, 831, 831, 831, 831, 831, 831, 831, 831, 831, 831, 831, 831, 831, 831, 831, 831, 831, 831, 831, 831, 831, 831, 831, 831, 831, 831, 831, 831, 831, 831, 831, 831, 831, 831, 831, 831, 831, 831, 831, 831, 831, 831, 831, 831, 831, 831, 831, 831, 831, 831, 831, 831, 831, 831, 831, 831, 831, 831, 831, 831, 831, 831, 831, 831, 831, 831, 831, 831, 831, 831, 831, 831, 831, 831, 831, 831, 831, 831, 831, 831, 831, 831, 831, 831, 831, 831, 831, 831, 831, 831, 831, 831, 831, 831, 831, 831, 831, 831, 
831, 831, 831, 831, 831, 831, 831, 831, 831, 831, 831, 831, 831, 831, 831, 831, 831, 831, 831, 831, 831, 831, 831, 831, 831, 831, 831, 831, 831, 831, 831, 831, 831, 831, 831, 831, 831, 831, 831, 831, 831, 831, 831, 831, 831, 831, 831, 831, 831, 831, 831, 831, 831, 831, 831, 831, 831, 831, 831, 831, 831, 831, 831, 831, 831, 831, 831, 831, 831, 831, 831, 831, 831, 831, 831, 831, 831, 831, 831, 831, 831, 831, 831, 831, 831, 831, 831, 831, 831, 831, 831, 831, 831, 831, 831, 831, 831, 831, 831, 831, 831, 831, 831, 831, 831, 831, 831, 831, 831, 831, 831, 831, 831, 831, 831, 831, 831, 831, 831, 831, 831, 831, 831, 831, 831, 831, 831, 831, 831, 831, 831, 831, 831, 831, 831, 831, 831, 831, 831, 831, 831, 831, 831, 831, 831, 831, 831, 831, 831, 831, 831, 831, 831, 831, 831, 831, 831, 831, 831, 831, 831, 831, 831, 831, 831, 831, 831, 831, 831, 831, 831, 831, 831, 831, 831, 831, 831, 831, 831, 831, 831, 831, 831, 831, 831, 831, 831, 831, 831, 831, 831, 831, 831, 831, 831, 831, 831, 831, 831, 831, 831, 831, 831, 831, 831, 831, 831, 831, 831, 831, 831, 831, 831, 831, 831, 831, 831, 831, 831, 831, 831, 831, 831, 831, 831, 831, 831, 831, 831, 831, 831, 831, 831, 831, 831, 831, 831, 831, 831, 831, 831, 831, 831, 831, 831, 831, 831, 831, 831, 831, 831, 831, 831, 831, 831, 831, 831, 831, 831, 831, 831, 831, 831, 831, 831, 831, 831, 831, 831, 831, 831, 831, 831, 831, 831, 831, 831, 831, 831, 831, 831, 831, 831, 831, 831, 831, 831, 831, 831, 831, 831, 831, 831, 831, 831, 831, 831, 831, 831, 831, 831, 831, 831, 831, 831, 831, 831, 831, 831, 831, 831, 831, 831, 831, 831, 831, 831, 831, 831, 831, 831, 831, 831, 831, 831, 831, 831, 831, 831, 831, 831, 831, 831, 831, 831, 831, 831, 831, 831, 831, 831, 831, 831, 831, 831, 831, 831, 831, 831, 831, 831, 831, 831, 831, 831, 831, 831, 831, 831, 831, 831, 831, 831, 831, 831, 831, 831, 831, 831, 831, 831, 831, 831, 831, 831, 831, 831, 831, 831, 831, 831, 831, 831, 831, 831, 831, 831, 831, 831, 831, 831, 831, 831, 831, 831, 831, 831, 831, 831, 831, 831}, + {830, 830, 830, 830, 830, 830, 830, 830, 830, 830, 830, 830, 830, 830, 830, 830, 830, 830, 830, 830, 830, 830, 830, 830, 830, 830, 830, 830, 830, 830, 830, 830, 830, 830, 830, 830, 830, 830, 830, 830, 830, 830, 830, 830, 830, 830, 830, 830, 830, 830, 830, 830, 830, 830, 830, 830, 830, 830, 830, 830, 830, 830, 830, 830, 830, 830, 830, 830, 830, 830, 830, 830, 830, 830, 830, 830, 830, 830, 830, 830, 830, 830, 830, 830, 830, 830, 830, 830, 830, 830, 830, 830, 830, 830, 830, 830, 830, 830, 830, 830, 830, 830, 830, 830, 830, 830, 830, 830, 830, 830, 830, 830, 830, 830, 830, 830, 830, 830, 830, 830, 830, 830, 830, 830, 830, 830, 830, 830, 830, 830, 830, 830, 830, 830, 830, 830, 830, 830, 830, 830, 830, 830, 830, 830, 830, 830, 830, 830, 830, 830, 830, 830, 830, 830, 830, 830, 830, 830, 830, 830, 830, 830, 830, 830, 830, 830, 830, 830, 830, 830, 830, 830, 830, 830, 830, 830, 830, 830, 830, 830, 830, 830, 830, 830, 830, 830, 830, 830, 830, 830, 830, 830, 830, 830, 830, 830, 830, 830, 830, 830, 830, 830, 830, 830, 830, 830, 830, 830, 830, 830, 830, 830, 830, 830, 830, 830, 830, 830, 830, 830, 830, 830, 830, 830, 830, 830, 830, 830, 830, 830, 830, 830, 830, 830, 830, 830, 830, 830, 830, 830, 830, 830, 830, 830, 830, 830, 830, 830, 830, 830, 830, 830, 830, 830, 830, 830, 830, 830, 830, 830, 830, 830, 830, 830, 830, 830, 830, 830, 830, 830, 830, 830, 830, 830, 830, 830, 830, 830, 830, 830, 830, 830, 830, 830, 830, 830, 830, 830, 830, 830, 830, 830, 830, 830, 830, 830, 830, 830, 830, 830, 830, 830, 830, 830, 830, 830, 830, 830, 830, 
830, 830, 830, 830, 830, 830, 830, 830, 830, 830, 830, 830, 830, 830, 830, 830, 830, 830, 830, 830, 830, 830, 830, 830, 830, 830, 830, 830, 830, 830, 830, 830, 830, 830, 830, 830, 830, 830, 830, 830, 830, 830, 830, 830, 830, 830, 830, 830, 830, 830, 830, 830, 830, 830, 830, 830, 830, 830, 830, 830, 830, 830, 830, 830, 830, 830, 830, 830, 830, 830, 830, 830, 830, 830, 830, 830, 830, 830, 830, 830, 830, 830, 830, 830, 830, 830, 830, 830, 830, 830, 830, 830, 830, 830, 830, 830, 830, 830, 830, 830, 830, 830, 830, 830, 830, 830, 830, 830, 830, 830, 830, 830, 830, 830, 830, 830, 830, 830, 830, 830, 830, 830, 830, 830, 830, 830, 830, 830, 830, 830, 830, 830, 830, 830, 830, 830, 830, 830, 830, 830, 830, 830, 830, 830, 830, 830, 830, 830, 830, 830, 830, 830, 830, 830, 830, 830, 830, 830, 830, 830, 830, 830, 830, 830, 830, 830, 830, 830, 830, 830, 830, 830, 830, 830, 830, 830, 830, 830, 830, 830, 830, 830, 830, 830, 830, 830, 830, 830, 830, 830, 830, 830, 830, 830, 830, 830, 830, 830, 830, 830, 830, 830, 830, 830, 830, 830, 830, 830, 830, 830, 830, 830, 830, 830}, + {829, 829, 829, 829, 829, 829, 829, 829, 829, 829, 829, 829, 829, 829, 829, 829, 829, 829, 829, 829, 829, 829, 829, 829, 829, 829, 829, 829, 829, 829, 829, 829, 829, 829, 829, 829, 829, 829, 829, 829, 829, 829, 829, 829, 829, 829, 829, 829, 829, 829, 829, 829, 829, 829, 829, 829, 829, 829, 829, 829, 829, 829, 829, 829, 829, 829, 829, 829, 829, 829, 829, 829, 829, 829, 829, 829, 829, 829, 829, 829, 829, 829, 829, 829, 829, 829, 829, 829, 829, 829, 829, 829, 829, 829, 829, 829, 829, 829, 829, 829, 829, 829, 829, 829, 829, 829, 829, 829, 829, 829, 829, 829, 829, 829, 829, 829, 829, 829, 829, 829, 829, 829, 829, 829, 829, 829, 829, 829, 829, 829, 829, 829, 829, 829, 829, 829, 829, 829, 829, 829, 829, 829, 829, 829, 829, 829, 829, 829, 829, 829, 829, 829, 829, 829, 829, 829, 829, 829, 829, 829, 829, 829, 829, 829, 829, 829, 829, 829, 829, 829, 829, 829, 829, 829, 829, 829, 829, 829, 829, 829, 829, 829, 829, 829, 829, 829, 829, 829, 829, 829, 829, 829, 829, 829, 829, 829, 829, 829, 829, 829, 829, 829, 829, 829, 829, 829, 829, 829, 829, 829, 829, 829, 829, 829, 829, 829, 829, 829, 829, 829, 829, 829, 829, 829, 829, 829, 829, 829, 829, 829, 829, 829, 829, 829, 829, 829, 829, 829, 829, 829, 829, 829, 829, 829, 829, 829, 829, 829, 829, 829, 829, 829, 829, 829, 829, 829, 829, 829, 829, 829, 829, 829, 829, 829, 829, 829, 829, 829, 829, 829, 829, 829, 829, 829, 829, 829, 829, 829, 829, 829, 829, 829, 829, 829, 829, 829, 829, 829, 829, 829, 829, 829, 829, 829, 829, 829, 829, 829, 829, 829, 829, 829, 829, 829, 829, 829, 829, 829, 829, 829, 829, 829, 829, 829, 829, 829, 829, 829, 829, 829, 829, 829, 829, 829, 829, 829, 829, 829, 829, 829, 829, 829, 829, 829, 829, 829, 829, 829, 829, 829, 829, 829, 829, 829, 829, 829, 829, 829, 829, 829, 829, 829, 829, 829, 829, 829, 829, 829, 829, 829, 829, 829, 829, 829, 829, 829, 829, 829, 829, 829, 829, 829, 829, 829, 829, 829, 829, 829, 829, 829, 829, 829, 829, 829, 829, 829, 829, 829, 829, 829, 829, 829, 829, 829, 829, 829, 829, 829, 829, 829, 829, 829, 829, 829, 829, 829, 829, 829, 829, 829, 829, 829, 829, 829, 829, 829, 829, 829, 829, 829, 829, 829, 829, 829, 829, 829, 829, 829, 829, 829, 829, 829, 829, 829, 829, 829, 829, 829, 829, 829, 829, 829, 829, 829, 829, 829, 829, 829, 829, 829, 829, 829, 829, 829, 829, 829, 829, 829, 829, 829, 829, 829, 829, 829, 829, 829, 829, 829, 829, 829, 829, 829, 829, 829, 829, 829, 829, 829, 829, 829, 829, 829, 829, 829, 829, 829, 829, 829, 829, 829, 829, 829, 829, 829, 829, 829, 
829, 829, 829, 829, 829, 829, 829, 829, 829, 829, 829, 829, 829, 829, 829, 829, 829, 829, 829, 829, 829, 829, 829, 829, 829, 829, 829}, + {828, 828, 828, 828, 828, 828, 828, 828, 828, 828, 828, 828, 828, 828, 828, 828, 828, 828, 828, 828, 828, 828, 828, 828, 828, 828, 828, 828, 828, 828, 828, 828, 828, 828, 828, 828, 828, 828, 828, 828, 828, 828, 828, 828, 828, 828, 828, 828, 828, 828, 828, 828, 828, 828, 828, 828, 828, 828, 828, 828, 828, 828, 828, 828, 828, 828, 828, 828, 828, 828, 828, 828, 828, 828, 828, 828, 828, 828, 828, 828, 828, 828, 828, 828, 828, 828, 828, 828, 828, 828, 828, 828, 828, 828, 828, 828, 828, 828, 828, 828, 828, 828, 828, 828, 828, 828, 828, 828, 828, 828, 828, 828, 828, 828, 828, 828, 828, 828, 828, 828, 828, 828, 828, 828, 828, 828, 828, 828, 828, 828, 828, 828, 828, 828, 828, 828, 828, 828, 828, 828, 828, 828, 828, 828, 828, 828, 828, 828, 828, 828, 828, 828, 828, 828, 828, 828, 828, 828, 828, 828, 828, 828, 828, 828, 828, 828, 828, 828, 828, 828, 828, 828, 828, 828, 828, 828, 828, 828, 828, 828, 828, 828, 828, 828, 828, 828, 828, 828, 828, 828, 828, 828, 828, 828, 828, 828, 828, 828, 828, 828, 828, 828, 828, 828, 828, 828, 828, 828, 828, 828, 828, 828, 828, 828, 828, 828, 828, 828, 828, 828, 828, 828, 828, 828, 828, 828, 828, 828, 828, 828, 828, 828, 828, 828, 828, 828, 828, 828, 828, 828, 828, 828, 828, 828, 828, 828, 828, 828, 828, 828, 828, 828, 828, 828, 828, 828, 828, 828, 828, 828, 828, 828, 828, 828, 828, 828, 828, 828, 828, 828, 828, 828, 828, 828, 828, 828, 828, 828, 828, 828, 828, 828, 828, 828, 828, 828, 828, 828, 828, 828, 828, 828, 828, 828, 828, 828, 828, 828, 828, 828, 828, 828, 828, 828, 828, 828, 828, 828, 828, 828, 828, 828, 828, 828, 828, 828, 828, 828, 828, 828, 828, 828, 828, 828, 828, 828, 828, 828, 828, 828, 828, 828, 828, 828, 828, 828, 828, 828, 828, 828, 828, 828, 828, 828, 828, 828, 828, 828, 828, 828, 828, 828, 828, 828, 828, 828, 828, 828, 828, 828, 828, 828, 828, 828, 828, 828, 828, 828, 828, 828, 828, 828, 828, 828, 828, 828, 828, 828, 828, 828, 828, 828, 828, 828, 828, 828, 828, 828, 828, 828, 828, 828, 828, 828, 828, 828, 828, 828, 828, 828, 828, 828, 828, 828, 828, 828, 828, 828, 828, 828, 828, 828, 828, 828, 828, 828, 828, 828, 828, 828, 828, 828, 828, 828, 828, 828, 828, 828, 828, 828, 828, 828, 828, 828, 828, 828, 828, 828, 828, 828, 828, 828, 828, 828, 828, 828, 828, 828, 828, 828, 828, 828, 828, 828, 828, 828, 828, 828, 828, 828, 828, 828, 828, 828, 828, 828, 828, 828, 828, 828, 828, 828, 828, 828, 828, 828, 828, 828, 828, 828, 828, 828, 828, 828, 828, 828, 828, 828, 828, 828, 828, 828, 828, 828, 828, 828, 828, 828, 828, 828, 828, 828, 828, 828, 828, 828, 828, 828, 828, 828, 828, 828, 828, 828, 828, 828, 828, 828, 828, 828, 828, 828, 828}, + {827, 827, 827, 827, 827, 827, 827, 827, 827, 827, 827, 827, 827, 827, 827, 827, 827, 827, 827, 827, 827, 827, 827, 827, 827, 827, 827, 827, 827, 827, 827, 827, 827, 827, 827, 827, 827, 827, 827, 827, 827, 827, 827, 827, 827, 827, 827, 827, 827, 827, 827, 827, 827, 827, 827, 827, 827, 827, 827, 827, 827, 827, 827, 827, 827, 827, 827, 827, 827, 827, 827, 827, 827, 827, 827, 827, 827, 827, 827, 827, 827, 827, 827, 827, 827, 827, 827, 827, 827, 827, 827, 827, 827, 827, 827, 827, 827, 827, 827, 827, 827, 827, 827, 827, 827, 827, 827, 827, 827, 827, 827, 827, 827, 827, 827, 827, 827, 827, 827, 827, 827, 827, 827, 827, 827, 827, 827, 827, 827, 827, 827, 827, 827, 827, 827, 827, 827, 827, 827, 827, 827, 827, 827, 827, 827, 827, 827, 827, 827, 827, 827, 827, 827, 827, 827, 827, 827, 827, 827, 
827, 827, 827, 827, 827, 827, 827, 827, 827, 827, 827, 827, 827, 827, 827, 827, 827, 827, 827, 827, 827, 827, 827, 827, 827, 827, 827, 827, 827, 827, 827, 827, 827, 827, 827, 827, 827, 827, 827, 827, 827, 827, 827, 827, 827, 827, 827, 827, 827, 827, 827, 827, 827, 827, 827, 827, 827, 827, 827, 827, 827, 827, 827, 827, 827, 827, 827, 827, 827, 827, 827, 827, 827, 827, 827, 827, 827, 827, 827, 827, 827, 827, 827, 827, 827, 827, 827, 827, 827, 827, 827, 827, 827, 827, 827, 827, 827, 827, 827, 827, 827, 827, 827, 827, 827, 827, 827, 827, 827, 827, 827, 827, 827, 827, 827, 827, 827, 827, 827, 827, 827, 827, 827, 827, 827, 827, 827, 827, 827, 827, 827, 827, 827, 827, 827, 827, 827, 827, 827, 827, 827, 827, 827, 827, 827, 827, 827, 827, 827, 827, 827, 827, 827, 827, 827, 827, 827, 827, 827, 827, 827, 827, 827, 827, 827, 827, 827, 827, 827, 827, 827, 827, 827, 827, 827, 827, 827, 827, 827, 827, 827, 827, 827, 827, 827, 827, 827, 827, 827, 827, 827, 827, 827, 827, 827, 827, 827, 827, 827, 827, 827, 827, 827, 827, 827, 827, 827, 827, 827, 827, 827, 827, 827, 827, 827, 827, 827, 827, 827, 827, 827, 827, 827, 827, 827, 827, 827, 827, 827, 827, 827, 827, 827, 827, 827, 827, 827, 827, 827, 827, 827, 827, 827, 827, 827, 827, 827, 827, 827, 827, 827, 827, 827, 827, 827, 827, 827, 827, 827, 827, 827, 827, 827, 827, 827, 827, 827, 827, 827, 827, 827, 827, 827, 827, 827, 827, 827, 827, 827, 827, 827, 827, 827, 827, 827, 827, 827, 827, 827, 827, 827, 827, 827, 827, 827, 827, 827, 827, 827, 827, 827, 827, 827, 827, 827, 827, 827, 827, 827, 827, 827, 827, 827, 827, 827, 827, 827, 827, 827, 827, 827, 827, 827, 827, 827, 827, 827, 827, 827, 827, 827, 827, 827, 827, 827, 827, 827, 827, 827, 827, 827, 827, 827, 827, 827, 827, 827, 827, 827, 827, 827, 827, 827, 827, 827, 827, 827, 827, 827, 827, 827, 827, 827, 827}, + // 115 + {826, 826, 826, 826, 826, 826, 826, 826, 826, 826, 826, 826, 826, 826, 826, 826, 826, 826, 826, 826, 826, 826, 826, 826, 826, 826, 826, 826, 826, 826, 826, 826, 826, 826, 826, 826, 826, 826, 826, 826, 826, 826, 826, 826, 826, 826, 826, 826, 826, 826, 826, 826, 826, 826, 826, 826, 826, 826, 826, 826, 826, 826, 826, 826, 826, 826, 826, 826, 826, 826, 826, 826, 826, 826, 826, 826, 826, 826, 826, 826, 826, 826, 826, 826, 826, 826, 826, 826, 826, 826, 826, 826, 826, 826, 826, 826, 826, 826, 826, 826, 826, 826, 826, 826, 826, 826, 826, 826, 826, 826, 826, 826, 826, 826, 826, 826, 826, 826, 826, 826, 826, 826, 826, 826, 826, 826, 826, 826, 826, 826, 826, 826, 826, 826, 826, 826, 826, 826, 826, 826, 826, 826, 826, 826, 826, 826, 826, 826, 826, 826, 826, 826, 826, 826, 826, 826, 826, 826, 826, 826, 826, 826, 826, 826, 826, 826, 826, 826, 826, 826, 826, 826, 826, 826, 826, 826, 826, 826, 826, 826, 826, 826, 826, 826, 826, 826, 826, 826, 826, 826, 826, 826, 826, 826, 826, 826, 826, 826, 826, 826, 826, 826, 826, 826, 826, 826, 826, 826, 826, 826, 826, 826, 826, 826, 826, 826, 826, 826, 826, 826, 826, 826, 826, 826, 826, 826, 826, 826, 826, 826, 826, 826, 826, 826, 826, 826, 826, 826, 826, 826, 826, 826, 826, 826, 826, 826, 826, 826, 826, 826, 826, 826, 826, 826, 826, 826, 826, 826, 826, 826, 826, 826, 826, 826, 826, 826, 826, 826, 826, 826, 826, 826, 826, 826, 826, 826, 826, 826, 826, 826, 826, 826, 826, 826, 826, 826, 826, 826, 826, 826, 826, 826, 826, 826, 826, 826, 826, 826, 826, 826, 826, 826, 826, 826, 826, 826, 826, 826, 826, 826, 826, 826, 826, 826, 826, 826, 826, 826, 826, 826, 826, 826, 826, 826, 826, 826, 826, 826, 826, 826, 826, 826, 826, 826, 826, 826, 826, 826, 826, 826, 826, 826, 826, 826, 
826, 826, 826, 826, 826, 826, 826, 826, 826, 826, 826, 826, 826, 826, 826, 826, 826, 826, 826, 826, 826, 826, 826, 826, 826, 826, 826, 826, 826, 826, 826, 826, 826, 826, 826, 826, 826, 826, 826, 826, 826, 826, 826, 826, 826, 826, 826, 826, 826, 826, 826, 826, 826, 826, 826, 826, 826, 826, 826, 826, 826, 826, 826, 826, 826, 826, 826, 826, 826, 826, 826, 826, 826, 826, 826, 826, 826, 826, 826, 826, 826, 826, 826, 826, 826, 826, 826, 826, 826, 826, 826, 826, 826, 826, 826, 826, 826, 826, 826, 826, 826, 826, 826, 826, 826, 826, 826, 826, 826, 826, 826, 826, 826, 826, 826, 826, 826, 826, 826, 826, 826, 826, 826, 826, 826, 826, 826, 826, 826, 826, 826, 826, 826, 826, 826, 826, 826, 826, 826, 826, 826, 826, 826, 826, 826, 826, 826, 826, 826, 826, 826, 826, 826, 826, 826, 826, 826, 826, 826, 826, 826, 826, 826, 826, 826, 826, 826, 826, 826, 826, 826, 826, 826, 826, 826, 826, 826, 826, 826}, + {825, 825, 825, 825, 825, 825, 825, 825, 825, 825, 825, 825, 825, 825, 825, 825, 825, 825, 825, 825, 825, 825, 825, 825, 825, 825, 825, 825, 825, 825, 825, 825, 825, 825, 825, 825, 825, 825, 825, 825, 825, 825, 825, 825, 825, 825, 825, 825, 825, 825, 825, 825, 825, 825, 825, 825, 825, 825, 825, 825, 825, 825, 825, 825, 825, 825, 825, 825, 825, 825, 825, 825, 825, 825, 825, 825, 825, 825, 825, 825, 825, 825, 825, 825, 825, 825, 825, 825, 825, 825, 825, 825, 825, 825, 825, 825, 825, 825, 825, 825, 825, 825, 825, 825, 825, 825, 825, 825, 825, 825, 825, 825, 825, 825, 825, 825, 825, 825, 825, 825, 825, 825, 825, 825, 825, 825, 825, 825, 825, 825, 825, 825, 825, 825, 825, 825, 825, 825, 825, 825, 825, 825, 825, 825, 825, 825, 825, 825, 825, 825, 825, 825, 825, 825, 825, 825, 825, 825, 825, 825, 825, 825, 825, 825, 825, 825, 825, 825, 825, 825, 825, 825, 825, 825, 825, 825, 825, 825, 825, 825, 825, 825, 825, 825, 825, 825, 825, 825, 825, 825, 825, 825, 825, 825, 825, 825, 825, 825, 825, 825, 825, 825, 825, 825, 825, 825, 825, 825, 825, 825, 825, 825, 825, 825, 825, 825, 825, 825, 825, 825, 825, 825, 825, 825, 825, 825, 825, 825, 825, 825, 825, 825, 825, 825, 825, 825, 825, 825, 825, 825, 825, 825, 825, 825, 825, 825, 825, 825, 825, 825, 825, 825, 825, 825, 825, 825, 825, 825, 825, 825, 825, 825, 825, 825, 825, 825, 825, 825, 825, 825, 825, 825, 825, 825, 825, 825, 825, 825, 825, 825, 825, 825, 825, 825, 825, 825, 825, 825, 825, 825, 825, 825, 825, 825, 825, 825, 825, 825, 825, 825, 825, 825, 825, 825, 825, 825, 825, 825, 825, 825, 825, 825, 825, 825, 825, 825, 825, 825, 825, 825, 825, 825, 825, 825, 825, 825, 825, 825, 825, 825, 825, 825, 825, 825, 825, 825, 825, 825, 825, 825, 825, 825, 825, 825, 825, 825, 825, 825, 825, 825, 825, 825, 825, 825, 825, 825, 825, 825, 825, 825, 825, 825, 825, 825, 825, 825, 825, 825, 825, 825, 825, 825, 825, 825, 825, 825, 825, 825, 825, 825, 825, 825, 825, 825, 825, 825, 825, 825, 825, 825, 825, 825, 825, 825, 825, 825, 825, 825, 825, 825, 825, 825, 825, 825, 825, 825, 825, 825, 825, 825, 825, 825, 825, 825, 825, 825, 825, 825, 825, 825, 825, 825, 825, 825, 825, 825, 825, 825, 825, 825, 825, 825, 825, 825, 825, 825, 825, 825, 825, 825, 825, 825, 825, 825, 825, 825, 825, 825, 825, 825, 825, 825, 825, 825, 825, 825, 825, 825, 825, 825, 825, 825, 825, 825, 825, 825, 825, 825, 825, 825, 825, 825, 825, 825, 825, 825, 825, 825, 825, 825, 825, 825, 825, 825, 825, 825, 825, 825, 825, 825, 825, 825, 825, 825, 825, 825, 825, 825, 825, 825, 825, 825, 825, 825, 825, 825, 825, 825, 825, 825, 825, 825, 825, 825, 825, 825, 825, 825, 825, 825, 825, 825, 825}, + {824, 824, 824, 824, 824, 824, 824, 
824, 824, 824, 824, 824, 824, 824, 824, 824, 824, 824, 824, 824, 824, 824, 824, 824, 824, 824, 824, 824, 824, 824, 824, 824, 824, 824, 824, 824, 824, 824, 824, 824, 824, 824, 824, 824, 824, 824, 824, 824, 824, 824, 824, 824, 824, 824, 824, 824, 824, 824, 824, 824, 824, 824, 824, 824, 824, 824, 824, 824, 824, 824, 824, 824, 824, 824, 824, 824, 824, 824, 824, 824, 824, 824, 824, 824, 824, 824, 824, 824, 824, 824, 824, 824, 824, 824, 824, 824, 824, 824, 824, 824, 824, 824, 824, 824, 824, 824, 824, 824, 824, 824, 824, 824, 824, 824, 824, 824, 824, 824, 824, 824, 824, 824, 824, 824, 824, 824, 824, 824, 824, 824, 824, 824, 824, 824, 824, 824, 824, 824, 824, 824, 824, 824, 824, 824, 824, 824, 824, 824, 824, 824, 824, 824, 824, 824, 824, 824, 824, 824, 824, 824, 824, 824, 824, 824, 824, 824, 824, 824, 824, 824, 824, 824, 824, 824, 824, 824, 824, 824, 824, 824, 824, 824, 824, 824, 824, 824, 824, 824, 824, 824, 824, 824, 824, 824, 824, 824, 824, 824, 824, 824, 824, 824, 824, 824, 824, 824, 824, 824, 824, 824, 824, 824, 824, 824, 824, 824, 824, 824, 824, 824, 824, 824, 824, 824, 824, 824, 824, 824, 824, 824, 824, 824, 824, 824, 824, 824, 824, 824, 824, 824, 824, 824, 824, 824, 824, 824, 824, 824, 824, 824, 824, 824, 824, 824, 824, 824, 824, 824, 824, 824, 824, 824, 824, 824, 824, 824, 824, 824, 824, 824, 824, 824, 824, 824, 824, 824, 824, 824, 824, 824, 824, 824, 824, 824, 824, 824, 824, 824, 824, 824, 824, 824, 824, 824, 824, 824, 824, 824, 824, 824, 824, 824, 824, 824, 824, 824, 824, 824, 824, 824, 824, 824, 824, 824, 824, 824, 824, 824, 824, 824, 824, 824, 824, 824, 824, 824, 824, 824, 824, 824, 824, 824, 824, 824, 824, 824, 824, 824, 824, 824, 824, 824, 824, 824, 824, 824, 824, 824, 824, 824, 824, 824, 824, 824, 824, 824, 824, 824, 824, 824, 824, 824, 824, 824, 824, 824, 824, 824, 824, 824, 824, 824, 824, 824, 824, 824, 824, 824, 824, 824, 824, 824, 824, 824, 824, 824, 824, 824, 824, 824, 824, 824, 824, 824, 824, 824, 824, 824, 824, 824, 824, 824, 824, 824, 824, 824, 824, 824, 824, 824, 824, 824, 824, 824, 824, 824, 824, 824, 824, 824, 824, 824, 824, 824, 824, 824, 824, 824, 824, 824, 824, 824, 824, 824, 824, 824, 824, 824, 824, 824, 824, 824, 824, 824, 824, 824, 824, 824, 824, 824, 824, 824, 824, 824, 824, 824, 824, 824, 824, 824, 824, 824, 824, 824, 824, 824, 824, 824, 824, 824, 824, 824, 824, 824, 824, 824, 824, 824, 824, 824, 824, 824, 824, 824, 824, 824, 824, 824, 824, 824, 824, 824, 824, 824, 824, 824, 824, 824, 824, 824, 824, 824, 824, 824, 824, 824, 824, 824, 824, 824, 824, 824, 824, 824, 824, 824, 824, 824, 824, 824, 824, 824, 824}, + {823, 823, 823, 823, 823, 823, 823, 823, 823, 823, 823, 823, 823, 823, 823, 823, 823, 823, 823, 823, 823, 823, 823, 823, 823, 823, 823, 823, 823, 823, 823, 823, 823, 823, 823, 823, 823, 823, 823, 823, 823, 823, 823, 823, 823, 823, 823, 823, 823, 823, 823, 823, 823, 823, 823, 823, 823, 823, 823, 823, 823, 823, 823, 823, 823, 823, 823, 823, 823, 823, 823, 823, 823, 823, 823, 823, 823, 823, 823, 823, 823, 823, 823, 823, 823, 823, 823, 823, 823, 823, 823, 823, 823, 823, 823, 823, 823, 823, 823, 823, 823, 823, 823, 823, 823, 823, 823, 823, 823, 823, 823, 823, 823, 823, 823, 823, 823, 823, 823, 823, 823, 823, 823, 823, 823, 823, 823, 823, 823, 823, 823, 823, 823, 823, 823, 823, 823, 823, 823, 823, 823, 823, 823, 823, 823, 823, 823, 823, 823, 823, 823, 823, 823, 823, 823, 823, 823, 823, 823, 823, 823, 823, 823, 823, 823, 823, 823, 823, 823, 823, 823, 823, 823, 823, 823, 823, 823, 823, 823, 823, 823, 823, 823, 823, 823, 823, 823, 823, 823, 823, 823, 823, 823, 823, 
823, 823, 823, 823, 823, 823, 823, 823, 823, 823, 823, 823, 823, 823, 823, 823, 823, 823, 823, 823, 823, 823, 823, 823, 823, 823, 823, 823, 823, 823, 823, 823, 823, 823, 823, 823, 823, 823, 823, 823, 823, 823, 823, 823, 823, 823, 823, 823, 823, 823, 823, 823, 823, 823, 823, 823, 823, 823, 823, 823, 823, 823, 823, 823, 823, 823, 823, 823, 823, 823, 823, 823, 823, 823, 823, 823, 823, 823, 823, 823, 823, 823, 823, 823, 823, 823, 823, 823, 823, 823, 823, 823, 823, 823, 823, 823, 823, 823, 823, 823, 823, 823, 823, 823, 823, 823, 823, 823, 823, 823, 823, 823, 823, 823, 823, 823, 823, 823, 823, 823, 823, 823, 823, 823, 823, 823, 823, 823, 823, 823, 823, 823, 823, 823, 823, 823, 823, 823, 823, 823, 823, 823, 823, 823, 823, 823, 823, 823, 823, 823, 823, 823, 823, 823, 823, 823, 823, 823, 823, 823, 823, 823, 823, 823, 823, 823, 823, 823, 823, 823, 823, 823, 823, 823, 823, 823, 823, 823, 823, 823, 823, 823, 823, 823, 823, 823, 823, 823, 823, 823, 823, 823, 823, 823, 823, 823, 823, 823, 823, 823, 823, 823, 823, 823, 823, 823, 823, 823, 823, 823, 823, 823, 823, 823, 823, 823, 823, 823, 823, 823, 823, 823, 823, 823, 823, 823, 823, 823, 823, 823, 823, 823, 823, 823, 823, 823, 823, 823, 823, 823, 823, 823, 823, 823, 823, 823, 823, 823, 823, 823, 823, 823, 823, 823, 823, 823, 823, 823, 823, 823, 823, 823, 823, 823, 823, 823, 823, 823, 823, 823, 823, 823, 823, 823, 823, 823, 823, 823, 823, 823, 823, 823, 823, 823, 823, 823, 823, 823, 823, 823, 823, 823, 823, 823, 823, 823, 823, 823, 823, 823, 823, 823, 823, 823, 823, 823, 823, 823, 823, 823, 823, 823, 823, 823, 823, 823, 823, 823, 823, 823, 823, 823, 823, 823, 823, 823, 823, 823, 823}, + {822, 822, 822, 822, 822, 822, 822, 822, 822, 822, 822, 822, 822, 822, 822, 822, 822, 822, 822, 822, 822, 822, 822, 822, 822, 822, 822, 822, 822, 822, 822, 822, 822, 822, 822, 822, 822, 822, 822, 822, 822, 822, 822, 822, 822, 822, 822, 822, 822, 822, 822, 822, 822, 822, 822, 822, 822, 822, 822, 822, 822, 822, 822, 822, 822, 822, 822, 822, 822, 822, 822, 822, 822, 822, 822, 822, 822, 822, 822, 822, 822, 822, 822, 822, 822, 822, 822, 822, 822, 822, 822, 822, 822, 822, 822, 822, 822, 822, 822, 822, 822, 822, 822, 822, 822, 822, 822, 822, 822, 822, 822, 822, 822, 822, 822, 822, 822, 822, 822, 822, 822, 822, 822, 822, 822, 822, 822, 822, 822, 822, 822, 822, 822, 822, 822, 822, 822, 822, 822, 822, 822, 822, 822, 822, 822, 822, 822, 822, 822, 822, 822, 822, 822, 822, 822, 822, 822, 822, 822, 822, 822, 822, 822, 822, 822, 822, 822, 822, 822, 822, 822, 822, 822, 822, 822, 822, 822, 822, 822, 822, 822, 822, 822, 822, 822, 822, 822, 822, 822, 822, 822, 822, 822, 822, 822, 822, 822, 822, 822, 822, 822, 822, 822, 822, 822, 822, 822, 822, 822, 822, 822, 822, 822, 822, 822, 822, 822, 822, 822, 822, 822, 822, 822, 822, 822, 822, 822, 822, 822, 822, 822, 822, 822, 822, 822, 822, 822, 822, 822, 822, 822, 822, 822, 822, 822, 822, 822, 822, 822, 822, 822, 822, 822, 822, 822, 822, 822, 822, 822, 822, 822, 822, 822, 822, 822, 822, 822, 822, 822, 822, 822, 822, 822, 822, 822, 822, 822, 822, 822, 822, 822, 822, 822, 822, 822, 822, 822, 822, 822, 822, 822, 822, 822, 822, 822, 822, 822, 822, 822, 822, 822, 822, 822, 822, 822, 822, 822, 822, 822, 822, 822, 822, 822, 822, 822, 822, 822, 822, 822, 822, 822, 822, 822, 822, 822, 822, 822, 822, 822, 822, 822, 822, 822, 822, 822, 822, 822, 822, 822, 822, 822, 822, 822, 822, 822, 822, 822, 822, 822, 822, 822, 822, 822, 822, 822, 822, 822, 822, 822, 822, 822, 822, 822, 822, 822, 822, 822, 822, 822, 822, 822, 822, 822, 822, 822, 822, 822, 822, 822, 822, 822, 
822, 822, 822, 822, 822, 822, 822, 822, 822, 822, 822, 822, 822, 822, 822, 822, 822, 822, 822, 822, 822, 822, 822, 822, 822, 822, 822, 822, 822, 822, 822, 822, 822, 822, 822, 822, 822, 822, 822, 822, 822, 822, 822, 822, 822, 822, 822, 822, 822, 822, 822, 822, 822, 822, 822, 822, 822, 822, 822, 822, 822, 822, 822, 822, 822, 822, 822, 822, 822, 822, 822, 822, 822, 822, 822, 822, 822, 822, 822, 822, 822, 822, 822, 822, 822, 822, 822, 822, 822, 822, 822, 822, 822, 822, 822, 822, 822, 822, 822, 822, 822, 822, 822, 822, 822, 822, 822, 822, 822, 822, 822, 822, 822, 822, 822, 822, 822, 822, 822, 822, 822, 822, 822, 822, 822, 822, 822, 822, 822, 822, 822, 822, 822, 822, 822, 822, 822, 822, 822, 822, 822, 822}, + // 120 + {821, 821, 821, 821, 821, 821, 821, 821, 821, 821, 821, 821, 821, 821, 821, 821, 821, 821, 821, 821, 821, 821, 821, 821, 821, 821, 821, 821, 821, 821, 821, 821, 821, 821, 821, 821, 821, 821, 821, 821, 821, 821, 821, 821, 821, 821, 821, 821, 821, 821, 821, 821, 821, 821, 821, 821, 821, 821, 821, 821, 821, 821, 821, 821, 821, 821, 821, 821, 821, 821, 821, 821, 821, 821, 821, 821, 821, 821, 821, 821, 821, 821, 821, 821, 821, 821, 821, 821, 821, 821, 821, 821, 821, 821, 821, 821, 821, 821, 821, 821, 821, 821, 821, 821, 821, 821, 821, 821, 821, 821, 821, 821, 821, 821, 821, 821, 821, 821, 821, 821, 821, 821, 821, 821, 821, 821, 821, 821, 821, 821, 821, 821, 821, 821, 821, 821, 821, 821, 821, 821, 821, 821, 821, 821, 821, 821, 821, 821, 821, 821, 821, 821, 821, 821, 821, 821, 821, 821, 821, 821, 821, 821, 821, 821, 821, 821, 821, 821, 821, 821, 821, 821, 821, 821, 821, 821, 821, 821, 821, 821, 821, 821, 821, 821, 821, 821, 821, 821, 821, 821, 821, 821, 821, 821, 821, 821, 821, 821, 821, 821, 821, 821, 821, 821, 821, 821, 821, 821, 821, 821, 821, 821, 821, 821, 821, 821, 821, 821, 821, 821, 821, 821, 821, 821, 821, 821, 821, 821, 821, 821, 821, 821, 821, 821, 821, 821, 821, 821, 821, 821, 821, 821, 821, 821, 821, 821, 821, 821, 821, 821, 821, 821, 821, 821, 821, 821, 821, 821, 821, 821, 821, 821, 821, 821, 821, 821, 821, 821, 821, 821, 821, 821, 821, 821, 821, 821, 821, 821, 821, 821, 821, 821, 821, 821, 821, 821, 821, 821, 821, 821, 821, 821, 821, 821, 821, 821, 821, 821, 821, 821, 821, 821, 821, 821, 821, 821, 821, 821, 821, 821, 821, 821, 821, 821, 821, 821, 821, 821, 821, 821, 821, 821, 821, 821, 821, 821, 821, 821, 821, 821, 821, 821, 821, 821, 821, 821, 821, 821, 821, 821, 821, 821, 821, 821, 821, 821, 821, 821, 821, 821, 821, 821, 821, 821, 821, 821, 821, 821, 821, 821, 821, 821, 821, 821, 821, 821, 821, 821, 821, 821, 821, 821, 821, 821, 821, 821, 821, 821, 821, 821, 821, 821, 821, 821, 821, 821, 821, 821, 821, 821, 821, 821, 821, 821, 821, 821, 821, 821, 821, 821, 821, 821, 821, 821, 821, 821, 821, 821, 821, 821, 821, 821, 821, 821, 821, 821, 821, 821, 821, 821, 821, 821, 821, 821, 821, 821, 821, 821, 821, 821, 821, 821, 821, 821, 821, 821, 821, 821, 821, 821, 821, 821, 821, 821, 821, 821, 821, 821, 821, 821, 821, 821, 821, 821, 821, 821, 821, 821, 821, 821, 821, 821, 821, 821, 821, 821, 821, 821, 821, 821, 821, 821, 821, 821, 821, 821, 821, 821, 821, 821, 821, 821, 821, 821, 821, 821, 821, 821, 821, 821, 821, 821, 821, 821, 821, 821, 821, 821, 821, 821, 821, 821, 821, 821, 821, 821, 821, 821, 821, 821, 821, 821, 821, 821, 821, 821, 821, 821, 821, 821, 821, 821, 821}, + {820, 820, 820, 820, 820, 820, 820, 820, 820, 820, 820, 820, 820, 820, 820, 820, 820, 820, 820, 820, 820, 820, 820, 820, 820, 820, 820, 820, 820, 820, 820, 820, 820, 820, 820, 820, 820, 820, 820, 820, 820, 820, 
820, 820, 820, 820, 820, 820, 820, 820, 820, 820, 820, 820, 820, 820, 820, 820, 820, 820, 820, 820, 820, 820, 820, 820, 820, 820, 820, 820, 820, 820, 820, 820, 820, 820, 820, 820, 820, 820, 820, 820, 820, 820, 820, 820, 820, 820, 820, 820, 820, 820, 820, 820, 820, 820, 820, 820, 820, 820, 820, 820, 820, 820, 820, 820, 820, 820, 820, 820, 820, 820, 820, 820, 820, 820, 820, 820, 820, 820, 820, 820, 820, 820, 820, 820, 820, 820, 820, 820, 820, 820, 820, 820, 820, 820, 820, 820, 820, 820, 820, 820, 820, 820, 820, 820, 820, 820, 820, 820, 820, 820, 820, 820, 820, 820, 820, 820, 820, 820, 820, 820, 820, 820, 820, 820, 820, 820, 820, 820, 820, 820, 820, 820, 820, 820, 820, 820, 820, 820, 820, 820, 820, 820, 820, 820, 820, 820, 820, 820, 820, 820, 820, 820, 820, 820, 820, 820, 820, 820, 820, 820, 820, 820, 820, 820, 820, 820, 820, 820, 820, 820, 820, 820, 820, 820, 820, 820, 820, 820, 820, 820, 820, 820, 820, 820, 820, 820, 820, 820, 820, 820, 820, 820, 820, 820, 820, 820, 820, 820, 820, 820, 820, 820, 820, 820, 820, 820, 820, 820, 820, 820, 820, 820, 820, 820, 820, 820, 820, 820, 820, 820, 820, 820, 820, 820, 820, 820, 820, 820, 820, 820, 820, 820, 820, 820, 820, 820, 820, 820, 820, 820, 820, 820, 820, 820, 820, 820, 820, 820, 820, 820, 820, 820, 820, 820, 820, 820, 820, 820, 820, 820, 820, 820, 820, 820, 820, 820, 820, 820, 820, 820, 820, 820, 820, 820, 820, 820, 820, 820, 820, 820, 820, 820, 820, 820, 820, 820, 820, 820, 820, 820, 820, 820, 820, 820, 820, 820, 820, 820, 820, 820, 820, 820, 820, 820, 820, 820, 820, 820, 820, 820, 820, 820, 820, 820, 820, 820, 820, 820, 820, 820, 820, 820, 820, 820, 820, 820, 820, 820, 820, 820, 820, 820, 820, 820, 820, 820, 820, 820, 820, 820, 820, 820, 820, 820, 820, 820, 820, 820, 820, 820, 820, 820, 820, 820, 820, 820, 820, 820, 820, 820, 820, 820, 820, 820, 820, 820, 820, 820, 820, 820, 820, 820, 820, 820, 820, 820, 820, 820, 820, 820, 820, 820, 820, 820, 820, 820, 820, 820, 820, 820, 820, 820, 820, 820, 820, 820, 820, 820, 820, 820, 820, 820, 820, 820, 820, 820, 820, 820, 820, 820, 820, 820, 820, 820, 820, 820, 820, 820, 820, 820, 820, 820, 820, 820, 820, 820, 820, 820, 820, 820, 820, 820, 820, 820, 820, 820, 820, 820, 820, 820, 820, 820, 820, 820, 820, 820, 820, 820, 820, 820, 820, 820, 820, 820, 820, 820, 820, 820, 820, 820, 820, 820, 820, 820, 820, 820, 820, 820, 820, 820, 820, 820, 820, 820, 820, 820, 820, 820, 820, 820, 820}, + {819, 819, 819, 819, 819, 819, 819, 819, 819, 819, 819, 819, 819, 819, 819, 819, 819, 819, 819, 819, 819, 819, 819, 819, 819, 819, 819, 819, 819, 819, 819, 819, 819, 819, 819, 819, 819, 819, 819, 819, 819, 819, 819, 819, 819, 819, 819, 819, 819, 819, 819, 819, 819, 819, 819, 819, 819, 819, 819, 819, 819, 819, 819, 819, 819, 819, 819, 819, 819, 819, 819, 819, 819, 819, 819, 819, 819, 819, 819, 819, 819, 819, 819, 819, 819, 819, 819, 819, 819, 819, 819, 819, 819, 819, 819, 819, 819, 819, 819, 819, 819, 819, 819, 819, 819, 819, 819, 819, 819, 819, 819, 819, 819, 819, 819, 819, 819, 819, 819, 819, 819, 819, 819, 819, 819, 819, 819, 819, 819, 819, 819, 819, 819, 819, 819, 819, 819, 819, 819, 819, 819, 819, 819, 819, 819, 819, 819, 819, 819, 819, 819, 819, 819, 819, 819, 819, 819, 819, 819, 819, 819, 819, 819, 819, 819, 819, 819, 819, 819, 819, 819, 819, 819, 819, 819, 819, 819, 819, 819, 819, 819, 819, 819, 819, 819, 819, 819, 819, 819, 819, 819, 819, 819, 819, 819, 819, 819, 819, 819, 819, 819, 819, 819, 819, 819, 819, 819, 819, 819, 819, 819, 819, 819, 819, 819, 819, 819, 819, 819, 819, 819, 819, 819, 819, 819, 819, 819, 819, 819, 
819, 819, 819, 819, 819, 819, 819, 819, 819, 819, 819, 819, 819, 819, 819, 819, 819, 819, 819, 819, 819, 819, 819, 819, 819, 819, 819, 819, 819, 819, 819, 819, 819, 819, 819, 819, 819, 819, 819, 819, 819, 819, 819, 819, 819, 819, 819, 819, 819, 819, 819, 819, 819, 819, 819, 819, 819, 819, 819, 819, 819, 819, 819, 819, 819, 819, 819, 819, 819, 819, 819, 819, 819, 819, 819, 819, 819, 819, 819, 819, 819, 819, 819, 819, 819, 819, 819, 819, 819, 819, 819, 819, 819, 819, 819, 819, 819, 819, 819, 819, 819, 819, 819, 819, 819, 819, 819, 819, 819, 819, 819, 819, 819, 819, 819, 819, 819, 819, 819, 819, 819, 819, 819, 819, 819, 819, 819, 819, 819, 819, 819, 819, 819, 819, 819, 819, 819, 819, 819, 819, 819, 819, 819, 819, 819, 819, 819, 819, 819, 819, 819, 819, 819, 819, 819, 819, 819, 819, 819, 819, 819, 819, 819, 819, 819, 819, 819, 819, 819, 819, 819, 819, 819, 819, 819, 819, 819, 819, 819, 819, 819, 819, 819, 819, 819, 819, 819, 819, 819, 819, 819, 819, 819, 819, 819, 819, 819, 819, 819, 819, 819, 819, 819, 819, 819, 819, 819, 819, 819, 819, 819, 819, 819, 819, 819, 819, 819, 819, 819, 819, 819, 819, 819, 819, 819, 819, 819, 819, 819, 819, 819, 819, 819, 819, 819, 819, 819, 819, 819, 819, 819, 819, 819, 819, 819, 819, 819, 819, 819, 819, 819, 819, 819, 819, 819, 819, 819, 819, 819, 819, 819, 819, 819, 819, 819, 819, 819, 819, 819, 819, 819, 819, 819, 819, 819, 819, 819, 819, 819, 819, 819, 819, 819, 819, 819, 819, 819, 819, 819, 819, 819, 819, 819, 819}, + {818, 818, 818, 818, 818, 818, 818, 818, 818, 818, 818, 818, 818, 818, 818, 818, 818, 818, 818, 818, 818, 818, 818, 818, 818, 818, 818, 818, 818, 818, 818, 818, 818, 818, 818, 818, 818, 818, 818, 818, 818, 818, 818, 818, 818, 818, 818, 818, 818, 818, 818, 818, 818, 818, 818, 818, 818, 818, 818, 818, 818, 818, 818, 818, 818, 818, 818, 818, 818, 818, 818, 818, 818, 818, 818, 818, 818, 818, 818, 818, 818, 818, 818, 818, 818, 818, 818, 818, 818, 818, 818, 818, 818, 818, 818, 818, 818, 818, 818, 818, 818, 818, 818, 818, 818, 818, 818, 818, 818, 818, 818, 818, 818, 818, 818, 818, 818, 818, 818, 818, 818, 818, 818, 818, 818, 818, 818, 818, 818, 818, 818, 818, 818, 818, 818, 818, 818, 818, 818, 818, 818, 818, 818, 818, 818, 818, 818, 818, 818, 818, 818, 818, 818, 818, 818, 818, 818, 818, 818, 818, 818, 818, 818, 818, 818, 818, 818, 818, 818, 818, 818, 818, 818, 818, 818, 818, 818, 818, 818, 818, 818, 818, 818, 818, 818, 818, 818, 818, 818, 818, 818, 818, 818, 818, 818, 818, 818, 818, 818, 818, 818, 818, 818, 818, 818, 818, 818, 818, 818, 818, 818, 818, 818, 818, 818, 818, 818, 818, 818, 818, 818, 818, 818, 818, 818, 818, 818, 818, 818, 818, 818, 818, 818, 818, 818, 818, 818, 818, 818, 818, 818, 818, 818, 818, 818, 818, 818, 818, 818, 818, 818, 818, 818, 818, 818, 818, 818, 818, 818, 818, 818, 818, 818, 818, 818, 818, 818, 818, 818, 818, 818, 818, 818, 818, 818, 818, 818, 818, 818, 818, 818, 818, 818, 818, 818, 818, 818, 818, 818, 818, 818, 818, 818, 818, 818, 818, 818, 818, 818, 818, 818, 818, 818, 818, 818, 818, 818, 818, 818, 818, 818, 818, 818, 818, 818, 818, 818, 818, 818, 818, 818, 818, 818, 818, 818, 818, 818, 818, 818, 818, 818, 818, 818, 818, 818, 818, 818, 818, 818, 818, 818, 818, 818, 818, 818, 818, 818, 818, 818, 818, 818, 818, 818, 818, 818, 818, 818, 818, 818, 818, 818, 818, 818, 818, 818, 818, 818, 818, 818, 818, 818, 818, 818, 818, 818, 818, 818, 818, 818, 818, 818, 818, 818, 818, 818, 818, 818, 818, 818, 818, 818, 818, 818, 818, 818, 818, 818, 818, 818, 818, 818, 818, 818, 818, 818, 818, 818, 818, 818, 818, 818, 818, 818, 818, 818, 818, 
818, 818, 818, 818, 818, 818, 818, 818, 818, 818, 818, 818, 818, 818, 818, 818, 818, 818, 818, 818, 818, 818, 818, 818, 818, 818, 818, 818, 818, 818, 818, 818, 818, 818, 818, 818, 818, 818, 818, 818, 818, 818, 818, 818, 818, 818, 818, 818, 818, 818, 818, 818, 818, 818, 818, 818, 818, 818, 818, 818, 818, 818, 818, 818, 818, 818, 818, 818, 818, 818, 818, 818, 818, 818, 818, 818, 818, 818, 818, 818, 818, 818, 818, 818, 818, 818, 818, 818, 818, 818, 818, 818, 818, 818, 818, 818, 818, 818, 818, 818, 818, 818, 818, 818, 818, 818, 818}, + {817, 817, 817, 817, 817, 817, 817, 817, 817, 817, 817, 817, 817, 817, 817, 817, 817, 817, 817, 817, 817, 817, 817, 817, 817, 817, 817, 817, 817, 817, 817, 817, 817, 817, 817, 817, 817, 817, 817, 817, 817, 817, 817, 817, 817, 817, 817, 817, 817, 817, 817, 817, 817, 817, 817, 817, 817, 817, 817, 817, 817, 817, 817, 817, 817, 817, 817, 817, 817, 817, 817, 817, 817, 817, 817, 817, 817, 817, 817, 817, 817, 817, 817, 817, 817, 817, 817, 817, 817, 817, 817, 817, 817, 817, 817, 817, 817, 817, 817, 817, 817, 817, 817, 817, 817, 817, 817, 817, 817, 817, 817, 817, 817, 817, 817, 817, 817, 817, 817, 817, 817, 817, 817, 817, 817, 817, 817, 817, 817, 817, 817, 817, 817, 817, 817, 817, 817, 817, 817, 817, 817, 817, 817, 817, 817, 817, 817, 817, 817, 817, 817, 817, 817, 817, 817, 817, 817, 817, 817, 817, 817, 817, 817, 817, 817, 817, 817, 817, 817, 817, 817, 817, 817, 817, 817, 817, 817, 817, 817, 817, 817, 817, 817, 817, 817, 817, 817, 817, 817, 817, 817, 817, 817, 817, 817, 817, 817, 817, 817, 817, 817, 817, 817, 817, 817, 817, 817, 817, 817, 817, 817, 817, 817, 817, 817, 817, 817, 817, 817, 817, 817, 817, 817, 817, 817, 817, 817, 817, 817, 817, 817, 817, 817, 817, 817, 817, 817, 817, 817, 817, 817, 817, 817, 817, 817, 817, 817, 817, 817, 817, 817, 817, 817, 817, 817, 817, 817, 817, 817, 817, 817, 817, 817, 817, 817, 817, 817, 817, 817, 817, 817, 817, 817, 817, 817, 817, 817, 817, 817, 817, 817, 817, 817, 817, 817, 817, 817, 817, 817, 817, 817, 817, 817, 817, 817, 817, 817, 817, 817, 817, 817, 817, 817, 817, 817, 817, 817, 817, 817, 817, 817, 817, 817, 817, 817, 817, 817, 817, 817, 817, 817, 817, 817, 817, 817, 817, 817, 817, 817, 817, 817, 817, 817, 817, 817, 817, 817, 817, 817, 817, 817, 817, 817, 817, 817, 817, 817, 817, 817, 817, 817, 817, 817, 817, 817, 817, 817, 817, 817, 817, 817, 817, 817, 817, 817, 817, 817, 817, 817, 817, 817, 817, 817, 817, 817, 817, 817, 817, 817, 817, 817, 817, 817, 817, 817, 817, 817, 817, 817, 817, 817, 817, 817, 817, 817, 817, 817, 817, 817, 817, 817, 817, 817, 817, 817, 817, 817, 817, 817, 817, 817, 817, 817, 817, 817, 817, 817, 817, 817, 817, 817, 817, 817, 817, 817, 817, 817, 817, 817, 817, 817, 817, 817, 817, 817, 817, 817, 817, 817, 817, 817, 817, 817, 817, 817, 817, 817, 817, 817, 817, 817, 817, 817, 817, 817, 817, 817, 817, 817, 817, 817, 817, 817, 817, 817, 817, 817, 817, 817, 817, 817, 817, 817, 817, 817, 817, 817, 817, 817, 817, 817, 817, 817, 817, 817, 817, 817, 817, 817, 817, 817, 817, 817, 817, 817, 817, 817, 817, 817, 817, 817, 817, 817, 817, 817, 817, 817, 817, 817, 817, 817, 817, 817, 817, 817, 817, 817, 817, 817, 817, 817, 817, 817}, + // 125 + {816, 816, 816, 816, 816, 816, 816, 816, 816, 816, 816, 816, 816, 816, 816, 816, 816, 816, 816, 816, 816, 816, 816, 816, 816, 816, 816, 816, 816, 816, 816, 816, 816, 816, 816, 816, 816, 816, 816, 816, 816, 816, 816, 816, 816, 816, 816, 816, 816, 816, 816, 816, 816, 816, 816, 816, 816, 816, 816, 816, 816, 816, 816, 816, 816, 816, 816, 816, 816, 816, 816, 816, 816, 816, 816, 816, 816, 
816, 816, 816, 816, 816, 816, 816, 816, 816, 816, 816, 816, 816, 816, 816, 816, 816, 816, 816, 816, 816, 816, 816, 816, 816, 816, 816, 816, 816, 816, 816, 816, 816, 816, 816, 816, 816, 816, 816, 816, 816, 816, 816, 816, 816, 816, 816, 816, 816, 816, 816, 816, 816, 816, 816, 816, 816, 816, 816, 816, 816, 816, 816, 816, 816, 816, 816, 816, 816, 816, 816, 816, 816, 816, 816, 816, 816, 816, 816, 816, 816, 816, 816, 816, 816, 816, 816, 816, 816, 816, 816, 816, 816, 816, 816, 816, 816, 816, 816, 816, 816, 816, 816, 816, 816, 816, 816, 816, 816, 816, 816, 816, 816, 816, 816, 816, 816, 816, 816, 816, 816, 816, 816, 816, 816, 816, 816, 816, 816, 816, 816, 816, 816, 816, 816, 816, 816, 816, 816, 816, 816, 816, 816, 816, 816, 816, 816, 816, 816, 816, 816, 816, 816, 816, 816, 816, 816, 816, 816, 816, 816, 816, 816, 816, 816, 816, 816, 816, 816, 816, 816, 816, 816, 816, 816, 816, 816, 816, 816, 816, 816, 816, 816, 816, 816, 816, 816, 816, 816, 816, 816, 816, 816, 816, 816, 816, 816, 816, 816, 816, 816, 816, 816, 816, 816, 816, 816, 816, 816, 816, 816, 816, 816, 816, 816, 816, 816, 816, 816, 816, 816, 816, 816, 816, 816, 816, 816, 816, 816, 816, 816, 816, 816, 816, 816, 816, 816, 816, 816, 816, 816, 816, 816, 816, 816, 816, 816, 816, 816, 816, 816, 816, 816, 816, 816, 816, 816, 816, 816, 816, 816, 816, 816, 816, 816, 816, 816, 816, 816, 816, 816, 816, 816, 816, 816, 816, 816, 816, 816, 816, 816, 816, 816, 816, 816, 816, 816, 816, 816, 816, 816, 816, 816, 816, 816, 816, 816, 816, 816, 816, 816, 816, 816, 816, 816, 816, 816, 816, 816, 816, 816, 816, 816, 816, 816, 816, 816, 816, 816, 816, 816, 816, 816, 816, 816, 816, 816, 816, 816, 816, 816, 816, 816, 816, 816, 816, 816, 816, 816, 816, 816, 816, 816, 816, 816, 816, 816, 816, 816, 816, 816, 816, 816, 816, 816, 816, 816, 816, 816, 816, 816, 816, 816, 816, 816, 816, 816, 816, 816, 816, 816, 816, 816, 816, 816, 816, 816, 816, 816, 816, 816, 816, 816, 816, 816, 816, 816, 816, 816, 816, 816, 816, 816, 816, 816, 816, 816, 816, 816, 816, 816, 816, 816, 816, 816, 816, 816, 816, 816, 816, 816, 816, 816, 816, 816, 816, 816, 816, 816, 816, 816, 816, 816, 816, 816, 816, 816, 816, 816, 816, 816, 816, 816, 816, 816, 816, 816, 816, 816, 816, 816, 816, 816, 816, 816, 816}, + {815, 815, 815, 815, 815, 815, 815, 815, 815, 815, 815, 815, 815, 815, 815, 815, 815, 815, 815, 815, 815, 815, 815, 815, 815, 815, 815, 815, 815, 815, 815, 815, 815, 815, 815, 815, 815, 815, 815, 815, 815, 815, 815, 815, 815, 815, 815, 815, 815, 815, 815, 815, 815, 815, 815, 815, 815, 815, 815, 815, 815, 815, 815, 815, 815, 815, 815, 815, 815, 815, 815, 815, 815, 815, 815, 815, 815, 815, 815, 815, 815, 815, 815, 815, 815, 815, 815, 815, 815, 815, 815, 815, 815, 815, 815, 815, 815, 815, 815, 815, 815, 815, 815, 815, 815, 815, 815, 815, 815, 815, 815, 815, 815, 815, 815, 815, 815, 815, 815, 815, 815, 815, 815, 815, 815, 815, 815, 815, 815, 815, 815, 815, 815, 815, 815, 815, 815, 815, 815, 815, 815, 815, 815, 815, 815, 815, 815, 815, 815, 815, 815, 815, 815, 815, 815, 815, 815, 815, 815, 815, 815, 815, 815, 815, 815, 815, 815, 815, 815, 815, 815, 815, 815, 815, 815, 815, 815, 815, 815, 815, 815, 815, 815, 815, 815, 815, 815, 815, 815, 815, 815, 815, 815, 815, 815, 815, 815, 815, 815, 815, 815, 815, 815, 815, 815, 815, 815, 815, 815, 815, 815, 815, 815, 815, 815, 815, 815, 815, 815, 815, 815, 815, 815, 815, 815, 815, 815, 815, 815, 815, 815, 815, 815, 815, 815, 815, 815, 815, 815, 815, 815, 815, 815, 815, 815, 815, 815, 815, 815, 815, 815, 815, 815, 815, 815, 815, 815, 815, 815, 815, 815, 815, 815, 815, 
815, 815, 815, 815, 815, 815, 815, 815, 815, 815, 815, 815, 815, 815, 815, 815, 815, 815, 815, 815, 815, 815, 815, 815, 815, 815, 815, 815, 815, 815, 815, 815, 815, 815, 815, 815, 815, 815, 815, 815, 815, 815, 815, 815, 815, 815, 815, 815, 815, 815, 815, 815, 815, 815, 815, 815, 815, 815, 815, 815, 815, 815, 815, 815, 815, 815, 815, 815, 815, 815, 815, 815, 815, 815, 815, 815, 815, 815, 815, 815, 815, 815, 815, 815, 815, 815, 815, 815, 815, 815, 815, 815, 815, 815, 815, 815, 815, 815, 815, 815, 815, 815, 815, 815, 815, 815, 815, 815, 815, 815, 815, 815, 815, 815, 815, 815, 815, 815, 815, 815, 815, 815, 815, 815, 815, 815, 815, 815, 815, 815, 815, 815, 815, 815, 815, 815, 815, 815, 815, 815, 815, 815, 815, 815, 815, 815, 815, 815, 815, 815, 815, 815, 815, 815, 815, 815, 815, 815, 815, 815, 815, 815, 815, 815, 815, 815, 815, 815, 815, 815, 815, 815, 815, 815, 815, 815, 815, 815, 815, 815, 815, 815, 815, 815, 815, 815, 815, 815, 815, 815, 815, 815, 815, 815, 815, 815, 815, 815, 815, 815, 815, 815, 815, 815, 815, 815, 815, 815, 815, 815, 815, 815, 815, 815, 815, 815, 815, 815, 815, 815, 815, 815, 815, 815, 815, 815, 815, 815, 815, 815, 815, 815, 815, 815, 815, 815, 815, 815, 815, 815, 815, 815, 815, 815, 815, 815, 815, 815, 815, 815, 815, 815, 815, 815, 815, 815, 815, 815, 815}, + {814, 814, 814, 814, 814, 814, 814, 814, 814, 814, 814, 814, 814, 814, 814, 814, 814, 814, 814, 814, 814, 814, 814, 814, 814, 814, 814, 814, 814, 814, 814, 814, 814, 814, 814, 814, 814, 814, 814, 814, 814, 814, 814, 814, 814, 814, 814, 814, 814, 814, 814, 814, 814, 814, 814, 814, 814, 814, 814, 814, 814, 814, 814, 814, 814, 814, 814, 814, 814, 814, 814, 814, 814, 814, 814, 814, 814, 814, 814, 814, 814, 814, 814, 814, 814, 814, 814, 814, 814, 814, 814, 814, 814, 814, 814, 814, 814, 814, 814, 814, 814, 814, 814, 814, 814, 814, 814, 814, 814, 814, 814, 814, 814, 814, 814, 814, 814, 814, 814, 814, 814, 814, 814, 814, 814, 814, 814, 814, 814, 814, 814, 814, 814, 814, 814, 814, 814, 814, 814, 814, 814, 814, 814, 814, 814, 814, 814, 814, 814, 814, 814, 814, 814, 814, 814, 814, 814, 814, 814, 814, 814, 814, 814, 814, 814, 814, 814, 814, 814, 814, 814, 814, 814, 814, 814, 814, 814, 814, 814, 814, 814, 814, 814, 814, 814, 814, 814, 814, 814, 814, 814, 814, 814, 814, 814, 814, 814, 814, 814, 814, 814, 814, 814, 814, 814, 814, 814, 814, 814, 814, 814, 814, 814, 814, 814, 814, 814, 814, 814, 814, 814, 814, 814, 814, 814, 814, 814, 814, 814, 814, 814, 814, 814, 814, 814, 814, 814, 814, 814, 814, 814, 814, 814, 814, 814, 814, 814, 814, 814, 814, 814, 814, 814, 814, 814, 814, 814, 814, 814, 814, 814, 814, 814, 814, 814, 814, 814, 814, 814, 814, 814, 814, 814, 814, 814, 814, 814, 814, 814, 814, 814, 814, 814, 814, 814, 814, 814, 814, 814, 814, 814, 814, 814, 814, 814, 814, 814, 814, 814, 814, 814, 814, 814, 814, 814, 814, 814, 814, 814, 814, 814, 814, 814, 814, 814, 814, 814, 814, 814, 814, 814, 814, 814, 814, 814, 814, 814, 814, 814, 814, 814, 814, 814, 814, 814, 814, 814, 814, 814, 814, 814, 814, 814, 814, 814, 814, 814, 814, 814, 814, 814, 814, 814, 814, 814, 814, 814, 814, 814, 814, 814, 814, 814, 814, 814, 814, 814, 814, 814, 814, 814, 814, 814, 814, 814, 814, 814, 814, 814, 814, 814, 814, 814, 814, 814, 814, 814, 814, 814, 814, 814, 814, 814, 814, 814, 814, 814, 814, 814, 814, 814, 814, 814, 814, 814, 814, 814, 814, 814, 814, 814, 814, 814, 814, 814, 814, 814, 814, 814, 814, 814, 814, 814, 814, 814, 814, 814, 814, 814, 814, 814, 814, 814, 814, 814, 814, 814, 814, 814, 814, 814, 814, 814, 814, 814, 814, 814, 814, 814, 814, 814, 
814, 814, 814, 814, 814, 814, 814, 814, 814, 814, 814, 814, 814, 814, 814, 814, 814, 814, 814, 814, 814, 814, 814, 814, 814, 814, 814, 814, 814, 814, 814, 814, 814, 814, 814, 814, 814, 814, 814, 814, 814, 814, 814, 814, 814, 814, 814, 814, 814, 814, 814, 814, 814, 814, 814, 814, 814, 814, 814, 814, 814, 814, 814, 814, 814, 814, 814, 814, 814, 814, 814, 814}, + {813, 813, 813, 813, 813, 813, 813, 813, 813, 813, 813, 813, 813, 813, 813, 813, 813, 813, 813, 813, 813, 813, 813, 813, 813, 813, 813, 813, 813, 813, 813, 813, 813, 813, 813, 813, 813, 813, 813, 813, 813, 813, 813, 813, 813, 813, 813, 813, 813, 813, 813, 813, 813, 813, 813, 813, 813, 813, 813, 813, 813, 813, 813, 813, 813, 813, 813, 813, 813, 813, 813, 813, 813, 813, 813, 813, 813, 813, 813, 813, 813, 813, 813, 813, 813, 813, 813, 813, 813, 813, 813, 813, 813, 813, 813, 813, 813, 813, 813, 813, 813, 813, 813, 813, 813, 813, 813, 813, 813, 813, 813, 813, 813, 813, 813, 813, 813, 813, 813, 813, 813, 813, 813, 813, 813, 813, 813, 813, 813, 813, 813, 813, 813, 813, 813, 813, 813, 813, 813, 813, 813, 813, 813, 813, 813, 813, 813, 813, 813, 813, 813, 813, 813, 813, 813, 813, 813, 813, 813, 813, 813, 813, 813, 813, 813, 813, 813, 813, 813, 813, 813, 813, 813, 813, 813, 813, 813, 813, 813, 813, 813, 813, 813, 813, 813, 813, 813, 813, 813, 813, 813, 813, 813, 813, 813, 813, 813, 813, 813, 813, 813, 813, 813, 813, 813, 813, 813, 813, 813, 813, 813, 813, 813, 813, 813, 813, 813, 813, 813, 813, 813, 813, 813, 813, 813, 813, 813, 813, 813, 813, 813, 813, 813, 813, 813, 813, 813, 813, 813, 813, 813, 813, 813, 813, 813, 813, 813, 813, 813, 813, 813, 813, 813, 813, 813, 813, 813, 813, 813, 813, 813, 813, 813, 813, 813, 813, 813, 813, 813, 813, 813, 813, 813, 813, 813, 813, 813, 813, 813, 813, 813, 813, 813, 813, 813, 813, 813, 813, 813, 813, 813, 813, 813, 813, 813, 813, 813, 813, 813, 813, 813, 813, 813, 813, 813, 813, 813, 813, 813, 813, 813, 813, 813, 813, 813, 813, 813, 813, 813, 813, 813, 813, 813, 813, 813, 813, 813, 813, 813, 813, 813, 813, 813, 813, 813, 813, 813, 813, 813, 813, 813, 813, 813, 813, 813, 813, 813, 813, 813, 813, 813, 813, 813, 813, 813, 813, 813, 813, 813, 813, 813, 813, 813, 813, 813, 813, 813, 813, 813, 813, 813, 813, 813, 813, 813, 813, 813, 813, 813, 813, 813, 813, 813, 813, 813, 813, 813, 813, 813, 813, 813, 813, 813, 813, 813, 813, 813, 813, 813, 813, 813, 813, 813, 813, 813, 813, 813, 813, 813, 813, 813, 813, 813, 813, 813, 813, 813, 813, 813, 813, 813, 813, 813, 813, 813, 813, 813, 813, 813, 813, 813, 813, 813, 813, 813, 813, 813, 813, 813, 813, 813, 813, 813, 813, 813, 813, 813, 813, 813, 813, 813, 813, 813, 813, 813, 813, 813, 813, 813, 813, 813, 813, 813, 813, 813, 813, 813, 813, 813, 813, 813, 813, 813, 813, 813, 813, 813, 813, 813, 813, 813, 813, 813, 813, 813, 813, 813, 813, 813, 813, 813, 813, 813, 813, 813, 813, 813, 813, 813, 813, 813, 813, 813, 813, 813, 813, 813, 813, 813, 813, 813, 813, 813, 813, 813, 813, 813, 813, 813, 813, 813, 813, 813}, + {812, 812, 812, 812, 812, 812, 812, 812, 812, 812, 812, 812, 812, 812, 812, 812, 812, 812, 812, 812, 812, 812, 812, 812, 812, 812, 812, 812, 812, 812, 812, 812, 812, 812, 812, 812, 812, 812, 812, 812, 812, 812, 812, 812, 812, 812, 812, 812, 812, 812, 812, 812, 812, 812, 812, 812, 812, 812, 812, 812, 812, 812, 812, 812, 812, 812, 812, 812, 812, 812, 812, 812, 812, 812, 812, 812, 812, 812, 812, 812, 812, 812, 812, 812, 812, 812, 812, 812, 812, 812, 812, 812, 812, 812, 812, 812, 812, 812, 812, 812, 812, 812, 812, 812, 812, 812, 812, 812, 812, 812, 812, 812, 812, 812, 
812, 812, 812, 812, 812, 812, 812, 812, 812, 812, 812, 812, 812, 812, 812, 812, 812, 812, 812, 812, 812, 812, 812, 812, 812, 812, 812, 812, 812, 812, 812, 812, 812, 812, 812, 812, 812, 812, 812, 812, 812, 812, 812, 812, 812, 812, 812, 812, 812, 812, 812, 812, 812, 812, 812, 812, 812, 812, 812, 812, 812, 812, 812, 812, 812, 812, 812, 812, 812, 812, 812, 812, 812, 812, 812, 812, 812, 812, 812, 812, 812, 812, 812, 812, 812, 812, 812, 812, 812, 812, 812, 812, 812, 812, 812, 812, 812, 812, 812, 812, 812, 812, 812, 812, 812, 812, 812, 812, 812, 812, 812, 812, 812, 812, 812, 812, 812, 812, 812, 812, 812, 812, 812, 812, 812, 812, 812, 812, 812, 812, 812, 812, 812, 812, 812, 812, 812, 812, 812, 812, 812, 812, 812, 812, 812, 812, 812, 812, 812, 812, 812, 812, 812, 812, 812, 812, 812, 812, 812, 812, 812, 812, 812, 812, 812, 812, 812, 812, 812, 812, 812, 812, 812, 812, 812, 812, 812, 812, 812, 812, 812, 812, 812, 812, 812, 812, 812, 812, 812, 812, 812, 812, 812, 812, 812, 812, 812, 812, 812, 812, 812, 812, 812, 812, 812, 812, 812, 812, 812, 812, 812, 812, 812, 812, 812, 812, 812, 812, 812, 812, 812, 812, 812, 812, 812, 812, 812, 812, 812, 812, 812, 812, 812, 812, 812, 812, 812, 812, 812, 812, 812, 812, 812, 812, 812, 812, 812, 812, 812, 812, 812, 812, 812, 812, 812, 812, 812, 812, 812, 812, 812, 812, 812, 812, 812, 812, 812, 812, 812, 812, 812, 812, 812, 812, 812, 812, 812, 812, 812, 812, 812, 812, 812, 812, 812, 812, 812, 812, 812, 812, 812, 812, 812, 812, 812, 812, 812, 812, 812, 812, 812, 812, 812, 812, 812, 812, 812, 812, 812, 812, 812, 812, 812, 812, 812, 812, 812, 812, 812, 812, 812, 812, 812, 812, 812, 812, 812, 812, 812, 812, 812, 812, 812, 812, 812, 812, 812, 812, 812, 812, 812, 812, 812, 812, 812, 812, 812, 812, 812, 812, 812, 812, 812, 812, 812, 812, 812, 812, 812, 812, 812, 812, 812, 812, 812, 812, 812, 812, 812, 812, 812, 812, 812, 812, 812, 812, 812, 812, 812, 812, 812, 812, 812, 812, 812, 812, 812, 812, 812, 812, 812, 812, 812, 812, 812, 812, 812, 812, 812, 812, 812, 812, 812, 812, 812, 812, 812, 812, 812}, + // 130 + {811, 811, 811, 811, 811, 811, 811, 811, 811, 811, 811, 811, 811, 811, 811, 811, 811, 811, 811, 811, 811, 811, 811, 811, 811, 811, 811, 811, 811, 811, 811, 811, 811, 811, 811, 811, 811, 811, 811, 811, 811, 811, 811, 811, 811, 811, 811, 811, 811, 811, 811, 811, 811, 811, 811, 811, 811, 811, 811, 811, 811, 811, 811, 811, 811, 811, 811, 811, 811, 811, 811, 811, 811, 811, 811, 811, 811, 811, 811, 811, 811, 811, 811, 811, 811, 811, 811, 811, 811, 811, 811, 811, 811, 811, 811, 811, 811, 811, 811, 811, 811, 811, 811, 811, 811, 811, 811, 811, 811, 811, 811, 811, 811, 811, 811, 811, 811, 811, 811, 811, 811, 811, 811, 811, 811, 811, 811, 811, 811, 811, 811, 811, 811, 811, 811, 811, 811, 811, 811, 811, 811, 811, 811, 811, 811, 811, 811, 811, 811, 811, 811, 811, 811, 811, 811, 811, 811, 811, 811, 811, 811, 811, 811, 811, 811, 811, 811, 811, 811, 811, 811, 811, 811, 811, 811, 811, 811, 811, 811, 811, 811, 811, 811, 811, 811, 811, 811, 811, 811, 811, 811, 811, 811, 811, 811, 811, 811, 811, 811, 811, 811, 811, 811, 811, 811, 811, 811, 811, 811, 811, 811, 811, 811, 811, 811, 811, 811, 811, 811, 811, 811, 811, 811, 811, 811, 811, 811, 811, 811, 811, 811, 811, 811, 811, 811, 811, 811, 811, 811, 811, 811, 811, 811, 811, 811, 811, 811, 811, 811, 811, 811, 811, 811, 811, 811, 811, 811, 811, 811, 811, 811, 811, 811, 811, 811, 811, 811, 811, 811, 811, 811, 811, 811, 811, 811, 811, 811, 811, 811, 811, 811, 811, 811, 811, 811, 811, 811, 811, 811, 811, 811, 811, 811, 811, 811, 811, 811, 811, 811, 
811, 811, 811, 811, 811, 811, 811, 811, 811, 811, 811, 811, 811, 811, 811, 811, 811, 811, 811, 811, 811, 811, 811, 811, 811, 811, 811, 811, 811, 811, 811, 811, 811, 811, 811, 811, 811, 811, 811, 811, 811, 811, 811, 811, 811, 811, 811, 811, 811, 811, 811, 811, 811, 811, 811, 811, 811, 811, 811, 811, 811, 811, 811, 811, 811, 811, 811, 811, 811, 811, 811, 811, 811, 811, 811, 811, 811, 811, 811, 811, 811, 811, 811, 811, 811, 811, 811, 811, 811, 811, 811, 811, 811, 811, 811, 811, 811, 811, 811, 811, 811, 811, 811, 811, 811, 811, 811, 811, 811, 811, 811, 811, 811, 811, 811, 811, 811, 811, 811, 811, 811, 811, 811, 811, 811, 811, 811, 811, 811, 811, 811, 811, 811, 811, 811, 811, 811, 811, 811, 811, 811, 811, 811, 811, 811, 811, 811, 811, 811, 811, 811, 811, 811, 811, 811, 811, 811, 811, 811, 811, 811, 811, 811, 811, 811, 811, 811, 811, 811, 811, 811, 811, 811, 811, 811, 811, 811, 811, 811, 811, 811, 811, 811, 811, 811, 811, 811, 811, 811, 811, 811, 811, 811, 811, 811, 811, 811, 811, 811, 811, 811, 811, 811, 811, 811, 811, 811, 811, 811, 811, 811, 811, 811, 811, 811, 811, 811, 811, 811, 811, 811, 811, 811, 811}, + {810, 810, 810, 810, 810, 810, 810, 810, 810, 810, 810, 810, 810, 810, 810, 810, 810, 810, 810, 810, 810, 810, 810, 810, 810, 810, 810, 810, 810, 810, 810, 810, 810, 810, 810, 810, 810, 810, 810, 810, 810, 810, 810, 810, 810, 810, 810, 810, 810, 810, 810, 810, 810, 810, 810, 810, 810, 810, 810, 810, 810, 810, 810, 810, 810, 810, 810, 810, 810, 810, 810, 810, 810, 810, 810, 810, 810, 810, 810, 810, 810, 810, 810, 810, 810, 810, 810, 810, 810, 810, 810, 810, 810, 810, 810, 810, 810, 810, 810, 810, 810, 810, 810, 810, 810, 810, 810, 810, 810, 810, 810, 810, 810, 810, 810, 810, 810, 810, 810, 810, 810, 810, 810, 810, 810, 810, 810, 810, 810, 810, 810, 810, 810, 810, 810, 810, 810, 810, 810, 810, 810, 810, 810, 810, 810, 810, 810, 810, 810, 810, 810, 810, 810, 810, 810, 810, 810, 810, 810, 810, 810, 810, 810, 810, 810, 810, 810, 810, 810, 810, 810, 810, 810, 810, 810, 810, 810, 810, 810, 810, 810, 810, 810, 810, 810, 810, 810, 810, 810, 810, 810, 810, 810, 810, 810, 810, 810, 810, 810, 810, 810, 810, 810, 810, 810, 810, 810, 810, 810, 810, 810, 810, 810, 810, 810, 810, 810, 810, 810, 810, 810, 810, 810, 810, 810, 810, 810, 810, 810, 810, 810, 810, 810, 810, 810, 810, 810, 810, 810, 810, 810, 810, 810, 810, 810, 810, 810, 810, 810, 810, 810, 810, 810, 810, 810, 810, 810, 810, 810, 810, 810, 810, 810, 810, 810, 810, 810, 810, 810, 810, 810, 810, 810, 810, 810, 810, 810, 810, 810, 810, 810, 810, 810, 810, 810, 810, 810, 810, 810, 810, 810, 810, 810, 810, 810, 810, 810, 810, 810, 810, 810, 810, 810, 810, 810, 810, 810, 810, 810, 810, 810, 810, 810, 810, 810, 810, 810, 810, 810, 810, 810, 810, 810, 810, 810, 810, 810, 810, 810, 810, 810, 810, 810, 810, 810, 810, 810, 810, 810, 810, 810, 810, 810, 810, 810, 810, 810, 810, 810, 810, 810, 810, 810, 810, 810, 810, 810, 810, 810, 810, 810, 810, 810, 810, 810, 810, 810, 810, 810, 810, 810, 810, 810, 810, 810, 810, 810, 810, 810, 810, 810, 810, 810, 810, 810, 810, 810, 810, 810, 810, 810, 810, 810, 810, 810, 810, 810, 810, 810, 810, 810, 810, 810, 810, 810, 810, 810, 810, 810, 810, 810, 810, 810, 810, 810, 810, 810, 810, 810, 810, 810, 810, 810, 810, 810, 810, 810, 810, 810, 810, 810, 810, 810, 810, 810, 810, 810, 810, 810, 810, 810, 810, 810, 810, 810, 810, 810, 810, 810, 810, 810, 810, 810, 810, 810, 810, 810, 810, 810, 810, 810, 810, 810, 810, 810, 810, 810, 810, 810, 810, 810, 810, 810, 810, 810, 810, 810, 810, 810, 810, 810, 810, 810, 810, 810, 810, 
810, 810, 810, 810, 810, 810, 810, 810, 810, 810, 810, 810, 810, 810, 810, 810, 810, 810, 810, 810, 810, 810, 810, 810, 810, 810, 810, 810, 810, 810, 810, 810, 810, 810, 810, 810, 810}, + {809, 809, 809, 809, 809, 809, 809, 809, 809, 809, 809, 809, 809, 809, 809, 809, 809, 809, 809, 809, 809, 809, 809, 809, 809, 809, 809, 809, 809, 809, 809, 809, 809, 809, 809, 809, 809, 809, 809, 809, 809, 809, 809, 809, 809, 809, 809, 809, 809, 809, 809, 809, 809, 809, 809, 809, 809, 809, 809, 809, 809, 809, 809, 809, 809, 809, 809, 809, 809, 809, 809, 809, 809, 809, 809, 809, 809, 809, 809, 809, 809, 809, 809, 809, 809, 809, 809, 809, 809, 809, 809, 809, 809, 809, 809, 809, 809, 809, 809, 809, 809, 809, 809, 809, 809, 809, 809, 809, 809, 809, 809, 809, 809, 809, 809, 809, 809, 809, 809, 809, 809, 809, 809, 809, 809, 809, 809, 809, 809, 809, 809, 809, 809, 809, 809, 809, 809, 809, 809, 809, 809, 809, 809, 809, 809, 809, 809, 809, 809, 809, 809, 809, 809, 809, 809, 809, 809, 809, 809, 809, 809, 809, 809, 809, 809, 809, 809, 809, 809, 809, 809, 809, 809, 809, 809, 809, 809, 809, 809, 809, 809, 809, 809, 809, 809, 809, 809, 809, 809, 809, 809, 809, 809, 809, 809, 809, 809, 809, 809, 809, 809, 809, 809, 809, 809, 809, 809, 809, 809, 809, 809, 809, 809, 809, 809, 809, 809, 809, 809, 809, 809, 809, 809, 809, 809, 809, 809, 809, 809, 809, 809, 809, 809, 809, 809, 809, 809, 809, 809, 809, 809, 809, 809, 809, 809, 809, 809, 809, 809, 809, 809, 809, 809, 809, 809, 809, 809, 809, 809, 809, 809, 809, 809, 809, 809, 809, 809, 809, 809, 809, 809, 809, 809, 809, 809, 809, 809, 809, 809, 809, 809, 809, 809, 809, 809, 809, 809, 809, 809, 809, 809, 809, 809, 809, 809, 809, 809, 809, 809, 809, 809, 809, 809, 809, 809, 809, 809, 809, 809, 809, 809, 809, 809, 809, 809, 809, 809, 809, 809, 809, 809, 809, 809, 809, 809, 809, 809, 809, 809, 809, 809, 809, 809, 809, 809, 809, 809, 809, 809, 809, 809, 809, 809, 809, 809, 809, 809, 809, 809, 809, 809, 809, 809, 809, 809, 809, 809, 809, 809, 809, 809, 809, 809, 809, 809, 809, 809, 809, 809, 809, 809, 809, 809, 809, 809, 809, 809, 809, 809, 809, 809, 809, 809, 809, 809, 809, 809, 809, 809, 809, 809, 809, 809, 809, 809, 809, 809, 809, 809, 809, 809, 809, 809, 809, 809, 809, 809, 809, 809, 809, 809, 809, 809, 809, 809, 809, 809, 809, 809, 809, 809, 809, 809, 809, 809, 809, 809, 809, 809, 809, 809, 809, 809, 809, 809, 809, 809, 809, 809, 809, 809, 809, 809, 809, 809, 809, 809, 809, 809, 809, 809, 809, 809, 809, 809, 809, 809, 809, 809, 809, 809, 809, 809, 809, 809, 809, 809, 809, 809, 809, 809, 809, 809, 809, 809, 809, 809, 809, 809, 809, 809, 809, 809, 809, 809, 809, 809, 809, 809, 809, 809, 809, 809, 809, 809, 809, 809, 809, 809, 809, 809, 809, 809, 809, 809, 809, 809, 809, 809, 809, 809, 809, 809, 809, 809, 809, 809, 809, 809, 809, 809, 809, 809}, + {808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 
808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808}, + {807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 
807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807}, + // 135 + {806, 806, 806, 806, 806, 806, 806, 806, 806, 806, 806, 806, 806, 806, 806, 806, 806, 806, 806, 806, 806, 806, 806, 806, 806, 806, 806, 806, 806, 806, 806, 806, 806, 806, 806, 806, 806, 806, 806, 806, 806, 806, 806, 806, 806, 806, 806, 806, 806, 806, 806, 806, 806, 806, 806, 806, 806, 806, 806, 806, 806, 806, 806, 806, 806, 806, 806, 806, 806, 806, 806, 806, 806, 806, 806, 806, 806, 806, 806, 806, 806, 806, 806, 806, 806, 806, 806, 806, 806, 806, 806, 806, 806, 806, 806, 806, 806, 806, 806, 806, 806, 806, 806, 806, 806, 806, 806, 806, 806, 806, 806, 806, 806, 806, 806, 806, 806, 806, 806, 806, 806, 806, 806, 806, 806, 806, 806, 806, 806, 806, 806, 806, 806, 806, 806, 806, 806, 806, 806, 806, 806, 806, 806, 806, 806, 806, 806, 806, 806, 806, 806, 806, 806, 806, 806, 806, 806, 806, 806, 806, 806, 806, 806, 806, 806, 806, 806, 806, 806, 806, 806, 806, 806, 806, 806, 806, 806, 806, 806, 806, 806, 806, 806, 806, 806, 806, 806, 806, 806, 806, 806, 806, 806, 806, 806, 806, 806, 806, 806, 806, 806, 806, 806, 806, 806, 806, 806, 806, 806, 806, 806, 806, 806, 806, 806, 806, 806, 806, 806, 806, 806, 806, 806, 806, 806, 806, 806, 806, 806, 806, 806, 806, 806, 806, 806, 806, 806, 806, 806, 806, 806, 806, 806, 806, 806, 806, 806, 806, 806, 806, 806, 806, 806, 806, 806, 806, 806, 806, 806, 806, 806, 806, 806, 806, 806, 806, 806, 806, 806, 806, 806, 806, 806, 806, 806, 806, 806, 806, 806, 806, 806, 806, 806, 806, 806, 806, 806, 806, 806, 806, 806, 806, 806, 806, 806, 806, 806, 806, 806, 806, 806, 806, 806, 806, 806, 806, 806, 806, 806, 806, 806, 806, 806, 806, 806, 806, 806, 806, 806, 806, 806, 806, 806, 806, 806, 806, 806, 806, 806, 806, 806, 806, 806, 806, 806, 806, 806, 806, 806, 806, 806, 806, 806, 806, 806, 806, 806, 806, 806, 806, 806, 806, 806, 806, 806, 806, 806, 806, 806, 806, 806, 806, 806, 806, 806, 806, 806, 806, 806, 806, 806, 806, 806, 806, 806, 806, 806, 806, 806, 806, 806, 806, 806, 806, 806, 806, 806, 806, 806, 806, 806, 806, 806, 806, 806, 806, 806, 806, 806, 806, 806, 806, 806, 806, 806, 806, 806, 806, 806, 806, 806, 806, 806, 806, 806, 806, 806, 806, 806, 806, 806, 806, 806, 806, 806, 806, 806, 806, 806, 806, 806, 806, 806, 806, 806, 806, 806, 806, 806, 806, 806, 806, 806, 806, 806, 806, 806, 806, 806, 806, 806, 806, 806, 806, 806, 806, 806, 806, 806, 806, 806, 806, 806, 806, 806, 806, 806, 806, 806, 806, 806, 806, 806, 806, 806, 806, 806, 806, 806, 806, 806, 806, 806, 806, 806, 806, 806, 806, 806, 806, 806, 806, 806, 806, 806, 806, 806, 806, 806, 806, 806, 806, 806, 806, 806, 806, 806, 806, 806, 806, 806, 806, 806, 806, 806, 806, 806, 806, 806, 806, 806, 
806, 806}, + {805, 805, 805, 805, 805, 805, 805, 805, 805, 805, 805, 805, 805, 805, 805, 805, 805, 805, 805, 805, 805, 805, 805, 805, 805, 805, 805, 805, 805, 805, 805, 805, 805, 805, 805, 805, 805, 805, 805, 805, 805, 805, 805, 805, 805, 805, 805, 805, 805, 805, 805, 805, 805, 805, 805, 805, 805, 805, 805, 805, 805, 805, 805, 805, 805, 805, 805, 805, 805, 805, 805, 805, 805, 805, 805, 805, 805, 805, 805, 805, 805, 805, 805, 805, 805, 805, 805, 805, 805, 805, 805, 805, 805, 805, 805, 805, 805, 805, 805, 805, 805, 805, 805, 805, 805, 805, 805, 805, 805, 805, 805, 805, 805, 805, 805, 805, 805, 805, 805, 805, 805, 805, 805, 805, 805, 805, 805, 805, 805, 805, 805, 805, 805, 805, 805, 805, 805, 805, 805, 805, 805, 805, 805, 805, 805, 805, 805, 805, 805, 805, 805, 805, 805, 805, 805, 805, 805, 805, 805, 805, 805, 805, 805, 805, 805, 805, 805, 805, 805, 805, 805, 805, 805, 805, 805, 805, 805, 805, 805, 805, 805, 805, 805, 805, 805, 805, 805, 805, 805, 805, 805, 805, 805, 805, 805, 805, 805, 805, 805, 805, 805, 805, 805, 805, 805, 805, 805, 805, 805, 805, 805, 805, 805, 805, 805, 805, 805, 805, 805, 805, 805, 805, 805, 805, 805, 805, 805, 805, 805, 805, 805, 805, 805, 805, 805, 805, 805, 805, 805, 805, 805, 805, 805, 805, 805, 805, 805, 805, 805, 805, 805, 805, 805, 805, 805, 805, 805, 805, 805, 805, 805, 805, 805, 805, 805, 805, 805, 805, 805, 805, 805, 805, 805, 805, 805, 805, 805, 805, 805, 805, 805, 805, 805, 805, 805, 805, 805, 805, 805, 805, 805, 805, 805, 805, 805, 805, 805, 805, 805, 805, 805, 805, 805, 805, 805, 805, 805, 805, 805, 805, 805, 805, 805, 805, 805, 805, 805, 805, 805, 805, 805, 805, 805, 805, 805, 805, 805, 805, 805, 805, 805, 805, 805, 805, 805, 805, 805, 805, 805, 805, 805, 805, 805, 805, 805, 805, 805, 805, 805, 805, 805, 805, 805, 805, 805, 805, 805, 805, 805, 805, 805, 805, 805, 805, 805, 805, 805, 805, 805, 805, 805, 805, 805, 805, 805, 805, 805, 805, 805, 805, 805, 805, 805, 805, 805, 805, 805, 805, 805, 805, 805, 805, 805, 805, 805, 805, 805, 805, 805, 805, 805, 805, 805, 805, 805, 805, 805, 805, 805, 805, 805, 805, 805, 805, 805, 805, 805, 805, 805, 805, 805, 805, 805, 805, 805, 805, 805, 805, 805, 805, 805, 805, 805, 805, 805, 805, 805, 805, 805, 805, 805, 805, 805, 805, 805, 805, 805, 805, 805, 805, 805, 805, 805, 805, 805, 805, 805, 805, 805, 805, 805, 805, 805, 805, 805, 805, 805, 805, 805, 805, 805, 805, 805, 805, 805, 805, 805, 805, 805, 805, 805, 805, 805, 805, 805, 805, 805, 805, 805, 805, 805, 805, 805, 805, 805, 805, 805, 805, 805, 805, 805, 805, 805, 805, 805, 805, 805, 805, 805, 805, 805, 805, 805, 805, 805, 805, 805, 805, 805, 805, 805, 805, 805}, + {804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 
804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804}, + {803, 803, 803, 803, 803, 803, 803, 803, 803, 803, 803, 803, 803, 803, 803, 803, 803, 803, 803, 803, 803, 803, 803, 803, 803, 803, 803, 803, 803, 803, 803, 803, 803, 803, 803, 803, 803, 803, 803, 803, 803, 803, 803, 803, 803, 803, 803, 803, 803, 803, 803, 803, 803, 803, 803, 803, 803, 803, 803, 803, 803, 803, 803, 803, 803, 803, 803, 803, 803, 803, 803, 803, 803, 803, 803, 803, 803, 803, 803, 803, 803, 803, 803, 803, 803, 803, 803, 803, 803, 803, 803, 803, 803, 803, 803, 803, 803, 803, 803, 803, 803, 803, 803, 803, 803, 803, 803, 803, 803, 803, 803, 803, 803, 803, 803, 803, 803, 803, 803, 803, 803, 803, 803, 803, 803, 803, 803, 803, 803, 803, 803, 803, 803, 803, 803, 803, 803, 803, 803, 803, 803, 803, 803, 803, 803, 803, 803, 803, 803, 803, 803, 803, 803, 803, 803, 803, 803, 803, 803, 803, 803, 803, 803, 803, 803, 803, 803, 803, 803, 803, 803, 803, 803, 803, 803, 803, 803, 803, 803, 803, 803, 803, 803, 803, 803, 803, 803, 803, 803, 803, 803, 803, 803, 803, 803, 803, 803, 803, 803, 803, 803, 803, 803, 803, 803, 803, 803, 803, 803, 803, 803, 803, 803, 803, 803, 803, 803, 803, 803, 803, 803, 803, 803, 803, 803, 803, 803, 803, 803, 803, 803, 803, 803, 803, 803, 803, 803, 803, 803, 803, 803, 803, 803, 803, 803, 803, 803, 803, 803, 803, 803, 803, 803, 803, 803, 803, 803, 803, 803, 803, 803, 803, 803, 803, 803, 803, 803, 803, 803, 803, 803, 803, 803, 803, 803, 803, 803, 803, 803, 803, 803, 803, 803, 803, 803, 803, 803, 803, 803, 803, 803, 803, 803, 803, 803, 803, 803, 803, 803, 803, 803, 803, 803, 803, 803, 803, 803, 803, 803, 803, 803, 803, 803, 803, 803, 803, 803, 803, 803, 803, 803, 803, 803, 803, 803, 803, 803, 803, 803, 803, 803, 803, 803, 803, 803, 803, 803, 803, 803, 803, 803, 803, 803, 803, 803, 803, 803, 803, 803, 803, 803, 803, 803, 803, 803, 803, 803, 803, 803, 803, 803, 803, 803, 803, 803, 803, 803, 803, 803, 803, 803, 
803, 803, 803, 803, 803, 803, 803, 803, 803, 803, 803, 803, 803, 803, 803, 803, 803, 803, 803, 803, 803, 803, 803, 803, 803, 803, 803, 803, 803, 803, 803, 803, 803, 803, 803, 803, 803, 803, 803, 803, 803, 803, 803, 803, 803, 803, 803, 803, 803, 803, 803, 803, 803, 803, 803, 803, 803, 803, 803, 803, 803, 803, 803, 803, 803, 803, 803, 803, 803, 803, 803, 803, 803, 803, 803, 803, 803, 803, 803, 803, 803, 803, 803, 803, 803, 803, 803, 803, 803, 803, 803, 803, 803, 803, 803, 803, 803, 803, 803, 803, 803, 803, 803, 803, 803, 803, 803, 803, 803, 803, 803, 803, 803, 803, 803, 803, 803, 803, 803, 803, 803, 803, 803, 803, 803, 803, 803, 803, 803, 803, 803, 803, 803, 803, 803, 803, 803, 803, 803, 803, 803, 803, 803, 803, 803, 803, 803, 803, 803, 803, 803, 803}, + {802, 802, 802, 802, 802, 802, 802, 802, 802, 802, 802, 802, 802, 802, 802, 802, 802, 802, 802, 802, 802, 802, 802, 802, 802, 802, 802, 802, 802, 802, 802, 802, 802, 802, 802, 802, 802, 802, 802, 802, 802, 802, 802, 802, 802, 802, 802, 802, 802, 802, 802, 802, 802, 802, 802, 802, 802, 802, 802, 802, 802, 802, 802, 802, 802, 802, 802, 802, 802, 802, 802, 802, 802, 802, 802, 802, 802, 802, 802, 802, 802, 802, 802, 802, 802, 802, 802, 802, 802, 802, 802, 802, 802, 802, 802, 802, 802, 802, 802, 802, 802, 802, 802, 802, 802, 802, 802, 802, 802, 802, 802, 802, 802, 802, 802, 802, 802, 802, 802, 802, 802, 802, 802, 802, 802, 802, 802, 802, 802, 802, 802, 802, 802, 802, 802, 802, 802, 802, 802, 802, 802, 802, 802, 802, 802, 802, 802, 802, 802, 802, 802, 802, 802, 802, 802, 802, 802, 802, 802, 802, 802, 802, 802, 802, 802, 802, 802, 802, 802, 802, 802, 802, 802, 802, 802, 802, 802, 802, 802, 802, 802, 802, 802, 802, 802, 802, 802, 802, 802, 802, 802, 802, 802, 802, 802, 802, 802, 802, 802, 802, 802, 802, 802, 802, 802, 802, 802, 802, 802, 802, 802, 802, 802, 802, 802, 802, 802, 802, 802, 802, 802, 802, 802, 802, 802, 802, 802, 802, 802, 802, 802, 802, 802, 802, 802, 802, 802, 802, 802, 802, 802, 802, 802, 802, 802, 802, 802, 802, 802, 802, 802, 802, 802, 802, 802, 802, 802, 802, 802, 802, 802, 802, 802, 802, 802, 802, 802, 802, 802, 802, 802, 802, 802, 802, 802, 802, 802, 802, 802, 802, 802, 802, 802, 802, 802, 802, 802, 802, 802, 802, 802, 802, 802, 802, 802, 802, 802, 802, 802, 802, 802, 802, 802, 802, 802, 802, 802, 802, 802, 802, 802, 802, 802, 802, 802, 802, 802, 802, 802, 802, 802, 802, 802, 802, 802, 802, 802, 802, 802, 802, 802, 802, 802, 802, 802, 802, 802, 802, 802, 802, 802, 802, 802, 802, 802, 802, 802, 802, 802, 802, 802, 802, 802, 802, 802, 802, 802, 802, 802, 802, 802, 802, 802, 802, 802, 802, 802, 802, 802, 802, 802, 802, 802, 802, 802, 802, 802, 802, 802, 802, 802, 802, 802, 802, 802, 802, 802, 802, 802, 802, 802, 802, 802, 802, 802, 802, 802, 802, 802, 802, 802, 802, 802, 802, 802, 802, 802, 802, 802, 802, 802, 802, 802, 802, 802, 802, 802, 802, 802, 802, 802, 802, 802, 802, 802, 802, 802, 802, 802, 802, 802, 802, 802, 802, 802, 802, 802, 802, 802, 802, 802, 802, 802, 802, 802, 802, 802, 802, 802, 802, 802, 802, 802, 802, 802, 802, 802, 802, 802, 802, 802, 802, 802, 802, 802, 802, 802, 802, 802, 802, 802, 802, 802, 802, 802, 802, 802, 802, 802, 802, 802, 802, 802, 802, 802, 802, 802, 802, 802, 802, 802, 802, 802, 802, 802, 802, 802, 802, 802, 802, 802, 802, 802, 802, 802, 802, 802, 802, 802, 802, 802, 802, 802, 802, 802, 802, 802, 802, 802, 802, 802, 802, 802}, + // 140 + {801, 801, 801, 801, 801, 801, 801, 801, 801, 801, 801, 801, 801, 801, 801, 801, 801, 801, 801, 801, 801, 801, 801, 801, 801, 801, 801, 801, 801, 801, 801, 801, 
801, 801, 801, 801, 801, 801, 801, 801, 801, 801, 801, 801, 801, 801, 801, 801, 801, 801, 801, 801, 801, 801, 801, 801, 801, 801, 801, 801, 801, 801, 801, 801, 801, 801, 801, 801, 801, 801, 801, 801, 801, 801, 801, 801, 801, 801, 801, 801, 801, 801, 801, 801, 801, 801, 801, 801, 801, 801, 801, 801, 801, 801, 801, 801, 801, 801, 801, 801, 801, 801, 801, 801, 801, 801, 801, 801, 801, 801, 801, 801, 801, 801, 801, 801, 801, 801, 801, 801, 801, 801, 801, 801, 801, 801, 801, 801, 801, 801, 801, 801, 801, 801, 801, 801, 801, 801, 801, 801, 801, 801, 801, 801, 801, 801, 801, 801, 801, 801, 801, 801, 801, 801, 801, 801, 801, 801, 801, 801, 801, 801, 801, 801, 801, 801, 801, 801, 801, 801, 801, 801, 801, 801, 801, 801, 801, 801, 801, 801, 801, 801, 801, 801, 801, 801, 801, 801, 801, 801, 801, 801, 801, 801, 801, 801, 801, 801, 801, 801, 801, 801, 801, 801, 801, 801, 801, 801, 801, 801, 801, 801, 801, 801, 801, 801, 801, 801, 801, 801, 801, 801, 801, 801, 801, 801, 801, 801, 801, 801, 801, 801, 801, 801, 801, 801, 801, 801, 801, 801, 801, 801, 801, 801, 801, 801, 801, 801, 801, 801, 801, 801, 801, 801, 801, 801, 801, 801, 801, 801, 801, 801, 801, 801, 801, 801, 801, 801, 801, 801, 801, 801, 801, 801, 801, 801, 801, 801, 801, 801, 801, 801, 801, 801, 801, 801, 801, 801, 801, 801, 801, 801, 801, 801, 801, 801, 801, 801, 801, 801, 801, 801, 801, 801, 801, 801, 801, 801, 801, 801, 801, 801, 801, 801, 801, 801, 801, 801, 801, 801, 801, 801, 801, 801, 801, 801, 801, 801, 801, 801, 801, 801, 801, 801, 801, 801, 801, 801, 801, 801, 801, 801, 801, 801, 801, 801, 801, 801, 801, 801, 801, 801, 801, 801, 801, 801, 801, 801, 801, 801, 801, 801, 801, 801, 801, 801, 801, 801, 801, 801, 801, 801, 801, 801, 801, 801, 801, 801, 801, 801, 801, 801, 801, 801, 801, 801, 801, 801, 801, 801, 801, 801, 801, 801, 801, 801, 801, 801, 801, 801, 801, 801, 801, 801, 801, 801, 801, 801, 801, 801, 801, 801, 801, 801, 801, 801, 801, 801, 801, 801, 801, 801, 801, 801, 801, 801, 801, 801, 801, 801, 801, 801, 801, 801, 801, 801, 801, 801, 801, 801, 801, 801, 801, 801, 801, 801, 801, 801, 801, 801, 801, 801, 801, 801, 801, 801, 801, 801, 801, 801, 801, 801, 801, 801, 801, 801, 801, 801, 801, 801, 801, 801, 801, 801, 801, 801, 801, 801, 801, 801, 801, 801, 801, 801, 801, 801, 801, 801, 801, 801, 801, 801, 801, 801, 801, 801, 801, 801, 801, 801, 801, 801, 801, 801, 801, 801, 801, 801, 801, 801, 801, 801, 801, 801, 801, 801, 801, 801, 801, 801, 801, 801, 801}, + {800, 800, 800, 800, 800, 800, 800, 800, 800, 800, 800, 800, 800, 800, 800, 800, 800, 800, 800, 800, 800, 800, 800, 800, 800, 800, 800, 800, 800, 800, 800, 800, 800, 800, 800, 800, 800, 800, 800, 800, 800, 800, 800, 800, 800, 800, 800, 800, 800, 800, 800, 800, 800, 800, 800, 800, 800, 800, 800, 800, 800, 800, 800, 800, 800, 800, 800, 800, 800, 800, 800, 800, 800, 800, 800, 800, 800, 800, 800, 800, 800, 800, 800, 800, 800, 800, 800, 800, 800, 800, 800, 800, 800, 800, 800, 800, 800, 800, 800, 800, 800, 800, 800, 800, 800, 800, 800, 800, 800, 800, 800, 800, 800, 800, 800, 800, 800, 800, 800, 800, 800, 800, 800, 800, 800, 800, 800, 800, 800, 800, 800, 800, 800, 800, 800, 800, 800, 800, 800, 800, 800, 800, 800, 800, 800, 800, 800, 800, 800, 800, 800, 800, 800, 800, 800, 800, 800, 800, 800, 800, 800, 800, 800, 800, 800, 800, 800, 800, 800, 800, 800, 800, 800, 800, 800, 800, 800, 800, 800, 800, 800, 800, 800, 800, 800, 800, 800, 800, 800, 800, 800, 800, 800, 800, 800, 800, 800, 800, 800, 800, 800, 800, 800, 800, 800, 800, 800, 800, 800, 800, 800, 800, 800, 800, 800, 800, 800, 800, 800, 
800, 800, 800, 800, 800, 800, 800, 800, 800, 800, 800, 800, 800, 800, 800, 800, 800, 800, 800, 800, 800, 800, 800, 800, 800, 800, 800, 800, 800, 800, 800, 800, 800, 800, 800, 800, 800, 800, 800, 800, 800, 800, 800, 800, 800, 800, 800, 800, 800, 800, 800, 800, 800, 800, 800, 800, 800, 800, 800, 800, 800, 800, 800, 800, 800, 800, 800, 800, 800, 800, 800, 800, 800, 800, 800, 800, 800, 800, 800, 800, 800, 800, 800, 800, 800, 800, 800, 800, 800, 800, 800, 800, 800, 800, 800, 800, 800, 800, 800, 800, 800, 800, 800, 800, 800, 800, 800, 800, 800, 800, 800, 800, 800, 800, 800, 800, 800, 800, 800, 800, 800, 800, 800, 800, 800, 800, 800, 800, 800, 800, 800, 800, 800, 800, 800, 800, 800, 800, 800, 800, 800, 800, 800, 800, 800, 800, 800, 800, 800, 800, 800, 800, 800, 800, 800, 800, 800, 800, 800, 800, 800, 800, 800, 800, 800, 800, 800, 800, 800, 800, 800, 800, 800, 800, 800, 800, 800, 800, 800, 800, 800, 800, 800, 800, 800, 800, 800, 800, 800, 800, 800, 800, 800, 800, 800, 800, 800, 800, 800, 800, 800, 800, 800, 800, 800, 800, 800, 800, 800, 800, 800, 800, 800, 800, 800, 800, 800, 800, 800, 800, 800, 800, 800, 800, 800, 800, 800, 800, 800, 800, 800, 800, 800, 800, 800, 800, 800, 800, 800, 800, 800, 800, 800, 800, 800, 800, 800, 800, 800, 800, 800, 800, 800, 800, 800, 800, 800, 800, 800, 800, 800, 800, 800, 800, 800, 800, 800, 800, 800, 800, 800, 800, 800, 800, 800, 800, 800, 800, 800, 800, 800, 800, 800, 800, 800, 800, 800, 800, 800, 800, 800, 800, 800, 800, 800, 800, 800, 800, 800, 800, 800, 800, 800, 800}, + {799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 
799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799}, + {798, 798, 798, 798, 798, 798, 798, 798, 798, 798, 798, 798, 798, 798, 798, 798, 798, 798, 798, 798, 798, 798, 798, 798, 798, 798, 798, 798, 798, 798, 798, 798, 798, 798, 798, 798, 798, 798, 798, 798, 798, 798, 798, 798, 798, 798, 798, 798, 798, 798, 798, 798, 798, 798, 798, 798, 798, 798, 798, 798, 798, 798, 798, 798, 798, 798, 798, 798, 798, 798, 798, 798, 798, 798, 798, 798, 798, 798, 798, 798, 798, 798, 798, 798, 798, 798, 798, 798, 798, 798, 798, 798, 798, 798, 798, 798, 798, 798, 798, 798, 798, 798, 798, 798, 798, 798, 798, 798, 798, 798, 798, 798, 798, 798, 798, 798, 798, 798, 798, 798, 798, 798, 798, 798, 798, 798, 798, 798, 798, 798, 798, 798, 798, 798, 798, 798, 798, 798, 798, 798, 798, 798, 798, 798, 798, 798, 798, 798, 798, 798, 798, 798, 798, 798, 798, 798, 798, 798, 798, 798, 798, 798, 798, 798, 798, 798, 798, 798, 798, 798, 798, 798, 798, 798, 798, 798, 798, 798, 798, 798, 798, 798, 798, 798, 798, 798, 798, 798, 798, 798, 798, 798, 798, 798, 798, 798, 798, 798, 798, 798, 798, 798, 798, 798, 798, 798, 798, 798, 798, 798, 798, 798, 798, 798, 798, 798, 798, 798, 798, 798, 798, 798, 798, 798, 798, 798, 798, 798, 798, 798, 798, 798, 798, 798, 798, 798, 798, 798, 798, 798, 798, 798, 798, 798, 798, 798, 798, 798, 798, 798, 798, 798, 798, 798, 798, 798, 798, 798, 798, 798, 798, 798, 798, 798, 798, 798, 798, 798, 798, 798, 798, 798, 798, 798, 798, 798, 798, 798, 798, 798, 798, 798, 798, 798, 798, 798, 798, 798, 798, 798, 798, 798, 798, 798, 798, 798, 798, 798, 798, 798, 798, 798, 798, 798, 798, 798, 798, 798, 798, 798, 798, 798, 798, 798, 798, 798, 798, 798, 798, 798, 798, 798, 798, 798, 798, 798, 798, 798, 798, 798, 798, 798, 798, 798, 798, 798, 798, 798, 798, 798, 798, 798, 798, 798, 798, 798, 798, 798, 798, 798, 798, 798, 798, 798, 798, 798, 798, 798, 798, 798, 798, 798, 798, 798, 798, 798, 798, 798, 798, 798, 798, 798, 798, 798, 798, 798, 798, 798, 798, 798, 798, 798, 798, 798, 798, 798, 798, 798, 798, 798, 798, 798, 798, 798, 798, 798, 798, 798, 798, 798, 798, 798, 798, 798, 798, 798, 798, 798, 798, 798, 798, 798, 798, 798, 798, 798, 798, 798, 798, 798, 798, 798, 798, 798, 798, 798, 798, 798, 798, 798, 798, 798, 798, 798, 798, 798, 798, 798, 798, 798, 798, 798, 798, 798, 798, 798, 798, 798, 798, 798, 798, 798, 798, 798, 798, 798, 798, 798, 798, 798, 798, 798, 798, 798, 798, 798, 798, 798, 798, 798, 798, 798, 798, 798, 798, 798, 798, 798, 798, 798, 798, 798, 798, 798, 798, 798, 798, 798, 798, 798, 798, 798, 798, 798, 798, 798, 798, 798, 798, 798, 798, 798, 798, 798, 798, 798, 798, 798, 798, 798, 798, 798, 798, 798, 798, 798, 798, 798, 798, 798, 798, 798, 798}, + {797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 
797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797}, + // 145 + {796, 796, 796, 796, 796, 796, 796, 796, 796, 796, 796, 796, 796, 796, 796, 796, 796, 796, 796, 796, 796, 796, 796, 796, 796, 796, 796, 796, 796, 796, 796, 796, 796, 796, 796, 796, 796, 796, 796, 796, 796, 796, 796, 796, 796, 796, 796, 796, 796, 796, 796, 796, 796, 796, 796, 796, 796, 796, 796, 796, 796, 796, 796, 796, 796, 796, 796, 796, 796, 796, 796, 796, 796, 796, 796, 796, 796, 796, 796, 796, 796, 796, 796, 796, 796, 796, 796, 796, 796, 796, 796, 796, 796, 796, 796, 796, 796, 796, 796, 796, 796, 796, 796, 796, 796, 796, 796, 796, 796, 796, 796, 796, 796, 796, 796, 796, 796, 796, 796, 796, 796, 796, 796, 796, 796, 796, 796, 796, 796, 796, 796, 796, 796, 796, 796, 796, 796, 796, 796, 796, 796, 796, 796, 796, 796, 796, 796, 796, 796, 796, 796, 796, 796, 796, 796, 796, 796, 796, 796, 796, 796, 796, 796, 796, 796, 796, 796, 796, 796, 796, 796, 796, 796, 796, 796, 796, 796, 796, 796, 796, 796, 796, 796, 796, 796, 796, 796, 796, 796, 796, 796, 796, 796, 796, 796, 796, 796, 796, 796, 796, 796, 796, 796, 796, 796, 796, 796, 796, 796, 796, 796, 796, 796, 796, 796, 796, 796, 796, 796, 796, 796, 796, 796, 796, 796, 796, 796, 796, 796, 796, 796, 796, 796, 796, 796, 796, 796, 796, 796, 796, 796, 796, 796, 796, 796, 796, 796, 796, 796, 796, 796, 796, 796, 796, 
796, 796, 796, 796, 796, 796, 796, 796, 796, 796, 796, 796, 796, 796, 796, 796, 796, 796, 796, 796, 796, 796, 796, 796, 796, 796, 796, 796, 796, 796, 796, 796, 796, 796, 796, 796, 796, 796, 796, 796, 796, 796, 796, 796, 796, 796, 796, 796, 796, 796, 796, 796, 796, 796, 796, 796, 796, 796, 796, 796, 796, 796, 796, 796, 796, 796, 796, 796, 796, 796, 796, 796, 796, 796, 796, 796, 796, 796, 796, 796, 796, 796, 796, 796, 796, 796, 796, 796, 796, 796, 796, 796, 796, 796, 796, 796, 796, 796, 796, 796, 796, 796, 796, 796, 796, 796, 796, 796, 796, 796, 796, 796, 796, 796, 796, 796, 796, 796, 796, 796, 796, 796, 796, 796, 796, 796, 796, 796, 796, 796, 796, 796, 796, 796, 796, 796, 796, 796, 796, 796, 796, 796, 796, 796, 796, 796, 796, 796, 796, 796, 796, 796, 796, 796, 796, 796, 796, 796, 796, 796, 796, 796, 796, 796, 796, 796, 796, 796, 796, 796, 796, 796, 796, 796, 796, 796, 796, 796, 796, 796, 796, 796, 796, 796, 796, 796, 796, 796, 796, 796, 796, 796, 796, 796, 796, 796, 796, 796, 796, 796, 796, 796, 796, 796, 796, 796, 796, 796, 796, 796, 796, 796, 796, 796, 796, 796, 796, 796, 796, 796, 796, 796, 796, 796, 796, 796, 796, 796, 796, 796, 796, 796, 796, 796, 796, 796, 796, 796, 796, 796, 796, 796, 796, 796, 796, 796, 796, 796, 796, 796, 796, 796, 796, 796, 796, 796, 796, 796, 796, 796, 796, 796, 796, 796, 796, 796, 796, 796, 796}, + {795, 795, 795, 795, 795, 795, 795, 795, 795, 795, 795, 795, 795, 795, 795, 795, 795, 795, 795, 795, 795, 795, 795, 795, 795, 795, 795, 795, 795, 795, 795, 795, 795, 795, 795, 795, 795, 795, 795, 795, 795, 795, 795, 795, 795, 795, 795, 795, 795, 795, 795, 795, 795, 795, 795, 795, 795, 795, 795, 795, 795, 795, 795, 795, 795, 795, 795, 795, 795, 795, 795, 795, 795, 795, 795, 795, 795, 795, 795, 795, 795, 795, 795, 795, 795, 795, 795, 795, 795, 795, 795, 795, 795, 795, 795, 795, 795, 795, 795, 795, 795, 795, 795, 795, 795, 795, 795, 795, 795, 795, 795, 795, 795, 795, 795, 795, 795, 795, 795, 795, 795, 795, 795, 795, 795, 795, 795, 795, 795, 795, 795, 795, 795, 795, 795, 795, 795, 795, 795, 795, 795, 795, 795, 795, 795, 795, 795, 795, 795, 795, 795, 795, 795, 795, 795, 795, 795, 795, 795, 795, 795, 795, 795, 795, 795, 795, 795, 795, 795, 795, 795, 795, 795, 795, 795, 795, 795, 795, 795, 795, 795, 795, 795, 795, 795, 795, 795, 795, 795, 795, 795, 795, 795, 795, 795, 795, 795, 795, 795, 795, 795, 795, 795, 795, 795, 795, 795, 795, 795, 795, 795, 795, 795, 795, 795, 795, 795, 795, 795, 795, 795, 795, 795, 795, 795, 795, 795, 795, 795, 795, 795, 795, 795, 795, 795, 795, 795, 795, 795, 795, 795, 795, 795, 795, 795, 795, 795, 795, 795, 795, 795, 795, 795, 795, 795, 795, 795, 795, 795, 795, 795, 795, 795, 795, 795, 795, 795, 795, 795, 795, 795, 795, 795, 795, 795, 795, 795, 795, 795, 795, 795, 795, 795, 795, 795, 795, 795, 795, 795, 795, 795, 795, 795, 795, 795, 795, 795, 795, 795, 795, 795, 795, 795, 795, 795, 795, 795, 795, 795, 795, 795, 795, 795, 795, 795, 795, 795, 795, 795, 795, 795, 795, 795, 795, 795, 795, 795, 795, 795, 795, 795, 795, 795, 795, 795, 795, 795, 795, 795, 795, 795, 795, 795, 795, 795, 795, 795, 795, 795, 795, 795, 795, 795, 795, 795, 795, 795, 795, 795, 795, 795, 795, 795, 795, 795, 795, 795, 795, 795, 795, 795, 795, 795, 795, 795, 795, 795, 795, 795, 795, 795, 795, 795, 795, 795, 795, 795, 795, 795, 795, 795, 795, 795, 795, 795, 795, 795, 795, 795, 795, 795, 795, 795, 795, 795, 795, 795, 795, 795, 795, 795, 795, 795, 795, 795, 795, 795, 795, 795, 795, 795, 795, 795, 795, 795, 795, 795, 795, 795, 795, 795, 795, 795, 795, 795, 795, 795, 795, 795, 795, 795, 
795, 795, 795, 795, 795, 795, 795, 795, 795, 795, 795, 795, 795, 795, 795, 795, 795, 795, 795, 795, 795, 795, 795, 795, 795, 795, 795, 795, 795, 795, 795, 795, 795, 795, 795, 795, 795, 795, 795, 795, 795, 795, 795, 795, 795, 795, 795, 795, 795, 795, 795, 795, 795, 795, 795, 795, 795, 795, 795, 795, 795, 795, 795, 795, 795, 795, 795, 795, 795, 795, 795, 795, 795, 795, 795, 795, 795, 795, 795, 795, 795, 795}, + {794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794}, + {793, 793, 793, 793, 793, 793, 793, 793, 793, 793, 793, 793, 793, 793, 793, 793, 793, 793, 793, 793, 793, 793, 793, 793, 793, 793, 793, 793, 793, 793, 793, 793, 793, 793, 793, 793, 793, 793, 793, 793, 793, 793, 793, 793, 793, 793, 793, 793, 793, 793, 793, 793, 793, 793, 793, 793, 793, 793, 793, 793, 793, 793, 793, 793, 793, 793, 793, 793, 793, 793, 793, 793, 793, 793, 793, 793, 793, 793, 793, 793, 793, 793, 793, 793, 793, 793, 793, 793, 793, 793, 793, 793, 793, 793, 793, 793, 793, 793, 793, 793, 793, 793, 793, 793, 
793, 793, 793, 793, 793, 793, 793, 793, 793, 793, 793, 793, 793, 793, 793, 793, 793, 793, 793, 793, 793, 793, 793, 793, 793, 793, 793, 793, 793, 793, 793, 793, 793, 793, 793, 793, 793, 793, 793, 793, 793, 793, 793, 793, 793, 793, 793, 793, 793, 793, 793, 793, 793, 793, 793, 793, 793, 793, 793, 793, 793, 793, 793, 793, 793, 793, 793, 793, 793, 793, 793, 793, 793, 793, 793, 793, 793, 793, 793, 793, 793, 793, 793, 793, 793, 793, 793, 793, 793, 793, 793, 793, 793, 793, 793, 793, 793, 793, 793, 793, 793, 793, 793, 793, 793, 793, 793, 793, 793, 793, 793, 793, 793, 793, 793, 793, 793, 793, 793, 793, 793, 793, 793, 793, 793, 793, 793, 793, 793, 793, 793, 793, 793, 793, 793, 793, 793, 793, 793, 793, 793, 793, 793, 793, 793, 793, 793, 793, 793, 793, 793, 793, 793, 793, 793, 793, 793, 793, 793, 793, 793, 793, 793, 793, 793, 793, 793, 793, 793, 793, 793, 793, 793, 793, 793, 793, 793, 793, 793, 793, 793, 793, 793, 793, 793, 793, 793, 793, 793, 793, 793, 793, 793, 793, 793, 793, 793, 793, 793, 793, 793, 793, 793, 793, 793, 793, 793, 793, 793, 793, 793, 793, 793, 793, 793, 793, 793, 793, 793, 793, 793, 793, 793, 793, 793, 793, 793, 793, 793, 793, 793, 793, 793, 793, 793, 793, 793, 793, 793, 793, 793, 793, 793, 793, 793, 793, 793, 793, 793, 793, 793, 793, 793, 793, 793, 793, 793, 793, 793, 793, 793, 793, 793, 793, 793, 793, 793, 793, 793, 793, 793, 793, 793, 793, 793, 793, 793, 793, 793, 793, 793, 793, 793, 793, 793, 793, 793, 793, 793, 793, 793, 793, 793, 793, 793, 793, 793, 793, 793, 793, 793, 793, 793, 793, 793, 793, 793, 793, 793, 793, 793, 793, 793, 793, 793, 793, 793, 793, 793, 793, 793, 793, 793, 793, 793, 793, 793, 793, 793, 793, 793, 793, 793, 793, 793, 793, 793, 793, 793, 793, 793, 793, 793, 793, 793, 793, 793, 793, 793, 793, 793, 793, 793, 793, 793, 793, 793, 793, 793, 793, 793, 793, 793, 793, 793, 793, 793, 793, 793, 793, 793, 793, 793, 793, 793, 793, 793, 793, 793, 793, 793, 793, 793, 793, 793, 793, 793, 793, 793, 793, 793, 793, 793, 793, 793, 793, 793, 793, 793, 793, 793, 793, 793, 793, 793, 793, 793, 793, 793, 793, 793, 793, 793, 793, 793, 793, 793, 793, 793}, + {792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 
792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792}, + // 150 + {791, 791, 791, 791, 791, 791, 791, 791, 791, 791, 791, 791, 791, 791, 791, 791, 791, 791, 791, 791, 791, 791, 791, 791, 791, 791, 791, 791, 791, 791, 791, 791, 791, 791, 791, 791, 791, 791, 791, 791, 791, 791, 791, 791, 791, 791, 791, 791, 791, 791, 791, 791, 791, 791, 791, 791, 791, 791, 791, 791, 791, 791, 791, 791, 791, 791, 791, 791, 791, 791, 791, 791, 791, 791, 791, 791, 791, 791, 791, 791, 791, 791, 791, 791, 791, 791, 791, 791, 791, 791, 791, 791, 791, 791, 791, 791, 791, 791, 791, 791, 791, 791, 791, 791, 791, 791, 791, 791, 791, 791, 791, 791, 791, 791, 791, 791, 791, 791, 791, 791, 791, 791, 791, 791, 791, 791, 791, 791, 791, 791, 791, 791, 791, 791, 791, 791, 791, 791, 791, 791, 791, 791, 791, 791, 791, 791, 791, 791, 791, 791, 791, 791, 791, 791, 791, 791, 791, 791, 791, 791, 791, 791, 791, 791, 791, 791, 791, 791, 791, 791, 791, 791, 791, 791, 791, 791, 791, 791, 791, 791, 791, 791, 791, 791, 791, 791, 791, 791, 791, 791, 791, 791, 791, 791, 791, 791, 791, 791, 791, 791, 791, 791, 791, 791, 791, 791, 791, 791, 791, 791, 791, 791, 791, 791, 791, 791, 791, 791, 791, 791, 791, 791, 791, 791, 791, 791, 791, 791, 791, 791, 791, 791, 791, 791, 791, 791, 791, 791, 791, 791, 791, 791, 791, 791, 791, 791, 791, 791, 791, 791, 791, 791, 791, 791, 791, 791, 791, 791, 791, 791, 791, 791, 791, 791, 791, 791, 791, 791, 791, 791, 791, 791, 791, 791, 791, 791, 791, 791, 791, 791, 791, 791, 791, 791, 791, 791, 791, 791, 791, 791, 791, 791, 791, 791, 791, 791, 791, 791, 791, 791, 791, 791, 791, 791, 791, 791, 791, 791, 791, 791, 791, 791, 791, 791, 791, 791, 791, 791, 791, 791, 791, 791, 791, 791, 791, 791, 791, 791, 791, 791, 791, 791, 791, 791, 791, 791, 791, 791, 791, 791, 791, 791, 791, 791, 791, 791, 791, 791, 791, 791, 791, 791, 791, 791, 791, 791, 791, 791, 791, 791, 791, 791, 791, 791, 791, 791, 791, 791, 791, 791, 791, 791, 791, 791, 791, 791, 791, 791, 791, 791, 791, 791, 791, 791, 791, 791, 791, 791, 791, 791, 791, 791, 791, 791, 791, 791, 791, 791, 791, 791, 791, 791, 791, 791, 791, 791, 791, 791, 791, 791, 791, 791, 791, 791, 791, 791, 791, 791, 791, 791, 791, 791, 791, 791, 791, 791, 791, 791, 791, 791, 791, 791, 791, 791, 791, 791, 791, 791, 791, 791, 791, 791, 791, 791, 791, 791, 791, 791, 791, 791, 791, 791, 791, 791, 791, 791, 791, 791, 791, 791, 791, 791, 791, 791, 791, 791, 791, 791, 791, 791, 791, 791, 791, 791, 791, 791, 
791, 791, 791, 791, 791, 791, 791, 791, 791, 791, 791, 791, 791, 791, 791, 791, 791, 791, 791, 791, 791, 791, 791, 791, 791, 791, 791, 791, 791, 791, 791, 791, 791, 791, 791, 791, 791, 791, 791, 791, 791, 791, 791, 791, 791, 791, 791}, + {790, 790, 790, 790, 790, 790, 790, 790, 790, 790, 790, 790, 790, 790, 790, 790, 790, 790, 790, 790, 790, 790, 790, 790, 790, 790, 790, 790, 790, 790, 790, 790, 790, 790, 790, 790, 790, 790, 790, 790, 790, 790, 790, 790, 790, 790, 790, 790, 790, 790, 790, 790, 790, 790, 790, 790, 790, 790, 790, 790, 790, 790, 790, 790, 790, 790, 790, 790, 790, 790, 790, 790, 790, 790, 790, 790, 790, 790, 790, 790, 790, 790, 790, 790, 790, 790, 790, 790, 790, 790, 790, 790, 790, 790, 790, 790, 790, 790, 790, 790, 790, 790, 790, 790, 790, 790, 790, 790, 790, 790, 790, 790, 790, 790, 790, 790, 790, 790, 790, 790, 790, 790, 790, 790, 790, 790, 790, 790, 790, 790, 790, 790, 790, 790, 790, 790, 790, 790, 790, 790, 790, 790, 790, 790, 790, 790, 790, 790, 790, 790, 790, 790, 790, 790, 790, 790, 790, 790, 790, 790, 790, 790, 790, 790, 790, 790, 790, 790, 790, 790, 790, 790, 790, 790, 790, 790, 790, 790, 790, 790, 790, 790, 790, 790, 790, 790, 790, 790, 790, 790, 790, 790, 790, 790, 790, 790, 790, 790, 790, 790, 790, 790, 790, 790, 790, 790, 790, 790, 790, 790, 790, 790, 790, 790, 790, 790, 790, 790, 790, 790, 790, 790, 790, 790, 790, 790, 790, 790, 790, 790, 790, 790, 790, 790, 790, 790, 790, 790, 790, 790, 790, 790, 790, 790, 790, 790, 790, 790, 790, 790, 790, 790, 790, 790, 790, 790, 790, 790, 790, 790, 790, 790, 790, 790, 790, 790, 790, 790, 790, 790, 790, 790, 790, 790, 790, 790, 790, 790, 790, 790, 790, 790, 790, 790, 790, 790, 790, 790, 790, 790, 790, 790, 790, 790, 790, 790, 790, 790, 790, 790, 790, 790, 790, 790, 790, 790, 790, 790, 790, 790, 790, 790, 790, 790, 790, 790, 790, 790, 790, 790, 790, 790, 790, 790, 790, 790, 790, 790, 790, 790, 790, 790, 790, 790, 790, 790, 790, 790, 790, 790, 790, 790, 790, 790, 790, 790, 790, 790, 790, 790, 790, 790, 790, 790, 790, 790, 790, 790, 790, 790, 790, 790, 790, 790, 790, 790, 790, 790, 790, 790, 790, 790, 790, 790, 790, 790, 790, 790, 790, 790, 790, 790, 790, 790, 790, 790, 790, 790, 790, 790, 790, 790, 790, 790, 790, 790, 790, 790, 790, 790, 790, 790, 790, 790, 790, 790, 790, 790, 790, 790, 790, 790, 790, 790, 790, 790, 790, 790, 790, 790, 790, 790, 790, 790, 790, 790, 790, 790, 790, 790, 790, 790, 790, 790, 790, 790, 790, 790, 790, 790, 790, 790, 790, 790, 790, 790, 790, 790, 790, 790, 790, 790, 790, 790, 790, 790, 790, 790, 790, 790, 790, 790, 790, 790, 790, 790, 790, 790, 790, 790, 790, 790, 790, 790, 790, 790, 790, 790, 790, 790, 790, 790, 790, 790, 790, 790, 790, 790, 790, 790, 790, 790, 790, 790, 790, 790, 790, 790, 790, 790, 790, 790, 790, 790, 790, 790, 790, 790, 790, 790, 790, 790, 790, 790, 790, 790, 790, 790, 790, 790, 790, 790, 790}, + {789, 789, 789, 789, 789, 789, 789, 789, 789, 789, 789, 789, 789, 789, 789, 789, 789, 789, 789, 789, 789, 789, 789, 789, 789, 789, 789, 789, 789, 789, 789, 789, 789, 789, 789, 789, 789, 789, 789, 789, 789, 789, 789, 789, 789, 789, 789, 789, 789, 789, 789, 789, 789, 789, 789, 789, 789, 789, 789, 789, 789, 789, 789, 789, 789, 789, 789, 789, 789, 789, 789, 789, 789, 789, 789, 789, 789, 789, 789, 789, 789, 789, 789, 789, 789, 789, 789, 789, 789, 789, 789, 789, 789, 789, 789, 789, 789, 789, 789, 789, 789, 789, 789, 789, 789, 789, 789, 789, 789, 789, 789, 789, 789, 789, 789, 789, 789, 789, 789, 789, 789, 789, 789, 789, 789, 789, 789, 789, 789, 789, 789, 789, 789, 789, 789, 789, 789, 789, 789, 
789, 789, 789, 789, 789, 789, 789, 789, 789, 789, 789, 789, 789, 789, 789, 789, 789, 789, 789, 789, 789, 789, 789, 789, 789, 789, 789, 789, 789, 789, 789, 789, 789, 789, 789, 789, 789, 789, 789, 789, 789, 789, 789, 789, 789, 789, 789, 789, 789, 789, 789, 789, 789, 789, 789, 789, 789, 789, 789, 789, 789, 789, 789, 789, 789, 789, 789, 789, 789, 789, 789, 789, 789, 789, 789, 789, 789, 789, 789, 789, 789, 789, 789, 789, 789, 789, 789, 789, 789, 789, 789, 789, 789, 789, 789, 789, 789, 789, 789, 789, 789, 789, 789, 789, 789, 789, 789, 789, 789, 789, 789, 789, 789, 789, 789, 789, 789, 789, 789, 789, 789, 789, 789, 789, 789, 789, 789, 789, 789, 789, 789, 789, 789, 789, 789, 789, 789, 789, 789, 789, 789, 789, 789, 789, 789, 789, 789, 789, 789, 789, 789, 789, 789, 789, 789, 789, 789, 789, 789, 789, 789, 789, 789, 789, 789, 789, 789, 789, 789, 789, 789, 789, 789, 789, 789, 789, 789, 789, 789, 789, 789, 789, 789, 789, 789, 789, 789, 789, 789, 789, 789, 789, 789, 789, 789, 789, 789, 789, 789, 789, 789, 789, 789, 789, 789, 789, 789, 789, 789, 789, 789, 789, 789, 789, 789, 789, 789, 789, 789, 789, 789, 789, 789, 789, 789, 789, 789, 789, 789, 789, 789, 789, 789, 789, 789, 789, 789, 789, 789, 789, 789, 789, 789, 789, 789, 789, 789, 789, 789, 789, 789, 789, 789, 789, 789, 789, 789, 789, 789, 789, 789, 789, 789, 789, 789, 789, 789, 789, 789, 789, 789, 789, 789, 789, 789, 789, 789, 789, 789, 789, 789, 789, 789, 789, 789, 789, 789, 789, 789, 789, 789, 789, 789, 789, 789, 789, 789, 789, 789, 789, 789, 789, 789, 789, 789, 789, 789, 789, 789, 789, 789, 789, 789, 789, 789, 789, 789, 789, 789, 789, 789, 789, 789, 789, 789, 789, 789, 789, 789, 789, 789, 789, 789, 789, 789, 789, 789, 789, 789, 789, 789, 789, 789, 789, 789, 789, 789, 789, 789, 789, 789, 789, 789, 789, 789, 789, 789, 789, 789, 789, 789, 789, 789, 789, 789, 789, 789, 789, 789, 789, 789, 789, 789, 789, 789, 789, 789, 789, 789, 789, 789, 789, 789, 789}, + {788, 788, 788, 788, 788, 788, 788, 788, 788, 788, 788, 788, 788, 788, 788, 788, 788, 788, 788, 788, 788, 788, 788, 788, 788, 788, 788, 788, 788, 788, 788, 788, 788, 788, 788, 788, 788, 788, 788, 788, 788, 788, 788, 788, 788, 788, 788, 788, 788, 788, 788, 788, 788, 788, 788, 788, 788, 788, 788, 788, 788, 788, 788, 788, 788, 788, 788, 788, 788, 788, 788, 788, 788, 788, 788, 788, 788, 788, 788, 788, 788, 788, 788, 788, 788, 788, 788, 788, 788, 788, 788, 788, 788, 788, 788, 788, 788, 788, 788, 788, 788, 788, 788, 788, 788, 788, 788, 788, 788, 788, 788, 788, 788, 788, 788, 788, 788, 788, 788, 788, 788, 788, 788, 788, 788, 788, 788, 788, 788, 788, 788, 788, 788, 788, 788, 788, 788, 788, 788, 788, 788, 788, 788, 788, 788, 788, 788, 788, 788, 788, 788, 788, 788, 788, 788, 788, 788, 788, 788, 788, 788, 788, 788, 788, 788, 788, 788, 788, 788, 788, 788, 788, 788, 788, 788, 788, 788, 788, 788, 788, 788, 788, 788, 788, 788, 788, 788, 788, 788, 788, 788, 788, 788, 788, 788, 788, 788, 788, 788, 788, 788, 788, 788, 788, 788, 788, 788, 788, 788, 788, 788, 788, 788, 788, 788, 788, 788, 788, 788, 788, 788, 788, 788, 788, 788, 788, 788, 788, 788, 788, 788, 788, 788, 788, 788, 788, 788, 788, 788, 788, 788, 788, 788, 788, 788, 788, 788, 788, 788, 788, 788, 788, 788, 788, 788, 788, 788, 788, 788, 788, 788, 788, 788, 788, 788, 788, 788, 788, 788, 788, 788, 788, 788, 788, 788, 788, 788, 788, 788, 788, 788, 788, 788, 788, 788, 788, 788, 788, 788, 788, 788, 788, 788, 788, 788, 788, 788, 788, 788, 788, 788, 788, 788, 788, 788, 788, 788, 788, 788, 788, 788, 788, 788, 788, 788, 788, 788, 788, 788, 788, 788, 788, 788, 788, 788, 788, 
788, 788, 788, 788, 788, 788, 788, 788, 788, 788, 788, 788, 788, 788, 788, 788, 788, 788, 788, 788, 788, 788, 788, 788, 788, 788, 788, 788, 788, 788, 788, 788, 788, 788, 788, 788, 788, 788, 788, 788, 788, 788, 788, 788, 788, 788, 788, 788, 788, 788, 788, 788, 788, 788, 788, 788, 788, 788, 788, 788, 788, 788, 788, 788, 788, 788, 788, 788, 788, 788, 788, 788, 788, 788, 788, 788, 788, 788, 788, 788, 788, 788, 788, 788, 788, 788, 788, 788, 788, 788, 788, 788, 788, 788, 788, 788, 788, 788, 788, 788, 788, 788, 788, 788, 788, 788, 788, 788, 788, 788, 788, 788, 788, 788, 788, 788, 788, 788, 788, 788, 788, 788, 788, 788, 788, 788, 788, 788, 788, 788, 788, 788, 788, 788, 788, 788, 788, 788, 788, 788, 788, 788, 788, 788, 788, 788, 788, 788, 788, 788, 788, 788, 788, 788, 788, 788, 788, 788, 788, 788, 788, 788, 788, 788, 788, 788, 788, 788, 788, 788, 788, 788, 788, 788, 788, 788, 788, 788, 788, 788, 788, 788, 788, 788, 788, 788, 788, 788, 788, 788, 788, 788, 788, 788, 788, 788, 788}, + {787, 787, 787, 787, 787, 787, 787, 787, 787, 787, 787, 787, 787, 787, 787, 787, 787, 787, 787, 787, 787, 787, 787, 787, 787, 787, 787, 787, 787, 787, 787, 787, 787, 787, 787, 787, 787, 787, 787, 787, 787, 787, 787, 787, 787, 787, 787, 787, 787, 787, 787, 787, 787, 787, 787, 787, 787, 787, 787, 787, 787, 787, 787, 787, 787, 787, 787, 787, 787, 787, 787, 787, 787, 787, 787, 787, 787, 787, 787, 787, 787, 787, 787, 787, 787, 787, 787, 787, 787, 787, 787, 787, 787, 787, 787, 787, 787, 787, 787, 787, 787, 787, 787, 787, 787, 787, 787, 787, 787, 787, 787, 787, 787, 787, 787, 787, 787, 787, 787, 787, 787, 787, 787, 787, 787, 787, 787, 787, 787, 787, 787, 787, 787, 787, 787, 787, 787, 787, 787, 787, 787, 787, 787, 787, 787, 787, 787, 787, 787, 787, 787, 787, 787, 787, 787, 787, 787, 787, 787, 787, 787, 787, 787, 787, 787, 787, 787, 787, 787, 787, 787, 787, 787, 787, 787, 787, 787, 787, 787, 787, 787, 787, 787, 787, 787, 787, 787, 787, 787, 787, 787, 787, 787, 787, 787, 787, 787, 787, 787, 787, 787, 787, 787, 787, 787, 787, 787, 787, 787, 787, 787, 787, 787, 787, 787, 787, 787, 787, 787, 787, 787, 787, 787, 787, 787, 787, 787, 787, 787, 787, 787, 787, 787, 787, 787, 787, 787, 787, 787, 787, 787, 787, 787, 787, 787, 787, 787, 787, 787, 787, 787, 787, 787, 787, 787, 787, 787, 787, 787, 787, 787, 787, 787, 787, 787, 787, 787, 787, 787, 787, 787, 787, 787, 787, 787, 787, 787, 787, 787, 787, 787, 787, 787, 787, 787, 787, 787, 787, 787, 787, 787, 787, 787, 787, 787, 787, 787, 787, 787, 787, 787, 787, 787, 787, 787, 787, 787, 787, 787, 787, 787, 787, 787, 787, 787, 787, 787, 787, 787, 787, 787, 787, 787, 787, 787, 787, 787, 787, 787, 787, 787, 787, 787, 787, 787, 787, 787, 787, 787, 787, 787, 787, 787, 787, 787, 787, 787, 787, 787, 787, 787, 787, 787, 787, 787, 787, 787, 787, 787, 787, 787, 787, 787, 787, 787, 787, 787, 787, 787, 787, 787, 787, 787, 787, 787, 787, 787, 787, 787, 787, 787, 787, 787, 787, 787, 787, 787, 787, 787, 787, 787, 787, 787, 787, 787, 787, 787, 787, 787, 787, 787, 787, 787, 787, 787, 787, 787, 787, 787, 787, 787, 787, 787, 787, 787, 787, 787, 787, 787, 787, 787, 787, 787, 787, 787, 787, 787, 787, 787, 787, 787, 787, 787, 787, 787, 787, 787, 787, 787, 787, 787, 787, 787, 787, 787, 787, 787, 787, 787, 787, 787, 787, 787, 787, 787, 787, 787, 787, 787, 787, 787, 787, 787, 787, 787, 787, 787, 787, 787, 787, 787, 787, 787, 787, 787, 787, 787, 787, 787, 787, 787, 787, 787, 787, 787, 787, 787, 787, 787, 787, 787, 787, 787, 787, 787, 787, 787, 787, 787, 787, 787, 787, 787, 787, 787, 787, 787, 787, 787, 787, 787, 787, 787, 
787, 787, 787, 787, 787, 787, 787, 787, 787, 787}, + // 155 + {786, 786, 786, 786, 786, 786, 786, 786, 786, 786, 786, 786, 786, 786, 786, 786, 786, 786, 786, 786, 786, 786, 786, 786, 786, 786, 786, 786, 786, 786, 786, 786, 786, 786, 786, 786, 786, 786, 786, 786, 786, 786, 786, 786, 786, 786, 786, 786, 786, 786, 786, 786, 786, 786, 786, 786, 786, 786, 786, 786, 786, 786, 786, 786, 786, 786, 786, 786, 786, 786, 786, 786, 786, 786, 786, 786, 786, 786, 786, 786, 786, 786, 786, 786, 786, 786, 786, 786, 786, 786, 786, 786, 786, 786, 786, 786, 786, 786, 786, 786, 786, 786, 786, 786, 786, 786, 786, 786, 786, 786, 786, 786, 786, 786, 786, 786, 786, 786, 786, 786, 786, 786, 786, 786, 786, 786, 786, 786, 786, 786, 786, 786, 786, 786, 786, 786, 786, 786, 786, 786, 786, 786, 786, 786, 786, 786, 786, 786, 786, 786, 786, 786, 786, 786, 786, 786, 786, 786, 786, 786, 786, 786, 786, 786, 786, 786, 786, 786, 786, 786, 786, 786, 786, 786, 786, 786, 786, 786, 786, 786, 786, 786, 786, 786, 786, 786, 786, 786, 786, 786, 786, 786, 786, 786, 786, 786, 786, 786, 786, 786, 786, 786, 786, 786, 786, 786, 786, 786, 786, 786, 786, 786, 786, 786, 786, 786, 786, 786, 786, 786, 786, 786, 786, 786, 786, 786, 786, 786, 786, 786, 786, 786, 786, 786, 786, 786, 786, 786, 786, 786, 786, 786, 786, 786, 786, 786, 786, 786, 786, 786, 786, 786, 786, 786, 786, 786, 786, 786, 786, 786, 786, 786, 786, 786, 786, 786, 786, 786, 786, 786, 786, 786, 786, 786, 786, 786, 786, 786, 786, 786, 786, 786, 786, 786, 786, 786, 786, 786, 786, 786, 786, 786, 786, 786, 786, 786, 786, 786, 786, 786, 786, 786, 786, 786, 786, 786, 786, 786, 786, 786, 786, 786, 786, 786, 786, 786, 786, 786, 786, 786, 786, 786, 786, 786, 786, 786, 786, 786, 786, 786, 786, 786, 786, 786, 786, 786, 786, 786, 786, 786, 786, 786, 786, 786, 786, 786, 786, 786, 786, 786, 786, 786, 786, 786, 786, 786, 786, 786, 786, 786, 786, 786, 786, 786, 786, 786, 786, 786, 786, 786, 786, 786, 786, 786, 786, 786, 786, 786, 786, 786, 786, 786, 786, 786, 786, 786, 786, 786, 786, 786, 786, 786, 786, 786, 786, 786, 786, 786, 786, 786, 786, 786, 786, 786, 786, 786, 786, 786, 786, 786, 786, 786, 786, 786, 786, 786, 786, 786, 786, 786, 786, 786, 786, 786, 786, 786, 786, 786, 786, 786, 786, 786, 786, 786, 786, 786, 786, 786, 786, 786, 786, 786, 786, 786, 786, 786, 786, 786, 786, 786, 786, 786, 786, 786, 786, 786, 786, 786, 786, 786, 786, 786, 786, 786, 786, 786, 786, 786, 786, 786, 786, 786, 786, 786, 786, 786, 786, 786, 786, 786, 786, 786, 786, 786, 786, 786, 786, 786, 786, 786, 786, 786, 786, 786, 786, 786, 786, 786, 786, 786, 786, 786, 786, 786, 786, 786, 786, 786, 786, 786, 786, 786, 786, 786, 786, 786, 786, 786, 786, 786, 786, 786, 786}, + {785, 785, 785, 785, 785, 785, 785, 785, 785, 785, 785, 785, 785, 785, 785, 785, 785, 785, 785, 785, 785, 785, 785, 785, 785, 785, 785, 785, 785, 785, 785, 785, 785, 785, 785, 785, 785, 785, 785, 785, 785, 785, 785, 785, 785, 785, 785, 785, 785, 785, 785, 785, 785, 785, 785, 785, 785, 785, 785, 785, 785, 785, 785, 785, 785, 785, 785, 785, 785, 785, 785, 785, 785, 785, 785, 785, 785, 785, 785, 785, 785, 785, 785, 785, 785, 785, 785, 785, 785, 785, 785, 785, 785, 785, 785, 785, 785, 785, 785, 785, 785, 785, 785, 785, 785, 785, 785, 785, 785, 785, 785, 785, 785, 785, 785, 785, 785, 785, 785, 785, 785, 785, 785, 785, 785, 785, 785, 785, 785, 785, 785, 785, 785, 785, 785, 785, 785, 785, 785, 785, 785, 785, 785, 785, 785, 785, 785, 785, 785, 785, 785, 785, 785, 785, 785, 785, 785, 785, 785, 785, 785, 785, 785, 785, 785, 785, 785, 785, 785, 785, 785, 785, 785, 785, 
785, 785, 785, 785, 785, 785, 785, 785, 785, 785, 785, 785, 785, 785, 785, 785, 785, 785, 785, 785, 785, 785, 785, 785, 785, 785, 785, 785, 785, 785, 785, 785, 785, 785, 785, 785, 785, 785, 785, 785, 785, 785, 785, 785, 785, 785, 785, 785, 785, 785, 785, 785, 785, 785, 785, 785, 785, 785, 785, 785, 785, 785, 785, 785, 785, 785, 785, 785, 785, 785, 785, 785, 785, 785, 785, 785, 785, 785, 785, 785, 785, 785, 785, 785, 785, 785, 785, 785, 785, 785, 785, 785, 785, 785, 785, 785, 785, 785, 785, 785, 785, 785, 785, 785, 785, 785, 785, 785, 785, 785, 785, 785, 785, 785, 785, 785, 785, 785, 785, 785, 785, 785, 785, 785, 785, 785, 785, 785, 785, 785, 785, 785, 785, 785, 785, 785, 785, 785, 785, 785, 785, 785, 785, 785, 785, 785, 785, 785, 785, 785, 785, 785, 785, 785, 785, 785, 785, 785, 785, 785, 785, 785, 785, 785, 785, 785, 785, 785, 785, 785, 785, 785, 785, 785, 785, 785, 785, 785, 785, 785, 785, 785, 785, 785, 785, 785, 785, 785, 785, 785, 785, 785, 785, 785, 785, 785, 785, 785, 785, 785, 785, 785, 785, 785, 785, 785, 785, 785, 785, 785, 785, 785, 785, 785, 785, 785, 785, 785, 785, 785, 785, 785, 785, 785, 785, 785, 785, 785, 785, 785, 785, 785, 785, 785, 785, 785, 785, 785, 785, 785, 785, 785, 785, 785, 785, 785, 785, 785, 785, 785, 785, 785, 785, 785, 785, 785, 785, 785, 785, 785, 785, 785, 785, 785, 785, 785, 785, 785, 785, 785, 785, 785, 785, 785, 785, 785, 785, 785, 785, 785, 785, 785, 785, 785, 785, 785, 785, 785, 785, 785, 785, 785, 785, 785, 785, 785, 785, 785, 785, 785, 785, 785, 785, 785, 785, 785, 785, 785, 785, 785, 785, 785, 785, 785, 785, 785, 785, 785, 785, 785, 785, 785, 785, 785, 785, 785, 785, 785, 785, 785, 785, 785, 785, 785, 785, 785, 785, 785, 785, 785, 785, 785, 785, 785, 785, 785, 785, 785, 785}, + {784, 784, 784, 784, 784, 784, 784, 784, 784, 784, 784, 784, 784, 784, 784, 784, 784, 784, 784, 784, 784, 784, 784, 784, 784, 784, 784, 784, 784, 784, 784, 784, 784, 784, 784, 784, 784, 784, 784, 784, 784, 784, 784, 784, 784, 784, 784, 784, 784, 784, 784, 784, 784, 784, 784, 784, 784, 784, 784, 784, 784, 784, 784, 784, 784, 784, 784, 784, 784, 784, 784, 784, 784, 784, 784, 784, 784, 784, 784, 784, 784, 784, 784, 784, 784, 784, 784, 784, 784, 784, 784, 784, 784, 784, 784, 784, 784, 784, 784, 784, 784, 784, 784, 784, 784, 784, 784, 784, 784, 784, 784, 784, 784, 784, 784, 784, 784, 784, 784, 784, 784, 784, 784, 784, 784, 784, 784, 784, 784, 784, 784, 784, 784, 784, 784, 784, 784, 784, 784, 784, 784, 784, 784, 784, 784, 784, 784, 784, 784, 784, 784, 784, 784, 784, 784, 784, 784, 784, 784, 784, 784, 784, 784, 784, 784, 784, 784, 784, 784, 784, 784, 784, 784, 784, 784, 784, 784, 784, 784, 784, 784, 784, 784, 784, 784, 784, 784, 784, 784, 784, 784, 784, 784, 784, 784, 784, 784, 784, 784, 784, 784, 784, 784, 784, 784, 784, 784, 784, 784, 784, 784, 784, 784, 784, 784, 784, 784, 784, 784, 784, 784, 784, 784, 784, 784, 784, 784, 784, 784, 784, 784, 784, 784, 784, 784, 784, 784, 784, 784, 784, 784, 784, 784, 784, 784, 784, 784, 784, 784, 784, 784, 784, 784, 784, 784, 784, 784, 784, 784, 784, 784, 784, 784, 784, 784, 784, 784, 784, 784, 784, 784, 784, 784, 784, 784, 784, 784, 784, 784, 784, 784, 784, 784, 784, 784, 784, 784, 784, 784, 784, 784, 784, 784, 784, 784, 784, 784, 784, 784, 784, 784, 784, 784, 784, 784, 784, 784, 784, 784, 784, 784, 784, 784, 784, 784, 784, 784, 784, 784, 784, 784, 784, 784, 784, 784, 784, 784, 784, 784, 784, 784, 784, 784, 784, 784, 784, 784, 784, 784, 784, 784, 784, 784, 784, 784, 784, 784, 784, 784, 784, 784, 784, 784, 784, 784, 784, 784, 784, 784, 784, 784, 
784, 784, 784, 784, 784, 784, 784, 784, 784, 784, 784, 784, 784, 784, 784, 784, 784, 784, 784, 784, 784, 784, 784, 784, 784, 784, 784, 784, 784, 784, 784, 784, 784, 784, 784, 784, 784, 784, 784, 784, 784, 784, 784, 784, 784, 784, 784, 784, 784, 784, 784, 784, 784, 784, 784, 784, 784, 784, 784, 784, 784, 784, 784, 784, 784, 784, 784, 784, 784, 784, 784, 784, 784, 784, 784, 784, 784, 784, 784, 784, 784, 784, 784, 784, 784, 784, 784, 784, 784, 784, 784, 784, 784, 784, 784, 784, 784, 784, 784, 784, 784, 784, 784, 784, 784, 784, 784, 784, 784, 784, 784, 784, 784, 784, 784, 784, 784, 784, 784, 784, 784, 784, 784, 784, 784, 784, 784, 784, 784, 784, 784, 784, 784, 784, 784, 784, 784, 784, 784, 784, 784, 784, 784, 784, 784, 784, 784, 784, 784, 784, 784, 784, 784, 784, 784, 784, 784, 784, 784, 784, 784, 784}, + {783, 783, 783, 783, 783, 783, 783, 783, 783, 783, 783, 783, 783, 783, 783, 783, 783, 783, 783, 783, 783, 783, 783, 783, 783, 783, 783, 783, 783, 783, 783, 783, 783, 783, 783, 783, 783, 783, 783, 783, 783, 783, 783, 783, 783, 783, 783, 783, 783, 783, 783, 783, 783, 783, 783, 783, 783, 783, 783, 783, 783, 783, 783, 783, 783, 783, 783, 783, 783, 783, 783, 783, 783, 783, 783, 783, 783, 783, 783, 783, 783, 783, 783, 783, 783, 783, 783, 783, 783, 783, 783, 783, 783, 783, 783, 783, 783, 783, 783, 783, 783, 783, 783, 783, 783, 783, 783, 783, 783, 783, 783, 783, 783, 783, 783, 783, 783, 783, 783, 783, 783, 783, 783, 783, 783, 783, 783, 783, 783, 783, 783, 783, 783, 783, 783, 783, 783, 783, 783, 783, 783, 783, 783, 783, 783, 783, 783, 783, 783, 783, 783, 783, 783, 783, 783, 783, 783, 783, 783, 783, 783, 783, 783, 783, 783, 783, 783, 783, 783, 783, 783, 783, 783, 783, 783, 783, 783, 783, 783, 783, 783, 783, 783, 783, 783, 783, 783, 783, 783, 783, 783, 783, 783, 783, 783, 783, 783, 783, 783, 783, 783, 783, 783, 783, 783, 783, 783, 783, 783, 783, 783, 783, 783, 783, 783, 783, 783, 783, 783, 783, 783, 783, 783, 783, 783, 783, 783, 783, 783, 783, 783, 783, 783, 783, 783, 783, 783, 783, 783, 783, 783, 783, 783, 783, 783, 783, 783, 783, 783, 783, 783, 783, 783, 783, 783, 783, 783, 783, 783, 783, 783, 783, 783, 783, 783, 783, 783, 783, 783, 783, 783, 783, 783, 783, 783, 783, 783, 783, 783, 783, 783, 783, 783, 783, 783, 783, 783, 783, 783, 783, 783, 783, 783, 783, 783, 783, 783, 783, 783, 783, 783, 783, 783, 783, 783, 783, 783, 783, 783, 783, 783, 783, 783, 783, 783, 783, 783, 783, 783, 783, 783, 783, 783, 783, 783, 783, 783, 783, 783, 783, 783, 783, 783, 783, 783, 783, 783, 783, 783, 783, 783, 783, 783, 783, 783, 783, 783, 783, 783, 783, 783, 783, 783, 783, 783, 783, 783, 783, 783, 783, 783, 783, 783, 783, 783, 783, 783, 783, 783, 783, 783, 783, 783, 783, 783, 783, 783, 783, 783, 783, 783, 783, 783, 783, 783, 783, 783, 783, 783, 783, 783, 783, 783, 783, 783, 783, 783, 783, 783, 783, 783, 783, 783, 783, 783, 783, 783, 783, 783, 783, 783, 783, 783, 783, 783, 783, 783, 783, 783, 783, 783, 783, 783, 783, 783, 783, 783, 783, 783, 783, 783, 783, 783, 783, 783, 783, 783, 783, 783, 783, 783, 783, 783, 783, 783, 783, 783, 783, 783, 783, 783, 783, 783, 783, 783, 783, 783, 783, 783, 783, 783, 783, 783, 783, 783, 783, 783, 783, 783, 783, 783, 783, 783, 783, 783, 783, 783, 783, 783, 783, 783, 783, 783, 783, 783, 783, 783, 783, 783, 783, 783, 783, 783, 783, 783, 783, 783, 783, 783, 783, 783, 783, 783, 783, 783, 783, 783, 783, 783, 783, 783, 783, 783, 783, 783, 783, 783, 783, 783, 783, 783, 783, 783}, + {782, 782, 782, 782, 782, 782, 782, 782, 782, 782, 782, 782, 782, 782, 782, 782, 782, 782, 782, 782, 782, 782, 782, 782, 
782, 782, 782, 782, 782, 782, 782, 782, 782, 782, 782, 782, 782, 782, 782, 782, 782, 782, 782, 782, 782, 782, 782, 782, 782, 782, 782, 782, 782, 782, 782, 782, 782, 782, 782, 782, 782, 782, 782, 782, 782, 782, 782, 782, 782, 782, 782, 782, 782, 782, 782, 782, 782, 782, 782, 782, 782, 782, 782, 782, 782, 782, 782, 782, 782, 782, 782, 782, 782, 782, 782, 782, 782, 782, 782, 782, 782, 782, 782, 782, 782, 782, 782, 782, 782, 782, 782, 782, 782, 782, 782, 782, 782, 782, 782, 782, 782, 782, 782, 782, 782, 782, 782, 782, 782, 782, 782, 782, 782, 782, 782, 782, 782, 782, 782, 782, 782, 782, 782, 782, 782, 782, 782, 782, 782, 782, 782, 782, 782, 782, 782, 782, 782, 782, 782, 782, 782, 782, 782, 782, 782, 782, 782, 782, 782, 782, 782, 782, 782, 782, 782, 782, 782, 782, 782, 782, 782, 782, 782, 782, 782, 782, 782, 782, 782, 782, 782, 782, 782, 782, 782, 782, 782, 782, 782, 782, 782, 782, 782, 782, 782, 782, 782, 782, 782, 782, 782, 782, 782, 782, 782, 782, 782, 782, 782, 782, 782, 782, 782, 782, 782, 782, 782, 782, 782, 782, 782, 782, 782, 782, 782, 782, 782, 782, 782, 782, 782, 782, 782, 782, 782, 782, 782, 782, 782, 782, 782, 782, 782, 782, 782, 782, 782, 782, 782, 782, 782, 782, 782, 782, 782, 782, 782, 782, 782, 782, 782, 782, 782, 782, 782, 782, 782, 782, 782, 782, 782, 782, 782, 782, 782, 782, 782, 782, 782, 782, 782, 782, 782, 782, 782, 782, 782, 782, 782, 782, 782, 782, 782, 782, 782, 782, 782, 782, 782, 782, 782, 782, 782, 782, 782, 782, 782, 782, 782, 782, 782, 782, 782, 782, 782, 782, 782, 782, 782, 782, 782, 782, 782, 782, 782, 782, 782, 782, 782, 782, 782, 782, 782, 782, 782, 782, 782, 782, 782, 782, 782, 782, 782, 782, 782, 782, 782, 782, 782, 782, 782, 782, 782, 782, 782, 782, 782, 782, 782, 782, 782, 782, 782, 782, 782, 782, 782, 782, 782, 782, 782, 782, 782, 782, 782, 782, 782, 782, 782, 782, 782, 782, 782, 782, 782, 782, 782, 782, 782, 782, 782, 782, 782, 782, 782, 782, 782, 782, 782, 782, 782, 782, 782, 782, 782, 782, 782, 782, 782, 782, 782, 782, 782, 782, 782, 782, 782, 782, 782, 782, 782, 782, 782, 782, 782, 782, 782, 782, 782, 782, 782, 782, 782, 782, 782, 782, 782, 782, 782, 782, 782, 782, 782, 782, 782, 782, 782, 782, 782, 782, 782, 782, 782, 782, 782, 782, 782, 782, 782, 782, 782, 782, 782, 782, 782, 782, 782, 782, 782, 782, 782, 782, 782, 782, 782, 782, 782, 782, 782, 782, 782, 782, 782, 782, 782, 782, 782, 782, 782, 782, 782, 782, 782, 782, 782, 782, 782, 782, 782, 782, 782, 782, 782, 782, 782, 782, 782, 782, 782, 782, 782, 782, 782}, + // 160 + {781, 781, 781, 781, 781, 781, 781, 781, 781, 781, 781, 781, 781, 781, 781, 781, 781, 781, 781, 781, 781, 781, 781, 781, 781, 781, 781, 781, 781, 781, 781, 781, 781, 781, 781, 781, 781, 781, 781, 781, 781, 781, 781, 781, 781, 781, 781, 781, 781, 781, 781, 781, 781, 781, 781, 781, 781, 781, 781, 781, 781, 781, 781, 781, 781, 781, 781, 781, 781, 781, 781, 781, 781, 781, 781, 781, 781, 781, 781, 781, 781, 781, 781, 781, 781, 781, 781, 781, 781, 781, 781, 781, 781, 781, 781, 781, 781, 781, 781, 781, 781, 781, 781, 781, 781, 781, 781, 781, 781, 781, 781, 781, 781, 781, 781, 781, 781, 781, 781, 781, 781, 781, 781, 781, 781, 781, 781, 781, 781, 781, 781, 781, 781, 781, 781, 781, 781, 781, 781, 781, 781, 781, 781, 781, 781, 781, 781, 781, 781, 781, 781, 781, 781, 781, 781, 781, 781, 781, 781, 781, 781, 781, 781, 781, 781, 781, 781, 781, 781, 781, 781, 781, 781, 781, 781, 781, 781, 781, 781, 781, 781, 781, 781, 781, 781, 781, 781, 781, 781, 781, 781, 781, 781, 781, 781, 781, 781, 781, 781, 781, 781, 781, 781, 781, 781, 781, 781, 781, 781, 
781, 781, 781, 781, 781, 781, 781, 781, 781, 781, 781, 781, 781, 781, 781, 781, 781, 781, 781, 781, 781, 781, 781, 781, 781, 781, 781, 781, 781, 781, 781, 781, 781, 781, 781, 781, 781, 781, 781, 781, 781, 781, 781, 781, 781, 781, 781, 781, 781, 781, 781, 781, 781, 781, 781, 781, 781, 781, 781, 781, 781, 781, 781, 781, 781, 781, 781, 781, 781, 781, 781, 781, 781, 781, 781, 781, 781, 781, 781, 781, 781, 781, 781, 781, 781, 781, 781, 781, 781, 781, 781, 781, 781, 781, 781, 781, 781, 781, 781, 781, 781, 781, 781, 781, 781, 781, 781, 781, 781, 781, 781, 781, 781, 781, 781, 781, 781, 781, 781, 781, 781, 781, 781, 781, 781, 781, 781, 781, 781, 781, 781, 781, 781, 781, 781, 781, 781, 781, 781, 781, 781, 781, 781, 781, 781, 781, 781, 781, 781, 781, 781, 781, 781, 781, 781, 781, 781, 781, 781, 781, 781, 781, 781, 781, 781, 781, 781, 781, 781, 781, 781, 781, 781, 781, 781, 781, 781, 781, 781, 781, 781, 781, 781, 781, 781, 781, 781, 781, 781, 781, 781, 781, 781, 781, 781, 781, 781, 781, 781, 781, 781, 781, 781, 781, 781, 781, 781, 781, 781, 781, 781, 781, 781, 781, 781, 781, 781, 781, 781, 781, 781, 781, 781, 781, 781, 781, 781, 781, 781, 781, 781, 781, 781, 781, 781, 781, 781, 781, 781, 781, 781, 781, 781, 781, 781, 781, 781, 781, 781, 781, 781, 781, 781, 781, 781, 781, 781, 781, 781, 781, 781, 781, 781, 781, 781, 781, 781, 781, 781, 781, 781, 781, 781, 781, 781, 781, 781, 781, 781, 781, 781, 781, 781, 781, 781, 781, 781, 781, 781, 781, 781, 781, 781, 781, 781, 781, 781, 781, 781, 781, 781, 781, 781, 781, 781, 781, 781, 781, 781, 781, 781, 781, 781, 781}, + {780, 780, 780, 780, 780, 780, 780, 780, 780, 780, 780, 780, 780, 780, 780, 780, 780, 780, 780, 780, 780, 780, 780, 780, 780, 780, 780, 780, 780, 780, 780, 780, 780, 780, 780, 780, 780, 780, 780, 780, 780, 780, 780, 780, 780, 780, 780, 780, 780, 780, 780, 780, 780, 780, 780, 780, 780, 780, 780, 780, 780, 780, 780, 780, 780, 780, 780, 780, 780, 780, 780, 780, 780, 780, 780, 780, 780, 780, 780, 780, 780, 780, 780, 780, 780, 780, 780, 780, 780, 780, 780, 780, 780, 780, 780, 780, 780, 780, 780, 780, 780, 780, 780, 780, 780, 780, 780, 780, 780, 780, 780, 780, 780, 780, 780, 780, 780, 780, 780, 780, 780, 780, 780, 780, 780, 780, 780, 780, 780, 780, 780, 780, 780, 780, 780, 780, 780, 780, 780, 780, 780, 780, 780, 780, 780, 780, 780, 780, 780, 780, 780, 780, 780, 780, 780, 780, 780, 780, 780, 780, 780, 780, 780, 780, 780, 780, 780, 780, 780, 780, 780, 780, 780, 780, 780, 780, 780, 780, 780, 780, 780, 780, 780, 780, 780, 780, 780, 780, 780, 780, 780, 780, 780, 780, 780, 780, 780, 780, 780, 780, 780, 780, 780, 780, 780, 780, 780, 780, 780, 780, 780, 780, 780, 780, 780, 780, 780, 780, 780, 780, 780, 780, 780, 780, 780, 780, 780, 780, 780, 780, 780, 780, 780, 780, 780, 780, 780, 780, 780, 780, 780, 780, 780, 780, 780, 780, 780, 780, 780, 780, 780, 780, 780, 780, 780, 780, 780, 780, 780, 780, 780, 780, 780, 780, 780, 780, 780, 780, 780, 780, 780, 780, 780, 780, 780, 780, 780, 780, 780, 780, 780, 780, 780, 780, 780, 780, 780, 780, 780, 780, 780, 780, 780, 780, 780, 780, 780, 780, 780, 780, 780, 780, 780, 780, 780, 780, 780, 780, 780, 780, 780, 780, 780, 780, 780, 780, 780, 780, 780, 780, 780, 780, 780, 780, 780, 780, 780, 780, 780, 780, 780, 780, 780, 780, 780, 780, 780, 780, 780, 780, 780, 780, 780, 780, 780, 780, 780, 780, 780, 780, 780, 780, 780, 780, 780, 780, 780, 780, 780, 780, 780, 780, 780, 780, 780, 780, 780, 780, 780, 780, 780, 780, 780, 780, 780, 780, 780, 780, 780, 780, 780, 780, 780, 780, 780, 780, 780, 780, 780, 780, 780, 780, 780, 780, 780, 780, 
780, 780, 780, 780, 780, 780, 780, 780, 780, 780, 780, 780, 780, 780, 780, 780, 780, 780, 780, 780, 780, 780, 780, 780, 780, 780, 780, 780, 780, 780, 780, 780, 780, 780, 780, 780, 780, 780, 780, 780, 780, 780, 780, 780, 780, 780, 780, 780, 780, 780, 780, 780, 780, 780, 780, 780, 780, 780, 780, 780, 780, 780, 780, 780, 780, 780, 780, 780, 780, 780, 780, 780, 780, 780, 780, 780, 780, 780, 780, 780, 780, 780, 780, 780, 780, 780, 780, 780, 780, 780, 780, 780, 780, 780, 780, 780, 780, 780, 780, 780, 780, 780, 780, 780, 780, 780, 780, 780, 780, 780, 780, 780, 780, 780, 780, 780, 780, 780, 780, 780, 780, 780, 780, 780, 780, 780, 780}, + {779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779}, + {778, 778, 778, 778, 778, 778, 778, 778, 778, 778, 778, 778, 778, 778, 778, 778, 778, 778, 778, 778, 778, 778, 778, 778, 778, 778, 778, 778, 778, 778, 778, 778, 778, 778, 778, 778, 778, 778, 778, 778, 778, 778, 778, 778, 778, 778, 778, 778, 778, 778, 778, 778, 778, 778, 778, 778, 778, 778, 778, 
778, 778, 778, 778, 778, 778, 778, 778, 778, 778, 778, 778, 778, 778, 778, 778, 778, 778, 778, 778, 778, 778, 778, 778, 778, 778, 778, 778, 778, 778, 778, 778, 778, 778, 778, 778, 778, 778, 778, 778, 778, 778, 778, 778, 778, 778, 778, 778, 778, 778, 778, 778, 778, 778, 778, 778, 778, 778, 778, 778, 778, 778, 778, 778, 778, 778, 778, 778, 778, 778, 778, 778, 778, 778, 778, 778, 778, 778, 778, 778, 778, 778, 778, 778, 778, 778, 778, 778, 778, 778, 778, 778, 778, 778, 778, 778, 778, 778, 778, 778, 778, 778, 778, 778, 778, 778, 778, 778, 778, 778, 778, 778, 778, 778, 778, 778, 778, 778, 778, 778, 778, 778, 778, 778, 778, 778, 778, 778, 778, 778, 778, 778, 778, 778, 778, 778, 778, 778, 778, 778, 778, 778, 778, 778, 778, 778, 778, 778, 778, 778, 778, 778, 778, 778, 778, 778, 778, 778, 778, 778, 778, 778, 778, 778, 778, 778, 778, 778, 778, 778, 778, 778, 778, 778, 778, 778, 778, 778, 778, 778, 778, 778, 778, 778, 778, 778, 778, 778, 778, 778, 778, 778, 778, 778, 778, 778, 778, 778, 778, 778, 778, 778, 778, 778, 778, 778, 778, 778, 778, 778, 778, 778, 778, 778, 778, 778, 778, 778, 778, 778, 778, 778, 778, 778, 778, 778, 778, 778, 778, 778, 778, 778, 778, 778, 778, 778, 778, 778, 778, 778, 778, 778, 778, 778, 778, 778, 778, 778, 778, 778, 778, 778, 778, 778, 778, 778, 778, 778, 778, 778, 778, 778, 778, 778, 778, 778, 778, 778, 778, 778, 778, 778, 778, 778, 778, 778, 778, 778, 778, 778, 778, 778, 778, 778, 778, 778, 778, 778, 778, 778, 778, 778, 778, 778, 778, 778, 778, 778, 778, 778, 778, 778, 778, 778, 778, 778, 778, 778, 778, 778, 778, 778, 778, 778, 778, 778, 778, 778, 778, 778, 778, 778, 778, 778, 778, 778, 778, 778, 778, 778, 778, 778, 778, 778, 778, 778, 778, 778, 778, 778, 778, 778, 778, 778, 778, 778, 778, 778, 778, 778, 778, 778, 778, 778, 778, 778, 778, 778, 778, 778, 778, 778, 778, 778, 778, 778, 778, 778, 778, 778, 778, 778, 778, 778, 778, 778, 778, 778, 778, 778, 778, 778, 778, 778, 778, 778, 778, 778, 778, 778, 778, 778, 778, 778, 778, 778, 778, 778, 778, 778, 778, 778, 778, 778, 778, 778, 778, 778, 778, 778, 778, 778, 778, 778, 778, 778, 778, 778, 778, 778, 778, 778, 778, 778, 778, 778, 778, 778, 778, 778, 778, 778, 778, 778, 778, 778, 778, 778, 778, 778, 778, 778, 778, 778, 778, 778, 778, 778, 778, 778, 778, 778, 778, 778, 778, 778, 778, 778, 778, 778, 778, 778, 778, 778}, + {777, 777, 777, 777, 777, 777, 777, 777, 777, 777, 777, 777, 777, 777, 777, 777, 777, 777, 777, 777, 777, 777, 777, 777, 777, 777, 777, 777, 777, 777, 777, 777, 777, 777, 777, 777, 777, 777, 777, 777, 777, 777, 777, 777, 777, 777, 777, 777, 777, 777, 777, 777, 777, 777, 777, 777, 777, 777, 777, 777, 777, 777, 777, 777, 777, 777, 777, 777, 777, 777, 777, 777, 777, 777, 777, 777, 777, 777, 777, 777, 777, 777, 777, 777, 777, 777, 777, 777, 777, 777, 777, 777, 777, 777, 777, 777, 777, 777, 777, 777, 777, 777, 777, 777, 777, 777, 777, 777, 777, 777, 777, 777, 777, 777, 777, 777, 777, 777, 777, 777, 777, 777, 777, 777, 777, 777, 777, 777, 777, 777, 777, 777, 777, 777, 777, 777, 777, 777, 777, 777, 777, 777, 777, 777, 777, 777, 777, 777, 777, 777, 777, 777, 777, 777, 777, 777, 777, 777, 777, 777, 777, 777, 777, 777, 777, 777, 777, 777, 777, 777, 777, 777, 777, 777, 777, 777, 777, 777, 777, 777, 777, 777, 777, 777, 777, 777, 777, 777, 777, 777, 777, 777, 777, 777, 777, 777, 777, 777, 777, 777, 777, 777, 777, 777, 777, 777, 777, 777, 777, 777, 777, 777, 777, 777, 777, 777, 777, 777, 777, 777, 777, 777, 777, 777, 777, 777, 777, 777, 777, 777, 777, 777, 777, 777, 777, 777, 777, 777, 777, 777, 777, 777, 777, 777, 777, 777, 
777, 777, 777, 777, 777, 777, 777, 777, 777, 777, 777, 777, 777, 777, 777, 777, 777, 777, 777, 777, 777, 777, 777, 777, 777, 777, 777, 777, 777, 777, 777, 777, 777, 777, 777, 777, 777, 777, 777, 777, 777, 777, 777, 777, 777, 777, 777, 777, 777, 777, 777, 777, 777, 777, 777, 777, 777, 777, 777, 777, 777, 777, 777, 777, 777, 777, 777, 777, 777, 777, 777, 777, 777, 777, 777, 777, 777, 777, 777, 777, 777, 777, 777, 777, 777, 777, 777, 777, 777, 777, 777, 777, 777, 777, 777, 777, 777, 777, 777, 777, 777, 777, 777, 777, 777, 777, 777, 777, 777, 777, 777, 777, 777, 777, 777, 777, 777, 777, 777, 777, 777, 777, 777, 777, 777, 777, 777, 777, 777, 777, 777, 777, 777, 777, 777, 777, 777, 777, 777, 777, 777, 777, 777, 777, 777, 777, 777, 777, 777, 777, 777, 777, 777, 777, 777, 777, 777, 777, 777, 777, 777, 777, 777, 777, 777, 777, 777, 777, 777, 777, 777, 777, 777, 777, 777, 777, 777, 777, 777, 777, 777, 777, 777, 777, 777, 777, 777, 777, 777, 777, 777, 777, 777, 777, 777, 777, 777, 777, 777, 777, 777, 777, 777, 777, 777, 777, 777, 777, 777, 777, 777, 777, 777, 777, 777, 777, 777, 777, 777, 777, 777, 777, 777, 777, 777, 777, 777, 777, 777, 777, 777, 777, 777, 777, 777, 777, 777, 777, 777, 777, 777, 777, 777, 777, 777, 777, 777, 777, 777, 777, 777, 777, 777, 777, 777, 777, 777, 777, 777, 777, 777, 777, 777, 777, 777, 777, 777, 777, 777, 777, 777, 777, 777, 777, 777, 777, 777}, + // 165 + {776, 776, 776, 776, 776, 776, 776, 776, 776, 776, 776, 776, 776, 776, 776, 776, 776, 776, 776, 776, 776, 776, 776, 776, 776, 776, 776, 776, 776, 776, 776, 776, 776, 776, 776, 776, 776, 776, 776, 776, 776, 776, 776, 776, 776, 776, 776, 776, 776, 776, 776, 776, 776, 776, 776, 776, 776, 776, 776, 776, 776, 776, 776, 776, 776, 776, 776, 776, 776, 776, 776, 776, 776, 776, 776, 776, 776, 776, 776, 776, 776, 776, 776, 776, 776, 776, 776, 776, 776, 776, 776, 776, 776, 776, 776, 776, 776, 776, 776, 776, 776, 776, 776, 776, 776, 776, 776, 776, 776, 776, 776, 776, 776, 776, 776, 776, 776, 776, 776, 776, 776, 776, 776, 776, 776, 776, 776, 776, 776, 776, 776, 776, 776, 776, 776, 776, 776, 776, 776, 776, 776, 776, 776, 776, 776, 776, 776, 776, 776, 776, 776, 776, 776, 776, 776, 776, 776, 776, 776, 776, 776, 776, 776, 776, 776, 776, 776, 776, 776, 776, 776, 776, 776, 776, 776, 776, 776, 776, 776, 776, 776, 776, 776, 776, 776, 776, 776, 776, 776, 776, 776, 776, 776, 776, 776, 776, 776, 776, 776, 776, 776, 776, 776, 776, 776, 776, 776, 776, 776, 776, 776, 776, 776, 776, 776, 776, 776, 776, 776, 776, 776, 776, 776, 776, 776, 776, 776, 776, 776, 776, 776, 776, 776, 776, 776, 776, 776, 776, 776, 776, 776, 776, 776, 776, 776, 776, 776, 776, 776, 776, 776, 776, 776, 776, 776, 776, 776, 776, 776, 776, 776, 776, 776, 776, 776, 776, 776, 776, 776, 776, 776, 776, 776, 776, 776, 776, 776, 776, 776, 776, 776, 776, 776, 776, 776, 776, 776, 776, 776, 776, 776, 776, 776, 776, 776, 776, 776, 776, 776, 776, 776, 776, 776, 776, 776, 776, 776, 776, 776, 776, 776, 776, 776, 776, 776, 776, 776, 776, 776, 776, 776, 776, 776, 776, 776, 776, 776, 776, 776, 776, 776, 776, 776, 776, 776, 776, 776, 776, 776, 776, 776, 776, 776, 776, 776, 776, 776, 776, 776, 776, 776, 776, 776, 776, 776, 776, 776, 776, 776, 776, 776, 776, 776, 776, 776, 776, 776, 776, 776, 776, 776, 776, 776, 776, 776, 776, 776, 776, 776, 776, 776, 776, 776, 776, 776, 776, 776, 776, 776, 776, 776, 776, 776, 776, 776, 776, 776, 776, 776, 776, 776, 776, 776, 776, 776, 776, 776, 776, 776, 776, 776, 776, 776, 776, 776, 776, 776, 776, 776, 776, 776, 776, 776, 776, 776, 776, 776, 776, 776, 776, 776, 
776, 776, 776, 776, 776, 776, 776, 776, 776, 776, 776, 776, 776, 776, 776, 776, 776, 776, 776, 776, 776, 776, 776, 776, 776, 776, 776, 776, 776, 776, 776, 776, 776, 776, 776, 776, 776, 776, 776, 776, 776, 776, 776, 776, 776, 776, 776, 776, 776, 776, 776, 776, 776, 776, 776, 776, 776, 776, 776, 776, 776, 776, 776, 776, 776, 776, 776, 776, 776, 776, 776, 776, 776, 776, 776, 776, 776, 776, 776, 776, 776, 776, 776, 776, 776, 776, 776, 776, 776, 776, 776, 776}, + {775, 775, 775, 775, 775, 775, 775, 775, 775, 775, 775, 775, 775, 775, 775, 775, 775, 775, 775, 775, 775, 775, 775, 775, 775, 775, 775, 775, 775, 775, 775, 775, 775, 775, 775, 775, 775, 775, 775, 775, 775, 775, 775, 775, 775, 775, 775, 775, 775, 775, 775, 775, 775, 775, 775, 775, 775, 775, 775, 775, 775, 775, 775, 775, 775, 775, 775, 775, 775, 775, 775, 775, 775, 775, 775, 775, 775, 775, 775, 775, 775, 775, 775, 775, 775, 775, 775, 775, 775, 775, 775, 775, 775, 775, 775, 775, 775, 775, 775, 775, 775, 775, 775, 775, 775, 775, 775, 775, 775, 775, 775, 775, 775, 775, 775, 775, 775, 775, 775, 775, 775, 775, 775, 775, 775, 775, 775, 775, 775, 775, 775, 775, 775, 775, 775, 775, 775, 775, 775, 775, 775, 775, 775, 775, 775, 775, 775, 775, 775, 775, 775, 775, 775, 775, 775, 775, 775, 775, 775, 775, 775, 775, 775, 775, 775, 775, 775, 775, 775, 775, 775, 775, 775, 775, 775, 775, 775, 775, 775, 775, 775, 775, 775, 775, 775, 775, 775, 775, 775, 775, 775, 775, 775, 775, 775, 775, 775, 775, 775, 775, 775, 775, 775, 775, 775, 775, 775, 775, 775, 775, 775, 775, 775, 775, 775, 775, 775, 775, 775, 775, 775, 775, 775, 775, 775, 775, 775, 775, 775, 775, 775, 775, 775, 775, 775, 775, 775, 775, 775, 775, 775, 775, 775, 775, 775, 775, 775, 775, 775, 775, 775, 775, 775, 775, 775, 775, 775, 775, 775, 775, 775, 775, 775, 775, 775, 775, 775, 775, 775, 775, 775, 775, 775, 775, 775, 775, 775, 775, 775, 775, 775, 775, 775, 775, 775, 775, 775, 775, 775, 775, 775, 775, 775, 775, 775, 775, 775, 775, 775, 775, 775, 775, 775, 775, 775, 775, 775, 775, 775, 775, 775, 775, 775, 775, 775, 775, 775, 775, 775, 775, 775, 775, 775, 775, 775, 775, 775, 775, 775, 775, 775, 775, 775, 775, 775, 775, 775, 775, 775, 775, 775, 775, 775, 775, 775, 775, 775, 775, 775, 775, 775, 775, 775, 775, 775, 775, 775, 775, 775, 775, 775, 775, 775, 775, 775, 775, 775, 775, 775, 775, 775, 775, 775, 775, 775, 775, 775, 775, 775, 775, 775, 775, 775, 775, 775, 775, 775, 775, 775, 775, 775, 775, 775, 775, 775, 775, 775, 775, 775, 775, 775, 775, 775, 775, 775, 775, 775, 775, 775, 775, 775, 775, 775, 775, 775, 775, 775, 775, 775, 775, 775, 775, 775, 775, 775, 775, 775, 775, 775, 775, 775, 775, 775, 775, 775, 775, 775, 775, 775, 775, 775, 775, 775, 775, 775, 775, 775, 775, 775, 775, 775, 775, 775, 775, 775, 775, 775, 775, 775, 775, 775, 775, 775, 775, 775, 775, 775, 775, 775, 775, 775, 775, 775, 775, 775, 775, 775, 775, 775, 775, 775, 775, 775, 775, 775, 775, 775, 775, 775, 775, 775, 775, 775, 775, 775, 775, 775, 775, 775, 775, 775, 775, 775, 775, 775, 775, 775, 775, 775, 775, 775, 775, 775, 775, 775, 775, 775, 775, 775, 775, 775, 775, 775}, + {774, 774, 774, 774, 774, 774, 774, 774, 774, 774, 774, 774, 774, 774, 774, 774, 774, 774, 774, 774, 774, 774, 774, 774, 774, 774, 774, 774, 774, 774, 774, 774, 774, 774, 774, 774, 774, 774, 774, 774, 774, 774, 774, 774, 774, 774, 774, 774, 774, 774, 774, 774, 774, 774, 774, 774, 774, 774, 774, 774, 774, 774, 774, 774, 774, 774, 774, 774, 774, 774, 774, 774, 774, 774, 774, 774, 774, 774, 774, 774, 774, 774, 774, 774, 774, 774, 774, 774, 774, 774, 774, 774, 774, 774, 
774, 774, 774, 774, 774, 774, 774, 774, 774, 774, 774, 774, 774, 774, 774, 774, 774, 774, 774, 774, 774, 774, 774, 774, 774, 774, 774, 774, 774, 774, 774, 774, 774, 774, 774, 774, 774, 774, 774, 774, 774, 774, 774, 774, 774, 774, 774, 774, 774, 774, 774, 774, 774, 774, 774, 774, 774, 774, 774, 774, 774, 774, 774, 774, 774, 774, 774, 774, 774, 774, 774, 774, 774, 774, 774, 774, 774, 774, 774, 774, 774, 774, 774, 774, 774, 774, 774, 774, 774, 774, 774, 774, 774, 774, 774, 774, 774, 774, 774, 774, 774, 774, 774, 774, 774, 774, 774, 774, 774, 774, 774, 774, 774, 774, 774, 774, 774, 774, 774, 774, 774, 774, 774, 774, 774, 774, 774, 774, 774, 774, 774, 774, 774, 774, 774, 774, 774, 774, 774, 774, 774, 774, 774, 774, 774, 774, 774, 774, 774, 774, 774, 774, 774, 774, 774, 774, 774, 774, 774, 774, 774, 774, 774, 774, 774, 774, 774, 774, 774, 774, 774, 774, 774, 774, 774, 774, 774, 774, 774, 774, 774, 774, 774, 774, 774, 774, 774, 774, 774, 774, 774, 774, 774, 774, 774, 774, 774, 774, 774, 774, 774, 774, 774, 774, 774, 774, 774, 774, 774, 774, 774, 774, 774, 774, 774, 774, 774, 774, 774, 774, 774, 774, 774, 774, 774, 774, 774, 774, 774, 774, 774, 774, 774, 774, 774, 774, 774, 774, 774, 774, 774, 774, 774, 774, 774, 774, 774, 774, 774, 774, 774, 774, 774, 774, 774, 774, 774, 774, 774, 774, 774, 774, 774, 774, 774, 774, 774, 774, 774, 774, 774, 774, 774, 774, 774, 774, 774, 774, 774, 774, 774, 774, 774, 774, 774, 774, 774, 774, 774, 774, 774, 774, 774, 774, 774, 774, 774, 774, 774, 774, 774, 774, 774, 774, 774, 774, 774, 774, 774, 774, 774, 774, 774, 774, 774, 774, 774, 774, 774, 774, 774, 774, 774, 774, 774, 774, 774, 774, 774, 774, 774, 774, 774, 774, 774, 774, 774, 774, 774, 774, 774, 774, 774, 774, 774, 774, 774, 774, 774, 774, 774, 774, 774, 774, 774, 774, 774, 774, 774, 774, 774, 774, 774, 774, 774, 774, 774, 774, 774, 774, 774, 774, 774, 774, 774, 774, 774, 774, 774, 774, 774, 774, 774, 774, 774, 774, 774, 774, 774, 774, 774, 774, 774, 774, 774, 774, 774, 774, 774, 774, 774, 774, 774, 774, 774, 774, 774, 774, 774, 774, 774, 774, 774, 774, 774, 774, 774, 774, 774, 774, 774, 774, 774, 774, 774, 774, 774, 774, 774}, + {773, 773, 773, 773, 773, 773, 773, 773, 773, 773, 773, 773, 773, 773, 773, 773, 773, 773, 773, 773, 773, 773, 773, 773, 773, 773, 773, 773, 773, 773, 773, 773, 773, 773, 773, 773, 773, 773, 773, 773, 773, 773, 773, 773, 773, 773, 773, 773, 773, 773, 773, 773, 773, 773, 773, 773, 773, 773, 773, 773, 773, 773, 773, 773, 773, 773, 773, 773, 773, 773, 773, 773, 773, 773, 773, 773, 773, 773, 773, 773, 773, 773, 773, 773, 773, 773, 773, 773, 773, 773, 773, 773, 773, 773, 773, 773, 773, 773, 773, 773, 773, 773, 773, 773, 773, 773, 773, 773, 773, 773, 773, 773, 773, 773, 773, 773, 773, 773, 773, 773, 773, 773, 773, 773, 773, 773, 773, 773, 773, 773, 773, 773, 773, 773, 773, 773, 773, 773, 773, 773, 773, 773, 773, 773, 773, 773, 773, 773, 773, 773, 773, 773, 773, 773, 773, 773, 773, 773, 773, 773, 773, 773, 773, 773, 773, 773, 773, 773, 773, 773, 773, 773, 773, 773, 773, 773, 773, 773, 773, 773, 773, 773, 773, 773, 773, 773, 773, 773, 773, 773, 773, 773, 773, 773, 773, 773, 773, 773, 773, 773, 773, 773, 773, 773, 773, 773, 773, 773, 773, 773, 773, 773, 773, 773, 773, 773, 773, 773, 773, 773, 773, 773, 773, 773, 773, 773, 773, 773, 773, 773, 773, 773, 773, 773, 773, 773, 773, 773, 773, 773, 773, 773, 773, 773, 773, 773, 773, 773, 773, 773, 773, 773, 773, 773, 773, 773, 773, 773, 773, 773, 773, 773, 773, 773, 773, 773, 773, 773, 773, 773, 773, 773, 773, 773, 773, 773, 773, 773, 773, 773, 773, 
773, 773, 773, 773, 773, 773, 773, 773, 773, 773, 773, 773, 773, 773, 773, 773, 773, 773, 773, 773, 773, 773, 773, 773, 773, 773, 773, 773, 773, 773, 773, 773, 773, 773, 773, 773, 773, 773, 773, 773, 773, 773, 773, 773, 773, 773, 773, 773, 773, 773, 773, 773, 773, 773, 773, 773, 773, 773, 773, 773, 773, 773, 773, 773, 773, 773, 773, 773, 773, 773, 773, 773, 773, 773, 773, 773, 773, 773, 773, 773, 773, 773, 773, 773, 773, 773, 773, 773, 773, 773, 773, 773, 773, 773, 773, 773, 773, 773, 773, 773, 773, 773, 773, 773, 773, 773, 773, 773, 773, 773, 773, 773, 773, 773, 773, 773, 773, 773, 773, 773, 773, 773, 773, 773, 773, 773, 773, 773, 773, 773, 773, 773, 773, 773, 773, 773, 773, 773, 773, 773, 773, 773, 773, 773, 773, 773, 773, 773, 773, 773, 773, 773, 773, 773, 773, 773, 773, 773, 773, 773, 773, 773, 773, 773, 773, 773, 773, 773, 773, 773, 773, 773, 773, 773, 773, 773, 773, 773, 773, 773, 773, 773, 773, 773, 773, 773, 773, 773, 773, 773, 773, 773, 773, 773, 773, 773, 773, 773, 773, 773, 773, 773, 773, 773, 773, 773, 773, 773, 773, 773, 773, 773, 773, 773, 773, 773, 773, 773, 773, 773, 773, 773, 773, 773, 773, 773, 773, 773, 773, 773, 773, 773, 773, 773, 773, 773, 773, 773, 773, 773, 773, 773}, + {772, 772, 772, 772, 772, 772, 772, 772, 772, 772, 772, 772, 772, 772, 772, 772, 772, 772, 772, 772, 772, 772, 772, 772, 772, 772, 772, 772, 772, 772, 772, 772, 772, 772, 772, 772, 772, 772, 772, 772, 772, 772, 772, 772, 772, 772, 772, 772, 772, 772, 772, 772, 772, 772, 772, 772, 772, 772, 772, 772, 772, 772, 772, 772, 772, 772, 772, 772, 772, 772, 772, 772, 772, 772, 772, 772, 772, 772, 772, 772, 772, 772, 772, 772, 772, 772, 772, 772, 772, 772, 772, 772, 772, 772, 772, 772, 772, 772, 772, 772, 772, 772, 772, 772, 772, 772, 772, 772, 772, 772, 772, 772, 772, 772, 772, 772, 772, 772, 772, 772, 772, 772, 772, 772, 772, 772, 772, 772, 772, 772, 772, 772, 772, 772, 772, 772, 772, 772, 772, 772, 772, 772, 772, 772, 772, 772, 772, 772, 772, 772, 772, 772, 772, 772, 772, 772, 772, 772, 772, 772, 772, 772, 772, 772, 772, 772, 772, 772, 772, 772, 772, 772, 772, 772, 772, 772, 772, 772, 772, 772, 772, 772, 772, 772, 772, 772, 772, 772, 772, 772, 772, 772, 772, 772, 772, 772, 772, 772, 772, 772, 772, 772, 772, 772, 772, 772, 772, 772, 772, 772, 772, 772, 772, 772, 772, 772, 772, 772, 772, 772, 772, 772, 772, 772, 772, 772, 772, 772, 772, 772, 772, 772, 772, 772, 772, 772, 772, 772, 772, 772, 772, 772, 772, 772, 772, 772, 772, 772, 772, 772, 772, 772, 772, 772, 772, 772, 772, 772, 772, 772, 772, 772, 772, 772, 772, 772, 772, 772, 772, 772, 772, 772, 772, 772, 772, 772, 772, 772, 772, 772, 772, 772, 772, 772, 772, 772, 772, 772, 772, 772, 772, 772, 772, 772, 772, 772, 772, 772, 772, 772, 772, 772, 772, 772, 772, 772, 772, 772, 772, 772, 772, 772, 772, 772, 772, 772, 772, 772, 772, 772, 772, 772, 772, 772, 772, 772, 772, 772, 772, 772, 772, 772, 772, 772, 772, 772, 772, 772, 772, 772, 772, 772, 772, 772, 772, 772, 772, 772, 772, 772, 772, 772, 772, 772, 772, 772, 772, 772, 772, 772, 772, 772, 772, 772, 772, 772, 772, 772, 772, 772, 772, 772, 772, 772, 772, 772, 772, 772, 772, 772, 772, 772, 772, 772, 772, 772, 772, 772, 772, 772, 772, 772, 772, 772, 772, 772, 772, 772, 772, 772, 772, 772, 772, 772, 772, 772, 772, 772, 772, 772, 772, 772, 772, 772, 772, 772, 772, 772, 772, 772, 772, 772, 772, 772, 772, 772, 772, 772, 772, 772, 772, 772, 772, 772, 772, 772, 772, 772, 772, 772, 772, 772, 772, 772, 772, 772, 772, 772, 772, 772, 772, 772, 772, 772, 772, 772, 772, 772, 772, 772, 772, 772, 772, 772, 772, 772, 772, 772, 
772, 772, 772, 772, 772, 772, 772, 772, 772, 772, 772, 772, 772, 772, 772, 772, 772, 772, 772, 772, 772, 772, 772, 772, 772, 772, 772, 772, 772, 772, 772, 772, 772, 772, 772, 772, 772, 772, 772, 772, 772, 772, 772, 772, 772, 772, 772, 772, 772, 772, 772, 772, 772, 772, 772}, + // 170 + {771, 771, 771, 771, 771, 771, 771, 771, 771, 771, 771, 771, 771, 771, 771, 771, 771, 771, 771, 771, 771, 771, 771, 771, 771, 771, 771, 771, 771, 771, 771, 771, 771, 771, 771, 771, 771, 771, 771, 771, 771, 771, 771, 771, 771, 771, 771, 771, 771, 771, 771, 771, 771, 771, 771, 771, 771, 771, 771, 771, 771, 771, 771, 771, 771, 771, 771, 771, 771, 771, 771, 771, 771, 771, 771, 771, 771, 771, 771, 771, 771, 771, 771, 771, 771, 771, 771, 771, 771, 771, 771, 771, 771, 771, 771, 771, 771, 771, 771, 771, 771, 771, 771, 771, 771, 771, 771, 771, 771, 771, 771, 771, 771, 771, 771, 771, 771, 771, 771, 771, 771, 771, 771, 771, 771, 771, 771, 771, 771, 771, 771, 771, 771, 771, 771, 771, 771, 771, 771, 771, 771, 771, 771, 771, 771, 771, 771, 771, 771, 771, 771, 771, 771, 771, 771, 771, 771, 771, 771, 771, 771, 771, 771, 771, 771, 771, 771, 771, 771, 771, 771, 771, 771, 771, 771, 771, 771, 771, 771, 771, 771, 771, 771, 771, 771, 771, 771, 771, 771, 771, 771, 771, 771, 771, 771, 771, 771, 771, 771, 771, 771, 771, 771, 771, 771, 771, 771, 771, 771, 771, 771, 771, 771, 771, 771, 771, 771, 771, 771, 771, 771, 771, 771, 771, 771, 771, 771, 771, 771, 771, 771, 771, 771, 771, 771, 771, 771, 771, 771, 771, 771, 771, 771, 771, 771, 771, 771, 771, 771, 771, 771, 771, 771, 771, 771, 771, 771, 771, 771, 771, 771, 771, 771, 771, 771, 771, 771, 771, 771, 771, 771, 771, 771, 771, 771, 771, 771, 771, 771, 771, 771, 771, 771, 771, 771, 771, 771, 771, 771, 771, 771, 771, 771, 771, 771, 771, 771, 771, 771, 771, 771, 771, 771, 771, 771, 771, 771, 771, 771, 771, 771, 771, 771, 771, 771, 771, 771, 771, 771, 771, 771, 771, 771, 771, 771, 771, 771, 771, 771, 771, 771, 771, 771, 771, 771, 771, 771, 771, 771, 771, 771, 771, 771, 771, 771, 771, 771, 771, 771, 771, 771, 771, 771, 771, 771, 771, 771, 771, 771, 771, 771, 771, 771, 771, 771, 771, 771, 771, 771, 771, 771, 771, 771, 771, 771, 771, 771, 771, 771, 771, 771, 771, 771, 771, 771, 771, 771, 771, 771, 771, 771, 771, 771, 771, 771, 771, 771, 771, 771, 771, 771, 771, 771, 771, 771, 771, 771, 771, 771, 771, 771, 771, 771, 771, 771, 771, 771, 771, 771, 771, 771, 771, 771, 771, 771, 771, 771, 771, 771, 771, 771, 771, 771, 771, 771, 771, 771, 771, 771, 771, 771, 771, 771, 771, 771, 771, 771, 771, 771, 771, 771, 771, 771, 771, 771, 771, 771, 771, 771, 771, 771, 771, 771, 771, 771, 771, 771, 771, 771, 771, 771, 771, 771, 771, 771, 771, 771, 771, 771, 771, 771, 771, 771, 771, 771, 771, 771, 771, 771, 771, 771, 771, 771, 771, 771, 771, 771, 771, 771, 771, 771, 771, 771, 771, 771, 771, 771, 771, 771, 771, 771, 771, 771, 771, 771, 771, 771, 771, 771, 771, 771, 771, 771}, + {770, 770, 770, 770, 770, 770, 770, 770, 770, 770, 770, 770, 770, 770, 770, 770, 770, 770, 770, 770, 770, 770, 770, 770, 770, 770, 770, 770, 770, 770, 770, 770, 770, 770, 770, 770, 770, 770, 770, 770, 770, 770, 770, 770, 770, 770, 770, 770, 770, 770, 770, 770, 770, 770, 770, 770, 770, 770, 770, 770, 770, 770, 770, 770, 770, 770, 770, 770, 770, 770, 770, 770, 770, 770, 770, 770, 770, 770, 770, 770, 770, 770, 770, 770, 770, 770, 770, 770, 770, 770, 770, 770, 770, 770, 770, 770, 770, 770, 770, 770, 770, 770, 770, 770, 770, 770, 770, 770, 770, 770, 770, 770, 770, 770, 770, 770, 770, 770, 770, 770, 770, 770, 770, 770, 770, 770, 770, 770, 770, 
770, 770, 770, 770, 770, 770, 770, 770, 770, 770, 770, 770, 770, 770, 770, 770, 770, 770, 770, 770, 770, 770, 770, 770, 770, 770, 770, 770, 770, 770, 770, 770, 770, 770, 770, 770, 770, 770, 770, 770, 770, 770, 770, 770, 770, 770, 770, 770, 770, 770, 770, 770, 770, 770, 770, 770, 770, 770, 770, 770, 770, 770, 770, 770, 770, 770, 770, 770, 770, 770, 770, 770, 770, 770, 770, 770, 770, 770, 770, 770, 770, 770, 770, 770, 770, 770, 770, 770, 770, 770, 770, 770, 770, 770, 770, 770, 770, 770, 770, 770, 770, 770, 770, 770, 770, 770, 770, 770, 770, 770, 770, 770, 770, 770, 770, 770, 770, 770, 770, 770, 770, 770, 770, 770, 770, 770, 770, 770, 770, 770, 770, 770, 770, 770, 770, 770, 770, 770, 770, 770, 770, 770, 770, 770, 770, 770, 770, 770, 770, 770, 770, 770, 770, 770, 770, 770, 770, 770, 770, 770, 770, 770, 770, 770, 770, 770, 770, 770, 770, 770, 770, 770, 770, 770, 770, 770, 770, 770, 770, 770, 770, 770, 770, 770, 770, 770, 770, 770, 770, 770, 770, 770, 770, 770, 770, 770, 770, 770, 770, 770, 770, 770, 770, 770, 770, 770, 770, 770, 770, 770, 770, 770, 770, 770, 770, 770, 770, 770, 770, 770, 770, 770, 770, 770, 770, 770, 770, 770, 770, 770, 770, 770, 770, 770, 770, 770, 770, 770, 770, 770, 770, 770, 770, 770, 770, 770, 770, 770, 770, 770, 770, 770, 770, 770, 770, 770, 770, 770, 770, 770, 770, 770, 770, 770, 770, 770, 770, 770, 770, 770, 770, 770, 770, 770, 770, 770, 770, 770, 770, 770, 770, 770, 770, 770, 770, 770, 770, 770, 770, 770, 770, 770, 770, 770, 770, 770, 770, 770, 770, 770, 770, 770, 770, 770, 770, 770, 770, 770, 770, 770, 770, 770, 770, 770, 770, 770, 770, 770, 770, 770, 770, 770, 770, 770, 770, 770, 770, 770, 770, 770, 770, 770, 770, 770, 770, 770, 770, 770, 770, 770, 770, 770, 770, 770, 770, 770, 770, 770, 770, 770, 770, 770, 770, 770, 770, 770, 770, 770, 770, 770, 770, 770, 770, 770, 770, 770, 770, 770, 770, 770, 770, 770, 770, 770, 770, 770, 770, 770, 770, 770, 770, 770, 770, 770, 770, 770, 770, 770, 770, 770, 770, 770, 770, 770}, + {769, 769, 769, 769, 769, 769, 769, 769, 769, 769, 769, 769, 769, 769, 769, 769, 769, 769, 769, 769, 769, 769, 769, 769, 769, 769, 769, 769, 769, 769, 769, 769, 769, 769, 769, 769, 769, 769, 769, 769, 769, 769, 769, 769, 769, 769, 769, 769, 769, 769, 769, 769, 769, 769, 769, 769, 769, 769, 769, 769, 769, 769, 769, 769, 769, 769, 769, 769, 769, 769, 769, 769, 769, 769, 769, 769, 769, 769, 769, 769, 769, 769, 769, 769, 769, 769, 769, 769, 769, 769, 769, 769, 769, 769, 769, 769, 769, 769, 769, 769, 769, 769, 769, 769, 769, 769, 769, 769, 769, 769, 769, 769, 769, 769, 769, 769, 769, 769, 769, 769, 769, 769, 769, 769, 769, 769, 769, 769, 769, 769, 769, 769, 769, 769, 769, 769, 769, 769, 769, 769, 769, 769, 769, 769, 769, 769, 769, 769, 769, 769, 769, 769, 769, 769, 769, 769, 769, 769, 769, 769, 769, 769, 769, 769, 769, 769, 769, 769, 769, 769, 769, 769, 769, 769, 769, 769, 769, 769, 769, 769, 769, 769, 769, 769, 769, 769, 769, 769, 769, 769, 769, 769, 769, 769, 769, 769, 769, 769, 769, 769, 769, 769, 769, 769, 769, 769, 769, 769, 769, 769, 769, 769, 769, 769, 769, 769, 769, 769, 769, 769, 769, 769, 769, 769, 769, 769, 769, 769, 769, 769, 769, 769, 769, 769, 769, 769, 769, 769, 769, 769, 769, 769, 769, 769, 769, 769, 769, 769, 769, 769, 769, 769, 769, 769, 769, 769, 769, 769, 769, 769, 769, 769, 769, 769, 769, 769, 769, 769, 769, 769, 769, 769, 769, 769, 769, 769, 769, 769, 769, 769, 769, 769, 769, 769, 769, 769, 769, 769, 769, 769, 769, 769, 769, 769, 769, 769, 769, 769, 769, 769, 769, 769, 769, 769, 769, 769, 769, 769, 769, 769, 769, 769, 769, 769, 769, 769, 
769, 769, 769, 769, 769, 769, 769, 769, 769, 769, 769, 769, 769, 769, 769, 769, 769, 769, 769, 769, 769, 769, 769, 769, 769, 769, 769, 769, 769, 769, 769, 769, 769, 769, 769, 769, 769, 769, 769, 769, 769, 769, 769, 769, 769, 769, 769, 769, 769, 769, 769, 769, 769, 769, 769, 769, 769, 769, 769, 769, 769, 769, 769, 769, 769, 769, 769, 769, 769, 769, 769, 769, 769, 769, 769, 769, 769, 769, 769, 769, 769, 769, 769, 769, 769, 769, 769, 769, 769, 769, 769, 769, 769, 769, 769, 769, 769, 769, 769, 769, 769, 769, 769, 769, 769, 769, 769, 769, 769, 769, 769, 769, 769, 769, 769, 769, 769, 769, 769, 769, 769, 769, 769, 769, 769, 769, 769, 769, 769, 769, 769, 769, 769, 769, 769, 769, 769, 769, 769, 769, 769, 769, 769, 769, 769, 769, 769, 769, 769, 769, 769, 769, 769, 769, 769, 769, 769, 769, 769, 769, 769, 769, 769, 769, 769, 769, 769, 769, 769, 769, 769, 769, 769, 769, 769, 769, 769, 769, 769, 769, 769, 769, 769, 769, 769, 769, 769, 769, 769, 769, 769, 769, 769, 769, 769, 769, 769, 769, 769, 769, 769, 769, 769, 769, 769, 769, 769}, + {768, 768, 768, 768, 768, 768, 768, 768, 768, 768, 768, 768, 768, 768, 768, 768, 768, 768, 768, 768, 768, 768, 768, 768, 768, 768, 768, 768, 768, 768, 768, 768, 768, 768, 768, 768, 768, 768, 768, 768, 768, 768, 768, 768, 768, 768, 768, 768, 768, 768, 768, 768, 768, 768, 768, 768, 768, 768, 768, 768, 768, 768, 768, 768, 768, 768, 768, 768, 768, 768, 768, 768, 768, 768, 768, 768, 768, 768, 768, 768, 768, 768, 768, 768, 768, 768, 768, 768, 768, 768, 768, 768, 768, 768, 768, 768, 768, 768, 768, 768, 768, 768, 768, 768, 768, 768, 768, 768, 768, 768, 768, 768, 768, 768, 768, 768, 768, 768, 768, 768, 768, 768, 768, 768, 768, 768, 768, 768, 768, 768, 768, 768, 768, 768, 768, 768, 768, 768, 768, 768, 768, 768, 768, 768, 768, 768, 768, 768, 768, 768, 768, 768, 768, 768, 768, 768, 768, 768, 768, 768, 768, 768, 768, 768, 768, 768, 768, 768, 768, 768, 768, 768, 768, 768, 768, 768, 768, 768, 768, 768, 768, 768, 768, 768, 768, 768, 768, 768, 768, 768, 768, 768, 768, 768, 768, 768, 768, 768, 768, 768, 768, 768, 768, 768, 768, 768, 768, 768, 768, 768, 768, 768, 768, 768, 768, 768, 768, 768, 768, 768, 768, 768, 768, 768, 768, 768, 768, 768, 768, 768, 768, 768, 768, 768, 768, 768, 768, 768, 768, 768, 768, 768, 768, 768, 768, 768, 768, 768, 768, 768, 768, 768, 768, 768, 768, 768, 768, 768, 768, 768, 768, 768, 768, 768, 768, 768, 768, 768, 768, 768, 768, 768, 768, 768, 768, 768, 768, 768, 768, 768, 768, 768, 768, 768, 768, 768, 768, 768, 768, 768, 768, 768, 768, 768, 768, 768, 768, 768, 768, 768, 768, 768, 768, 768, 768, 768, 768, 768, 768, 768, 768, 768, 768, 768, 768, 768, 768, 768, 768, 768, 768, 768, 768, 768, 768, 768, 768, 768, 768, 768, 768, 768, 768, 768, 768, 768, 768, 768, 768, 768, 768, 768, 768, 768, 768, 768, 768, 768, 768, 768, 768, 768, 768, 768, 768, 768, 768, 768, 768, 768, 768, 768, 768, 768, 768, 768, 768, 768, 768, 768, 768, 768, 768, 768, 768, 768, 768, 768, 768, 768, 768, 768, 768, 768, 768, 768, 768, 768, 768, 768, 768, 768, 768, 768, 768, 768, 768, 768, 768, 768, 768, 768, 768, 768, 768, 768, 768, 768, 768, 768, 768, 768, 768, 768, 768, 768, 768, 768, 768, 768, 768, 768, 768, 768, 768, 768, 768, 768, 768, 768, 768, 768, 768, 768, 768, 768, 768, 768, 768, 768, 768, 768, 768, 768, 768, 768, 768, 768, 768, 768, 768, 768, 768, 768, 768, 768, 768, 768, 768, 768, 768, 768, 768, 768, 768, 768, 768, 768, 768, 768, 768, 768, 768, 768, 768, 768, 768, 768, 768, 768, 768, 768, 768, 768, 768, 768, 768, 768, 768, 768, 768, 768, 768, 768, 768, 768, 768, 768, 768, 768, 768, 768, 768, 
768, 768, 768, 768, 768, 768, 768, 768, 768, 768, 768, 768, 768, 768, 768, 768, 768, 768, 768, 768}, + {767, 767, 767, 767, 767, 767, 767, 767, 767, 767, 767, 767, 767, 767, 767, 767, 767, 767, 767, 767, 767, 767, 767, 767, 767, 767, 767, 767, 767, 767, 767, 767, 767, 767, 767, 767, 767, 767, 767, 767, 767, 767, 767, 767, 767, 767, 767, 767, 767, 767, 767, 767, 767, 767, 767, 767, 767, 767, 767, 767, 767, 767, 767, 767, 767, 767, 767, 767, 767, 767, 767, 767, 767, 767, 767, 767, 767, 767, 767, 767, 767, 767, 767, 767, 767, 767, 767, 767, 767, 767, 767, 767, 767, 767, 767, 767, 767, 767, 767, 767, 767, 767, 767, 767, 767, 767, 767, 767, 767, 767, 767, 767, 767, 767, 767, 767, 767, 767, 767, 767, 767, 767, 767, 767, 767, 767, 767, 767, 767, 767, 767, 767, 767, 767, 767, 767, 767, 767, 767, 767, 767, 767, 767, 767, 767, 767, 767, 767, 767, 767, 767, 767, 767, 767, 767, 767, 767, 767, 767, 767, 767, 767, 767, 767, 767, 767, 767, 767, 767, 767, 767, 767, 767, 767, 767, 767, 767, 767, 767, 767, 767, 767, 767, 767, 767, 767, 767, 767, 767, 767, 767, 767, 767, 767, 767, 767, 767, 767, 767, 767, 767, 767, 767, 767, 767, 767, 767, 767, 767, 767, 767, 767, 767, 767, 767, 767, 767, 767, 767, 767, 767, 767, 767, 767, 767, 767, 767, 767, 767, 767, 767, 767, 767, 767, 767, 767, 767, 767, 767, 767, 767, 767, 767, 767, 767, 767, 767, 767, 767, 767, 767, 767, 767, 767, 767, 767, 767, 767, 767, 767, 767, 767, 767, 767, 767, 767, 767, 767, 767, 767, 767, 767, 767, 767, 767, 767, 767, 767, 767, 767, 767, 767, 767, 767, 767, 767, 767, 767, 767, 767, 767, 767, 767, 767, 767, 767, 767, 767, 767, 767, 767, 767, 767, 767, 767, 767, 767, 767, 767, 767, 767, 767, 767, 767, 767, 767, 767, 767, 767, 767, 767, 767, 767, 767, 767, 767, 767, 767, 767, 767, 767, 767, 767, 767, 767, 767, 767, 767, 767, 767, 767, 767, 767, 767, 767, 767, 767, 767, 767, 767, 767, 767, 767, 767, 767, 767, 767, 767, 767, 767, 767, 767, 767, 767, 767, 767, 767, 767, 767, 767, 767, 767, 767, 767, 767, 767, 767, 767, 767, 767, 767, 767, 767, 767, 767, 767, 767, 767, 767, 767, 767, 767, 767, 767, 767, 767, 767, 767, 767, 767, 767, 767, 767, 767, 767, 767, 767, 767, 767, 767, 767, 767, 767, 767, 767, 767, 767, 767, 767, 767, 767, 767, 767, 767, 767, 767, 767, 767, 767, 767, 767, 767, 767, 767, 767, 767, 767, 767, 767, 767, 767, 767, 767, 767, 767, 767, 767, 767, 767, 767, 767, 767, 767, 767, 767, 767, 767, 767, 767, 767, 767, 767, 767, 767, 767, 767, 767, 767, 767, 767, 767, 767, 767, 767, 767, 767, 767, 767, 767, 767, 767, 767, 767, 767, 767, 767, 767, 767, 767, 767, 767, 767, 767, 767, 767, 767, 767, 767, 767, 767, 767, 767, 767, 767, 767, 767, 767, 767, 767, 767, 767, 767, 767, 767, 767, 767, 767, 767, 767, 767, 767, 767, 767}, + // 175 + {766, 766, 766, 766, 766, 766, 766, 766, 766, 766, 766, 766, 766, 766, 766, 766, 766, 766, 766, 766, 766, 766, 766, 766, 766, 766, 766, 766, 766, 766, 766, 766, 766, 766, 766, 766, 766, 766, 766, 766, 766, 766, 766, 766, 766, 766, 766, 766, 766, 766, 766, 766, 766, 766, 766, 766, 766, 766, 766, 766, 766, 766, 766, 766, 766, 766, 766, 766, 766, 766, 766, 766, 766, 766, 766, 766, 766, 766, 766, 766, 766, 766, 766, 766, 766, 766, 766, 766, 766, 766, 766, 766, 766, 766, 766, 766, 766, 766, 766, 766, 766, 766, 766, 766, 766, 766, 766, 766, 766, 766, 766, 766, 766, 766, 766, 766, 766, 766, 766, 766, 766, 766, 766, 766, 766, 766, 766, 766, 766, 766, 766, 766, 766, 766, 766, 766, 766, 766, 766, 766, 766, 766, 766, 766, 766, 766, 766, 766, 766, 766, 766, 766, 766, 766, 766, 766, 766, 766, 766, 766, 766, 766, 766, 766, 
766, 766, 766, 766, 766, 766, 766, 766, 766, 766, 766, 766, 766, 766, 766, 766, 766, 766, 766, 766, 766, 766, 766, 766, 766, 766, 766, 766, 766, 766, 766, 766, 766, 766, 766, 766, 766, 766, 766, 766, 766, 766, 766, 766, 766, 766, 766, 766, 766, 766, 766, 766, 766, 766, 766, 766, 766, 766, 766, 766, 766, 766, 766, 766, 766, 766, 766, 766, 766, 766, 766, 766, 766, 766, 766, 766, 766, 766, 766, 766, 766, 766, 766, 766, 766, 766, 766, 766, 766, 766, 766, 766, 766, 766, 766, 766, 766, 766, 766, 766, 766, 766, 766, 766, 766, 766, 766, 766, 766, 766, 766, 766, 766, 766, 766, 766, 766, 766, 766, 766, 766, 766, 766, 766, 766, 766, 766, 766, 766, 766, 766, 766, 766, 766, 766, 766, 766, 766, 766, 766, 766, 766, 766, 766, 766, 766, 766, 766, 766, 766, 766, 766, 766, 766, 766, 766, 766, 766, 766, 766, 766, 766, 766, 766, 766, 766, 766, 766, 766, 766, 766, 766, 766, 766, 766, 766, 766, 766, 766, 766, 766, 766, 766, 766, 766, 766, 766, 766, 766, 766, 766, 766, 766, 766, 766, 766, 766, 766, 766, 766, 766, 766, 766, 766, 766, 766, 766, 766, 766, 766, 766, 766, 766, 766, 766, 766, 766, 766, 766, 766, 766, 766, 766, 766, 766, 766, 766, 766, 766, 766, 766, 766, 766, 766, 766, 766, 766, 766, 766, 766, 766, 766, 766, 766, 766, 766, 766, 766, 766, 766, 766, 766, 766, 766, 766, 766, 766, 766, 766, 766, 766, 766, 766, 766, 766, 766, 766, 766, 766, 766, 766, 766, 766, 766, 766, 766, 766, 766, 766, 766, 766, 766, 766, 766, 766, 766, 766, 766, 766, 766, 766, 766, 766, 766, 766, 766, 766, 766, 766, 766, 766, 766, 766, 766, 766, 766, 766, 766, 766, 766, 766, 766, 766, 766, 766, 766, 766, 766, 766, 766, 766, 766, 766, 766, 766, 766, 766, 766, 766, 766, 766, 766, 766, 766, 766, 766, 766, 766, 766, 766, 766, 766, 766, 766, 766, 766, 766, 766, 766, 766, 766, 766, 766, 766, 766, 766, 766, 766, 766}, + {765, 765, 765, 765, 765, 765, 765, 765, 765, 765, 765, 765, 765, 765, 765, 765, 765, 765, 765, 765, 765, 765, 765, 765, 765, 765, 765, 765, 765, 765, 765, 765, 765, 765, 765, 765, 765, 765, 765, 765, 765, 765, 765, 765, 765, 765, 765, 765, 765, 765, 765, 765, 765, 765, 765, 765, 765, 765, 765, 765, 765, 765, 765, 765, 765, 765, 765, 765, 765, 765, 765, 765, 765, 765, 765, 765, 765, 765, 765, 765, 765, 765, 765, 765, 765, 765, 765, 765, 765, 765, 765, 765, 765, 765, 765, 765, 765, 765, 765, 765, 765, 765, 765, 765, 765, 765, 765, 765, 765, 765, 765, 765, 765, 765, 765, 765, 765, 765, 765, 765, 765, 765, 765, 765, 765, 765, 765, 765, 765, 765, 765, 765, 765, 765, 765, 765, 765, 765, 765, 765, 765, 765, 765, 765, 765, 765, 765, 765, 765, 765, 765, 765, 765, 765, 765, 765, 765, 765, 765, 765, 765, 765, 765, 765, 765, 765, 765, 765, 765, 765, 765, 765, 765, 765, 765, 765, 765, 765, 765, 765, 765, 765, 765, 765, 765, 765, 765, 765, 765, 765, 765, 765, 765, 765, 765, 765, 765, 765, 765, 765, 765, 765, 765, 765, 765, 765, 765, 765, 765, 765, 765, 765, 765, 765, 765, 765, 765, 765, 765, 765, 765, 765, 765, 765, 765, 765, 765, 765, 765, 765, 765, 765, 765, 765, 765, 765, 765, 765, 765, 765, 765, 765, 765, 765, 765, 765, 765, 765, 765, 765, 765, 765, 765, 765, 765, 765, 765, 765, 765, 765, 765, 765, 765, 765, 765, 765, 765, 765, 765, 765, 765, 765, 765, 765, 765, 765, 765, 765, 765, 765, 765, 765, 765, 765, 765, 765, 765, 765, 765, 765, 765, 765, 765, 765, 765, 765, 765, 765, 765, 765, 765, 765, 765, 765, 765, 765, 765, 765, 765, 765, 765, 765, 765, 765, 765, 765, 765, 765, 765, 765, 765, 765, 765, 765, 765, 765, 765, 765, 765, 765, 765, 765, 765, 765, 765, 765, 765, 765, 765, 765, 765, 765, 765, 765, 765, 765, 765, 765, 765, 765, 765, 
765, 765, 765, 765, 765, 765, 765, 765, 765, 765, 765, 765, 765, 765, 765, 765, 765, 765, 765, 765, 765, 765, 765, 765, 765, 765, 765, 765, 765, 765, 765, 765, 765, 765, 765, 765, 765, 765, 765, 765, 765, 765, 765, 765, 765, 765, 765, 765, 765, 765, 765, 765, 765, 765, 765, 765, 765, 765, 765, 765, 765, 765, 765, 765, 765, 765, 765, 765, 765, 765, 765, 765, 765, 765, 765, 765, 765, 765, 765, 765, 765, 765, 765, 765, 765, 765, 765, 765, 765, 765, 765, 765, 765, 765, 765, 765, 765, 765, 765, 765, 765, 765, 765, 765, 765, 765, 765, 765, 765, 765, 765, 765, 765, 765, 765, 765, 765, 765, 765, 765, 765, 765, 765, 765, 765, 765, 765, 765, 765, 765, 765, 765, 765, 765, 765, 765, 765, 765, 765, 765, 765, 765, 765, 765, 765, 765, 765, 765, 765, 765, 765, 765, 765, 765, 765, 765, 765, 765, 765, 765, 765, 765, 765, 765, 765, 765, 765, 765, 765, 765, 765, 765}, + {764, 764, 764, 764, 764, 764, 764, 764, 764, 764, 764, 764, 764, 764, 764, 764, 764, 764, 764, 764, 764, 764, 764, 764, 764, 764, 764, 764, 764, 764, 764, 764, 764, 764, 764, 764, 764, 764, 764, 764, 764, 764, 764, 764, 764, 764, 764, 764, 764, 764, 764, 764, 764, 764, 764, 764, 764, 764, 764, 764, 764, 764, 764, 764, 764, 764, 764, 764, 764, 764, 764, 764, 764, 764, 764, 764, 764, 764, 764, 764, 764, 764, 764, 764, 764, 764, 764, 764, 764, 764, 764, 764, 764, 764, 764, 764, 764, 764, 764, 764, 764, 764, 764, 764, 764, 764, 764, 764, 764, 764, 764, 764, 764, 764, 764, 764, 764, 764, 764, 764, 764, 764, 764, 764, 764, 764, 764, 764, 764, 764, 764, 764, 764, 764, 764, 764, 764, 764, 764, 764, 764, 764, 764, 764, 764, 764, 764, 764, 764, 764, 764, 764, 764, 764, 764, 764, 764, 764, 764, 764, 764, 764, 764, 764, 764, 764, 764, 764, 764, 764, 764, 764, 764, 764, 764, 764, 764, 764, 764, 764, 764, 764, 764, 764, 764, 764, 764, 764, 764, 764, 764, 764, 764, 764, 764, 764, 764, 764, 764, 764, 764, 764, 764, 764, 764, 764, 764, 764, 764, 764, 764, 764, 764, 764, 764, 764, 764, 764, 764, 764, 764, 764, 764, 764, 764, 764, 764, 764, 764, 764, 764, 764, 764, 764, 764, 764, 764, 764, 764, 764, 764, 764, 764, 764, 764, 764, 764, 764, 764, 764, 764, 764, 764, 764, 764, 764, 764, 764, 764, 764, 764, 764, 764, 764, 764, 764, 764, 764, 764, 764, 764, 764, 764, 764, 764, 764, 764, 764, 764, 764, 764, 764, 764, 764, 764, 764, 764, 764, 764, 764, 764, 764, 764, 764, 764, 764, 764, 764, 764, 764, 764, 764, 764, 764, 764, 764, 764, 764, 764, 764, 764, 764, 764, 764, 764, 764, 764, 764, 764, 764, 764, 764, 764, 764, 764, 764, 764, 764, 764, 764, 764, 764, 764, 764, 764, 764, 764, 764, 764, 764, 764, 764, 764, 764, 764, 764, 764, 764, 764, 764, 764, 764, 764, 764, 764, 764, 764, 764, 764, 764, 764, 764, 764, 764, 764, 764, 764, 764, 764, 764, 764, 764, 764, 764, 764, 764, 764, 764, 764, 764, 764, 764, 764, 764, 764, 764, 764, 764, 764, 764, 764, 764, 764, 764, 764, 764, 764, 764, 764, 764, 764, 764, 764, 764, 764, 764, 764, 764, 764, 764, 764, 764, 764, 764, 764, 764, 764, 764, 764, 764, 764, 764, 764, 764, 764, 764, 764, 764, 764, 764, 764, 764, 764, 764, 764, 764, 764, 764, 764, 764, 764, 764, 764, 764, 764, 764, 764, 764, 764, 764, 764, 764, 764, 764, 764, 764, 764, 764, 764, 764, 764, 764, 764, 764, 764, 764, 764, 764, 764, 764, 764, 764, 764, 764, 764, 764, 764, 764, 764, 764, 764, 764, 764, 764, 764, 764, 764, 764, 764, 764, 764, 764, 764, 764, 764, 764, 764, 764, 764, 764, 764, 764, 764, 764, 764, 764, 764, 764, 764, 764, 764, 764, 764, 764, 764, 764, 764, 764, 764, 764, 764, 764, 764}, + {763, 763, 763, 763, 763, 763, 763, 763, 763, 763, 763, 763, 763, 763, 
763, 763, 763, 763, 763, 763, 763, 763, 763, 763, 763, 763, 763, 763, 763, 763, 763, 763, 763, 763, 763, 763, 763, 763, 763, 763, 763, 763, 763, 763, 763, 763, 763, 763, 763, 763, 763, 763, 763, 763, 763, 763, 763, 763, 763, 763, 763, 763, 763, 763, 763, 763, 763, 763, 763, 763, 763, 763, 763, 763, 763, 763, 763, 763, 763, 763, 763, 763, 763, 763, 763, 763, 763, 763, 763, 763, 763, 763, 763, 763, 763, 763, 763, 763, 763, 763, 763, 763, 763, 763, 763, 763, 763, 763, 763, 763, 763, 763, 763, 763, 763, 763, 763, 763, 763, 763, 763, 763, 763, 763, 763, 763, 763, 763, 763, 763, 763, 763, 763, 763, 763, 763, 763, 763, 763, 763, 763, 763, 763, 763, 763, 763, 763, 763, 763, 763, 763, 763, 763, 763, 763, 763, 763, 763, 763, 763, 763, 763, 763, 763, 763, 763, 763, 763, 763, 763, 763, 763, 763, 763, 763, 763, 763, 763, 763, 763, 763, 763, 763, 763, 763, 763, 763, 763, 763, 763, 763, 763, 763, 763, 763, 763, 763, 763, 763, 763, 763, 763, 763, 763, 763, 763, 763, 763, 763, 763, 763, 763, 763, 763, 763, 763, 763, 763, 763, 763, 763, 763, 763, 763, 763, 763, 763, 763, 763, 763, 763, 763, 763, 763, 763, 763, 763, 763, 763, 763, 763, 763, 763, 763, 763, 763, 763, 763, 763, 763, 763, 763, 763, 763, 763, 763, 763, 763, 763, 763, 763, 763, 763, 763, 763, 763, 763, 763, 763, 763, 763, 763, 763, 763, 763, 763, 763, 763, 763, 763, 763, 763, 763, 763, 763, 763, 763, 763, 763, 763, 763, 763, 763, 763, 763, 763, 763, 763, 763, 763, 763, 763, 763, 763, 763, 763, 763, 763, 763, 763, 763, 763, 763, 763, 763, 763, 763, 763, 763, 763, 763, 763, 763, 763, 763, 763, 763, 763, 763, 763, 763, 763, 763, 763, 763, 763, 763, 763, 763, 763, 763, 763, 763, 763, 763, 763, 763, 763, 763, 763, 763, 763, 763, 763, 763, 763, 763, 763, 763, 763, 763, 763, 763, 763, 763, 763, 763, 763, 763, 763, 763, 763, 763, 763, 763, 763, 763, 763, 763, 763, 763, 763, 763, 763, 763, 763, 763, 763, 763, 763, 763, 763, 763, 763, 763, 763, 763, 763, 763, 763, 763, 763, 763, 763, 763, 763, 763, 763, 763, 763, 763, 763, 763, 763, 763, 763, 763, 763, 763, 763, 763, 763, 763, 763, 763, 763, 763, 763, 763, 763, 763, 763, 763, 763, 763, 763, 763, 763, 763, 763, 763, 763, 763, 763, 763, 763, 763, 763, 763, 763, 763, 763, 763, 763, 763, 763, 763, 763, 763, 763, 763, 763, 763, 763, 763, 763, 763, 763, 763, 763, 763, 763, 763, 763, 763, 763, 763, 763, 763, 763, 763, 763, 763, 763, 763, 763, 763, 763, 763, 763, 763, 763, 763, 763, 763, 763, 763, 763, 763, 763, 763, 763, 763, 763, 763, 763, 763, 763, 763, 763, 763, 763, 763, 763, 763, 763, 763, 763, 763, 763, 763, 763, 763}, + {762, 762, 762, 762, 762, 762, 762, 762, 762, 762, 762, 762, 762, 762, 762, 762, 762, 762, 762, 762, 762, 762, 762, 762, 762, 762, 762, 762, 762, 762, 762, 762, 762, 762, 762, 762, 762, 762, 762, 762, 762, 762, 762, 762, 762, 762, 762, 762, 762, 762, 762, 762, 762, 762, 762, 762, 762, 762, 762, 762, 762, 762, 762, 762, 762, 762, 762, 762, 762, 762, 762, 762, 762, 762, 762, 762, 762, 762, 762, 762, 762, 762, 762, 762, 762, 762, 762, 762, 762, 762, 762, 762, 762, 762, 762, 762, 762, 762, 762, 762, 762, 762, 762, 762, 762, 762, 762, 762, 762, 762, 762, 762, 762, 762, 762, 762, 762, 762, 762, 762, 762, 762, 762, 762, 762, 762, 762, 762, 762, 762, 762, 762, 762, 762, 762, 762, 762, 762, 762, 762, 762, 762, 762, 762, 762, 762, 762, 762, 762, 762, 762, 762, 762, 762, 762, 762, 762, 762, 762, 762, 762, 762, 762, 762, 762, 762, 762, 762, 762, 762, 762, 762, 762, 762, 762, 762, 762, 762, 762, 762, 762, 762, 762, 762, 762, 762, 762, 762, 762, 762, 762, 762, 762, 762, 762, 762, 762, 762, 762, 762, 762, 
762, 762, 762, 762, 762, 762, 762, 762, 762, 762, 762, 762, 762, 762, 762, 762, 762, 762, 762, 762, 762, 762, 762, 762, 762, 762, 762, 762, 762, 762, 762, 762, 762, 762, 762, 762, 762, 762, 762, 762, 762, 762, 762, 762, 762, 762, 762, 762, 762, 762, 762, 762, 762, 762, 762, 762, 762, 762, 762, 762, 762, 762, 762, 762, 762, 762, 762, 762, 762, 762, 762, 762, 762, 762, 762, 762, 762, 762, 762, 762, 762, 762, 762, 762, 762, 762, 762, 762, 762, 762, 762, 762, 762, 762, 762, 762, 762, 762, 762, 762, 762, 762, 762, 762, 762, 762, 762, 762, 762, 762, 762, 762, 762, 762, 762, 762, 762, 762, 762, 762, 762, 762, 762, 762, 762, 762, 762, 762, 762, 762, 762, 762, 762, 762, 762, 762, 762, 762, 762, 762, 762, 762, 762, 762, 762, 762, 762, 762, 762, 762, 762, 762, 762, 762, 762, 762, 762, 762, 762, 762, 762, 762, 762, 762, 762, 762, 762, 762, 762, 762, 762, 762, 762, 762, 762, 762, 762, 762, 762, 762, 762, 762, 762, 762, 762, 762, 762, 762, 762, 762, 762, 762, 762, 762, 762, 762, 762, 762, 762, 762, 762, 762, 762, 762, 762, 762, 762, 762, 762, 762, 762, 762, 762, 762, 762, 762, 762, 762, 762, 762, 762, 762, 762, 762, 762, 762, 762, 762, 762, 762, 762, 762, 762, 762, 762, 762, 762, 762, 762, 762, 762, 762, 762, 762, 762, 762, 762, 762, 762, 762, 762, 762, 762, 762, 762, 762, 762, 762, 762, 762, 762, 762, 762, 762, 762, 762, 762, 762, 762, 762, 762, 762, 762, 762, 762, 762, 762, 762, 762, 762, 762, 762, 762, 762, 762, 762, 762, 762, 762, 762, 762, 762, 762, 762, 762, 762, 762, 762, 762, 762, 762, 762, 762, 762, 762, 762, 762, 762, 762, 762, 762, 762, 762, 762, 762, 762, 762, 762, 762, 762, 762, 762}, + // 180 + {761, 761, 761, 761, 761, 761, 761, 761, 761, 761, 761, 761, 761, 761, 761, 761, 761, 761, 761, 761, 761, 761, 761, 761, 761, 761, 761, 761, 761, 761, 761, 761, 761, 761, 761, 761, 761, 761, 761, 761, 761, 761, 761, 761, 761, 761, 761, 761, 761, 761, 761, 761, 761, 761, 761, 761, 761, 761, 761, 761, 761, 761, 761, 761, 761, 761, 761, 761, 761, 761, 761, 761, 761, 761, 761, 761, 761, 761, 761, 761, 761, 761, 761, 761, 761, 761, 761, 761, 761, 761, 761, 761, 761, 761, 761, 761, 761, 761, 761, 761, 761, 761, 761, 761, 761, 761, 761, 761, 761, 761, 761, 761, 761, 761, 761, 761, 761, 761, 761, 761, 761, 761, 761, 761, 761, 761, 761, 761, 761, 761, 761, 761, 761, 761, 761, 761, 761, 761, 761, 761, 761, 761, 761, 761, 761, 761, 761, 761, 761, 761, 761, 761, 761, 761, 761, 761, 761, 761, 761, 761, 761, 761, 761, 761, 761, 761, 761, 761, 761, 761, 761, 761, 761, 761, 761, 761, 761, 761, 761, 761, 761, 761, 761, 761, 761, 761, 761, 761, 761, 761, 761, 761, 761, 761, 761, 761, 761, 761, 761, 761, 761, 761, 761, 761, 761, 761, 761, 761, 761, 761, 761, 761, 761, 761, 761, 761, 761, 761, 761, 761, 761, 761, 761, 761, 761, 761, 761, 761, 761, 761, 761, 761, 761, 761, 761, 761, 761, 761, 761, 761, 761, 761, 761, 761, 761, 761, 761, 761, 761, 761, 761, 761, 761, 761, 761, 761, 761, 761, 761, 761, 761, 761, 761, 761, 761, 761, 761, 761, 761, 761, 761, 761, 761, 761, 761, 761, 761, 761, 761, 761, 761, 761, 761, 761, 761, 761, 761, 761, 761, 761, 761, 761, 761, 761, 761, 761, 761, 761, 761, 761, 761, 761, 761, 761, 761, 761, 761, 761, 761, 761, 761, 761, 761, 761, 761, 761, 761, 761, 761, 761, 761, 761, 761, 761, 761, 761, 761, 761, 761, 761, 761, 761, 761, 761, 761, 761, 761, 761, 761, 761, 761, 761, 761, 761, 761, 761, 761, 761, 761, 761, 761, 761, 761, 761, 761, 761, 761, 761, 761, 761, 761, 761, 761, 761, 761, 761, 761, 761, 761, 761, 761, 761, 761, 761, 761, 761, 761, 761, 761, 761, 761, 761, 761, 761, 761, 761, 
761, 761, 761, 761, 761, 761, 761, 761, 761, 761, 761, 761, 761, 761, 761, 761, 761, 761, 761, 761, 761, 761, 761, 761, 761, 761, 761, 761, 761, 761, 761, 761, 761, 761, 761, 761, 761, 761, 761, 761, 761, 761, 761, 761, 761, 761, 761, 761, 761, 761, 761, 761, 761, 761, 761, 761, 761, 761, 761, 761, 761, 761, 761, 761, 761, 761, 761, 761, 761, 761, 761, 761, 761, 761, 761, 761, 761, 761, 761, 761, 761, 761, 761, 761, 761, 761, 761, 761, 761, 761, 761, 761, 761, 761, 761, 761, 761, 761, 761, 761, 761, 761, 761, 761, 761, 761, 761, 761, 761, 761, 761, 761, 761, 761, 761, 761, 761, 761, 761, 761, 761, 761, 761, 761, 761, 761, 761, 761, 761, 761, 761, 761, 761, 761, 761, 761, 761}, + {760, 760, 760, 760, 760, 760, 760, 760, 760, 760, 760, 760, 760, 760, 760, 760, 760, 760, 760, 760, 760, 760, 760, 760, 760, 760, 760, 760, 760, 760, 760, 760, 760, 760, 760, 760, 760, 760, 760, 760, 760, 760, 760, 760, 760, 760, 760, 760, 760, 760, 760, 760, 760, 760, 760, 760, 760, 760, 760, 760, 760, 760, 760, 760, 760, 760, 760, 760, 760, 760, 760, 760, 760, 760, 760, 760, 760, 760, 760, 760, 760, 760, 760, 760, 760, 760, 760, 760, 760, 760, 760, 760, 760, 760, 760, 760, 760, 760, 760, 760, 760, 760, 760, 760, 760, 760, 760, 760, 760, 760, 760, 760, 760, 760, 760, 760, 760, 760, 760, 760, 760, 760, 760, 760, 760, 760, 760, 760, 760, 760, 760, 760, 760, 760, 760, 760, 760, 760, 760, 760, 760, 760, 760, 760, 760, 760, 760, 760, 760, 760, 760, 760, 760, 760, 760, 760, 760, 760, 760, 760, 760, 760, 760, 760, 760, 760, 760, 760, 760, 760, 760, 760, 760, 760, 760, 760, 760, 760, 760, 760, 760, 760, 760, 760, 760, 760, 760, 760, 760, 760, 760, 760, 760, 760, 760, 760, 760, 760, 760, 760, 760, 760, 760, 760, 760, 760, 760, 760, 760, 760, 760, 760, 760, 760, 760, 760, 760, 760, 760, 760, 760, 760, 760, 760, 760, 760, 760, 760, 760, 760, 760, 760, 760, 760, 760, 760, 760, 760, 760, 760, 760, 760, 760, 760, 760, 760, 760, 760, 760, 760, 760, 760, 760, 760, 760, 760, 760, 760, 760, 760, 760, 760, 760, 760, 760, 760, 760, 760, 760, 760, 760, 760, 760, 760, 760, 760, 760, 760, 760, 760, 760, 760, 760, 760, 760, 760, 760, 760, 760, 760, 760, 760, 760, 760, 760, 760, 760, 760, 760, 760, 760, 760, 760, 760, 760, 760, 760, 760, 760, 760, 760, 760, 760, 760, 760, 760, 760, 760, 760, 760, 760, 760, 760, 760, 760, 760, 760, 760, 760, 760, 760, 760, 760, 760, 760, 760, 760, 760, 760, 760, 760, 760, 760, 760, 760, 760, 760, 760, 760, 760, 760, 760, 760, 760, 760, 760, 760, 760, 760, 760, 760, 760, 760, 760, 760, 760, 760, 760, 760, 760, 760, 760, 760, 760, 760, 760, 760, 760, 760, 760, 760, 760, 760, 760, 760, 760, 760, 760, 760, 760, 760, 760, 760, 760, 760, 760, 760, 760, 760, 760, 760, 760, 760, 760, 760, 760, 760, 760, 760, 760, 760, 760, 760, 760, 760, 760, 760, 760, 760, 760, 760, 760, 760, 760, 760, 760, 760, 760, 760, 760, 760, 760, 760, 760, 760, 760, 760, 760, 760, 760, 760, 760, 760, 760, 760, 760, 760, 760, 760, 760, 760, 760, 760, 760, 760, 760, 760, 760, 760, 760, 760, 760, 760, 760, 760, 760, 760, 760, 760, 760, 760, 760, 760, 760, 760, 760, 760, 760, 760, 760, 760, 760, 760, 760, 760, 760, 760, 760, 760, 760, 760, 760, 760, 760, 760, 760, 760, 760, 760, 760, 760, 760, 760, 760, 760, 760, 760, 760, 760, 760, 760, 760, 760, 760, 760, 760, 760, 760, 760, 760, 760, 760, 760}, + {759, 759, 759, 759, 759, 759, 759, 759, 759, 759, 759, 759, 759, 759, 759, 759, 759, 759, 759, 759, 759, 759, 759, 759, 759, 759, 759, 759, 759, 759, 759, 759, 759, 759, 759, 759, 759, 759, 759, 759, 759, 759, 759, 759, 759, 759, 759, 759, 759, 
759, 759, 759, 759, 759, 759, 759, 759, 759, 759, 759, 759, 759, 759, 759, 759, 759, 759, 759, 759, 759, 759, 759, 759, 759, 759, 759, 759, 759, 759, 759, 759, 759, 759, 759, 759, 759, 759, 759, 759, 759, 759, 759, 759, 759, 759, 759, 759, 759, 759, 759, 759, 759, 759, 759, 759, 759, 759, 759, 759, 759, 759, 759, 759, 759, 759, 759, 759, 759, 759, 759, 759, 759, 759, 759, 759, 759, 759, 759, 759, 759, 759, 759, 759, 759, 759, 759, 759, 759, 759, 759, 759, 759, 759, 759, 759, 759, 759, 759, 759, 759, 759, 759, 759, 759, 759, 759, 759, 759, 759, 759, 759, 759, 759, 759, 759, 759, 759, 759, 759, 759, 759, 759, 759, 759, 759, 759, 759, 759, 759, 759, 759, 759, 759, 759, 759, 759, 759, 759, 759, 759, 759, 759, 759, 759, 759, 759, 759, 759, 759, 759, 759, 759, 759, 759, 759, 759, 759, 759, 759, 759, 759, 759, 759, 759, 759, 759, 759, 759, 759, 759, 759, 759, 759, 759, 759, 759, 759, 759, 759, 759, 759, 759, 759, 759, 759, 759, 759, 759, 759, 759, 759, 759, 759, 759, 759, 759, 759, 759, 759, 759, 759, 759, 759, 759, 759, 759, 759, 759, 759, 759, 759, 759, 759, 759, 759, 759, 759, 759, 759, 759, 759, 759, 759, 759, 759, 759, 759, 759, 759, 759, 759, 759, 759, 759, 759, 759, 759, 759, 759, 759, 759, 759, 759, 759, 759, 759, 759, 759, 759, 759, 759, 759, 759, 759, 759, 759, 759, 759, 759, 759, 759, 759, 759, 759, 759, 759, 759, 759, 759, 759, 759, 759, 759, 759, 759, 759, 759, 759, 759, 759, 759, 759, 759, 759, 759, 759, 759, 759, 759, 759, 759, 759, 759, 759, 759, 759, 759, 759, 759, 759, 759, 759, 759, 759, 759, 759, 759, 759, 759, 759, 759, 759, 759, 759, 759, 759, 759, 759, 759, 759, 759, 759, 759, 759, 759, 759, 759, 759, 759, 759, 759, 759, 759, 759, 759, 759, 759, 759, 759, 759, 759, 759, 759, 759, 759, 759, 759, 759, 759, 759, 759, 759, 759, 759, 759, 759, 759, 759, 759, 759, 759, 759, 759, 759, 759, 759, 759, 759, 759, 759, 759, 759, 759, 759, 759, 759, 759, 759, 759, 759, 759, 759, 759, 759, 759, 759, 759, 759, 759, 759, 759, 759, 759, 759, 759, 759, 759, 759, 759, 759, 759, 759, 759, 759, 759, 759, 759, 759, 759, 759, 759, 759, 759, 759, 759, 759, 759, 759, 759, 759, 759, 759, 759, 759, 759, 759, 759, 759, 759, 759, 759, 759, 759, 759, 759, 759, 759, 759, 759, 759, 759, 759, 759, 759, 759, 759, 759, 759, 759, 759, 759, 759, 759, 759, 759, 759, 759, 759, 759, 759, 759, 759, 759, 759, 759, 759, 759, 759, 759, 759, 759, 759, 759}, + {758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 
758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758}, + {757, 757, 757, 757, 757, 757, 757, 757, 757, 757, 757, 757, 757, 757, 757, 757, 757, 757, 757, 757, 757, 757, 757, 757, 757, 757, 757, 757, 757, 757, 757, 757, 757, 757, 757, 757, 757, 757, 757, 757, 757, 757, 757, 757, 757, 757, 757, 757, 757, 757, 757, 757, 757, 757, 757, 757, 757, 757, 757, 757, 757, 757, 757, 757, 757, 757, 757, 757, 757, 757, 757, 757, 757, 757, 757, 757, 757, 757, 757, 757, 757, 757, 757, 757, 757, 757, 757, 757, 757, 757, 757, 757, 757, 757, 757, 757, 757, 757, 757, 757, 757, 757, 757, 757, 757, 757, 757, 757, 757, 757, 757, 757, 757, 757, 757, 757, 757, 757, 757, 757, 757, 757, 757, 757, 757, 757, 757, 757, 757, 757, 757, 757, 757, 757, 757, 757, 757, 757, 757, 757, 757, 757, 757, 757, 757, 757, 757, 757, 757, 757, 757, 757, 757, 757, 757, 757, 757, 757, 757, 757, 757, 757, 757, 757, 757, 757, 757, 757, 757, 757, 757, 757, 757, 757, 757, 757, 757, 757, 757, 757, 757, 757, 757, 757, 757, 757, 757, 757, 757, 757, 757, 757, 757, 757, 757, 757, 757, 757, 757, 757, 757, 757, 757, 757, 757, 757, 757, 757, 757, 757, 757, 757, 757, 757, 757, 757, 757, 757, 757, 757, 757, 757, 757, 757, 757, 757, 757, 757, 757, 757, 757, 757, 757, 757, 757, 757, 757, 757, 757, 757, 757, 757, 757, 757, 757, 757, 757, 757, 757, 757, 757, 757, 757, 757, 757, 757, 757, 757, 757, 757, 757, 757, 757, 757, 757, 757, 757, 757, 757, 757, 757, 757, 757, 757, 757, 757, 757, 757, 757, 757, 757, 757, 757, 757, 757, 757, 757, 757, 757, 757, 757, 757, 757, 757, 757, 757, 757, 757, 757, 757, 757, 757, 757, 757, 757, 757, 757, 757, 757, 757, 757, 757, 757, 757, 757, 757, 757, 757, 757, 757, 757, 757, 757, 757, 757, 757, 757, 757, 757, 757, 757, 757, 757, 757, 757, 757, 757, 757, 757, 757, 757, 757, 757, 757, 757, 757, 757, 757, 757, 757, 757, 757, 757, 757, 757, 757, 757, 757, 757, 757, 757, 757, 757, 757, 757, 757, 757, 757, 757, 757, 757, 757, 757, 757, 757, 757, 757, 757, 757, 757, 757, 757, 757, 757, 757, 757, 757, 757, 757, 757, 757, 757, 757, 757, 757, 757, 757, 757, 757, 757, 757, 757, 757, 757, 757, 757, 757, 757, 757, 757, 757, 757, 757, 757, 757, 757, 757, 757, 757, 757, 757, 757, 757, 
757, 757, 757, 757, 757, 757, 757, 757, 757, 757, 757, 757, 757, 757, 757, 757, 757, 757, 757, 757, 757, 757, 757, 757, 757, 757, 757, 757, 757, 757, 757, 757, 757, 757, 757, 757, 757, 757, 757, 757, 757, 757, 757, 757, 757, 757, 757, 757, 757, 757, 757, 757, 757, 757, 757, 757, 757, 757, 757, 757, 757, 757, 757, 757, 757, 757, 757, 757, 757, 757, 757, 757, 757, 757, 757, 757, 757, 757, 757, 757, 757, 757, 757, 757, 757, 757, 757, 757, 757, 757, 757, 757, 757, 757, 757, 757, 757, 757, 757, 757}, + // 185 + {756, 756, 756, 756, 756, 756, 756, 756, 756, 756, 756, 756, 756, 756, 756, 756, 756, 756, 756, 756, 756, 756, 756, 756, 756, 756, 756, 756, 756, 756, 756, 756, 756, 756, 756, 756, 756, 756, 756, 756, 756, 756, 756, 756, 756, 756, 756, 756, 756, 756, 756, 756, 756, 756, 756, 756, 756, 756, 756, 756, 756, 756, 756, 756, 756, 756, 756, 756, 756, 756, 756, 756, 756, 756, 756, 756, 756, 756, 756, 756, 756, 756, 756, 756, 756, 756, 756, 756, 756, 756, 756, 756, 756, 756, 756, 756, 756, 756, 756, 756, 756, 756, 756, 756, 756, 756, 756, 756, 756, 756, 756, 756, 756, 756, 756, 756, 756, 756, 756, 756, 756, 756, 756, 756, 756, 756, 756, 756, 756, 756, 756, 756, 756, 756, 756, 756, 756, 756, 756, 756, 756, 756, 756, 756, 756, 756, 756, 756, 756, 756, 756, 756, 756, 756, 756, 756, 756, 756, 756, 756, 756, 756, 756, 756, 756, 756, 756, 756, 756, 756, 756, 756, 756, 756, 756, 756, 756, 756, 756, 756, 756, 756, 756, 756, 756, 756, 756, 756, 756, 756, 756, 756, 756, 756, 756, 756, 756, 756, 756, 756, 756, 756, 756, 756, 756, 756, 756, 756, 756, 756, 756, 756, 756, 756, 756, 756, 756, 756, 756, 756, 756, 756, 756, 756, 756, 756, 756, 756, 756, 756, 756, 756, 756, 756, 756, 756, 756, 756, 756, 756, 756, 756, 756, 756, 756, 756, 756, 756, 756, 756, 756, 756, 756, 756, 756, 756, 756, 756, 756, 756, 756, 756, 756, 756, 756, 756, 756, 756, 756, 756, 756, 756, 756, 756, 756, 756, 756, 756, 756, 756, 756, 756, 756, 756, 756, 756, 756, 756, 756, 756, 756, 756, 756, 756, 756, 756, 756, 756, 756, 756, 756, 756, 756, 756, 756, 756, 756, 756, 756, 756, 756, 756, 756, 756, 756, 756, 756, 756, 756, 756, 756, 756, 756, 756, 756, 756, 756, 756, 756, 756, 756, 756, 756, 756, 756, 756, 756, 756, 756, 756, 756, 756, 756, 756, 756, 756, 756, 756, 756, 756, 756, 756, 756, 756, 756, 756, 756, 756, 756, 756, 756, 756, 756, 756, 756, 756, 756, 756, 756, 756, 756, 756, 756, 756, 756, 756, 756, 756, 756, 756, 756, 756, 756, 756, 756, 756, 756, 756, 756, 756, 756, 756, 756, 756, 756, 756, 756, 756, 756, 756, 756, 756, 756, 756, 756, 756, 756, 756, 756, 756, 756, 756, 756, 756, 756, 756, 756, 756, 756, 756, 756, 756, 756, 756, 756, 756, 756, 756, 756, 756, 756, 756, 756, 756, 756, 756, 756, 756, 756, 756, 756, 756, 756, 756, 756, 756, 756, 756, 756, 756, 756, 756, 756, 756, 756, 756, 756, 756, 756, 756, 756, 756, 756, 756, 756, 756, 756, 756, 756, 756, 756, 756, 756, 756, 756, 756, 756, 756, 756, 756, 756, 756, 756, 756, 756, 756, 756, 756, 756, 756, 756, 756, 756, 756, 756, 756, 756, 756, 756, 756, 756, 756, 756, 756, 756, 756, 756, 756, 756, 756, 756, 756, 756, 756, 756, 756, 756, 756, 756, 756, 756, 756, 756}, + {755, 755, 755, 755, 755, 755, 755, 755, 755, 755, 755, 755, 755, 755, 755, 755, 755, 755, 755, 755, 755, 755, 755, 755, 755, 755, 755, 755, 755, 755, 755, 755, 755, 755, 755, 755, 755, 755, 755, 755, 755, 755, 755, 755, 755, 755, 755, 755, 755, 755, 755, 755, 755, 755, 755, 755, 755, 755, 755, 755, 755, 755, 755, 755, 755, 755, 755, 755, 755, 755, 755, 755, 755, 755, 755, 755, 755, 755, 755, 755, 755, 755, 755, 755, 
755, 755, 755, 755, 755, 755, 755, 755, 755, 755, 755, 755, 755, 755, 755, 755, 755, 755, 755, 755, 755, 755, 755, 755, 755, 755, 755, 755, 755, 755, 755, 755, 755, 755, 755, 755, 755, 755, 755, 755, 755, 755, 755, 755, 755, 755, 755, 755, 755, 755, 755, 755, 755, 755, 755, 755, 755, 755, 755, 755, 755, 755, 755, 755, 755, 755, 755, 755, 755, 755, 755, 755, 755, 755, 755, 755, 755, 755, 755, 755, 755, 755, 755, 755, 755, 755, 755, 755, 755, 755, 755, 755, 755, 755, 755, 755, 755, 755, 755, 755, 755, 755, 755, 755, 755, 755, 755, 755, 755, 755, 755, 755, 755, 755, 755, 755, 755, 755, 755, 755, 755, 755, 755, 755, 755, 755, 755, 755, 755, 755, 755, 755, 755, 755, 755, 755, 755, 755, 755, 755, 755, 755, 755, 755, 755, 755, 755, 755, 755, 755, 755, 755, 755, 755, 755, 755, 755, 755, 755, 755, 755, 755, 755, 755, 755, 755, 755, 755, 755, 755, 755, 755, 755, 755, 755, 755, 755, 755, 755, 755, 755, 755, 755, 755, 755, 755, 755, 755, 755, 755, 755, 755, 755, 755, 755, 755, 755, 755, 755, 755, 755, 755, 755, 755, 755, 755, 755, 755, 755, 755, 755, 755, 755, 755, 755, 755, 755, 755, 755, 755, 755, 755, 755, 755, 755, 755, 755, 755, 755, 755, 755, 755, 755, 755, 755, 755, 755, 755, 755, 755, 755, 755, 755, 755, 755, 755, 755, 755, 755, 755, 755, 755, 755, 755, 755, 755, 755, 755, 755, 755, 755, 755, 755, 755, 755, 755, 755, 755, 755, 755, 755, 755, 755, 755, 755, 755, 755, 755, 755, 755, 755, 755, 755, 755, 755, 755, 755, 755, 755, 755, 755, 755, 755, 755, 755, 755, 755, 755, 755, 755, 755, 755, 755, 755, 755, 755, 755, 755, 755, 755, 755, 755, 755, 755, 755, 755, 755, 755, 755, 755, 755, 755, 755, 755, 755, 755, 755, 755, 755, 755, 755, 755, 755, 755, 755, 755, 755, 755, 755, 755, 755, 755, 755, 755, 755, 755, 755, 755, 755, 755, 755, 755, 755, 755, 755, 755, 755, 755, 755, 755, 755, 755, 755, 755, 755, 755, 755, 755, 755, 755, 755, 755, 755, 755, 755, 755, 755, 755, 755, 755, 755, 755, 755, 755, 755, 755, 755, 755, 755, 755, 755, 755, 755, 755, 755, 755, 755, 755, 755, 755, 755, 755, 755, 755, 755, 755, 755, 755, 755, 755, 755, 755, 755, 755, 755, 755, 755, 755, 755, 755, 755, 755, 755, 755, 755, 755, 755, 755, 755, 755, 755, 755, 755, 755, 755, 755, 755, 755, 755}, + {754, 754, 754, 754, 754, 754, 754, 754, 754, 754, 754, 754, 754, 754, 754, 754, 754, 754, 754, 754, 754, 754, 754, 754, 754, 754, 754, 754, 754, 754, 754, 754, 754, 754, 754, 754, 754, 754, 754, 754, 754, 754, 754, 754, 754, 754, 754, 754, 754, 754, 754, 754, 754, 754, 754, 754, 754, 754, 754, 754, 754, 754, 754, 754, 754, 754, 754, 754, 754, 754, 754, 754, 754, 754, 754, 754, 754, 754, 754, 754, 754, 754, 754, 754, 754, 754, 754, 754, 754, 754, 754, 754, 754, 754, 754, 754, 754, 754, 754, 754, 754, 754, 754, 754, 754, 754, 754, 754, 754, 754, 754, 754, 754, 754, 754, 754, 754, 754, 754, 754, 754, 754, 754, 754, 754, 754, 754, 754, 754, 754, 754, 754, 754, 754, 754, 754, 754, 754, 754, 754, 754, 754, 754, 754, 754, 754, 754, 754, 754, 754, 754, 754, 754, 754, 754, 754, 754, 754, 754, 754, 754, 754, 754, 754, 754, 754, 754, 754, 754, 754, 754, 754, 754, 754, 754, 754, 754, 754, 754, 754, 754, 754, 754, 754, 754, 754, 754, 754, 754, 754, 754, 754, 754, 754, 754, 754, 754, 754, 754, 754, 754, 754, 754, 754, 754, 754, 754, 754, 754, 754, 754, 754, 754, 754, 754, 754, 754, 754, 754, 754, 754, 754, 754, 754, 754, 754, 754, 754, 754, 754, 754, 754, 754, 754, 754, 754, 754, 754, 754, 754, 754, 754, 754, 754, 754, 754, 754, 754, 754, 754, 754, 754, 754, 754, 754, 754, 754, 754, 754, 754, 754, 754, 754, 754, 754, 754, 754, 754, 754, 754, 754, 
754, 754, 754, 754, 754, 754, 754, 754, 754, 754, 754, 754, 754, 754, 754, 754, 754, 754, 754, 754, 754, 754, 754, 754, 754, 754, 754, 754, 754, 754, 754, 754, 754, 754, 754, 754, 754, 754, 754, 754, 754, 754, 754, 754, 754, 754, 754, 754, 754, 754, 754, 754, 754, 754, 754, 754, 754, 754, 754, 754, 754, 754, 754, 754, 754, 754, 754, 754, 754, 754, 754, 754, 754, 754, 754, 754, 754, 754, 754, 754, 754, 754, 754, 754, 754, 754, 754, 754, 754, 754, 754, 754, 754, 754, 754, 754, 754, 754, 754, 754, 754, 754, 754, 754, 754, 754, 754, 754, 754, 754, 754, 754, 754, 754, 754, 754, 754, 754, 754, 754, 754, 754, 754, 754, 754, 754, 754, 754, 754, 754, 754, 754, 754, 754, 754, 754, 754, 754, 754, 754, 754, 754, 754, 754, 754, 754, 754, 754, 754, 754, 754, 754, 754, 754, 754, 754, 754, 754, 754, 754, 754, 754, 754, 754, 754, 754, 754, 754, 754, 754, 754, 754, 754, 754, 754, 754, 754, 754, 754, 754, 754, 754, 754, 754, 754, 754, 754, 754, 754, 754, 754, 754, 754, 754, 754, 754, 754, 754, 754, 754, 754, 754, 754, 754, 754, 754, 754, 754, 754, 754, 754, 754, 754, 754, 754, 754, 754, 754, 754, 754, 754, 754, 754, 754, 754, 754, 754, 754, 754, 754, 754, 754, 754, 754, 754, 754, 754, 754, 754, 754, 754, 754, 754, 754, 754, 754, 754, 754, 754, 754, 754, 754}, + {753, 753, 753, 753, 753, 753, 753, 753, 753, 753, 753, 753, 753, 753, 753, 753, 753, 753, 753, 753, 753, 753, 753, 753, 753, 753, 753, 753, 753, 753, 753, 753, 753, 753, 753, 753, 753, 753, 753, 753, 753, 753, 753, 753, 753, 753, 753, 753, 753, 753, 753, 753, 753, 753, 753, 753, 753, 753, 753, 753, 753, 753, 753, 753, 753, 753, 753, 753, 753, 753, 753, 753, 753, 753, 753, 753, 753, 753, 753, 753, 753, 753, 753, 753, 753, 753, 753, 753, 753, 753, 753, 753, 753, 753, 753, 753, 753, 753, 753, 753, 753, 753, 753, 753, 753, 753, 753, 753, 753, 753, 753, 753, 753, 753, 753, 753, 753, 753, 753, 753, 753, 753, 753, 753, 753, 753, 753, 753, 753, 753, 753, 753, 753, 753, 753, 753, 753, 753, 753, 753, 753, 753, 753, 753, 753, 753, 753, 753, 753, 753, 753, 753, 753, 753, 753, 753, 753, 753, 753, 753, 753, 753, 753, 753, 753, 753, 753, 753, 753, 753, 753, 753, 753, 753, 753, 753, 753, 753, 753, 753, 753, 753, 753, 753, 753, 753, 753, 753, 753, 753, 753, 753, 753, 753, 753, 753, 753, 753, 753, 753, 753, 753, 753, 753, 753, 753, 753, 753, 753, 753, 753, 753, 753, 753, 753, 753, 753, 753, 753, 753, 753, 753, 753, 753, 753, 753, 753, 753, 753, 753, 753, 753, 753, 753, 753, 753, 753, 753, 753, 753, 753, 753, 753, 753, 753, 753, 753, 753, 753, 753, 753, 753, 753, 753, 753, 753, 753, 753, 753, 753, 753, 753, 753, 753, 753, 753, 753, 753, 753, 753, 753, 753, 753, 753, 753, 753, 753, 753, 753, 753, 753, 753, 753, 753, 753, 753, 753, 753, 753, 753, 753, 753, 753, 753, 753, 753, 753, 753, 753, 753, 753, 753, 753, 753, 753, 753, 753, 753, 753, 753, 753, 753, 753, 753, 753, 753, 753, 753, 753, 753, 753, 753, 753, 753, 753, 753, 753, 753, 753, 753, 753, 753, 753, 753, 753, 753, 753, 753, 753, 753, 753, 753, 753, 753, 753, 753, 753, 753, 753, 753, 753, 753, 753, 753, 753, 753, 753, 753, 753, 753, 753, 753, 753, 753, 753, 753, 753, 753, 753, 753, 753, 753, 753, 753, 753, 753, 753, 753, 753, 753, 753, 753, 753, 753, 753, 753, 753, 753, 753, 753, 753, 753, 753, 753, 753, 753, 753, 753, 753, 753, 753, 753, 753, 753, 753, 753, 753, 753, 753, 753, 753, 753, 753, 753, 753, 753, 753, 753, 753, 753, 753, 753, 753, 753, 753, 753, 753, 753, 753, 753, 753, 753, 753, 753, 753, 753, 753, 753, 753, 753, 753, 753, 753, 753, 753, 753, 753, 753, 753, 753, 753, 753, 753, 753, 753, 753, 753, 753, 
753, 753, 753, 753, 753, 753, 753, 753, 753, 753, 753, 753, 753, 753, 753, 753, 753, 753, 753, 753, 753, 753, 753, 753, 753, 753, 753, 753, 753, 753, 753, 753, 753, 753, 753, 753, 753, 753, 753, 753, 753, 753, 753, 753, 753, 753, 753, 753, 753, 753, 753, 753, 753, 753, 753, 753, 753, 753, 753, 753, 753, 753, 753, 753, 753}, + {752, 752, 752, 752, 752, 752, 752, 752, 752, 752, 752, 752, 752, 752, 752, 752, 752, 752, 752, 752, 752, 752, 752, 752, 752, 752, 752, 752, 752, 752, 752, 752, 752, 752, 752, 752, 752, 752, 752, 752, 752, 752, 752, 752, 752, 752, 752, 752, 752, 752, 752, 752, 752, 752, 752, 752, 752, 752, 752, 752, 752, 752, 752, 752, 752, 752, 752, 752, 752, 752, 752, 752, 752, 752, 752, 752, 752, 752, 752, 752, 752, 752, 752, 752, 752, 752, 752, 752, 752, 752, 752, 752, 752, 752, 752, 752, 752, 752, 752, 752, 752, 752, 752, 752, 752, 752, 752, 752, 752, 752, 752, 752, 752, 752, 752, 752, 752, 752, 752, 752, 752, 752, 752, 752, 752, 752, 752, 752, 752, 752, 752, 752, 752, 752, 752, 752, 752, 752, 752, 752, 752, 752, 752, 752, 752, 752, 752, 752, 752, 752, 752, 752, 752, 752, 752, 752, 752, 752, 752, 752, 752, 752, 752, 752, 752, 752, 752, 752, 752, 752, 752, 752, 752, 752, 752, 752, 752, 752, 752, 752, 752, 752, 752, 752, 752, 752, 752, 752, 752, 752, 752, 752, 752, 752, 752, 752, 752, 752, 752, 752, 752, 752, 752, 752, 752, 752, 752, 752, 752, 752, 752, 752, 752, 752, 752, 752, 752, 752, 752, 752, 752, 752, 752, 752, 752, 752, 752, 752, 752, 752, 752, 752, 752, 752, 752, 752, 752, 752, 752, 752, 752, 752, 752, 752, 752, 752, 752, 752, 752, 752, 752, 752, 752, 752, 752, 752, 752, 752, 752, 752, 752, 752, 752, 752, 752, 752, 752, 752, 752, 752, 752, 752, 752, 752, 752, 752, 752, 752, 752, 752, 752, 752, 752, 752, 752, 752, 752, 752, 752, 752, 752, 752, 752, 752, 752, 752, 752, 752, 752, 752, 752, 752, 752, 752, 752, 752, 752, 752, 752, 752, 752, 752, 752, 752, 752, 752, 752, 752, 752, 752, 752, 752, 752, 752, 752, 752, 752, 752, 752, 752, 752, 752, 752, 752, 752, 752, 752, 752, 752, 752, 752, 752, 752, 752, 752, 752, 752, 752, 752, 752, 752, 752, 752, 752, 752, 752, 752, 752, 752, 752, 752, 752, 752, 752, 752, 752, 752, 752, 752, 752, 752, 752, 752, 752, 752, 752, 752, 752, 752, 752, 752, 752, 752, 752, 752, 752, 752, 752, 752, 752, 752, 752, 752, 752, 752, 752, 752, 752, 752, 752, 752, 752, 752, 752, 752, 752, 752, 752, 752, 752, 752, 752, 752, 752, 752, 752, 752, 752, 752, 752, 752, 752, 752, 752, 752, 752, 752, 752, 752, 752, 752, 752, 752, 752, 752, 752, 752, 752, 752, 752, 752, 752, 752, 752, 752, 752, 752, 752, 752, 752, 752, 752, 752, 752, 752, 752, 752, 752, 752, 752, 752, 752, 752, 752, 752, 752, 752, 752, 752, 752, 752, 752, 752, 752, 752, 752, 752, 752, 752, 752, 752, 752, 752, 752, 752, 752, 752, 752, 752, 752, 752, 752, 752, 752, 752, 752, 752, 752, 752, 752, 752, 752, 752, 752, 752, 752, 752, 752, 752, 752, 752, 752, 752, 752, 752, 752, 752, 752, 752, 752, 752, 752, 752}, + // 190 + {751, 751, 751, 751, 751, 751, 751, 751, 751, 751, 751, 751, 751, 751, 751, 751, 751, 751, 751, 751, 751, 751, 751, 751, 751, 751, 751, 751, 751, 751, 751, 751, 751, 751, 751, 751, 751, 751, 751, 751, 751, 751, 751, 751, 751, 751, 751, 751, 751, 751, 751, 751, 751, 751, 751, 751, 751, 751, 751, 751, 751, 751, 751, 751, 751, 751, 751, 751, 751, 751, 751, 751, 751, 751, 751, 751, 751, 751, 751, 751, 751, 751, 751, 751, 751, 751, 751, 751, 751, 751, 751, 751, 751, 751, 751, 751, 751, 751, 751, 751, 751, 751, 751, 751, 751, 751, 751, 751, 751, 751, 751, 751, 751, 751, 751, 751, 751, 751, 751, 
751, 751, 751, 751, 751, 751, 751, 751, 751, 751, 751, 751, 751, 751, 751, 751, 751, 751, 751, 751, 751, 751, 751, 751, 751, 751, 751, 751, 751, 751, 751, 751, 751, 751, 751, 751, 751, 751, 751, 751, 751, 751, 751, 751, 751, 751, 751, 751, 751, 751, 751, 751, 751, 751, 751, 751, 751, 751, 751, 751, 751, 751, 751, 751, 751, 751, 751, 751, 751, 751, 751, 751, 751, 751, 751, 751, 751, 751, 751, 751, 751, 751, 751, 751, 751, 751, 751, 751, 751, 751, 751, 751, 751, 751, 751, 751, 751, 751, 751, 751, 751, 751, 751, 751, 751, 751, 751, 751, 751, 751, 751, 751, 751, 751, 751, 751, 751, 751, 751, 751, 751, 751, 751, 751, 751, 751, 751, 751, 751, 751, 751, 751, 751, 751, 751, 751, 751, 751, 751, 751, 751, 751, 751, 751, 751, 751, 751, 751, 751, 751, 751, 751, 751, 751, 751, 751, 751, 751, 751, 751, 751, 751, 751, 751, 751, 751, 751, 751, 751, 751, 751, 751, 751, 751, 751, 751, 751, 751, 751, 751, 751, 751, 751, 751, 751, 751, 751, 751, 751, 751, 751, 751, 751, 751, 751, 751, 751, 751, 751, 751, 751, 751, 751, 751, 751, 751, 751, 751, 751, 751, 751, 751, 751, 751, 751, 751, 751, 751, 751, 751, 751, 751, 751, 751, 751, 751, 751, 751, 751, 751, 751, 751, 751, 751, 751, 751, 751, 751, 751, 751, 751, 751, 751, 751, 751, 751, 751, 751, 751, 751, 751, 751, 751, 751, 751, 751, 751, 751, 751, 751, 751, 751, 751, 751, 751, 751, 751, 751, 751, 751, 751, 751, 751, 751, 751, 751, 751, 751, 751, 751, 751, 751, 751, 751, 751, 751, 751, 751, 751, 751, 751, 751, 751, 751, 751, 751, 751, 751, 751, 751, 751, 751, 751, 751, 751, 751, 751, 751, 751, 751, 751, 751, 751, 751, 751, 751, 751, 751, 751, 751, 751, 751, 751, 751, 751, 751, 751, 751, 751, 751, 751, 751, 751, 751, 751, 751, 751, 751, 751, 751, 751, 751, 751, 751, 751, 751, 751, 751, 751, 751, 751, 751, 751, 751, 751, 751, 751, 751, 751, 751, 751, 751, 751, 751, 751, 751, 751, 751, 751, 751, 751, 751, 751, 751, 751, 751, 751, 751, 751, 751, 751, 751, 751, 751, 751, 751, 751, 751, 751, 751, 751, 751, 751, 751, 751, 751, 751, 751, 751, 751, 751, 751, 751, 751}, + {750, 750, 750, 750, 750, 750, 750, 750, 750, 750, 750, 750, 750, 750, 750, 750, 750, 750, 750, 750, 750, 750, 750, 750, 750, 750, 750, 750, 750, 750, 750, 750, 750, 750, 750, 750, 750, 750, 750, 750, 750, 750, 750, 750, 750, 750, 750, 750, 750, 750, 750, 750, 750, 750, 750, 750, 750, 750, 750, 750, 750, 750, 750, 750, 750, 750, 750, 750, 750, 750, 750, 750, 750, 750, 750, 750, 750, 750, 750, 750, 750, 750, 750, 750, 750, 750, 750, 750, 750, 750, 750, 750, 750, 750, 750, 750, 750, 750, 750, 750, 750, 750, 750, 750, 750, 750, 750, 750, 750, 750, 750, 750, 750, 750, 750, 750, 750, 750, 750, 750, 750, 750, 750, 750, 750, 750, 750, 750, 750, 750, 750, 750, 750, 750, 750, 750, 750, 750, 750, 750, 750, 750, 750, 750, 750, 750, 750, 750, 750, 750, 750, 750, 750, 750, 750, 750, 750, 750, 750, 750, 750, 750, 750, 750, 750, 750, 750, 750, 750, 750, 750, 750, 750, 750, 750, 750, 750, 750, 750, 750, 750, 750, 750, 750, 750, 750, 750, 750, 750, 750, 750, 750, 750, 750, 750, 750, 750, 750, 750, 750, 750, 750, 750, 750, 750, 750, 750, 750, 750, 750, 750, 750, 750, 750, 750, 750, 750, 750, 750, 750, 750, 750, 750, 750, 750, 750, 750, 750, 750, 750, 750, 750, 750, 750, 750, 750, 750, 750, 750, 750, 750, 750, 750, 750, 750, 750, 750, 750, 750, 750, 750, 750, 750, 750, 750, 750, 750, 750, 750, 750, 750, 750, 750, 750, 750, 750, 750, 750, 750, 750, 750, 750, 750, 750, 750, 750, 750, 750, 750, 750, 750, 750, 750, 750, 750, 750, 750, 750, 750, 750, 750, 750, 750, 750, 750, 750, 750, 750, 750, 750, 750, 750, 750, 750, 750, 750, 
750, 750, 750, 750, 750, 750, 750, 750, 750, 750, 750, 750, 750, 750, 750, 750, 750, 750, 750, 750, 750, 750, 750, 750, 750, 750, 750, 750, 750, 750, 750, 750, 750, 750, 750, 750, 750, 750, 750, 750, 750, 750, 750, 750, 750, 750, 750, 750, 750, 750, 750, 750, 750, 750, 750, 750, 750, 750, 750, 750, 750, 750, 750, 750, 750, 750, 750, 750, 750, 750, 750, 750, 750, 750, 750, 750, 750, 750, 750, 750, 750, 750, 750, 750, 750, 750, 750, 750, 750, 750, 750, 750, 750, 750, 750, 750, 750, 750, 750, 750, 750, 750, 750, 750, 750, 750, 750, 750, 750, 750, 750, 750, 750, 750, 750, 750, 750, 750, 750, 750, 750, 750, 750, 750, 750, 750, 750, 750, 750, 750, 750, 750, 750, 750, 750, 750, 750, 750, 750, 750, 750, 750, 750, 750, 750, 750, 750, 750, 750, 750, 750, 750, 750, 750, 750, 750, 750, 750, 750, 750, 750, 750, 750, 750, 750, 750, 750, 750, 750, 750, 750, 750, 750, 750, 750, 750, 750, 750, 750, 750, 750, 750, 750, 750, 750, 750, 750, 750, 750, 750, 750, 750, 750, 750, 750, 750, 750, 750, 750, 750, 750, 750, 750, 750, 750, 750, 750, 750, 750, 750, 750, 750, 750, 750, 750, 750, 750}, + {749, 749, 749, 749, 749, 749, 749, 749, 749, 749, 749, 749, 749, 749, 749, 749, 749, 749, 749, 749, 749, 749, 749, 749, 749, 749, 749, 749, 749, 749, 749, 749, 749, 749, 749, 749, 749, 749, 749, 749, 749, 749, 749, 749, 749, 749, 749, 749, 749, 749, 749, 749, 749, 749, 749, 749, 749, 749, 749, 749, 749, 749, 749, 749, 749, 749, 749, 749, 749, 749, 749, 749, 749, 749, 749, 749, 749, 749, 749, 749, 749, 749, 749, 749, 749, 749, 749, 749, 749, 749, 749, 749, 749, 749, 749, 749, 749, 749, 749, 749, 749, 749, 749, 749, 749, 749, 749, 749, 749, 749, 749, 749, 749, 749, 749, 749, 749, 749, 749, 749, 749, 749, 749, 749, 749, 749, 749, 749, 749, 749, 749, 749, 749, 749, 749, 749, 749, 749, 749, 749, 749, 749, 749, 749, 749, 749, 749, 749, 749, 749, 749, 749, 749, 749, 749, 749, 749, 749, 749, 749, 749, 749, 749, 749, 749, 749, 749, 749, 749, 749, 749, 749, 749, 749, 749, 749, 749, 749, 749, 749, 749, 749, 749, 749, 749, 749, 749, 749, 749, 749, 749, 749, 749, 749, 749, 749, 749, 749, 749, 749, 749, 749, 749, 749, 749, 749, 749, 749, 749, 749, 749, 749, 749, 749, 749, 749, 749, 749, 749, 749, 749, 749, 749, 749, 749, 749, 749, 749, 749, 749, 749, 749, 749, 749, 749, 749, 749, 749, 749, 749, 749, 749, 749, 749, 749, 749, 749, 749, 749, 749, 749, 749, 749, 749, 749, 749, 749, 749, 749, 749, 749, 749, 749, 749, 749, 749, 749, 749, 749, 749, 749, 749, 749, 749, 749, 749, 749, 749, 749, 749, 749, 749, 749, 749, 749, 749, 749, 749, 749, 749, 749, 749, 749, 749, 749, 749, 749, 749, 749, 749, 749, 749, 749, 749, 749, 749, 749, 749, 749, 749, 749, 749, 749, 749, 749, 749, 749, 749, 749, 749, 749, 749, 749, 749, 749, 749, 749, 749, 749, 749, 749, 749, 749, 749, 749, 749, 749, 749, 749, 749, 749, 749, 749, 749, 749, 749, 749, 749, 749, 749, 749, 749, 749, 749, 749, 749, 749, 749, 749, 749, 749, 749, 749, 749, 749, 749, 749, 749, 749, 749, 749, 749, 749, 749, 749, 749, 749, 749, 749, 749, 749, 749, 749, 749, 749, 749, 749, 749, 749, 749, 749, 749, 749, 749, 749, 749, 749, 749, 749, 749, 749, 749, 749, 749, 749, 749, 749, 749, 749, 749, 749, 749, 749, 749, 749, 749, 749, 749, 749, 749, 749, 749, 749, 749, 749, 749, 749, 749, 749, 749, 749, 749, 749, 749, 749, 749, 749, 749, 749, 749, 749, 749, 749, 749, 749, 749, 749, 749, 749, 749, 749, 749, 749, 749, 749, 749, 749, 749, 749, 749, 749, 749, 749, 749, 749, 749, 749, 749, 749, 749, 749, 749, 749, 749, 749, 749, 749, 749, 749, 749, 749, 749, 749, 749, 749, 749, 749, 749, 749, 749, 749, 749, 749, 
749, 749, 749, 749, 749, 749, 749, 749, 749, 749, 749, 749, 749, 749, 749, 749, 749, 749, 749, 749, 749, 749, 749, 749, 749, 749, 749, 749, 749, 749}, + {748, 748, 748, 748, 748, 748, 748, 748, 748, 748, 748, 748, 748, 748, 748, 748, 748, 748, 748, 748, 748, 748, 748, 748, 748, 748, 748, 748, 748, 748, 748, 748, 748, 748, 748, 748, 748, 748, 748, 748, 748, 748, 748, 748, 748, 748, 748, 748, 748, 748, 748, 748, 748, 748, 748, 748, 748, 748, 748, 748, 748, 748, 748, 748, 748, 748, 748, 748, 748, 748, 748, 748, 748, 748, 748, 748, 748, 748, 748, 748, 748, 748, 748, 748, 748, 748, 748, 748, 748, 748, 748, 748, 748, 748, 748, 748, 748, 748, 748, 748, 748, 748, 748, 748, 748, 748, 748, 748, 748, 748, 748, 748, 748, 748, 748, 748, 748, 748, 748, 748, 748, 748, 748, 748, 748, 748, 748, 748, 748, 748, 748, 748, 748, 748, 748, 748, 748, 748, 748, 748, 748, 748, 748, 748, 748, 748, 748, 748, 748, 748, 748, 748, 748, 748, 748, 748, 748, 748, 748, 748, 748, 748, 748, 748, 748, 748, 748, 748, 748, 748, 748, 748, 748, 748, 748, 748, 748, 748, 748, 748, 748, 748, 748, 748, 748, 748, 748, 748, 748, 748, 748, 748, 748, 748, 748, 748, 748, 748, 748, 748, 748, 748, 748, 748, 748, 748, 748, 748, 748, 748, 748, 748, 748, 748, 748, 748, 748, 748, 748, 748, 748, 748, 748, 748, 748, 748, 748, 748, 748, 748, 748, 748, 748, 748, 748, 748, 748, 748, 748, 748, 748, 748, 748, 748, 748, 748, 748, 748, 748, 748, 748, 748, 748, 748, 748, 748, 748, 748, 748, 748, 748, 748, 748, 748, 748, 748, 748, 748, 748, 748, 748, 748, 748, 748, 748, 748, 748, 748, 748, 748, 748, 748, 748, 748, 748, 748, 748, 748, 748, 748, 748, 748, 748, 748, 748, 748, 748, 748, 748, 748, 748, 748, 748, 748, 748, 748, 748, 748, 748, 748, 748, 748, 748, 748, 748, 748, 748, 748, 748, 748, 748, 748, 748, 748, 748, 748, 748, 748, 748, 748, 748, 748, 748, 748, 748, 748, 748, 748, 748, 748, 748, 748, 748, 748, 748, 748, 748, 748, 748, 748, 748, 748, 748, 748, 748, 748, 748, 748, 748, 748, 748, 748, 748, 748, 748, 748, 748, 748, 748, 748, 748, 748, 748, 748, 748, 748, 748, 748, 748, 748, 748, 748, 748, 748, 748, 748, 748, 748, 748, 748, 748, 748, 748, 748, 748, 748, 748, 748, 748, 748, 748, 748, 748, 748, 748, 748, 748, 748, 748, 748, 748, 748, 748, 748, 748, 748, 748, 748, 748, 748, 748, 748, 748, 748, 748, 748, 748, 748, 748, 748, 748, 748, 748, 748, 748, 748, 748, 748, 748, 748, 748, 748, 748, 748, 748, 748, 748, 748, 748, 748, 748, 748, 748, 748, 748, 748, 748, 748, 748, 748, 748, 748, 748, 748, 748, 748, 748, 748, 748, 748, 748, 748, 748, 748, 748, 748, 748, 748, 748, 748, 748, 748, 748, 748, 748, 748, 748, 748, 748, 748, 748, 748, 748, 748, 748, 748, 748, 748, 748, 748, 748, 748, 748, 748, 748, 748, 748, 748, 748, 748, 748, 748, 748, 748, 748, 748, 748, 748, 748, 748, 748, 748, 748}, + {747, 747, 747, 747, 747, 747, 747, 747, 747, 747, 747, 747, 747, 747, 747, 747, 747, 747, 747, 747, 747, 747, 747, 747, 747, 747, 747, 747, 747, 747, 747, 747, 747, 747, 747, 747, 747, 747, 747, 747, 747, 747, 747, 747, 747, 747, 747, 747, 747, 747, 747, 747, 747, 747, 747, 747, 747, 747, 747, 747, 747, 747, 747, 747, 747, 747, 747, 747, 747, 747, 747, 747, 747, 747, 747, 747, 747, 747, 747, 747, 747, 747, 747, 747, 747, 747, 747, 747, 747, 747, 747, 747, 747, 747, 747, 747, 747, 747, 747, 747, 747, 747, 747, 747, 747, 747, 747, 747, 747, 747, 747, 747, 747, 747, 747, 747, 747, 747, 747, 747, 747, 747, 747, 747, 747, 747, 747, 747, 747, 747, 747, 747, 747, 747, 747, 747, 747, 747, 747, 747, 747, 747, 747, 747, 747, 747, 747, 747, 747, 747, 747, 747, 747, 747, 747, 747, 
747, 747, 747, 747, 747, 747, 747, 747, 747, 747, 747, 747, 747, 747, 747, 747, 747, 747, 747, 747, 747, 747, 747, 747, 747, 747, 747, 747, 747, 747, 747, 747, 747, 747, 747, 747, 747, 747, 747, 747, 747, 747, 747, 747, 747, 747, 747, 747, 747, 747, 747, 747, 747, 747, 747, 747, 747, 747, 747, 747, 747, 747, 747, 747, 747, 747, 747, 747, 747, 747, 747, 747, 747, 747, 747, 747, 747, 747, 747, 747, 747, 747, 747, 747, 747, 747, 747, 747, 747, 747, 747, 747, 747, 747, 747, 747, 747, 747, 747, 747, 747, 747, 747, 747, 747, 747, 747, 747, 747, 747, 747, 747, 747, 747, 747, 747, 747, 747, 747, 747, 747, 747, 747, 747, 747, 747, 747, 747, 747, 747, 747, 747, 747, 747, 747, 747, 747, 747, 747, 747, 747, 747, 747, 747, 747, 747, 747, 747, 747, 747, 747, 747, 747, 747, 747, 747, 747, 747, 747, 747, 747, 747, 747, 747, 747, 747, 747, 747, 747, 747, 747, 747, 747, 747, 747, 747, 747, 747, 747, 747, 747, 747, 747, 747, 747, 747, 747, 747, 747, 747, 747, 747, 747, 747, 747, 747, 747, 747, 747, 747, 747, 747, 747, 747, 747, 747, 747, 747, 747, 747, 747, 747, 747, 747, 747, 747, 747, 747, 747, 747, 747, 747, 747, 747, 747, 747, 747, 747, 747, 747, 747, 747, 747, 747, 747, 747, 747, 747, 747, 747, 747, 747, 747, 747, 747, 747, 747, 747, 747, 747, 747, 747, 747, 747, 747, 747, 747, 747, 747, 747, 747, 747, 747, 747, 747, 747, 747, 747, 747, 747, 747, 747, 747, 747, 747, 747, 747, 747, 747, 747, 747, 747, 747, 747, 747, 747, 747, 747, 747, 747, 747, 747, 747, 747, 747, 747, 747, 747, 747, 747, 747, 747, 747, 747, 747, 747, 747, 747, 747, 747, 747, 747, 747, 747, 747, 747, 747, 747, 747, 747, 747, 747, 747, 747, 747, 747, 747, 747, 747, 747, 747, 747, 747, 747, 747, 747, 747, 747, 747, 747, 747, 747, 747, 747, 747, 747, 747, 747, 747, 747, 747, 747, 747, 747, 747, 747, 747, 747, 747, 747, 747, 747, 747, 747, 747, 747, 747}, + // 195 + {746, 746, 746, 746, 746, 746, 746, 746, 746, 746, 746, 746, 746, 746, 746, 746, 746, 746, 746, 746, 746, 746, 746, 746, 746, 746, 746, 746, 746, 746, 746, 746, 746, 746, 746, 746, 746, 746, 746, 746, 746, 746, 746, 746, 746, 746, 746, 746, 746, 746, 746, 746, 746, 746, 746, 746, 746, 746, 746, 746, 746, 746, 746, 746, 746, 746, 746, 746, 746, 746, 746, 746, 746, 746, 746, 746, 746, 746, 746, 746, 746, 746, 746, 746, 746, 746, 746, 746, 746, 746, 746, 746, 746, 746, 746, 746, 746, 746, 746, 746, 746, 746, 746, 746, 746, 746, 746, 746, 746, 746, 746, 746, 746, 746, 746, 746, 746, 746, 746, 746, 746, 746, 746, 746, 746, 746, 746, 746, 746, 746, 746, 746, 746, 746, 746, 746, 746, 746, 746, 746, 746, 746, 746, 746, 746, 746, 746, 746, 746, 746, 746, 746, 746, 746, 746, 746, 746, 746, 746, 746, 746, 746, 746, 746, 746, 746, 746, 746, 746, 746, 746, 746, 746, 746, 746, 746, 746, 746, 746, 746, 746, 746, 746, 746, 746, 746, 746, 746, 746, 746, 746, 746, 746, 746, 746, 746, 746, 746, 746, 746, 746, 746, 746, 746, 746, 746, 746, 746, 746, 746, 746, 746, 746, 746, 746, 746, 746, 746, 746, 746, 746, 746, 746, 746, 746, 746, 746, 746, 746, 746, 746, 746, 746, 746, 746, 746, 746, 746, 746, 746, 746, 746, 746, 746, 746, 746, 746, 746, 746, 746, 746, 746, 746, 746, 746, 746, 746, 746, 746, 746, 746, 746, 746, 746, 746, 746, 746, 746, 746, 746, 746, 746, 746, 746, 746, 746, 746, 746, 746, 746, 746, 746, 746, 746, 746, 746, 746, 746, 746, 746, 746, 746, 746, 746, 746, 746, 746, 746, 746, 746, 746, 746, 746, 746, 746, 746, 746, 746, 746, 746, 746, 746, 746, 746, 746, 746, 746, 746, 746, 746, 746, 746, 746, 746, 746, 746, 746, 746, 746, 746, 746, 746, 746, 746, 746, 746, 746, 746, 746, 746, 746, 
746, 746, 746, 746, 746, 746, 746, 746, 746, 746, 746, 746, 746, 746, 746, 746, 746, 746, 746, 746, 746, 746, 746, 746, 746, 746, 746, 746, 746, 746, 746, 746, 746, 746, 746, 746, 746, 746, 746, 746, 746, 746, 746, 746, 746, 746, 746, 746, 746, 746, 746, 746, 746, 746, 746, 746, 746, 746, 746, 746, 746, 746, 746, 746, 746, 746, 746, 746, 746, 746, 746, 746, 746, 746, 746, 746, 746, 746, 746, 746, 746, 746, 746, 746, 746, 746, 746, 746, 746, 746, 746, 746, 746, 746, 746, 746, 746, 746, 746, 746, 746, 746, 746, 746, 746, 746, 746, 746, 746, 746, 746, 746, 746, 746, 746, 746, 746, 746, 746, 746, 746, 746, 746, 746, 746, 746, 746, 746, 746, 746, 746, 746, 746, 746, 746, 746, 746, 746, 746, 746, 746, 746, 746, 746, 746, 746, 746, 746, 746, 746, 746, 746, 746, 746, 746, 746, 746, 746, 746, 746, 746, 746, 746, 746, 746, 746, 746, 746, 746, 746, 746, 746, 746, 746, 746, 746, 746, 746, 746, 746, 746, 746}, + {745, 745, 745, 745, 745, 745, 745, 745, 745, 745, 745, 745, 745, 745, 745, 745, 745, 745, 745, 745, 745, 745, 745, 745, 745, 745, 745, 745, 745, 745, 745, 745, 745, 745, 745, 745, 745, 745, 745, 745, 745, 745, 745, 745, 745, 745, 745, 745, 745, 745, 745, 745, 745, 745, 745, 745, 745, 745, 745, 745, 745, 745, 745, 745, 745, 745, 745, 745, 745, 745, 745, 745, 745, 745, 745, 745, 745, 745, 745, 745, 745, 745, 745, 745, 745, 745, 745, 745, 745, 745, 745, 745, 745, 745, 745, 745, 745, 745, 745, 745, 745, 745, 745, 745, 745, 745, 745, 745, 745, 745, 745, 745, 745, 745, 745, 745, 745, 745, 745, 745, 745, 745, 745, 745, 745, 745, 745, 745, 745, 745, 745, 745, 745, 745, 745, 745, 745, 745, 745, 745, 745, 745, 745, 745, 745, 745, 745, 745, 745, 745, 745, 745, 745, 745, 745, 745, 745, 745, 745, 745, 745, 745, 745, 745, 745, 745, 745, 745, 745, 745, 745, 745, 745, 745, 745, 745, 745, 745, 745, 745, 745, 745, 745, 745, 745, 745, 745, 745, 745, 745, 745, 745, 745, 745, 745, 745, 745, 745, 745, 745, 745, 745, 745, 745, 745, 745, 745, 745, 745, 745, 745, 745, 745, 745, 745, 745, 745, 745, 745, 745, 745, 745, 745, 745, 745, 745, 745, 745, 745, 745, 745, 745, 745, 745, 745, 745, 745, 745, 745, 745, 745, 745, 745, 745, 745, 745, 745, 745, 745, 745, 745, 745, 745, 745, 745, 745, 745, 745, 745, 745, 745, 745, 745, 745, 745, 745, 745, 745, 745, 745, 745, 745, 745, 745, 745, 745, 745, 745, 745, 745, 745, 745, 745, 745, 745, 745, 745, 745, 745, 745, 745, 745, 745, 745, 745, 745, 745, 745, 745, 745, 745, 745, 745, 745, 745, 745, 745, 745, 745, 745, 745, 745, 745, 745, 745, 745, 745, 745, 745, 745, 745, 745, 745, 745, 745, 745, 745, 745, 745, 745, 745, 745, 745, 745, 745, 745, 745, 745, 745, 745, 745, 745, 745, 745, 745, 745, 745, 745, 745, 745, 745, 745, 745, 745, 745, 745, 745, 745, 745, 745, 745, 745, 745, 745, 745, 745, 745, 745, 745, 745, 745, 745, 745, 745, 745, 745, 745, 745, 745, 745, 745, 745, 745, 745, 745, 745, 745, 745, 745, 745, 745, 745, 745, 745, 745, 745, 745, 745, 745, 745, 745, 745, 745, 745, 745, 745, 745, 745, 745, 745, 745, 745, 745, 745, 745, 745, 745, 745, 745, 745, 745, 745, 745, 745, 745, 745, 745, 745, 745, 745, 745, 745, 745, 745, 745, 745, 745, 745, 745, 745, 745, 745, 745, 745, 745, 745, 745, 745, 745, 745, 745, 745, 745, 745, 745, 745, 745, 745, 745, 745, 745, 745, 745, 745, 745, 745, 745, 745, 745, 745, 745, 745, 745, 745, 745, 745, 745, 745, 745, 745, 745, 745, 745, 745, 745, 745, 745, 745, 745, 745, 745, 745, 745, 745, 745, 745, 745, 745, 745, 745, 745, 745, 745, 745, 745, 745, 745, 745, 745, 745, 745, 745, 745, 745, 745, 745, 745, 745, 745, 745, 745, 745, 745}, + {744, 744, 744, 744, 
744, 744, 744, 744, 744, 744, 744, 744, 744, 744, 744, 744, 744, 744, 744, 744, 744, 744, 744, 744, 744, 744, 744, 744, 744, 744, 744, 744, 744, 744, 744, 744, 744, 744, 744, 744, 744, 744, 744, 744, 744, 744, 744, 744, 744, 744, 744, 744, 744, 744, 744, 744, 744, 744, 744, 744, 744, 744, 744, 744, 744, 744, 744, 744, 744, 744, 744, 744, 744, 744, 744, 744, 744, 744, 744, 744, 744, 744, 744, 744, 744, 744, 744, 744, 744, 744, 744, 744, 744, 744, 744, 744, 744, 744, 744, 744, 744, 744, 744, 744, 744, 744, 744, 744, 744, 744, 744, 744, 744, 744, 744, 744, 744, 744, 744, 744, 744, 744, 744, 744, 744, 744, 744, 744, 744, 744, 744, 744, 744, 744, 744, 744, 744, 744, 744, 744, 744, 744, 744, 744, 744, 744, 744, 744, 744, 744, 744, 744, 744, 744, 744, 744, 744, 744, 744, 744, 744, 744, 744, 744, 744, 744, 744, 744, 744, 744, 744, 744, 744, 744, 744, 744, 744, 744, 744, 744, 744, 744, 744, 744, 744, 744, 744, 744, 744, 744, 744, 744, 744, 744, 744, 744, 744, 744, 744, 744, 744, 744, 744, 744, 744, 744, 744, 744, 744, 744, 744, 744, 744, 744, 744, 744, 744, 744, 744, 744, 744, 744, 744, 744, 744, 744, 744, 744, 744, 744, 744, 744, 744, 744, 744, 744, 744, 744, 744, 744, 744, 744, 744, 744, 744, 744, 744, 744, 744, 744, 744, 744, 744, 744, 744, 744, 744, 744, 744, 744, 744, 744, 744, 744, 744, 744, 744, 744, 744, 744, 744, 744, 744, 744, 744, 744, 744, 744, 744, 744, 744, 744, 744, 744, 744, 744, 744, 744, 744, 744, 744, 744, 744, 744, 744, 744, 744, 744, 744, 744, 744, 744, 744, 744, 744, 744, 744, 744, 744, 744, 744, 744, 744, 744, 744, 744, 744, 744, 744, 744, 744, 744, 744, 744, 744, 744, 744, 744, 744, 744, 744, 744, 744, 744, 744, 744, 744, 744, 744, 744, 744, 744, 744, 744, 744, 744, 744, 744, 744, 744, 744, 744, 744, 744, 744, 744, 744, 744, 744, 744, 744, 744, 744, 744, 744, 744, 744, 744, 744, 744, 744, 744, 744, 744, 744, 744, 744, 744, 744, 744, 744, 744, 744, 744, 744, 744, 744, 744, 744, 744, 744, 744, 744, 744, 744, 744, 744, 744, 744, 744, 744, 744, 744, 744, 744, 744, 744, 744, 744, 744, 744, 744, 744, 744, 744, 744, 744, 744, 744, 744, 744, 744, 744, 744, 744, 744, 744, 744, 744, 744, 744, 744, 744, 744, 744, 744, 744, 744, 744, 744, 744, 744, 744, 744, 744, 744, 744, 744, 744, 744, 744, 744, 744, 744, 744, 744, 744, 744, 744, 744, 744, 744, 744, 744, 744, 744, 744, 744, 744, 744, 744, 744, 744, 744, 744, 744, 744, 744, 744, 744, 744, 744, 744, 744, 744, 744, 744, 744, 744, 744, 744, 744, 744, 744, 744, 744, 744, 744, 744, 744, 744, 744, 744, 744, 744, 744, 744, 744, 744, 744, 744, 744, 744, 744, 744, 744, 744, 744, 744, 744, 744, 744, 744}, + {743, 743, 743, 743, 743, 743, 743, 743, 743, 743, 743, 743, 743, 743, 743, 743, 743, 743, 743, 743, 743, 743, 743, 743, 743, 743, 743, 743, 743, 743, 743, 743, 743, 743, 743, 743, 743, 743, 743, 743, 743, 743, 743, 743, 743, 743, 743, 743, 743, 743, 743, 743, 743, 743, 743, 743, 743, 743, 743, 743, 743, 743, 743, 743, 743, 743, 743, 743, 743, 743, 743, 743, 743, 743, 743, 743, 743, 743, 743, 743, 743, 743, 743, 743, 743, 743, 743, 743, 743, 743, 743, 743, 743, 743, 743, 743, 743, 743, 743, 743, 743, 743, 743, 743, 743, 743, 743, 743, 743, 743, 743, 743, 743, 743, 743, 743, 743, 743, 743, 743, 743, 743, 743, 743, 743, 743, 743, 743, 743, 743, 743, 743, 743, 743, 743, 743, 743, 743, 743, 743, 743, 743, 743, 743, 743, 743, 743, 743, 743, 743, 743, 743, 743, 743, 743, 743, 743, 743, 743, 743, 743, 743, 743, 743, 743, 743, 743, 743, 743, 743, 743, 743, 743, 743, 743, 743, 743, 743, 743, 743, 743, 743, 743, 743, 743, 743, 743, 743, 743, 743, 743, 
743, 743, 743, 743, 743, 743, 743, 743, 743, 743, 743, 743, 743, 743, 743, 743, 743, 743, 743, 743, 743, 743, 743, 743, 743, 743, 743, 743, 743, 743, 743, 743, 743, 743, 743, 743, 743, 743, 743, 743, 743, 743, 743, 743, 743, 743, 743, 743, 743, 743, 743, 743, 743, 743, 743, 743, 743, 743, 743, 743, 743, 743, 743, 743, 743, 743, 743, 743, 743, 743, 743, 743, 743, 743, 743, 743, 743, 743, 743, 743, 743, 743, 743, 743, 743, 743, 743, 743, 743, 743, 743, 743, 743, 743, 743, 743, 743, 743, 743, 743, 743, 743, 743, 743, 743, 743, 743, 743, 743, 743, 743, 743, 743, 743, 743, 743, 743, 743, 743, 743, 743, 743, 743, 743, 743, 743, 743, 743, 743, 743, 743, 743, 743, 743, 743, 743, 743, 743, 743, 743, 743, 743, 743, 743, 743, 743, 743, 743, 743, 743, 743, 743, 743, 743, 743, 743, 743, 743, 743, 743, 743, 743, 743, 743, 743, 743, 743, 743, 743, 743, 743, 743, 743, 743, 743, 743, 743, 743, 743, 743, 743, 743, 743, 743, 743, 743, 743, 743, 743, 743, 743, 743, 743, 743, 743, 743, 743, 743, 743, 743, 743, 743, 743, 743, 743, 743, 743, 743, 743, 743, 743, 743, 743, 743, 743, 743, 743, 743, 743, 743, 743, 743, 743, 743, 743, 743, 743, 743, 743, 743, 743, 743, 743, 743, 743, 743, 743, 743, 743, 743, 743, 743, 743, 743, 743, 743, 743, 743, 743, 743, 743, 743, 743, 743, 743, 743, 743, 743, 743, 743, 743, 743, 743, 743, 743, 743, 743, 743, 743, 743, 743, 743, 743, 743, 743, 743, 743, 743, 743, 743, 743, 743, 743, 743, 743, 743, 743, 743, 743, 743, 743, 743, 743, 743, 743, 743, 743, 743, 743, 743, 743, 743, 743, 743, 743, 743, 743, 743, 743, 743, 743, 743, 743, 743, 743, 743, 743, 743, 743, 743, 743, 743, 743, 743, 743, 743, 743, 743, 743, 743, 743, 743}, + {742, 742, 742, 742, 742, 742, 742, 742, 742, 742, 742, 742, 742, 742, 742, 742, 742, 742, 742, 742, 742, 742, 742, 742, 742, 742, 742, 742, 742, 742, 742, 742, 742, 742, 742, 742, 742, 742, 742, 742, 742, 742, 742, 742, 742, 742, 742, 742, 742, 742, 742, 742, 742, 742, 742, 742, 742, 742, 742, 742, 742, 742, 742, 742, 742, 742, 742, 742, 742, 742, 742, 742, 742, 742, 742, 742, 742, 742, 742, 742, 742, 742, 742, 742, 742, 742, 742, 742, 742, 742, 742, 742, 742, 742, 742, 742, 742, 742, 742, 742, 742, 742, 742, 742, 742, 742, 742, 742, 742, 742, 742, 742, 742, 742, 742, 742, 742, 742, 742, 742, 742, 742, 742, 742, 742, 742, 742, 742, 742, 742, 742, 742, 742, 742, 742, 742, 742, 742, 742, 742, 742, 742, 742, 742, 742, 742, 742, 742, 742, 742, 742, 742, 742, 742, 742, 742, 742, 742, 742, 742, 742, 742, 742, 742, 742, 742, 742, 742, 742, 742, 742, 742, 742, 742, 742, 742, 742, 742, 742, 742, 742, 742, 742, 742, 742, 742, 742, 742, 742, 742, 742, 742, 742, 742, 742, 742, 742, 742, 742, 742, 742, 742, 742, 742, 742, 742, 742, 742, 742, 742, 742, 742, 742, 742, 742, 742, 742, 742, 742, 742, 742, 742, 742, 742, 742, 742, 742, 742, 742, 742, 742, 742, 742, 742, 742, 742, 742, 742, 742, 742, 742, 742, 742, 742, 742, 742, 742, 742, 742, 742, 742, 742, 742, 742, 742, 742, 742, 742, 742, 742, 742, 742, 742, 742, 742, 742, 742, 742, 742, 742, 742, 742, 742, 742, 742, 742, 742, 742, 742, 742, 742, 742, 742, 742, 742, 742, 742, 742, 742, 742, 742, 742, 742, 742, 742, 742, 742, 742, 742, 742, 742, 742, 742, 742, 742, 742, 742, 742, 742, 742, 742, 742, 742, 742, 742, 742, 742, 742, 742, 742, 742, 742, 742, 742, 742, 742, 742, 742, 742, 742, 742, 742, 742, 742, 742, 742, 742, 742, 742, 742, 742, 742, 742, 742, 742, 742, 742, 742, 742, 742, 742, 742, 742, 742, 742, 742, 742, 742, 742, 742, 742, 742, 742, 742, 742, 742, 742, 742, 742, 742, 742, 742, 742, 742, 742, 742, 742, 742, 
742, 742, 742, 742, 742, 742, 742, 742, 742, 742, 742, 742, 742, 742, 742, 742, 742, 742, 742, 742, 742, 742, 742, 742, 742, 742, 742, 742, 742, 742, 742, 742, 742, 742, 742, 742, 742, 742, 742, 742, 742, 742, 742, 742, 742, 742, 742, 742, 742, 742, 742, 742, 742, 742, 742, 742, 742, 742, 742, 742, 742, 742, 742, 742, 742, 742, 742, 742, 742, 742, 742, 742, 742, 742, 742, 742, 742, 742, 742, 742, 742, 742, 742, 742, 742, 742, 742, 742, 742, 742, 742, 742, 742, 742, 742, 742, 742, 742, 742, 742, 742, 742, 742, 742, 742, 742, 742, 742, 742, 742, 742, 742, 742, 742, 742, 742, 742, 742, 742, 742, 742, 742, 742, 742, 742, 742, 742, 742, 742, 742, 742, 742, 742, 742, 742, 742, 742, 742, 742, 742, 742, 742, 742, 742, 742}, + // 200 + {741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741}, + {740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 
740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740}, + {739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 
739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739}, + {738, 738, 738, 738, 738, 738, 738, 738, 738, 738, 738, 738, 738, 738, 738, 738, 738, 738, 738, 738, 738, 738, 738, 738, 738, 738, 738, 738, 738, 738, 738, 738, 738, 738, 738, 738, 738, 738, 738, 738, 738, 738, 738, 738, 738, 738, 738, 738, 738, 738, 738, 738, 738, 738, 738, 738, 738, 738, 738, 738, 738, 738, 738, 738, 738, 738, 738, 738, 738, 738, 738, 738, 738, 738, 738, 738, 738, 738, 738, 738, 738, 738, 738, 738, 738, 738, 738, 738, 738, 738, 738, 738, 738, 738, 738, 738, 738, 738, 738, 738, 738, 738, 738, 738, 738, 738, 738, 738, 738, 738, 738, 738, 738, 738, 738, 738, 738, 738, 738, 738, 738, 738, 738, 738, 738, 738, 738, 738, 738, 738, 738, 738, 738, 738, 738, 738, 738, 738, 738, 738, 738, 738, 738, 738, 738, 738, 738, 738, 738, 738, 738, 738, 738, 738, 738, 738, 738, 738, 738, 738, 738, 738, 738, 738, 738, 738, 738, 738, 738, 738, 738, 738, 738, 738, 738, 738, 738, 738, 738, 738, 738, 738, 738, 738, 738, 738, 738, 738, 738, 738, 738, 738, 738, 738, 738, 738, 738, 738, 738, 738, 738, 738, 738, 738, 738, 738, 738, 738, 738, 738, 738, 738, 738, 738, 738, 738, 738, 738, 738, 738, 738, 738, 738, 738, 738, 738, 738, 738, 738, 738, 738, 738, 738, 738, 738, 738, 738, 738, 738, 738, 738, 738, 738, 738, 738, 738, 738, 738, 738, 738, 738, 738, 738, 738, 738, 738, 738, 738, 738, 738, 738, 738, 738, 738, 738, 738, 738, 738, 738, 738, 738, 738, 738, 738, 738, 738, 738, 738, 738, 738, 738, 738, 738, 738, 738, 738, 738, 738, 738, 738, 738, 738, 738, 738, 738, 738, 738, 738, 738, 738, 738, 738, 738, 738, 738, 738, 738, 738, 738, 738, 738, 738, 738, 738, 738, 738, 738, 738, 738, 738, 738, 738, 738, 738, 738, 738, 738, 738, 738, 738, 738, 738, 738, 738, 738, 738, 738, 738, 738, 738, 738, 738, 738, 738, 738, 738, 738, 738, 738, 738, 738, 738, 738, 738, 738, 738, 738, 738, 738, 738, 738, 738, 738, 738, 738, 738, 738, 738, 738, 738, 738, 738, 738, 738, 738, 738, 738, 738, 738, 738, 738, 738, 738, 738, 738, 738, 738, 738, 738, 738, 738, 738, 738, 738, 738, 738, 738, 738, 738, 738, 738, 738, 738, 738, 738, 738, 738, 738, 738, 738, 738, 738, 738, 
738, 738, 738, 738, 738, 738, 738, 738, 738, 738, 738, 738, 738, 738, 738, 738, 738, 738, 738, 738, 738, 738, 738, 738, 738, 738, 738, 738, 738, 738, 738, 738, 738, 738, 738, 738, 738, 738, 738, 738, 738, 738, 738, 738, 738, 738, 738, 738, 738, 738, 738, 738, 738, 738, 738, 738, 738, 738, 738, 738, 738, 738, 738, 738, 738, 738, 738, 738, 738, 738, 738, 738, 738, 738, 738, 738, 738, 738, 738, 738, 738, 738, 738, 738, 738, 738, 738, 738, 738, 738, 738, 738, 738, 738, 738, 738, 738, 738, 738, 738, 738, 738, 738, 738, 738, 738, 738, 738, 738, 738}, + {737, 737, 737, 737, 737, 737, 737, 737, 737, 737, 737, 737, 737, 737, 737, 737, 737, 737, 737, 737, 737, 737, 737, 737, 737, 737, 737, 737, 737, 737, 737, 737, 737, 737, 737, 737, 737, 737, 737, 737, 737, 737, 737, 737, 737, 737, 737, 737, 737, 737, 737, 737, 737, 737, 737, 737, 737, 737, 737, 737, 737, 737, 737, 737, 737, 737, 737, 737, 737, 737, 737, 737, 737, 737, 737, 737, 737, 737, 737, 737, 737, 737, 737, 737, 737, 737, 737, 737, 737, 737, 737, 737, 737, 737, 737, 737, 737, 737, 737, 737, 737, 737, 737, 737, 737, 737, 737, 737, 737, 737, 737, 737, 737, 737, 737, 737, 737, 737, 737, 737, 737, 737, 737, 737, 737, 737, 737, 737, 737, 737, 737, 737, 737, 737, 737, 737, 737, 737, 737, 737, 737, 737, 737, 737, 737, 737, 737, 737, 737, 737, 737, 737, 737, 737, 737, 737, 737, 737, 737, 737, 737, 737, 737, 737, 737, 737, 737, 737, 737, 737, 737, 737, 737, 737, 737, 737, 737, 737, 737, 737, 737, 737, 737, 737, 737, 737, 737, 737, 737, 737, 737, 737, 737, 737, 737, 737, 737, 737, 737, 737, 737, 737, 737, 737, 737, 737, 737, 737, 737, 737, 737, 737, 737, 737, 737, 737, 737, 737, 737, 737, 737, 737, 737, 737, 737, 737, 737, 737, 737, 737, 737, 737, 737, 737, 737, 737, 737, 737, 737, 737, 737, 737, 737, 737, 737, 737, 737, 737, 737, 737, 737, 737, 737, 737, 737, 737, 737, 737, 737, 737, 737, 737, 737, 737, 737, 737, 737, 737, 737, 737, 737, 737, 737, 737, 737, 737, 737, 737, 737, 737, 737, 737, 737, 737, 737, 737, 737, 737, 737, 737, 737, 737, 737, 737, 737, 737, 737, 737, 737, 737, 737, 737, 737, 737, 737, 737, 737, 737, 737, 737, 737, 737, 737, 737, 737, 737, 737, 737, 737, 737, 737, 737, 737, 737, 737, 737, 737, 737, 737, 737, 737, 737, 737, 737, 737, 737, 737, 737, 737, 737, 737, 737, 737, 737, 737, 737, 737, 737, 737, 737, 737, 737, 737, 737, 737, 737, 737, 737, 737, 737, 737, 737, 737, 737, 737, 737, 737, 737, 737, 737, 737, 737, 737, 737, 737, 737, 737, 737, 737, 737, 737, 737, 737, 737, 737, 737, 737, 737, 737, 737, 737, 737, 737, 737, 737, 737, 737, 737, 737, 737, 737, 737, 737, 737, 737, 737, 737, 737, 737, 737, 737, 737, 737, 737, 737, 737, 737, 737, 737, 737, 737, 737, 737, 737, 737, 737, 737, 737, 737, 737, 737, 737, 737, 737, 737, 737, 737, 737, 737, 737, 737, 737, 737, 737, 737, 737, 737, 737, 737, 737, 737, 737, 737, 737, 737, 737, 737, 737, 737, 737, 737, 737, 737, 737, 737, 737, 737, 737, 737, 737, 737, 737, 737, 737, 737, 737, 737, 737, 737, 737, 737, 737, 737, 737, 737, 737, 737, 737, 737, 737, 737, 737, 737, 737, 737, 737, 737, 737, 737, 737, 737, 737, 737, 737, 737, 737, 737, 737, 737, 737, 737, 737, 737, 737, 737, 737, 737, 737, 737, 737, 737, 737, 737}, + // 205 + {736, 736, 736, 736, 736, 736, 736, 736, 736, 736, 736, 736, 736, 736, 736, 736, 736, 736, 736, 736, 736, 736, 736, 736, 736, 736, 736, 736, 736, 736, 736, 736, 736, 736, 736, 736, 736, 736, 736, 736, 736, 736, 736, 736, 736, 736, 736, 736, 736, 736, 736, 736, 736, 736, 736, 736, 736, 736, 736, 736, 736, 736, 736, 736, 736, 736, 736, 736, 736, 736, 736, 736, 736, 736, 
736, 736, 736, 736, 736, 736, 736, 736, 736, 736, 736, 736, 736, 736, 736, 736, 736, 736, 736, 736, 736, 736, 736, 736, 736, 736, 736, 736, 736, 736, 736, 736, 736, 736, 736, 736, 736, 736, 736, 736, 736, 736, 736, 736, 736, 736, 736, 736, 736, 736, 736, 736, 736, 736, 736, 736, 736, 736, 736, 736, 736, 736, 736, 736, 736, 736, 736, 736, 736, 736, 736, 736, 736, 736, 736, 736, 736, 736, 736, 736, 736, 736, 736, 736, 736, 736, 736, 736, 736, 736, 736, 736, 736, 736, 736, 736, 736, 736, 736, 736, 736, 736, 736, 736, 736, 736, 736, 736, 736, 736, 736, 736, 736, 736, 736, 736, 736, 736, 736, 736, 736, 736, 736, 736, 736, 736, 736, 736, 736, 736, 736, 736, 736, 736, 736, 736, 736, 736, 736, 736, 736, 736, 736, 736, 736, 736, 736, 736, 736, 736, 736, 736, 736, 736, 736, 736, 736, 736, 736, 736, 736, 736, 736, 736, 736, 736, 736, 736, 736, 736, 736, 736, 736, 736, 736, 736, 736, 736, 736, 736, 736, 736, 736, 736, 736, 736, 736, 736, 736, 736, 736, 736, 736, 736, 736, 736, 736, 736, 736, 736, 736, 736, 736, 736, 736, 736, 736, 736, 736, 736, 736, 736, 736, 736, 736, 736, 736, 736, 736, 736, 736, 736, 736, 736, 736, 736, 736, 736, 736, 736, 736, 736, 736, 736, 736, 736, 736, 736, 736, 736, 736, 736, 736, 736, 736, 736, 736, 736, 736, 736, 736, 736, 736, 736, 736, 736, 736, 736, 736, 736, 736, 736, 736, 736, 736, 736, 736, 736, 736, 736, 736, 736, 736, 736, 736, 736, 736, 736, 736, 736, 736, 736, 736, 736, 736, 736, 736, 736, 736, 736, 736, 736, 736, 736, 736, 736, 736, 736, 736, 736, 736, 736, 736, 736, 736, 736, 736, 736, 736, 736, 736, 736, 736, 736, 736, 736, 736, 736, 736, 736, 736, 736, 736, 736, 736, 736, 736, 736, 736, 736, 736, 736, 736, 736, 736, 736, 736, 736, 736, 736, 736, 736, 736, 736, 736, 736, 736, 736, 736, 736, 736, 736, 736, 736, 736, 736, 736, 736, 736, 736, 736, 736, 736, 736, 736, 736, 736, 736, 736, 736, 736, 736, 736, 736, 736, 736, 736, 736, 736, 736, 736, 736, 736, 736, 736, 736, 736, 736, 736, 736, 736, 736, 736, 736, 736, 736, 736, 736, 736, 736, 736, 736, 736, 736, 736, 736, 736, 736, 736, 736, 736, 736, 736, 736, 736, 736, 736, 736, 736, 736, 736, 736, 736, 736, 736, 736, 736, 736, 736, 736, 736, 736, 736, 736, 736, 736, 736, 736, 736, 736, 736, 736, 736, 736, 736, 736, 736, 736, 736}, + {735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 
735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735}, + {734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 
734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734}, + {733, 733, 733, 733, 733, 733, 733, 733, 733, 733, 733, 733, 733, 733, 733, 733, 733, 733, 733, 733, 733, 733, 733, 733, 733, 733, 733, 733, 733, 733, 733, 733, 733, 733, 733, 733, 733, 733, 733, 733, 733, 733, 733, 733, 733, 733, 733, 733, 733, 733, 733, 733, 733, 733, 733, 733, 733, 733, 733, 733, 733, 733, 733, 733, 733, 733, 733, 733, 733, 733, 733, 733, 733, 733, 733, 733, 733, 733, 733, 733, 733, 733, 733, 733, 733, 733, 733, 733, 733, 733, 733, 733, 733, 733, 733, 733, 733, 733, 733, 733, 733, 733, 733, 733, 733, 733, 733, 733, 733, 733, 733, 733, 733, 733, 733, 733, 733, 733, 733, 733, 733, 733, 733, 733, 733, 733, 733, 733, 733, 733, 733, 733, 733, 733, 733, 733, 733, 733, 733, 733, 733, 733, 733, 733, 733, 733, 733, 733, 733, 733, 733, 733, 733, 733, 733, 733, 733, 733, 733, 733, 733, 733, 733, 733, 733, 733, 733, 733, 733, 733, 733, 733, 733, 733, 733, 733, 733, 733, 733, 733, 733, 733, 733, 733, 733, 733, 733, 733, 733, 733, 733, 733, 733, 733, 733, 733, 733, 733, 733, 733, 733, 733, 733, 733, 733, 733, 733, 733, 733, 733, 733, 733, 733, 733, 733, 733, 733, 733, 733, 733, 733, 733, 733, 733, 733, 733, 733, 733, 733, 733, 733, 733, 733, 733, 733, 733, 733, 733, 733, 733, 733, 733, 733, 733, 733, 733, 733, 733, 733, 733, 733, 733, 733, 733, 733, 733, 733, 733, 733, 733, 733, 733, 733, 733, 733, 733, 733, 733, 733, 733, 733, 733, 733, 733, 733, 733, 733, 733, 733, 733, 733, 733, 733, 733, 733, 733, 733, 733, 733, 733, 733, 733, 733, 733, 733, 733, 733, 733, 733, 733, 733, 733, 733, 733, 733, 733, 733, 733, 733, 733, 733, 733, 733, 733, 733, 733, 733, 733, 733, 733, 733, 733, 733, 733, 733, 733, 733, 733, 733, 733, 733, 733, 733, 733, 733, 733, 733, 733, 733, 733, 733, 733, 733, 733, 733, 733, 733, 733, 733, 733, 733, 733, 733, 733, 733, 733, 733, 733, 733, 733, 733, 733, 733, 733, 733, 733, 733, 733, 733, 733, 733, 733, 733, 733, 733, 733, 733, 733, 733, 733, 733, 733, 733, 733, 733, 733, 733, 733, 733, 733, 733, 733, 733, 733, 733, 733, 733, 733, 733, 733, 733, 733, 733, 733, 733, 733, 733, 733, 733, 733, 733, 733, 733, 733, 733, 733, 733, 733, 733, 733, 733, 733, 733, 733, 733, 733, 733, 733, 733, 733, 733, 733, 733, 733, 733, 733, 733, 733, 733, 733, 733, 733, 733, 733, 733, 733, 733, 733, 733, 733, 733, 733, 733, 733, 733, 733, 733, 733, 733, 733, 733, 733, 733, 733, 733, 733, 733, 733, 733, 733, 733, 733, 733, 733, 733, 733, 733, 733, 733, 733, 733, 733, 733, 733, 733, 733, 733, 733, 733, 733, 733, 733, 733, 733, 733, 733, 733, 733, 733, 733, 733, 733, 733, 733, 733, 733, 733, 733, 733, 733, 733, 733, 733, 733, 733, 733, 733, 733, 733, 733, 733, 733, 733}, + {732, 732, 732, 732, 732, 732, 732, 732, 732, 732, 732, 732, 732, 732, 732, 732, 732, 732, 732, 732, 732, 732, 732, 732, 732, 732, 732, 732, 732, 732, 732, 732, 732, 732, 732, 732, 732, 732, 732, 732, 732, 732, 732, 732, 732, 732, 732, 732, 732, 732, 732, 732, 732, 732, 732, 732, 732, 732, 732, 732, 732, 732, 732, 732, 732, 732, 732, 732, 732, 732, 732, 732, 732, 732, 732, 732, 732, 732, 732, 732, 732, 732, 732, 732, 732, 732, 732, 732, 732, 732, 732, 732, 732, 732, 732, 732, 732, 732, 732, 732, 732, 732, 732, 732, 732, 732, 732, 732, 732, 732, 732, 
732, 732, 732, 732, 732, 732, 732, 732, 732, 732, 732, 732, 732, 732, 732, 732, 732, 732, 732, 732, 732, 732, 732, 732, 732, 732, 732, 732, 732, 732, 732, 732, 732, 732, 732, 732, 732, 732, 732, 732, 732, 732, 732, 732, 732, 732, 732, 732, 732, 732, 732, 732, 732, 732, 732, 732, 732, 732, 732, 732, 732, 732, 732, 732, 732, 732, 732, 732, 732, 732, 732, 732, 732, 732, 732, 732, 732, 732, 732, 732, 732, 732, 732, 732, 732, 732, 732, 732, 732, 732, 732, 732, 732, 732, 732, 732, 732, 732, 732, 732, 732, 732, 732, 732, 732, 732, 732, 732, 732, 732, 732, 732, 732, 732, 732, 732, 732, 732, 732, 732, 732, 732, 732, 732, 732, 732, 732, 732, 732, 732, 732, 732, 732, 732, 732, 732, 732, 732, 732, 732, 732, 732, 732, 732, 732, 732, 732, 732, 732, 732, 732, 732, 732, 732, 732, 732, 732, 732, 732, 732, 732, 732, 732, 732, 732, 732, 732, 732, 732, 732, 732, 732, 732, 732, 732, 732, 732, 732, 732, 732, 732, 732, 732, 732, 732, 732, 732, 732, 732, 732, 732, 732, 732, 732, 732, 732, 732, 732, 732, 732, 732, 732, 732, 732, 732, 732, 732, 732, 732, 732, 732, 732, 732, 732, 732, 732, 732, 732, 732, 732, 732, 732, 732, 732, 732, 732, 732, 732, 732, 732, 732, 732, 732, 732, 732, 732, 732, 732, 732, 732, 732, 732, 732, 732, 732, 732, 732, 732, 732, 732, 732, 732, 732, 732, 732, 732, 732, 732, 732, 732, 732, 732, 732, 732, 732, 732, 732, 732, 732, 732, 732, 732, 732, 732, 732, 732, 732, 732, 732, 732, 732, 732, 732, 732, 732, 732, 732, 732, 732, 732, 732, 732, 732, 732, 732, 732, 732, 732, 732, 732, 732, 732, 732, 732, 732, 732, 732, 732, 732, 732, 732, 732, 732, 732, 732, 732, 732, 732, 732, 732, 732, 732, 732, 732, 732, 732, 732, 732, 732, 732, 732, 732, 732, 732, 732, 732, 732, 732, 732, 732, 732, 732, 732, 732, 732, 732, 732, 732, 732, 732, 732, 732, 732, 732, 732, 732, 732, 732, 732, 732, 732, 732, 732, 732, 732, 732, 732, 732, 732, 732, 732, 732, 732, 732, 732, 732, 732, 732, 732, 732, 732, 732, 732, 732, 732, 732, 732, 732, 732, 732, 732, 732, 732, 732, 732, 732, 732, 732, 732, 732, 732, 732, 732, 732, 732, 732, 732, 732, 732, 732, 732, 732}, + // 210 + {731, 731, 731, 731, 731, 731, 731, 731, 731, 731, 731, 731, 731, 731, 731, 731, 731, 731, 731, 731, 731, 731, 731, 731, 731, 731, 731, 731, 731, 731, 731, 731, 731, 731, 731, 731, 731, 731, 731, 731, 731, 731, 731, 731, 731, 731, 731, 731, 731, 731, 731, 731, 731, 731, 731, 731, 731, 731, 731, 731, 731, 731, 731, 731, 731, 731, 731, 731, 731, 731, 731, 731, 731, 731, 731, 731, 731, 731, 731, 731, 731, 731, 731, 731, 731, 731, 731, 731, 731, 731, 731, 731, 731, 731, 731, 731, 731, 731, 731, 731, 731, 731, 731, 731, 731, 731, 731, 731, 731, 731, 731, 731, 731, 731, 731, 731, 731, 731, 731, 731, 731, 731, 731, 731, 731, 731, 731, 731, 731, 731, 731, 731, 731, 731, 731, 731, 731, 731, 731, 731, 731, 731, 731, 731, 731, 731, 731, 731, 731, 731, 731, 731, 731, 731, 731, 731, 731, 731, 731, 731, 731, 731, 731, 731, 731, 731, 731, 731, 731, 731, 731, 731, 731, 731, 731, 731, 731, 731, 731, 731, 731, 731, 731, 731, 731, 731, 731, 731, 731, 731, 731, 731, 731, 731, 731, 731, 731, 731, 731, 731, 731, 731, 731, 731, 731, 731, 731, 731, 731, 731, 731, 731, 731, 731, 731, 731, 731, 731, 731, 731, 731, 731, 731, 731, 731, 731, 731, 731, 731, 731, 731, 731, 731, 731, 731, 731, 731, 731, 731, 731, 731, 731, 731, 731, 731, 731, 731, 731, 731, 731, 731, 731, 731, 731, 731, 731, 731, 731, 731, 731, 731, 731, 731, 731, 731, 731, 731, 731, 731, 731, 731, 731, 731, 731, 731, 731, 731, 731, 731, 731, 731, 731, 731, 731, 731, 731, 731, 731, 731, 731, 731, 731, 731, 731, 731, 731, 
731, 731, 731, 731, 731, 731, 731, 731, 731, 731, 731, 731, 731, 731, 731, 731, 731, 731, 731, 731, 731, 731, 731, 731, 731, 731, 731, 731, 731, 731, 731, 731, 731, 731, 731, 731, 731, 731, 731, 731, 731, 731, 731, 731, 731, 731, 731, 731, 731, 731, 731, 731, 731, 731, 731, 731, 731, 731, 731, 731, 731, 731, 731, 731, 731, 731, 731, 731, 731, 731, 731, 731, 731, 731, 731, 731, 731, 731, 731, 731, 731, 731, 731, 731, 731, 731, 731, 731, 731, 731, 731, 731, 731, 731, 731, 731, 731, 731, 731, 731, 731, 731, 731, 731, 731, 731, 731, 731, 731, 731, 731, 731, 731, 731, 731, 731, 731, 731, 731, 731, 731, 731, 731, 731, 731, 731, 731, 731, 731, 731, 731, 731, 731, 731, 731, 731, 731, 731, 731, 731, 731, 731, 731, 731, 731, 731, 731, 731, 731, 731, 731, 731, 731, 731, 731, 731, 731, 731, 731, 731, 731, 731, 731, 731, 731, 731, 731, 731, 731, 731, 731, 731, 731, 731, 731, 731, 731, 731, 731, 731, 731, 731, 731, 731, 731, 731, 731, 731, 731, 731, 731, 731, 731, 731, 731, 731, 731, 731, 731, 731, 731, 731, 731, 731, 731, 731, 731, 731, 731, 731, 731, 731, 731, 731, 731, 731, 731, 731, 731, 731, 731, 731, 731, 731, 731, 731, 731}, + {730, 730, 730, 730, 730, 730, 730, 730, 730, 730, 730, 730, 730, 730, 730, 730, 730, 730, 730, 730, 730, 730, 730, 730, 730, 730, 730, 730, 730, 730, 730, 730, 730, 730, 730, 730, 730, 730, 730, 730, 730, 730, 730, 730, 730, 730, 730, 730, 730, 730, 730, 730, 730, 730, 730, 730, 730, 730, 730, 730, 730, 730, 730, 730, 730, 730, 730, 730, 730, 730, 730, 730, 730, 730, 730, 730, 730, 730, 730, 730, 730, 730, 730, 730, 730, 730, 730, 730, 730, 730, 730, 730, 730, 730, 730, 730, 730, 730, 730, 730, 730, 730, 730, 730, 730, 730, 730, 730, 730, 730, 730, 730, 730, 730, 730, 730, 730, 730, 730, 730, 730, 730, 730, 730, 730, 730, 730, 730, 730, 730, 730, 730, 730, 730, 730, 730, 730, 730, 730, 730, 730, 730, 730, 730, 730, 730, 730, 730, 730, 730, 730, 730, 730, 730, 730, 730, 730, 730, 730, 730, 730, 730, 730, 730, 730, 730, 730, 730, 730, 730, 730, 730, 730, 730, 730, 730, 730, 730, 730, 730, 730, 730, 730, 730, 730, 730, 730, 730, 730, 730, 730, 730, 730, 730, 730, 730, 730, 730, 730, 730, 730, 730, 730, 730, 730, 730, 730, 730, 730, 730, 730, 730, 730, 730, 730, 730, 730, 730, 730, 730, 730, 730, 730, 730, 730, 730, 730, 730, 730, 730, 730, 730, 730, 730, 730, 730, 730, 730, 730, 730, 730, 730, 730, 730, 730, 730, 730, 730, 730, 730, 730, 730, 730, 730, 730, 730, 730, 730, 730, 730, 730, 730, 730, 730, 730, 730, 730, 730, 730, 730, 730, 730, 730, 730, 730, 730, 730, 730, 730, 730, 730, 730, 730, 730, 730, 730, 730, 730, 730, 730, 730, 730, 730, 730, 730, 730, 730, 730, 730, 730, 730, 730, 730, 730, 730, 730, 730, 730, 730, 730, 730, 730, 730, 730, 730, 730, 730, 730, 730, 730, 730, 730, 730, 730, 730, 730, 730, 730, 730, 730, 730, 730, 730, 730, 730, 730, 730, 730, 730, 730, 730, 730, 730, 730, 730, 730, 730, 730, 730, 730, 730, 730, 730, 730, 730, 730, 730, 730, 730, 730, 730, 730, 730, 730, 730, 730, 730, 730, 730, 730, 730, 730, 730, 730, 730, 730, 730, 730, 730, 730, 730, 730, 730, 730, 730, 730, 730, 730, 730, 730, 730, 730, 730, 730, 730, 730, 730, 730, 730, 730, 730, 730, 730, 730, 730, 730, 730, 730, 730, 730, 730, 730, 730, 730, 730, 730, 730, 730, 730, 730, 730, 730, 730, 730, 730, 730, 730, 730, 730, 730, 730, 730, 730, 730, 730, 730, 730, 730, 730, 730, 730, 730, 730, 730, 730, 730, 730, 730, 730, 730, 730, 730, 730, 730, 730, 730, 730, 730, 730, 730, 730, 730, 730, 730, 730, 730, 730, 730, 730, 730, 730, 730, 730, 730, 730, 730, 730, 730, 730, 730, 730, 730, 730, 
730, 730, 730, 730, 730, 730, 730, 730, 730, 730, 730, 730, 730, 730, 730, 730, 730, 730, 730, 730, 730, 730, 730, 730, 730, 730, 730, 730, 730, 730, 730, 730, 730, 730, 730, 730, 730, 730, 730, 730}, + {729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729}, + {728, 728, 728, 728, 728, 728, 728, 728, 728, 728, 728, 728, 728, 728, 728, 728, 728, 728, 728, 728, 728, 728, 728, 728, 728, 728, 728, 728, 728, 728, 728, 728, 728, 728, 728, 728, 728, 728, 728, 728, 728, 728, 728, 728, 728, 728, 728, 728, 728, 728, 728, 728, 728, 728, 728, 728, 728, 728, 728, 728, 728, 728, 728, 728, 728, 728, 728, 728, 728, 728, 728, 728, 728, 728, 728, 728, 728, 728, 728, 728, 728, 728, 728, 728, 728, 728, 728, 728, 728, 728, 728, 728, 728, 728, 728, 728, 728, 728, 728, 728, 728, 728, 728, 728, 728, 728, 728, 728, 728, 728, 728, 728, 728, 728, 728, 728, 728, 728, 728, 728, 728, 728, 728, 728, 728, 728, 728, 728, 728, 728, 728, 728, 728, 728, 728, 728, 728, 728, 728, 728, 728, 728, 728, 728, 728, 728, 
728, 728, 728, 728, 728, 728, 728, 728, 728, 728, 728, 728, 728, 728, 728, 728, 728, 728, 728, 728, 728, 728, 728, 728, 728, 728, 728, 728, 728, 728, 728, 728, 728, 728, 728, 728, 728, 728, 728, 728, 728, 728, 728, 728, 728, 728, 728, 728, 728, 728, 728, 728, 728, 728, 728, 728, 728, 728, 728, 728, 728, 728, 728, 728, 728, 728, 728, 728, 728, 728, 728, 728, 728, 728, 728, 728, 728, 728, 728, 728, 728, 728, 728, 728, 728, 728, 728, 728, 728, 728, 728, 728, 728, 728, 728, 728, 728, 728, 728, 728, 728, 728, 728, 728, 728, 728, 728, 728, 728, 728, 728, 728, 728, 728, 728, 728, 728, 728, 728, 728, 728, 728, 728, 728, 728, 728, 728, 728, 728, 728, 728, 728, 728, 728, 728, 728, 728, 728, 728, 728, 728, 728, 728, 728, 728, 728, 728, 728, 728, 728, 728, 728, 728, 728, 728, 728, 728, 728, 728, 728, 728, 728, 728, 728, 728, 728, 728, 728, 728, 728, 728, 728, 728, 728, 728, 728, 728, 728, 728, 728, 728, 728, 728, 728, 728, 728, 728, 728, 728, 728, 728, 728, 728, 728, 728, 728, 728, 728, 728, 728, 728, 728, 728, 728, 728, 728, 728, 728, 728, 728, 728, 728, 728, 728, 728, 728, 728, 728, 728, 728, 728, 728, 728, 728, 728, 728, 728, 728, 728, 728, 728, 728, 728, 728, 728, 728, 728, 728, 728, 728, 728, 728, 728, 728, 728, 728, 728, 728, 728, 728, 728, 728, 728, 728, 728, 728, 728, 728, 728, 728, 728, 728, 728, 728, 728, 728, 728, 728, 728, 728, 728, 728, 728, 728, 728, 728, 728, 728, 728, 728, 728, 728, 728, 728, 728, 728, 728, 728, 728, 728, 728, 728, 728, 728, 728, 728, 728, 728, 728, 728, 728, 728, 728, 728, 728, 728, 728, 728, 728, 728, 728, 728, 728, 728, 728, 728, 728, 728, 728, 728, 728, 728, 728, 728, 728, 728, 728, 728, 728, 728, 728, 728, 728, 728, 728, 728, 728, 728, 728, 728, 728, 728, 728, 728, 728, 728, 728, 728, 728, 728, 728, 728, 728, 728, 728, 728, 728, 728, 728, 728, 728, 728, 728, 728, 728, 728, 728, 728, 728, 728, 728, 728, 728, 728, 728, 728, 728}, + {727, 727, 727, 727, 727, 727, 727, 727, 727, 727, 727, 727, 727, 727, 727, 727, 727, 727, 727, 727, 727, 727, 727, 727, 727, 727, 727, 727, 727, 727, 727, 727, 727, 727, 727, 727, 727, 727, 727, 727, 727, 727, 727, 727, 727, 727, 727, 727, 727, 727, 727, 727, 727, 727, 727, 727, 727, 727, 727, 727, 727, 727, 727, 727, 727, 727, 727, 727, 727, 727, 727, 727, 727, 727, 727, 727, 727, 727, 727, 727, 727, 727, 727, 727, 727, 727, 727, 727, 727, 727, 727, 727, 727, 727, 727, 727, 727, 727, 727, 727, 727, 727, 727, 727, 727, 727, 727, 727, 727, 727, 727, 727, 727, 727, 727, 727, 727, 727, 727, 727, 727, 727, 727, 727, 727, 727, 727, 727, 727, 727, 727, 727, 727, 727, 727, 727, 727, 727, 727, 727, 727, 727, 727, 727, 727, 727, 727, 727, 727, 727, 727, 727, 727, 727, 727, 727, 727, 727, 727, 727, 727, 727, 727, 727, 727, 727, 727, 727, 727, 727, 727, 727, 727, 727, 727, 727, 727, 727, 727, 727, 727, 727, 727, 727, 727, 727, 727, 727, 727, 727, 727, 727, 727, 727, 727, 727, 727, 727, 727, 727, 727, 727, 727, 727, 727, 727, 727, 727, 727, 727, 727, 727, 727, 727, 727, 727, 727, 727, 727, 727, 727, 727, 727, 727, 727, 727, 727, 727, 727, 727, 727, 727, 727, 727, 727, 727, 727, 727, 727, 727, 727, 727, 727, 727, 727, 727, 727, 727, 727, 727, 727, 727, 727, 727, 727, 727, 727, 727, 727, 727, 727, 727, 727, 727, 727, 727, 727, 727, 727, 727, 727, 727, 727, 727, 727, 727, 727, 727, 727, 727, 727, 727, 727, 727, 727, 727, 727, 727, 727, 727, 727, 727, 727, 727, 727, 727, 727, 727, 727, 727, 727, 727, 727, 727, 727, 727, 727, 727, 727, 727, 727, 727, 727, 727, 727, 727, 727, 727, 727, 727, 727, 727, 727, 727, 727, 727, 727, 727, 727, 727, 727, 727, 727, 
727, 727, 727, 727, 727, 727, 727, 727, 727, 727, 727, 727, 727, 727, 727, 727, 727, 727, 727, 727, 727, 727, 727, 727, 727, 727, 727, 727, 727, 727, 727, 727, 727, 727, 727, 727, 727, 727, 727, 727, 727, 727, 727, 727, 727, 727, 727, 727, 727, 727, 727, 727, 727, 727, 727, 727, 727, 727, 727, 727, 727, 727, 727, 727, 727, 727, 727, 727, 727, 727, 727, 727, 727, 727, 727, 727, 727, 727, 727, 727, 727, 727, 727, 727, 727, 727, 727, 727, 727, 727, 727, 727, 727, 727, 727, 727, 727, 727, 727, 727, 727, 727, 727, 727, 727, 727, 727, 727, 727, 727, 727, 727, 727, 727, 727, 727, 727, 727, 727, 727, 727, 727, 727, 727, 727, 727, 727, 727, 727, 727, 727, 727, 727, 727, 727, 727, 727, 727, 727, 727, 727, 727, 727, 727, 727, 727, 727, 727, 727, 727, 727, 727, 727, 727, 727, 727, 727, 727, 727, 727, 727, 727, 727, 727, 727, 727, 727, 727, 727, 727, 727, 727, 727, 727, 727, 727, 727, 727, 727, 727, 727, 727, 727, 727, 727, 727, 727, 727, 727, 727}, + // 215 + {726, 726, 726, 726, 726, 726, 726, 726, 726, 726, 726, 726, 726, 726, 726, 726, 726, 726, 726, 726, 726, 726, 726, 726, 726, 726, 726, 726, 726, 726, 726, 726, 726, 726, 726, 726, 726, 726, 726, 726, 726, 726, 726, 726, 726, 726, 726, 726, 726, 726, 726, 726, 726, 726, 726, 726, 726, 726, 726, 726, 726, 726, 726, 726, 726, 726, 726, 726, 726, 726, 726, 726, 726, 726, 726, 726, 726, 726, 726, 726, 726, 726, 726, 726, 726, 726, 726, 726, 726, 726, 726, 726, 726, 726, 726, 726, 726, 726, 726, 726, 726, 726, 726, 726, 726, 726, 726, 726, 726, 726, 726, 726, 726, 726, 726, 726, 726, 726, 726, 726, 726, 726, 726, 726, 726, 726, 726, 726, 726, 726, 726, 726, 726, 726, 726, 726, 726, 726, 726, 726, 726, 726, 726, 726, 726, 726, 726, 726, 726, 726, 726, 726, 726, 726, 726, 726, 726, 726, 726, 726, 726, 726, 726, 726, 726, 726, 726, 726, 726, 726, 726, 726, 726, 726, 726, 726, 726, 726, 726, 726, 726, 726, 726, 726, 726, 726, 726, 726, 726, 726, 726, 726, 726, 726, 726, 726, 726, 726, 726, 726, 726, 726, 726, 726, 726, 726, 726, 726, 726, 726, 726, 726, 726, 726, 726, 726, 726, 726, 726, 726, 726, 726, 726, 726, 726, 726, 726, 726, 726, 726, 726, 726, 726, 726, 726, 726, 726, 726, 726, 726, 726, 726, 726, 726, 726, 726, 726, 726, 726, 726, 726, 726, 726, 726, 726, 726, 726, 726, 726, 726, 726, 726, 726, 726, 726, 726, 726, 726, 726, 726, 726, 726, 726, 726, 726, 726, 726, 726, 726, 726, 726, 726, 726, 726, 726, 726, 726, 726, 726, 726, 726, 726, 726, 726, 726, 726, 726, 726, 726, 726, 726, 726, 726, 726, 726, 726, 726, 726, 726, 726, 726, 726, 726, 726, 726, 726, 726, 726, 726, 726, 726, 726, 726, 726, 726, 726, 726, 726, 726, 726, 726, 726, 726, 726, 726, 726, 726, 726, 726, 726, 726, 726, 726, 726, 726, 726, 726, 726, 726, 726, 726, 726, 726, 726, 726, 726, 726, 726, 726, 726, 726, 726, 726, 726, 726, 726, 726, 726, 726, 726, 726, 726, 726, 726, 726, 726, 726, 726, 726, 726, 726, 726, 726, 726, 726, 726, 726, 726, 726, 726, 726, 726, 726, 726, 726, 726, 726, 726, 726, 726, 726, 726, 726, 726, 726, 726, 726, 726, 726, 726, 726, 726, 726, 726, 726, 726, 726, 726, 726, 726, 726, 726, 726, 726, 726, 726, 726, 726, 726, 726, 726, 726, 726, 726, 726, 726, 726, 726, 726, 726, 726, 726, 726, 726, 726, 726, 726, 726, 726, 726, 726, 726, 726, 726, 726, 726, 726, 726, 726, 726, 726, 726, 726, 726, 726, 726, 726, 726, 726, 726, 726, 726, 726, 726, 726, 726, 726, 726, 726, 726, 726, 726, 726, 726, 726, 726, 726, 726, 726, 726, 726, 726, 726, 726, 726, 726, 726, 726, 726, 726, 726, 726, 726, 726, 726, 726, 726, 726, 726, 726, 726, 726, 726, 726, 726, 726, 726, 726, 
726, 726, 726, 726, 726}, + {725, 725, 725, 725, 725, 725, 725, 725, 725, 725, 725, 725, 725, 725, 725, 725, 725, 725, 725, 725, 725, 725, 725, 725, 725, 725, 725, 725, 725, 725, 725, 725, 725, 725, 725, 725, 725, 725, 725, 725, 725, 725, 725, 725, 725, 725, 725, 725, 725, 725, 725, 725, 725, 725, 725, 725, 725, 725, 725, 725, 725, 725, 725, 725, 725, 725, 725, 725, 725, 725, 725, 725, 725, 725, 725, 725, 725, 725, 725, 725, 725, 725, 725, 725, 725, 725, 725, 725, 725, 725, 725, 725, 725, 725, 725, 725, 725, 725, 725, 725, 725, 725, 725, 725, 725, 725, 725, 725, 725, 725, 725, 725, 725, 725, 725, 725, 725, 725, 725, 725, 725, 725, 725, 725, 725, 725, 725, 725, 725, 725, 725, 725, 725, 725, 725, 725, 725, 725, 725, 725, 725, 725, 725, 725, 725, 725, 725, 725, 725, 725, 725, 725, 725, 725, 725, 725, 725, 725, 725, 725, 725, 725, 725, 725, 725, 725, 725, 725, 725, 725, 725, 725, 725, 725, 725, 725, 725, 725, 725, 725, 725, 725, 725, 725, 725, 725, 725, 725, 725, 725, 725, 725, 725, 725, 725, 725, 725, 725, 725, 725, 725, 725, 725, 725, 725, 725, 725, 725, 725, 725, 725, 725, 725, 725, 725, 725, 725, 725, 725, 725, 725, 725, 725, 725, 725, 725, 725, 725, 725, 725, 725, 725, 725, 725, 725, 725, 725, 725, 725, 725, 725, 725, 725, 725, 725, 725, 725, 725, 725, 725, 725, 725, 725, 725, 725, 725, 725, 725, 725, 725, 725, 725, 725, 725, 725, 725, 725, 725, 725, 725, 725, 725, 725, 725, 725, 725, 725, 725, 725, 725, 725, 725, 725, 725, 725, 725, 725, 725, 725, 725, 725, 725, 725, 725, 725, 725, 725, 725, 725, 725, 725, 725, 725, 725, 725, 725, 725, 725, 725, 725, 725, 725, 725, 725, 725, 725, 725, 725, 725, 725, 725, 725, 725, 725, 725, 725, 725, 725, 725, 725, 725, 725, 725, 725, 725, 725, 725, 725, 725, 725, 725, 725, 725, 725, 725, 725, 725, 725, 725, 725, 725, 725, 725, 725, 725, 725, 725, 725, 725, 725, 725, 725, 725, 725, 725, 725, 725, 725, 725, 725, 725, 725, 725, 725, 725, 725, 725, 725, 725, 725, 725, 725, 725, 725, 725, 725, 725, 725, 725, 725, 725, 725, 725, 725, 725, 725, 725, 725, 725, 725, 725, 725, 725, 725, 725, 725, 725, 725, 725, 725, 725, 725, 725, 725, 725, 725, 725, 725, 725, 725, 725, 725, 725, 725, 725, 725, 725, 725, 725, 725, 725, 725, 725, 725, 725, 725, 725, 725, 725, 725, 725, 725, 725, 725, 725, 725, 725, 725, 725, 725, 725, 725, 725, 725, 725, 725, 725, 725, 725, 725, 725, 725, 725, 725, 725, 725, 725, 725, 725, 725, 725, 725, 725, 725, 725, 725, 725, 725, 725, 725, 725, 725, 725, 725, 725, 725, 725, 725, 725, 725, 725, 725, 725, 725, 725, 725, 725, 725, 725, 725, 725, 725, 725, 725, 725, 725, 725, 725, 725, 725, 725, 725, 725, 725, 725, 725, 725, 725, 725, 725, 725, 725, 725}, + {724, 724, 724, 724, 724, 724, 724, 724, 724, 724, 724, 724, 724, 724, 724, 724, 724, 724, 724, 724, 724, 724, 724, 724, 724, 724, 724, 724, 724, 724, 724, 724, 724, 724, 724, 724, 724, 724, 724, 724, 724, 724, 724, 724, 724, 724, 724, 724, 724, 724, 724, 724, 724, 724, 724, 724, 724, 724, 724, 724, 724, 724, 724, 724, 724, 724, 724, 724, 724, 724, 724, 724, 724, 724, 724, 724, 724, 724, 724, 724, 724, 724, 724, 724, 724, 724, 724, 724, 724, 724, 724, 724, 724, 724, 724, 724, 724, 724, 724, 724, 724, 724, 724, 724, 724, 724, 724, 724, 724, 724, 724, 724, 724, 724, 724, 724, 724, 724, 724, 724, 724, 724, 724, 724, 724, 724, 724, 724, 724, 724, 724, 724, 724, 724, 724, 724, 724, 724, 724, 724, 724, 724, 724, 724, 724, 724, 724, 724, 724, 724, 724, 724, 724, 724, 724, 724, 724, 724, 724, 724, 724, 724, 724, 724, 724, 724, 724, 724, 724, 724, 724, 724, 724, 724, 724, 724, 724, 724, 724, 724, 724, 
724, 724, 724, 724, 724, 724, 724, 724, 724, 724, 724, 724, 724, 724, 724, 724, 724, 724, 724, 724, 724, 724, 724, 724, 724, 724, 724, 724, 724, 724, 724, 724, 724, 724, 724, 724, 724, 724, 724, 724, 724, 724, 724, 724, 724, 724, 724, 724, 724, 724, 724, 724, 724, 724, 724, 724, 724, 724, 724, 724, 724, 724, 724, 724, 724, 724, 724, 724, 724, 724, 724, 724, 724, 724, 724, 724, 724, 724, 724, 724, 724, 724, 724, 724, 724, 724, 724, 724, 724, 724, 724, 724, 724, 724, 724, 724, 724, 724, 724, 724, 724, 724, 724, 724, 724, 724, 724, 724, 724, 724, 724, 724, 724, 724, 724, 724, 724, 724, 724, 724, 724, 724, 724, 724, 724, 724, 724, 724, 724, 724, 724, 724, 724, 724, 724, 724, 724, 724, 724, 724, 724, 724, 724, 724, 724, 724, 724, 724, 724, 724, 724, 724, 724, 724, 724, 724, 724, 724, 724, 724, 724, 724, 724, 724, 724, 724, 724, 724, 724, 724, 724, 724, 724, 724, 724, 724, 724, 724, 724, 724, 724, 724, 724, 724, 724, 724, 724, 724, 724, 724, 724, 724, 724, 724, 724, 724, 724, 724, 724, 724, 724, 724, 724, 724, 724, 724, 724, 724, 724, 724, 724, 724, 724, 724, 724, 724, 724, 724, 724, 724, 724, 724, 724, 724, 724, 724, 724, 724, 724, 724, 724, 724, 724, 724, 724, 724, 724, 724, 724, 724, 724, 724, 724, 724, 724, 724, 724, 724, 724, 724, 724, 724, 724, 724, 724, 724, 724, 724, 724, 724, 724, 724, 724, 724, 724, 724, 724, 724, 724, 724, 724, 724, 724, 724, 724, 724, 724, 724, 724, 724, 724, 724, 724, 724, 724, 724, 724, 724, 724, 724, 724, 724, 724, 724, 724, 724, 724, 724, 724, 724, 724, 724, 724, 724, 724, 724, 724, 724, 724, 724, 724, 724, 724, 724, 724, 724, 724, 724, 724, 724, 724, 724, 724, 724, 724, 724, 724, 724, 724, 724, 724, 724, 724, 724, 724, 724, 724, 724, 724, 724, 724, 724}, + {723, 723, 723, 723, 723, 723, 723, 723, 723, 723, 723, 723, 723, 723, 723, 723, 723, 723, 723, 723, 723, 723, 723, 723, 723, 723, 723, 723, 723, 723, 723, 723, 723, 723, 723, 723, 723, 723, 723, 723, 723, 723, 723, 723, 723, 723, 723, 723, 723, 723, 723, 723, 723, 723, 723, 723, 723, 723, 723, 723, 723, 723, 723, 723, 723, 723, 723, 723, 723, 723, 723, 723, 723, 723, 723, 723, 723, 723, 723, 723, 723, 723, 723, 723, 723, 723, 723, 723, 723, 723, 723, 723, 723, 723, 723, 723, 723, 723, 723, 723, 723, 723, 723, 723, 723, 723, 723, 723, 723, 723, 723, 723, 723, 723, 723, 723, 723, 723, 723, 723, 723, 723, 723, 723, 723, 723, 723, 723, 723, 723, 723, 723, 723, 723, 723, 723, 723, 723, 723, 723, 723, 723, 723, 723, 723, 723, 723, 723, 723, 723, 723, 723, 723, 723, 723, 723, 723, 723, 723, 723, 723, 723, 723, 723, 723, 723, 723, 723, 723, 723, 723, 723, 723, 723, 723, 723, 723, 723, 723, 723, 723, 723, 723, 723, 723, 723, 723, 723, 723, 723, 723, 723, 723, 723, 723, 723, 723, 723, 723, 723, 723, 723, 723, 723, 723, 723, 723, 723, 723, 723, 723, 723, 723, 723, 723, 723, 723, 723, 723, 723, 723, 723, 723, 723, 723, 723, 723, 723, 723, 723, 723, 723, 723, 723, 723, 723, 723, 723, 723, 723, 723, 723, 723, 723, 723, 723, 723, 723, 723, 723, 723, 723, 723, 723, 723, 723, 723, 723, 723, 723, 723, 723, 723, 723, 723, 723, 723, 723, 723, 723, 723, 723, 723, 723, 723, 723, 723, 723, 723, 723, 723, 723, 723, 723, 723, 723, 723, 723, 723, 723, 723, 723, 723, 723, 723, 723, 723, 723, 723, 723, 723, 723, 723, 723, 723, 723, 723, 723, 723, 723, 723, 723, 723, 723, 723, 723, 723, 723, 723, 723, 723, 723, 723, 723, 723, 723, 723, 723, 723, 723, 723, 723, 723, 723, 723, 723, 723, 723, 723, 723, 723, 723, 723, 723, 723, 723, 723, 723, 723, 723, 723, 723, 723, 723, 723, 723, 723, 723, 723, 723, 723, 723, 723, 723, 723, 723, 723, 723, 
723, 723, 723, 723, 723, 723, 723, 723, 723, 723, 723, 723, 723, 723, 723, 723, 723, 723, 723, 723, 723, 723, 723, 723, 723, 723, 723, 723, 723, 723, 723, 723, 723, 723, 723, 723, 723, 723, 723, 723, 723, 723, 723, 723, 723, 723, 723, 723, 723, 723, 723, 723, 723, 723, 723, 723, 723, 723, 723, 723, 723, 723, 723, 723, 723, 723, 723, 723, 723, 723, 723, 723, 723, 723, 723, 723, 723, 723, 723, 723, 723, 723, 723, 723, 723, 723, 723, 723, 723, 723, 723, 723, 723, 723, 723, 723, 723, 723, 723, 723, 723, 723, 723, 723, 723, 723, 723, 723, 723, 723, 723, 723, 723, 723, 723, 723, 723, 723, 723, 723, 723, 723, 723, 723, 723, 723, 723, 723, 723, 723, 723, 723, 723, 723, 723, 723, 723, 723, 723, 723, 723, 723, 723, 723, 723, 723, 723, 723, 723, 723, 723, 723, 723, 723, 723}, + {722, 722, 722, 722, 722, 722, 722, 722, 722, 722, 722, 722, 722, 722, 722, 722, 722, 722, 722, 722, 722, 722, 722, 722, 722, 722, 722, 722, 722, 722, 722, 722, 722, 722, 722, 722, 722, 722, 722, 722, 722, 722, 722, 722, 722, 722, 722, 722, 722, 722, 722, 722, 722, 722, 722, 722, 722, 722, 722, 722, 722, 722, 722, 722, 722, 722, 722, 722, 722, 722, 722, 722, 722, 722, 722, 722, 722, 722, 722, 722, 722, 722, 722, 722, 722, 722, 722, 722, 722, 722, 722, 722, 722, 722, 722, 722, 722, 722, 722, 722, 722, 722, 722, 722, 722, 722, 722, 722, 722, 722, 722, 722, 722, 722, 722, 722, 722, 722, 722, 722, 722, 722, 722, 722, 722, 722, 722, 722, 722, 722, 722, 722, 722, 722, 722, 722, 722, 722, 722, 722, 722, 722, 722, 722, 722, 722, 722, 722, 722, 722, 722, 722, 722, 722, 722, 722, 722, 722, 722, 722, 722, 722, 722, 722, 722, 722, 722, 722, 722, 722, 722, 722, 722, 722, 722, 722, 722, 722, 722, 722, 722, 722, 722, 722, 722, 722, 722, 722, 722, 722, 722, 722, 722, 722, 722, 722, 722, 722, 722, 722, 722, 722, 722, 722, 722, 722, 722, 722, 722, 722, 722, 722, 722, 722, 722, 722, 722, 722, 722, 722, 722, 722, 722, 722, 722, 722, 722, 722, 722, 722, 722, 722, 722, 722, 722, 722, 722, 722, 722, 722, 722, 722, 722, 722, 722, 722, 722, 722, 722, 722, 722, 722, 722, 722, 722, 722, 722, 722, 722, 722, 722, 722, 722, 722, 722, 722, 722, 722, 722, 722, 722, 722, 722, 722, 722, 722, 722, 722, 722, 722, 722, 722, 722, 722, 722, 722, 722, 722, 722, 722, 722, 722, 722, 722, 722, 722, 722, 722, 722, 722, 722, 722, 722, 722, 722, 722, 722, 722, 722, 722, 722, 722, 722, 722, 722, 722, 722, 722, 722, 722, 722, 722, 722, 722, 722, 722, 722, 722, 722, 722, 722, 722, 722, 722, 722, 722, 722, 722, 722, 722, 722, 722, 722, 722, 722, 722, 722, 722, 722, 722, 722, 722, 722, 722, 722, 722, 722, 722, 722, 722, 722, 722, 722, 722, 722, 722, 722, 722, 722, 722, 722, 722, 722, 722, 722, 722, 722, 722, 722, 722, 722, 722, 722, 722, 722, 722, 722, 722, 722, 722, 722, 722, 722, 722, 722, 722, 722, 722, 722, 722, 722, 722, 722, 722, 722, 722, 722, 722, 722, 722, 722, 722, 722, 722, 722, 722, 722, 722, 722, 722, 722, 722, 722, 722, 722, 722, 722, 722, 722, 722, 722, 722, 722, 722, 722, 722, 722, 722, 722, 722, 722, 722, 722, 722, 722, 722, 722, 722, 722, 722, 722, 722, 722, 722, 722, 722, 722, 722, 722, 722, 722, 722, 722, 722, 722, 722, 722, 722, 722, 722, 722, 722, 722, 722, 722, 722, 722, 722, 722, 722, 722, 722, 722, 722, 722, 722, 722, 722, 722, 722, 722, 722, 722, 722, 722, 722, 722, 722, 722, 722, 722, 722, 722, 722, 722, 722, 722, 722, 722, 722, 722, 722, 722, 722, 722, 722, 722, 722, 722, 722, 722, 722, 722}, + // 220 + {721, 721, 721, 721, 721, 721, 721, 721, 721, 721, 721, 721, 721, 721, 721, 721, 721, 721, 721, 721, 721, 721, 721, 721, 721, 721, 721, 721, 721, 
721, 721, 721, 721, 721, 721, 721, 721, 721, 721, 721, 721, 721, 721, 721, 721, 721, 721, 721, 721, 721, 721, 721, 721, 721, 721, 721, 721, 721, 721, 721, 721, 721, 721, 721, 721, 721, 721, 721, 721, 721, 721, 721, 721, 721, 721, 721, 721, 721, 721, 721, 721, 721, 721, 721, 721, 721, 721, 721, 721, 721, 721, 721, 721, 721, 721, 721, 721, 721, 721, 721, 721, 721, 721, 721, 721, 721, 721, 721, 721, 721, 721, 721, 721, 721, 721, 721, 721, 721, 721, 721, 721, 721, 721, 721, 721, 721, 721, 721, 721, 721, 721, 721, 721, 721, 721, 721, 721, 721, 721, 721, 721, 721, 721, 721, 721, 721, 721, 721, 721, 721, 721, 721, 721, 721, 721, 721, 721, 721, 721, 721, 721, 721, 721, 721, 721, 721, 721, 721, 721, 721, 721, 721, 721, 721, 721, 721, 721, 721, 721, 721, 721, 721, 721, 721, 721, 721, 721, 721, 721, 721, 721, 721, 721, 721, 721, 721, 721, 721, 721, 721, 721, 721, 721, 721, 721, 721, 721, 721, 721, 721, 721, 721, 721, 721, 721, 721, 721, 721, 721, 721, 721, 721, 721, 721, 721, 721, 721, 721, 721, 721, 721, 721, 721, 721, 721, 721, 721, 721, 721, 721, 721, 721, 721, 721, 721, 721, 721, 721, 721, 721, 721, 721, 721, 721, 721, 721, 721, 721, 721, 721, 721, 721, 721, 721, 721, 721, 721, 721, 721, 721, 721, 721, 721, 721, 721, 721, 721, 721, 721, 721, 721, 721, 721, 721, 721, 721, 721, 721, 721, 721, 721, 721, 721, 721, 721, 721, 721, 721, 721, 721, 721, 721, 721, 721, 721, 721, 721, 721, 721, 721, 721, 721, 721, 721, 721, 721, 721, 721, 721, 721, 721, 721, 721, 721, 721, 721, 721, 721, 721, 721, 721, 721, 721, 721, 721, 721, 721, 721, 721, 721, 721, 721, 721, 721, 721, 721, 721, 721, 721, 721, 721, 721, 721, 721, 721, 721, 721, 721, 721, 721, 721, 721, 721, 721, 721, 721, 721, 721, 721, 721, 721, 721, 721, 721, 721, 721, 721, 721, 721, 721, 721, 721, 721, 721, 721, 721, 721, 721, 721, 721, 721, 721, 721, 721, 721, 721, 721, 721, 721, 721, 721, 721, 721, 721, 721, 721, 721, 721, 721, 721, 721, 721, 721, 721, 721, 721, 721, 721, 721, 721, 721, 721, 721, 721, 721, 721, 721, 721, 721, 721, 721, 721, 721, 721, 721, 721, 721, 721, 721, 721, 721, 721, 721, 721, 721, 721, 721, 721, 721, 721, 721, 721, 721, 721, 721, 721, 721, 721, 721, 721, 721, 721, 721, 721, 721, 721, 721, 721, 721, 721, 721, 721, 721, 721, 721, 721, 721, 721, 721, 721, 721, 721, 721, 721, 721, 721, 721, 721, 721, 721, 721, 721, 721, 721, 721, 721, 721, 721, 721, 721, 721, 721, 721, 721, 721, 721, 721, 721, 721, 721, 721, 721, 721, 721, 721, 721, 721, 721, 721, 721, 721, 721, 721}, + {720, 720, 720, 720, 720, 720, 720, 720, 720, 720, 720, 720, 720, 720, 720, 720, 720, 720, 720, 720, 720, 720, 720, 720, 720, 720, 720, 720, 720, 720, 720, 720, 720, 720, 720, 720, 720, 720, 720, 720, 720, 720, 720, 720, 720, 720, 720, 720, 720, 720, 720, 720, 720, 720, 720, 720, 720, 720, 720, 720, 720, 720, 720, 720, 720, 720, 720, 720, 720, 720, 720, 720, 720, 720, 720, 720, 720, 720, 720, 720, 720, 720, 720, 720, 720, 720, 720, 720, 720, 720, 720, 720, 720, 720, 720, 720, 720, 720, 720, 720, 720, 720, 720, 720, 720, 720, 720, 720, 720, 720, 720, 720, 720, 720, 720, 720, 720, 720, 720, 720, 720, 720, 720, 720, 720, 720, 720, 720, 720, 720, 720, 720, 720, 720, 720, 720, 720, 720, 720, 720, 720, 720, 720, 720, 720, 720, 720, 720, 720, 720, 720, 720, 720, 720, 720, 720, 720, 720, 720, 720, 720, 720, 720, 720, 720, 720, 720, 720, 720, 720, 720, 720, 720, 720, 720, 720, 720, 720, 720, 720, 720, 720, 720, 720, 720, 720, 720, 720, 720, 720, 720, 720, 720, 720, 720, 720, 720, 720, 720, 720, 720, 720, 720, 720, 720, 720, 720, 720, 720, 720, 720, 720, 720, 720, 720, 720, 
720, 720, 720, 720, 720, 720, 720, 720, 720, 720, 720, 720, 720, 720, 720, 720, 720, 720, 720, 720, 720, 720, 720, 720, 720, 720, 720, 720, 720, 720, 720, 720, 720, 720, 720, 720, 720, 720, 720, 720, 720, 720, 720, 720, 720, 720, 720, 720, 720, 720, 720, 720, 720, 720, 720, 720, 720, 720, 720, 720, 720, 720, 720, 720, 720, 720, 720, 720, 720, 720, 720, 720, 720, 720, 720, 720, 720, 720, 720, 720, 720, 720, 720, 720, 720, 720, 720, 720, 720, 720, 720, 720, 720, 720, 720, 720, 720, 720, 720, 720, 720, 720, 720, 720, 720, 720, 720, 720, 720, 720, 720, 720, 720, 720, 720, 720, 720, 720, 720, 720, 720, 720, 720, 720, 720, 720, 720, 720, 720, 720, 720, 720, 720, 720, 720, 720, 720, 720, 720, 720, 720, 720, 720, 720, 720, 720, 720, 720, 720, 720, 720, 720, 720, 720, 720, 720, 720, 720, 720, 720, 720, 720, 720, 720, 720, 720, 720, 720, 720, 720, 720, 720, 720, 720, 720, 720, 720, 720, 720, 720, 720, 720, 720, 720, 720, 720, 720, 720, 720, 720, 720, 720, 720, 720, 720, 720, 720, 720, 720, 720, 720, 720, 720, 720, 720, 720, 720, 720, 720, 720, 720, 720, 720, 720, 720, 720, 720, 720, 720, 720, 720, 720, 720, 720, 720, 720, 720, 720, 720, 720, 720, 720, 720, 720, 720, 720, 720, 720, 720, 720, 720, 720, 720, 720, 720, 720, 720, 720, 720, 720, 720, 720, 720, 720, 720, 720, 720, 720, 720, 720, 720, 720, 720, 720, 720, 720, 720, 720, 720, 720, 720, 720, 720, 720, 720, 720, 720, 720, 720, 720, 720, 720, 720, 720, 720, 720, 720, 720, 720, 720, 720, 720, 720, 720, 720, 720, 720, 720, 720, 720, 720, 720, 720, 720, 720, 720, 720}, + {719, 719, 719, 719, 719, 719, 719, 719, 719, 719, 719, 719, 719, 719, 719, 719, 719, 719, 719, 719, 719, 719, 719, 719, 719, 719, 719, 719, 719, 719, 719, 719, 719, 719, 719, 719, 719, 719, 719, 719, 719, 719, 719, 719, 719, 719, 719, 719, 719, 719, 719, 719, 719, 719, 719, 719, 719, 719, 719, 719, 719, 719, 719, 719, 719, 719, 719, 719, 719, 719, 719, 719, 719, 719, 719, 719, 719, 719, 719, 719, 719, 719, 719, 719, 719, 719, 719, 719, 719, 719, 719, 719, 719, 719, 719, 719, 719, 719, 719, 719, 719, 719, 719, 719, 719, 719, 719, 719, 719, 719, 719, 719, 719, 719, 719, 719, 719, 719, 719, 719, 719, 719, 719, 719, 719, 719, 719, 719, 719, 719, 719, 719, 719, 719, 719, 719, 719, 719, 719, 719, 719, 719, 719, 719, 719, 719, 719, 719, 719, 719, 719, 719, 719, 719, 719, 719, 719, 719, 719, 719, 719, 719, 719, 719, 719, 719, 719, 719, 719, 719, 719, 719, 719, 719, 719, 719, 719, 719, 719, 719, 719, 719, 719, 719, 719, 719, 719, 719, 719, 719, 719, 719, 719, 719, 719, 719, 719, 719, 719, 719, 719, 719, 719, 719, 719, 719, 719, 719, 719, 719, 719, 719, 719, 719, 719, 719, 719, 719, 719, 719, 719, 719, 719, 719, 719, 719, 719, 719, 719, 719, 719, 719, 719, 719, 719, 719, 719, 719, 719, 719, 719, 719, 719, 719, 719, 719, 719, 719, 719, 719, 719, 719, 719, 719, 719, 719, 719, 719, 719, 719, 719, 719, 719, 719, 719, 719, 719, 719, 719, 719, 719, 719, 719, 719, 719, 719, 719, 719, 719, 719, 719, 719, 719, 719, 719, 719, 719, 719, 719, 719, 719, 719, 719, 719, 719, 719, 719, 719, 719, 719, 719, 719, 719, 719, 719, 719, 719, 719, 719, 719, 719, 719, 719, 719, 719, 719, 719, 719, 719, 719, 719, 719, 719, 719, 719, 719, 719, 719, 719, 719, 719, 719, 719, 719, 719, 719, 719, 719, 719, 719, 719, 719, 719, 719, 719, 719, 719, 719, 719, 719, 719, 719, 719, 719, 719, 719, 719, 719, 719, 719, 719, 719, 719, 719, 719, 719, 719, 719, 719, 719, 719, 719, 719, 719, 719, 719, 719, 719, 719, 719, 719, 719, 719, 719, 719, 719, 719, 719, 719, 719, 719, 719, 719, 719, 719, 719, 719, 719, 719, 719, 719, 719, 719, 
719, 719, 719, 719, 719, 719, 719, 719, 719, 719, 719, 719, 719, 719, 719, 719, 719, 719, 719, 719, 719, 719, 719, 719, 719, 719, 719, 719, 719, 719, 719, 719, 719, 719, 719, 719, 719, 719, 719, 719, 719, 719, 719, 719, 719, 719, 719, 719, 719, 719, 719, 719, 719, 719, 719, 719, 719, 719, 719, 719, 719, 719, 719, 719, 719, 719, 719, 719, 719, 719, 719, 719, 719, 719, 719, 719, 719, 719, 719, 719, 719, 719, 719, 719, 719, 719, 719, 719, 719, 719, 719, 719, 719, 719, 719, 719, 719, 719, 719, 719, 719, 719, 719, 719, 719, 719, 719, 719, 719, 719, 719, 719, 719, 719, 719, 719, 719, 719, 719, 719}, + {718, 718, 718, 718, 718, 718, 718, 718, 718, 718, 718, 718, 718, 718, 718, 718, 718, 718, 718, 718, 718, 718, 718, 718, 718, 718, 718, 718, 718, 718, 718, 718, 718, 718, 718, 718, 718, 718, 718, 718, 718, 718, 718, 718, 718, 718, 718, 718, 718, 718, 718, 718, 718, 718, 718, 718, 718, 718, 718, 718, 718, 718, 718, 718, 718, 718, 718, 718, 718, 718, 718, 718, 718, 718, 718, 718, 718, 718, 718, 718, 718, 718, 718, 718, 718, 718, 718, 718, 718, 718, 718, 718, 718, 718, 718, 718, 718, 718, 718, 718, 718, 718, 718, 718, 718, 718, 718, 718, 718, 718, 718, 718, 718, 718, 718, 718, 718, 718, 718, 718, 718, 718, 718, 718, 718, 718, 718, 718, 718, 718, 718, 718, 718, 718, 718, 718, 718, 718, 718, 718, 718, 718, 718, 718, 718, 718, 718, 718, 718, 718, 718, 718, 718, 718, 718, 718, 718, 718, 718, 718, 718, 718, 718, 718, 718, 718, 718, 718, 718, 718, 718, 718, 718, 718, 718, 718, 718, 718, 718, 718, 718, 718, 718, 718, 718, 718, 718, 718, 718, 718, 718, 718, 718, 718, 718, 718, 718, 718, 718, 718, 718, 718, 718, 718, 718, 718, 718, 718, 718, 718, 718, 718, 718, 718, 718, 718, 718, 718, 718, 718, 718, 718, 718, 718, 718, 718, 718, 718, 718, 718, 718, 718, 718, 718, 718, 718, 718, 718, 718, 718, 718, 718, 718, 718, 718, 718, 718, 718, 718, 718, 718, 718, 718, 718, 718, 718, 718, 718, 718, 718, 718, 718, 718, 718, 718, 718, 718, 718, 718, 718, 718, 718, 718, 718, 718, 718, 718, 718, 718, 718, 718, 718, 718, 718, 718, 718, 718, 718, 718, 718, 718, 718, 718, 718, 718, 718, 718, 718, 718, 718, 718, 718, 718, 718, 718, 718, 718, 718, 718, 718, 718, 718, 718, 718, 718, 718, 718, 718, 718, 718, 718, 718, 718, 718, 718, 718, 718, 718, 718, 718, 718, 718, 718, 718, 718, 718, 718, 718, 718, 718, 718, 718, 718, 718, 718, 718, 718, 718, 718, 718, 718, 718, 718, 718, 718, 718, 718, 718, 718, 718, 718, 718, 718, 718, 718, 718, 718, 718, 718, 718, 718, 718, 718, 718, 718, 718, 718, 718, 718, 718, 718, 718, 718, 718, 718, 718, 718, 718, 718, 718, 718, 718, 718, 718, 718, 718, 718, 718, 718, 718, 718, 718, 718, 718, 718, 718, 718, 718, 718, 718, 718, 718, 718, 718, 718, 718, 718, 718, 718, 718, 718, 718, 718, 718, 718, 718, 718, 718, 718, 718, 718, 718, 718, 718, 718, 718, 718, 718, 718, 718, 718, 718, 718, 718, 718, 718, 718, 718, 718, 718, 718, 718, 718, 718, 718, 718, 718, 718, 718, 718, 718, 718, 718, 718, 718, 718, 718, 718, 718, 718, 718, 718, 718, 718, 718, 718, 718, 718, 718, 718, 718, 718, 718, 718, 718, 718, 718, 718, 718, 718, 718, 718, 718, 718, 718, 718, 718, 718, 718, 718, 718, 718, 718, 718, 718, 718, 718, 718, 718, 718, 718, 718, 718, 718, 718, 718, 718, 718, 718, 718, 718, 718, 718}, + {717, 717, 717, 717, 717, 717, 717, 717, 717, 717, 717, 717, 717, 717, 717, 717, 717, 717, 717, 717, 717, 717, 717, 717, 717, 717, 717, 717, 717, 717, 717, 717, 717, 717, 717, 717, 717, 717, 717, 717, 717, 717, 717, 717, 717, 717, 717, 717, 717, 717, 717, 717, 717, 717, 717, 717, 717, 717, 717, 717, 717, 717, 717, 717, 717, 717, 
717, 717, 717, 717, 717, 717, 717, 717, 717, 717, 717, 717, 717, 717, 717, 717, 717, 717, 717, 717, 717, 717, 717, 717, 717, 717, 717, 717, 717, 717, 717, 717, 717, 717, 717, 717, 717, 717, 717, 717, 717, 717, 717, 717, 717, 717, 717, 717, 717, 717, 717, 717, 717, 717, 717, 717, 717, 717, 717, 717, 717, 717, 717, 717, 717, 717, 717, 717, 717, 717, 717, 717, 717, 717, 717, 717, 717, 717, 717, 717, 717, 717, 717, 717, 717, 717, 717, 717, 717, 717, 717, 717, 717, 717, 717, 717, 717, 717, 717, 717, 717, 717, 717, 717, 717, 717, 717, 717, 717, 717, 717, 717, 717, 717, 717, 717, 717, 717, 717, 717, 717, 717, 717, 717, 717, 717, 717, 717, 717, 717, 717, 717, 717, 717, 717, 717, 717, 717, 717, 717, 717, 717, 717, 717, 717, 717, 717, 717, 717, 717, 717, 717, 717, 717, 717, 717, 717, 717, 717, 717, 717, 717, 717, 717, 717, 717, 717, 717, 717, 717, 717, 717, 717, 717, 717, 717, 717, 717, 717, 717, 717, 717, 717, 717, 717, 717, 717, 717, 717, 717, 717, 717, 717, 717, 717, 717, 717, 717, 717, 717, 717, 717, 717, 717, 717, 717, 717, 717, 717, 717, 717, 717, 717, 717, 717, 717, 717, 717, 717, 717, 717, 717, 717, 717, 717, 717, 717, 717, 717, 717, 717, 717, 717, 717, 717, 717, 717, 717, 717, 717, 717, 717, 717, 717, 717, 717, 717, 717, 717, 717, 717, 717, 717, 717, 717, 717, 717, 717, 717, 717, 717, 717, 717, 717, 717, 717, 717, 717, 717, 717, 717, 717, 717, 717, 717, 717, 717, 717, 717, 717, 717, 717, 717, 717, 717, 717, 717, 717, 717, 717, 717, 717, 717, 717, 717, 717, 717, 717, 717, 717, 717, 717, 717, 717, 717, 717, 717, 717, 717, 717, 717, 717, 717, 717, 717, 717, 717, 717, 717, 717, 717, 717, 717, 717, 717, 717, 717, 717, 717, 717, 717, 717, 717, 717, 717, 717, 717, 717, 717, 717, 717, 717, 717, 717, 717, 717, 717, 717, 717, 717, 717, 717, 717, 717, 717, 717, 717, 717, 717, 717, 717, 717, 717, 717, 717, 717, 717, 717, 717, 717, 717, 717, 717, 717, 717, 717, 717, 717, 717, 717, 717, 717, 717, 717, 717, 717, 717, 717, 717, 717, 717, 717, 717, 717, 717, 717, 717, 717, 717, 717, 717, 717, 717, 717, 717, 717, 717, 717, 717, 717, 717, 717, 717, 717, 717, 717, 717, 717, 717, 717, 717, 717, 717, 717, 717, 717, 717, 717, 717, 717, 717, 717, 717, 717, 717, 717, 717, 717, 717, 717, 717, 717, 717, 717, 717, 717, 717, 717, 717, 717, 717, 717, 717, 717, 717, 717, 717}, + // 225 + {716, 716, 716, 716, 716, 716, 716, 716, 716, 716, 716, 716, 716, 716, 716, 716, 716, 716, 716, 716, 716, 716, 716, 716, 716, 716, 716, 716, 716, 716, 716, 716, 716, 716, 716, 716, 716, 716, 716, 716, 716, 716, 716, 716, 716, 716, 716, 716, 716, 716, 716, 716, 716, 716, 716, 716, 716, 716, 716, 716, 716, 716, 716, 716, 716, 716, 716, 716, 716, 716, 716, 716, 716, 716, 716, 716, 716, 716, 716, 716, 716, 716, 716, 716, 716, 716, 716, 716, 716, 716, 716, 716, 716, 716, 716, 716, 716, 716, 716, 716, 716, 716, 716, 716, 716, 716, 716, 716, 716, 716, 716, 716, 716, 716, 716, 716, 716, 716, 716, 716, 716, 716, 716, 716, 716, 716, 716, 716, 716, 716, 716, 716, 716, 716, 716, 716, 716, 716, 716, 716, 716, 716, 716, 716, 716, 716, 716, 716, 716, 716, 716, 716, 716, 716, 716, 716, 716, 716, 716, 716, 716, 716, 716, 716, 716, 716, 716, 716, 716, 716, 716, 716, 716, 716, 716, 716, 716, 716, 716, 716, 716, 716, 716, 716, 716, 716, 716, 716, 716, 716, 716, 716, 716, 716, 716, 716, 716, 716, 716, 716, 716, 716, 716, 716, 716, 716, 716, 716, 716, 716, 716, 716, 716, 716, 716, 716, 716, 716, 716, 716, 716, 716, 716, 716, 716, 716, 716, 716, 716, 716, 716, 716, 716, 716, 716, 716, 716, 716, 716, 716, 716, 716, 716, 716, 716, 716, 716, 716, 716, 716, 716, 
716, 716, 716, 716, 716, 716, 716, 716, 716, 716, 716, 716, 716, 716, 716, 716, 716, 716, 716, 716, 716, 716, 716, 716, 716, 716, 716, 716, 716, 716, 716, 716, 716, 716, 716, 716, 716, 716, 716, 716, 716, 716, 716, 716, 716, 716, 716, 716, 716, 716, 716, 716, 716, 716, 716, 716, 716, 716, 716, 716, 716, 716, 716, 716, 716, 716, 716, 716, 716, 716, 716, 716, 716, 716, 716, 716, 716, 716, 716, 716, 716, 716, 716, 716, 716, 716, 716, 716, 716, 716, 716, 716, 716, 716, 716, 716, 716, 716, 716, 716, 716, 716, 716, 716, 716, 716, 716, 716, 716, 716, 716, 716, 716, 716, 716, 716, 716, 716, 716, 716, 716, 716, 716, 716, 716, 716, 716, 716, 716, 716, 716, 716, 716, 716, 716, 716, 716, 716, 716, 716, 716, 716, 716, 716, 716, 716, 716, 716, 716, 716, 716, 716, 716, 716, 716, 716, 716, 716, 716, 716, 716, 716, 716, 716, 716, 716, 716, 716, 716, 716, 716, 716, 716, 716, 716, 716, 716, 716, 716, 716, 716, 716, 716, 716, 716, 716, 716, 716, 716, 716, 716, 716, 716, 716, 716, 716, 716, 716, 716, 716, 716, 716, 716, 716, 716, 716, 716, 716, 716, 716, 716, 716, 716, 716, 716, 716, 716, 716, 716, 716, 716, 716, 716, 716, 716, 716, 716, 716, 716, 716, 716, 716, 716, 716, 716, 716, 716, 716, 716, 716, 716, 716, 716, 716, 716, 716, 716, 716, 716, 716, 716, 716, 716, 716, 716, 716, 716, 716, 716, 716, 716, 716, 716, 716, 716, 716, 716, 716, 716, 716, 716, 716}, + {715, 715, 715, 715, 715, 715, 715, 715, 715, 715, 715, 715, 715, 715, 715, 715, 715, 715, 715, 715, 715, 715, 715, 715, 715, 715, 715, 715, 715, 715, 715, 715, 715, 715, 715, 715, 715, 715, 715, 715, 715, 715, 715, 715, 715, 715, 715, 715, 715, 715, 715, 715, 715, 715, 715, 715, 715, 715, 715, 715, 715, 715, 715, 715, 715, 715, 715, 715, 715, 715, 715, 715, 715, 715, 715, 715, 715, 715, 715, 715, 715, 715, 715, 715, 715, 715, 715, 715, 715, 715, 715, 715, 715, 715, 715, 715, 715, 715, 715, 715, 715, 715, 715, 715, 715, 715, 715, 715, 715, 715, 715, 715, 715, 715, 715, 715, 715, 715, 715, 715, 715, 715, 715, 715, 715, 715, 715, 715, 715, 715, 715, 715, 715, 715, 715, 715, 715, 715, 715, 715, 715, 715, 715, 715, 715, 715, 715, 715, 715, 715, 715, 715, 715, 715, 715, 715, 715, 715, 715, 715, 715, 715, 715, 715, 715, 715, 715, 715, 715, 715, 715, 715, 715, 715, 715, 715, 715, 715, 715, 715, 715, 715, 715, 715, 715, 715, 715, 715, 715, 715, 715, 715, 715, 715, 715, 715, 715, 715, 715, 715, 715, 715, 715, 715, 715, 715, 715, 715, 715, 715, 715, 715, 715, 715, 715, 715, 715, 715, 715, 715, 715, 715, 715, 715, 715, 715, 715, 715, 715, 715, 715, 715, 715, 715, 715, 715, 715, 715, 715, 715, 715, 715, 715, 715, 715, 715, 715, 715, 715, 715, 715, 715, 715, 715, 715, 715, 715, 715, 715, 715, 715, 715, 715, 715, 715, 715, 715, 715, 715, 715, 715, 715, 715, 715, 715, 715, 715, 715, 715, 715, 715, 715, 715, 715, 715, 715, 715, 715, 715, 715, 715, 715, 715, 715, 715, 715, 715, 715, 715, 715, 715, 715, 715, 715, 715, 715, 715, 715, 715, 715, 715, 715, 715, 715, 715, 715, 715, 715, 715, 715, 715, 715, 715, 715, 715, 715, 715, 715, 715, 715, 715, 715, 715, 715, 715, 715, 715, 715, 715, 715, 715, 715, 715, 715, 715, 715, 715, 715, 715, 715, 715, 715, 715, 715, 715, 715, 715, 715, 715, 715, 715, 715, 715, 715, 715, 715, 715, 715, 715, 715, 715, 715, 715, 715, 715, 715, 715, 715, 715, 715, 715, 715, 715, 715, 715, 715, 715, 715, 715, 715, 715, 715, 715, 715, 715, 715, 715, 715, 715, 715, 715, 715, 715, 715, 715, 715, 715, 715, 715, 715, 715, 715, 715, 715, 715, 715, 715, 715, 715, 715, 715, 715, 715, 715, 715, 715, 715, 715, 715, 715, 715, 715, 715, 715, 715, 715, 715, 715, 
715, 715, 715, 715, 715, 715, 715, 715, 715, 715, 715, 715, 715, 715, 715, 715, 715, 715, 715, 715, 715, 715, 715, 715, 715, 715, 715, 715, 715, 715, 715, 715, 715, 715, 715, 715, 715, 715, 715, 715, 715, 715, 715, 715, 715, 715, 715, 715, 715, 715, 715, 715, 715, 715, 715, 715, 715, 715, 715, 715, 715, 715, 715, 715, 715, 715, 715, 715, 715, 715, 715, 715, 715, 715, 715, 715, 715, 715, 715, 715, 715, 715, 715, 715, 715}, + {714, 714, 714, 714, 714, 714, 714, 714, 714, 714, 714, 714, 714, 714, 714, 714, 714, 714, 714, 714, 714, 714, 714, 714, 714, 714, 714, 714, 714, 714, 714, 714, 714, 714, 714, 714, 714, 714, 714, 714, 714, 714, 714, 714, 714, 714, 714, 714, 714, 714, 714, 714, 714, 714, 714, 714, 714, 714, 714, 714, 714, 714, 714, 714, 714, 714, 714, 714, 714, 714, 714, 714, 714, 714, 714, 714, 714, 714, 714, 714, 714, 714, 714, 714, 714, 714, 714, 714, 714, 714, 714, 714, 714, 714, 714, 714, 714, 714, 714, 714, 714, 714, 714, 714, 714, 714, 714, 714, 714, 714, 714, 714, 714, 714, 714, 714, 714, 714, 714, 714, 714, 714, 714, 714, 714, 714, 714, 714, 714, 714, 714, 714, 714, 714, 714, 714, 714, 714, 714, 714, 714, 714, 714, 714, 714, 714, 714, 714, 714, 714, 714, 714, 714, 714, 714, 714, 714, 714, 714, 714, 714, 714, 714, 714, 714, 714, 714, 714, 714, 714, 714, 714, 714, 714, 714, 714, 714, 714, 714, 714, 714, 714, 714, 714, 714, 714, 714, 714, 714, 714, 714, 714, 714, 714, 714, 714, 714, 714, 714, 714, 714, 714, 714, 714, 714, 714, 714, 714, 714, 714, 714, 714, 714, 714, 714, 714, 714, 714, 714, 714, 714, 714, 714, 714, 714, 714, 714, 714, 714, 714, 714, 714, 714, 714, 714, 714, 714, 714, 714, 714, 714, 714, 714, 714, 714, 714, 714, 714, 714, 714, 714, 714, 714, 714, 714, 714, 714, 714, 714, 714, 714, 714, 714, 714, 714, 714, 714, 714, 714, 714, 714, 714, 714, 714, 714, 714, 714, 714, 714, 714, 714, 714, 714, 714, 714, 714, 714, 714, 714, 714, 714, 714, 714, 714, 714, 714, 714, 714, 714, 714, 714, 714, 714, 714, 714, 714, 714, 714, 714, 714, 714, 714, 714, 714, 714, 714, 714, 714, 714, 714, 714, 714, 714, 714, 714, 714, 714, 714, 714, 714, 714, 714, 714, 714, 714, 714, 714, 714, 714, 714, 714, 714, 714, 714, 714, 714, 714, 714, 714, 714, 714, 714, 714, 714, 714, 714, 714, 714, 714, 714, 714, 714, 714, 714, 714, 714, 714, 714, 714, 714, 714, 714, 714, 714, 714, 714, 714, 714, 714, 714, 714, 714, 714, 714, 714, 714, 714, 714, 714, 714, 714, 714, 714, 714, 714, 714, 714, 714, 714, 714, 714, 714, 714, 714, 714, 714, 714, 714, 714, 714, 714, 714, 714, 714, 714, 714, 714, 714, 714, 714, 714, 714, 714, 714, 714, 714, 714, 714, 714, 714, 714, 714, 714, 714, 714, 714, 714, 714, 714, 714, 714, 714, 714, 714, 714, 714, 714, 714, 714, 714, 714, 714, 714, 714, 714, 714, 714, 714, 714, 714, 714, 714, 714, 714, 714, 714, 714, 714, 714, 714, 714, 714, 714, 714, 714, 714, 714, 714, 714, 714, 714, 714, 714, 714, 714, 714, 714, 714, 714, 714, 714, 714, 714, 714, 714, 714, 714, 714, 714, 714, 714, 714, 714, 714, 714, 714, 714, 714, 714, 714, 714, 714, 714, 714, 714, 714, 714, 714, 714, 714, 714, 714, 714}, + {713, 713, 713, 713, 713, 713, 713, 713, 713, 713, 713, 713, 713, 713, 713, 713, 713, 713, 713, 713, 713, 713, 713, 713, 713, 713, 713, 713, 713, 713, 713, 713, 713, 713, 713, 713, 713, 713, 713, 713, 713, 713, 713, 713, 713, 713, 713, 713, 713, 713, 713, 713, 713, 713, 713, 713, 713, 713, 713, 713, 713, 713, 713, 713, 713, 713, 713, 713, 713, 713, 713, 713, 713, 713, 713, 713, 713, 713, 713, 713, 713, 713, 713, 713, 713, 713, 713, 713, 713, 713, 713, 713, 713, 713, 713, 713, 713, 713, 713, 713, 713, 
713, 713, 713, 713, 713, 713, 713, 713, 713, 713, 713, 713, 713, 713, 713, 713, 713, 713, 713, 713, 713, 713, 713, 713, 713, 713, 713, 713, 713, 713, 713, 713, 713, 713, 713, 713, 713, 713, 713, 713, 713, 713, 713, 713, 713, 713, 713, 713, 713, 713, 713, 713, 713, 713, 713, 713, 713, 713, 713, 713, 713, 713, 713, 713, 713, 713, 713, 713, 713, 713, 713, 713, 713, 713, 713, 713, 713, 713, 713, 713, 713, 713, 713, 713, 713, 713, 713, 713, 713, 713, 713, 713, 713, 713, 713, 713, 713, 713, 713, 713, 713, 713, 713, 713, 713, 713, 713, 713, 713, 713, 713, 713, 713, 713, 713, 713, 713, 713, 713, 713, 713, 713, 713, 713, 713, 713, 713, 713, 713, 713, 713, 713, 713, 713, 713, 713, 713, 713, 713, 713, 713, 713, 713, 713, 713, 713, 713, 713, 713, 713, 713, 713, 713, 713, 713, 713, 713, 713, 713, 713, 713, 713, 713, 713, 713, 713, 713, 713, 713, 713, 713, 713, 713, 713, 713, 713, 713, 713, 713, 713, 713, 713, 713, 713, 713, 713, 713, 713, 713, 713, 713, 713, 713, 713, 713, 713, 713, 713, 713, 713, 713, 713, 713, 713, 713, 713, 713, 713, 713, 713, 713, 713, 713, 713, 713, 713, 713, 713, 713, 713, 713, 713, 713, 713, 713, 713, 713, 713, 713, 713, 713, 713, 713, 713, 713, 713, 713, 713, 713, 713, 713, 713, 713, 713, 713, 713, 713, 713, 713, 713, 713, 713, 713, 713, 713, 713, 713, 713, 713, 713, 713, 713, 713, 713, 713, 713, 713, 713, 713, 713, 713, 713, 713, 713, 713, 713, 713, 713, 713, 713, 713, 713, 713, 713, 713, 713, 713, 713, 713, 713, 713, 713, 713, 713, 713, 713, 713, 713, 713, 713, 713, 713, 713, 713, 713, 713, 713, 713, 713, 713, 713, 713, 713, 713, 713, 713, 713, 713, 713, 713, 713, 713, 713, 713, 713, 713, 713, 713, 713, 713, 713, 713, 713, 713, 713, 713, 713, 713, 713, 713, 713, 713, 713, 713, 713, 713, 713, 713, 713, 713, 713, 713, 713, 713, 713, 713, 713, 713, 713, 713, 713, 713, 713, 713, 713, 713, 713, 713, 713, 713, 713, 713, 713, 713, 713, 713, 713, 713, 713, 713, 713, 713, 713, 713, 713, 713, 713, 713, 713, 713, 713, 713, 713, 713, 713, 713, 713, 713, 713, 713, 713, 713, 713, 713, 713, 713, 713, 713, 713, 713, 713, 713, 713, 713, 713, 713, 713, 713, 713, 713, 713, 713}, + {712, 712, 712, 712, 712, 712, 712, 712, 712, 712, 712, 712, 712, 712, 712, 712, 712, 712, 712, 712, 712, 712, 712, 712, 712, 712, 712, 712, 712, 712, 712, 712, 712, 712, 712, 712, 712, 712, 712, 712, 712, 712, 712, 712, 712, 712, 712, 712, 712, 712, 712, 712, 712, 712, 712, 712, 712, 712, 712, 712, 712, 712, 712, 712, 712, 712, 712, 712, 712, 712, 712, 712, 712, 712, 712, 712, 712, 712, 712, 712, 712, 712, 712, 712, 712, 712, 712, 712, 712, 712, 712, 712, 712, 712, 712, 712, 712, 712, 712, 712, 712, 712, 712, 712, 712, 712, 712, 712, 712, 712, 712, 712, 712, 712, 712, 712, 712, 712, 712, 712, 712, 712, 712, 712, 712, 712, 712, 712, 712, 712, 712, 712, 712, 712, 712, 712, 712, 712, 712, 712, 712, 712, 712, 712, 712, 712, 712, 712, 712, 712, 712, 712, 712, 712, 712, 712, 712, 712, 712, 712, 712, 712, 712, 712, 712, 712, 712, 712, 712, 712, 712, 712, 712, 712, 712, 712, 712, 712, 712, 712, 712, 712, 712, 712, 712, 712, 712, 712, 712, 712, 712, 712, 712, 712, 712, 712, 712, 712, 712, 712, 712, 712, 712, 712, 712, 712, 712, 712, 712, 712, 712, 712, 712, 712, 712, 712, 712, 712, 712, 712, 712, 712, 712, 712, 712, 712, 712, 712, 712, 712, 712, 712, 712, 712, 712, 712, 712, 712, 712, 712, 712, 712, 712, 712, 712, 712, 712, 712, 712, 712, 712, 712, 712, 712, 712, 712, 712, 712, 712, 712, 712, 712, 712, 712, 712, 712, 712, 712, 712, 712, 712, 712, 712, 712, 712, 712, 712, 712, 712, 712, 712, 712, 712, 712, 712, 712, 712, 712, 
712, 712, 712, 712, 712, 712, 712, 712, 712, 712, 712, 712, 712, 712, 712, 712, 712, 712, 712, 712, 712, 712, 712, 712, 712, 712, 712, 712, 712, 712, 712, 712, 712, 712, 712, 712, 712, 712, 712, 712, 712, 712, 712, 712, 712, 712, 712, 712, 712, 712, 712, 712, 712, 712, 712, 712, 712, 712, 712, 712, 712, 712, 712, 712, 712, 712, 712, 712, 712, 712, 712, 712, 712, 712, 712, 712, 712, 712, 712, 712, 712, 712, 712, 712, 712, 712, 712, 712, 712, 712, 712, 712, 712, 712, 712, 712, 712, 712, 712, 712, 712, 712, 712, 712, 712, 712, 712, 712, 712, 712, 712, 712, 712, 712, 712, 712, 712, 712, 712, 712, 712, 712, 712, 712, 712, 712, 712, 712, 712, 712, 712, 712, 712, 712, 712, 712, 712, 712, 712, 712, 712, 712, 712, 712, 712, 712, 712, 712, 712, 712, 712, 712, 712, 712, 712, 712, 712, 712, 712, 712, 712, 712, 712, 712, 712, 712, 712, 712, 712, 712, 712, 712, 712, 712, 712, 712, 712, 712, 712, 712, 712, 712, 712, 712, 712, 712, 712, 712, 712, 712, 712, 712, 712, 712, 712, 712, 712, 712, 712, 712, 712, 712, 712, 712, 712, 712, 712, 712, 712, 712, 712, 712, 712, 712, 712, 712, 712, 712, 712, 712, 712, 712, 712, 712, 712, 712, 712, 712, 712, 712, 712, 712, 712, 712, 712}, + // 230 + {711, 711, 711, 711, 711, 711, 711, 711, 711, 711, 711, 711, 711, 711, 711, 711, 711, 711, 711, 711, 711, 711, 711, 711, 711, 711, 711, 711, 711, 711, 711, 711, 711, 711, 711, 711, 711, 711, 711, 711, 711, 711, 711, 711, 711, 711, 711, 711, 711, 711, 711, 711, 711, 711, 711, 711, 711, 711, 711, 711, 711, 711, 711, 711, 711, 711, 711, 711, 711, 711, 711, 711, 711, 711, 711, 711, 711, 711, 711, 711, 711, 711, 711, 711, 711, 711, 711, 711, 711, 711, 711, 711, 711, 711, 711, 711, 711, 711, 711, 711, 711, 711, 711, 711, 711, 711, 711, 711, 711, 711, 711, 711, 711, 711, 711, 711, 711, 711, 711, 711, 711, 711, 711, 711, 711, 711, 711, 711, 711, 711, 711, 711, 711, 711, 711, 711, 711, 711, 711, 711, 711, 711, 711, 711, 711, 711, 711, 711, 711, 711, 711, 711, 711, 711, 711, 711, 711, 711, 711, 711, 711, 711, 711, 711, 711, 711, 711, 711, 711, 711, 711, 711, 711, 711, 711, 711, 711, 711, 711, 711, 711, 711, 711, 711, 711, 711, 711, 711, 711, 711, 711, 711, 711, 711, 711, 711, 711, 711, 711, 711, 711, 711, 711, 711, 711, 711, 711, 711, 711, 711, 711, 711, 711, 711, 711, 711, 711, 711, 711, 711, 711, 711, 711, 711, 711, 711, 711, 711, 711, 711, 711, 711, 711, 711, 711, 711, 711, 711, 711, 711, 711, 711, 711, 711, 711, 711, 711, 711, 711, 711, 711, 711, 711, 711, 711, 711, 711, 711, 711, 711, 711, 711, 711, 711, 711, 711, 711, 711, 711, 711, 711, 711, 711, 711, 711, 711, 711, 711, 711, 711, 711, 711, 711, 711, 711, 711, 711, 711, 711, 711, 711, 711, 711, 711, 711, 711, 711, 711, 711, 711, 711, 711, 711, 711, 711, 711, 711, 711, 711, 711, 711, 711, 711, 711, 711, 711, 711, 711, 711, 711, 711, 711, 711, 711, 711, 711, 711, 711, 711, 711, 711, 711, 711, 711, 711, 711, 711, 711, 711, 711, 711, 711, 711, 711, 711, 711, 711, 711, 711, 711, 711, 711, 711, 711, 711, 711, 711, 711, 711, 711, 711, 711, 711, 711, 711, 711, 711, 711, 711, 711, 711, 711, 711, 711, 711, 711, 711, 711, 711, 711, 711, 711, 711, 711, 711, 711, 711, 711, 711, 711, 711, 711, 711, 711, 711, 711, 711, 711, 711, 711, 711, 711, 711, 711, 711, 711, 711, 711, 711, 711, 711, 711, 711, 711, 711, 711, 711, 711, 711, 711, 711, 711, 711, 711, 711, 711, 711, 711, 711, 711, 711, 711, 711, 711, 711, 711, 711, 711, 711, 711, 711, 711, 711, 711, 711, 711, 711, 711, 711, 711, 711, 711, 711, 711, 711, 711, 711, 711, 711, 711, 711, 711, 711, 711, 711, 711, 711, 711, 711, 711, 711, 711, 711, 
711, 711, 711, 711, 711, 711, 711, 711, 711, 711, 711, 711, 711, 711, 711, 711, 711, 711, 711, 711, 711, 711, 711, 711, 711, 711, 711, 711, 711, 711, 711, 711, 711, 711, 711, 711, 711, 711, 711, 711, 711, 711, 711, 711, 711, 711, 711, 711, 711, 711}, + {710, 710, 710, 710, 710, 710, 710, 710, 710, 710, 710, 710, 710, 710, 710, 710, 710, 710, 710, 710, 710, 710, 710, 710, 710, 710, 710, 710, 710, 710, 710, 710, 710, 710, 710, 710, 710, 710, 710, 710, 710, 710, 710, 710, 710, 710, 710, 710, 710, 710, 710, 710, 710, 710, 710, 710, 710, 710, 710, 710, 710, 710, 710, 710, 710, 710, 710, 710, 710, 710, 710, 710, 710, 710, 710, 710, 710, 710, 710, 710, 710, 710, 710, 710, 710, 710, 710, 710, 710, 710, 710, 710, 710, 710, 710, 710, 710, 710, 710, 710, 710, 710, 710, 710, 710, 710, 710, 710, 710, 710, 710, 710, 710, 710, 710, 710, 710, 710, 710, 710, 710, 710, 710, 710, 710, 710, 710, 710, 710, 710, 710, 710, 710, 710, 710, 710, 710, 710, 710, 710, 710, 710, 710, 710, 710, 710, 710, 710, 710, 710, 710, 710, 710, 710, 710, 710, 710, 710, 710, 710, 710, 710, 710, 710, 710, 710, 710, 710, 710, 710, 710, 710, 710, 710, 710, 710, 710, 710, 710, 710, 710, 710, 710, 710, 710, 710, 710, 710, 710, 710, 710, 710, 710, 710, 710, 710, 710, 710, 710, 710, 710, 710, 710, 710, 710, 710, 710, 710, 710, 710, 710, 710, 710, 710, 710, 710, 710, 710, 710, 710, 710, 710, 710, 710, 710, 710, 710, 710, 710, 710, 710, 710, 710, 710, 710, 710, 710, 710, 710, 710, 710, 710, 710, 710, 710, 710, 710, 710, 710, 710, 710, 710, 710, 710, 710, 710, 710, 710, 710, 710, 710, 710, 710, 710, 710, 710, 710, 710, 710, 710, 710, 710, 710, 710, 710, 710, 710, 710, 710, 710, 710, 710, 710, 710, 710, 710, 710, 710, 710, 710, 710, 710, 710, 710, 710, 710, 710, 710, 710, 710, 710, 710, 710, 710, 710, 710, 710, 710, 710, 710, 710, 710, 710, 710, 710, 710, 710, 710, 710, 710, 710, 710, 710, 710, 710, 710, 710, 710, 710, 710, 710, 710, 710, 710, 710, 710, 710, 710, 710, 710, 710, 710, 710, 710, 710, 710, 710, 710, 710, 710, 710, 710, 710, 710, 710, 710, 710, 710, 710, 710, 710, 710, 710, 710, 710, 710, 710, 710, 710, 710, 710, 710, 710, 710, 710, 710, 710, 710, 710, 710, 710, 710, 710, 710, 710, 710, 710, 710, 710, 710, 710, 710, 710, 710, 710, 710, 710, 710, 710, 710, 710, 710, 710, 710, 710, 710, 710, 710, 710, 710, 710, 710, 710, 710, 710, 710, 710, 710, 710, 710, 710, 710, 710, 710, 710, 710, 710, 710, 710, 710, 710, 710, 710, 710, 710, 710, 710, 710, 710, 710, 710, 710, 710, 710, 710, 710, 710, 710, 710, 710, 710, 710, 710, 710, 710, 710, 710, 710, 710, 710, 710, 710, 710, 710, 710, 710, 710, 710, 710, 710, 710, 710, 710, 710, 710, 710, 710, 710, 710, 710, 710, 710, 710, 710, 710, 710, 710, 710, 710, 710, 710, 710, 710, 710, 710, 710, 710, 710, 710, 710, 710, 710, 710, 710, 710, 710, 710, 710, 710, 710, 710, 710, 710, 710, 710, 710, 710, 710, 710, 710, 710, 710, 710}, + {709, 709, 709, 709, 709, 709, 709, 709, 709, 709, 709, 709, 709, 709, 709, 709, 709, 709, 709, 709, 709, 709, 709, 709, 709, 709, 709, 709, 709, 709, 709, 709, 709, 709, 709, 709, 709, 709, 709, 709, 709, 709, 709, 709, 709, 709, 709, 709, 709, 709, 709, 709, 709, 709, 709, 709, 709, 709, 709, 709, 709, 709, 709, 709, 709, 709, 709, 709, 709, 709, 709, 709, 709, 709, 709, 709, 709, 709, 709, 709, 709, 709, 709, 709, 709, 709, 709, 709, 709, 709, 709, 709, 709, 709, 709, 709, 709, 709, 709, 709, 709, 709, 709, 709, 709, 709, 709, 709, 709, 709, 709, 709, 709, 709, 709, 709, 709, 709, 709, 709, 709, 709, 709, 709, 709, 709, 709, 709, 709, 709, 709, 709, 709, 709, 709, 709, 
709, 709, 709, 709, 709, 709, 709, 709, 709, 709, 709, 709, 709, 709, 709, 709, 709, 709, 709, 709, 709, 709, 709, 709, 709, 709, 709, 709, 709, 709, 709, 709, 709, 709, 709, 709, 709, 709, 709, 709, 709, 709, 709, 709, 709, 709, 709, 709, 709, 709, 709, 709, 709, 709, 709, 709, 709, 709, 709, 709, 709, 709, 709, 709, 709, 709, 709, 709, 709, 709, 709, 709, 709, 709, 709, 709, 709, 709, 709, 709, 709, 709, 709, 709, 709, 709, 709, 709, 709, 709, 709, 709, 709, 709, 709, 709, 709, 709, 709, 709, 709, 709, 709, 709, 709, 709, 709, 709, 709, 709, 709, 709, 709, 709, 709, 709, 709, 709, 709, 709, 709, 709, 709, 709, 709, 709, 709, 709, 709, 709, 709, 709, 709, 709, 709, 709, 709, 709, 709, 709, 709, 709, 709, 709, 709, 709, 709, 709, 709, 709, 709, 709, 709, 709, 709, 709, 709, 709, 709, 709, 709, 709, 709, 709, 709, 709, 709, 709, 709, 709, 709, 709, 709, 709, 709, 709, 709, 709, 709, 709, 709, 709, 709, 709, 709, 709, 709, 709, 709, 709, 709, 709, 709, 709, 709, 709, 709, 709, 709, 709, 709, 709, 709, 709, 709, 709, 709, 709, 709, 709, 709, 709, 709, 709, 709, 709, 709, 709, 709, 709, 709, 709, 709, 709, 709, 709, 709, 709, 709, 709, 709, 709, 709, 709, 709, 709, 709, 709, 709, 709, 709, 709, 709, 709, 709, 709, 709, 709, 709, 709, 709, 709, 709, 709, 709, 709, 709, 709, 709, 709, 709, 709, 709, 709, 709, 709, 709, 709, 709, 709, 709, 709, 709, 709, 709, 709, 709, 709, 709, 709, 709, 709, 709, 709, 709, 709, 709, 709, 709, 709, 709, 709, 709, 709, 709, 709, 709, 709, 709, 709, 709, 709, 709, 709, 709, 709, 709, 709, 709, 709, 709, 709, 709, 709, 709, 709, 709, 709, 709, 709, 709, 709, 709, 709, 709, 709, 709, 709, 709, 709, 709, 709, 709, 709, 709, 709, 709, 709, 709, 709, 709, 709, 709, 709, 709, 709, 709, 709, 709, 709, 709, 709, 709, 709, 709, 709, 709, 709, 709, 709, 709, 709, 709, 709, 709, 709, 709, 709, 709, 709, 709, 709, 709, 709, 709, 709, 709, 709, 709, 709, 709, 709, 709, 709, 709, 709, 709}, + {708, 708, 708, 708, 708, 708, 708, 708, 708, 708, 708, 708, 708, 708, 708, 708, 708, 708, 708, 708, 708, 708, 708, 708, 708, 708, 708, 708, 708, 708, 708, 708, 708, 708, 708, 708, 708, 708, 708, 708, 708, 708, 708, 708, 708, 708, 708, 708, 708, 708, 708, 708, 708, 708, 708, 708, 708, 708, 708, 708, 708, 708, 708, 708, 708, 708, 708, 708, 708, 708, 708, 708, 708, 708, 708, 708, 708, 708, 708, 708, 708, 708, 708, 708, 708, 708, 708, 708, 708, 708, 708, 708, 708, 708, 708, 708, 708, 708, 708, 708, 708, 708, 708, 708, 708, 708, 708, 708, 708, 708, 708, 708, 708, 708, 708, 708, 708, 708, 708, 708, 708, 708, 708, 708, 708, 708, 708, 708, 708, 708, 708, 708, 708, 708, 708, 708, 708, 708, 708, 708, 708, 708, 708, 708, 708, 708, 708, 708, 708, 708, 708, 708, 708, 708, 708, 708, 708, 708, 708, 708, 708, 708, 708, 708, 708, 708, 708, 708, 708, 708, 708, 708, 708, 708, 708, 708, 708, 708, 708, 708, 708, 708, 708, 708, 708, 708, 708, 708, 708, 708, 708, 708, 708, 708, 708, 708, 708, 708, 708, 708, 708, 708, 708, 708, 708, 708, 708, 708, 708, 708, 708, 708, 708, 708, 708, 708, 708, 708, 708, 708, 708, 708, 708, 708, 708, 708, 708, 708, 708, 708, 708, 708, 708, 708, 708, 708, 708, 708, 708, 708, 708, 708, 708, 708, 708, 708, 708, 708, 708, 708, 708, 708, 708, 708, 708, 708, 708, 708, 708, 708, 708, 708, 708, 708, 708, 708, 708, 708, 708, 708, 708, 708, 708, 708, 708, 708, 708, 708, 708, 708, 708, 708, 708, 708, 708, 708, 708, 708, 708, 708, 708, 708, 708, 708, 708, 708, 708, 708, 708, 708, 708, 708, 708, 708, 708, 708, 708, 708, 708, 708, 708, 708, 708, 708, 708, 708, 708, 708, 708, 708, 708, 708, 708, 
708, 708, 708, 708, 708, 708, 708, 708, 708, 708, 708, 708, 708, 708, 708, 708, 708, 708, 708, 708, 708, 708, 708, 708, 708, 708, 708, 708, 708, 708, 708, 708, 708, 708, 708, 708, 708, 708, 708, 708, 708, 708, 708, 708, 708, 708, 708, 708, 708, 708, 708, 708, 708, 708, 708, 708, 708, 708, 708, 708, 708, 708, 708, 708, 708, 708, 708, 708, 708, 708, 708, 708, 708, 708, 708, 708, 708, 708, 708, 708, 708, 708, 708, 708, 708, 708, 708, 708, 708, 708, 708, 708, 708, 708, 708, 708, 708, 708, 708, 708, 708, 708, 708, 708, 708, 708, 708, 708, 708, 708, 708, 708, 708, 708, 708, 708, 708, 708, 708, 708, 708, 708, 708, 708, 708, 708, 708, 708, 708, 708, 708, 708, 708, 708, 708, 708, 708, 708, 708, 708, 708, 708, 708, 708, 708, 708, 708, 708, 708, 708, 708, 708, 708, 708, 708, 708, 708, 708, 708, 708, 708, 708, 708, 708, 708, 708, 708, 708, 708, 708, 708, 708, 708, 708, 708, 708, 708, 708, 708, 708, 708, 708, 708, 708, 708, 708, 708, 708, 708, 708, 708, 708, 708, 708, 708, 708, 708, 708, 708, 708}, + {707, 707, 707, 707, 707, 707, 707, 707, 707, 707, 707, 707, 707, 707, 707, 707, 707, 707, 707, 707, 707, 707, 707, 707, 707, 707, 707, 707, 707, 707, 707, 707, 707, 707, 707, 707, 707, 707, 707, 707, 707, 707, 707, 707, 707, 707, 707, 707, 707, 707, 707, 707, 707, 707, 707, 707, 707, 707, 707, 707, 707, 707, 707, 707, 707, 707, 707, 707, 707, 707, 707, 707, 707, 707, 707, 707, 707, 707, 707, 707, 707, 707, 707, 707, 707, 707, 707, 707, 707, 707, 707, 707, 707, 707, 707, 707, 707, 707, 707, 707, 707, 707, 707, 707, 707, 707, 707, 707, 707, 707, 707, 707, 707, 707, 707, 707, 707, 707, 707, 707, 707, 707, 707, 707, 707, 707, 707, 707, 707, 707, 707, 707, 707, 707, 707, 707, 707, 707, 707, 707, 707, 707, 707, 707, 707, 707, 707, 707, 707, 707, 707, 707, 707, 707, 707, 707, 707, 707, 707, 707, 707, 707, 707, 707, 707, 707, 707, 707, 707, 707, 707, 707, 707, 707, 707, 707, 707, 707, 707, 707, 707, 707, 707, 707, 707, 707, 707, 707, 707, 707, 707, 707, 707, 707, 707, 707, 707, 707, 707, 707, 707, 707, 707, 707, 707, 707, 707, 707, 707, 707, 707, 707, 707, 707, 707, 707, 707, 707, 707, 707, 707, 707, 707, 707, 707, 707, 707, 707, 707, 707, 707, 707, 707, 707, 707, 707, 707, 707, 707, 707, 707, 707, 707, 707, 707, 707, 707, 707, 707, 707, 707, 707, 707, 707, 707, 707, 707, 707, 707, 707, 707, 707, 707, 707, 707, 707, 707, 707, 707, 707, 707, 707, 707, 707, 707, 707, 707, 707, 707, 707, 707, 707, 707, 707, 707, 707, 707, 707, 707, 707, 707, 707, 707, 707, 707, 707, 707, 707, 707, 707, 707, 707, 707, 707, 707, 707, 707, 707, 707, 707, 707, 707, 707, 707, 707, 707, 707, 707, 707, 707, 707, 707, 707, 707, 707, 707, 707, 707, 707, 707, 707, 707, 707, 707, 707, 707, 707, 707, 707, 707, 707, 707, 707, 707, 707, 707, 707, 707, 707, 707, 707, 707, 707, 707, 707, 707, 707, 707, 707, 707, 707, 707, 707, 707, 707, 707, 707, 707, 707, 707, 707, 707, 707, 707, 707, 707, 707, 707, 707, 707, 707, 707, 707, 707, 707, 707, 707, 707, 707, 707, 707, 707, 707, 707, 707, 707, 707, 707, 707, 707, 707, 707, 707, 707, 707, 707, 707, 707, 707, 707, 707, 707, 707, 707, 707, 707, 707, 707, 707, 707, 707, 707, 707, 707, 707, 707, 707, 707, 707, 707, 707, 707, 707, 707, 707, 707, 707, 707, 707, 707, 707, 707, 707, 707, 707, 707, 707, 707, 707, 707, 707, 707, 707, 707, 707, 707, 707, 707, 707, 707, 707, 707, 707, 707, 707, 707, 707, 707, 707, 707, 707, 707, 707, 707, 707, 707, 707, 707, 707, 707, 707, 707, 707, 707, 707, 707, 707, 707, 707, 707, 707, 707, 707, 707, 707, 707, 707, 707, 707, 707, 707, 707, 707, 707, 707, 707, 707, 707, 707, 707, 
707, 707, 707, 707, 707, 707, 707, 707, 707, 707, 707, 707, 707}, + // 235 + {706, 706, 706, 706, 706, 706, 706, 706, 706, 706, 706, 706, 706, 706, 706, 706, 706, 706, 706, 706, 706, 706, 706, 706, 706, 706, 706, 706, 706, 706, 706, 706, 706, 706, 706, 706, 706, 706, 706, 706, 706, 706, 706, 706, 706, 706, 706, 706, 706, 706, 706, 706, 706, 706, 706, 706, 706, 706, 706, 706, 706, 706, 706, 706, 706, 706, 706, 706, 706, 706, 706, 706, 706, 706, 706, 706, 706, 706, 706, 706, 706, 706, 706, 706, 706, 706, 706, 706, 706, 706, 706, 706, 706, 706, 706, 706, 706, 706, 706, 706, 706, 706, 706, 706, 706, 706, 706, 706, 706, 706, 706, 706, 706, 706, 706, 706, 706, 706, 706, 706, 706, 706, 706, 706, 706, 706, 706, 706, 706, 706, 706, 706, 706, 706, 706, 706, 706, 706, 706, 706, 706, 706, 706, 706, 706, 706, 706, 706, 706, 706, 706, 706, 706, 706, 706, 706, 706, 706, 706, 706, 706, 706, 706, 706, 706, 706, 706, 706, 706, 706, 706, 706, 706, 706, 706, 706, 706, 706, 706, 706, 706, 706, 706, 706, 706, 706, 706, 706, 706, 706, 706, 706, 706, 706, 706, 706, 706, 706, 706, 706, 706, 706, 706, 706, 706, 706, 706, 706, 706, 706, 706, 706, 706, 706, 706, 706, 706, 706, 706, 706, 706, 706, 706, 706, 706, 706, 706, 706, 706, 706, 706, 706, 706, 706, 706, 706, 706, 706, 706, 706, 706, 706, 706, 706, 706, 706, 706, 706, 706, 706, 706, 706, 706, 706, 706, 706, 706, 706, 706, 706, 706, 706, 706, 706, 706, 706, 706, 706, 706, 706, 706, 706, 706, 706, 706, 706, 706, 706, 706, 706, 706, 706, 706, 706, 706, 706, 706, 706, 706, 706, 706, 706, 706, 706, 706, 706, 706, 706, 706, 706, 706, 706, 706, 706, 706, 706, 706, 706, 706, 706, 706, 706, 706, 706, 706, 706, 706, 706, 706, 706, 706, 706, 706, 706, 706, 706, 706, 706, 706, 706, 706, 706, 706, 706, 706, 706, 706, 706, 706, 706, 706, 706, 706, 706, 706, 706, 706, 706, 706, 706, 706, 706, 706, 706, 706, 706, 706, 706, 706, 706, 706, 706, 706, 706, 706, 706, 706, 706, 706, 706, 706, 706, 706, 706, 706, 706, 706, 706, 706, 706, 706, 706, 706, 706, 706, 706, 706, 706, 706, 706, 706, 706, 706, 706, 706, 706, 706, 706, 706, 706, 706, 706, 706, 706, 706, 706, 706, 706, 706, 706, 706, 706, 706, 706, 706, 706, 706, 706, 706, 706, 706, 706, 706, 706, 706, 706, 706, 706, 706, 706, 706, 706, 706, 706, 706, 706, 706, 706, 706, 706, 706, 706, 706, 706, 706, 706, 706, 706, 706, 706, 706, 706, 706, 706, 706, 706, 706, 706, 706, 706, 706, 706, 706, 706, 706, 706, 706, 706, 706, 706, 706, 706, 706, 706, 706, 706, 706, 706, 706, 706, 706, 706, 706, 706, 706, 706, 706, 706, 706, 706, 706, 706, 706, 706, 706, 706, 706, 706, 706, 706, 706, 706, 706, 706, 706, 706, 706, 706, 706, 706, 706, 706, 706, 706, 706, 706, 706, 706, 706, 706, 706, 706, 706}, + {705, 705, 705, 705, 705, 705, 705, 705, 705, 705, 705, 705, 705, 705, 705, 705, 705, 705, 705, 705, 705, 705, 705, 705, 705, 705, 705, 705, 705, 705, 705, 705, 705, 705, 705, 705, 705, 705, 705, 705, 705, 705, 705, 705, 705, 705, 705, 705, 705, 705, 705, 705, 705, 705, 705, 705, 705, 705, 705, 705, 705, 705, 705, 705, 705, 705, 705, 705, 705, 705, 705, 705, 705, 705, 705, 705, 705, 705, 705, 705, 705, 705, 705, 705, 705, 705, 705, 705, 705, 705, 705, 705, 705, 705, 705, 705, 705, 705, 705, 705, 705, 705, 705, 705, 705, 705, 705, 705, 705, 705, 705, 705, 705, 705, 705, 705, 705, 705, 705, 705, 705, 705, 705, 705, 705, 705, 705, 705, 705, 705, 705, 705, 705, 705, 705, 705, 705, 705, 705, 705, 705, 705, 705, 705, 705, 705, 705, 705, 705, 705, 705, 705, 705, 705, 705, 705, 705, 705, 705, 705, 705, 705, 705, 705, 705, 705, 705, 705, 705, 705, 705, 
705, 705, 705, 705, 705, 705, 705, 705, 705, 705, 705, 705, 705, 705, 705, 705, 705, 705, 705, 705, 705, 705, 705, 705, 705, 705, 705, 705, 705, 705, 705, 705, 705, 705, 705, 705, 705, 705, 705, 705, 705, 705, 705, 705, 705, 705, 705, 705, 705, 705, 705, 705, 705, 705, 705, 705, 705, 705, 705, 705, 705, 705, 705, 705, 705, 705, 705, 705, 705, 705, 705, 705, 705, 705, 705, 705, 705, 705, 705, 705, 705, 705, 705, 705, 705, 705, 705, 705, 705, 705, 705, 705, 705, 705, 705, 705, 705, 705, 705, 705, 705, 705, 705, 705, 705, 705, 705, 705, 705, 705, 705, 705, 705, 705, 705, 705, 705, 705, 705, 705, 705, 705, 705, 705, 705, 705, 705, 705, 705, 705, 705, 705, 705, 705, 705, 705, 705, 705, 705, 705, 705, 705, 705, 705, 705, 705, 705, 705, 705, 705, 705, 705, 705, 705, 705, 705, 705, 705, 705, 705, 705, 705, 705, 705, 705, 705, 705, 705, 705, 705, 705, 705, 705, 705, 705, 705, 705, 705, 705, 705, 705, 705, 705, 705, 705, 705, 705, 705, 705, 705, 705, 705, 705, 705, 705, 705, 705, 705, 705, 705, 705, 705, 705, 705, 705, 705, 705, 705, 705, 705, 705, 705, 705, 705, 705, 705, 705, 705, 705, 705, 705, 705, 705, 705, 705, 705, 705, 705, 705, 705, 705, 705, 705, 705, 705, 705, 705, 705, 705, 705, 705, 705, 705, 705, 705, 705, 705, 705, 705, 705, 705, 705, 705, 705, 705, 705, 705, 705, 705, 705, 705, 705, 705, 705, 705, 705, 705, 705, 705, 705, 705, 705, 705, 705, 705, 705, 705, 705, 705, 705, 705, 705, 705, 705, 705, 705, 705, 705, 705, 705, 705, 705, 705, 705, 705, 705, 705, 705, 705, 705, 705, 705, 705, 705, 705, 705, 705, 705, 705, 705, 705, 705, 705, 705, 705, 705, 705, 705, 705, 705, 705, 705, 705, 705, 705, 705, 705, 705, 705, 705, 705, 705, 705, 705, 705, 705, 705, 705, 705, 705, 705, 705, 705, 705, 705, 705, 705, 705, 705, 705, 705, 705}, + {704, 704, 704, 704, 704, 704, 704, 704, 704, 704, 704, 704, 704, 704, 704, 704, 704, 704, 704, 704, 704, 704, 704, 704, 704, 704, 704, 704, 704, 704, 704, 704, 704, 704, 704, 704, 704, 704, 704, 704, 704, 704, 704, 704, 704, 704, 704, 704, 704, 704, 704, 704, 704, 704, 704, 704, 704, 704, 704, 704, 704, 704, 704, 704, 704, 704, 704, 704, 704, 704, 704, 704, 704, 704, 704, 704, 704, 704, 704, 704, 704, 704, 704, 704, 704, 704, 704, 704, 704, 704, 704, 704, 704, 704, 704, 704, 704, 704, 704, 704, 704, 704, 704, 704, 704, 704, 704, 704, 704, 704, 704, 704, 704, 704, 704, 704, 704, 704, 704, 704, 704, 704, 704, 704, 704, 704, 704, 704, 704, 704, 704, 704, 704, 704, 704, 704, 704, 704, 704, 704, 704, 704, 704, 704, 704, 704, 704, 704, 704, 704, 704, 704, 704, 704, 704, 704, 704, 704, 704, 704, 704, 704, 704, 704, 704, 704, 704, 704, 704, 704, 704, 704, 704, 704, 704, 704, 704, 704, 704, 704, 704, 704, 704, 704, 704, 704, 704, 704, 704, 704, 704, 704, 704, 704, 704, 704, 704, 704, 704, 704, 704, 704, 704, 704, 704, 704, 704, 704, 704, 704, 704, 704, 704, 704, 704, 704, 704, 704, 704, 704, 704, 704, 704, 704, 704, 704, 704, 704, 704, 704, 704, 704, 704, 704, 704, 704, 704, 704, 704, 704, 704, 704, 704, 704, 704, 704, 704, 704, 704, 704, 704, 704, 704, 704, 704, 704, 704, 704, 704, 704, 704, 704, 704, 704, 704, 704, 704, 704, 704, 704, 704, 704, 704, 704, 704, 704, 704, 704, 704, 704, 704, 704, 704, 704, 704, 704, 704, 704, 704, 704, 704, 704, 704, 704, 704, 704, 704, 704, 704, 704, 704, 704, 704, 704, 704, 704, 704, 704, 704, 704, 704, 704, 704, 704, 704, 704, 704, 704, 704, 704, 704, 704, 704, 704, 704, 704, 704, 704, 704, 704, 704, 704, 704, 704, 704, 704, 704, 704, 704, 704, 704, 704, 704, 704, 704, 704, 704, 704, 704, 704, 704, 704, 704, 704, 704, 704, 704, 704, 
704, 704, 704, 704, 704, 704, 704, 704, 704, 704, 704, 704, 704, 704, 704, 704, 704, 704, 704, 704, 704, 704, 704, 704, 704, 704, 704, 704, 704, 704, 704, 704, 704, 704, 704, 704, 704, 704, 704, 704, 704, 704, 704, 704, 704, 704, 704, 704, 704, 704, 704, 704, 704, 704, 704, 704, 704, 704, 704, 704, 704, 704, 704, 704, 704, 704, 704, 704, 704, 704, 704, 704, 704, 704, 704, 704, 704, 704, 704, 704, 704, 704, 704, 704, 704, 704, 704, 704, 704, 704, 704, 704, 704, 704, 704, 704, 704, 704, 704, 704, 704, 704, 704, 704, 704, 704, 704, 704, 704, 704, 704, 704, 704, 704, 704, 704, 704, 704, 704, 704, 704, 704, 704, 704, 704, 704, 704, 704, 704, 704, 704, 704, 704, 704, 704, 704, 704, 704, 704, 704, 704, 704, 704, 704, 704, 704, 704, 704, 704, 704, 704, 704, 704, 704, 704, 704, 704, 704, 704, 704, 704, 704, 704, 704, 704}, + {703, 703, 703, 703, 703, 703, 703, 703, 703, 703, 703, 703, 703, 703, 703, 703, 703, 703, 703, 703, 703, 703, 703, 703, 703, 703, 703, 703, 703, 703, 703, 703, 703, 703, 703, 703, 703, 703, 703, 703, 703, 703, 703, 703, 703, 703, 703, 703, 703, 703, 703, 703, 703, 703, 703, 703, 703, 703, 703, 703, 703, 703, 703, 703, 703, 703, 703, 703, 703, 703, 703, 703, 703, 703, 703, 703, 703, 703, 703, 703, 703, 703, 703, 703, 703, 703, 703, 703, 703, 703, 703, 703, 703, 703, 703, 703, 703, 703, 703, 703, 703, 703, 703, 703, 703, 703, 703, 703, 703, 703, 703, 703, 703, 703, 703, 703, 703, 703, 703, 703, 703, 703, 703, 703, 703, 703, 703, 703, 703, 703, 703, 703, 703, 703, 703, 703, 703, 703, 703, 703, 703, 703, 703, 703, 703, 703, 703, 703, 703, 703, 703, 703, 703, 703, 703, 703, 703, 703, 703, 703, 703, 703, 703, 703, 703, 703, 703, 703, 703, 703, 703, 703, 703, 703, 703, 703, 703, 703, 703, 703, 703, 703, 703, 703, 703, 703, 703, 703, 703, 703, 703, 703, 703, 703, 703, 703, 703, 703, 703, 703, 703, 703, 703, 703, 703, 703, 703, 703, 703, 703, 703, 703, 703, 703, 703, 703, 703, 703, 703, 703, 703, 703, 703, 703, 703, 703, 703, 703, 703, 703, 703, 703, 703, 703, 703, 703, 703, 703, 703, 703, 703, 703, 703, 703, 703, 703, 703, 703, 703, 703, 703, 703, 703, 703, 703, 703, 703, 703, 703, 703, 703, 703, 703, 703, 703, 703, 703, 703, 703, 703, 703, 703, 703, 703, 703, 703, 703, 703, 703, 703, 703, 703, 703, 703, 703, 703, 703, 703, 703, 703, 703, 703, 703, 703, 703, 703, 703, 703, 703, 703, 703, 703, 703, 703, 703, 703, 703, 703, 703, 703, 703, 703, 703, 703, 703, 703, 703, 703, 703, 703, 703, 703, 703, 703, 703, 703, 703, 703, 703, 703, 703, 703, 703, 703, 703, 703, 703, 703, 703, 703, 703, 703, 703, 703, 703, 703, 703, 703, 703, 703, 703, 703, 703, 703, 703, 703, 703, 703, 703, 703, 703, 703, 703, 703, 703, 703, 703, 703, 703, 703, 703, 703, 703, 703, 703, 703, 703, 703, 703, 703, 703, 703, 703, 703, 703, 703, 703, 703, 703, 703, 703, 703, 703, 703, 703, 703, 703, 703, 703, 703, 703, 703, 703, 703, 703, 703, 703, 703, 703, 703, 703, 703, 703, 703, 703, 703, 703, 703, 703, 703, 703, 703, 703, 703, 703, 703, 703, 703, 703, 703, 703, 703, 703, 703, 703, 703, 703, 703, 703, 703, 703, 703, 703, 703, 703, 703, 703, 703, 703, 703, 703, 703, 703, 703, 703, 703, 703, 703, 703, 703, 703, 703, 703, 703, 703, 703, 703, 703, 703, 703, 703, 703, 703, 703, 703, 703, 703, 703, 703, 703, 703, 703, 703, 703, 703, 703, 703, 703, 703, 703, 703, 703, 703, 703, 703, 703, 703, 703, 703, 703, 703, 703, 703, 703, 703, 703, 703, 703, 703, 703, 703, 703, 703, 703, 703, 703, 703, 703, 703, 703, 703, 703, 703}, + {702, 702, 702, 702, 702, 702, 702, 702, 702, 702, 702, 702, 702, 702, 702, 702, 702, 702, 702, 702, 702, 
702, 702, 702, 702, 702, 702, 702, 702, 702, 702, 702, 702, 702, 702, 702, 702, 702, 702, 702, 702, 702, 702, 702, 702, 702, 702, 702, 702, 702, 702, 702, 702, 702, 702, 702, 702, 702, 702, 702, 702, 702, 702, 702, 702, 702, 702, 702, 702, 702, 702, 702, 702, 702, 702, 702, 702, 702, 702, 702, 702, 702, 702, 702, 702, 702, 702, 702, 702, 702, 702, 702, 702, 702, 702, 702, 702, 702, 702, 702, 702, 702, 702, 702, 702, 702, 702, 702, 702, 702, 702, 702, 702, 702, 702, 702, 702, 702, 702, 702, 702, 702, 702, 702, 702, 702, 702, 702, 702, 702, 702, 702, 702, 702, 702, 702, 702, 702, 702, 702, 702, 702, 702, 702, 702, 702, 702, 702, 702, 702, 702, 702, 702, 702, 702, 702, 702, 702, 702, 702, 702, 702, 702, 702, 702, 702, 702, 702, 702, 702, 702, 702, 702, 702, 702, 702, 702, 702, 702, 702, 702, 702, 702, 702, 702, 702, 702, 702, 702, 702, 702, 702, 702, 702, 702, 702, 702, 702, 702, 702, 702, 702, 702, 702, 702, 702, 702, 702, 702, 702, 702, 702, 702, 702, 702, 702, 702, 702, 702, 702, 702, 702, 702, 702, 702, 702, 702, 702, 702, 702, 702, 702, 702, 702, 702, 702, 702, 702, 702, 702, 702, 702, 702, 702, 702, 702, 702, 702, 702, 702, 702, 702, 702, 702, 702, 702, 702, 702, 702, 702, 702, 702, 702, 702, 702, 702, 702, 702, 702, 702, 702, 702, 702, 702, 702, 702, 702, 702, 702, 702, 702, 702, 702, 702, 702, 702, 702, 702, 702, 702, 702, 702, 702, 702, 702, 702, 702, 702, 702, 702, 702, 702, 702, 702, 702, 702, 702, 702, 702, 702, 702, 702, 702, 702, 702, 702, 702, 702, 702, 702, 702, 702, 702, 702, 702, 702, 702, 702, 702, 702, 702, 702, 702, 702, 702, 702, 702, 702, 702, 702, 702, 702, 702, 702, 702, 702, 702, 702, 702, 702, 702, 702, 702, 702, 702, 702, 702, 702, 702, 702, 702, 702, 702, 702, 702, 702, 702, 702, 702, 702, 702, 702, 702, 702, 702, 702, 702, 702, 702, 702, 702, 702, 702, 702, 702, 702, 702, 702, 702, 702, 702, 702, 702, 702, 702, 702, 702, 702, 702, 702, 702, 702, 702, 702, 702, 702, 702, 702, 702, 702, 702, 702, 702, 702, 702, 702, 702, 702, 702, 702, 702, 702, 702, 702, 702, 702, 702, 702, 702, 702, 702, 702, 702, 702, 702, 702, 702, 702, 702, 702, 702, 702, 702, 702, 702, 702, 702, 702, 702, 702, 702, 702, 702, 702, 702, 702, 702, 702, 702, 702, 702, 702, 702, 702, 702, 702, 702, 702, 702, 702, 702, 702, 702, 702, 702, 702, 702, 702, 702, 702, 702, 702, 702, 702, 702, 702, 702, 702, 702, 702, 702, 702, 702, 702, 702, 702, 702, 702, 702, 702, 702, 702, 702, 702, 702, 702, 702, 702, 702, 702, 702, 702, 702, 702, 702, 702, 702, 702, 702, 702, 702, 702, 702}, + // 240 + {701, 701, 701, 701, 701, 701, 701, 701, 701, 701, 701, 701, 701, 701, 701, 701, 701, 701, 701, 701, 701, 701, 701, 701, 701, 701, 701, 701, 701, 701, 701, 701, 701, 701, 701, 701, 701, 701, 701, 701, 701, 701, 701, 701, 701, 701, 701, 701, 701, 701, 701, 701, 701, 701, 701, 701, 701, 701, 701, 701, 701, 701, 701, 701, 701, 701, 701, 701, 701, 701, 701, 701, 701, 701, 701, 701, 701, 701, 701, 701, 701, 701, 701, 701, 701, 701, 701, 701, 701, 701, 701, 701, 701, 701, 701, 701, 701, 701, 701, 701, 701, 701, 701, 701, 701, 701, 701, 701, 701, 701, 701, 701, 701, 701, 701, 701, 701, 701, 701, 701, 701, 701, 701, 701, 701, 701, 701, 701, 701, 701, 701, 701, 701, 701, 701, 701, 701, 701, 701, 701, 701, 701, 701, 701, 701, 701, 701, 701, 701, 701, 701, 701, 701, 701, 701, 701, 701, 701, 701, 701, 701, 701, 701, 701, 701, 701, 701, 701, 701, 701, 701, 701, 701, 701, 701, 701, 701, 701, 701, 701, 701, 701, 701, 701, 701, 701, 701, 701, 701, 701, 701, 701, 701, 701, 701, 701, 701, 701, 701, 701, 701, 701, 701, 701, 701, 701, 
701, 701, 701, 701, 701, 701, 701, 701, 701, 701, 701, 701, 701, 701, 701, 701, 701, 701, 701, 701, 701, 701, 701, 701, 701, 701, 701, 701, 701, 701, 701, 701, 701, 701, 701, 701, 701, 701, 701, 701, 701, 701, 701, 701, 701, 701, 701, 701, 701, 701, 701, 701, 701, 701, 701, 701, 701, 701, 701, 701, 701, 701, 701, 701, 701, 701, 701, 701, 701, 701, 701, 701, 701, 701, 701, 701, 701, 701, 701, 701, 701, 701, 701, 701, 701, 701, 701, 701, 701, 701, 701, 701, 701, 701, 701, 701, 701, 701, 701, 701, 701, 701, 701, 701, 701, 701, 701, 701, 701, 701, 701, 701, 701, 701, 701, 701, 701, 701, 701, 701, 701, 701, 701, 701, 701, 701, 701, 701, 701, 701, 701, 701, 701, 701, 701, 701, 701, 701, 701, 701, 701, 701, 701, 701, 701, 701, 701, 701, 701, 701, 701, 701, 701, 701, 701, 701, 701, 701, 701, 701, 701, 701, 701, 701, 701, 701, 701, 701, 701, 701, 701, 701, 701, 701, 701, 701, 701, 701, 701, 701, 701, 701, 701, 701, 701, 701, 701, 701, 701, 701, 701, 701, 701, 701, 701, 701, 701, 701, 701, 701, 701, 701, 701, 701, 701, 701, 701, 701, 701, 701, 701, 701, 701, 701, 701, 701, 701, 701, 701, 701, 701, 701, 701, 701, 701, 701, 701, 701, 701, 701, 701, 701, 701, 701, 701, 701, 701, 701, 701, 701, 701, 701, 701, 701, 701, 701, 701, 701, 701, 701, 701, 701, 701, 701, 701, 701, 701, 701, 701, 701, 701, 701, 701, 701, 701, 701, 701, 701, 701, 701, 701, 701, 701, 701, 701, 701, 701, 701, 701, 701, 701, 701, 701, 701, 701, 701, 701, 701, 701, 701, 701, 701, 701, 701, 701, 701, 701, 701, 701, 701, 701, 701, 701, 701, 701, 701, 701, 701, 701, 701, 701, 701, 701, 701, 701, 701, 701}, + {700, 700, 700, 700, 700, 700, 700, 700, 700, 700, 700, 700, 700, 700, 700, 700, 700, 700, 700, 700, 700, 700, 700, 700, 700, 700, 700, 700, 700, 700, 700, 700, 700, 700, 700, 700, 700, 700, 700, 700, 700, 700, 700, 700, 700, 700, 700, 700, 700, 700, 700, 700, 700, 700, 700, 700, 700, 700, 700, 700, 700, 700, 700, 700, 700, 700, 700, 700, 700, 700, 700, 700, 700, 700, 700, 700, 700, 700, 700, 700, 700, 700, 700, 700, 700, 700, 700, 700, 700, 700, 700, 700, 700, 700, 700, 700, 700, 700, 700, 700, 700, 700, 700, 700, 700, 700, 700, 700, 700, 700, 700, 700, 700, 700, 700, 700, 700, 700, 700, 700, 700, 700, 700, 700, 700, 700, 700, 700, 700, 700, 700, 700, 700, 700, 700, 700, 700, 700, 700, 700, 700, 700, 700, 700, 700, 700, 700, 700, 700, 700, 700, 700, 700, 700, 700, 700, 700, 700, 700, 700, 700, 700, 700, 700, 700, 700, 700, 700, 700, 700, 700, 700, 700, 700, 700, 700, 700, 700, 700, 700, 700, 700, 700, 700, 700, 700, 700, 700, 700, 700, 700, 700, 700, 700, 700, 700, 700, 700, 700, 700, 700, 700, 700, 700, 700, 700, 700, 700, 700, 700, 700, 700, 700, 700, 700, 700, 700, 700, 700, 700, 700, 700, 700, 700, 700, 700, 700, 700, 700, 700, 700, 700, 700, 700, 700, 700, 700, 700, 700, 700, 700, 700, 700, 700, 700, 700, 700, 700, 700, 700, 700, 700, 700, 700, 700, 700, 700, 700, 700, 700, 700, 700, 700, 700, 700, 700, 700, 700, 700, 700, 700, 700, 700, 700, 700, 700, 700, 700, 700, 700, 700, 700, 700, 700, 700, 700, 700, 700, 700, 700, 700, 700, 700, 700, 700, 700, 700, 700, 700, 700, 700, 700, 700, 700, 700, 700, 700, 700, 700, 700, 700, 700, 700, 700, 700, 700, 700, 700, 700, 700, 700, 700, 700, 700, 700, 700, 700, 700, 700, 700, 700, 700, 700, 700, 700, 700, 700, 700, 700, 700, 700, 700, 700, 700, 700, 700, 700, 700, 700, 700, 700, 700, 700, 700, 700, 700, 700, 700, 700, 700, 700, 700, 700, 700, 700, 700, 700, 700, 700, 700, 700, 700, 700, 700, 700, 700, 700, 700, 700, 700, 700, 700, 700, 700, 700, 700, 700, 700, 700, 700, 700, 700, 700, 
700, 700, 700, 700, 700, 700, 700, 700, 700, 700, 700, 700, 700, 700, 700, 700, 700, 700, 700, 700, 700, 700, 700, 700, 700, 700, 700, 700, 700, 700, 700, 700, 700, 700, 700, 700, 700, 700, 700, 700, 700, 700, 700, 700, 700, 700, 700, 700, 700, 700, 700, 700, 700, 700, 700, 700, 700, 700, 700, 700, 700, 700, 700, 700, 700, 700, 700, 700, 700, 700, 700, 700, 700, 700, 700, 700, 700, 700, 700, 700, 700, 700, 700, 700, 700, 700, 700, 700, 700, 700, 700, 700, 700, 700, 700, 700, 700, 700, 700, 700, 700, 700, 700, 700, 700, 700, 700, 700, 700, 700, 700, 700, 700, 700, 700, 700, 700, 700, 700, 700, 700, 700, 700, 700, 700, 700, 700, 700, 700, 700}, + {699, 699, 699, 699, 699, 699, 699, 699, 699, 699, 699, 699, 699, 699, 699, 699, 699, 699, 699, 699, 699, 699, 699, 699, 699, 699, 699, 699, 699, 699, 699, 699, 699, 699, 699, 699, 699, 699, 699, 699, 699, 699, 699, 699, 699, 699, 699, 699, 699, 699, 699, 699, 699, 699, 699, 699, 699, 699, 699, 699, 699, 699, 699, 699, 699, 699, 699, 699, 699, 699, 699, 699, 699, 699, 699, 699, 699, 699, 699, 699, 699, 699, 699, 699, 699, 699, 699, 699, 699, 699, 699, 699, 699, 699, 699, 699, 699, 699, 699, 699, 699, 699, 699, 699, 699, 699, 699, 699, 699, 699, 699, 699, 699, 699, 699, 699, 699, 699, 699, 699, 699, 699, 699, 699, 699, 699, 699, 699, 699, 699, 699, 699, 699, 699, 699, 699, 699, 699, 699, 699, 699, 699, 699, 699, 699, 699, 699, 699, 699, 699, 699, 699, 699, 699, 699, 699, 699, 699, 699, 699, 699, 699, 699, 699, 699, 699, 699, 699, 699, 699, 699, 699, 699, 699, 699, 699, 699, 699, 699, 699, 699, 699, 699, 699, 699, 699, 699, 699, 699, 699, 699, 699, 699, 699, 699, 699, 699, 699, 699, 699, 699, 699, 699, 699, 699, 699, 699, 699, 699, 699, 699, 699, 699, 699, 699, 699, 699, 699, 699, 699, 699, 699, 699, 699, 699, 699, 699, 699, 699, 699, 699, 699, 699, 699, 699, 699, 699, 699, 699, 699, 699, 699, 699, 699, 699, 699, 699, 699, 699, 699, 699, 699, 699, 699, 699, 699, 699, 699, 699, 699, 699, 699, 699, 699, 699, 699, 699, 699, 699, 699, 699, 699, 699, 699, 699, 699, 699, 699, 699, 699, 699, 699, 699, 699, 699, 699, 699, 699, 699, 699, 699, 699, 699, 699, 699, 699, 699, 699, 699, 699, 699, 699, 699, 699, 699, 699, 699, 699, 699, 699, 699, 699, 699, 699, 699, 699, 699, 699, 699, 699, 699, 699, 699, 699, 699, 699, 699, 699, 699, 699, 699, 699, 699, 699, 699, 699, 699, 699, 699, 699, 699, 699, 699, 699, 699, 699, 699, 699, 699, 699, 699, 699, 699, 699, 699, 699, 699, 699, 699, 699, 699, 699, 699, 699, 699, 699, 699, 699, 699, 699, 699, 699, 699, 699, 699, 699, 699, 699, 699, 699, 699, 699, 699, 699, 699, 699, 699, 699, 699, 699, 699, 699, 699, 699, 699, 699, 699, 699, 699, 699, 699, 699, 699, 699, 699, 699, 699, 699, 699, 699, 699, 699, 699, 699, 699, 699, 699, 699, 699, 699, 699, 699, 699, 699, 699, 699, 699, 699, 699, 699, 699, 699, 699, 699, 699, 699, 699, 699, 699, 699, 699, 699, 699, 699, 699, 699, 699, 699, 699, 699, 699, 699, 699, 699, 699, 699, 699, 699, 699, 699, 699, 699, 699, 699, 699, 699, 699, 699, 699, 699, 699, 699, 699, 699, 699, 699, 699, 699, 699, 699, 699, 699, 699, 699, 699, 699, 699, 699, 699, 699, 699, 699, 699, 699, 699, 699, 699, 699, 699, 699, 699, 699, 699, 699, 699, 699, 699, 699, 699, 699, 699, 699, 699, 699, 699, 699, 699, 699, 699, 699, 699, 699, 699}, + {698, 698, 698, 698, 698, 698, 698, 698, 698, 698, 698, 698, 698, 698, 698, 698, 698, 698, 698, 698, 698, 698, 698, 698, 698, 698, 698, 698, 698, 698, 698, 698, 698, 698, 698, 698, 698, 698, 698, 698, 698, 698, 698, 698, 698, 698, 698, 698, 698, 698, 698, 698, 698, 698, 698, 698, 
698, 698, 698, 698, 698, 698, 698, 698, 698, 698, 698, 698, 698, 698, 698, 698, 698, 698, 698, 698, 698, 698, 698, 698, 698, 698, 698, 698, 698, 698, 698, 698, 698, 698, 698, 698, 698, 698, 698, 698, 698, 698, 698, 698, 698, 698, 698, 698, 698, 698, 698, 698, 698, 698, 698, 698, 698, 698, 698, 698, 698, 698, 698, 698, 698, 698, 698, 698, 698, 698, 698, 698, 698, 698, 698, 698, 698, 698, 698, 698, 698, 698, 698, 698, 698, 698, 698, 698, 698, 698, 698, 698, 698, 698, 698, 698, 698, 698, 698, 698, 698, 698, 698, 698, 698, 698, 698, 698, 698, 698, 698, 698, 698, 698, 698, 698, 698, 698, 698, 698, 698, 698, 698, 698, 698, 698, 698, 698, 698, 698, 698, 698, 698, 698, 698, 698, 698, 698, 698, 698, 698, 698, 698, 698, 698, 698, 698, 698, 698, 698, 698, 698, 698, 698, 698, 698, 698, 698, 698, 698, 698, 698, 698, 698, 698, 698, 698, 698, 698, 698, 698, 698, 698, 698, 698, 698, 698, 698, 698, 698, 698, 698, 698, 698, 698, 698, 698, 698, 698, 698, 698, 698, 698, 698, 698, 698, 698, 698, 698, 698, 698, 698, 698, 698, 698, 698, 698, 698, 698, 698, 698, 698, 698, 698, 698, 698, 698, 698, 698, 698, 698, 698, 698, 698, 698, 698, 698, 698, 698, 698, 698, 698, 698, 698, 698, 698, 698, 698, 698, 698, 698, 698, 698, 698, 698, 698, 698, 698, 698, 698, 698, 698, 698, 698, 698, 698, 698, 698, 698, 698, 698, 698, 698, 698, 698, 698, 698, 698, 698, 698, 698, 698, 698, 698, 698, 698, 698, 698, 698, 698, 698, 698, 698, 698, 698, 698, 698, 698, 698, 698, 698, 698, 698, 698, 698, 698, 698, 698, 698, 698, 698, 698, 698, 698, 698, 698, 698, 698, 698, 698, 698, 698, 698, 698, 698, 698, 698, 698, 698, 698, 698, 698, 698, 698, 698, 698, 698, 698, 698, 698, 698, 698, 698, 698, 698, 698, 698, 698, 698, 698, 698, 698, 698, 698, 698, 698, 698, 698, 698, 698, 698, 698, 698, 698, 698, 698, 698, 698, 698, 698, 698, 698, 698, 698, 698, 698, 698, 698, 698, 698, 698, 698, 698, 698, 698, 698, 698, 698, 698, 698, 698, 698, 698, 698, 698, 698, 698, 698, 698, 698, 698, 698, 698, 698, 698, 698, 698, 698, 698, 698, 698, 698, 698, 698, 698, 698, 698, 698, 698, 698, 698, 698, 698, 698, 698, 698, 698, 698, 698, 698, 698, 698, 698, 698, 698, 698, 698, 698, 698, 698, 698, 698, 698, 698, 698, 698, 698, 698, 698, 698, 698, 698, 698, 698, 698, 698, 698, 698, 698, 698, 698, 698, 698, 698, 698, 698, 698, 698, 698, 698, 698, 698, 698, 698, 698, 698, 698}, + {697, 697, 697, 697, 697, 697, 697, 697, 697, 697, 697, 697, 697, 697, 697, 697, 697, 697, 697, 697, 697, 697, 697, 697, 697, 697, 697, 697, 697, 697, 697, 697, 697, 697, 697, 697, 697, 697, 697, 697, 697, 697, 697, 697, 697, 697, 697, 697, 697, 697, 697, 697, 697, 697, 697, 697, 697, 697, 697, 697, 697, 697, 697, 697, 697, 697, 697, 697, 697, 697, 697, 697, 697, 697, 697, 697, 697, 697, 697, 697, 697, 697, 697, 697, 697, 697, 697, 697, 697, 697, 697, 697, 697, 697, 697, 697, 697, 697, 697, 697, 697, 697, 697, 697, 697, 697, 697, 697, 697, 697, 697, 697, 697, 697, 697, 697, 697, 697, 697, 697, 697, 697, 697, 697, 697, 697, 697, 697, 697, 697, 697, 697, 697, 697, 697, 697, 697, 697, 697, 697, 697, 697, 697, 697, 697, 697, 697, 697, 697, 697, 697, 697, 697, 697, 697, 697, 697, 697, 697, 697, 697, 697, 697, 697, 697, 697, 697, 697, 697, 697, 697, 697, 697, 697, 697, 697, 697, 697, 697, 697, 697, 697, 697, 697, 697, 697, 697, 697, 697, 697, 697, 697, 697, 697, 697, 697, 697, 697, 697, 697, 697, 697, 697, 697, 697, 697, 697, 697, 697, 697, 697, 697, 697, 697, 697, 697, 697, 697, 697, 697, 697, 697, 697, 697, 697, 697, 697, 697, 697, 697, 697, 697, 697, 697, 697, 697, 697, 697, 697, 697, 697, 697, 697, 
697, 697, 697, 697, 697, 697, 697, 697, 697, 697, 697, 697, 697, 697, 697, 697, 697, 697, 697, 697, 697, 697, 697, 697, 697, 697, 697, 697, 697, 697, 697, 697, 697, 697, 697, 697, 697, 697, 697, 697, 697, 697, 697, 697, 697, 697, 697, 697, 697, 697, 697, 697, 697, 697, 697, 697, 697, 697, 697, 697, 697, 697, 697, 697, 697, 697, 697, 697, 697, 697, 697, 697, 697, 697, 697, 697, 697, 697, 697, 697, 697, 697, 697, 697, 697, 697, 697, 697, 697, 697, 697, 697, 697, 697, 697, 697, 697, 697, 697, 697, 697, 697, 697, 697, 697, 697, 697, 697, 697, 697, 697, 697, 697, 697, 697, 697, 697, 697, 697, 697, 697, 697, 697, 697, 697, 697, 697, 697, 697, 697, 697, 697, 697, 697, 697, 697, 697, 697, 697, 697, 697, 697, 697, 697, 697, 697, 697, 697, 697, 697, 697, 697, 697, 697, 697, 697, 697, 697, 697, 697, 697, 697, 697, 697, 697, 697, 697, 697, 697, 697, 697, 697, 697, 697, 697, 697, 697, 697, 697, 697, 697, 697, 697, 697, 697, 697, 697, 697, 697, 697, 697, 697, 697, 697, 697, 697, 697, 697, 697, 697, 697, 697, 697, 697, 697, 697, 697, 697, 697, 697, 697, 697, 697, 697, 697, 697, 697, 697, 697, 697, 697, 697, 697, 697, 697, 697, 697, 697, 697, 697, 697, 697, 697, 697, 697, 697, 697, 697, 697, 697, 697, 697, 697, 697, 697, 697, 697, 697, 697, 697, 697, 697, 697, 697, 697, 697, 697, 697, 697, 697, 697, 697, 697, 697, 697, 697, 697, 697, 697, 697, 697, 697, 697, 697, 697, 697, 697, 697, 697, 697}, + // 245 + {696, 696, 696, 696, 696, 696, 696, 696, 696, 696, 696, 696, 696, 696, 696, 696, 696, 696, 696, 696, 696, 696, 696, 696, 696, 696, 696, 696, 696, 696, 696, 696, 696, 696, 696, 696, 696, 696, 696, 696, 696, 696, 696, 696, 696, 696, 696, 696, 696, 696, 696, 696, 696, 696, 696, 696, 696, 696, 696, 696, 696, 696, 696, 696, 696, 696, 696, 696, 696, 696, 696, 696, 696, 696, 696, 696, 696, 696, 696, 696, 696, 696, 696, 696, 696, 696, 696, 696, 696, 696, 696, 696, 696, 696, 696, 696, 696, 696, 696, 696, 696, 696, 696, 696, 696, 696, 696, 696, 696, 696, 696, 696, 696, 696, 696, 696, 696, 696, 696, 696, 696, 696, 696, 696, 696, 696, 696, 696, 696, 696, 696, 696, 696, 696, 696, 696, 696, 696, 696, 696, 696, 696, 696, 696, 696, 696, 696, 696, 696, 696, 696, 696, 696, 696, 696, 696, 696, 696, 696, 696, 696, 696, 696, 696, 696, 696, 696, 696, 696, 696, 696, 696, 696, 696, 696, 696, 696, 696, 696, 696, 696, 696, 696, 696, 696, 696, 696, 696, 696, 696, 696, 696, 696, 696, 696, 696, 696, 696, 696, 696, 696, 696, 696, 696, 696, 696, 696, 696, 696, 696, 696, 696, 696, 696, 696, 696, 696, 696, 696, 696, 696, 696, 696, 696, 696, 696, 696, 696, 696, 696, 696, 696, 696, 696, 696, 696, 696, 696, 696, 696, 696, 696, 696, 696, 696, 696, 696, 696, 696, 696, 696, 696, 696, 696, 696, 696, 696, 696, 696, 696, 696, 696, 696, 696, 696, 696, 696, 696, 696, 696, 696, 696, 696, 696, 696, 696, 696, 696, 696, 696, 696, 696, 696, 696, 696, 696, 696, 696, 696, 696, 696, 696, 696, 696, 696, 696, 696, 696, 696, 696, 696, 696, 696, 696, 696, 696, 696, 696, 696, 696, 696, 696, 696, 696, 696, 696, 696, 696, 696, 696, 696, 696, 696, 696, 696, 696, 696, 696, 696, 696, 696, 696, 696, 696, 696, 696, 696, 696, 696, 696, 696, 696, 696, 696, 696, 696, 696, 696, 696, 696, 696, 696, 696, 696, 696, 696, 696, 696, 696, 696, 696, 696, 696, 696, 696, 696, 696, 696, 696, 696, 696, 696, 696, 696, 696, 696, 696, 696, 696, 696, 696, 696, 696, 696, 696, 696, 696, 696, 696, 696, 696, 696, 696, 696, 696, 696, 696, 696, 696, 696, 696, 696, 696, 696, 696, 696, 696, 696, 696, 696, 696, 696, 696, 696, 696, 696, 696, 696, 696, 696, 696, 696, 696, 696, 696, 696, 696, 696, 
696, 696, 696, 696, 696, 696, 696, 696, 696, 696, 696, 696, 696, 696, 696, 696, 696, 696, 696, 696, 696, 696, 696, 696, 696, 696, 696, 696, 696, 696, 696, 696, 696, 696, 696, 696, 696, 696, 696, 696, 696, 696, 696, 696, 696, 696, 696, 696, 696, 696, 696, 696, 696, 696, 696, 696, 696, 696, 696, 696, 696, 696, 696, 696, 696, 696, 696, 696, 696, 696, 696, 696, 696, 696, 696, 696, 696, 696, 696, 696, 696, 696, 696, 696, 696, 696, 696, 696, 696, 696, 696, 696, 696, 696, 696}, + {695, 695, 695, 695, 695, 695, 695, 695, 695, 695, 695, 695, 695, 695, 695, 695, 695, 695, 695, 695, 695, 695, 695, 695, 695, 695, 695, 695, 695, 695, 695, 695, 695, 695, 695, 695, 695, 695, 695, 695, 695, 695, 695, 695, 695, 695, 695, 695, 695, 695, 695, 695, 695, 695, 695, 695, 695, 695, 695, 695, 695, 695, 695, 695, 695, 695, 695, 695, 695, 695, 695, 695, 695, 695, 695, 695, 695, 695, 695, 695, 695, 695, 695, 695, 695, 695, 695, 695, 695, 695, 695, 695, 695, 695, 695, 695, 695, 695, 695, 695, 695, 695, 695, 695, 695, 695, 695, 695, 695, 695, 695, 695, 695, 695, 695, 695, 695, 695, 695, 695, 695, 695, 695, 695, 695, 695, 695, 695, 695, 695, 695, 695, 695, 695, 695, 695, 695, 695, 695, 695, 695, 695, 695, 695, 695, 695, 695, 695, 695, 695, 695, 695, 695, 695, 695, 695, 695, 695, 695, 695, 695, 695, 695, 695, 695, 695, 695, 695, 695, 695, 695, 695, 695, 695, 695, 695, 695, 695, 695, 695, 695, 695, 695, 695, 695, 695, 695, 695, 695, 695, 695, 695, 695, 695, 695, 695, 695, 695, 695, 695, 695, 695, 695, 695, 695, 695, 695, 695, 695, 695, 695, 695, 695, 695, 695, 695, 695, 695, 695, 695, 695, 695, 695, 695, 695, 695, 695, 695, 695, 695, 695, 695, 695, 695, 695, 695, 695, 695, 695, 695, 695, 695, 695, 695, 695, 695, 695, 695, 695, 695, 695, 695, 695, 695, 695, 695, 695, 695, 695, 695, 695, 695, 695, 695, 695, 695, 695, 695, 695, 695, 695, 695, 695, 695, 695, 695, 695, 695, 695, 695, 695, 695, 695, 695, 695, 695, 695, 695, 695, 695, 695, 695, 695, 695, 695, 695, 695, 695, 695, 695, 695, 695, 695, 695, 695, 695, 695, 695, 695, 695, 695, 695, 695, 695, 695, 695, 695, 695, 695, 695, 695, 695, 695, 695, 695, 695, 695, 695, 695, 695, 695, 695, 695, 695, 695, 695, 695, 695, 695, 695, 695, 695, 695, 695, 695, 695, 695, 695, 695, 695, 695, 695, 695, 695, 695, 695, 695, 695, 695, 695, 695, 695, 695, 695, 695, 695, 695, 695, 695, 695, 695, 695, 695, 695, 695, 695, 695, 695, 695, 695, 695, 695, 695, 695, 695, 695, 695, 695, 695, 695, 695, 695, 695, 695, 695, 695, 695, 695, 695, 695, 695, 695, 695, 695, 695, 695, 695, 695, 695, 695, 695, 695, 695, 695, 695, 695, 695, 695, 695, 695, 695, 695, 695, 695, 695, 695, 695, 695, 695, 695, 695, 695, 695, 695, 695, 695, 695, 695, 695, 695, 695, 695, 695, 695, 695, 695, 695, 695, 695, 695, 695, 695, 695, 695, 695, 695, 695, 695, 695, 695, 695, 695, 695, 695, 695, 695, 695, 695, 695, 695, 695, 695, 695, 695, 695, 695, 695, 695, 695, 695, 695, 695, 695, 695, 695, 695, 695, 695, 695, 695, 695, 695, 695, 695, 695, 695, 695, 695, 695, 695, 695, 695, 695, 695, 695, 695, 695, 695, 695, 695, 695, 695, 695, 695, 695, 695, 695, 695, 695, 695, 695, 695, 695}, + {694, 694, 694, 694, 694, 694, 694, 694, 694, 694, 694, 694, 694, 694, 694, 694, 694, 694, 694, 694, 694, 694, 694, 694, 694, 694, 694, 694, 694, 694, 694, 694, 694, 694, 694, 694, 694, 694, 694, 694, 694, 694, 694, 694, 694, 694, 694, 694, 694, 694, 694, 694, 694, 694, 694, 694, 694, 694, 694, 694, 694, 694, 694, 694, 694, 694, 694, 694, 694, 694, 694, 694, 694, 694, 694, 694, 694, 694, 694, 694, 694, 694, 694, 694, 694, 694, 694, 694, 694, 694, 694, 
694, 694, 694, 694, 694, 694, 694, 694, 694, 694, 694, 694, 694, 694, 694, 694, 694, 694, 694, 694, 694, 694, 694, 694, 694, 694, 694, 694, 694, 694, 694, 694, 694, 694, 694, 694, 694, 694, 694, 694, 694, 694, 694, 694, 694, 694, 694, 694, 694, 694, 694, 694, 694, 694, 694, 694, 694, 694, 694, 694, 694, 694, 694, 694, 694, 694, 694, 694, 694, 694, 694, 694, 694, 694, 694, 694, 694, 694, 694, 694, 694, 694, 694, 694, 694, 694, 694, 694, 694, 694, 694, 694, 694, 694, 694, 694, 694, 694, 694, 694, 694, 694, 694, 694, 694, 694, 694, 694, 694, 694, 694, 694, 694, 694, 694, 694, 694, 694, 694, 694, 694, 694, 694, 694, 694, 694, 694, 694, 694, 694, 694, 694, 694, 694, 694, 694, 694, 694, 694, 694, 694, 694, 694, 694, 694, 694, 694, 694, 694, 694, 694, 694, 694, 694, 694, 694, 694, 694, 694, 694, 694, 694, 694, 694, 694, 694, 694, 694, 694, 694, 694, 694, 694, 694, 694, 694, 694, 694, 694, 694, 694, 694, 694, 694, 694, 694, 694, 694, 694, 694, 694, 694, 694, 694, 694, 694, 694, 694, 694, 694, 694, 694, 694, 694, 694, 694, 694, 694, 694, 694, 694, 694, 694, 694, 694, 694, 694, 694, 694, 694, 694, 694, 694, 694, 694, 694, 694, 694, 694, 694, 694, 694, 694, 694, 694, 694, 694, 694, 694, 694, 694, 694, 694, 694, 694, 694, 694, 694, 694, 694, 694, 694, 694, 694, 694, 694, 694, 694, 694, 694, 694, 694, 694, 694, 694, 694, 694, 694, 694, 694, 694, 694, 694, 694, 694, 694, 694, 694, 694, 694, 694, 694, 694, 694, 694, 694, 694, 694, 694, 694, 694, 694, 694, 694, 694, 694, 694, 694, 694, 694, 694, 694, 694, 694, 694, 694, 694, 694, 694, 694, 694, 694, 694, 694, 694, 694, 694, 694, 694, 694, 694, 694, 694, 694, 694, 694, 694, 694, 694, 694, 694, 694, 694, 694, 694, 694, 694, 694, 694, 694, 694, 694, 694, 694, 694, 694, 694, 694, 694, 694, 694, 694, 694, 694, 694, 694, 694, 694, 694, 694, 694, 694, 694, 694, 694, 694, 694, 694, 694, 694, 694, 694, 694, 694, 694, 694, 694, 694, 694, 694, 694, 694, 694, 694, 694, 694, 694, 694, 694, 694, 694, 694, 694, 694, 694, 694, 694, 694, 694, 694, 694, 694, 694, 694, 694, 694, 694, 694, 694, 694, 694, 694, 694, 694, 694, 694, 694, 694, 694, 694, 694, 694, 694, 694, 694, 694, 694, 694, 694, 694, 694, 694}, + {693, 693, 693, 693, 693, 693, 693, 693, 693, 693, 693, 693, 693, 693, 693, 693, 693, 693, 693, 693, 693, 693, 693, 693, 693, 693, 693, 693, 693, 693, 693, 693, 693, 693, 693, 693, 693, 693, 693, 693, 693, 693, 693, 693, 693, 693, 693, 693, 693, 693, 693, 693, 693, 693, 693, 693, 693, 693, 693, 693, 693, 693, 693, 693, 693, 693, 693, 693, 693, 693, 693, 693, 693, 693, 693, 693, 693, 693, 693, 693, 693, 693, 693, 693, 693, 693, 693, 693, 693, 693, 693, 693, 693, 693, 693, 693, 693, 693, 693, 693, 693, 693, 693, 693, 693, 693, 693, 693, 693, 693, 693, 693, 693, 693, 693, 693, 693, 693, 693, 693, 693, 693, 693, 693, 693, 693, 693, 693, 693, 693, 693, 693, 693, 693, 693, 693, 693, 693, 693, 693, 693, 693, 693, 693, 693, 693, 693, 693, 693, 693, 693, 693, 693, 693, 693, 693, 693, 693, 693, 693, 693, 693, 693, 693, 693, 693, 693, 693, 693, 693, 693, 693, 693, 693, 693, 693, 693, 693, 693, 693, 693, 693, 693, 693, 693, 693, 693, 693, 693, 693, 693, 693, 693, 693, 693, 693, 693, 693, 693, 693, 693, 693, 693, 693, 693, 693, 693, 693, 693, 693, 693, 693, 693, 693, 693, 693, 693, 693, 693, 693, 693, 693, 693, 693, 693, 693, 693, 693, 693, 693, 693, 693, 693, 693, 693, 693, 693, 693, 693, 693, 693, 693, 693, 693, 693, 693, 693, 693, 693, 693, 693, 693, 693, 693, 693, 693, 693, 693, 693, 693, 693, 693, 693, 693, 693, 693, 693, 693, 693, 693, 693, 693, 693, 693, 693, 693, 693, 693, 
693, 693, 693, 693, 693, 693, 693, 693, 693, 693, 693, 693, 693, 693, 693, 693, 693, 693, 693, 693, 693, 693, 693, 693, 693, 693, 693, 693, 693, 693, 693, 693, 693, 693, 693, 693, 693, 693, 693, 693, 693, 693, 693, 693, 693, 693, 693, 693, 693, 693, 693, 693, 693, 693, 693, 693, 693, 693, 693, 693, 693, 693, 693, 693, 693, 693, 693, 693, 693, 693, 693, 693, 693, 693, 693, 693, 693, 693, 693, 693, 693, 693, 693, 693, 693, 693, 693, 693, 693, 693, 693, 693, 693, 693, 693, 693, 693, 693, 693, 693, 693, 693, 693, 693, 693, 693, 693, 693, 693, 693, 693, 693, 693, 693, 693, 693, 693, 693, 693, 693, 693, 693, 693, 693, 693, 693, 693, 693, 693, 693, 693, 693, 693, 693, 693, 693, 693, 693, 693, 693, 693, 693, 693, 693, 693, 693, 693, 693, 693, 693, 693, 693, 693, 693, 693, 693, 693, 693, 693, 693, 693, 693, 693, 693, 693, 693, 693, 693, 693, 693, 693, 693, 693, 693, 693, 693, 693, 693, 693, 693, 693, 693, 693, 693, 693, 693, 693, 693, 693, 693, 693, 693, 693, 693, 693, 693, 693, 693, 693, 693, 693, 693, 693, 693, 693, 693, 693, 693, 693, 693, 693, 693, 693, 693, 693, 693, 693, 693, 693, 693, 693, 693, 693, 693, 693, 693, 693, 693, 693, 693, 693, 693, 693, 693, 693, 693, 693, 693, 693, 693, 693, 693, 693, 693, 693}, + {692, 692, 692, 692, 692, 692, 692, 692, 692, 692, 692, 692, 692, 692, 692, 692, 692, 692, 692, 692, 692, 692, 692, 692, 692, 692, 692, 692, 692, 692, 692, 692, 692, 692, 692, 692, 692, 692, 692, 692, 692, 692, 692, 692, 692, 692, 692, 692, 692, 692, 692, 692, 692, 692, 692, 692, 692, 692, 692, 692, 692, 692, 692, 692, 692, 692, 692, 692, 692, 692, 692, 692, 692, 692, 692, 692, 692, 692, 692, 692, 692, 692, 692, 692, 692, 692, 692, 692, 692, 692, 692, 692, 692, 692, 692, 692, 692, 692, 692, 692, 692, 692, 692, 692, 692, 692, 692, 692, 692, 692, 692, 692, 692, 692, 692, 692, 692, 692, 692, 692, 692, 692, 692, 692, 692, 692, 692, 692, 692, 692, 692, 692, 692, 692, 692, 692, 692, 692, 692, 692, 692, 692, 692, 692, 692, 692, 692, 692, 692, 692, 692, 692, 692, 692, 692, 692, 692, 692, 692, 692, 692, 692, 692, 692, 692, 692, 692, 692, 692, 692, 692, 692, 692, 692, 692, 692, 692, 692, 692, 692, 692, 692, 692, 692, 692, 692, 692, 692, 692, 692, 692, 692, 692, 692, 692, 692, 692, 692, 692, 692, 692, 692, 692, 692, 692, 692, 692, 692, 692, 692, 692, 692, 692, 692, 692, 692, 692, 692, 692, 692, 692, 692, 692, 692, 692, 692, 692, 692, 692, 692, 692, 692, 692, 692, 692, 692, 692, 692, 692, 692, 692, 692, 692, 692, 692, 692, 692, 692, 692, 692, 692, 692, 692, 692, 692, 692, 692, 692, 692, 692, 692, 692, 692, 692, 692, 692, 692, 692, 692, 692, 692, 692, 692, 692, 692, 692, 692, 692, 692, 692, 692, 692, 692, 692, 692, 692, 692, 692, 692, 692, 692, 692, 692, 692, 692, 692, 692, 692, 692, 692, 692, 692, 692, 692, 692, 692, 692, 692, 692, 692, 692, 692, 692, 692, 692, 692, 692, 692, 692, 692, 692, 692, 692, 692, 692, 692, 692, 692, 692, 692, 692, 692, 692, 692, 692, 692, 692, 692, 692, 692, 692, 692, 692, 692, 692, 692, 692, 692, 692, 692, 692, 692, 692, 692, 692, 692, 692, 692, 692, 692, 692, 692, 692, 692, 692, 692, 692, 692, 692, 692, 692, 692, 692, 692, 692, 692, 692, 692, 692, 692, 692, 692, 692, 692, 692, 692, 692, 692, 692, 692, 692, 692, 692, 692, 692, 692, 692, 692, 692, 692, 692, 692, 692, 692, 692, 692, 692, 692, 692, 692, 692, 692, 692, 692, 692, 692, 692, 692, 692, 692, 692, 692, 692, 692, 692, 692, 692, 692, 692, 692, 692, 692, 692, 692, 692, 692, 692, 692, 692, 692, 692, 692, 692, 692, 692, 692, 692, 692, 692, 692, 692, 692, 692, 692, 692, 692, 692, 692, 692, 692, 692, 692, 692, 692, 692, 
692, 692, 692, 692, 692, 692, 692, 692, 692, 692, 692, 692, 692, 692, 692, 692, 692, 692, 692, 692, 692, 692, 692, 692, 692, 692, 692, 692, 692, 692, 692, 692, 692, 692, 692, 692, 692, 692, 692, 692, 692, 692, 692, 692, 692, 692, 692, 692, 692, 692, 692, 692, 692, 692, 692, 692, 692, 692}, + // 250 + {691, 691, 691, 691, 691, 691, 691, 691, 691, 691, 691, 691, 691, 691, 691, 691, 691, 691, 691, 691, 691, 691, 691, 691, 691, 691, 691, 691, 691, 691, 691, 691, 691, 691, 691, 691, 691, 691, 691, 691, 691, 691, 691, 691, 691, 691, 691, 691, 691, 691, 691, 691, 691, 691, 691, 691, 691, 691, 691, 691, 691, 691, 691, 691, 691, 691, 691, 691, 691, 691, 691, 691, 691, 691, 691, 691, 691, 691, 691, 691, 691, 691, 691, 691, 691, 691, 691, 691, 691, 691, 691, 691, 691, 691, 691, 691, 691, 691, 691, 691, 691, 691, 691, 691, 691, 691, 691, 691, 691, 691, 691, 691, 691, 691, 691, 691, 691, 691, 691, 691, 691, 691, 691, 691, 691, 691, 691, 691, 691, 691, 691, 691, 691, 691, 691, 691, 691, 691, 691, 691, 691, 691, 691, 691, 691, 691, 691, 691, 691, 691, 691, 691, 691, 691, 691, 691, 691, 691, 691, 691, 691, 691, 691, 691, 691, 691, 691, 691, 691, 691, 691, 691, 691, 691, 691, 691, 691, 691, 691, 691, 691, 691, 691, 691, 691, 691, 691, 691, 691, 691, 691, 691, 691, 691, 691, 691, 691, 691, 691, 691, 691, 691, 691, 691, 691, 691, 691, 691, 691, 691, 691, 691, 691, 691, 691, 691, 691, 691, 691, 691, 691, 691, 691, 691, 691, 691, 691, 691, 691, 691, 691, 691, 691, 691, 691, 691, 691, 691, 691, 691, 691, 691, 691, 691, 691, 691, 691, 691, 691, 691, 691, 691, 691, 691, 691, 691, 691, 691, 691, 691, 691, 691, 691, 691, 691, 691, 691, 691, 691, 691, 691, 691, 691, 691, 691, 691, 691, 691, 691, 691, 691, 691, 691, 691, 691, 691, 691, 691, 691, 691, 691, 691, 691, 691, 691, 691, 691, 691, 691, 691, 691, 691, 691, 691, 691, 691, 691, 691, 691, 691, 691, 691, 691, 691, 691, 691, 691, 691, 691, 691, 691, 691, 691, 691, 691, 691, 691, 691, 691, 691, 691, 691, 691, 691, 691, 691, 691, 691, 691, 691, 691, 691, 691, 691, 691, 691, 691, 691, 691, 691, 691, 691, 691, 691, 691, 691, 691, 691, 691, 691, 691, 691, 691, 691, 691, 691, 691, 691, 691, 691, 691, 691, 691, 691, 691, 691, 691, 691, 691, 691, 691, 691, 691, 691, 691, 691, 691, 691, 691, 691, 691, 691, 691, 691, 691, 691, 691, 691, 691, 691, 691, 691, 691, 691, 691, 691, 691, 691, 691, 691, 691, 691, 691, 691, 691, 691, 691, 691, 691, 691, 691, 691, 691, 691, 691, 691, 691, 691, 691, 691, 691, 691, 691, 691, 691, 691, 691, 691, 691, 691, 691, 691, 691, 691, 691, 691, 691, 691, 691, 691, 691, 691, 691, 691, 691, 691, 691, 691, 691, 691, 691, 691, 691, 691, 691, 691, 691, 691, 691, 691, 691, 691, 691, 691, 691, 691, 691, 691, 691, 691, 691, 691, 691, 691, 691, 691, 691, 691, 691, 691, 691, 691, 691, 691, 691, 691, 691, 691, 691, 691, 691, 691, 691, 691, 691, 691, 691, 691, 691, 691, 691, 691, 691, 691, 691, 691, 691, 691, 691, 691, 691, 691, 691}, + {690, 690, 690, 690, 690, 690, 690, 690, 690, 690, 690, 690, 690, 690, 690, 690, 690, 690, 690, 690, 690, 690, 690, 690, 690, 690, 690, 690, 690, 690, 690, 690, 690, 690, 690, 690, 690, 690, 690, 690, 690, 690, 690, 690, 690, 690, 690, 690, 690, 690, 690, 690, 690, 690, 690, 690, 690, 690, 690, 690, 690, 690, 690, 690, 690, 690, 690, 690, 690, 690, 690, 690, 690, 690, 690, 690, 690, 690, 690, 690, 690, 690, 690, 690, 690, 690, 690, 690, 690, 690, 690, 690, 690, 690, 690, 690, 690, 690, 690, 690, 690, 690, 690, 690, 690, 690, 690, 690, 690, 690, 690, 690, 690, 690, 690, 690, 690, 690, 690, 690, 690, 690, 690, 690, 690, 690, 
690, 690, 690, 690, 690, 690, 690, 690, 690, 690, 690, 690, 690, 690, 690, 690, 690, 690, 690, 690, 690, 690, 690, 690, 690, 690, 690, 690, 690, 690, 690, 690, 690, 690, 690, 690, 690, 690, 690, 690, 690, 690, 690, 690, 690, 690, 690, 690, 690, 690, 690, 690, 690, 690, 690, 690, 690, 690, 690, 690, 690, 690, 690, 690, 690, 690, 690, 690, 690, 690, 690, 690, 690, 690, 690, 690, 690, 690, 690, 690, 690, 690, 690, 690, 690, 690, 690, 690, 690, 690, 690, 690, 690, 690, 690, 690, 690, 690, 690, 690, 690, 690, 690, 690, 690, 690, 690, 690, 690, 690, 690, 690, 690, 690, 690, 690, 690, 690, 690, 690, 690, 690, 690, 690, 690, 690, 690, 690, 690, 690, 690, 690, 690, 690, 690, 690, 690, 690, 690, 690, 690, 690, 690, 690, 690, 690, 690, 690, 690, 690, 690, 690, 690, 690, 690, 690, 690, 690, 690, 690, 690, 690, 690, 690, 690, 690, 690, 690, 690, 690, 690, 690, 690, 690, 690, 690, 690, 690, 690, 690, 690, 690, 690, 690, 690, 690, 690, 690, 690, 690, 690, 690, 690, 690, 690, 690, 690, 690, 690, 690, 690, 690, 690, 690, 690, 690, 690, 690, 690, 690, 690, 690, 690, 690, 690, 690, 690, 690, 690, 690, 690, 690, 690, 690, 690, 690, 690, 690, 690, 690, 690, 690, 690, 690, 690, 690, 690, 690, 690, 690, 690, 690, 690, 690, 690, 690, 690, 690, 690, 690, 690, 690, 690, 690, 690, 690, 690, 690, 690, 690, 690, 690, 690, 690, 690, 690, 690, 690, 690, 690, 690, 690, 690, 690, 690, 690, 690, 690, 690, 690, 690, 690, 690, 690, 690, 690, 690, 690, 690, 690, 690, 690, 690, 690, 690, 690, 690, 690, 690, 690, 690, 690, 690, 690, 690, 690, 690, 690, 690, 690, 690, 690, 690, 690, 690, 690, 690, 690, 690, 690, 690, 690, 690, 690, 690, 690, 690, 690, 690, 690, 690, 690, 690, 690, 690, 690, 690, 690, 690, 690, 690, 690, 690, 690, 690, 690, 690, 690, 690, 690, 690, 690, 690, 690, 690, 690, 690, 690, 690, 690, 690, 690, 690, 690, 690, 690, 690, 690, 690, 690, 690, 690, 690, 690, 690, 690, 690, 690, 690, 690, 690, 690, 690, 690, 690, 690, 690, 690, 690, 690, 690, 690, 690, 690, 690, 690, 690}, + {689, 689, 689, 689, 689, 689, 689, 689, 689, 689, 689, 689, 689, 689, 689, 689, 689, 689, 689, 689, 689, 689, 689, 689, 689, 689, 689, 689, 689, 689, 689, 689, 689, 689, 689, 689, 689, 689, 689, 689, 689, 689, 689, 689, 689, 689, 689, 689, 689, 689, 689, 689, 689, 689, 689, 689, 689, 689, 689, 689, 689, 689, 689, 689, 689, 689, 689, 689, 689, 689, 689, 689, 689, 689, 689, 689, 689, 689, 689, 689, 689, 689, 689, 689, 689, 689, 689, 689, 689, 689, 689, 689, 689, 689, 689, 689, 689, 689, 689, 689, 689, 689, 689, 689, 689, 689, 689, 689, 689, 689, 689, 689, 689, 689, 689, 689, 689, 689, 689, 689, 689, 689, 689, 689, 689, 689, 689, 689, 689, 689, 689, 689, 689, 689, 689, 689, 689, 689, 689, 689, 689, 689, 689, 689, 689, 689, 689, 689, 689, 689, 689, 689, 689, 689, 689, 689, 689, 689, 689, 689, 689, 689, 689, 689, 689, 689, 689, 689, 689, 689, 689, 689, 689, 689, 689, 689, 689, 689, 689, 689, 689, 689, 689, 689, 689, 689, 689, 689, 689, 689, 689, 689, 689, 689, 689, 689, 689, 689, 689, 689, 689, 689, 689, 689, 689, 689, 689, 689, 689, 689, 689, 689, 689, 689, 689, 689, 689, 689, 689, 689, 689, 689, 689, 689, 689, 689, 689, 689, 689, 689, 689, 689, 689, 689, 689, 689, 689, 689, 689, 689, 689, 689, 689, 689, 689, 689, 689, 689, 689, 689, 689, 689, 689, 689, 689, 689, 689, 689, 689, 689, 689, 689, 689, 689, 689, 689, 689, 689, 689, 689, 689, 689, 689, 689, 689, 689, 689, 689, 689, 689, 689, 689, 689, 689, 689, 689, 689, 689, 689, 689, 689, 689, 689, 689, 689, 689, 689, 689, 689, 689, 689, 689, 689, 689, 689, 689, 689, 689, 689, 689, 689, 689, 689, 
689, 689, 689, 689, 689, 689, 689, 689, 689, 689, 689, 689, 689, 689, 689, 689, 689, 689, 689, 689, 689, 689, 689, 689, 689, 689, 689, 689, 689, 689, 689, 689, 689, 689, 689, 689, 689, 689, 689, 689, 689, 689, 689, 689, 689, 689, 689, 689, 689, 689, 689, 689, 689, 689, 689, 689, 689, 689, 689, 689, 689, 689, 689, 689, 689, 689, 689, 689, 689, 689, 689, 689, 689, 689, 689, 689, 689, 689, 689, 689, 689, 689, 689, 689, 689, 689, 689, 689, 689, 689, 689, 689, 689, 689, 689, 689, 689, 689, 689, 689, 689, 689, 689, 689, 689, 689, 689, 689, 689, 689, 689, 689, 689, 689, 689, 689, 689, 689, 689, 689, 689, 689, 689, 689, 689, 689, 689, 689, 689, 689, 689, 689, 689, 689, 689, 689, 689, 689, 689, 689, 689, 689, 689, 689, 689, 689, 689, 689, 689, 689, 689, 689, 689, 689, 689, 689, 689, 689, 689, 689, 689, 689, 689, 689, 689, 689, 689, 689, 689, 689, 689, 689, 689, 689, 689, 689, 689, 689, 689, 689, 689, 689, 689, 689, 689, 689, 689, 689, 689, 689, 689, 689, 689, 689, 689, 689, 689, 689, 689, 689, 689, 689, 689, 689, 689, 689, 689, 689, 689, 689}, + {688, 688, 688, 688, 688, 688, 688, 688, 688, 688, 688, 688, 688, 688, 688, 688, 688, 688, 688, 688, 688, 688, 688, 688, 688, 688, 688, 688, 688, 688, 688, 688, 688, 688, 688, 688, 688, 688, 688, 688, 688, 688, 688, 688, 688, 688, 688, 688, 688, 688, 688, 688, 688, 688, 688, 688, 688, 688, 688, 688, 688, 688, 688, 688, 688, 688, 688, 688, 688, 688, 688, 688, 688, 688, 688, 688, 688, 688, 688, 688, 688, 688, 688, 688, 688, 688, 688, 688, 688, 688, 688, 688, 688, 688, 688, 688, 688, 688, 688, 688, 688, 688, 688, 688, 688, 688, 688, 688, 688, 688, 688, 688, 688, 688, 688, 688, 688, 688, 688, 688, 688, 688, 688, 688, 688, 688, 688, 688, 688, 688, 688, 688, 688, 688, 688, 688, 688, 688, 688, 688, 688, 688, 688, 688, 688, 688, 688, 688, 688, 688, 688, 688, 688, 688, 688, 688, 688, 688, 688, 688, 688, 688, 688, 688, 688, 688, 688, 688, 688, 688, 688, 688, 688, 688, 688, 688, 688, 688, 688, 688, 688, 688, 688, 688, 688, 688, 688, 688, 688, 688, 688, 688, 688, 688, 688, 688, 688, 688, 688, 688, 688, 688, 688, 688, 688, 688, 688, 688, 688, 688, 688, 688, 688, 688, 688, 688, 688, 688, 688, 688, 688, 688, 688, 688, 688, 688, 688, 688, 688, 688, 688, 688, 688, 688, 688, 688, 688, 688, 688, 688, 688, 688, 688, 688, 688, 688, 688, 688, 688, 688, 688, 688, 688, 688, 688, 688, 688, 688, 688, 688, 688, 688, 688, 688, 688, 688, 688, 688, 688, 688, 688, 688, 688, 688, 688, 688, 688, 688, 688, 688, 688, 688, 688, 688, 688, 688, 688, 688, 688, 688, 688, 688, 688, 688, 688, 688, 688, 688, 688, 688, 688, 688, 688, 688, 688, 688, 688, 688, 688, 688, 688, 688, 688, 688, 688, 688, 688, 688, 688, 688, 688, 688, 688, 688, 688, 688, 688, 688, 688, 688, 688, 688, 688, 688, 688, 688, 688, 688, 688, 688, 688, 688, 688, 688, 688, 688, 688, 688, 688, 688, 688, 688, 688, 688, 688, 688, 688, 688, 688, 688, 688, 688, 688, 688, 688, 688, 688, 688, 688, 688, 688, 688, 688, 688, 688, 688, 688, 688, 688, 688, 688, 688, 688, 688, 688, 688, 688, 688, 688, 688, 688, 688, 688, 688, 688, 688, 688, 688, 688, 688, 688, 688, 688, 688, 688, 688, 688, 688, 688, 688, 688, 688, 688, 688, 688, 688, 688, 688, 688, 688, 688, 688, 688, 688, 688, 688, 688, 688, 688, 688, 688, 688, 688, 688, 688, 688, 688, 688, 688, 688, 688, 688, 688, 688, 688, 688, 688, 688, 688, 688, 688, 688, 688, 688, 688, 688, 688, 688, 688, 688, 688, 688, 688, 688, 688, 688, 688, 688, 688, 688, 688, 688, 688, 688, 688, 688, 688, 688, 688, 688, 688, 688, 688, 688, 688, 688, 688, 688, 688, 688, 688, 688, 688, 688, 688, 688, 688, 688, 688, 688, 
688, 688, 688, 688, 688, 688, 688, 688, 688, 688, 688, 688, 688, 688, 688, 688, 688, 688, 688, 688, 688, 688, 688}, + {687, 687, 687, 687, 687, 687, 687, 687, 687, 687, 687, 687, 687, 687, 687, 687, 687, 687, 687, 687, 687, 687, 687, 687, 687, 687, 687, 687, 687, 687, 687, 687, 687, 687, 687, 687, 687, 687, 687, 687, 687, 687, 687, 687, 687, 687, 687, 687, 687, 687, 687, 687, 687, 687, 687, 687, 687, 687, 687, 687, 687, 687, 687, 687, 687, 687, 687, 687, 687, 687, 687, 687, 687, 687, 687, 687, 687, 687, 687, 687, 687, 687, 687, 687, 687, 687, 687, 687, 687, 687, 687, 687, 687, 687, 687, 687, 687, 687, 687, 687, 687, 687, 687, 687, 687, 687, 687, 687, 687, 687, 687, 687, 687, 687, 687, 687, 687, 687, 687, 687, 687, 687, 687, 687, 687, 687, 687, 687, 687, 687, 687, 687, 687, 687, 687, 687, 687, 687, 687, 687, 687, 687, 687, 687, 687, 687, 687, 687, 687, 687, 687, 687, 687, 687, 687, 687, 687, 687, 687, 687, 687, 687, 687, 687, 687, 687, 687, 687, 687, 687, 687, 687, 687, 687, 687, 687, 687, 687, 687, 687, 687, 687, 687, 687, 687, 687, 687, 687, 687, 687, 687, 687, 687, 687, 687, 687, 687, 687, 687, 687, 687, 687, 687, 687, 687, 687, 687, 687, 687, 687, 687, 687, 687, 687, 687, 687, 687, 687, 687, 687, 687, 687, 687, 687, 687, 687, 687, 687, 687, 687, 687, 687, 687, 687, 687, 687, 687, 687, 687, 687, 687, 687, 687, 687, 687, 687, 687, 687, 687, 687, 687, 687, 687, 687, 687, 687, 687, 687, 687, 687, 687, 687, 687, 687, 687, 687, 687, 687, 687, 687, 687, 687, 687, 687, 687, 687, 687, 687, 687, 687, 687, 687, 687, 687, 687, 687, 687, 687, 687, 687, 687, 687, 687, 687, 687, 687, 687, 687, 687, 687, 687, 687, 687, 687, 687, 687, 687, 687, 687, 687, 687, 687, 687, 687, 687, 687, 687, 687, 687, 687, 687, 687, 687, 687, 687, 687, 687, 687, 687, 687, 687, 687, 687, 687, 687, 687, 687, 687, 687, 687, 687, 687, 687, 687, 687, 687, 687, 687, 687, 687, 687, 687, 687, 687, 687, 687, 687, 687, 687, 687, 687, 687, 687, 687, 687, 687, 687, 687, 687, 687, 687, 687, 687, 687, 687, 687, 687, 687, 687, 687, 687, 687, 687, 687, 687, 687, 687, 687, 687, 687, 687, 687, 687, 687, 687, 687, 687, 687, 687, 687, 687, 687, 687, 687, 687, 687, 687, 687, 687, 687, 687, 687, 687, 687, 687, 687, 687, 687, 687, 687, 687, 687, 687, 687, 687, 687, 687, 687, 687, 687, 687, 687, 687, 687, 687, 687, 687, 687, 687, 687, 687, 687, 687, 687, 687, 687, 687, 687, 687, 687, 687, 687, 687, 687, 687, 687, 687, 687, 687, 687, 687, 687, 687, 687, 687, 687, 687, 687, 687, 687, 687, 687, 687, 687, 687, 687, 687, 687, 687, 687, 687, 687, 687, 687, 687, 687, 687, 687, 687, 687, 687, 687, 687, 687, 687, 687, 687, 687, 687, 687, 687, 687, 687, 687, 687, 687, 687, 687, 687, 687, 687, 687, 687, 687, 687, 687, 687, 687, 687, 687, 687, 687, 687}, + // 255 + {686, 686, 686, 686, 686, 686, 686, 686, 686, 686, 686, 686, 686, 686, 686, 686, 686, 686, 686, 686, 686, 686, 686, 686, 686, 686, 686, 686, 686, 686, 686, 686, 686, 686, 686, 686, 686, 686, 686, 686, 686, 686, 686, 686, 686, 686, 686, 686, 686, 686, 686, 686, 686, 686, 686, 686, 686, 686, 686, 686, 686, 686, 686, 686, 686, 686, 686, 686, 686, 686, 686, 686, 686, 686, 686, 686, 686, 686, 686, 686, 686, 686, 686, 686, 686, 686, 686, 686, 686, 686, 686, 686, 686, 686, 686, 686, 686, 686, 686, 686, 686, 686, 686, 686, 686, 686, 686, 686, 686, 686, 686, 686, 686, 686, 686, 686, 686, 686, 686, 686, 686, 686, 686, 686, 686, 686, 686, 686, 686, 686, 686, 686, 686, 686, 686, 686, 686, 686, 686, 686, 686, 686, 686, 686, 686, 686, 686, 686, 686, 686, 686, 686, 686, 686, 686, 686, 686, 686, 686, 686, 686, 
686, 686, 686, 686, 686, 686, 686, 686, 686, 686, 686, 686, 686, 686, 686, 686, 686, 686, 686, 686, 686, 686, 686, 686, 686, 686, 686, 686, 686, 686, 686, 686, 686, 686, 686, 686, 686, 686, 686, 686, 686, 686, 686, 686, 686, 686, 686, 686, 686, 686, 686, 686, 686, 686, 686, 686, 686, 686, 686, 686, 686, 686, 686, 686, 686, 686, 686, 686, 686, 686, 686, 686, 686, 686, 686, 686, 686, 686, 686, 686, 686, 686, 686, 686, 686, 686, 686, 686, 686, 686, 686, 686, 686, 686, 686, 686, 686, 686, 686, 686, 686, 686, 686, 686, 686, 686, 686, 686, 686, 686, 686, 686, 686, 686, 686, 686, 686, 686, 686, 686, 686, 686, 686, 686, 686, 686, 686, 686, 686, 686, 686, 686, 686, 686, 686, 686, 686, 686, 686, 686, 686, 686, 686, 686, 686, 686, 686, 686, 686, 686, 686, 686, 686, 686, 686, 686, 686, 686, 686, 686, 686, 686, 686, 686, 686, 686, 686, 686, 686, 686, 686, 686, 686, 686, 686, 686, 686, 686, 686, 686, 686, 686, 686, 686, 686, 686, 686, 686, 686, 686, 686, 686, 686, 686, 686, 686, 686, 686, 686, 686, 686, 686, 686, 686, 686, 686, 686, 686, 686, 686, 686, 686, 686, 686, 686, 686, 686, 686, 686, 686, 686, 686, 686, 686, 686, 686, 686, 686, 686, 686, 686, 686, 686, 686, 686, 686, 686, 686, 686, 686, 686, 686, 686, 686, 686, 686, 686, 686, 686, 686, 686, 686, 686, 686, 686, 686, 686, 686, 686, 686, 686, 686, 686, 686, 686, 686, 686, 686, 686, 686, 686, 686, 686, 686, 686, 686, 686, 686, 686, 686, 686, 686, 686, 686, 686, 686, 686, 686, 686, 686, 686, 686, 686, 686, 686, 686, 686, 686, 686, 686, 686, 686, 686, 686, 686, 686, 686, 686, 686, 686, 686, 686, 686, 686, 686, 686, 686, 686, 686, 686, 686, 686, 686, 686, 686, 686, 686, 686, 686, 686, 686, 686, 686, 686, 686, 686, 686, 686, 686, 686, 686, 686, 686, 686, 686, 686, 686, 686, 686, 686, 686, 686, 686, 686, 686, 686, 686, 686, 686, 686, 686, 686}, + {685, 685, 685, 685, 685, 685, 685, 685, 685, 685, 685, 685, 685, 685, 685, 685, 685, 685, 685, 685, 685, 685, 685, 685, 685, 685, 685, 685, 685, 685, 685, 685, 685, 685, 685, 685, 685, 685, 685, 685, 685, 685, 685, 685, 685, 685, 685, 685, 685, 685, 685, 685, 685, 685, 685, 685, 685, 685, 685, 685, 685, 685, 685, 685, 685, 685, 685, 685, 685, 685, 685, 685, 685, 685, 685, 685, 685, 685, 685, 685, 685, 685, 685, 685, 685, 685, 685, 685, 685, 685, 685, 685, 685, 685, 685, 685, 685, 685, 685, 685, 685, 685, 685, 685, 685, 685, 685, 685, 685, 685, 685, 685, 685, 685, 685, 685, 685, 685, 685, 685, 685, 685, 685, 685, 685, 685, 685, 685, 685, 685, 685, 685, 685, 685, 685, 685, 685, 685, 685, 685, 685, 685, 685, 685, 685, 685, 685, 685, 685, 685, 685, 685, 685, 685, 685, 685, 685, 685, 685, 685, 685, 685, 685, 685, 685, 685, 685, 685, 685, 685, 685, 685, 685, 685, 685, 685, 685, 685, 685, 685, 685, 685, 685, 685, 685, 685, 685, 685, 685, 685, 685, 685, 685, 685, 685, 685, 685, 685, 685, 685, 685, 685, 685, 685, 685, 685, 685, 685, 685, 685, 685, 685, 685, 685, 685, 685, 685, 685, 685, 685, 685, 685, 685, 685, 685, 685, 685, 685, 685, 685, 685, 685, 685, 685, 685, 685, 685, 685, 685, 685, 685, 685, 685, 685, 685, 685, 685, 685, 685, 685, 685, 685, 685, 685, 685, 685, 685, 685, 685, 685, 685, 685, 685, 685, 685, 685, 685, 685, 685, 685, 685, 685, 685, 685, 685, 685, 685, 685, 685, 685, 685, 685, 685, 685, 685, 685, 685, 685, 685, 685, 685, 685, 685, 685, 685, 685, 685, 685, 685, 685, 685, 685, 685, 685, 685, 685, 685, 685, 685, 685, 685, 685, 685, 685, 685, 685, 685, 685, 685, 685, 685, 685, 685, 685, 685, 685, 685, 685, 685, 685, 685, 685, 685, 685, 685, 685, 685, 685, 685, 685, 685, 685, 685, 685, 685, 685, 685, 685, 
685, 685, 685, 685, 685, 685, 685, 685, 685, 685, 685, 685, 685, 685, 685, 685, 685, 685, 685, 685, 685, 685, 685, 685, 685, 685, 685, 685, 685, 685, 685, 685, 685, 685, 685, 685, 685, 685, 685, 685, 685, 685, 685, 685, 685, 685, 685, 685, 685, 685, 685, 685, 685, 685, 685, 685, 685, 685, 685, 685, 685, 685, 685, 685, 685, 685, 685, 685, 685, 685, 685, 685, 685, 685, 685, 685, 685, 685, 685, 685, 685, 685, 685, 685, 685, 685, 685, 685, 685, 685, 685, 685, 685, 685, 685, 685, 685, 685, 685, 685, 685, 685, 685, 685, 685, 685, 685, 685, 685, 685, 685, 685, 685, 685, 685, 685, 685, 685, 685, 685, 685, 685, 685, 685, 685, 685, 685, 685, 685, 685, 685, 685, 685, 685, 685, 685, 685, 685, 685, 685, 685, 685, 685, 685, 685, 685, 685, 685, 685, 685, 685, 685, 685, 685, 685, 685, 685, 685, 685, 685, 685, 685, 685, 685, 685, 685, 685, 685, 685, 685, 685, 685, 685, 685, 685}, + {684, 684, 684, 684, 684, 684, 684, 684, 684, 684, 684, 684, 684, 684, 684, 684, 684, 684, 684, 684, 684, 684, 684, 684, 684, 684, 684, 684, 684, 684, 684, 684, 684, 684, 684, 684, 684, 684, 684, 684, 684, 684, 684, 684, 684, 684, 684, 684, 684, 684, 684, 684, 684, 684, 684, 684, 684, 684, 684, 684, 684, 684, 684, 684, 684, 684, 684, 684, 684, 684, 684, 684, 684, 684, 684, 684, 684, 684, 684, 684, 684, 684, 684, 684, 684, 684, 684, 684, 684, 684, 684, 684, 684, 684, 684, 684, 684, 684, 684, 684, 684, 684, 684, 684, 684, 684, 684, 684, 684, 684, 684, 684, 684, 684, 684, 684, 684, 684, 684, 684, 684, 684, 684, 684, 684, 684, 684, 684, 684, 684, 684, 684, 684, 684, 684, 684, 684, 684, 684, 684, 684, 684, 684, 684, 684, 684, 684, 684, 684, 684, 684, 684, 684, 684, 684, 684, 684, 684, 684, 684, 684, 684, 684, 684, 684, 684, 684, 684, 684, 684, 684, 684, 684, 684, 684, 684, 684, 684, 684, 684, 684, 684, 684, 684, 684, 684, 684, 684, 684, 684, 684, 684, 684, 684, 684, 684, 684, 684, 684, 684, 684, 684, 684, 684, 684, 684, 684, 684, 684, 684, 684, 684, 684, 684, 684, 684, 684, 684, 684, 684, 684, 684, 684, 684, 684, 684, 684, 684, 684, 684, 684, 684, 684, 684, 684, 684, 684, 684, 684, 684, 684, 684, 684, 684, 684, 684, 684, 684, 684, 684, 684, 684, 684, 684, 684, 684, 684, 684, 684, 684, 684, 684, 684, 684, 684, 684, 684, 684, 684, 684, 684, 684, 684, 684, 684, 684, 684, 684, 684, 684, 684, 684, 684, 684, 684, 684, 684, 684, 684, 684, 684, 684, 684, 684, 684, 684, 684, 684, 684, 684, 684, 684, 684, 684, 684, 684, 684, 684, 684, 684, 684, 684, 684, 684, 684, 684, 684, 684, 684, 684, 684, 684, 684, 684, 684, 684, 684, 684, 684, 684, 684, 684, 684, 684, 684, 684, 684, 684, 684, 684, 684, 684, 684, 684, 684, 684, 684, 684, 684, 684, 684, 684, 684, 684, 684, 684, 684, 684, 684, 684, 684, 684, 684, 684, 684, 684, 684, 684, 684, 684, 684, 684, 684, 684, 684, 684, 684, 684, 684, 684, 684, 684, 684, 684, 684, 684, 684, 684, 684, 684, 684, 684, 684, 684, 684, 684, 684, 684, 684, 684, 684, 684, 684, 684, 684, 684, 684, 684, 684, 684, 684, 684, 684, 684, 684, 684, 684, 684, 684, 684, 684, 684, 684, 684, 684, 684, 684, 684, 684, 684, 684, 684, 684, 684, 684, 684, 684, 684, 684, 684, 684, 684, 684, 684, 684, 684, 684, 684, 684, 684, 684, 684, 684, 684, 684, 684, 684, 684, 684, 684, 684, 684, 684, 684, 684, 684, 684, 684, 684, 684, 684, 684, 684, 684, 684, 684, 684, 684, 684, 684, 684, 684, 684, 684, 684, 684, 684, 684, 684, 684, 684, 684, 684, 684, 684, 684, 684, 684, 684, 684, 684, 684, 684, 684, 684, 684, 684, 684, 684, 684, 684, 684, 684, 684, 684, 684, 684, 684, 684, 684, 684, 684, 684}, + {683, 683, 683, 683, 683, 683, 683, 683, 683, 683, 683, 
683, 683, 683, 683, 683, 683, 683, 683, 683, 683, 683, 683, 683, 683, 683, 683, 683, 683, 683, 683, 683, 683, 683, 683, 683, 683, 683, 683, 683, 683, 683, 683, 683, 683, 683, 683, 683, 683, 683, 683, 683, 683, 683, 683, 683, 683, 683, 683, 683, 683, 683, 683, 683, 683, 683, 683, 683, 683, 683, 683, 683, 683, 683, 683, 683, 683, 683, 683, 683, 683, 683, 683, 683, 683, 683, 683, 683, 683, 683, 683, 683, 683, 683, 683, 683, 683, 683, 683, 683, 683, 683, 683, 683, 683, 683, 683, 683, 683, 683, 683, 683, 683, 683, 683, 683, 683, 683, 683, 683, 683, 683, 683, 683, 683, 683, 683, 683, 683, 683, 683, 683, 683, 683, 683, 683, 683, 683, 683, 683, 683, 683, 683, 683, 683, 683, 683, 683, 683, 683, 683, 683, 683, 683, 683, 683, 683, 683, 683, 683, 683, 683, 683, 683, 683, 683, 683, 683, 683, 683, 683, 683, 683, 683, 683, 683, 683, 683, 683, 683, 683, 683, 683, 683, 683, 683, 683, 683, 683, 683, 683, 683, 683, 683, 683, 683, 683, 683, 683, 683, 683, 683, 683, 683, 683, 683, 683, 683, 683, 683, 683, 683, 683, 683, 683, 683, 683, 683, 683, 683, 683, 683, 683, 683, 683, 683, 683, 683, 683, 683, 683, 683, 683, 683, 683, 683, 683, 683, 683, 683, 683, 683, 683, 683, 683, 683, 683, 683, 683, 683, 683, 683, 683, 683, 683, 683, 683, 683, 683, 683, 683, 683, 683, 683, 683, 683, 683, 683, 683, 683, 683, 683, 683, 683, 683, 683, 683, 683, 683, 683, 683, 683, 683, 683, 683, 683, 683, 683, 683, 683, 683, 683, 683, 683, 683, 683, 683, 683, 683, 683, 683, 683, 683, 683, 683, 683, 683, 683, 683, 683, 683, 683, 683, 683, 683, 683, 683, 683, 683, 683, 683, 683, 683, 683, 683, 683, 683, 683, 683, 683, 683, 683, 683, 683, 683, 683, 683, 683, 683, 683, 683, 683, 683, 683, 683, 683, 683, 683, 683, 683, 683, 683, 683, 683, 683, 683, 683, 683, 683, 683, 683, 683, 683, 683, 683, 683, 683, 683, 683, 683, 683, 683, 683, 683, 683, 683, 683, 683, 683, 683, 683, 683, 683, 683, 683, 683, 683, 683, 683, 683, 683, 683, 683, 683, 683, 683, 683, 683, 683, 683, 683, 683, 683, 683, 683, 683, 683, 683, 683, 683, 683, 683, 683, 683, 683, 683, 683, 683, 683, 683, 683, 683, 683, 683, 683, 683, 683, 683, 683, 683, 683, 683, 683, 683, 683, 683, 683, 683, 683, 683, 683, 683, 683, 683, 683, 683, 683, 683, 683, 683, 683, 683, 683, 683, 683, 683, 683, 683, 683, 683, 683, 683, 683, 683, 683, 683, 683, 683, 683, 683, 683, 683, 683, 683, 683, 683, 683, 683, 683, 683, 683, 683, 683, 683, 683, 683, 683, 683, 683, 683, 683, 683, 683, 683, 683, 683, 683, 683, 683, 683, 683, 683, 683, 683, 683, 683, 683, 683, 683, 683, 683, 683, 683, 683, 683, 683, 683, 683, 683, 683, 683, 683, 683}, + {682, 682, 682, 682, 682, 682, 682, 682, 682, 682, 682, 682, 682, 682, 682, 682, 682, 682, 682, 682, 682, 682, 682, 682, 682, 682, 682, 682, 682, 682, 682, 682, 682, 682, 682, 682, 682, 682, 682, 682, 682, 682, 682, 682, 682, 682, 682, 682, 682, 682, 682, 682, 682, 682, 682, 682, 682, 682, 682, 682, 682, 682, 682, 682, 682, 682, 682, 682, 682, 682, 682, 682, 682, 682, 682, 682, 682, 682, 682, 682, 682, 682, 682, 682, 682, 682, 682, 682, 682, 682, 682, 682, 682, 682, 682, 682, 682, 682, 682, 682, 682, 682, 682, 682, 682, 682, 682, 682, 682, 682, 682, 682, 682, 682, 682, 682, 682, 682, 682, 682, 682, 682, 682, 682, 682, 682, 682, 682, 682, 682, 682, 682, 682, 682, 682, 682, 682, 682, 682, 682, 682, 682, 682, 682, 682, 682, 682, 682, 682, 682, 682, 682, 682, 682, 682, 682, 682, 682, 682, 682, 682, 682, 682, 682, 682, 682, 682, 682, 682, 682, 682, 682, 682, 682, 682, 682, 682, 682, 682, 682, 682, 682, 682, 682, 682, 682, 682, 682, 682, 682, 682, 682, 682, 682, 682, 682, 682, 682, 
682, 682, 682, 682, 682, 682, 682, 682, 682, 682, 682, 682, 682, 682, 682, 682, 682, 682, 682, 682, 682, 682, 682, 682, 682, 682, 682, 682, 682, 682, 682, 682, 682, 682, 682, 682, 682, 682, 682, 682, 682, 682, 682, 682, 682, 682, 682, 682, 682, 682, 682, 682, 682, 682, 682, 682, 682, 682, 682, 682, 682, 682, 682, 682, 682, 682, 682, 682, 682, 682, 682, 682, 682, 682, 682, 682, 682, 682, 682, 682, 682, 682, 682, 682, 682, 682, 682, 682, 682, 682, 682, 682, 682, 682, 682, 682, 682, 682, 682, 682, 682, 682, 682, 682, 682, 682, 682, 682, 682, 682, 682, 682, 682, 682, 682, 682, 682, 682, 682, 682, 682, 682, 682, 682, 682, 682, 682, 682, 682, 682, 682, 682, 682, 682, 682, 682, 682, 682, 682, 682, 682, 682, 682, 682, 682, 682, 682, 682, 682, 682, 682, 682, 682, 682, 682, 682, 682, 682, 682, 682, 682, 682, 682, 682, 682, 682, 682, 682, 682, 682, 682, 682, 682, 682, 682, 682, 682, 682, 682, 682, 682, 682, 682, 682, 682, 682, 682, 682, 682, 682, 682, 682, 682, 682, 682, 682, 682, 682, 682, 682, 682, 682, 682, 682, 682, 682, 682, 682, 682, 682, 682, 682, 682, 682, 682, 682, 682, 682, 682, 682, 682, 682, 682, 682, 682, 682, 682, 682, 682, 682, 682, 682, 682, 682, 682, 682, 682, 682, 682, 682, 682, 682, 682, 682, 682, 682, 682, 682, 682, 682, 682, 682, 682, 682, 682, 682, 682, 682, 682, 682, 682, 682, 682, 682, 682, 682, 682, 682, 682, 682, 682, 682, 682, 682, 682, 682, 682, 682, 682, 682, 682, 682, 682, 682, 682, 682, 682, 682, 682, 682, 682, 682, 682, 682, 682, 682, 682, 682, 682, 682, 682, 682, 682, 682, 682, 682, 682, 682, 682, 682, 682, 682, 682, 682, 682, 682, 682, 682, 682, 682, 682, 682, 682, 682, 682}, + // 260 + {681, 681, 681, 681, 681, 681, 681, 681, 681, 681, 681, 681, 681, 681, 681, 681, 681, 681, 681, 681, 681, 681, 681, 681, 681, 681, 681, 681, 681, 681, 681, 681, 681, 681, 681, 681, 681, 681, 681, 681, 681, 681, 681, 681, 681, 681, 681, 681, 681, 681, 681, 681, 681, 681, 681, 681, 681, 681, 681, 681, 681, 681, 681, 681, 681, 681, 681, 681, 681, 681, 681, 681, 681, 681, 681, 681, 681, 681, 681, 681, 681, 681, 681, 681, 681, 681, 681, 681, 681, 681, 681, 681, 681, 681, 681, 681, 681, 681, 681, 681, 681, 681, 681, 681, 681, 681, 681, 681, 681, 681, 681, 681, 681, 681, 681, 681, 681, 681, 681, 681, 681, 681, 681, 681, 681, 681, 681, 681, 681, 681, 681, 681, 681, 681, 681, 681, 681, 681, 681, 681, 681, 681, 681, 681, 681, 681, 681, 681, 681, 681, 681, 681, 681, 681, 681, 681, 681, 681, 681, 681, 681, 681, 681, 681, 681, 681, 681, 681, 681, 681, 681, 681, 681, 681, 681, 681, 681, 681, 681, 681, 681, 681, 681, 681, 681, 681, 681, 681, 681, 681, 681, 681, 681, 681, 681, 681, 681, 681, 681, 681, 681, 681, 681, 681, 681, 681, 681, 681, 681, 681, 681, 681, 681, 681, 681, 681, 681, 681, 681, 681, 681, 681, 681, 681, 681, 681, 681, 681, 681, 681, 681, 681, 681, 681, 681, 681, 681, 681, 681, 681, 681, 681, 681, 681, 681, 681, 681, 681, 681, 681, 681, 681, 681, 681, 681, 681, 681, 681, 681, 681, 681, 681, 681, 681, 681, 681, 681, 681, 681, 681, 681, 681, 681, 681, 681, 681, 681, 681, 681, 681, 681, 681, 681, 681, 681, 681, 681, 681, 681, 681, 681, 681, 681, 681, 681, 681, 681, 681, 681, 681, 681, 681, 681, 681, 681, 681, 681, 681, 681, 681, 681, 681, 681, 681, 681, 681, 681, 681, 681, 681, 681, 681, 681, 681, 681, 681, 681, 681, 681, 681, 681, 681, 681, 681, 681, 681, 681, 681, 681, 681, 681, 681, 681, 681, 681, 681, 681, 681, 681, 681, 681, 681, 681, 681, 681, 681, 681, 681, 681, 681, 681, 681, 681, 681, 681, 681, 681, 681, 681, 681, 681, 681, 681, 681, 681, 681, 681, 681, 681, 681, 681, 681, 681, 
681, 681, 681, 681, 681, 681, 681, 681, 681, 681, 681, 681, 681, 681, 681, 681, 681, 681, 681, 681, 681, 681, 681, 681, 681, 681, 681, 681, 681, 681, 681, 681, 681, 681, 681, 681, 681, 681, 681, 681, 681, 681, 681, 681, 681, 681, 681, 681, 681, 681, 681, 681, 681, 681, 681, 681, 681, 681, 681, 681, 681, 681, 681, 681, 681, 681, 681, 681, 681, 681, 681, 681, 681, 681, 681, 681, 681, 681, 681, 681, 681, 681, 681, 681, 681, 681, 681, 681, 681, 681, 681, 681, 681, 681, 681, 681, 681, 681, 681, 681, 681, 681, 681, 681, 681, 681, 681, 681, 681, 681, 681, 681, 681, 681, 681, 681, 681, 681, 681, 681, 681, 681, 681, 681, 681, 681, 681, 681, 681, 681, 681, 681, 681, 681, 681, 681, 681, 681, 681, 681}, + {680, 680, 680, 680, 680, 680, 680, 680, 680, 680, 680, 680, 680, 680, 680, 680, 680, 680, 680, 680, 680, 680, 680, 680, 680, 680, 680, 680, 680, 680, 680, 680, 680, 680, 680, 680, 680, 680, 680, 680, 680, 680, 680, 680, 680, 680, 680, 680, 680, 680, 680, 680, 680, 680, 680, 680, 680, 680, 680, 680, 680, 680, 680, 680, 680, 680, 680, 680, 680, 680, 680, 680, 680, 680, 680, 680, 680, 680, 680, 680, 680, 680, 680, 680, 680, 680, 680, 680, 680, 680, 680, 680, 680, 680, 680, 680, 680, 680, 680, 680, 680, 680, 680, 680, 680, 680, 680, 680, 680, 680, 680, 680, 680, 680, 680, 680, 680, 680, 680, 680, 680, 680, 680, 680, 680, 680, 680, 680, 680, 680, 680, 680, 680, 680, 680, 680, 680, 680, 680, 680, 680, 680, 680, 680, 680, 680, 680, 680, 680, 680, 680, 680, 680, 680, 680, 680, 680, 680, 680, 680, 680, 680, 680, 680, 680, 680, 680, 680, 680, 680, 680, 680, 680, 680, 680, 680, 680, 680, 680, 680, 680, 680, 680, 680, 680, 680, 680, 680, 680, 680, 680, 680, 680, 680, 680, 680, 680, 680, 680, 680, 680, 680, 680, 680, 680, 680, 680, 680, 680, 680, 680, 680, 680, 680, 680, 680, 680, 680, 680, 680, 680, 680, 680, 680, 680, 680, 680, 680, 680, 680, 680, 680, 680, 680, 680, 680, 680, 680, 680, 680, 680, 680, 680, 680, 680, 680, 680, 680, 680, 680, 680, 680, 680, 680, 680, 680, 680, 680, 680, 680, 680, 680, 680, 680, 680, 680, 680, 680, 680, 680, 680, 680, 680, 680, 680, 680, 680, 680, 680, 680, 680, 680, 680, 680, 680, 680, 680, 680, 680, 680, 680, 680, 680, 680, 680, 680, 680, 680, 680, 680, 680, 680, 680, 680, 680, 680, 680, 680, 680, 680, 680, 680, 680, 680, 680, 680, 680, 680, 680, 680, 680, 680, 680, 680, 680, 680, 680, 680, 680, 680, 680, 680, 680, 680, 680, 680, 680, 680, 680, 680, 680, 680, 680, 680, 680, 680, 680, 680, 680, 680, 680, 680, 680, 680, 680, 680, 680, 680, 680, 680, 680, 680, 680, 680, 680, 680, 680, 680, 680, 680, 680, 680, 680, 680, 680, 680, 680, 680, 680, 680, 680, 680, 680, 680, 680, 680, 680, 680, 680, 680, 680, 680, 680, 680, 680, 680, 680, 680, 680, 680, 680, 680, 680, 680, 680, 680, 680, 680, 680, 680, 680, 680, 680, 680, 680, 680, 680, 680, 680, 680, 680, 680, 680, 680, 680, 680, 680, 680, 680, 680, 680, 680, 680, 680, 680, 680, 680, 680, 680, 680, 680, 680, 680, 680, 680, 680, 680, 680, 680, 680, 680, 680, 680, 680, 680, 680, 680, 680, 680, 680, 680, 680, 680, 680, 680, 680, 680, 680, 680, 680, 680, 680, 680, 680, 680, 680, 680, 680, 680, 680, 680, 680, 680, 680, 680, 680, 680, 680, 680, 680, 680, 680, 680, 680, 680, 680, 680, 680, 680, 680, 680, 680, 680, 680, 680, 680, 680, 680, 680, 680, 680, 680, 680, 680, 680, 680, 680, 680, 680, 680, 680, 680, 680}, + {679, 679, 679, 679, 679, 679, 679, 679, 679, 679, 679, 679, 679, 679, 679, 679, 679, 679, 679, 679, 679, 679, 679, 679, 679, 679, 679, 679, 679, 679, 679, 679, 679, 679, 679, 679, 679, 679, 679, 679, 679, 679, 679, 679, 679, 679, 
679, 679, 679, 679, 679, 679, 679, 679, 679, 679, 679, 679, 679, 679, 679, 679, 679, 679, 679, 679, 679, 679, 679, 679, 679, 679, 679, 679, 679, 679, 679, 679, 679, 679, 679, 679, 679, 679, 679, 679, 679, 679, 679, 679, 679, 679, 679, 679, 679, 679, 679, 679, 679, 679, 679, 679, 679, 679, 679, 679, 679, 679, 679, 679, 679, 679, 679, 679, 679, 679, 679, 679, 679, 679, 679, 679, 679, 679, 679, 679, 679, 679, 679, 679, 679, 679, 679, 679, 679, 679, 679, 679, 679, 679, 679, 679, 679, 679, 679, 679, 679, 679, 679, 679, 679, 679, 679, 679, 679, 679, 679, 679, 679, 679, 679, 679, 679, 679, 679, 679, 679, 679, 679, 679, 679, 679, 679, 679, 679, 679, 679, 679, 679, 679, 679, 679, 679, 679, 679, 679, 679, 679, 679, 679, 679, 679, 679, 679, 679, 679, 679, 679, 679, 679, 679, 679, 679, 679, 679, 679, 679, 679, 679, 679, 679, 679, 679, 679, 679, 679, 679, 679, 679, 679, 679, 679, 679, 679, 679, 679, 679, 679, 679, 679, 679, 679, 679, 679, 679, 679, 679, 679, 679, 679, 679, 679, 679, 679, 679, 679, 679, 679, 679, 679, 679, 679, 679, 679, 679, 679, 679, 679, 679, 679, 679, 679, 679, 679, 679, 679, 679, 679, 679, 679, 679, 679, 679, 679, 679, 679, 679, 679, 679, 679, 679, 679, 679, 679, 679, 679, 679, 679, 679, 679, 679, 679, 679, 679, 679, 679, 679, 679, 679, 679, 679, 679, 679, 679, 679, 679, 679, 679, 679, 679, 679, 679, 679, 679, 679, 679, 679, 679, 679, 679, 679, 679, 679, 679, 679, 679, 679, 679, 679, 679, 679, 679, 679, 679, 679, 679, 679, 679, 679, 679, 679, 679, 679, 679, 679, 679, 679, 679, 679, 679, 679, 679, 679, 679, 679, 679, 679, 679, 679, 679, 679, 679, 679, 679, 679, 679, 679, 679, 679, 679, 679, 679, 679, 679, 679, 679, 679, 679, 679, 679, 679, 679, 679, 679, 679, 679, 679, 679, 679, 679, 679, 679, 679, 679, 679, 679, 679, 679, 679, 679, 679, 679, 679, 679, 679, 679, 679, 679, 679, 679, 679, 679, 679, 679, 679, 679, 679, 679, 679, 679, 679, 679, 679, 679, 679, 679, 679, 679, 679, 679, 679, 679, 679, 679, 679, 679, 679, 679, 679, 679, 679, 679, 679, 679, 679, 679, 679, 679, 679, 679, 679, 679, 679, 679, 679, 679, 679, 679, 679, 679, 679, 679, 679, 679, 679, 679, 679, 679, 679, 679, 679, 679, 679, 679, 679, 679, 679, 679, 679, 679, 679, 679, 679, 679, 679, 679, 679, 679, 679, 679, 679, 679, 679, 679, 679, 679, 679, 679, 679, 679, 679, 679, 679, 679, 679, 679, 679, 679, 679, 679, 679, 679, 679, 679, 679, 679, 679, 679, 679, 679, 679, 679, 679}, + {678, 678, 678, 678, 678, 678, 678, 678, 678, 678, 678, 678, 678, 678, 678, 678, 678, 678, 678, 678, 678, 678, 678, 678, 678, 678, 678, 678, 678, 678, 678, 678, 678, 678, 678, 678, 678, 678, 678, 678, 678, 678, 678, 678, 678, 678, 678, 678, 678, 678, 678, 678, 678, 678, 678, 678, 678, 678, 678, 678, 678, 678, 678, 678, 678, 678, 678, 678, 678, 678, 678, 678, 678, 678, 678, 678, 678, 678, 678, 678, 678, 678, 678, 678, 678, 678, 678, 678, 678, 678, 678, 678, 678, 678, 678, 678, 678, 678, 678, 678, 678, 678, 678, 678, 678, 678, 678, 678, 678, 678, 678, 678, 678, 678, 678, 678, 678, 678, 678, 678, 678, 678, 678, 678, 678, 678, 678, 678, 678, 678, 678, 678, 678, 678, 678, 678, 678, 678, 678, 678, 678, 678, 678, 678, 678, 678, 678, 678, 678, 678, 678, 678, 678, 678, 678, 678, 678, 678, 678, 678, 678, 678, 678, 678, 678, 678, 678, 678, 678, 678, 678, 678, 678, 678, 678, 678, 678, 678, 678, 678, 678, 678, 678, 678, 678, 678, 678, 678, 678, 678, 678, 678, 678, 678, 678, 678, 678, 678, 678, 678, 678, 678, 678, 678, 678, 678, 678, 678, 678, 678, 678, 678, 678, 678, 678, 678, 678, 678, 678, 678, 678, 678, 678, 678, 678, 678, 678, 678, 678, 678, 678, 678, 678, 
678, 678, 678, 678, 678, 678, 678, 678, 678, 678, 678, 678, 678, 678, 678, 678, 678, 678, 678, 678, 678, 678, 678, 678, 678, 678, 678, 678, 678, 678, 678, 678, 678, 678, 678, 678, 678, 678, 678, 678, 678, 678, 678, 678, 678, 678, 678, 678, 678, 678, 678, 678, 678, 678, 678, 678, 678, 678, 678, 678, 678, 678, 678, 678, 678, 678, 678, 678, 678, 678, 678, 678, 678, 678, 678, 678, 678, 678, 678, 678, 678, 678, 678, 678, 678, 678, 678, 678, 678, 678, 678, 678, 678, 678, 678, 678, 678, 678, 678, 678, 678, 678, 678, 678, 678, 678, 678, 678, 678, 678, 678, 678, 678, 678, 678, 678, 678, 678, 678, 678, 678, 678, 678, 678, 678, 678, 678, 678, 678, 678, 678, 678, 678, 678, 678, 678, 678, 678, 678, 678, 678, 678, 678, 678, 678, 678, 678, 678, 678, 678, 678, 678, 678, 678, 678, 678, 678, 678, 678, 678, 678, 678, 678, 678, 678, 678, 678, 678, 678, 678, 678, 678, 678, 678, 678, 678, 678, 678, 678, 678, 678, 678, 678, 678, 678, 678, 678, 678, 678, 678, 678, 678, 678, 678, 678, 678, 678, 678, 678, 678, 678, 678, 678, 678, 678, 678, 678, 678, 678, 678, 678, 678, 678, 678, 678, 678, 678, 678, 678, 678, 678, 678, 678, 678, 678, 678, 678, 678, 678, 678, 678, 678, 678, 678, 678, 678, 678, 678, 678, 678, 678, 678, 678, 678, 678, 678, 678, 678, 678, 678, 678, 678, 678, 678, 678, 678, 678, 678, 678, 678, 678, 678, 678, 678, 678, 678, 678, 678, 678, 678, 678, 678, 678, 678, 678, 678, 678, 678, 678, 678, 678, 678, 678, 678, 678, 678, 678, 678, 678, 678}, + {677, 677, 677, 677, 677, 677, 677, 677, 677, 677, 677, 677, 677, 677, 677, 677, 677, 677, 677, 677, 677, 677, 677, 677, 677, 677, 677, 677, 677, 677, 677, 677, 677, 677, 677, 677, 677, 677, 677, 677, 677, 677, 677, 677, 677, 677, 677, 677, 677, 677, 677, 677, 677, 677, 677, 677, 677, 677, 677, 677, 677, 677, 677, 677, 677, 677, 677, 677, 677, 677, 677, 677, 677, 677, 677, 677, 677, 677, 677, 677, 677, 677, 677, 677, 677, 677, 677, 677, 677, 677, 677, 677, 677, 677, 677, 677, 677, 677, 677, 677, 677, 677, 677, 677, 677, 677, 677, 677, 677, 677, 677, 677, 677, 677, 677, 677, 677, 677, 677, 677, 677, 677, 677, 677, 677, 677, 677, 677, 677, 677, 677, 677, 677, 677, 677, 677, 677, 677, 677, 677, 677, 677, 677, 677, 677, 677, 677, 677, 677, 677, 677, 677, 677, 677, 677, 677, 677, 677, 677, 677, 677, 677, 677, 677, 677, 677, 677, 677, 677, 677, 677, 677, 677, 677, 677, 677, 677, 677, 677, 677, 677, 677, 677, 677, 677, 677, 677, 677, 677, 677, 677, 677, 677, 677, 677, 677, 677, 677, 677, 677, 677, 677, 677, 677, 677, 677, 677, 677, 677, 677, 677, 677, 677, 677, 677, 677, 677, 677, 677, 677, 677, 677, 677, 677, 677, 677, 677, 677, 677, 677, 677, 677, 677, 677, 677, 677, 677, 677, 677, 677, 677, 677, 677, 677, 677, 677, 677, 677, 677, 677, 677, 677, 677, 677, 677, 677, 677, 677, 677, 677, 677, 677, 677, 677, 677, 677, 677, 677, 677, 677, 677, 677, 677, 677, 677, 677, 677, 677, 677, 677, 677, 677, 677, 677, 677, 677, 677, 677, 677, 677, 677, 677, 677, 677, 677, 677, 677, 677, 677, 677, 677, 677, 677, 677, 677, 677, 677, 677, 677, 677, 677, 677, 677, 677, 677, 677, 677, 677, 677, 677, 677, 677, 677, 677, 677, 677, 677, 677, 677, 677, 677, 677, 677, 677, 677, 677, 677, 677, 677, 677, 677, 677, 677, 677, 677, 677, 677, 677, 677, 677, 677, 677, 677, 677, 677, 677, 677, 677, 677, 677, 677, 677, 677, 677, 677, 677, 677, 677, 677, 677, 677, 677, 677, 677, 677, 677, 677, 677, 677, 677, 677, 677, 677, 677, 677, 677, 677, 677, 677, 677, 677, 677, 677, 677, 677, 677, 677, 677, 677, 677, 677, 677, 677, 677, 677, 677, 677, 677, 677, 677, 677, 677, 677, 677, 677, 677, 677, 677, 677, 677, 
677, 677, 677, 677, 677, 677, 677, 677, 677, 677, 677, 677, 677, 677, 677, 677, 677, 677, 677, 677, 677, 677, 677, 677, 677, 677, 677, 677, 677, 677, 677, 677, 677, 677, 677, 677, 677, 677, 677, 677, 677, 677, 677, 677, 677, 677, 677, 677, 677, 677, 677, 677, 677, 677, 677, 677, 677, 677, 677, 677, 677, 677, 677, 677, 677, 677, 677, 677, 677, 677, 677, 677, 677, 677, 677, 677, 677, 677, 677, 677, 677, 677, 677, 677, 677, 677, 677, 677, 677, 677, 677, 677, 677, 677, 677, 677, 677, 677, 677, 677, 677, 677, 677}, + // 265 + {676, 676, 676, 676, 676, 676, 676, 676, 676, 676, 676, 676, 676, 676, 676, 676, 676, 676, 676, 676, 676, 676, 676, 676, 676, 676, 676, 676, 676, 676, 676, 676, 676, 676, 676, 676, 676, 676, 676, 676, 676, 676, 676, 676, 676, 676, 676, 676, 676, 676, 676, 676, 676, 676, 676, 676, 676, 676, 676, 676, 676, 676, 676, 676, 676, 676, 676, 676, 676, 676, 676, 676, 676, 676, 676, 676, 676, 676, 676, 676, 676, 676, 676, 676, 676, 676, 676, 676, 676, 676, 676, 676, 676, 676, 676, 676, 676, 676, 676, 676, 676, 676, 676, 676, 676, 676, 676, 676, 676, 676, 676, 676, 676, 676, 676, 676, 676, 676, 676, 676, 676, 676, 676, 676, 676, 676, 676, 676, 676, 676, 676, 676, 676, 676, 676, 676, 676, 676, 676, 676, 676, 676, 676, 676, 676, 676, 676, 676, 676, 676, 676, 676, 676, 676, 676, 676, 676, 676, 676, 676, 676, 676, 676, 676, 676, 676, 676, 676, 676, 676, 676, 676, 676, 676, 676, 676, 676, 676, 676, 676, 676, 676, 676, 676, 676, 676, 676, 676, 676, 676, 676, 676, 676, 676, 676, 676, 676, 676, 676, 676, 676, 676, 676, 676, 676, 676, 676, 676, 676, 676, 676, 676, 676, 676, 676, 676, 676, 676, 676, 676, 676, 676, 676, 676, 676, 676, 676, 676, 676, 676, 676, 676, 676, 676, 676, 676, 676, 676, 676, 676, 676, 676, 676, 676, 676, 676, 676, 676, 676, 676, 676, 676, 676, 676, 676, 676, 676, 676, 676, 676, 676, 676, 676, 676, 676, 676, 676, 676, 676, 676, 676, 676, 676, 676, 676, 676, 676, 676, 676, 676, 676, 676, 676, 676, 676, 676, 676, 676, 676, 676, 676, 676, 676, 676, 676, 676, 676, 676, 676, 676, 676, 676, 676, 676, 676, 676, 676, 676, 676, 676, 676, 676, 676, 676, 676, 676, 676, 676, 676, 676, 676, 676, 676, 676, 676, 676, 676, 676, 676, 676, 676, 676, 676, 676, 676, 676, 676, 676, 676, 676, 676, 676, 676, 676, 676, 676, 676, 676, 676, 676, 676, 676, 676, 676, 676, 676, 676, 676, 676, 676, 676, 676, 676, 676, 676, 676, 676, 676, 676, 676, 676, 676, 676, 676, 676, 676, 676, 676, 676, 676, 676, 676, 676, 676, 676, 676, 676, 676, 676, 676, 676, 676, 676, 676, 676, 676, 676, 676, 676, 676, 676, 676, 676, 676, 676, 676, 676, 676, 676, 676, 676, 676, 676, 676, 676, 676, 676, 676, 676, 676, 676, 676, 676, 676, 676, 676, 676, 676, 676, 676, 676, 676, 676, 676, 676, 676, 676, 676, 676, 676, 676, 676, 676, 676, 676, 676, 676, 676, 676, 676, 676, 676, 676, 676, 676, 676, 676, 676, 676, 676, 676, 676, 676, 676, 676, 676, 676, 676, 676, 676, 676, 676, 676, 676, 676, 676, 676, 676, 676, 676, 676, 676, 676, 676, 676, 676, 676, 676, 676, 676, 676, 676, 676, 676, 676, 676, 676, 676, 676, 676, 676, 676, 676, 676, 676, 676, 676, 676, 676, 676, 676, 676, 676, 676, 676, 676, 676, 676, 676, 676, 676, 676, 676}, + {675, 675, 675, 675, 675, 675, 675, 675, 675, 675, 675, 675, 675, 675, 675, 675, 675, 675, 675, 675, 675, 675, 675, 675, 675, 675, 675, 675, 675, 675, 675, 675, 675, 675, 675, 675, 675, 675, 675, 675, 675, 675, 675, 675, 675, 675, 675, 675, 675, 675, 675, 675, 675, 675, 675, 675, 675, 675, 675, 675, 675, 675, 675, 675, 675, 675, 675, 675, 675, 675, 675, 675, 675, 675, 675, 675, 675, 675, 675, 675, 675, 
675, 675, 675, 675, 675, 675, 675, 675, 675, 675, 675, 675, 675, 675, 675, 675, 675, 675, 675, 675, 675, 675, 675, 675, 675, 675, 675, 675, 675, 675, 675, 675, 675, 675, 675, 675, 675, 675, 675, 675, 675, 675, 675, 675, 675, 675, 675, 675, 675, 675, 675, 675, 675, 675, 675, 675, 675, 675, 675, 675, 675, 675, 675, 675, 675, 675, 675, 675, 675, 675, 675, 675, 675, 675, 675, 675, 675, 675, 675, 675, 675, 675, 675, 675, 675, 675, 675, 675, 675, 675, 675, 675, 675, 675, 675, 675, 675, 675, 675, 675, 675, 675, 675, 675, 675, 675, 675, 675, 675, 675, 675, 675, 675, 675, 675, 675, 675, 675, 675, 675, 675, 675, 675, 675, 675, 675, 675, 675, 675, 675, 675, 675, 675, 675, 675, 675, 675, 675, 675, 675, 675, 675, 675, 675, 675, 675, 675, 675, 675, 675, 675, 675, 675, 675, 675, 675, 675, 675, 675, 675, 675, 675, 675, 675, 675, 675, 675, 675, 675, 675, 675, 675, 675, 675, 675, 675, 675, 675, 675, 675, 675, 675, 675, 675, 675, 675, 675, 675, 675, 675, 675, 675, 675, 675, 675, 675, 675, 675, 675, 675, 675, 675, 675, 675, 675, 675, 675, 675, 675, 675, 675, 675, 675, 675, 675, 675, 675, 675, 675, 675, 675, 675, 675, 675, 675, 675, 675, 675, 675, 675, 675, 675, 675, 675, 675, 675, 675, 675, 675, 675, 675, 675, 675, 675, 675, 675, 675, 675, 675, 675, 675, 675, 675, 675, 675, 675, 675, 675, 675, 675, 675, 675, 675, 675, 675, 675, 675, 675, 675, 675, 675, 675, 675, 675, 675, 675, 675, 675, 675, 675, 675, 675, 675, 675, 675, 675, 675, 675, 675, 675, 675, 675, 675, 675, 675, 675, 675, 675, 675, 675, 675, 675, 675, 675, 675, 675, 675, 675, 675, 675, 675, 675, 675, 675, 675, 675, 675, 675, 675, 675, 675, 675, 675, 675, 675, 675, 675, 675, 675, 675, 675, 675, 675, 675, 675, 675, 675, 675, 675, 675, 675, 675, 675, 675, 675, 675, 675, 675, 675, 675, 675, 675, 675, 675, 675, 675, 675, 675, 675, 675, 675, 675, 675, 675, 675, 675, 675, 675, 675, 675, 675, 675, 675, 675, 675, 675, 675, 675, 675, 675, 675, 675, 675, 675, 675, 675, 675, 675, 675, 675, 675, 675, 675, 675, 675, 675, 675, 675, 675, 675, 675, 675, 675, 675, 675, 675, 675, 675, 675, 675, 675, 675, 675, 675, 675, 675, 675, 675, 675, 675, 675, 675, 675, 675, 675, 675, 675, 675, 675, 675, 675, 675, 675, 675, 675, 675, 675, 675, 675, 675, 675, 675}, + {674, 674, 674, 674, 674, 674, 674, 674, 674, 674, 674, 674, 674, 674, 674, 674, 674, 674, 674, 674, 674, 674, 674, 674, 674, 674, 674, 674, 674, 674, 674, 674, 674, 674, 674, 674, 674, 674, 674, 674, 674, 674, 674, 674, 674, 674, 674, 674, 674, 674, 674, 674, 674, 674, 674, 674, 674, 674, 674, 674, 674, 674, 674, 674, 674, 674, 674, 674, 674, 674, 674, 674, 674, 674, 674, 674, 674, 674, 674, 674, 674, 674, 674, 674, 674, 674, 674, 674, 674, 674, 674, 674, 674, 674, 674, 674, 674, 674, 674, 674, 674, 674, 674, 674, 674, 674, 674, 674, 674, 674, 674, 674, 674, 674, 674, 674, 674, 674, 674, 674, 674, 674, 674, 674, 674, 674, 674, 674, 674, 674, 674, 674, 674, 674, 674, 674, 674, 674, 674, 674, 674, 674, 674, 674, 674, 674, 674, 674, 674, 674, 674, 674, 674, 674, 674, 674, 674, 674, 674, 674, 674, 674, 674, 674, 674, 674, 674, 674, 674, 674, 674, 674, 674, 674, 674, 674, 674, 674, 674, 674, 674, 674, 674, 674, 674, 674, 674, 674, 674, 674, 674, 674, 674, 674, 674, 674, 674, 674, 674, 674, 674, 674, 674, 674, 674, 674, 674, 674, 674, 674, 674, 674, 674, 674, 674, 674, 674, 674, 674, 674, 674, 674, 674, 674, 674, 674, 674, 674, 674, 674, 674, 674, 674, 674, 674, 674, 674, 674, 674, 674, 674, 674, 674, 674, 674, 674, 674, 674, 674, 674, 674, 674, 674, 674, 674, 674, 674, 674, 674, 674, 674, 674, 674, 674, 674, 674, 674, 674, 
674, 674, 674, 674, 674, 674, 674, 674, 674, 674, 674, 674, 674, 674, 674, 674, 674, 674, 674, 674, 674, 674, 674, 674, 674, 674, 674, 674, 674, 674, 674, 674, 674, 674, 674, 674, 674, 674, 674, 674, 674, 674, 674, 674, 674, 674, 674, 674, 674, 674, 674, 674, 674, 674, 674, 674, 674, 674, 674, 674, 674, 674, 674, 674, 674, 674, 674, 674, 674, 674, 674, 674, 674, 674, 674, 674, 674, 674, 674, 674, 674, 674, 674, 674, 674, 674, 674, 674, 674, 674, 674, 674, 674, 674, 674, 674, 674, 674, 674, 674, 674, 674, 674, 674, 674, 674, 674, 674, 674, 674, 674, 674, 674, 674, 674, 674, 674, 674, 674, 674, 674, 674, 674, 674, 674, 674, 674, 674, 674, 674, 674, 674, 674, 674, 674, 674, 674, 674, 674, 674, 674, 674, 674, 674, 674, 674, 674, 674, 674, 674, 674, 674, 674, 674, 674, 674, 674, 674, 674, 674, 674, 674, 674, 674, 674, 674, 674, 674, 674, 674, 674, 674, 674, 674, 674, 674, 674, 674, 674, 674, 674, 674, 674, 674, 674, 674, 674, 674, 674, 674, 674, 674, 674, 674, 674, 674, 674, 674, 674, 674, 674, 674, 674, 674, 674, 674, 674, 674, 674, 674, 674, 674, 674, 674, 674, 674, 674, 674, 674, 674, 674, 674, 674, 674, 674, 674, 674, 674, 674, 674, 674, 674, 674, 674, 674, 674, 674, 674, 674, 674, 674, 674, 674, 674, 674, 674, 674, 674, 674, 674, 674, 674, 674, 674, 674}, + {673, 673, 673, 673, 673, 673, 673, 673, 673, 673, 673, 673, 673, 673, 673, 673, 673, 673, 673, 673, 673, 673, 673, 673, 673, 673, 673, 673, 673, 673, 673, 673, 673, 673, 673, 673, 673, 673, 673, 673, 673, 673, 673, 673, 673, 673, 673, 673, 673, 673, 673, 673, 673, 673, 673, 673, 673, 673, 673, 673, 673, 673, 673, 673, 673, 673, 673, 673, 673, 673, 673, 673, 673, 673, 673, 673, 673, 673, 673, 673, 673, 673, 673, 673, 673, 673, 673, 673, 673, 673, 673, 673, 673, 673, 673, 673, 673, 673, 673, 673, 673, 673, 673, 673, 673, 673, 673, 673, 673, 673, 673, 673, 673, 673, 673, 673, 673, 673, 673, 673, 673, 673, 673, 673, 673, 673, 673, 673, 673, 673, 673, 673, 673, 673, 673, 673, 673, 673, 673, 673, 673, 673, 673, 673, 673, 673, 673, 673, 673, 673, 673, 673, 673, 673, 673, 673, 673, 673, 673, 673, 673, 673, 673, 673, 673, 673, 673, 673, 673, 673, 673, 673, 673, 673, 673, 673, 673, 673, 673, 673, 673, 673, 673, 673, 673, 673, 673, 673, 673, 673, 673, 673, 673, 673, 673, 673, 673, 673, 673, 673, 673, 673, 673, 673, 673, 673, 673, 673, 673, 673, 673, 673, 673, 673, 673, 673, 673, 673, 673, 673, 673, 673, 673, 673, 673, 673, 673, 673, 673, 673, 673, 673, 673, 673, 673, 673, 673, 673, 673, 673, 673, 673, 673, 673, 673, 673, 673, 673, 673, 673, 673, 673, 673, 673, 673, 673, 673, 673, 673, 673, 673, 673, 673, 673, 673, 673, 673, 673, 673, 673, 673, 673, 673, 673, 673, 673, 673, 673, 673, 673, 673, 673, 673, 673, 673, 673, 673, 673, 673, 673, 673, 673, 673, 673, 673, 673, 673, 673, 673, 673, 673, 673, 673, 673, 673, 673, 673, 673, 673, 673, 673, 673, 673, 673, 673, 673, 673, 673, 673, 673, 673, 673, 673, 673, 673, 673, 673, 673, 673, 673, 673, 673, 673, 673, 673, 673, 673, 673, 673, 673, 673, 673, 673, 673, 673, 673, 673, 673, 673, 673, 673, 673, 673, 673, 673, 673, 673, 673, 673, 673, 673, 673, 673, 673, 673, 673, 673, 673, 673, 673, 673, 673, 673, 673, 673, 673, 673, 673, 673, 673, 673, 673, 673, 673, 673, 673, 673, 673, 673, 673, 673, 673, 673, 673, 673, 673, 673, 673, 673, 673, 673, 673, 673, 673, 673, 673, 673, 673, 673, 673, 673, 673, 673, 673, 673, 673, 673, 673, 673, 673, 673, 673, 673, 673, 673, 673, 673, 673, 673, 673, 673, 673, 673, 673, 673, 673, 673, 673, 673, 673, 673, 673, 673, 673, 673, 673, 673, 673, 673, 673, 673, 673, 673, 673, 673, 
673, 673, 673, 673, 673, 673, 673, 673, 673, 673, 673, 673, 673, 673, 673, 673, 673, 673, 673, 673, 673, 673, 673, 673, 673, 673, 673, 673, 673, 673, 673, 673, 673, 673, 673, 673, 673, 673, 673, 673, 673, 673, 673, 673, 673, 673, 673, 673, 673, 673, 673, 673, 673, 673, 673, 673, 673, 673, 673, 673, 673, 673, 673, 673, 673, 673, 673, 673}, + {672, 672, 672, 672, 672, 672, 672, 672, 672, 672, 672, 672, 672, 672, 672, 672, 672, 672, 672, 672, 672, 672, 672, 672, 672, 672, 672, 672, 672, 672, 672, 672, 672, 672, 672, 672, 672, 672, 672, 672, 672, 672, 672, 672, 672, 672, 672, 672, 672, 672, 672, 672, 672, 672, 672, 672, 672, 672, 672, 672, 672, 672, 672, 672, 672, 672, 672, 672, 672, 672, 672, 672, 672, 672, 672, 672, 672, 672, 672, 672, 672, 672, 672, 672, 672, 672, 672, 672, 672, 672, 672, 672, 672, 672, 672, 672, 672, 672, 672, 672, 672, 672, 672, 672, 672, 672, 672, 672, 672, 672, 672, 672, 672, 672, 672, 672, 672, 672, 672, 672, 672, 672, 672, 672, 672, 672, 672, 672, 672, 672, 672, 672, 672, 672, 672, 672, 672, 672, 672, 672, 672, 672, 672, 672, 672, 672, 672, 672, 672, 672, 672, 672, 672, 672, 672, 672, 672, 672, 672, 672, 672, 672, 672, 672, 672, 672, 672, 672, 672, 672, 672, 672, 672, 672, 672, 672, 672, 672, 672, 672, 672, 672, 672, 672, 672, 672, 672, 672, 672, 672, 672, 672, 672, 672, 672, 672, 672, 672, 672, 672, 672, 672, 672, 672, 672, 672, 672, 672, 672, 672, 672, 672, 672, 672, 672, 672, 672, 672, 672, 672, 672, 672, 672, 672, 672, 672, 672, 672, 672, 672, 672, 672, 672, 672, 672, 672, 672, 672, 672, 672, 672, 672, 672, 672, 672, 672, 672, 672, 672, 672, 672, 672, 672, 672, 672, 672, 672, 672, 672, 672, 672, 672, 672, 672, 672, 672, 672, 672, 672, 672, 672, 672, 672, 672, 672, 672, 672, 672, 672, 672, 672, 672, 672, 672, 672, 672, 672, 672, 672, 672, 672, 672, 672, 672, 672, 672, 672, 672, 672, 672, 672, 672, 672, 672, 672, 672, 672, 672, 672, 672, 672, 672, 672, 672, 672, 672, 672, 672, 672, 672, 672, 672, 672, 672, 672, 672, 672, 672, 672, 672, 672, 672, 672, 672, 672, 672, 672, 672, 672, 672, 672, 672, 672, 672, 672, 672, 672, 672, 672, 672, 672, 672, 672, 672, 672, 672, 672, 672, 672, 672, 672, 672, 672, 672, 672, 672, 672, 672, 672, 672, 672, 672, 672, 672, 672, 672, 672, 672, 672, 672, 672, 672, 672, 672, 672, 672, 672, 672, 672, 672, 672, 672, 672, 672, 672, 672, 672, 672, 672, 672, 672, 672, 672, 672, 672, 672, 672, 672, 672, 672, 672, 672, 672, 672, 672, 672, 672, 672, 672, 672, 672, 672, 672, 672, 672, 672, 672, 672, 672, 672, 672, 672, 672, 672, 672, 672, 672, 672, 672, 672, 672, 672, 672, 672, 672, 672, 672, 672, 672, 672, 672, 672, 672, 672, 672, 672, 672, 672, 672, 672, 672, 672, 672, 672, 672, 672, 672, 672, 672, 672, 672, 672, 672, 672, 672, 672, 672, 672, 672, 672, 672, 672, 672, 672, 672, 672, 672, 672, 672, 672, 672, 672, 672, 672, 672, 672, 672, 672, 672, 672, 672, 672, 672, 672, 672, 672, 672, 672, 672, 672, 672, 672, 672, 672, 672, 672, 672, 672, 672, 672, 672, 672, 672}, + // 270 + {671, 671, 671, 671, 671, 671, 671, 671, 671, 671, 671, 671, 671, 671, 671, 671, 671, 671, 671, 671, 671, 671, 671, 671, 671, 671, 671, 671, 671, 671, 671, 671, 671, 671, 671, 671, 671, 671, 671, 671, 671, 671, 671, 671, 671, 671, 671, 671, 671, 671, 671, 671, 671, 671, 671, 671, 671, 671, 671, 671, 671, 671, 671, 671, 671, 671, 671, 671, 671, 671, 671, 671, 671, 671, 671, 671, 671, 671, 671, 671, 671, 671, 671, 671, 671, 671, 671, 671, 671, 671, 671, 671, 671, 671, 671, 671, 671, 671, 671, 671, 671, 671, 671, 671, 671, 671, 671, 671, 671, 671, 671, 671, 671, 671, 671, 671, 
671, 671, 671, 671, 671, 671, 671, 671, 671, 671, 671, 671, 671, 671, 671, 671, 671, 671, 671, 671, 671, 671, 671, 671, 671, 671, 671, 671, 671, 671, 671, 671, 671, 671, 671, 671, 671, 671, 671, 671, 671, 671, 671, 671, 671, 671, 671, 671, 671, 671, 671, 671, 671, 671, 671, 671, 671, 671, 671, 671, 671, 671, 671, 671, 671, 671, 671, 671, 671, 671, 671, 671, 671, 671, 671, 671, 671, 671, 671, 671, 671, 671, 671, 671, 671, 671, 671, 671, 671, 671, 671, 671, 671, 671, 671, 671, 671, 671, 671, 671, 671, 671, 671, 671, 671, 671, 671, 671, 671, 671, 671, 671, 671, 671, 671, 671, 671, 671, 671, 671, 671, 671, 671, 671, 671, 671, 671, 671, 671, 671, 671, 671, 671, 671, 671, 671, 671, 671, 671, 671, 671, 671, 671, 671, 671, 671, 671, 671, 671, 671, 671, 671, 671, 671, 671, 671, 671, 671, 671, 671, 671, 671, 671, 671, 671, 671, 671, 671, 671, 671, 671, 671, 671, 671, 671, 671, 671, 671, 671, 671, 671, 671, 671, 671, 671, 671, 671, 671, 671, 671, 671, 671, 671, 671, 671, 671, 671, 671, 671, 671, 671, 671, 671, 671, 671, 671, 671, 671, 671, 671, 671, 671, 671, 671, 671, 671, 671, 671, 671, 671, 671, 671, 671, 671, 671, 671, 671, 671, 671, 671, 671, 671, 671, 671, 671, 671, 671, 671, 671, 671, 671, 671, 671, 671, 671, 671, 671, 671, 671, 671, 671, 671, 671, 671, 671, 671, 671, 671, 671, 671, 671, 671, 671, 671, 671, 671, 671, 671, 671, 671, 671, 671, 671, 671, 671, 671, 671, 671, 671, 671, 671, 671, 671, 671, 671, 671, 671, 671, 671, 671, 671, 671, 671, 671, 671, 671, 671, 671, 671, 671, 671, 671, 671, 671, 671, 671, 671, 671, 671, 671, 671, 671, 671, 671, 671, 671, 671, 671, 671, 671, 671, 671, 671, 671, 671, 671, 671, 671, 671, 671, 671, 671, 671, 671, 671, 671, 671, 671, 671, 671, 671, 671, 671, 671, 671, 671, 671, 671, 671, 671, 671, 671, 671, 671, 671, 671, 671, 671, 671, 671, 671, 671, 671, 671, 671, 671, 671, 671, 671, 671, 671, 671, 671, 671, 671, 671, 671, 671, 671, 671, 671, 671, 671, 671, 671, 671, 671, 671, 671, 671, 671, 671, 671, 671, 671, 671, 671, 671, 671, 671, 671, 671, 671, 671, 671, 671, 671}, + {670, 670, 670, 670, 670, 670, 670, 670, 670, 670, 670, 670, 670, 670, 670, 670, 670, 670, 670, 670, 670, 670, 670, 670, 670, 670, 670, 670, 670, 670, 670, 670, 670, 670, 670, 670, 670, 670, 670, 670, 670, 670, 670, 670, 670, 670, 670, 670, 670, 670, 670, 670, 670, 670, 670, 670, 670, 670, 670, 670, 670, 670, 670, 670, 670, 670, 670, 670, 670, 670, 670, 670, 670, 670, 670, 670, 670, 670, 670, 670, 670, 670, 670, 670, 670, 670, 670, 670, 670, 670, 670, 670, 670, 670, 670, 670, 670, 670, 670, 670, 670, 670, 670, 670, 670, 670, 670, 670, 670, 670, 670, 670, 670, 670, 670, 670, 670, 670, 670, 670, 670, 670, 670, 670, 670, 670, 670, 670, 670, 670, 670, 670, 670, 670, 670, 670, 670, 670, 670, 670, 670, 670, 670, 670, 670, 670, 670, 670, 670, 670, 670, 670, 670, 670, 670, 670, 670, 670, 670, 670, 670, 670, 670, 670, 670, 670, 670, 670, 670, 670, 670, 670, 670, 670, 670, 670, 670, 670, 670, 670, 670, 670, 670, 670, 670, 670, 670, 670, 670, 670, 670, 670, 670, 670, 670, 670, 670, 670, 670, 670, 670, 670, 670, 670, 670, 670, 670, 670, 670, 670, 670, 670, 670, 670, 670, 670, 670, 670, 670, 670, 670, 670, 670, 670, 670, 670, 670, 670, 670, 670, 670, 670, 670, 670, 670, 670, 670, 670, 670, 670, 670, 670, 670, 670, 670, 670, 670, 670, 670, 670, 670, 670, 670, 670, 670, 670, 670, 670, 670, 670, 670, 670, 670, 670, 670, 670, 670, 670, 670, 670, 670, 670, 670, 670, 670, 670, 670, 670, 670, 670, 670, 670, 670, 670, 670, 670, 670, 670, 670, 670, 670, 670, 670, 670, 670, 670, 670, 670, 670, 670, 670, 670, 670, 
670, 670, 670, 670, 670, 670, 670, 670, 670, 670, 670, 670, 670, 670, 670, 670, 670, 670, 670, 670, 670, 670, 670, 670, 670, 670, 670, 670, 670, 670, 670, 670, 670, 670, 670, 670, 670, 670, 670, 670, 670, 670, 670, 670, 670, 670, 670, 670, 670, 670, 670, 670, 670, 670, 670, 670, 670, 670, 670, 670, 670, 670, 670, 670, 670, 670, 670, 670, 670, 670, 670, 670, 670, 670, 670, 670, 670, 670, 670, 670, 670, 670, 670, 670, 670, 670, 670, 670, 670, 670, 670, 670, 670, 670, 670, 670, 670, 670, 670, 670, 670, 670, 670, 670, 670, 670, 670, 670, 670, 670, 670, 670, 670, 670, 670, 670, 670, 670, 670, 670, 670, 670, 670, 670, 670, 670, 670, 670, 670, 670, 670, 670, 670, 670, 670, 670, 670, 670, 670, 670, 670, 670, 670, 670, 670, 670, 670, 670, 670, 670, 670, 670, 670, 670, 670, 670, 670, 670, 670, 670, 670, 670, 670, 670, 670, 670, 670, 670, 670, 670, 670, 670, 670, 670, 670, 670, 670, 670, 670, 670, 670, 670, 670, 670, 670, 670, 670, 670, 670, 670, 670, 670, 670, 670, 670, 670, 670, 670, 670, 670, 670, 670, 670, 670, 670, 670, 670, 670, 670, 670, 670, 670, 670, 670, 670, 670, 670, 670, 670, 670}, + {669, 669, 669, 669, 669, 669, 669, 669, 669, 669, 669, 669, 669, 669, 669, 669, 669, 669, 669, 669, 669, 669, 669, 669, 669, 669, 669, 669, 669, 669, 669, 669, 669, 669, 669, 669, 669, 669, 669, 669, 669, 669, 669, 669, 669, 669, 669, 669, 669, 669, 669, 669, 669, 669, 669, 669, 669, 669, 669, 669, 669, 669, 669, 669, 669, 669, 669, 669, 669, 669, 669, 669, 669, 669, 669, 669, 669, 669, 669, 669, 669, 669, 669, 669, 669, 669, 669, 669, 669, 669, 669, 669, 669, 669, 669, 669, 669, 669, 669, 669, 669, 669, 669, 669, 669, 669, 669, 669, 669, 669, 669, 669, 669, 669, 669, 669, 669, 669, 669, 669, 669, 669, 669, 669, 669, 669, 669, 669, 669, 669, 669, 669, 669, 669, 669, 669, 669, 669, 669, 669, 669, 669, 669, 669, 669, 669, 669, 669, 669, 669, 669, 669, 669, 669, 669, 669, 669, 669, 669, 669, 669, 669, 669, 669, 669, 669, 669, 669, 669, 669, 669, 669, 669, 669, 669, 669, 669, 669, 669, 669, 669, 669, 669, 669, 669, 669, 669, 669, 669, 669, 669, 669, 669, 669, 669, 669, 669, 669, 669, 669, 669, 669, 669, 669, 669, 669, 669, 669, 669, 669, 669, 669, 669, 669, 669, 669, 669, 669, 669, 669, 669, 669, 669, 669, 669, 669, 669, 669, 669, 669, 669, 669, 669, 669, 669, 669, 669, 669, 669, 669, 669, 669, 669, 669, 669, 669, 669, 669, 669, 669, 669, 669, 669, 669, 669, 669, 669, 669, 669, 669, 669, 669, 669, 669, 669, 669, 669, 669, 669, 669, 669, 669, 669, 669, 669, 669, 669, 669, 669, 669, 669, 669, 669, 669, 669, 669, 669, 669, 669, 669, 669, 669, 669, 669, 669, 669, 669, 669, 669, 669, 669, 669, 669, 669, 669, 669, 669, 669, 669, 669, 669, 669, 669, 669, 669, 669, 669, 669, 669, 669, 669, 669, 669, 669, 669, 669, 669, 669, 669, 669, 669, 669, 669, 669, 669, 669, 669, 669, 669, 669, 669, 669, 669, 669, 669, 669, 669, 669, 669, 669, 669, 669, 669, 669, 669, 669, 669, 669, 669, 669, 669, 669, 669, 669, 669, 669, 669, 669, 669, 669, 669, 669, 669, 669, 669, 669, 669, 669, 669, 669, 669, 669, 669, 669, 669, 669, 669, 669, 669, 669, 669, 669, 669, 669, 669, 669, 669, 669, 669, 669, 669, 669, 669, 669, 669, 669, 669, 669, 669, 669, 669, 669, 669, 669, 669, 669, 669, 669, 669, 669, 669, 669, 669, 669, 669, 669, 669, 669, 669, 669, 669, 669, 669, 669, 669, 669, 669, 669, 669, 669, 669, 669, 669, 669, 669, 669, 669, 669, 669, 669, 669, 669, 669, 669, 669, 669, 669, 669, 669, 669, 669, 669, 669, 669, 669, 669, 669, 669, 669, 669, 669, 669, 669, 669, 669, 669, 669, 669, 669, 669, 669, 669, 669, 669, 669, 669, 669, 669, 669, 669, 
669, 669, 669, 669, 669, 669, 669, 669, 669, 669, 669, 669, 669, 669, 669, 669, 669, 669, 669, 669, 669, 669, 669, 669, 669, 669, 669, 669, 669, 669, 669, 669, 669}, + {668, 668, 668, 668, 668, 668, 668, 668, 668, 668, 668, 668, 668, 668, 668, 668, 668, 668, 668, 668, 668, 668, 668, 668, 668, 668, 668, 668, 668, 668, 668, 668, 668, 668, 668, 668, 668, 668, 668, 668, 668, 668, 668, 668, 668, 668, 668, 668, 668, 668, 668, 668, 668, 668, 668, 668, 668, 668, 668, 668, 668, 668, 668, 668, 668, 668, 668, 668, 668, 668, 668, 668, 668, 668, 668, 668, 668, 668, 668, 668, 668, 668, 668, 668, 668, 668, 668, 668, 668, 668, 668, 668, 668, 668, 668, 668, 668, 668, 668, 668, 668, 668, 668, 668, 668, 668, 668, 668, 668, 668, 668, 668, 668, 668, 668, 668, 668, 668, 668, 668, 668, 668, 668, 668, 668, 668, 668, 668, 668, 668, 668, 668, 668, 668, 668, 668, 668, 668, 668, 668, 668, 668, 668, 668, 668, 668, 668, 668, 668, 668, 668, 668, 668, 668, 668, 668, 668, 668, 668, 668, 668, 668, 668, 668, 668, 668, 668, 668, 668, 668, 668, 668, 668, 668, 668, 668, 668, 668, 668, 668, 668, 668, 668, 668, 668, 668, 668, 668, 668, 668, 668, 668, 668, 668, 668, 668, 668, 668, 668, 668, 668, 668, 668, 668, 668, 668, 668, 668, 668, 668, 668, 668, 668, 668, 668, 668, 668, 668, 668, 668, 668, 668, 668, 668, 668, 668, 668, 668, 668, 668, 668, 668, 668, 668, 668, 668, 668, 668, 668, 668, 668, 668, 668, 668, 668, 668, 668, 668, 668, 668, 668, 668, 668, 668, 668, 668, 668, 668, 668, 668, 668, 668, 668, 668, 668, 668, 668, 668, 668, 668, 668, 668, 668, 668, 668, 668, 668, 668, 668, 668, 668, 668, 668, 668, 668, 668, 668, 668, 668, 668, 668, 668, 668, 668, 668, 668, 668, 668, 668, 668, 668, 668, 668, 668, 668, 668, 668, 668, 668, 668, 668, 668, 668, 668, 668, 668, 668, 668, 668, 668, 668, 668, 668, 668, 668, 668, 668, 668, 668, 668, 668, 668, 668, 668, 668, 668, 668, 668, 668, 668, 668, 668, 668, 668, 668, 668, 668, 668, 668, 668, 668, 668, 668, 668, 668, 668, 668, 668, 668, 668, 668, 668, 668, 668, 668, 668, 668, 668, 668, 668, 668, 668, 668, 668, 668, 668, 668, 668, 668, 668, 668, 668, 668, 668, 668, 668, 668, 668, 668, 668, 668, 668, 668, 668, 668, 668, 668, 668, 668, 668, 668, 668, 668, 668, 668, 668, 668, 668, 668, 668, 668, 668, 668, 668, 668, 668, 668, 668, 668, 668, 668, 668, 668, 668, 668, 668, 668, 668, 668, 668, 668, 668, 668, 668, 668, 668, 668, 668, 668, 668, 668, 668, 668, 668, 668, 668, 668, 668, 668, 668, 668, 668, 668, 668, 668, 668, 668, 668, 668, 668, 668, 668, 668, 668, 668, 668, 668, 668, 668, 668, 668, 668, 668, 668, 668, 668, 668, 668, 668, 668, 668, 668, 668, 668, 668, 668, 668, 668, 668, 668, 668, 668, 668, 668, 668, 668, 668, 668, 668, 668, 668, 668, 668, 668, 668, 668, 668, 668, 668, 668, 668, 668, 668, 668, 668, 668, 668, 668, 668, 668, 668, 668, 668}, + {667, 667, 667, 667, 667, 667, 667, 667, 667, 667, 667, 667, 667, 667, 667, 667, 667, 667, 667, 667, 667, 667, 667, 667, 667, 667, 667, 667, 667, 667, 667, 667, 667, 667, 667, 667, 667, 667, 667, 667, 667, 667, 667, 667, 667, 667, 667, 667, 667, 667, 667, 667, 667, 667, 667, 667, 667, 667, 667, 667, 667, 667, 667, 667, 667, 667, 667, 667, 667, 667, 667, 667, 667, 667, 667, 667, 667, 667, 667, 667, 667, 667, 667, 667, 667, 667, 667, 667, 667, 667, 667, 667, 667, 667, 667, 667, 667, 667, 667, 667, 667, 667, 667, 667, 667, 667, 667, 667, 667, 667, 667, 667, 667, 667, 667, 667, 667, 667, 667, 667, 667, 667, 667, 667, 667, 667, 667, 667, 667, 667, 667, 667, 667, 667, 667, 667, 667, 667, 667, 667, 667, 667, 667, 667, 667, 667, 667, 667, 667, 667, 667, 667, 667, 
667, 667, 667, 667, 667, 667, 667, 667, 667, 667, 667, 667, 667, 667, 667, 667, 667, 667, 667, 667, 667, 667, 667, 667, 667, 667, 667, 667, 667, 667, 667, 667, 667, 667, 667, 667, 667, 667, 667, 667, 667, 667, 667, 667, 667, 667, 667, 667, 667, 667, 667, 667, 667, 667, 667, 667, 667, 667, 667, 667, 667, 667, 667, 667, 667, 667, 667, 667, 667, 667, 667, 667, 667, 667, 667, 667, 667, 667, 667, 667, 667, 667, 667, 667, 667, 667, 667, 667, 667, 667, 667, 667, 667, 667, 667, 667, 667, 667, 667, 667, 667, 667, 667, 667, 667, 667, 667, 667, 667, 667, 667, 667, 667, 667, 667, 667, 667, 667, 667, 667, 667, 667, 667, 667, 667, 667, 667, 667, 667, 667, 667, 667, 667, 667, 667, 667, 667, 667, 667, 667, 667, 667, 667, 667, 667, 667, 667, 667, 667, 667, 667, 667, 667, 667, 667, 667, 667, 667, 667, 667, 667, 667, 667, 667, 667, 667, 667, 667, 667, 667, 667, 667, 667, 667, 667, 667, 667, 667, 667, 667, 667, 667, 667, 667, 667, 667, 667, 667, 667, 667, 667, 667, 667, 667, 667, 667, 667, 667, 667, 667, 667, 667, 667, 667, 667, 667, 667, 667, 667, 667, 667, 667, 667, 667, 667, 667, 667, 667, 667, 667, 667, 667, 667, 667, 667, 667, 667, 667, 667, 667, 667, 667, 667, 667, 667, 667, 667, 667, 667, 667, 667, 667, 667, 667, 667, 667, 667, 667, 667, 667, 667, 667, 667, 667, 667, 667, 667, 667, 667, 667, 667, 667, 667, 667, 667, 667, 667, 667, 667, 667, 667, 667, 667, 667, 667, 667, 667, 667, 667, 667, 667, 667, 667, 667, 667, 667, 667, 667, 667, 667, 667, 667, 667, 667, 667, 667, 667, 667, 667, 667, 667, 667, 667, 667, 667, 667, 667, 667, 667, 667, 667, 667, 667, 667, 667, 667, 667, 667, 667, 667, 667, 667, 667, 667, 667, 667, 667, 667, 667, 667, 667, 667, 667, 667, 667, 667, 667, 667, 667, 667, 667, 667, 667, 667, 667, 667, 667, 667, 667, 667, 667, 667, 667, 667, 667, 667, 667, 667, 667, 667, 667, 667, 667, 667, 667, 667, 667, 667, 667, 667}, + // 275 + {666, 666, 666, 666, 666, 666, 666, 666, 666, 666, 666, 666, 666, 666, 666, 666, 666, 666, 666, 666, 666, 666, 666, 666, 666, 666, 666, 666, 666, 666, 666, 666, 666, 666, 666, 666, 666, 666, 666, 666, 666, 666, 666, 666, 666, 666, 666, 666, 666, 666, 666, 666, 666, 666, 666, 666, 666, 666, 666, 666, 666, 666, 666, 666, 666, 666, 666, 666, 666, 666, 666, 666, 666, 666, 666, 666, 666, 666, 666, 666, 666, 666, 666, 666, 666, 666, 666, 666, 666, 666, 666, 666, 666, 666, 666, 666, 666, 666, 666, 666, 666, 666, 666, 666, 666, 666, 666, 666, 666, 666, 666, 666, 666, 666, 666, 666, 666, 666, 666, 666, 666, 666, 666, 666, 666, 666, 666, 666, 666, 666, 666, 666, 666, 666, 666, 666, 666, 666, 666, 666, 666, 666, 666, 666, 666, 666, 666, 666, 666, 666, 666, 666, 666, 666, 666, 666, 666, 666, 666, 666, 666, 666, 666, 666, 666, 666, 666, 666, 666, 666, 666, 666, 666, 666, 666, 666, 666, 666, 666, 666, 666, 666, 666, 666, 666, 666, 666, 666, 666, 666, 666, 666, 666, 666, 666, 666, 666, 666, 666, 666, 666, 666, 666, 666, 666, 666, 666, 666, 666, 666, 666, 666, 666, 666, 666, 666, 666, 666, 666, 666, 666, 666, 666, 666, 666, 666, 666, 666, 666, 666, 666, 666, 666, 666, 666, 666, 666, 666, 666, 666, 666, 666, 666, 666, 666, 666, 666, 666, 666, 666, 666, 666, 666, 666, 666, 666, 666, 666, 666, 666, 666, 666, 666, 666, 666, 666, 666, 666, 666, 666, 666, 666, 666, 666, 666, 666, 666, 666, 666, 666, 666, 666, 666, 666, 666, 666, 666, 666, 666, 666, 666, 666, 666, 666, 666, 666, 666, 666, 666, 666, 666, 666, 666, 666, 666, 666, 666, 666, 666, 666, 666, 666, 666, 666, 666, 666, 666, 666, 666, 666, 666, 666, 666, 666, 666, 666, 666, 666, 666, 666, 666, 666, 666, 666, 666, 666, 666, 666, 
666, 666, 666, 666, 666, 666, 666, 666, 666, 666, 666, 666, 666, 666, 666, 666, 666, 666, 666, 666, 666, 666, 666, 666, 666, 666, 666, 666, 666, 666, 666, 666, 666, 666, 666, 666, 666, 666, 666, 666, 666, 666, 666, 666, 666, 666, 666, 666, 666, 666, 666, 666, 666, 666, 666, 666, 666, 666, 666, 666, 666, 666, 666, 666, 666, 666, 666, 666, 666, 666, 666, 666, 666, 666, 666, 666, 666, 666, 666, 666, 666, 666, 666, 666, 666, 666, 666, 666, 666, 666, 666, 666, 666, 666, 666, 666, 666, 666, 666, 666, 666, 666, 666, 666, 666, 666, 666, 666, 666, 666, 666, 666, 666, 666, 666, 666, 666, 666, 666, 666, 666, 666, 666, 666, 666, 666, 666, 666, 666, 666, 666, 666, 666, 666, 666, 666, 666, 666, 666, 666, 666, 666, 666, 666, 666, 666, 666, 666, 666, 666, 666, 666, 666, 666, 666, 666, 666, 666, 666, 666, 666, 666, 666, 666, 666, 666, 666, 666, 666, 666, 666, 666, 666, 666, 666, 666, 666, 666, 666, 666, 666, 666, 666, 666, 666}, + {665, 665, 665, 665, 665, 665, 665, 665, 665, 665, 665, 665, 665, 665, 665, 665, 665, 665, 665, 665, 665, 665, 665, 665, 665, 665, 665, 665, 665, 665, 665, 665, 665, 665, 665, 665, 665, 665, 665, 665, 665, 665, 665, 665, 665, 665, 665, 665, 665, 665, 665, 665, 665, 665, 665, 665, 665, 665, 665, 665, 665, 665, 665, 665, 665, 665, 665, 665, 665, 665, 665, 665, 665, 665, 665, 665, 665, 665, 665, 665, 665, 665, 665, 665, 665, 665, 665, 665, 665, 665, 665, 665, 665, 665, 665, 665, 665, 665, 665, 665, 665, 665, 665, 665, 665, 665, 665, 665, 665, 665, 665, 665, 665, 665, 665, 665, 665, 665, 665, 665, 665, 665, 665, 665, 665, 665, 665, 665, 665, 665, 665, 665, 665, 665, 665, 665, 665, 665, 665, 665, 665, 665, 665, 665, 665, 665, 665, 665, 665, 665, 665, 665, 665, 665, 665, 665, 665, 665, 665, 665, 665, 665, 665, 665, 665, 665, 665, 665, 665, 665, 665, 665, 665, 665, 665, 665, 665, 665, 665, 665, 665, 665, 665, 665, 665, 665, 665, 665, 665, 665, 665, 665, 665, 665, 665, 665, 665, 665, 665, 665, 665, 665, 665, 665, 665, 665, 665, 665, 665, 665, 665, 665, 665, 665, 665, 665, 665, 665, 665, 665, 665, 665, 665, 665, 665, 665, 665, 665, 665, 665, 665, 665, 665, 665, 665, 665, 665, 665, 665, 665, 665, 665, 665, 665, 665, 665, 665, 665, 665, 665, 665, 665, 665, 665, 665, 665, 665, 665, 665, 665, 665, 665, 665, 665, 665, 665, 665, 665, 665, 665, 665, 665, 665, 665, 665, 665, 665, 665, 665, 665, 665, 665, 665, 665, 665, 665, 665, 665, 665, 665, 665, 665, 665, 665, 665, 665, 665, 665, 665, 665, 665, 665, 665, 665, 665, 665, 665, 665, 665, 665, 665, 665, 665, 665, 665, 665, 665, 665, 665, 665, 665, 665, 665, 665, 665, 665, 665, 665, 665, 665, 665, 665, 665, 665, 665, 665, 665, 665, 665, 665, 665, 665, 665, 665, 665, 665, 665, 665, 665, 665, 665, 665, 665, 665, 665, 665, 665, 665, 665, 665, 665, 665, 665, 665, 665, 665, 665, 665, 665, 665, 665, 665, 665, 665, 665, 665, 665, 665, 665, 665, 665, 665, 665, 665, 665, 665, 665, 665, 665, 665, 665, 665, 665, 665, 665, 665, 665, 665, 665, 665, 665, 665, 665, 665, 665, 665, 665, 665, 665, 665, 665, 665, 665, 665, 665, 665, 665, 665, 665, 665, 665, 665, 665, 665, 665, 665, 665, 665, 665, 665, 665, 665, 665, 665, 665, 665, 665, 665, 665, 665, 665, 665, 665, 665, 665, 665, 665, 665, 665, 665, 665, 665, 665, 665, 665, 665, 665, 665, 665, 665, 665, 665, 665, 665, 665, 665, 665, 665, 665, 665, 665, 665, 665, 665, 665, 665, 665, 665, 665, 665, 665, 665, 665, 665, 665, 665, 665, 665, 665, 665, 665, 665, 665, 665, 665, 665, 665, 665, 665, 665, 665, 665, 665, 665, 665, 665, 665, 665, 665, 665, 665, 665, 665, 665, 665, 665, 665, 665, 665, 665, 665, 665, 665}, + {664, 
664, 664, 664, 664, 664, 664, 664, 664, 664, 664, 664, 664, 664, 664, 664, 664, 664, 664, 664, 664, 664, 664, 664, 664, 664, 664, 664, 664, 664, 664, 664, 664, 664, 664, 664, 664, 664, 664, 664, 664, 664, 664, 664, 664, 664, 664, 664, 664, 664, 664, 664, 664, 664, 664, 664, 664, 664, 664, 664, 664, 664, 664, 664, 664, 664, 664, 664, 664, 664, 664, 664, 664, 664, 664, 664, 664, 664, 664, 664, 664, 664, 664, 664, 664, 664, 664, 664, 664, 664, 664, 664, 664, 664, 664, 664, 664, 664, 664, 664, 664, 664, 664, 664, 664, 664, 664, 664, 664, 664, 664, 664, 664, 664, 664, 664, 664, 664, 664, 664, 664, 664, 664, 664, 664, 664, 664, 664, 664, 664, 664, 664, 664, 664, 664, 664, 664, 664, 664, 664, 664, 664, 664, 664, 664, 664, 664, 664, 664, 664, 664, 664, 664, 664, 664, 664, 664, 664, 664, 664, 664, 664, 664, 664, 664, 664, 664, 664, 664, 664, 664, 664, 664, 664, 664, 664, 664, 664, 664, 664, 664, 664, 664, 664, 664, 664, 664, 664, 664, 664, 664, 664, 664, 664, 664, 664, 664, 664, 664, 664, 664, 664, 664, 664, 664, 664, 664, 664, 664, 664, 664, 664, 664, 664, 664, 664, 664, 664, 664, 664, 664, 664, 664, 664, 664, 664, 664, 664, 664, 664, 664, 664, 664, 664, 664, 664, 664, 664, 664, 664, 664, 664, 664, 664, 664, 664, 664, 664, 664, 664, 664, 664, 664, 664, 664, 664, 664, 664, 664, 664, 664, 664, 664, 664, 664, 664, 664, 664, 664, 664, 664, 664, 664, 664, 664, 664, 664, 664, 664, 664, 664, 664, 664, 664, 664, 664, 664, 664, 664, 664, 664, 664, 664, 664, 664, 664, 664, 664, 664, 664, 664, 664, 664, 664, 664, 664, 664, 664, 664, 664, 664, 664, 664, 664, 664, 664, 664, 664, 664, 664, 664, 664, 664, 664, 664, 664, 664, 664, 664, 664, 664, 664, 664, 664, 664, 664, 664, 664, 664, 664, 664, 664, 664, 664, 664, 664, 664, 664, 664, 664, 664, 664, 664, 664, 664, 664, 664, 664, 664, 664, 664, 664, 664, 664, 664, 664, 664, 664, 664, 664, 664, 664, 664, 664, 664, 664, 664, 664, 664, 664, 664, 664, 664, 664, 664, 664, 664, 664, 664, 664, 664, 664, 664, 664, 664, 664, 664, 664, 664, 664, 664, 664, 664, 664, 664, 664, 664, 664, 664, 664, 664, 664, 664, 664, 664, 664, 664, 664, 664, 664, 664, 664, 664, 664, 664, 664, 664, 664, 664, 664, 664, 664, 664, 664, 664, 664, 664, 664, 664, 664, 664, 664, 664, 664, 664, 664, 664, 664, 664, 664, 664, 664, 664, 664, 664, 664, 664, 664, 664, 664, 664, 664, 664, 664, 664, 664, 664, 664, 664, 664, 664, 664, 664, 664, 664, 664, 664, 664, 664, 664, 664, 664, 664, 664, 664, 664, 664, 664, 664, 664, 664, 664, 664, 664, 664, 664, 664, 664, 664, 664, 664, 664, 664, 664, 664, 664, 664, 664, 664, 664, 664, 664, 664, 664, 664, 664, 664, 664, 664, 664, 664, 664, 664}, + {663, 663, 663, 663, 663, 663, 663, 663, 663, 663, 663, 663, 663, 663, 663, 663, 663, 663, 663, 663, 663, 663, 663, 663, 663, 663, 663, 663, 663, 663, 663, 663, 663, 663, 663, 663, 663, 663, 663, 663, 663, 663, 663, 663, 663, 663, 663, 663, 663, 663, 663, 663, 663, 663, 663, 663, 663, 663, 663, 663, 663, 663, 663, 663, 663, 663, 663, 663, 663, 663, 663, 663, 663, 663, 663, 663, 663, 663, 663, 663, 663, 663, 663, 663, 663, 663, 663, 663, 663, 663, 663, 663, 663, 663, 663, 663, 663, 663, 663, 663, 663, 663, 663, 663, 663, 663, 663, 663, 663, 663, 663, 663, 663, 663, 663, 663, 663, 663, 663, 663, 663, 663, 663, 663, 663, 663, 663, 663, 663, 663, 663, 663, 663, 663, 663, 663, 663, 663, 663, 663, 663, 663, 663, 663, 663, 663, 663, 663, 663, 663, 663, 663, 663, 663, 663, 663, 663, 663, 663, 663, 663, 663, 663, 663, 663, 663, 663, 663, 663, 663, 663, 663, 663, 663, 663, 663, 663, 663, 663, 663, 663, 663, 663, 663, 663, 663, 663, 663, 
663, 663, 663, 663, 663, 663, 663, 663, 663, 663, 663, 663, 663, 663, 663, 663, 663, 663, 663, 663, 663, 663, 663, 663, 663, 663, 663, 663, 663, 663, 663, 663, 663, 663, 663, 663, 663, 663, 663, 663, 663, 663, 663, 663, 663, 663, 663, 663, 663, 663, 663, 663, 663, 663, 663, 663, 663, 663, 663, 663, 663, 663, 663, 663, 663, 663, 663, 663, 663, 663, 663, 663, 663, 663, 663, 663, 663, 663, 663, 663, 663, 663, 663, 663, 663, 663, 663, 663, 663, 663, 663, 663, 663, 663, 663, 663, 663, 663, 663, 663, 663, 663, 663, 663, 663, 663, 663, 663, 663, 663, 663, 663, 663, 663, 663, 663, 663, 663, 663, 663, 663, 663, 663, 663, 663, 663, 663, 663, 663, 663, 663, 663, 663, 663, 663, 663, 663, 663, 663, 663, 663, 663, 663, 663, 663, 663, 663, 663, 663, 663, 663, 663, 663, 663, 663, 663, 663, 663, 663, 663, 663, 663, 663, 663, 663, 663, 663, 663, 663, 663, 663, 663, 663, 663, 663, 663, 663, 663, 663, 663, 663, 663, 663, 663, 663, 663, 663, 663, 663, 663, 663, 663, 663, 663, 663, 663, 663, 663, 663, 663, 663, 663, 663, 663, 663, 663, 663, 663, 663, 663, 663, 663, 663, 663, 663, 663, 663, 663, 663, 663, 663, 663, 663, 663, 663, 663, 663, 663, 663, 663, 663, 663, 663, 663, 663, 663, 663, 663, 663, 663, 663, 663, 663, 663, 663, 663, 663, 663, 663, 663, 663, 663, 663, 663, 663, 663, 663, 663, 663, 663, 663, 663, 663, 663, 663, 663, 663, 663, 663, 663, 663, 663, 663, 663, 663, 663, 663, 663, 663, 663, 663, 663, 663, 663, 663, 663, 663, 663, 663, 663, 663, 663, 663, 663, 663, 663, 663, 663, 663, 663, 663, 663, 663, 663, 663, 663, 663, 663, 663, 663, 663, 663, 663, 663, 663, 663, 663, 663, 663, 663, 663, 663, 663, 663, 663, 663, 663, 663, 663, 663, 663, 663, 663, 663, 663}, + {662, 662, 662, 662, 662, 662, 662, 662, 662, 662, 662, 662, 662, 662, 662, 662, 662, 662, 662, 662, 662, 662, 662, 662, 662, 662, 662, 662, 662, 662, 662, 662, 662, 662, 662, 662, 662, 662, 662, 662, 662, 662, 662, 662, 662, 662, 662, 662, 662, 662, 662, 662, 662, 662, 662, 662, 662, 662, 662, 662, 662, 662, 662, 662, 662, 662, 662, 662, 662, 662, 662, 662, 662, 662, 662, 662, 662, 662, 662, 662, 662, 662, 662, 662, 662, 662, 662, 662, 662, 662, 662, 662, 662, 662, 662, 662, 662, 662, 662, 662, 662, 662, 662, 662, 662, 662, 662, 662, 662, 662, 662, 662, 662, 662, 662, 662, 662, 662, 662, 662, 662, 662, 662, 662, 662, 662, 662, 662, 662, 662, 662, 662, 662, 662, 662, 662, 662, 662, 662, 662, 662, 662, 662, 662, 662, 662, 662, 662, 662, 662, 662, 662, 662, 662, 662, 662, 662, 662, 662, 662, 662, 662, 662, 662, 662, 662, 662, 662, 662, 662, 662, 662, 662, 662, 662, 662, 662, 662, 662, 662, 662, 662, 662, 662, 662, 662, 662, 662, 662, 662, 662, 662, 662, 662, 662, 662, 662, 662, 662, 662, 662, 662, 662, 662, 662, 662, 662, 662, 662, 662, 662, 662, 662, 662, 662, 662, 662, 662, 662, 662, 662, 662, 662, 662, 662, 662, 662, 662, 662, 662, 662, 662, 662, 662, 662, 662, 662, 662, 662, 662, 662, 662, 662, 662, 662, 662, 662, 662, 662, 662, 662, 662, 662, 662, 662, 662, 662, 662, 662, 662, 662, 662, 662, 662, 662, 662, 662, 662, 662, 662, 662, 662, 662, 662, 662, 662, 662, 662, 662, 662, 662, 662, 662, 662, 662, 662, 662, 662, 662, 662, 662, 662, 662, 662, 662, 662, 662, 662, 662, 662, 662, 662, 662, 662, 662, 662, 662, 662, 662, 662, 662, 662, 662, 662, 662, 662, 662, 662, 662, 662, 662, 662, 662, 662, 662, 662, 662, 662, 662, 662, 662, 662, 662, 662, 662, 662, 662, 662, 662, 662, 662, 662, 662, 662, 662, 662, 662, 662, 662, 662, 662, 662, 662, 662, 662, 662, 662, 662, 662, 662, 662, 662, 662, 662, 662, 662, 662, 662, 662, 662, 662, 662, 662, 662, 662, 
662, 662, 662, 662, 662, 662, 662, 662, 662, 662, 662, 662, 662, 662, 662, 662, 662, 662, 662, 662, 662, 662, 662, 662, 662, 662, 662, 662, 662, 662, 662, 662, 662, 662, 662, 662, 662, 662, 662, 662, 662, 662, 662, 662, 662, 662, 662, 662, 662, 662, 662, 662, 662, 662, 662, 662, 662, 662, 662, 662, 662, 662, 662, 662, 662, 662, 662, 662, 662, 662, 662, 662, 662, 662, 662, 662, 662, 662, 662, 662, 662, 662, 662, 662, 662, 662, 662, 662, 662, 662, 662, 662, 662, 662, 662, 662, 662, 662, 662, 662, 662, 662, 662, 662, 662, 662, 662, 662, 662, 662, 662, 662, 662, 662, 662, 662, 662, 662, 662, 662, 662, 662, 662, 662, 662, 662, 662, 662, 662, 662, 662, 662, 662, 662, 662, 662, 662, 662, 662, 662, 662, 662, 662, 662, 662, 662, 662, 662}, + // 280 + {661, 661, 661, 661, 661, 661, 661, 661, 661, 661, 661, 661, 661, 661, 661, 661, 661, 661, 661, 661, 661, 661, 661, 661, 661, 661, 661, 661, 661, 661, 661, 661, 661, 661, 661, 661, 661, 661, 661, 661, 661, 661, 661, 661, 661, 661, 661, 661, 661, 661, 661, 661, 661, 661, 661, 661, 661, 661, 661, 661, 661, 661, 661, 661, 661, 661, 661, 661, 661, 661, 661, 661, 661, 661, 661, 661, 661, 661, 661, 661, 661, 661, 661, 661, 661, 661, 661, 661, 661, 661, 661, 661, 661, 661, 661, 661, 661, 661, 661, 661, 661, 661, 661, 661, 661, 661, 661, 661, 661, 661, 661, 661, 661, 661, 661, 661, 661, 661, 661, 661, 661, 661, 661, 661, 661, 661, 661, 661, 661, 661, 661, 661, 661, 661, 661, 661, 661, 661, 661, 661, 661, 661, 661, 661, 661, 661, 661, 661, 661, 661, 661, 661, 661, 661, 661, 661, 661, 661, 661, 661, 661, 661, 661, 661, 661, 661, 661, 661, 661, 661, 661, 661, 661, 661, 661, 661, 661, 661, 661, 661, 661, 661, 661, 661, 661, 661, 661, 661, 661, 661, 661, 661, 661, 661, 661, 661, 661, 661, 661, 661, 661, 661, 661, 661, 661, 661, 661, 661, 661, 661, 661, 661, 661, 661, 661, 661, 661, 661, 661, 661, 661, 661, 661, 661, 661, 661, 661, 661, 661, 661, 661, 661, 661, 661, 661, 661, 661, 661, 661, 661, 661, 661, 661, 661, 661, 661, 661, 661, 661, 661, 661, 661, 661, 661, 661, 661, 661, 661, 661, 661, 661, 661, 661, 661, 661, 661, 661, 661, 661, 661, 661, 661, 661, 661, 661, 661, 661, 661, 661, 661, 661, 661, 661, 661, 661, 661, 661, 661, 661, 661, 661, 661, 661, 661, 661, 661, 661, 661, 661, 661, 661, 661, 661, 661, 661, 661, 661, 661, 661, 661, 661, 661, 661, 661, 661, 661, 661, 661, 661, 661, 661, 661, 661, 661, 661, 661, 661, 661, 661, 661, 661, 661, 661, 661, 661, 661, 661, 661, 661, 661, 661, 661, 661, 661, 661, 661, 661, 661, 661, 661, 661, 661, 661, 661, 661, 661, 661, 661, 661, 661, 661, 661, 661, 661, 661, 661, 661, 661, 661, 661, 661, 661, 661, 661, 661, 661, 661, 661, 661, 661, 661, 661, 661, 661, 661, 661, 661, 661, 661, 661, 661, 661, 661, 661, 661, 661, 661, 661, 661, 661, 661, 661, 661, 661, 661, 661, 661, 661, 661, 661, 661, 661, 661, 661, 661, 661, 661, 661, 661, 661, 661, 661, 661, 661, 661, 661, 661, 661, 661, 661, 661, 661, 661, 661, 661, 661, 661, 661, 661, 661, 661, 661, 661, 661, 661, 661, 661, 661, 661, 661, 661, 661, 661, 661, 661, 661, 661, 661, 661, 661, 661, 661, 661, 661, 661, 661, 661, 661, 661, 661, 661, 661, 661, 661, 661, 661, 661, 661, 661, 661, 661, 661, 661, 661, 661, 661, 661, 661, 661, 661, 661, 661, 661, 661, 661, 661, 661, 661, 661, 661, 661, 661, 661, 661, 661, 661, 661, 661, 661, 661, 661, 661, 661, 661, 661, 661, 661, 661, 661, 661, 661, 661, 661}, + {660, 660, 660, 660, 660, 660, 660, 660, 660, 660, 660, 660, 660, 660, 660, 660, 660, 660, 660, 660, 660, 660, 660, 660, 660, 660, 660, 660, 660, 660, 660, 660, 660, 660, 660, 660, 
660, 660, 660, 660, 660, 660, 660, 660, 660, 660, 660, 660, 660, 660, 660, 660, 660, 660, 660, 660, 660, 660, 660, 660, 660, 660, 660, 660, 660, 660, 660, 660, 660, 660, 660, 660, 660, 660, 660, 660, 660, 660, 660, 660, 660, 660, 660, 660, 660, 660, 660, 660, 660, 660, 660, 660, 660, 660, 660, 660, 660, 660, 660, 660, 660, 660, 660, 660, 660, 660, 660, 660, 660, 660, 660, 660, 660, 660, 660, 660, 660, 660, 660, 660, 660, 660, 660, 660, 660, 660, 660, 660, 660, 660, 660, 660, 660, 660, 660, 660, 660, 660, 660, 660, 660, 660, 660, 660, 660, 660, 660, 660, 660, 660, 660, 660, 660, 660, 660, 660, 660, 660, 660, 660, 660, 660, 660, 660, 660, 660, 660, 660, 660, 660, 660, 660, 660, 660, 660, 660, 660, 660, 660, 660, 660, 660, 660, 660, 660, 660, 660, 660, 660, 660, 660, 660, 660, 660, 660, 660, 660, 660, 660, 660, 660, 660, 660, 660, 660, 660, 660, 660, 660, 660, 660, 660, 660, 660, 660, 660, 660, 660, 660, 660, 660, 660, 660, 660, 660, 660, 660, 660, 660, 660, 660, 660, 660, 660, 660, 660, 660, 660, 660, 660, 660, 660, 660, 660, 660, 660, 660, 660, 660, 660, 660, 660, 660, 660, 660, 660, 660, 660, 660, 660, 660, 660, 660, 660, 660, 660, 660, 660, 660, 660, 660, 660, 660, 660, 660, 660, 660, 660, 660, 660, 660, 660, 660, 660, 660, 660, 660, 660, 660, 660, 660, 660, 660, 660, 660, 660, 660, 660, 660, 660, 660, 660, 660, 660, 660, 660, 660, 660, 660, 660, 660, 660, 660, 660, 660, 660, 660, 660, 660, 660, 660, 660, 660, 660, 660, 660, 660, 660, 660, 660, 660, 660, 660, 660, 660, 660, 660, 660, 660, 660, 660, 660, 660, 660, 660, 660, 660, 660, 660, 660, 660, 660, 660, 660, 660, 660, 660, 660, 660, 660, 660, 660, 660, 660, 660, 660, 660, 660, 660, 660, 660, 660, 660, 660, 660, 660, 660, 660, 660, 660, 660, 660, 660, 660, 660, 660, 660, 660, 660, 660, 660, 660, 660, 660, 660, 660, 660, 660, 660, 660, 660, 660, 660, 660, 660, 660, 660, 660, 660, 660, 660, 660, 660, 660, 660, 660, 660, 660, 660, 660, 660, 660, 660, 660, 660, 660, 660, 660, 660, 660, 660, 660, 660, 660, 660, 660, 660, 660, 660, 660, 660, 660, 660, 660, 660, 660, 660, 660, 660, 660, 660, 660, 660, 660, 660, 660, 660, 660, 660, 660, 660, 660, 660, 660, 660, 660, 660, 660, 660, 660, 660, 660, 660, 660, 660, 660, 660, 660, 660, 660, 660, 660, 660, 660, 660, 660, 660, 660, 660, 660, 660, 660, 660, 660, 660, 660, 660, 660, 660, 660, 660, 660, 660, 660, 660, 660, 660, 660, 660, 660, 660, 660, 660, 660, 660, 660, 660, 660, 660, 660, 660, 660, 660}, + {659, 659, 659, 659, 659, 659, 659, 659, 659, 659, 659, 659, 659, 659, 659, 659, 659, 659, 659, 659, 659, 659, 659, 659, 659, 659, 659, 659, 659, 659, 659, 659, 659, 659, 659, 659, 659, 659, 659, 659, 659, 659, 659, 659, 659, 659, 659, 659, 659, 659, 659, 659, 659, 659, 659, 659, 659, 659, 659, 659, 659, 659, 659, 659, 659, 659, 659, 659, 659, 659, 659, 659, 659, 659, 659, 659, 659, 659, 659, 659, 659, 659, 659, 659, 659, 659, 659, 659, 659, 659, 659, 659, 659, 659, 659, 659, 659, 659, 659, 659, 659, 659, 659, 659, 659, 659, 659, 659, 659, 659, 659, 659, 659, 659, 659, 659, 659, 659, 659, 659, 659, 659, 659, 659, 659, 659, 659, 659, 659, 659, 659, 659, 659, 659, 659, 659, 659, 659, 659, 659, 659, 659, 659, 659, 659, 659, 659, 659, 659, 659, 659, 659, 659, 659, 659, 659, 659, 659, 659, 659, 659, 659, 659, 659, 659, 659, 659, 659, 659, 659, 659, 659, 659, 659, 659, 659, 659, 659, 659, 659, 659, 659, 659, 659, 659, 659, 659, 659, 659, 659, 659, 659, 659, 659, 659, 659, 659, 659, 659, 659, 659, 659, 659, 659, 659, 659, 659, 659, 659, 659, 659, 659, 659, 659, 659, 659, 659, 659, 659, 659, 659, 659, 659, 
659, 659, 659, 659, 659, 659, 659, 659, 659, 659, 659, 659, 659, 659, 659, 659, 659, 659, 659, 659, 659, 659, 659, 659, 659, 659, 659, 659, 659, 659, 659, 659, 659, 659, 659, 659, 659, 659, 659, 659, 659, 659, 659, 659, 659, 659, 659, 659, 659, 659, 659, 659, 659, 659, 659, 659, 659, 659, 659, 659, 659, 659, 659, 659, 659, 659, 659, 659, 659, 659, 659, 659, 659, 659, 659, 659, 659, 659, 659, 659, 659, 659, 659, 659, 659, 659, 659, 659, 659, 659, 659, 659, 659, 659, 659, 659, 659, 659, 659, 659, 659, 659, 659, 659, 659, 659, 659, 659, 659, 659, 659, 659, 659, 659, 659, 659, 659, 659, 659, 659, 659, 659, 659, 659, 659, 659, 659, 659, 659, 659, 659, 659, 659, 659, 659, 659, 659, 659, 659, 659, 659, 659, 659, 659, 659, 659, 659, 659, 659, 659, 659, 659, 659, 659, 659, 659, 659, 659, 659, 659, 659, 659, 659, 659, 659, 659, 659, 659, 659, 659, 659, 659, 659, 659, 659, 659, 659, 659, 659, 659, 659, 659, 659, 659, 659, 659, 659, 659, 659, 659, 659, 659, 659, 659, 659, 659, 659, 659, 659, 659, 659, 659, 659, 659, 659, 659, 659, 659, 659, 659, 659, 659, 659, 659, 659, 659, 659, 659, 659, 659, 659, 659, 659, 659, 659, 659, 659, 659, 659, 659, 659, 659, 659, 659, 659, 659, 659, 659, 659, 659, 659, 659, 659, 659, 659, 659, 659, 659, 659, 659, 659, 659, 659, 659, 659, 659, 659, 659, 659, 659, 659, 659, 659, 659, 659, 659, 659, 659, 659, 659, 659, 659, 659, 659, 659, 659, 659, 659, 659, 659, 659, 659, 659, 659, 659, 659, 659, 659, 659, 659, 659, 659, 659, 659, 659, 659, 659, 659, 659, 659}, + {658, 658, 658, 658, 658, 658, 658, 658, 658, 658, 658, 658, 658, 658, 658, 658, 658, 658, 658, 658, 658, 658, 658, 658, 658, 658, 658, 658, 658, 658, 658, 658, 658, 658, 658, 658, 658, 658, 658, 658, 658, 658, 658, 658, 658, 658, 658, 658, 658, 658, 658, 658, 658, 658, 658, 658, 658, 658, 658, 658, 658, 658, 658, 658, 658, 658, 658, 658, 658, 658, 658, 658, 658, 658, 658, 658, 658, 658, 658, 658, 658, 658, 658, 658, 658, 658, 658, 658, 658, 658, 658, 658, 658, 658, 658, 658, 658, 658, 658, 658, 658, 658, 658, 658, 658, 658, 658, 658, 658, 658, 658, 658, 658, 658, 658, 658, 658, 658, 658, 658, 658, 658, 658, 658, 658, 658, 658, 658, 658, 658, 658, 658, 658, 658, 658, 658, 658, 658, 658, 658, 658, 658, 658, 658, 658, 658, 658, 658, 658, 658, 658, 658, 658, 658, 658, 658, 658, 658, 658, 658, 658, 658, 658, 658, 658, 658, 658, 658, 658, 658, 658, 658, 658, 658, 658, 658, 658, 658, 658, 658, 658, 658, 658, 658, 658, 658, 658, 658, 658, 658, 658, 658, 658, 658, 658, 658, 658, 658, 658, 658, 658, 658, 658, 658, 658, 658, 658, 658, 658, 658, 658, 658, 658, 658, 658, 658, 658, 658, 658, 658, 658, 658, 658, 658, 658, 658, 658, 658, 658, 658, 658, 658, 658, 658, 658, 658, 658, 658, 658, 658, 658, 658, 658, 658, 658, 658, 658, 658, 658, 658, 658, 658, 658, 658, 658, 658, 658, 658, 658, 658, 658, 658, 658, 658, 658, 658, 658, 658, 658, 658, 658, 658, 658, 658, 658, 658, 658, 658, 658, 658, 658, 658, 658, 658, 658, 658, 658, 658, 658, 658, 658, 658, 658, 658, 658, 658, 658, 658, 658, 658, 658, 658, 658, 658, 658, 658, 658, 658, 658, 658, 658, 658, 658, 658, 658, 658, 658, 658, 658, 658, 658, 658, 658, 658, 658, 658, 658, 658, 658, 658, 658, 658, 658, 658, 658, 658, 658, 658, 658, 658, 658, 658, 658, 658, 658, 658, 658, 658, 658, 658, 658, 658, 658, 658, 658, 658, 658, 658, 658, 658, 658, 658, 658, 658, 658, 658, 658, 658, 658, 658, 658, 658, 658, 658, 658, 658, 658, 658, 658, 658, 658, 658, 658, 658, 658, 658, 658, 658, 658, 658, 658, 658, 658, 658, 658, 658, 658, 658, 658, 658, 658, 658, 658, 658, 658, 658, 658, 658, 658, 658, 
658, 658, 658, 658, 658, 658, 658, 658, 658, 658, 658, 658, 658, 658, 658, 658, 658, 658, 658, 658, 658, 658, 658, 658, 658, 658, 658, 658, 658, 658, 658, 658, 658, 658, 658, 658, 658, 658, 658, 658, 658, 658, 658, 658, 658, 658, 658, 658, 658, 658, 658, 658, 658, 658, 658, 658, 658, 658, 658, 658, 658, 658, 658, 658, 658, 658, 658, 658, 658, 658, 658, 658, 658, 658, 658, 658, 658, 658, 658, 658, 658, 658, 658, 658, 658, 658, 658, 658, 658, 658, 658, 658, 658, 658, 658, 658, 658, 658, 658, 658, 658, 658, 658, 658, 658, 658, 658, 658, 658, 658, 658, 658, 658}, + {657, 657, 657, 657, 657, 657, 657, 657, 657, 657, 657, 657, 657, 657, 657, 657, 657, 657, 657, 657, 657, 657, 657, 657, 657, 657, 657, 657, 657, 657, 657, 657, 657, 657, 657, 657, 657, 657, 657, 657, 657, 657, 657, 657, 657, 657, 657, 657, 657, 657, 657, 657, 657, 657, 657, 657, 657, 657, 657, 657, 657, 657, 657, 657, 657, 657, 657, 657, 657, 657, 657, 657, 657, 657, 657, 657, 657, 657, 657, 657, 657, 657, 657, 657, 657, 657, 657, 657, 657, 657, 657, 657, 657, 657, 657, 657, 657, 657, 657, 657, 657, 657, 657, 657, 657, 657, 657, 657, 657, 657, 657, 657, 657, 657, 657, 657, 657, 657, 657, 657, 657, 657, 657, 657, 657, 657, 657, 657, 657, 657, 657, 657, 657, 657, 657, 657, 657, 657, 657, 657, 657, 657, 657, 657, 657, 657, 657, 657, 657, 657, 657, 657, 657, 657, 657, 657, 657, 657, 657, 657, 657, 657, 657, 657, 657, 657, 657, 657, 657, 657, 657, 657, 657, 657, 657, 657, 657, 657, 657, 657, 657, 657, 657, 657, 657, 657, 657, 657, 657, 657, 657, 657, 657, 657, 657, 657, 657, 657, 657, 657, 657, 657, 657, 657, 657, 657, 657, 657, 657, 657, 657, 657, 657, 657, 657, 657, 657, 657, 657, 657, 657, 657, 657, 657, 657, 657, 657, 657, 657, 657, 657, 657, 657, 657, 657, 657, 657, 657, 657, 657, 657, 657, 657, 657, 657, 657, 657, 657, 657, 657, 657, 657, 657, 657, 657, 657, 657, 657, 657, 657, 657, 657, 657, 657, 657, 657, 657, 657, 657, 657, 657, 657, 657, 657, 657, 657, 657, 657, 657, 657, 657, 657, 657, 657, 657, 657, 657, 657, 657, 657, 657, 657, 657, 657, 657, 657, 657, 657, 657, 657, 657, 657, 657, 657, 657, 657, 657, 657, 657, 657, 657, 657, 657, 657, 657, 657, 657, 657, 657, 657, 657, 657, 657, 657, 657, 657, 657, 657, 657, 657, 657, 657, 657, 657, 657, 657, 657, 657, 657, 657, 657, 657, 657, 657, 657, 657, 657, 657, 657, 657, 657, 657, 657, 657, 657, 657, 657, 657, 657, 657, 657, 657, 657, 657, 657, 657, 657, 657, 657, 657, 657, 657, 657, 657, 657, 657, 657, 657, 657, 657, 657, 657, 657, 657, 657, 657, 657, 657, 657, 657, 657, 657, 657, 657, 657, 657, 657, 657, 657, 657, 657, 657, 657, 657, 657, 657, 657, 657, 657, 657, 657, 657, 657, 657, 657, 657, 657, 657, 657, 657, 657, 657, 657, 657, 657, 657, 657, 657, 657, 657, 657, 657, 657, 657, 657, 657, 657, 657, 657, 657, 657, 657, 657, 657, 657, 657, 657, 657, 657, 657, 657, 657, 657, 657, 657, 657, 657, 657, 657, 657, 657, 657, 657, 657, 657, 657, 657, 657, 657, 657, 657, 657, 657, 657, 657, 657, 657, 657, 657, 657, 657, 657, 657, 657, 657, 657, 657, 657, 657, 657, 657, 657, 657, 657, 657, 657, 657, 657, 657, 657, 657, 657, 657, 657, 657, 657, 657, 657, 657, 657, 657, 657, 657, 657, 657, 657, 657, 657, 657, 657, 657, 657, 657}, + // 285 + {656, 656, 656, 656, 656, 656, 656, 656, 656, 656, 656, 656, 656, 656, 656, 656, 656, 656, 656, 656, 656, 656, 656, 656, 656, 656, 656, 656, 656, 656, 656, 656, 656, 656, 656, 656, 656, 656, 656, 656, 656, 656, 656, 656, 656, 656, 656, 656, 656, 656, 656, 656, 656, 656, 656, 656, 656, 656, 656, 656, 656, 656, 656, 656, 656, 656, 656, 656, 656, 656, 656, 
656, 656, 656, 656, 656, 656, 656, 656, 656, 656, 656, 656, 656, 656, 656, 656, 656, 656, 656, 656, 656, 656, 656, 656, 656, 656, 656, 656, 656, 656, 656, 656, 656, 656, 656, 656, 656, 656, 656, 656, 656, 656, 656, 656, 656, 656, 656, 656, 656, 656, 656, 656, 656, 656, 656, 656, 656, 656, 656, 656, 656, 656, 656, 656, 656, 656, 656, 656, 656, 656, 656, 656, 656, 656, 656, 656, 656, 656, 656, 656, 656, 656, 656, 656, 656, 656, 656, 656, 656, 656, 656, 656, 656, 656, 656, 656, 656, 656, 656, 656, 656, 656, 656, 656, 656, 656, 656, 656, 656, 656, 656, 656, 656, 656, 656, 656, 656, 656, 656, 656, 656, 656, 656, 656, 656, 656, 656, 656, 656, 656, 656, 656, 656, 656, 656, 656, 656, 656, 656, 656, 656, 656, 656, 656, 656, 656, 656, 656, 656, 656, 656, 656, 656, 656, 656, 656, 656, 656, 656, 656, 656, 656, 656, 656, 656, 656, 656, 656, 656, 656, 656, 656, 656, 656, 656, 656, 656, 656, 656, 656, 656, 656, 656, 656, 656, 656, 656, 656, 656, 656, 656, 656, 656, 656, 656, 656, 656, 656, 656, 656, 656, 656, 656, 656, 656, 656, 656, 656, 656, 656, 656, 656, 656, 656, 656, 656, 656, 656, 656, 656, 656, 656, 656, 656, 656, 656, 656, 656, 656, 656, 656, 656, 656, 656, 656, 656, 656, 656, 656, 656, 656, 656, 656, 656, 656, 656, 656, 656, 656, 656, 656, 656, 656, 656, 656, 656, 656, 656, 656, 656, 656, 656, 656, 656, 656, 656, 656, 656, 656, 656, 656, 656, 656, 656, 656, 656, 656, 656, 656, 656, 656, 656, 656, 656, 656, 656, 656, 656, 656, 656, 656, 656, 656, 656, 656, 656, 656, 656, 656, 656, 656, 656, 656, 656, 656, 656, 656, 656, 656, 656, 656, 656, 656, 656, 656, 656, 656, 656, 656, 656, 656, 656, 656, 656, 656, 656, 656, 656, 656, 656, 656, 656, 656, 656, 656, 656, 656, 656, 656, 656, 656, 656, 656, 656, 656, 656, 656, 656, 656, 656, 656, 656, 656, 656, 656, 656, 656, 656, 656, 656, 656, 656, 656, 656, 656, 656, 656, 656, 656, 656, 656, 656, 656, 656, 656, 656, 656, 656, 656, 656, 656, 656, 656, 656, 656, 656, 656, 656, 656, 656, 656, 656, 656, 656, 656, 656, 656, 656, 656, 656, 656, 656, 656, 656, 656, 656, 656, 656, 656, 656, 656, 656, 656, 656, 656, 656, 656, 656, 656, 656, 656, 656, 656, 656, 656, 656, 656, 656, 656, 656, 656, 656, 656, 656, 656, 656, 656, 656, 656, 656, 656, 656, 656, 656, 656, 656, 656, 656, 656, 656, 656, 656}, + {655, 655, 655, 655, 655, 655, 655, 655, 655, 655, 655, 655, 655, 655, 655, 655, 655, 655, 655, 655, 655, 655, 655, 655, 655, 655, 655, 655, 655, 655, 655, 655, 655, 655, 655, 655, 655, 655, 655, 655, 655, 655, 655, 655, 655, 655, 655, 655, 655, 655, 655, 655, 655, 655, 655, 655, 655, 655, 655, 655, 655, 655, 655, 655, 655, 655, 655, 655, 655, 655, 655, 655, 655, 655, 655, 655, 655, 655, 655, 655, 655, 655, 655, 655, 655, 655, 655, 655, 655, 655, 655, 655, 655, 655, 655, 655, 655, 655, 655, 655, 655, 655, 655, 655, 655, 655, 655, 655, 655, 655, 655, 655, 655, 655, 655, 655, 655, 655, 655, 655, 655, 655, 655, 655, 655, 655, 655, 655, 655, 655, 655, 655, 655, 655, 655, 655, 655, 655, 655, 655, 655, 655, 655, 655, 655, 655, 655, 655, 655, 655, 655, 655, 655, 655, 655, 655, 655, 655, 655, 655, 655, 655, 655, 655, 655, 655, 655, 655, 655, 655, 655, 655, 655, 655, 655, 655, 655, 655, 655, 655, 655, 655, 655, 655, 655, 655, 655, 655, 655, 655, 655, 655, 655, 655, 655, 655, 655, 655, 655, 655, 655, 655, 655, 655, 655, 655, 655, 655, 655, 655, 655, 655, 655, 655, 655, 655, 655, 655, 655, 655, 655, 655, 655, 655, 655, 655, 655, 655, 655, 655, 655, 655, 655, 655, 655, 655, 655, 655, 655, 655, 655, 655, 655, 655, 655, 655, 655, 655, 655, 655, 655, 655, 655, 655, 655, 655, 655, 655, 
655, 655, 655, 655, 655, 655, 655, 655, 655, 655, 655, 655, 655, 655, 655, 655, 655, 655, 655, 655, 655, 655, 655, 655, 655, 655, 655, 655, 655, 655, 655, 655, 655, 655, 655, 655, 655, 655, 655, 655, 655, 655, 655, 655, 655, 655, 655, 655, 655, 655, 655, 655, 655, 655, 655, 655, 655, 655, 655, 655, 655, 655, 655, 655, 655, 655, 655, 655, 655, 655, 655, 655, 655, 655, 655, 655, 655, 655, 655, 655, 655, 655, 655, 655, 655, 655, 655, 655, 655, 655, 655, 655, 655, 655, 655, 655, 655, 655, 655, 655, 655, 655, 655, 655, 655, 655, 655, 655, 655, 655, 655, 655, 655, 655, 655, 655, 655, 655, 655, 655, 655, 655, 655, 655, 655, 655, 655, 655, 655, 655, 655, 655, 655, 655, 655, 655, 655, 655, 655, 655, 655, 655, 655, 655, 655, 655, 655, 655, 655, 655, 655, 655, 655, 655, 655, 655, 655, 655, 655, 655, 655, 655, 655, 655, 655, 655, 655, 655, 655, 655, 655, 655, 655, 655, 655, 655, 655, 655, 655, 655, 655, 655, 655, 655, 655, 655, 655, 655, 655, 655, 655, 655, 655, 655, 655, 655, 655, 655, 655, 655, 655, 655, 655, 655, 655, 655, 655, 655, 655, 655, 655, 655, 655, 655, 655, 655, 655, 655, 655, 655, 655, 655, 655, 655, 655, 655, 655, 655, 655, 655, 655, 655, 655, 655, 655, 655, 655, 655, 655, 655, 655, 655, 655, 655, 655, 655, 655, 655, 655, 655, 655, 655, 655, 655, 655, 655, 655, 655, 655, 655, 655, 655, 655, 655, 655}, + {654, 654, 654, 654, 654, 654, 654, 654, 654, 654, 654, 654, 654, 654, 654, 654, 654, 654, 654, 654, 654, 654, 654, 654, 654, 654, 654, 654, 654, 654, 654, 654, 654, 654, 654, 654, 654, 654, 654, 654, 654, 654, 654, 654, 654, 654, 654, 654, 654, 654, 654, 654, 654, 654, 654, 654, 654, 654, 654, 654, 654, 654, 654, 654, 654, 654, 654, 654, 654, 654, 654, 654, 654, 654, 654, 654, 654, 654, 654, 654, 654, 654, 654, 654, 654, 654, 654, 654, 654, 654, 654, 654, 654, 654, 654, 654, 654, 654, 654, 654, 654, 654, 654, 654, 654, 654, 654, 654, 654, 654, 654, 654, 654, 654, 654, 654, 654, 654, 654, 654, 654, 654, 654, 654, 654, 654, 654, 654, 654, 654, 654, 654, 654, 654, 654, 654, 654, 654, 654, 654, 654, 654, 654, 654, 654, 654, 654, 654, 654, 654, 654, 654, 654, 654, 654, 654, 654, 654, 654, 654, 654, 654, 654, 654, 654, 654, 654, 654, 654, 654, 654, 654, 654, 654, 654, 654, 654, 654, 654, 654, 654, 654, 654, 654, 654, 654, 654, 654, 654, 654, 654, 654, 654, 654, 654, 654, 654, 654, 654, 654, 654, 654, 654, 654, 654, 654, 654, 654, 654, 654, 654, 654, 654, 654, 654, 654, 654, 654, 654, 654, 654, 654, 654, 654, 654, 654, 654, 654, 654, 654, 654, 654, 654, 654, 654, 654, 654, 654, 654, 654, 654, 654, 654, 654, 654, 654, 654, 654, 654, 654, 654, 654, 654, 654, 654, 654, 654, 654, 654, 654, 654, 654, 654, 654, 654, 654, 654, 654, 654, 654, 654, 654, 654, 654, 654, 654, 654, 654, 654, 654, 654, 654, 654, 654, 654, 654, 654, 654, 654, 654, 654, 654, 654, 654, 654, 654, 654, 654, 654, 654, 654, 654, 654, 654, 654, 654, 654, 654, 654, 654, 654, 654, 654, 654, 654, 654, 654, 654, 654, 654, 654, 654, 654, 654, 654, 654, 654, 654, 654, 654, 654, 654, 654, 654, 654, 654, 654, 654, 654, 654, 654, 654, 654, 654, 654, 654, 654, 654, 654, 654, 654, 654, 654, 654, 654, 654, 654, 654, 654, 654, 654, 654, 654, 654, 654, 654, 654, 654, 654, 654, 654, 654, 654, 654, 654, 654, 654, 654, 654, 654, 654, 654, 654, 654, 654, 654, 654, 654, 654, 654, 654, 654, 654, 654, 654, 654, 654, 654, 654, 654, 654, 654, 654, 654, 654, 654, 654, 654, 654, 654, 654, 654, 654, 654, 654, 654, 654, 654, 654, 654, 654, 654, 654, 654, 654, 654, 654, 654, 654, 654, 654, 654, 654, 654, 654, 654, 654, 654, 654, 654, 654, 654, 654, 654, 654, 
654, 654, 654, 654, 654, 654, 654, 654, 654, 654, 654, 654, 654, 654, 654, 654, 654, 654, 654, 654, 654, 654, 654, 654, 654, 654, 654, 654, 654, 654, 654, 654, 654, 654, 654, 654, 654, 654, 654, 654, 654, 654, 654, 654, 654, 654, 654, 654, 654, 654, 654, 654, 654, 654, 654, 654, 654, 654, 654, 654, 654, 654, 654, 654, 654, 654, 654, 654, 654, 654, 654, 654, 654, 654, 654, 654, 654, 654}, + {653, 653, 653, 653, 653, 653, 653, 653, 653, 653, 653, 653, 653, 653, 653, 653, 653, 653, 653, 653, 653, 653, 653, 653, 653, 653, 653, 653, 653, 653, 653, 653, 653, 653, 653, 653, 653, 653, 653, 653, 653, 653, 653, 653, 653, 653, 653, 653, 653, 653, 653, 653, 653, 653, 653, 653, 653, 653, 653, 653, 653, 653, 653, 653, 653, 653, 653, 653, 653, 653, 653, 653, 653, 653, 653, 653, 653, 653, 653, 653, 653, 653, 653, 653, 653, 653, 653, 653, 653, 653, 653, 653, 653, 653, 653, 653, 653, 653, 653, 653, 653, 653, 653, 653, 653, 653, 653, 653, 653, 653, 653, 653, 653, 653, 653, 653, 653, 653, 653, 653, 653, 653, 653, 653, 653, 653, 653, 653, 653, 653, 653, 653, 653, 653, 653, 653, 653, 653, 653, 653, 653, 653, 653, 653, 653, 653, 653, 653, 653, 653, 653, 653, 653, 653, 653, 653, 653, 653, 653, 653, 653, 653, 653, 653, 653, 653, 653, 653, 653, 653, 653, 653, 653, 653, 653, 653, 653, 653, 653, 653, 653, 653, 653, 653, 653, 653, 653, 653, 653, 653, 653, 653, 653, 653, 653, 653, 653, 653, 653, 653, 653, 653, 653, 653, 653, 653, 653, 653, 653, 653, 653, 653, 653, 653, 653, 653, 653, 653, 653, 653, 653, 653, 653, 653, 653, 653, 653, 653, 653, 653, 653, 653, 653, 653, 653, 653, 653, 653, 653, 653, 653, 653, 653, 653, 653, 653, 653, 653, 653, 653, 653, 653, 653, 653, 653, 653, 653, 653, 653, 653, 653, 653, 653, 653, 653, 653, 653, 653, 653, 653, 653, 653, 653, 653, 653, 653, 653, 653, 653, 653, 653, 653, 653, 653, 653, 653, 653, 653, 653, 653, 653, 653, 653, 653, 653, 653, 653, 653, 653, 653, 653, 653, 653, 653, 653, 653, 653, 653, 653, 653, 653, 653, 653, 653, 653, 653, 653, 653, 653, 653, 653, 653, 653, 653, 653, 653, 653, 653, 653, 653, 653, 653, 653, 653, 653, 653, 653, 653, 653, 653, 653, 653, 653, 653, 653, 653, 653, 653, 653, 653, 653, 653, 653, 653, 653, 653, 653, 653, 653, 653, 653, 653, 653, 653, 653, 653, 653, 653, 653, 653, 653, 653, 653, 653, 653, 653, 653, 653, 653, 653, 653, 653, 653, 653, 653, 653, 653, 653, 653, 653, 653, 653, 653, 653, 653, 653, 653, 653, 653, 653, 653, 653, 653, 653, 653, 653, 653, 653, 653, 653, 653, 653, 653, 653, 653, 653, 653, 653, 653, 653, 653, 653, 653, 653, 653, 653, 653, 653, 653, 653, 653, 653, 653, 653, 653, 653, 653, 653, 653, 653, 653, 653, 653, 653, 653, 653, 653, 653, 653, 653, 653, 653, 653, 653, 653, 653, 653, 653, 653, 653, 653, 653, 653, 653, 653, 653, 653, 653, 653, 653, 653, 653, 653, 653, 653, 653, 653, 653, 653, 653, 653, 653, 653, 653, 653, 653, 653, 653, 653, 653, 653, 653, 653, 653, 653, 653, 653, 653, 653, 653, 653, 653, 653, 653, 653, 653, 653, 653, 653, 653, 653, 653, 653, 653, 653, 653, 653, 653, 653, 653, 653, 653, 653}, + {652, 652, 652, 652, 652, 652, 652, 652, 652, 652, 652, 652, 652, 652, 652, 652, 652, 652, 652, 652, 652, 652, 652, 652, 652, 652, 652, 652, 652, 652, 652, 652, 652, 652, 652, 652, 652, 652, 652, 652, 652, 652, 652, 652, 652, 652, 652, 652, 652, 652, 652, 652, 652, 652, 652, 652, 652, 652, 652, 652, 652, 652, 652, 652, 652, 652, 652, 652, 652, 652, 652, 652, 652, 652, 652, 652, 652, 652, 652, 652, 652, 652, 652, 652, 652, 652, 652, 652, 652, 652, 652, 652, 652, 652, 652, 652, 652, 652, 652, 652, 652, 652, 652, 652, 652, 652, 652, 652, 
652, 652, 652, 652, 652, 652, 652, 652, 652, 652, 652, 652, 652, 652, 652, 652, 652, 652, 652, 652, 652, 652, 652, 652, 652, 652, 652, 652, 652, 652, 652, 652, 652, 652, 652, 652, 652, 652, 652, 652, 652, 652, 652, 652, 652, 652, 652, 652, 652, 652, 652, 652, 652, 652, 652, 652, 652, 652, 652, 652, 652, 652, 652, 652, 652, 652, 652, 652, 652, 652, 652, 652, 652, 652, 652, 652, 652, 652, 652, 652, 652, 652, 652, 652, 652, 652, 652, 652, 652, 652, 652, 652, 652, 652, 652, 652, 652, 652, 652, 652, 652, 652, 652, 652, 652, 652, 652, 652, 652, 652, 652, 652, 652, 652, 652, 652, 652, 652, 652, 652, 652, 652, 652, 652, 652, 652, 652, 652, 652, 652, 652, 652, 652, 652, 652, 652, 652, 652, 652, 652, 652, 652, 652, 652, 652, 652, 652, 652, 652, 652, 652, 652, 652, 652, 652, 652, 652, 652, 652, 652, 652, 652, 652, 652, 652, 652, 652, 652, 652, 652, 652, 652, 652, 652, 652, 652, 652, 652, 652, 652, 652, 652, 652, 652, 652, 652, 652, 652, 652, 652, 652, 652, 652, 652, 652, 652, 652, 652, 652, 652, 652, 652, 652, 652, 652, 652, 652, 652, 652, 652, 652, 652, 652, 652, 652, 652, 652, 652, 652, 652, 652, 652, 652, 652, 652, 652, 652, 652, 652, 652, 652, 652, 652, 652, 652, 652, 652, 652, 652, 652, 652, 652, 652, 652, 652, 652, 652, 652, 652, 652, 652, 652, 652, 652, 652, 652, 652, 652, 652, 652, 652, 652, 652, 652, 652, 652, 652, 652, 652, 652, 652, 652, 652, 652, 652, 652, 652, 652, 652, 652, 652, 652, 652, 652, 652, 652, 652, 652, 652, 652, 652, 652, 652, 652, 652, 652, 652, 652, 652, 652, 652, 652, 652, 652, 652, 652, 652, 652, 652, 652, 652, 652, 652, 652, 652, 652, 652, 652, 652, 652, 652, 652, 652, 652, 652, 652, 652, 652, 652, 652, 652, 652, 652, 652, 652, 652, 652, 652, 652, 652, 652, 652, 652, 652, 652, 652, 652, 652, 652, 652, 652, 652, 652, 652, 652, 652, 652, 652, 652, 652, 652, 652, 652, 652, 652, 652, 652, 652, 652, 652, 652, 652, 652, 652, 652, 652, 652, 652, 652, 652, 652, 652, 652, 652, 652, 652, 652, 652, 652, 652, 652, 652, 652, 652, 652, 652, 652, 652, 652, 652, 652, 652, 652, 652, 652, 652, 652, 652, 652, 652, 652, 652, 652, 652, 652}, + // 290 + {651, 651, 651, 651, 651, 651, 651, 651, 651, 651, 651, 651, 651, 651, 651, 651, 651, 651, 651, 651, 651, 651, 651, 651, 651, 651, 651, 651, 651, 651, 651, 651, 651, 651, 651, 651, 651, 651, 651, 651, 651, 651, 651, 651, 651, 651, 651, 651, 651, 651, 651, 651, 651, 651, 651, 651, 651, 651, 651, 651, 651, 651, 651, 651, 651, 651, 651, 651, 651, 651, 651, 651, 651, 651, 651, 651, 651, 651, 651, 651, 651, 651, 651, 651, 651, 651, 651, 651, 651, 651, 651, 651, 651, 651, 651, 651, 651, 651, 651, 651, 651, 651, 651, 651, 651, 651, 651, 651, 651, 651, 651, 651, 651, 651, 651, 651, 651, 651, 651, 651, 651, 651, 651, 651, 651, 651, 651, 651, 651, 651, 651, 651, 651, 651, 651, 651, 651, 651, 651, 651, 651, 651, 651, 651, 651, 651, 651, 651, 651, 651, 651, 651, 651, 651, 651, 651, 651, 651, 651, 651, 651, 651, 651, 651, 651, 651, 651, 651, 651, 651, 651, 651, 651, 651, 651, 651, 651, 651, 651, 651, 651, 651, 651, 651, 651, 651, 651, 651, 651, 651, 651, 651, 651, 651, 651, 651, 651, 651, 651, 651, 651, 651, 651, 651, 651, 651, 651, 651, 651, 651, 651, 651, 651, 651, 651, 651, 651, 651, 651, 651, 651, 651, 651, 651, 651, 651, 651, 651, 651, 651, 651, 651, 651, 651, 651, 651, 651, 651, 651, 651, 651, 651, 651, 651, 651, 651, 651, 651, 651, 651, 651, 651, 651, 651, 651, 651, 651, 651, 651, 651, 651, 651, 651, 651, 651, 651, 651, 651, 651, 651, 651, 651, 651, 651, 651, 651, 651, 651, 651, 651, 651, 651, 651, 651, 651, 651, 651, 651, 651, 651, 651, 651, 651, 
651, 651, 651, 651, 651, 651, 651, 651, 651, 651, 651, 651, 651, 651, 651, 651, 651, 651, 651, 651, 651, 651, 651, 651, 651, 651, 651, 651, 651, 651, 651, 651, 651, 651, 651, 651, 651, 651, 651, 651, 651, 651, 651, 651, 651, 651, 651, 651, 651, 651, 651, 651, 651, 651, 651, 651, 651, 651, 651, 651, 651, 651, 651, 651, 651, 651, 651, 651, 651, 651, 651, 651, 651, 651, 651, 651, 651, 651, 651, 651, 651, 651, 651, 651, 651, 651, 651, 651, 651, 651, 651, 651, 651, 651, 651, 651, 651, 651, 651, 651, 651, 651, 651, 651, 651, 651, 651, 651, 651, 651, 651, 651, 651, 651, 651, 651, 651, 651, 651, 651, 651, 651, 651, 651, 651, 651, 651, 651, 651, 651, 651, 651, 651, 651, 651, 651, 651, 651, 651, 651, 651, 651, 651, 651, 651, 651, 651, 651, 651, 651, 651, 651, 651, 651, 651, 651, 651, 651, 651, 651, 651, 651, 651, 651, 651, 651, 651, 651, 651, 651, 651, 651, 651, 651, 651, 651, 651, 651, 651, 651, 651, 651, 651, 651, 651, 651, 651, 651, 651, 651, 651, 651, 651, 651, 651, 651, 651, 651, 651, 651, 651, 651, 651, 651, 651, 651, 651, 651, 651, 651, 651, 651, 651, 651, 651, 651, 651, 651, 651, 651, 651, 651, 651, 651, 651, 651, 651, 651, 651, 651}, + {650, 650, 650, 650, 650, 650, 650, 650, 650, 650, 650, 650, 650, 650, 650, 650, 650, 650, 650, 650, 650, 650, 650, 650, 650, 650, 650, 650, 650, 650, 650, 650, 650, 650, 650, 650, 650, 650, 650, 650, 650, 650, 650, 650, 650, 650, 650, 650, 650, 650, 650, 650, 650, 650, 650, 650, 650, 650, 650, 650, 650, 650, 650, 650, 650, 650, 650, 650, 650, 650, 650, 650, 650, 650, 650, 650, 650, 650, 650, 650, 650, 650, 650, 650, 650, 650, 650, 650, 650, 650, 650, 650, 650, 650, 650, 650, 650, 650, 650, 650, 650, 650, 650, 650, 650, 650, 650, 650, 650, 650, 650, 650, 650, 650, 650, 650, 650, 650, 650, 650, 650, 650, 650, 650, 650, 650, 650, 650, 650, 650, 650, 650, 650, 650, 650, 650, 650, 650, 650, 650, 650, 650, 650, 650, 650, 650, 650, 650, 650, 650, 650, 650, 650, 650, 650, 650, 650, 650, 650, 650, 650, 650, 650, 650, 650, 650, 650, 650, 650, 650, 650, 650, 650, 650, 650, 650, 650, 650, 650, 650, 650, 650, 650, 650, 650, 650, 650, 650, 650, 650, 650, 650, 650, 650, 650, 650, 650, 650, 650, 650, 650, 650, 650, 650, 650, 650, 650, 650, 650, 650, 650, 650, 650, 650, 650, 650, 650, 650, 650, 650, 650, 650, 650, 650, 650, 650, 650, 650, 650, 650, 650, 650, 650, 650, 650, 650, 650, 650, 650, 650, 650, 650, 650, 650, 650, 650, 650, 650, 650, 650, 650, 650, 650, 650, 650, 650, 650, 650, 650, 650, 650, 650, 650, 650, 650, 650, 650, 650, 650, 650, 650, 650, 650, 650, 650, 650, 650, 650, 650, 650, 650, 650, 650, 650, 650, 650, 650, 650, 650, 650, 650, 650, 650, 650, 650, 650, 650, 650, 650, 650, 650, 650, 650, 650, 650, 650, 650, 650, 650, 650, 650, 650, 650, 650, 650, 650, 650, 650, 650, 650, 650, 650, 650, 650, 650, 650, 650, 650, 650, 650, 650, 650, 650, 650, 650, 650, 650, 650, 650, 650, 650, 650, 650, 650, 650, 650, 650, 650, 650, 650, 650, 650, 650, 650, 650, 650, 650, 650, 650, 650, 650, 650, 650, 650, 650, 650, 650, 650, 650, 650, 650, 650, 650, 650, 650, 650, 650, 650, 650, 650, 650, 650, 650, 650, 650, 650, 650, 650, 650, 650, 650, 650, 650, 650, 650, 650, 650, 650, 650, 650, 650, 650, 650, 650, 650, 650, 650, 650, 650, 650, 650, 650, 650, 650, 650, 650, 650, 650, 650, 650, 650, 650, 650, 650, 650, 650, 650, 650, 650, 650, 650, 650, 650, 650, 650, 650, 650, 650, 650, 650, 650, 650, 650, 650, 650, 650, 650, 650, 650, 650, 650, 650, 650, 650, 650, 650, 650, 650, 650, 650, 650, 650, 650, 650, 650, 650, 650, 650, 650, 650, 650, 650, 650, 650, 650, 650, 650, 650, 650, 650, 
650, 650, 650, 650, 650, 650, 650, 650, 650, 650, 650, 650, 650, 650, 650, 650, 650, 650, 650, 650, 650, 650, 650, 650, 650, 650, 650, 650, 650, 650, 650, 650, 650, 650, 650, 650, 650, 650, 650, 650, 650, 650, 650}, + {649, 649, 649, 649, 649, 649, 649, 649, 649, 649, 649, 649, 649, 649, 649, 649, 649, 649, 649, 649, 649, 649, 649, 649, 649, 649, 649, 649, 649, 649, 649, 649, 649, 649, 649, 649, 649, 649, 649, 649, 649, 649, 649, 649, 649, 649, 649, 649, 649, 649, 649, 649, 649, 649, 649, 649, 649, 649, 649, 649, 649, 649, 649, 649, 649, 649, 649, 649, 649, 649, 649, 649, 649, 649, 649, 649, 649, 649, 649, 649, 649, 649, 649, 649, 649, 649, 649, 649, 649, 649, 649, 649, 649, 649, 649, 649, 649, 649, 649, 649, 649, 649, 649, 649, 649, 649, 649, 649, 649, 649, 649, 649, 649, 649, 649, 649, 649, 649, 649, 649, 649, 649, 649, 649, 649, 649, 649, 649, 649, 649, 649, 649, 649, 649, 649, 649, 649, 649, 649, 649, 649, 649, 649, 649, 649, 649, 649, 649, 649, 649, 649, 649, 649, 649, 649, 649, 649, 649, 649, 649, 649, 649, 649, 649, 649, 649, 649, 649, 649, 649, 649, 649, 649, 649, 649, 649, 649, 649, 649, 649, 649, 649, 649, 649, 649, 649, 649, 649, 649, 649, 649, 649, 649, 649, 649, 649, 649, 649, 649, 649, 649, 649, 649, 649, 649, 649, 649, 649, 649, 649, 649, 649, 649, 649, 649, 649, 649, 649, 649, 649, 649, 649, 649, 649, 649, 649, 649, 649, 649, 649, 649, 649, 649, 649, 649, 649, 649, 649, 649, 649, 649, 649, 649, 649, 649, 649, 649, 649, 649, 649, 649, 649, 649, 649, 649, 649, 649, 649, 649, 649, 649, 649, 649, 649, 649, 649, 649, 649, 649, 649, 649, 649, 649, 649, 649, 649, 649, 649, 649, 649, 649, 649, 649, 649, 649, 649, 649, 649, 649, 649, 649, 649, 649, 649, 649, 649, 649, 649, 649, 649, 649, 649, 649, 649, 649, 649, 649, 649, 649, 649, 649, 649, 649, 649, 649, 649, 649, 649, 649, 649, 649, 649, 649, 649, 649, 649, 649, 649, 649, 649, 649, 649, 649, 649, 649, 649, 649, 649, 649, 649, 649, 649, 649, 649, 649, 649, 649, 649, 649, 649, 649, 649, 649, 649, 649, 649, 649, 649, 649, 649, 649, 649, 649, 649, 649, 649, 649, 649, 649, 649, 649, 649, 649, 649, 649, 649, 649, 649, 649, 649, 649, 649, 649, 649, 649, 649, 649, 649, 649, 649, 649, 649, 649, 649, 649, 649, 649, 649, 649, 649, 649, 649, 649, 649, 649, 649, 649, 649, 649, 649, 649, 649, 649, 649, 649, 649, 649, 649, 649, 649, 649, 649, 649, 649, 649, 649, 649, 649, 649, 649, 649, 649, 649, 649, 649, 649, 649, 649, 649, 649, 649, 649, 649, 649, 649, 649, 649, 649, 649, 649, 649, 649, 649, 649, 649, 649, 649, 649, 649, 649, 649, 649, 649, 649, 649, 649, 649, 649, 649, 649, 649, 649, 649, 649, 649, 649, 649, 649, 649, 649, 649, 649, 649, 649, 649, 649, 649, 649, 649, 649, 649, 649, 649, 649, 649, 649, 649, 649, 649, 649, 649, 649, 649, 649, 649, 649, 649, 649, 649, 649, 649, 649, 649, 649, 649, 649, 649, 649, 649, 649, 649, 649, 649}, + {648, 648, 648, 648, 648, 648, 648, 648, 648, 648, 648, 648, 648, 648, 648, 648, 648, 648, 648, 648, 648, 648, 648, 648, 648, 648, 648, 648, 648, 648, 648, 648, 648, 648, 648, 648, 648, 648, 648, 648, 648, 648, 648, 648, 648, 648, 648, 648, 648, 648, 648, 648, 648, 648, 648, 648, 648, 648, 648, 648, 648, 648, 648, 648, 648, 648, 648, 648, 648, 648, 648, 648, 648, 648, 648, 648, 648, 648, 648, 648, 648, 648, 648, 648, 648, 648, 648, 648, 648, 648, 648, 648, 648, 648, 648, 648, 648, 648, 648, 648, 648, 648, 648, 648, 648, 648, 648, 648, 648, 648, 648, 648, 648, 648, 648, 648, 648, 648, 648, 648, 648, 648, 648, 648, 648, 648, 648, 648, 648, 648, 648, 648, 648, 648, 648, 648, 648, 648, 648, 648, 648, 648, 648, 
648, 648, 648, 648, 648, 648, 648, 648, 648, 648, 648, 648, 648, 648, 648, 648, 648, 648, 648, 648, 648, 648, 648, 648, 648, 648, 648, 648, 648, 648, 648, 648, 648, 648, 648, 648, 648, 648, 648, 648, 648, 648, 648, 648, 648, 648, 648, 648, 648, 648, 648, 648, 648, 648, 648, 648, 648, 648, 648, 648, 648, 648, 648, 648, 648, 648, 648, 648, 648, 648, 648, 648, 648, 648, 648, 648, 648, 648, 648, 648, 648, 648, 648, 648, 648, 648, 648, 648, 648, 648, 648, 648, 648, 648, 648, 648, 648, 648, 648, 648, 648, 648, 648, 648, 648, 648, 648, 648, 648, 648, 648, 648, 648, 648, 648, 648, 648, 648, 648, 648, 648, 648, 648, 648, 648, 648, 648, 648, 648, 648, 648, 648, 648, 648, 648, 648, 648, 648, 648, 648, 648, 648, 648, 648, 648, 648, 648, 648, 648, 648, 648, 648, 648, 648, 648, 648, 648, 648, 648, 648, 648, 648, 648, 648, 648, 648, 648, 648, 648, 648, 648, 648, 648, 648, 648, 648, 648, 648, 648, 648, 648, 648, 648, 648, 648, 648, 648, 648, 648, 648, 648, 648, 648, 648, 648, 648, 648, 648, 648, 648, 648, 648, 648, 648, 648, 648, 648, 648, 648, 648, 648, 648, 648, 648, 648, 648, 648, 648, 648, 648, 648, 648, 648, 648, 648, 648, 648, 648, 648, 648, 648, 648, 648, 648, 648, 648, 648, 648, 648, 648, 648, 648, 648, 648, 648, 648, 648, 648, 648, 648, 648, 648, 648, 648, 648, 648, 648, 648, 648, 648, 648, 648, 648, 648, 648, 648, 648, 648, 648, 648, 648, 648, 648, 648, 648, 648, 648, 648, 648, 648, 648, 648, 648, 648, 648, 648, 648, 648, 648, 648, 648, 648, 648, 648, 648, 648, 648, 648, 648, 648, 648, 648, 648, 648, 648, 648, 648, 648, 648, 648, 648, 648, 648, 648, 648, 648, 648, 648, 648, 648, 648, 648, 648, 648, 648, 648, 648, 648, 648, 648, 648, 648, 648, 648, 648, 648, 648, 648, 648, 648, 648, 648, 648, 648, 648, 648, 648, 648, 648, 648, 648, 648, 648, 648, 648, 648, 648, 648, 648, 648, 648, 648, 648, 648, 648, 648, 648, 648, 648, 648, 648, 648, 648, 648, 648, 648, 648, 648, 648, 648}, + {647, 647, 647, 647, 647, 647, 647, 647, 647, 647, 647, 647, 647, 647, 647, 647, 647, 647, 647, 647, 647, 647, 647, 647, 647, 647, 647, 647, 647, 647, 647, 647, 647, 647, 647, 647, 647, 647, 647, 647, 647, 647, 647, 647, 647, 647, 647, 647, 647, 647, 647, 647, 647, 647, 647, 647, 647, 647, 647, 647, 647, 647, 647, 647, 647, 647, 647, 647, 647, 647, 647, 647, 647, 647, 647, 647, 647, 647, 647, 647, 647, 647, 647, 647, 647, 647, 647, 647, 647, 647, 647, 647, 647, 647, 647, 647, 647, 647, 647, 647, 647, 647, 647, 647, 647, 647, 647, 647, 647, 647, 647, 647, 647, 647, 647, 647, 647, 647, 647, 647, 647, 647, 647, 647, 647, 647, 647, 647, 647, 647, 647, 647, 647, 647, 647, 647, 647, 647, 647, 647, 647, 647, 647, 647, 647, 647, 647, 647, 647, 647, 647, 647, 647, 647, 647, 647, 647, 647, 647, 647, 647, 647, 647, 647, 647, 647, 647, 647, 647, 647, 647, 647, 647, 647, 647, 647, 647, 647, 647, 647, 647, 647, 647, 647, 647, 647, 647, 647, 647, 647, 647, 647, 647, 647, 647, 647, 647, 647, 647, 647, 647, 647, 647, 647, 647, 647, 647, 647, 647, 647, 647, 647, 647, 647, 647, 647, 647, 647, 647, 647, 647, 647, 647, 647, 647, 647, 647, 647, 647, 647, 647, 647, 647, 647, 647, 647, 647, 647, 647, 647, 647, 647, 647, 647, 647, 647, 647, 647, 647, 647, 647, 647, 647, 647, 647, 647, 647, 647, 647, 647, 647, 647, 647, 647, 647, 647, 647, 647, 647, 647, 647, 647, 647, 647, 647, 647, 647, 647, 647, 647, 647, 647, 647, 647, 647, 647, 647, 647, 647, 647, 647, 647, 647, 647, 647, 647, 647, 647, 647, 647, 647, 647, 647, 647, 647, 647, 647, 647, 647, 647, 647, 647, 647, 647, 647, 647, 647, 647, 647, 647, 647, 647, 647, 647, 647, 647, 647, 647, 647, 647, 
647, 647, 647, 647, 647, 647, 647, 647, 647, 647, 647, 647, 647, 647, 647, 647, 647, 647, 647, 647, 647, 647, 647, 647, 647, 647, 647, 647, 647, 647, 647, 647, 647, 647, 647, 647, 647, 647, 647, 647, 647, 647, 647, 647, 647, 647, 647, 647, 647, 647, 647, 647, 647, 647, 647, 647, 647, 647, 647, 647, 647, 647, 647, 647, 647, 647, 647, 647, 647, 647, 647, 647, 647, 647, 647, 647, 647, 647, 647, 647, 647, 647, 647, 647, 647, 647, 647, 647, 647, 647, 647, 647, 647, 647, 647, 647, 647, 647, 647, 647, 647, 647, 647, 647, 647, 647, 647, 647, 647, 647, 647, 647, 647, 647, 647, 647, 647, 647, 647, 647, 647, 647, 647, 647, 647, 647, 647, 647, 647, 647, 647, 647, 647, 647, 647, 647, 647, 647, 647, 647, 647, 647, 647, 647, 647, 647, 647, 647, 647, 647, 647, 647, 647, 647, 647, 647, 647, 647, 647, 647, 647, 647, 647, 647, 647, 647, 647, 647, 647, 647, 647, 647, 647, 647, 647, 647, 647, 647, 647, 647, 647, 647, 647, 647, 647, 647, 647, 647, 647, 647, 647, 647, 647}, + // 295 + {646, 646, 646, 646, 646, 646, 646, 646, 646, 646, 646, 646, 646, 646, 646, 646, 646, 646, 646, 646, 646, 646, 646, 646, 646, 646, 646, 646, 646, 646, 646, 646, 646, 646, 646, 646, 646, 646, 646, 646, 646, 646, 646, 646, 646, 646, 646, 646, 646, 646, 646, 646, 646, 646, 646, 646, 646, 646, 646, 646, 646, 646, 646, 646, 646, 646, 646, 646, 646, 646, 646, 646, 646, 646, 646, 646, 646, 646, 646, 646, 646, 646, 646, 646, 646, 646, 646, 646, 646, 646, 646, 646, 646, 646, 646, 646, 646, 646, 646, 646, 646, 646, 646, 646, 646, 646, 646, 646, 646, 646, 646, 646, 646, 646, 646, 646, 646, 646, 646, 646, 646, 646, 646, 646, 646, 646, 646, 646, 646, 646, 646, 646, 646, 646, 646, 646, 646, 646, 646, 646, 646, 646, 646, 646, 646, 646, 646, 646, 646, 646, 646, 646, 646, 646, 646, 646, 646, 646, 646, 646, 646, 646, 646, 646, 646, 646, 646, 646, 646, 646, 646, 646, 646, 646, 646, 646, 646, 646, 646, 646, 646, 646, 646, 646, 646, 646, 646, 646, 646, 646, 646, 646, 646, 646, 646, 646, 646, 646, 646, 646, 646, 646, 646, 646, 646, 646, 646, 646, 646, 646, 646, 646, 646, 646, 646, 646, 646, 646, 646, 646, 646, 646, 646, 646, 646, 646, 646, 646, 646, 646, 646, 646, 646, 646, 646, 646, 646, 646, 646, 646, 646, 646, 646, 646, 646, 646, 646, 646, 646, 646, 646, 646, 646, 646, 646, 646, 646, 646, 646, 646, 646, 646, 646, 646, 646, 646, 646, 646, 646, 646, 646, 646, 646, 646, 646, 646, 646, 646, 646, 646, 646, 646, 646, 646, 646, 646, 646, 646, 646, 646, 646, 646, 646, 646, 646, 646, 646, 646, 646, 646, 646, 646, 646, 646, 646, 646, 646, 646, 646, 646, 646, 646, 646, 646, 646, 646, 646, 646, 646, 646, 646, 646, 646, 646, 646, 646, 646, 646, 646, 646, 646, 646, 646, 646, 646, 646, 646, 646, 646, 646, 646, 646, 646, 646, 646, 646, 646, 646, 646, 646, 646, 646, 646, 646, 646, 646, 646, 646, 646, 646, 646, 646, 646, 646, 646, 646, 646, 646, 646, 646, 646, 646, 646, 646, 646, 646, 646, 646, 646, 646, 646, 646, 646, 646, 646, 646, 646, 646, 646, 646, 646, 646, 646, 646, 646, 646, 646, 646, 646, 646, 646, 646, 646, 646, 646, 646, 646, 646, 646, 646, 646, 646, 646, 646, 646, 646, 646, 646, 646, 646, 646, 646, 646, 646, 646, 646, 646, 646, 646, 646, 646, 646, 646, 646, 646, 646, 646, 646, 646, 646, 646, 646, 646, 646, 646, 646, 646, 646, 646, 646, 646, 646, 646, 646, 646, 646, 646, 646, 646, 646, 646, 646, 646, 646, 646, 646, 646, 646, 646, 646, 646, 646, 646, 646, 646, 646, 646, 646, 646, 646, 646, 646, 646, 646, 646, 646, 646, 646, 646, 646, 646, 646, 646, 646, 646, 646, 646, 646, 646, 646, 646, 646, 646, 646, 646, 646, 646, 646, 646, 646, 646, 646, 646, 646, 646, 
646, 646, 646, 646, 646, 646, 646, 646}, + {645, 645, 645, 645, 645, 645, 645, 645, 645, 645, 645, 645, 645, 645, 645, 645, 645, 645, 645, 645, 645, 645, 645, 645, 645, 645, 645, 645, 645, 645, 645, 645, 645, 645, 645, 645, 645, 645, 645, 645, 645, 645, 645, 645, 645, 645, 645, 645, 645, 645, 645, 645, 645, 645, 645, 645, 645, 645, 645, 645, 645, 645, 645, 645, 645, 645, 645, 645, 645, 645, 645, 645, 645, 645, 645, 645, 645, 645, 645, 645, 645, 645, 645, 645, 645, 645, 645, 645, 645, 645, 645, 645, 645, 645, 645, 645, 645, 645, 645, 645, 645, 645, 645, 645, 645, 645, 645, 645, 645, 645, 645, 645, 645, 645, 645, 645, 645, 645, 645, 645, 645, 645, 645, 645, 645, 645, 645, 645, 645, 645, 645, 645, 645, 645, 645, 645, 645, 645, 645, 645, 645, 645, 645, 645, 645, 645, 645, 645, 645, 645, 645, 645, 645, 645, 645, 645, 645, 645, 645, 645, 645, 645, 645, 645, 645, 645, 645, 645, 645, 645, 645, 645, 645, 645, 645, 645, 645, 645, 645, 645, 645, 645, 645, 645, 645, 645, 645, 645, 645, 645, 645, 645, 645, 645, 645, 645, 645, 645, 645, 645, 645, 645, 645, 645, 645, 645, 645, 645, 645, 645, 645, 645, 645, 645, 645, 645, 645, 645, 645, 645, 645, 645, 645, 645, 645, 645, 645, 645, 645, 645, 645, 645, 645, 645, 645, 645, 645, 645, 645, 645, 645, 645, 645, 645, 645, 645, 645, 645, 645, 645, 645, 645, 645, 645, 645, 645, 645, 645, 645, 645, 645, 645, 645, 645, 645, 645, 645, 645, 645, 645, 645, 645, 645, 645, 645, 645, 645, 645, 645, 645, 645, 645, 645, 645, 645, 645, 645, 645, 645, 645, 645, 645, 645, 645, 645, 645, 645, 645, 645, 645, 645, 645, 645, 645, 645, 645, 645, 645, 645, 645, 645, 645, 645, 645, 645, 645, 645, 645, 645, 645, 645, 645, 645, 645, 645, 645, 645, 645, 645, 645, 645, 645, 645, 645, 645, 645, 645, 645, 645, 645, 645, 645, 645, 645, 645, 645, 645, 645, 645, 645, 645, 645, 645, 645, 645, 645, 645, 645, 645, 645, 645, 645, 645, 645, 645, 645, 645, 645, 645, 645, 645, 645, 645, 645, 645, 645, 645, 645, 645, 645, 645, 645, 645, 645, 645, 645, 645, 645, 645, 645, 645, 645, 645, 645, 645, 645, 645, 645, 645, 645, 645, 645, 645, 645, 645, 645, 645, 645, 645, 645, 645, 645, 645, 645, 645, 645, 645, 645, 645, 645, 645, 645, 645, 645, 645, 645, 645, 645, 645, 645, 645, 645, 645, 645, 645, 645, 645, 645, 645, 645, 645, 645, 645, 645, 645, 645, 645, 645, 645, 645, 645, 645, 645, 645, 645, 645, 645, 645, 645, 645, 645, 645, 645, 645, 645, 645, 645, 645, 645, 645, 645, 645, 645, 645, 645, 645, 645, 645, 645, 645, 645, 645, 645, 645, 645, 645, 645, 645, 645, 645, 645, 645, 645, 645, 645, 645, 645, 645, 645, 645, 645, 645, 645, 645, 645, 645, 645, 645, 645, 645, 645, 645, 645, 645, 645, 645, 645, 645, 645, 645, 645, 645, 645}, + {644, 644, 644, 644, 644, 644, 644, 644, 644, 644, 644, 644, 644, 644, 644, 644, 644, 644, 644, 644, 644, 644, 644, 644, 644, 644, 644, 644, 644, 644, 644, 644, 644, 644, 644, 644, 644, 644, 644, 644, 644, 644, 644, 644, 644, 644, 644, 644, 644, 644, 644, 644, 644, 644, 644, 644, 644, 644, 644, 644, 644, 644, 644, 644, 644, 644, 644, 644, 644, 644, 644, 644, 644, 644, 644, 644, 644, 644, 644, 644, 644, 644, 644, 644, 644, 644, 644, 644, 644, 644, 644, 644, 644, 644, 644, 644, 644, 644, 644, 644, 644, 644, 644, 644, 644, 644, 644, 644, 644, 644, 644, 644, 644, 644, 644, 644, 644, 644, 644, 644, 644, 644, 644, 644, 644, 644, 644, 644, 644, 644, 644, 644, 644, 644, 644, 644, 644, 644, 644, 644, 644, 644, 644, 644, 644, 644, 644, 644, 644, 644, 644, 644, 644, 644, 644, 644, 644, 644, 644, 644, 644, 644, 644, 644, 644, 644, 644, 644, 644, 644, 644, 644, 644, 644, 644, 644, 644, 644, 
644, 644, 644, 644, 644, 644, 644, 644, 644, 644, 644, 644, 644, 644, 644, 644, 644, 644, 644, 644, 644, 644, 644, 644, 644, 644, 644, 644, 644, 644, 644, 644, 644, 644, 644, 644, 644, 644, 644, 644, 644, 644, 644, 644, 644, 644, 644, 644, 644, 644, 644, 644, 644, 644, 644, 644, 644, 644, 644, 644, 644, 644, 644, 644, 644, 644, 644, 644, 644, 644, 644, 644, 644, 644, 644, 644, 644, 644, 644, 644, 644, 644, 644, 644, 644, 644, 644, 644, 644, 644, 644, 644, 644, 644, 644, 644, 644, 644, 644, 644, 644, 644, 644, 644, 644, 644, 644, 644, 644, 644, 644, 644, 644, 644, 644, 644, 644, 644, 644, 644, 644, 644, 644, 644, 644, 644, 644, 644, 644, 644, 644, 644, 644, 644, 644, 644, 644, 644, 644, 644, 644, 644, 644, 644, 644, 644, 644, 644, 644, 644, 644, 644, 644, 644, 644, 644, 644, 644, 644, 644, 644, 644, 644, 644, 644, 644, 644, 644, 644, 644, 644, 644, 644, 644, 644, 644, 644, 644, 644, 644, 644, 644, 644, 644, 644, 644, 644, 644, 644, 644, 644, 644, 644, 644, 644, 644, 644, 644, 644, 644, 644, 644, 644, 644, 644, 644, 644, 644, 644, 644, 644, 644, 644, 644, 644, 644, 644, 644, 644, 644, 644, 644, 644, 644, 644, 644, 644, 644, 644, 644, 644, 644, 644, 644, 644, 644, 644, 644, 644, 644, 644, 644, 644, 644, 644, 644, 644, 644, 644, 644, 644, 644, 644, 644, 644, 644, 644, 644, 644, 644, 644, 644, 644, 644, 644, 644, 644, 644, 644, 644, 644, 644, 644, 644, 644, 644, 644, 644, 644, 644, 644, 644, 644, 644, 644, 644, 644, 644, 644, 644, 644, 644, 644, 644, 644, 644, 644, 644, 644, 644, 644, 644, 644, 644, 644, 644, 644, 644, 644, 644, 644, 644, 644, 644, 644, 644, 644, 644, 644, 644, 644, 644, 644, 644, 644, 644, 644, 644, 644, 644, 644, 644, 644, 644, 644, 644, 644, 644, 644, 644, 644, 644, 644, 644, 644}, + {643, 643, 643, 643, 643, 643, 643, 643, 643, 643, 643, 643, 643, 643, 643, 643, 643, 643, 643, 643, 643, 643, 643, 643, 643, 643, 643, 643, 643, 643, 643, 643, 643, 643, 643, 643, 643, 643, 643, 643, 643, 643, 643, 643, 643, 643, 643, 643, 643, 643, 643, 643, 643, 643, 643, 643, 643, 643, 643, 643, 643, 643, 643, 643, 643, 643, 643, 643, 643, 643, 643, 643, 643, 643, 643, 643, 643, 643, 643, 643, 643, 643, 643, 643, 643, 643, 643, 643, 643, 643, 643, 643, 643, 643, 643, 643, 643, 643, 643, 643, 643, 643, 643, 643, 643, 643, 643, 643, 643, 643, 643, 643, 643, 643, 643, 643, 643, 643, 643, 643, 643, 643, 643, 643, 643, 643, 643, 643, 643, 643, 643, 643, 643, 643, 643, 643, 643, 643, 643, 643, 643, 643, 643, 643, 643, 643, 643, 643, 643, 643, 643, 643, 643, 643, 643, 643, 643, 643, 643, 643, 643, 643, 643, 643, 643, 643, 643, 643, 643, 643, 643, 643, 643, 643, 643, 643, 643, 643, 643, 643, 643, 643, 643, 643, 643, 643, 643, 643, 643, 643, 643, 643, 643, 643, 643, 643, 643, 643, 643, 643, 643, 643, 643, 643, 643, 643, 643, 643, 643, 643, 643, 643, 643, 643, 643, 643, 643, 643, 643, 643, 643, 643, 643, 643, 643, 643, 643, 643, 643, 643, 643, 643, 643, 643, 643, 643, 643, 643, 643, 643, 643, 643, 643, 643, 643, 643, 643, 643, 643, 643, 643, 643, 643, 643, 643, 643, 643, 643, 643, 643, 643, 643, 643, 643, 643, 643, 643, 643, 643, 643, 643, 643, 643, 643, 643, 643, 643, 643, 643, 643, 643, 643, 643, 643, 643, 643, 643, 643, 643, 643, 643, 643, 643, 643, 643, 643, 643, 643, 643, 643, 643, 643, 643, 643, 643, 643, 643, 643, 643, 643, 643, 643, 643, 643, 643, 643, 643, 643, 643, 643, 643, 643, 643, 643, 643, 643, 643, 643, 643, 643, 643, 643, 643, 643, 643, 643, 643, 643, 643, 643, 643, 643, 643, 643, 643, 643, 643, 643, 643, 643, 643, 643, 643, 643, 643, 643, 643, 643, 643, 643, 643, 643, 643, 643, 643, 
643, 643, 643, 643, 643, 643, 643, 643, 643, 643, 643, 643, 643, 643, 643, 643, 643, 643, 643, 643, 643, 643, 643, 643, 643, 643, 643, 643, 643, 643, 643, 643, 643, 643, 643, 643, 643, 643, 643, 643, 643, 643, 643, 643, 643, 643, 643, 643, 643, 643, 643, 643, 643, 643, 643, 643, 643, 643, 643, 643, 643, 643, 643, 643, 643, 643, 643, 643, 643, 643, 643, 643, 643, 643, 643, 643, 643, 643, 643, 643, 643, 643, 643, 643, 643, 643, 643, 643, 643, 643, 643, 643, 643, 643, 643, 643, 643, 643, 643, 643, 643, 643, 643, 643, 643, 643, 643, 643, 643, 643, 643, 643, 643, 643, 643, 643, 643, 643, 643, 643, 643, 643, 643, 643, 643, 643, 643, 643, 643, 643, 643, 643, 643, 643, 643, 643, 643, 643, 643, 643, 643, 643, 643, 643, 643, 643, 643, 643, 643, 643, 643, 643, 643, 643, 643, 643, 643, 643}, + {642, 642, 642, 642, 642, 642, 642, 642, 642, 642, 642, 642, 642, 642, 642, 642, 642, 642, 642, 642, 642, 642, 642, 642, 642, 642, 642, 642, 642, 642, 642, 642, 642, 642, 642, 642, 642, 642, 642, 642, 642, 642, 642, 642, 642, 642, 642, 642, 642, 642, 642, 642, 642, 642, 642, 642, 642, 642, 642, 642, 642, 642, 642, 642, 642, 642, 642, 642, 642, 642, 642, 642, 642, 642, 642, 642, 642, 642, 642, 642, 642, 642, 642, 642, 642, 642, 642, 642, 642, 642, 642, 642, 642, 642, 642, 642, 642, 642, 642, 642, 642, 642, 642, 642, 642, 642, 642, 642, 642, 642, 642, 642, 642, 642, 642, 642, 642, 642, 642, 642, 642, 642, 642, 642, 642, 642, 642, 642, 642, 642, 642, 642, 642, 642, 642, 642, 642, 642, 642, 642, 642, 642, 642, 642, 642, 642, 642, 642, 642, 642, 642, 642, 642, 642, 642, 642, 642, 642, 642, 642, 642, 642, 642, 642, 642, 642, 642, 642, 642, 642, 642, 642, 642, 642, 642, 642, 642, 642, 642, 642, 642, 642, 642, 642, 642, 642, 642, 642, 642, 642, 642, 642, 642, 642, 642, 642, 642, 642, 642, 642, 642, 642, 642, 642, 642, 642, 642, 642, 642, 642, 642, 642, 642, 642, 642, 642, 642, 642, 642, 642, 642, 642, 642, 642, 642, 642, 642, 642, 642, 642, 642, 642, 642, 642, 642, 642, 642, 642, 642, 642, 642, 642, 642, 642, 642, 642, 642, 642, 642, 642, 642, 642, 642, 642, 642, 642, 642, 642, 642, 642, 642, 642, 642, 642, 642, 642, 642, 642, 642, 642, 642, 642, 642, 642, 642, 642, 642, 642, 642, 642, 642, 642, 642, 642, 642, 642, 642, 642, 642, 642, 642, 642, 642, 642, 642, 642, 642, 642, 642, 642, 642, 642, 642, 642, 642, 642, 642, 642, 642, 642, 642, 642, 642, 642, 642, 642, 642, 642, 642, 642, 642, 642, 642, 642, 642, 642, 642, 642, 642, 642, 642, 642, 642, 642, 642, 642, 642, 642, 642, 642, 642, 642, 642, 642, 642, 642, 642, 642, 642, 642, 642, 642, 642, 642, 642, 642, 642, 642, 642, 642, 642, 642, 642, 642, 642, 642, 642, 642, 642, 642, 642, 642, 642, 642, 642, 642, 642, 642, 642, 642, 642, 642, 642, 642, 642, 642, 642, 642, 642, 642, 642, 642, 642, 642, 642, 642, 642, 642, 642, 642, 642, 642, 642, 642, 642, 642, 642, 642, 642, 642, 642, 642, 642, 642, 642, 642, 642, 642, 642, 642, 642, 642, 642, 642, 642, 642, 642, 642, 642, 642, 642, 642, 642, 642, 642, 642, 642, 642, 642, 642, 642, 642, 642, 642, 642, 642, 642, 642, 642, 642, 642, 642, 642, 642, 642, 642, 642, 642, 642, 642, 642, 642, 642, 642, 642, 642, 642, 642, 642, 642, 642, 642, 642, 642, 642, 642, 642, 642, 642, 642, 642, 642, 642, 642, 642, 642, 642, 642, 642, 642, 642, 642, 642, 642, 642, 642, 642, 642, 642, 642, 642, 642, 642, 642, 642, 642, 642, 642, 642, 642, 642, 642, 642, 642, 642, 642, 642, 642, 642, 642, 642, 642, 642}, + // 300 + {641, 641, 641, 641, 641, 641, 641, 641, 641, 641, 641, 641, 641, 641, 641, 641, 641, 641, 641, 641, 641, 641, 641, 641, 641, 641, 
641, 641, 641, 641, 641, 641, 641, 641, 641, 641, 641, 641, 641, 641, 641, 641, 641, 641, 641, 641, 641, 641, 641, 641, 641, 641, 641, 641, 641, 641, 641, 641, 641, 641, 641, 641, 641, 641, 641, 641, 641, 641, 641, 641, 641, 641, 641, 641, 641, 641, 641, 641, 641, 641, 641, 641, 641, 641, 641, 641, 641, 641, 641, 641, 641, 641, 641, 641, 641, 641, 641, 641, 641, 641, 641, 641, 641, 641, 641, 641, 641, 641, 641, 641, 641, 641, 641, 641, 641, 641, 641, 641, 641, 641, 641, 641, 641, 641, 641, 641, 641, 641, 641, 641, 641, 641, 641, 641, 641, 641, 641, 641, 641, 641, 641, 641, 641, 641, 641, 641, 641, 641, 641, 641, 641, 641, 641, 641, 641, 641, 641, 641, 641, 641, 641, 641, 641, 641, 641, 641, 641, 641, 641, 641, 641, 641, 641, 641, 641, 641, 641, 641, 641, 641, 641, 641, 641, 641, 641, 641, 641, 641, 641, 641, 641, 641, 641, 641, 641, 641, 641, 641, 641, 641, 641, 641, 641, 641, 641, 641, 641, 641, 641, 641, 641, 641, 641, 641, 641, 641, 641, 641, 641, 641, 641, 641, 641, 641, 641, 641, 641, 641, 641, 641, 641, 641, 641, 641, 641, 641, 641, 641, 641, 641, 641, 641, 641, 641, 641, 641, 641, 641, 641, 641, 641, 641, 641, 641, 641, 641, 641, 641, 641, 641, 641, 641, 641, 641, 641, 641, 641, 641, 641, 641, 641, 641, 641, 641, 641, 641, 641, 641, 641, 641, 641, 641, 641, 641, 641, 641, 641, 641, 641, 641, 641, 641, 641, 641, 641, 641, 641, 641, 641, 641, 641, 641, 641, 641, 641, 641, 641, 641, 641, 641, 641, 641, 641, 641, 641, 641, 641, 641, 641, 641, 641, 641, 641, 641, 641, 641, 641, 641, 641, 641, 641, 641, 641, 641, 641, 641, 641, 641, 641, 641, 641, 641, 641, 641, 641, 641, 641, 641, 641, 641, 641, 641, 641, 641, 641, 641, 641, 641, 641, 641, 641, 641, 641, 641, 641, 641, 641, 641, 641, 641, 641, 641, 641, 641, 641, 641, 641, 641, 641, 641, 641, 641, 641, 641, 641, 641, 641, 641, 641, 641, 641, 641, 641, 641, 641, 641, 641, 641, 641, 641, 641, 641, 641, 641, 641, 641, 641, 641, 641, 641, 641, 641, 641, 641, 641, 641, 641, 641, 641, 641, 641, 641, 641, 641, 641, 641, 641, 641, 641, 641, 641, 641, 641, 641, 641, 641, 641, 641, 641, 641, 641, 641, 641, 641, 641, 641, 641, 641, 641, 641, 641, 641, 641, 641, 641, 641, 641, 641, 641, 641, 641, 641, 641, 641, 641, 641, 641, 641, 641, 641, 641, 641, 641, 641, 641, 641, 641, 641, 641, 641, 641, 641, 641, 641, 641, 641, 641, 641, 641, 641, 641, 641, 641, 641, 641, 641, 641, 641, 641, 641, 641, 641, 641, 641, 641, 641, 641, 641, 641, 641, 641, 641, 641, 641, 641, 641, 641, 641, 641, 641, 641, 641, 641}, + {640, 640, 640, 640, 640, 640, 640, 640, 640, 640, 640, 640, 640, 640, 640, 640, 640, 640, 640, 640, 640, 640, 640, 640, 640, 640, 640, 640, 640, 640, 640, 640, 640, 640, 640, 640, 640, 640, 640, 640, 640, 640, 640, 640, 640, 640, 640, 640, 640, 640, 640, 640, 640, 640, 640, 640, 640, 640, 640, 640, 640, 640, 640, 640, 640, 640, 640, 640, 640, 640, 640, 640, 640, 640, 640, 640, 640, 640, 640, 640, 640, 640, 640, 640, 640, 640, 640, 640, 640, 640, 640, 640, 640, 640, 640, 640, 640, 640, 640, 640, 640, 640, 640, 640, 640, 640, 640, 640, 640, 640, 640, 640, 640, 640, 640, 640, 640, 640, 640, 640, 640, 640, 640, 640, 640, 640, 640, 640, 640, 640, 640, 640, 640, 640, 640, 640, 640, 640, 640, 640, 640, 640, 640, 640, 640, 640, 640, 640, 640, 640, 640, 640, 640, 640, 640, 640, 640, 640, 640, 640, 640, 640, 640, 640, 640, 640, 640, 640, 640, 640, 640, 640, 640, 640, 640, 640, 640, 640, 640, 640, 640, 640, 640, 640, 640, 640, 640, 640, 640, 640, 640, 640, 640, 640, 640, 640, 640, 640, 640, 640, 640, 640, 640, 640, 640, 640, 640, 640, 640, 640, 640, 640, 640, 
640, 640, 640, 640, 640, 640, 640, 640, 640, 640, 640, 640, 640, 640, 640, 640, 640, 640, 640, 640, 640, 640, 640, 640, 640, 640, 640, 640, 640, 640, 640, 640, 640, 640, 640, 640, 640, 640, 640, 640, 640, 640, 640, 640, 640, 640, 640, 640, 640, 640, 640, 640, 640, 640, 640, 640, 640, 640, 640, 640, 640, 640, 640, 640, 640, 640, 640, 640, 640, 640, 640, 640, 640, 640, 640, 640, 640, 640, 640, 640, 640, 640, 640, 640, 640, 640, 640, 640, 640, 640, 640, 640, 640, 640, 640, 640, 640, 640, 640, 640, 640, 640, 640, 640, 640, 640, 640, 640, 640, 640, 640, 640, 640, 640, 640, 640, 640, 640, 640, 640, 640, 640, 640, 640, 640, 640, 640, 640, 640, 640, 640, 640, 640, 640, 640, 640, 640, 640, 640, 640, 640, 640, 640, 640, 640, 640, 640, 640, 640, 640, 640, 640, 640, 640, 640, 640, 640, 640, 640, 640, 640, 640, 640, 640, 640, 640, 640, 640, 640, 640, 640, 640, 640, 640, 640, 640, 640, 640, 640, 640, 640, 640, 640, 640, 640, 640, 640, 640, 640, 640, 640, 640, 640, 640, 640, 640, 640, 640, 640, 640, 640, 640, 640, 640, 640, 640, 640, 640, 640, 640, 640, 640, 640, 640, 640, 640, 640, 640, 640, 640, 640, 640, 640, 640, 640, 640, 640, 640, 640, 640, 640, 640, 640, 640, 640, 640, 640, 640, 640, 640, 640, 640, 640, 640, 640, 640, 640, 640, 640, 640, 640, 640, 640, 640, 640, 640, 640, 640, 640, 640, 640, 640, 640, 640, 640, 640, 640, 640, 640, 640, 640, 640, 640, 640, 640, 640, 640, 640, 640, 640, 640, 640, 640, 640, 640, 640, 640, 640, 640, 640, 640, 640, 640, 640, 640, 640, 640, 640, 640, 640, 640, 640, 640, 640, 640, 640, 640, 640, 640, 640}, + {639, 639, 639, 639, 639, 639, 639, 639, 639, 639, 639, 639, 639, 639, 639, 639, 639, 639, 639, 639, 639, 639, 639, 639, 639, 639, 639, 639, 639, 639, 639, 639, 639, 639, 639, 639, 639, 639, 639, 639, 639, 639, 639, 639, 639, 639, 639, 639, 639, 639, 639, 639, 639, 639, 639, 639, 639, 639, 639, 639, 639, 639, 639, 639, 639, 639, 639, 639, 639, 639, 639, 639, 639, 639, 639, 639, 639, 639, 639, 639, 639, 639, 639, 639, 639, 639, 639, 639, 639, 639, 639, 639, 639, 639, 639, 639, 639, 639, 639, 639, 639, 639, 639, 639, 639, 639, 639, 639, 639, 639, 639, 639, 639, 639, 639, 639, 639, 639, 639, 639, 639, 639, 639, 639, 639, 639, 639, 639, 639, 639, 639, 639, 639, 639, 639, 639, 639, 639, 639, 639, 639, 639, 639, 639, 639, 639, 639, 639, 639, 639, 639, 639, 639, 639, 639, 639, 639, 639, 639, 639, 639, 639, 639, 639, 639, 639, 639, 639, 639, 639, 639, 639, 639, 639, 639, 639, 639, 639, 639, 639, 639, 639, 639, 639, 639, 639, 639, 639, 639, 639, 639, 639, 639, 639, 639, 639, 639, 639, 639, 639, 639, 639, 639, 639, 639, 639, 639, 639, 639, 639, 639, 639, 639, 639, 639, 639, 639, 639, 639, 639, 639, 639, 639, 639, 639, 639, 639, 639, 639, 639, 639, 639, 639, 639, 639, 639, 639, 639, 639, 639, 639, 639, 639, 639, 639, 639, 639, 639, 639, 639, 639, 639, 639, 639, 639, 639, 639, 639, 639, 639, 639, 639, 639, 639, 639, 639, 639, 639, 639, 639, 639, 639, 639, 639, 639, 639, 639, 639, 639, 639, 639, 639, 639, 639, 639, 639, 639, 639, 639, 639, 639, 639, 639, 639, 639, 639, 639, 639, 639, 639, 639, 639, 639, 639, 639, 639, 639, 639, 639, 639, 639, 639, 639, 639, 639, 639, 639, 639, 639, 639, 639, 639, 639, 639, 639, 639, 639, 639, 639, 639, 639, 639, 639, 639, 639, 639, 639, 639, 639, 639, 639, 639, 639, 639, 639, 639, 639, 639, 639, 639, 639, 639, 639, 639, 639, 639, 639, 639, 639, 639, 639, 639, 639, 639, 639, 639, 639, 639, 639, 639, 639, 639, 639, 639, 639, 639, 639, 639, 639, 639, 639, 639, 639, 639, 639, 639, 639, 639, 639, 639, 639, 639, 639, 639, 639, 639, 639, 639, 639, 639, 
639, 639, 639, 639, 639, 639, 639, 639, 639, 639, 639, 639, 639, 639, 639, 639, 639, 639, 639, 639, 639, 639, 639, 639, 639, 639, 639, 639, 639, 639, 639, 639, 639, 639, 639, 639, 639, 639, 639, 639, 639, 639, 639, 639, 639, 639, 639, 639, 639, 639, 639, 639, 639, 639, 639, 639, 639, 639, 639, 639, 639, 639, 639, 639, 639, 639, 639, 639, 639, 639, 639, 639, 639, 639, 639, 639, 639, 639, 639, 639, 639, 639, 639, 639, 639, 639, 639, 639, 639, 639, 639, 639, 639, 639, 639, 639, 639, 639, 639, 639, 639, 639, 639, 639, 639, 639, 639, 639, 639, 639, 639, 639, 639, 639, 639, 639, 639, 639, 639, 639, 639, 639, 639}, + {638, 638, 638, 638, 638, 638, 638, 638, 638, 638, 638, 638, 638, 638, 638, 638, 638, 638, 638, 638, 638, 638, 638, 638, 638, 638, 638, 638, 638, 638, 638, 638, 638, 638, 638, 638, 638, 638, 638, 638, 638, 638, 638, 638, 638, 638, 638, 638, 638, 638, 638, 638, 638, 638, 638, 638, 638, 638, 638, 638, 638, 638, 638, 638, 638, 638, 638, 638, 638, 638, 638, 638, 638, 638, 638, 638, 638, 638, 638, 638, 638, 638, 638, 638, 638, 638, 638, 638, 638, 638, 638, 638, 638, 638, 638, 638, 638, 638, 638, 638, 638, 638, 638, 638, 638, 638, 638, 638, 638, 638, 638, 638, 638, 638, 638, 638, 638, 638, 638, 638, 638, 638, 638, 638, 638, 638, 638, 638, 638, 638, 638, 638, 638, 638, 638, 638, 638, 638, 638, 638, 638, 638, 638, 638, 638, 638, 638, 638, 638, 638, 638, 638, 638, 638, 638, 638, 638, 638, 638, 638, 638, 638, 638, 638, 638, 638, 638, 638, 638, 638, 638, 638, 638, 638, 638, 638, 638, 638, 638, 638, 638, 638, 638, 638, 638, 638, 638, 638, 638, 638, 638, 638, 638, 638, 638, 638, 638, 638, 638, 638, 638, 638, 638, 638, 638, 638, 638, 638, 638, 638, 638, 638, 638, 638, 638, 638, 638, 638, 638, 638, 638, 638, 638, 638, 638, 638, 638, 638, 638, 638, 638, 638, 638, 638, 638, 638, 638, 638, 638, 638, 638, 638, 638, 638, 638, 638, 638, 638, 638, 638, 638, 638, 638, 638, 638, 638, 638, 638, 638, 638, 638, 638, 638, 638, 638, 638, 638, 638, 638, 638, 638, 638, 638, 638, 638, 638, 638, 638, 638, 638, 638, 638, 638, 638, 638, 638, 638, 638, 638, 638, 638, 638, 638, 638, 638, 638, 638, 638, 638, 638, 638, 638, 638, 638, 638, 638, 638, 638, 638, 638, 638, 638, 638, 638, 638, 638, 638, 638, 638, 638, 638, 638, 638, 638, 638, 638, 638, 638, 638, 638, 638, 638, 638, 638, 638, 638, 638, 638, 638, 638, 638, 638, 638, 638, 638, 638, 638, 638, 638, 638, 638, 638, 638, 638, 638, 638, 638, 638, 638, 638, 638, 638, 638, 638, 638, 638, 638, 638, 638, 638, 638, 638, 638, 638, 638, 638, 638, 638, 638, 638, 638, 638, 638, 638, 638, 638, 638, 638, 638, 638, 638, 638, 638, 638, 638, 638, 638, 638, 638, 638, 638, 638, 638, 638, 638, 638, 638, 638, 638, 638, 638, 638, 638, 638, 638, 638, 638, 638, 638, 638, 638, 638, 638, 638, 638, 638, 638, 638, 638, 638, 638, 638, 638, 638, 638, 638, 638, 638, 638, 638, 638, 638, 638, 638, 638, 638, 638, 638, 638, 638, 638, 638, 638, 638, 638, 638, 638, 638, 638, 638, 638, 638, 638, 638, 638, 638, 638, 638, 638, 638, 638, 638, 638, 638, 638, 638, 638, 638, 638, 638, 638, 638, 638, 638, 638, 638, 638, 638, 638, 638, 638, 638, 638, 638, 638, 638, 638, 638, 638, 638, 638, 638, 638, 638, 638, 638, 638, 638, 638, 638, 638, 638, 638, 638, 638, 638, 638, 638, 638, 638, 638, 638, 638}, + {637, 637, 637, 637, 637, 637, 637, 637, 637, 637, 637, 637, 637, 637, 637, 637, 637, 637, 637, 637, 637, 637, 637, 637, 637, 637, 637, 637, 637, 637, 637, 637, 637, 637, 637, 637, 637, 637, 637, 637, 637, 637, 637, 637, 637, 637, 637, 637, 637, 637, 637, 637, 637, 637, 637, 637, 637, 637, 637, 637, 637, 637, 637, 
637, 637, 637, 637, 637, 637, 637, 637, 637, 637, 637, 637, 637, 637, 637, 637, 637, 637, 637, 637, 637, 637, 637, 637, 637, 637, 637, 637, 637, 637, 637, 637, 637, 637, 637, 637, 637, 637, 637, 637, 637, 637, 637, 637, 637, 637, 637, 637, 637, 637, 637, 637, 637, 637, 637, 637, 637, 637, 637, 637, 637, 637, 637, 637, 637, 637, 637, 637, 637, 637, 637, 637, 637, 637, 637, 637, 637, 637, 637, 637, 637, 637, 637, 637, 637, 637, 637, 637, 637, 637, 637, 637, 637, 637, 637, 637, 637, 637, 637, 637, 637, 637, 637, 637, 637, 637, 637, 637, 637, 637, 637, 637, 637, 637, 637, 637, 637, 637, 637, 637, 637, 637, 637, 637, 637, 637, 637, 637, 637, 637, 637, 637, 637, 637, 637, 637, 637, 637, 637, 637, 637, 637, 637, 637, 637, 637, 637, 637, 637, 637, 637, 637, 637, 637, 637, 637, 637, 637, 637, 637, 637, 637, 637, 637, 637, 637, 637, 637, 637, 637, 637, 637, 637, 637, 637, 637, 637, 637, 637, 637, 637, 637, 637, 637, 637, 637, 637, 637, 637, 637, 637, 637, 637, 637, 637, 637, 637, 637, 637, 637, 637, 637, 637, 637, 637, 637, 637, 637, 637, 637, 637, 637, 637, 637, 637, 637, 637, 637, 637, 637, 637, 637, 637, 637, 637, 637, 637, 637, 637, 637, 637, 637, 637, 637, 637, 637, 637, 637, 637, 637, 637, 637, 637, 637, 637, 637, 637, 637, 637, 637, 637, 637, 637, 637, 637, 637, 637, 637, 637, 637, 637, 637, 637, 637, 637, 637, 637, 637, 637, 637, 637, 637, 637, 637, 637, 637, 637, 637, 637, 637, 637, 637, 637, 637, 637, 637, 637, 637, 637, 637, 637, 637, 637, 637, 637, 637, 637, 637, 637, 637, 637, 637, 637, 637, 637, 637, 637, 637, 637, 637, 637, 637, 637, 637, 637, 637, 637, 637, 637, 637, 637, 637, 637, 637, 637, 637, 637, 637, 637, 637, 637, 637, 637, 637, 637, 637, 637, 637, 637, 637, 637, 637, 637, 637, 637, 637, 637, 637, 637, 637, 637, 637, 637, 637, 637, 637, 637, 637, 637, 637, 637, 637, 637, 637, 637, 637, 637, 637, 637, 637, 637, 637, 637, 637, 637, 637, 637, 637, 637, 637, 637, 637, 637, 637, 637, 637, 637, 637, 637, 637, 637, 637, 637, 637, 637, 637, 637, 637, 637, 637, 637, 637, 637, 637, 637, 637, 637, 637, 637, 637, 637, 637, 637, 637, 637, 637, 637, 637, 637, 637, 637, 637, 637, 637, 637, 637, 637, 637, 637, 637, 637, 637, 637, 637, 637, 637, 637, 637, 637, 637, 637, 637, 637, 637, 637, 637, 637, 637, 637, 637, 637, 637, 637, 637, 637, 637, 637, 637, 637, 637}, + // 305 + {636, 636, 636, 636, 636, 636, 636, 636, 636, 636, 636, 636, 636, 636, 636, 636, 636, 636, 636, 636, 636, 636, 636, 636, 636, 636, 636, 636, 636, 636, 636, 636, 636, 636, 636, 636, 636, 636, 636, 636, 636, 636, 636, 636, 636, 636, 636, 636, 636, 636, 636, 636, 636, 636, 636, 636, 636, 636, 636, 636, 636, 636, 636, 636, 636, 636, 636, 636, 636, 636, 636, 636, 636, 636, 636, 636, 636, 636, 636, 636, 636, 636, 636, 636, 636, 636, 636, 636, 636, 636, 636, 636, 636, 636, 636, 636, 636, 636, 636, 636, 636, 636, 636, 636, 636, 636, 636, 636, 636, 636, 636, 636, 636, 636, 636, 636, 636, 636, 636, 636, 636, 636, 636, 636, 636, 636, 636, 636, 636, 636, 636, 636, 636, 636, 636, 636, 636, 636, 636, 636, 636, 636, 636, 636, 636, 636, 636, 636, 636, 636, 636, 636, 636, 636, 636, 636, 636, 636, 636, 636, 636, 636, 636, 636, 636, 636, 636, 636, 636, 636, 636, 636, 636, 636, 636, 636, 636, 636, 636, 636, 636, 636, 636, 636, 636, 636, 636, 636, 636, 636, 636, 636, 636, 636, 636, 636, 636, 636, 636, 636, 636, 636, 636, 636, 636, 636, 636, 636, 636, 636, 636, 636, 636, 636, 636, 636, 636, 636, 636, 636, 636, 636, 636, 636, 636, 636, 636, 636, 636, 636, 636, 636, 636, 636, 636, 636, 636, 636, 636, 636, 636, 636, 636, 636, 636, 636, 636, 636, 
636, 636, 636, 636, 636, 636, 636, 636, 636, 636, 636, 636, 636, 636, 636, 636, 636, 636, 636, 636, 636, 636, 636, 636, 636, 636, 636, 636, 636, 636, 636, 636, 636, 636, 636, 636, 636, 636, 636, 636, 636, 636, 636, 636, 636, 636, 636, 636, 636, 636, 636, 636, 636, 636, 636, 636, 636, 636, 636, 636, 636, 636, 636, 636, 636, 636, 636, 636, 636, 636, 636, 636, 636, 636, 636, 636, 636, 636, 636, 636, 636, 636, 636, 636, 636, 636, 636, 636, 636, 636, 636, 636, 636, 636, 636, 636, 636, 636, 636, 636, 636, 636, 636, 636, 636, 636, 636, 636, 636, 636, 636, 636, 636, 636, 636, 636, 636, 636, 636, 636, 636, 636, 636, 636, 636, 636, 636, 636, 636, 636, 636, 636, 636, 636, 636, 636, 636, 636, 636, 636, 636, 636, 636, 636, 636, 636, 636, 636, 636, 636, 636, 636, 636, 636, 636, 636, 636, 636, 636, 636, 636, 636, 636, 636, 636, 636, 636, 636, 636, 636, 636, 636, 636, 636, 636, 636, 636, 636, 636, 636, 636, 636, 636, 636, 636, 636, 636, 636, 636, 636, 636, 636, 636, 636, 636, 636, 636, 636, 636, 636, 636, 636, 636, 636, 636, 636, 636, 636, 636, 636, 636, 636, 636, 636, 636, 636, 636, 636, 636, 636, 636, 636, 636, 636, 636, 636, 636, 636, 636, 636, 636, 636, 636, 636, 636, 636, 636, 636, 636, 636, 636, 636, 636, 636, 636, 636, 636, 636, 636, 636, 636, 636, 636, 636, 636, 636, 636, 636, 636, 636, 636, 636, 636, 636, 636, 636, 636, 636, 636, 636, 636, 636, 636, 636, 636}, + {635, 635, 635, 635, 635, 635, 635, 635, 635, 635, 635, 635, 635, 635, 635, 635, 635, 635, 635, 635, 635, 635, 635, 635, 635, 635, 635, 635, 635, 635, 635, 635, 635, 635, 635, 635, 635, 635, 635, 635, 635, 635, 635, 635, 635, 635, 635, 635, 635, 635, 635, 635, 635, 635, 635, 635, 635, 635, 635, 635, 635, 635, 635, 635, 635, 635, 635, 635, 635, 635, 635, 635, 635, 635, 635, 635, 635, 635, 635, 635, 635, 635, 635, 635, 635, 635, 635, 635, 635, 635, 635, 635, 635, 635, 635, 635, 635, 635, 635, 635, 635, 635, 635, 635, 635, 635, 635, 635, 635, 635, 635, 635, 635, 635, 635, 635, 635, 635, 635, 635, 635, 635, 635, 635, 635, 635, 635, 635, 635, 635, 635, 635, 635, 635, 635, 635, 635, 635, 635, 635, 635, 635, 635, 635, 635, 635, 635, 635, 635, 635, 635, 635, 635, 635, 635, 635, 635, 635, 635, 635, 635, 635, 635, 635, 635, 635, 635, 635, 635, 635, 635, 635, 635, 635, 635, 635, 635, 635, 635, 635, 635, 635, 635, 635, 635, 635, 635, 635, 635, 635, 635, 635, 635, 635, 635, 635, 635, 635, 635, 635, 635, 635, 635, 635, 635, 635, 635, 635, 635, 635, 635, 635, 635, 635, 635, 635, 635, 635, 635, 635, 635, 635, 635, 635, 635, 635, 635, 635, 635, 635, 635, 635, 635, 635, 635, 635, 635, 635, 635, 635, 635, 635, 635, 635, 635, 635, 635, 635, 635, 635, 635, 635, 635, 635, 635, 635, 635, 635, 635, 635, 635, 635, 635, 635, 635, 635, 635, 635, 635, 635, 635, 635, 635, 635, 635, 635, 635, 635, 635, 635, 635, 635, 635, 635, 635, 635, 635, 635, 635, 635, 635, 635, 635, 635, 635, 635, 635, 635, 635, 635, 635, 635, 635, 635, 635, 635, 635, 635, 635, 635, 635, 635, 635, 635, 635, 635, 635, 635, 635, 635, 635, 635, 635, 635, 635, 635, 635, 635, 635, 635, 635, 635, 635, 635, 635, 635, 635, 635, 635, 635, 635, 635, 635, 635, 635, 635, 635, 635, 635, 635, 635, 635, 635, 635, 635, 635, 635, 635, 635, 635, 635, 635, 635, 635, 635, 635, 635, 635, 635, 635, 635, 635, 635, 635, 635, 635, 635, 635, 635, 635, 635, 635, 635, 635, 635, 635, 635, 635, 635, 635, 635, 635, 635, 635, 635, 635, 635, 635, 635, 635, 635, 635, 635, 635, 635, 635, 635, 635, 635, 635, 635, 635, 635, 635, 635, 635, 635, 635, 635, 635, 635, 635, 635, 635, 635, 635, 635, 635, 635, 635, 635, 635, 635, 635, 635, 
635, 635, 635, 635, 635, 635, 635, 635, 635, 635, 635, 635, 635, 635, 635, 635, 635, 635, 635, 635, 635, 635, 635, 635, 635, 635, 635, 635, 635, 635, 635, 635, 635, 635, 635, 635, 635, 635, 635, 635, 635, 635, 635, 635, 635, 635, 635, 635, 635, 635, 635, 635, 635, 635, 635, 635, 635, 635, 635, 635, 635, 635, 635, 635, 635, 635, 635, 635, 635, 635, 635, 635, 635, 635, 635, 635, 635, 635, 635, 635, 635, 635, 635, 635, 635, 635, 635, 635}, + {634, 634, 634, 634, 634, 634, 634, 634, 634, 634, 634, 634, 634, 634, 634, 634, 634, 634, 634, 634, 634, 634, 634, 634, 634, 634, 634, 634, 634, 634, 634, 634, 634, 634, 634, 634, 634, 634, 634, 634, 634, 634, 634, 634, 634, 634, 634, 634, 634, 634, 634, 634, 634, 634, 634, 634, 634, 634, 634, 634, 634, 634, 634, 634, 634, 634, 634, 634, 634, 634, 634, 634, 634, 634, 634, 634, 634, 634, 634, 634, 634, 634, 634, 634, 634, 634, 634, 634, 634, 634, 634, 634, 634, 634, 634, 634, 634, 634, 634, 634, 634, 634, 634, 634, 634, 634, 634, 634, 634, 634, 634, 634, 634, 634, 634, 634, 634, 634, 634, 634, 634, 634, 634, 634, 634, 634, 634, 634, 634, 634, 634, 634, 634, 634, 634, 634, 634, 634, 634, 634, 634, 634, 634, 634, 634, 634, 634, 634, 634, 634, 634, 634, 634, 634, 634, 634, 634, 634, 634, 634, 634, 634, 634, 634, 634, 634, 634, 634, 634, 634, 634, 634, 634, 634, 634, 634, 634, 634, 634, 634, 634, 634, 634, 634, 634, 634, 634, 634, 634, 634, 634, 634, 634, 634, 634, 634, 634, 634, 634, 634, 634, 634, 634, 634, 634, 634, 634, 634, 634, 634, 634, 634, 634, 634, 634, 634, 634, 634, 634, 634, 634, 634, 634, 634, 634, 634, 634, 634, 634, 634, 634, 634, 634, 634, 634, 634, 634, 634, 634, 634, 634, 634, 634, 634, 634, 634, 634, 634, 634, 634, 634, 634, 634, 634, 634, 634, 634, 634, 634, 634, 634, 634, 634, 634, 634, 634, 634, 634, 634, 634, 634, 634, 634, 634, 634, 634, 634, 634, 634, 634, 634, 634, 634, 634, 634, 634, 634, 634, 634, 634, 634, 634, 634, 634, 634, 634, 634, 634, 634, 634, 634, 634, 634, 634, 634, 634, 634, 634, 634, 634, 634, 634, 634, 634, 634, 634, 634, 634, 634, 634, 634, 634, 634, 634, 634, 634, 634, 634, 634, 634, 634, 634, 634, 634, 634, 634, 634, 634, 634, 634, 634, 634, 634, 634, 634, 634, 634, 634, 634, 634, 634, 634, 634, 634, 634, 634, 634, 634, 634, 634, 634, 634, 634, 634, 634, 634, 634, 634, 634, 634, 634, 634, 634, 634, 634, 634, 634, 634, 634, 634, 634, 634, 634, 634, 634, 634, 634, 634, 634, 634, 634, 634, 634, 634, 634, 634, 634, 634, 634, 634, 634, 634, 634, 634, 634, 634, 634, 634, 634, 634, 634, 634, 634, 634, 634, 634, 634, 634, 634, 634, 634, 634, 634, 634, 634, 634, 634, 634, 634, 634, 634, 634, 634, 634, 634, 634, 634, 634, 634, 634, 634, 634, 634, 634, 634, 634, 634, 634, 634, 634, 634, 634, 634, 634, 634, 634, 634, 634, 634, 634, 634, 634, 634, 634, 634, 634, 634, 634, 634, 634, 634, 634, 634, 634, 634, 634, 634, 634, 634, 634, 634, 634, 634, 634, 634, 634, 634, 634, 634, 634, 634, 634, 634, 634, 634, 634, 634, 634, 634, 634, 634, 634, 634, 634, 634, 634, 634, 634, 634, 634, 634, 634, 634, 634, 634, 634, 634, 634, 634, 634, 634, 634, 634}, + {633, 633, 633, 633, 633, 633, 633, 633, 633, 633, 633, 633, 633, 633, 633, 633, 633, 633, 633, 633, 633, 633, 633, 633, 633, 633, 633, 633, 633, 633, 633, 633, 633, 633, 633, 633, 633, 633, 633, 633, 633, 633, 633, 633, 633, 633, 633, 633, 633, 633, 633, 633, 633, 633, 633, 633, 633, 633, 633, 633, 633, 633, 633, 633, 633, 633, 633, 633, 633, 633, 633, 633, 633, 633, 633, 633, 633, 633, 633, 633, 633, 633, 633, 633, 633, 633, 633, 633, 633, 633, 633, 633, 633, 633, 633, 633, 633, 633, 
633, 633, 633, 633, 633, 633, 633, 633, 633, 633, 633, 633, 633, 633, 633, 633, 633, 633, 633, 633, 633, 633, 633, 633, 633, 633, 633, 633, 633, 633, 633, 633, 633, 633, 633, 633, 633, 633, 633, 633, 633, 633, 633, 633, 633, 633, 633, 633, 633, 633, 633, 633, 633, 633, 633, 633, 633, 633, 633, 633, 633, 633, 633, 633, 633, 633, 633, 633, 633, 633, 633, 633, 633, 633, 633, 633, 633, 633, 633, 633, 633, 633, 633, 633, 633, 633, 633, 633, 633, 633, 633, 633, 633, 633, 633, 633, 633, 633, 633, 633, 633, 633, 633, 633, 633, 633, 633, 633, 633, 633, 633, 633, 633, 633, 633, 633, 633, 633, 633, 633, 633, 633, 633, 633, 633, 633, 633, 633, 633, 633, 633, 633, 633, 633, 633, 633, 633, 633, 633, 633, 633, 633, 633, 633, 633, 633, 633, 633, 633, 633, 633, 633, 633, 633, 633, 633, 633, 633, 633, 633, 633, 633, 633, 633, 633, 633, 633, 633, 633, 633, 633, 633, 633, 633, 633, 633, 633, 633, 633, 633, 633, 633, 633, 633, 633, 633, 633, 633, 633, 633, 633, 633, 633, 633, 633, 633, 633, 633, 633, 633, 633, 633, 633, 633, 633, 633, 633, 633, 633, 633, 633, 633, 633, 633, 633, 633, 633, 633, 633, 633, 633, 633, 633, 633, 633, 633, 633, 633, 633, 633, 633, 633, 633, 633, 633, 633, 633, 633, 633, 633, 633, 633, 633, 633, 633, 633, 633, 633, 633, 633, 633, 633, 633, 633, 633, 633, 633, 633, 633, 633, 633, 633, 633, 633, 633, 633, 633, 633, 633, 633, 633, 633, 633, 633, 633, 633, 633, 633, 633, 633, 633, 633, 633, 633, 633, 633, 633, 633, 633, 633, 633, 633, 633, 633, 633, 633, 633, 633, 633, 633, 633, 633, 633, 633, 633, 633, 633, 633, 633, 633, 633, 633, 633, 633, 633, 633, 633, 633, 633, 633, 633, 633, 633, 633, 633, 633, 633, 633, 633, 633, 633, 633, 633, 633, 633, 633, 633, 633, 633, 633, 633, 633, 633, 633, 633, 633, 633, 633, 633, 633, 633, 633, 633, 633, 633, 633, 633, 633, 633, 633, 633, 633, 633, 633, 633, 633, 633, 633, 633, 633, 633, 633, 633, 633, 633, 633, 633, 633, 633, 633, 633, 633, 633, 633, 633, 633, 633, 633, 633, 633, 633, 633, 633, 633, 633, 633, 633, 633, 633, 633, 633, 633, 633, 633, 633, 633, 633, 633, 633, 633, 633, 633, 633, 633, 633, 633, 633, 633, 633, 633, 633, 633, 633, 633, 633}, + {632, 632, 632, 632, 632, 632, 632, 632, 632, 632, 632, 632, 632, 632, 632, 632, 632, 632, 632, 632, 632, 632, 632, 632, 632, 632, 632, 632, 632, 632, 632, 632, 632, 632, 632, 632, 632, 632, 632, 632, 632, 632, 632, 632, 632, 632, 632, 632, 632, 632, 632, 632, 632, 632, 632, 632, 632, 632, 632, 632, 632, 632, 632, 632, 632, 632, 632, 632, 632, 632, 632, 632, 632, 632, 632, 632, 632, 632, 632, 632, 632, 632, 632, 632, 632, 632, 632, 632, 632, 632, 632, 632, 632, 632, 632, 632, 632, 632, 632, 632, 632, 632, 632, 632, 632, 632, 632, 632, 632, 632, 632, 632, 632, 632, 632, 632, 632, 632, 632, 632, 632, 632, 632, 632, 632, 632, 632, 632, 632, 632, 632, 632, 632, 632, 632, 632, 632, 632, 632, 632, 632, 632, 632, 632, 632, 632, 632, 632, 632, 632, 632, 632, 632, 632, 632, 632, 632, 632, 632, 632, 632, 632, 632, 632, 632, 632, 632, 632, 632, 632, 632, 632, 632, 632, 632, 632, 632, 632, 632, 632, 632, 632, 632, 632, 632, 632, 632, 632, 632, 632, 632, 632, 632, 632, 632, 632, 632, 632, 632, 632, 632, 632, 632, 632, 632, 632, 632, 632, 632, 632, 632, 632, 632, 632, 632, 632, 632, 632, 632, 632, 632, 632, 632, 632, 632, 632, 632, 632, 632, 632, 632, 632, 632, 632, 632, 632, 632, 632, 632, 632, 632, 632, 632, 632, 632, 632, 632, 632, 632, 632, 632, 632, 632, 632, 632, 632, 632, 632, 632, 632, 632, 632, 632, 632, 632, 632, 632, 632, 632, 632, 632, 632, 632, 632, 632, 632, 632, 632, 632, 632, 632, 632, 632, 632, 632, 
632, 632, 632, 632, 632, 632, 632, 632, 632, 632, 632, 632, 632, 632, 632, 632, 632, 632, 632, 632, 632, 632, 632, 632, 632, 632, 632, 632, 632, 632, 632, 632, 632, 632, 632, 632, 632, 632, 632, 632, 632, 632, 632, 632, 632, 632, 632, 632, 632, 632, 632, 632, 632, 632, 632, 632, 632, 632, 632, 632, 632, 632, 632, 632, 632, 632, 632, 632, 632, 632, 632, 632, 632, 632, 632, 632, 632, 632, 632, 632, 632, 632, 632, 632, 632, 632, 632, 632, 632, 632, 632, 632, 632, 632, 632, 632, 632, 632, 632, 632, 632, 632, 632, 632, 632, 632, 632, 632, 632, 632, 632, 632, 632, 632, 632, 632, 632, 632, 632, 632, 632, 632, 632, 632, 632, 632, 632, 632, 632, 632, 632, 632, 632, 632, 632, 632, 632, 632, 632, 632, 632, 632, 632, 632, 632, 632, 632, 632, 632, 632, 632, 632, 632, 632, 632, 632, 632, 632, 632, 632, 632, 632, 632, 632, 632, 632, 632, 632, 632, 632, 632, 632, 632, 632, 632, 632, 632, 632, 632, 632, 632, 632, 632, 632, 632, 632, 632, 632, 632, 632, 632, 632, 632, 632, 632, 632, 632, 632, 632, 632, 632, 632, 632, 632, 632, 632, 632, 632, 632, 632, 632, 632, 632, 632, 632, 632, 632, 632, 632, 632, 632, 632, 632, 632, 632, 632, 632, 632, 632, 632, 632, 632, 632, 632, 632, 632, 632, 632}, + // 310 + {631, 631, 631, 631, 631, 631, 631, 631, 631, 631, 631, 631, 631, 631, 631, 631, 631, 631, 631, 631, 631, 631, 631, 631, 631, 631, 631, 631, 631, 631, 631, 631, 631, 631, 631, 631, 631, 631, 631, 631, 631, 631, 631, 631, 631, 631, 631, 631, 631, 631, 631, 631, 631, 631, 631, 631, 631, 631, 631, 631, 631, 631, 631, 631, 631, 631, 631, 631, 631, 631, 631, 631, 631, 631, 631, 631, 631, 631, 631, 631, 631, 631, 631, 631, 631, 631, 631, 631, 631, 631, 631, 631, 631, 631, 631, 631, 631, 631, 631, 631, 631, 631, 631, 631, 631, 631, 631, 631, 631, 631, 631, 631, 631, 631, 631, 631, 631, 631, 631, 631, 631, 631, 631, 631, 631, 631, 631, 631, 631, 631, 631, 631, 631, 631, 631, 631, 631, 631, 631, 631, 631, 631, 631, 631, 631, 631, 631, 631, 631, 631, 631, 631, 631, 631, 631, 631, 631, 631, 631, 631, 631, 631, 631, 631, 631, 631, 631, 631, 631, 631, 631, 631, 631, 631, 631, 631, 631, 631, 631, 631, 631, 631, 631, 631, 631, 631, 631, 631, 631, 631, 631, 631, 631, 631, 631, 631, 631, 631, 631, 631, 631, 631, 631, 631, 631, 631, 631, 631, 631, 631, 631, 631, 631, 631, 631, 631, 631, 631, 631, 631, 631, 631, 631, 631, 631, 631, 631, 631, 631, 631, 631, 631, 631, 631, 631, 631, 631, 631, 631, 631, 631, 631, 631, 631, 631, 631, 631, 631, 631, 631, 631, 631, 631, 631, 631, 631, 631, 631, 631, 631, 631, 631, 631, 631, 631, 631, 631, 631, 631, 631, 631, 631, 631, 631, 631, 631, 631, 631, 631, 631, 631, 631, 631, 631, 631, 631, 631, 631, 631, 631, 631, 631, 631, 631, 631, 631, 631, 631, 631, 631, 631, 631, 631, 631, 631, 631, 631, 631, 631, 631, 631, 631, 631, 631, 631, 631, 631, 631, 631, 631, 631, 631, 631, 631, 631, 631, 631, 631, 631, 631, 631, 631, 631, 631, 631, 631, 631, 631, 631, 631, 631, 631, 631, 631, 631, 631, 631, 631, 631, 631, 631, 631, 631, 631, 631, 631, 631, 631, 631, 631, 631, 631, 631, 631, 631, 631, 631, 631, 631, 631, 631, 631, 631, 631, 631, 631, 631, 631, 631, 631, 631, 631, 631, 631, 631, 631, 631, 631, 631, 631, 631, 631, 631, 631, 631, 631, 631, 631, 631, 631, 631, 631, 631, 631, 631, 631, 631, 631, 631, 631, 631, 631, 631, 631, 631, 631, 631, 631, 631, 631, 631, 631, 631, 631, 631, 631, 631, 631, 631, 631, 631, 631, 631, 631, 631, 631, 631, 631, 631, 631, 631, 631, 631, 631, 631, 631, 631, 631, 631, 631, 631, 631, 631, 631, 631, 631, 631, 631, 631, 631, 631, 631, 631, 631, 631, 631, 631, 631, 631, 631, 
631, 631, 631, 631, 631, 631, 631, 631, 631, 631, 631, 631, 631, 631, 631, 631, 631, 631, 631, 631, 631, 631, 631, 631, 631, 631, 631, 631, 631, 631, 631, 631, 631, 631, 631, 631, 631, 631, 631, 631, 631, 631, 631, 631, 631, 631, 631, 631, 631, 631, 631, 631, 631}, + {630, 630, 630, 630, 630, 630, 630, 630, 630, 630, 630, 630, 630, 630, 630, 630, 630, 630, 630, 630, 630, 630, 630, 630, 630, 630, 630, 630, 630, 630, 630, 630, 630, 630, 630, 630, 630, 630, 630, 630, 630, 630, 630, 630, 630, 630, 630, 630, 630, 630, 630, 630, 630, 630, 630, 630, 630, 630, 630, 630, 630, 630, 630, 630, 630, 630, 630, 630, 630, 630, 630, 630, 630, 630, 630, 630, 630, 630, 630, 630, 630, 630, 630, 630, 630, 630, 630, 630, 630, 630, 630, 630, 630, 630, 630, 630, 630, 630, 630, 630, 630, 630, 630, 630, 630, 630, 630, 630, 630, 630, 630, 630, 630, 630, 630, 630, 630, 630, 630, 630, 630, 630, 630, 630, 630, 630, 630, 630, 630, 630, 630, 630, 630, 630, 630, 630, 630, 630, 630, 630, 630, 630, 630, 630, 630, 630, 630, 630, 630, 630, 630, 630, 630, 630, 630, 630, 630, 630, 630, 630, 630, 630, 630, 630, 630, 630, 630, 630, 630, 630, 630, 630, 630, 630, 630, 630, 630, 630, 630, 630, 630, 630, 630, 630, 630, 630, 630, 630, 630, 630, 630, 630, 630, 630, 630, 630, 630, 630, 630, 630, 630, 630, 630, 630, 630, 630, 630, 630, 630, 630, 630, 630, 630, 630, 630, 630, 630, 630, 630, 630, 630, 630, 630, 630, 630, 630, 630, 630, 630, 630, 630, 630, 630, 630, 630, 630, 630, 630, 630, 630, 630, 630, 630, 630, 630, 630, 630, 630, 630, 630, 630, 630, 630, 630, 630, 630, 630, 630, 630, 630, 630, 630, 630, 630, 630, 630, 630, 630, 630, 630, 630, 630, 630, 630, 630, 630, 630, 630, 630, 630, 630, 630, 630, 630, 630, 630, 630, 630, 630, 630, 630, 630, 630, 630, 630, 630, 630, 630, 630, 630, 630, 630, 630, 630, 630, 630, 630, 630, 630, 630, 630, 630, 630, 630, 630, 630, 630, 630, 630, 630, 630, 630, 630, 630, 630, 630, 630, 630, 630, 630, 630, 630, 630, 630, 630, 630, 630, 630, 630, 630, 630, 630, 630, 630, 630, 630, 630, 630, 630, 630, 630, 630, 630, 630, 630, 630, 630, 630, 630, 630, 630, 630, 630, 630, 630, 630, 630, 630, 630, 630, 630, 630, 630, 630, 630, 630, 630, 630, 630, 630, 630, 630, 630, 630, 630, 630, 630, 630, 630, 630, 630, 630, 630, 630, 630, 630, 630, 630, 630, 630, 630, 630, 630, 630, 630, 630, 630, 630, 630, 630, 630, 630, 630, 630, 630, 630, 630, 630, 630, 630, 630, 630, 630, 630, 630, 630, 630, 630, 630, 630, 630, 630, 630, 630, 630, 630, 630, 630, 630, 630, 630, 630, 630, 630, 630, 630, 630, 630, 630, 630, 630, 630, 630, 630, 630, 630, 630, 630, 630, 630, 630, 630, 630, 630, 630, 630, 630, 630, 630, 630, 630, 630, 630, 630, 630, 630, 630, 630, 630, 630, 630, 630, 630, 630, 630, 630, 630, 630, 630, 630, 630, 630, 630, 630, 630, 630, 630, 630, 630, 630, 630, 630, 630, 630, 630, 630, 630, 630, 630, 630, 630, 630, 630, 630, 630, 630, 630, 630, 630, 630, 630, 630, 630}, + {629, 629, 629, 629, 629, 629, 629, 629, 629, 629, 629, 629, 629, 629, 629, 629, 629, 629, 629, 629, 629, 629, 629, 629, 629, 629, 629, 629, 629, 629, 629, 629, 629, 629, 629, 629, 629, 629, 629, 629, 629, 629, 629, 629, 629, 629, 629, 629, 629, 629, 629, 629, 629, 629, 629, 629, 629, 629, 629, 629, 629, 629, 629, 629, 629, 629, 629, 629, 629, 629, 629, 629, 629, 629, 629, 629, 629, 629, 629, 629, 629, 629, 629, 629, 629, 629, 629, 629, 629, 629, 629, 629, 629, 629, 629, 629, 629, 629, 629, 629, 629, 629, 629, 629, 629, 629, 629, 629, 629, 629, 629, 629, 629, 629, 629, 629, 629, 629, 629, 629, 629, 629, 629, 629, 629, 629, 629, 629, 629, 629, 629, 629, 629, 
629, 629, 629, 629, 629, 629, 629, 629, 629, 629, 629, 629, 629, 629, 629, 629, 629, 629, 629, 629, 629, 629, 629, 629, 629, 629, 629, 629, 629, 629, 629, 629, 629, 629, 629, 629, 629, 629, 629, 629, 629, 629, 629, 629, 629, 629, 629, 629, 629, 629, 629, 629, 629, 629, 629, 629, 629, 629, 629, 629, 629, 629, 629, 629, 629, 629, 629, 629, 629, 629, 629, 629, 629, 629, 629, 629, 629, 629, 629, 629, 629, 629, 629, 629, 629, 629, 629, 629, 629, 629, 629, 629, 629, 629, 629, 629, 629, 629, 629, 629, 629, 629, 629, 629, 629, 629, 629, 629, 629, 629, 629, 629, 629, 629, 629, 629, 629, 629, 629, 629, 629, 629, 629, 629, 629, 629, 629, 629, 629, 629, 629, 629, 629, 629, 629, 629, 629, 629, 629, 629, 629, 629, 629, 629, 629, 629, 629, 629, 629, 629, 629, 629, 629, 629, 629, 629, 629, 629, 629, 629, 629, 629, 629, 629, 629, 629, 629, 629, 629, 629, 629, 629, 629, 629, 629, 629, 629, 629, 629, 629, 629, 629, 629, 629, 629, 629, 629, 629, 629, 629, 629, 629, 629, 629, 629, 629, 629, 629, 629, 629, 629, 629, 629, 629, 629, 629, 629, 629, 629, 629, 629, 629, 629, 629, 629, 629, 629, 629, 629, 629, 629, 629, 629, 629, 629, 629, 629, 629, 629, 629, 629, 629, 629, 629, 629, 629, 629, 629, 629, 629, 629, 629, 629, 629, 629, 629, 629, 629, 629, 629, 629, 629, 629, 629, 629, 629, 629, 629, 629, 629, 629, 629, 629, 629, 629, 629, 629, 629, 629, 629, 629, 629, 629, 629, 629, 629, 629, 629, 629, 629, 629, 629, 629, 629, 629, 629, 629, 629, 629, 629, 629, 629, 629, 629, 629, 629, 629, 629, 629, 629, 629, 629, 629, 629, 629, 629, 629, 629, 629, 629, 629, 629, 629, 629, 629, 629, 629, 629, 629, 629, 629, 629, 629, 629, 629, 629, 629, 629, 629, 629, 629, 629, 629, 629, 629, 629, 629, 629, 629, 629, 629, 629, 629, 629, 629, 629, 629, 629, 629, 629, 629, 629, 629, 629, 629, 629, 629, 629, 629, 629, 629, 629, 629, 629, 629, 629, 629, 629, 629, 629, 629, 629, 629, 629, 629, 629, 629, 629, 629, 629, 629, 629, 629, 629, 629, 629, 629, 629, 629, 629}, + {628, 628, 628, 628, 628, 628, 628, 628, 628, 628, 628, 628, 628, 628, 628, 628, 628, 628, 628, 628, 628, 628, 628, 628, 628, 628, 628, 628, 628, 628, 628, 628, 628, 628, 628, 628, 628, 628, 628, 628, 628, 628, 628, 628, 628, 628, 628, 628, 628, 628, 628, 628, 628, 628, 628, 628, 628, 628, 628, 628, 628, 628, 628, 628, 628, 628, 628, 628, 628, 628, 628, 628, 628, 628, 628, 628, 628, 628, 628, 628, 628, 628, 628, 628, 628, 628, 628, 628, 628, 628, 628, 628, 628, 628, 628, 628, 628, 628, 628, 628, 628, 628, 628, 628, 628, 628, 628, 628, 628, 628, 628, 628, 628, 628, 628, 628, 628, 628, 628, 628, 628, 628, 628, 628, 628, 628, 628, 628, 628, 628, 628, 628, 628, 628, 628, 628, 628, 628, 628, 628, 628, 628, 628, 628, 628, 628, 628, 628, 628, 628, 628, 628, 628, 628, 628, 628, 628, 628, 628, 628, 628, 628, 628, 628, 628, 628, 628, 628, 628, 628, 628, 628, 628, 628, 628, 628, 628, 628, 628, 628, 628, 628, 628, 628, 628, 628, 628, 628, 628, 628, 628, 628, 628, 628, 628, 628, 628, 628, 628, 628, 628, 628, 628, 628, 628, 628, 628, 628, 628, 628, 628, 628, 628, 628, 628, 628, 628, 628, 628, 628, 628, 628, 628, 628, 628, 628, 628, 628, 628, 628, 628, 628, 628, 628, 628, 628, 628, 628, 628, 628, 628, 628, 628, 628, 628, 628, 628, 628, 628, 628, 628, 628, 628, 628, 628, 628, 628, 628, 628, 628, 628, 628, 628, 628, 628, 628, 628, 628, 628, 628, 628, 628, 628, 628, 628, 628, 628, 628, 628, 628, 628, 628, 628, 628, 628, 628, 628, 628, 628, 628, 628, 628, 628, 628, 628, 628, 628, 628, 628, 628, 628, 628, 628, 628, 628, 628, 628, 628, 628, 628, 628, 628, 628, 628, 628, 628, 628, 628, 628, 628, 
628, 628, 628, 628, 628, 628, 628, 628, 628, 628, 628, 628, 628, 628, 628, 628, 628, 628, 628, 628, 628, 628, 628, 628, 628, 628, 628, 628, 628, 628, 628, 628, 628, 628, 628, 628, 628, 628, 628, 628, 628, 628, 628, 628, 628, 628, 628, 628, 628, 628, 628, 628, 628, 628, 628, 628, 628, 628, 628, 628, 628, 628, 628, 628, 628, 628, 628, 628, 628, 628, 628, 628, 628, 628, 628, 628, 628, 628, 628, 628, 628, 628, 628, 628, 628, 628, 628, 628, 628, 628, 628, 628, 628, 628, 628, 628, 628, 628, 628, 628, 628, 628, 628, 628, 628, 628, 628, 628, 628, 628, 628, 628, 628, 628, 628, 628, 628, 628, 628, 628, 628, 628, 628, 628, 628, 628, 628, 628, 628, 628, 628, 628, 628, 628, 628, 628, 628, 628, 628, 628, 628, 628, 628, 628, 628, 628, 628, 628, 628, 628, 628, 628, 628, 628, 628, 628, 628, 628, 628, 628, 628, 628, 628, 628, 628, 628, 628, 628, 628, 628, 628, 628, 628, 628, 628, 628, 628, 628, 628, 628, 628, 628, 628, 628, 628, 628, 628, 628, 628, 628, 628, 628, 628, 628, 628, 628, 628, 628, 628, 628, 628, 628, 628}, + {627, 627, 627, 627, 627, 627, 627, 627, 627, 627, 627, 627, 627, 627, 627, 627, 627, 627, 627, 627, 627, 627, 627, 627, 627, 627, 627, 627, 627, 627, 627, 627, 627, 627, 627, 627, 627, 627, 627, 627, 627, 627, 627, 627, 627, 627, 627, 627, 627, 627, 627, 627, 627, 627, 627, 627, 627, 627, 627, 627, 627, 627, 627, 627, 627, 627, 627, 627, 627, 627, 627, 627, 627, 627, 627, 627, 627, 627, 627, 627, 627, 627, 627, 627, 627, 627, 627, 627, 627, 627, 627, 627, 627, 627, 627, 627, 627, 627, 627, 627, 627, 627, 627, 627, 627, 627, 627, 627, 627, 627, 627, 627, 627, 627, 627, 627, 627, 627, 627, 627, 627, 627, 627, 627, 627, 627, 627, 627, 627, 627, 627, 627, 627, 627, 627, 627, 627, 627, 627, 627, 627, 627, 627, 627, 627, 627, 627, 627, 627, 627, 627, 627, 627, 627, 627, 627, 627, 627, 627, 627, 627, 627, 627, 627, 627, 627, 627, 627, 627, 627, 627, 627, 627, 627, 627, 627, 627, 627, 627, 627, 627, 627, 627, 627, 627, 627, 627, 627, 627, 627, 627, 627, 627, 627, 627, 627, 627, 627, 627, 627, 627, 627, 627, 627, 627, 627, 627, 627, 627, 627, 627, 627, 627, 627, 627, 627, 627, 627, 627, 627, 627, 627, 627, 627, 627, 627, 627, 627, 627, 627, 627, 627, 627, 627, 627, 627, 627, 627, 627, 627, 627, 627, 627, 627, 627, 627, 627, 627, 627, 627, 627, 627, 627, 627, 627, 627, 627, 627, 627, 627, 627, 627, 627, 627, 627, 627, 627, 627, 627, 627, 627, 627, 627, 627, 627, 627, 627, 627, 627, 627, 627, 627, 627, 627, 627, 627, 627, 627, 627, 627, 627, 627, 627, 627, 627, 627, 627, 627, 627, 627, 627, 627, 627, 627, 627, 627, 627, 627, 627, 627, 627, 627, 627, 627, 627, 627, 627, 627, 627, 627, 627, 627, 627, 627, 627, 627, 627, 627, 627, 627, 627, 627, 627, 627, 627, 627, 627, 627, 627, 627, 627, 627, 627, 627, 627, 627, 627, 627, 627, 627, 627, 627, 627, 627, 627, 627, 627, 627, 627, 627, 627, 627, 627, 627, 627, 627, 627, 627, 627, 627, 627, 627, 627, 627, 627, 627, 627, 627, 627, 627, 627, 627, 627, 627, 627, 627, 627, 627, 627, 627, 627, 627, 627, 627, 627, 627, 627, 627, 627, 627, 627, 627, 627, 627, 627, 627, 627, 627, 627, 627, 627, 627, 627, 627, 627, 627, 627, 627, 627, 627, 627, 627, 627, 627, 627, 627, 627, 627, 627, 627, 627, 627, 627, 627, 627, 627, 627, 627, 627, 627, 627, 627, 627, 627, 627, 627, 627, 627, 627, 627, 627, 627, 627, 627, 627, 627, 627, 627, 627, 627, 627, 627, 627, 627, 627, 627, 627, 627, 627, 627, 627, 627, 627, 627, 627, 627, 627, 627, 627, 627, 627, 627, 627, 627, 627, 627, 627, 627, 627, 627, 627, 627, 627, 627, 627, 627, 627, 627, 627, 627, 627, 627, 627, 627, 627, 627, 627, 
627, 627, 627, 627, 627, 627, 627, 627, 627, 627, 627, 627, 627, 627, 627, 627}, + // 315 + {626, 626, 626, 626, 626, 626, 626, 626, 626, 626, 626, 626, 626, 626, 626, 626, 626, 626, 626, 626, 626, 626, 626, 626, 626, 626, 626, 626, 626, 626, 626, 626, 626, 626, 626, 626, 626, 626, 626, 626, 626, 626, 626, 626, 626, 626, 626, 626, 626, 626, 626, 626, 626, 626, 626, 626, 626, 626, 626, 626, 626, 626, 626, 626, 626, 626, 626, 626, 626, 626, 626, 626, 626, 626, 626, 626, 626, 626, 626, 626, 626, 626, 626, 626, 626, 626, 626, 626, 626, 626, 626, 626, 626, 626, 626, 626, 626, 626, 626, 626, 626, 626, 626, 626, 626, 626, 626, 626, 626, 626, 626, 626, 626, 626, 626, 626, 626, 626, 626, 626, 626, 626, 626, 626, 626, 626, 626, 626, 626, 626, 626, 626, 626, 626, 626, 626, 626, 626, 626, 626, 626, 626, 626, 626, 626, 626, 626, 626, 626, 626, 626, 626, 626, 626, 626, 626, 626, 626, 626, 626, 626, 626, 626, 626, 626, 626, 626, 626, 626, 626, 626, 626, 626, 626, 626, 626, 626, 626, 626, 626, 626, 626, 626, 626, 626, 626, 626, 626, 626, 626, 626, 626, 626, 626, 626, 626, 626, 626, 626, 626, 626, 626, 626, 626, 626, 626, 626, 626, 626, 626, 626, 626, 626, 626, 626, 626, 626, 626, 626, 626, 626, 626, 626, 626, 626, 626, 626, 626, 626, 626, 626, 626, 626, 626, 626, 626, 626, 626, 626, 626, 626, 626, 626, 626, 626, 626, 626, 626, 626, 626, 626, 626, 626, 626, 626, 626, 626, 626, 626, 626, 626, 626, 626, 626, 626, 626, 626, 626, 626, 626, 626, 626, 626, 626, 626, 626, 626, 626, 626, 626, 626, 626, 626, 626, 626, 626, 626, 626, 626, 626, 626, 626, 626, 626, 626, 626, 626, 626, 626, 626, 626, 626, 626, 626, 626, 626, 626, 626, 626, 626, 626, 626, 626, 626, 626, 626, 626, 626, 626, 626, 626, 626, 626, 626, 626, 626, 626, 626, 626, 626, 626, 626, 626, 626, 626, 626, 626, 626, 626, 626, 626, 626, 626, 626, 626, 626, 626, 626, 626, 626, 626, 626, 626, 626, 626, 626, 626, 626, 626, 626, 626, 626, 626, 626, 626, 626, 626, 626, 626, 626, 626, 626, 626, 626, 626, 626, 626, 626, 626, 626, 626, 626, 626, 626, 626, 626, 626, 626, 626, 626, 626, 626, 626, 626, 626, 626, 626, 626, 626, 626, 626, 626, 626, 626, 626, 626, 626, 626, 626, 626, 626, 626, 626, 626, 626, 626, 626, 626, 626, 626, 626, 626, 626, 626, 626, 626, 626, 626, 626, 626, 626, 626, 626, 626, 626, 626, 626, 626, 626, 626, 626, 626, 626, 626, 626, 626, 626, 626, 626, 626, 626, 626, 626, 626, 626, 626, 626, 626, 626, 626, 626, 626, 626, 626, 626, 626, 626, 626, 626, 626, 626, 626, 626, 626, 626, 626, 626, 626, 626, 626, 626, 626, 626, 626, 626, 626, 626, 626, 626, 626, 626, 626, 626, 626, 626, 626, 626, 626, 626, 626, 626, 626, 626, 626, 626, 626, 626, 626, 626, 626, 626, 626, 626, 626, 626, 626, 626, 626, 626, 626, 626, 626, 626}, + {625, 625, 625, 625, 625, 625, 625, 625, 625, 625, 625, 625, 625, 625, 625, 625, 625, 625, 625, 625, 625, 625, 625, 625, 625, 625, 625, 625, 625, 625, 625, 625, 625, 625, 625, 625, 625, 625, 625, 625, 625, 625, 625, 625, 625, 625, 625, 625, 625, 625, 625, 625, 625, 625, 625, 625, 625, 625, 625, 625, 625, 625, 625, 625, 625, 625, 625, 625, 625, 625, 625, 625, 625, 625, 625, 625, 625, 625, 625, 625, 625, 625, 625, 625, 625, 625, 625, 625, 625, 625, 625, 625, 625, 625, 625, 625, 625, 625, 625, 625, 625, 625, 625, 625, 625, 625, 625, 625, 625, 625, 625, 625, 625, 625, 625, 625, 625, 625, 625, 625, 625, 625, 625, 625, 625, 625, 625, 625, 625, 625, 625, 625, 625, 625, 625, 625, 625, 625, 625, 625, 625, 625, 625, 625, 625, 625, 625, 625, 625, 625, 625, 625, 625, 625, 625, 625, 625, 625, 625, 625, 625, 625, 625, 625, 625, 625, 625, 625, 
625, 625, 625, 625, 625, 625, 625, 625, 625, 625, 625, 625, 625, 625, 625, 625, 625, 625, 625, 625, 625, 625, 625, 625, 625, 625, 625, 625, 625, 625, 625, 625, 625, 625, 625, 625, 625, 625, 625, 625, 625, 625, 625, 625, 625, 625, 625, 625, 625, 625, 625, 625, 625, 625, 625, 625, 625, 625, 625, 625, 625, 625, 625, 625, 625, 625, 625, 625, 625, 625, 625, 625, 625, 625, 625, 625, 625, 625, 625, 625, 625, 625, 625, 625, 625, 625, 625, 625, 625, 625, 625, 625, 625, 625, 625, 625, 625, 625, 625, 625, 625, 625, 625, 625, 625, 625, 625, 625, 625, 625, 625, 625, 625, 625, 625, 625, 625, 625, 625, 625, 625, 625, 625, 625, 625, 625, 625, 625, 625, 625, 625, 625, 625, 625, 625, 625, 625, 625, 625, 625, 625, 625, 625, 625, 625, 625, 625, 625, 625, 625, 625, 625, 625, 625, 625, 625, 625, 625, 625, 625, 625, 625, 625, 625, 625, 625, 625, 625, 625, 625, 625, 625, 625, 625, 625, 625, 625, 625, 625, 625, 625, 625, 625, 625, 625, 625, 625, 625, 625, 625, 625, 625, 625, 625, 625, 625, 625, 625, 625, 625, 625, 625, 625, 625, 625, 625, 625, 625, 625, 625, 625, 625, 625, 625, 625, 625, 625, 625, 625, 625, 625, 625, 625, 625, 625, 625, 625, 625, 625, 625, 625, 625, 625, 625, 625, 625, 625, 625, 625, 625, 625, 625, 625, 625, 625, 625, 625, 625, 625, 625, 625, 625, 625, 625, 625, 625, 625, 625, 625, 625, 625, 625, 625, 625, 625, 625, 625, 625, 625, 625, 625, 625, 625, 625, 625, 625, 625, 625, 625, 625, 625, 625, 625, 625, 625, 625, 625, 625, 625, 625, 625, 625, 625, 625, 625, 625, 625, 625, 625, 625, 625, 625, 625, 625, 625, 625, 625, 625, 625, 625, 625, 625, 625, 625, 625, 625, 625, 625, 625, 625, 625, 625, 625, 625, 625, 625, 625, 625, 625, 625, 625, 625, 625, 625, 625, 625, 625, 625, 625, 625, 625, 625, 625, 625, 625, 625, 625, 625, 625, 625, 625, 625, 625, 625, 625}, + {624, 624, 624, 624, 624, 624, 624, 624, 624, 624, 624, 624, 624, 624, 624, 624, 624, 624, 624, 624, 624, 624, 624, 624, 624, 624, 624, 624, 624, 624, 624, 624, 624, 624, 624, 624, 624, 624, 624, 624, 624, 624, 624, 624, 624, 624, 624, 624, 624, 624, 624, 624, 624, 624, 624, 624, 624, 624, 624, 624, 624, 624, 624, 624, 624, 624, 624, 624, 624, 624, 624, 624, 624, 624, 624, 624, 624, 624, 624, 624, 624, 624, 624, 624, 624, 624, 624, 624, 624, 624, 624, 624, 624, 624, 624, 624, 624, 624, 624, 624, 624, 624, 624, 624, 624, 624, 624, 624, 624, 624, 624, 624, 624, 624, 624, 624, 624, 624, 624, 624, 624, 624, 624, 624, 624, 624, 624, 624, 624, 624, 624, 624, 624, 624, 624, 624, 624, 624, 624, 624, 624, 624, 624, 624, 624, 624, 624, 624, 624, 624, 624, 624, 624, 624, 624, 624, 624, 624, 624, 624, 624, 624, 624, 624, 624, 624, 624, 624, 624, 624, 624, 624, 624, 624, 624, 624, 624, 624, 624, 624, 624, 624, 624, 624, 624, 624, 624, 624, 624, 624, 624, 624, 624, 624, 624, 624, 624, 624, 624, 624, 624, 624, 624, 624, 624, 624, 624, 624, 624, 624, 624, 624, 624, 624, 624, 624, 624, 624, 624, 624, 624, 624, 624, 624, 624, 624, 624, 624, 624, 624, 624, 624, 624, 624, 624, 624, 624, 624, 624, 624, 624, 624, 624, 624, 624, 624, 624, 624, 624, 624, 624, 624, 624, 624, 624, 624, 624, 624, 624, 624, 624, 624, 624, 624, 624, 624, 624, 624, 624, 624, 624, 624, 624, 624, 624, 624, 624, 624, 624, 624, 624, 624, 624, 624, 624, 624, 624, 624, 624, 624, 624, 624, 624, 624, 624, 624, 624, 624, 624, 624, 624, 624, 624, 624, 624, 624, 624, 624, 624, 624, 624, 624, 624, 624, 624, 624, 624, 624, 624, 624, 624, 624, 624, 624, 624, 624, 624, 624, 624, 624, 624, 624, 624, 624, 624, 624, 624, 624, 624, 624, 624, 624, 624, 624, 624, 624, 624, 624, 624, 624, 624, 624, 624, 624, 624, 
624, 624, 624, 624, 624, 624, 624, 624, 624, 624, 624, 624, 624, 624, 624, 624, 624, 624, 624, 624, 624, 624, 624, 624, 624, 624, 624, 624, 624, 624, 624, 624, 624, 624, 624, 624, 624, 624, 624, 624, 624, 624, 624, 624, 624, 624, 624, 624, 624, 624, 624, 624, 624, 624, 624, 624, 624, 624, 624, 624, 624, 624, 624, 624, 624, 624, 624, 624, 624, 624, 624, 624, 624, 624, 624, 624, 624, 624, 624, 624, 624, 624, 624, 624, 624, 624, 624, 624, 624, 624, 624, 624, 624, 624, 624, 624, 624, 624, 624, 624, 624, 624, 624, 624, 624, 624, 624, 624, 624, 624, 624, 624, 624, 624, 624, 624, 624, 624, 624, 624, 624, 624, 624, 624, 624, 624, 624, 624, 624, 624, 624, 624, 624, 624, 624, 624, 624, 624, 624, 624, 624, 624, 624, 624, 624, 624, 624, 624, 624, 624, 624, 624, 624, 624, 624, 624, 624, 624, 624, 624, 624, 624, 624, 624, 624, 624, 624, 624}, + {623, 623, 623, 623, 623, 623, 623, 623, 623, 623, 623, 623, 623, 623, 623, 623, 623, 623, 623, 623, 623, 623, 623, 623, 623, 623, 623, 623, 623, 623, 623, 623, 623, 623, 623, 623, 623, 623, 623, 623, 623, 623, 623, 623, 623, 623, 623, 623, 623, 623, 623, 623, 623, 623, 623, 623, 623, 623, 623, 623, 623, 623, 623, 623, 623, 623, 623, 623, 623, 623, 623, 623, 623, 623, 623, 623, 623, 623, 623, 623, 623, 623, 623, 623, 623, 623, 623, 623, 623, 623, 623, 623, 623, 623, 623, 623, 623, 623, 623, 623, 623, 623, 623, 623, 623, 623, 623, 623, 623, 623, 623, 623, 623, 623, 623, 623, 623, 623, 623, 623, 623, 623, 623, 623, 623, 623, 623, 623, 623, 623, 623, 623, 623, 623, 623, 623, 623, 623, 623, 623, 623, 623, 623, 623, 623, 623, 623, 623, 623, 623, 623, 623, 623, 623, 623, 623, 623, 623, 623, 623, 623, 623, 623, 623, 623, 623, 623, 623, 623, 623, 623, 623, 623, 623, 623, 623, 623, 623, 623, 623, 623, 623, 623, 623, 623, 623, 623, 623, 623, 623, 623, 623, 623, 623, 623, 623, 623, 623, 623, 623, 623, 623, 623, 623, 623, 623, 623, 623, 623, 623, 623, 623, 623, 623, 623, 623, 623, 623, 623, 623, 623, 623, 623, 623, 623, 623, 623, 623, 623, 623, 623, 623, 623, 623, 623, 623, 623, 623, 623, 623, 623, 623, 623, 623, 623, 623, 623, 623, 623, 623, 623, 623, 623, 623, 623, 623, 623, 623, 623, 623, 623, 623, 623, 623, 623, 623, 623, 623, 623, 623, 623, 623, 623, 623, 623, 623, 623, 623, 623, 623, 623, 623, 623, 623, 623, 623, 623, 623, 623, 623, 623, 623, 623, 623, 623, 623, 623, 623, 623, 623, 623, 623, 623, 623, 623, 623, 623, 623, 623, 623, 623, 623, 623, 623, 623, 623, 623, 623, 623, 623, 623, 623, 623, 623, 623, 623, 623, 623, 623, 623, 623, 623, 623, 623, 623, 623, 623, 623, 623, 623, 623, 623, 623, 623, 623, 623, 623, 623, 623, 623, 623, 623, 623, 623, 623, 623, 623, 623, 623, 623, 623, 623, 623, 623, 623, 623, 623, 623, 623, 623, 623, 623, 623, 623, 623, 623, 623, 623, 623, 623, 623, 623, 623, 623, 623, 623, 623, 623, 623, 623, 623, 623, 623, 623, 623, 623, 623, 623, 623, 623, 623, 623, 623, 623, 623, 623, 623, 623, 623, 623, 623, 623, 623, 623, 623, 623, 623, 623, 623, 623, 623, 623, 623, 623, 623, 623, 623, 623, 623, 623, 623, 623, 623, 623, 623, 623, 623, 623, 623, 623, 623, 623, 623, 623, 623, 623, 623, 623, 623, 623, 623, 623, 623, 623, 623, 623, 623, 623, 623, 623, 623, 623, 623, 623, 623, 623, 623, 623, 623, 623, 623, 623, 623, 623, 623, 623, 623, 623, 623, 623, 623, 623, 623, 623, 623, 623, 623, 623, 623, 623, 623, 623, 623, 623, 623, 623, 623, 623, 623, 623, 623, 623, 623, 623, 623, 623, 623, 623, 623, 623, 623, 623, 623, 623, 623, 623, 623, 623, 623, 623, 623, 623, 623}, + {622, 622, 622, 622, 622, 622, 622, 622, 622, 622, 622, 622, 622, 622, 622, 622, 622, 622, 
622, 622, 622, 622, 622, 622, 622, 622, 622, 622, 622, 622, 622, 622, 622, 622, 622, 622, 622, 622, 622, 622, 622, 622, 622, 622, 622, 622, 622, 622, 622, 622, 622, 622, 622, 622, 622, 622, 622, 622, 622, 622, 622, 622, 622, 622, 622, 622, 622, 622, 622, 622, 622, 622, 622, 622, 622, 622, 622, 622, 622, 622, 622, 622, 622, 622, 622, 622, 622, 622, 622, 622, 622, 622, 622, 622, 622, 622, 622, 622, 622, 622, 622, 622, 622, 622, 622, 622, 622, 622, 622, 622, 622, 622, 622, 622, 622, 622, 622, 622, 622, 622, 622, 622, 622, 622, 622, 622, 622, 622, 622, 622, 622, 622, 622, 622, 622, 622, 622, 622, 622, 622, 622, 622, 622, 622, 622, 622, 622, 622, 622, 622, 622, 622, 622, 622, 622, 622, 622, 622, 622, 622, 622, 622, 622, 622, 622, 622, 622, 622, 622, 622, 622, 622, 622, 622, 622, 622, 622, 622, 622, 622, 622, 622, 622, 622, 622, 622, 622, 622, 622, 622, 622, 622, 622, 622, 622, 622, 622, 622, 622, 622, 622, 622, 622, 622, 622, 622, 622, 622, 622, 622, 622, 622, 622, 622, 622, 622, 622, 622, 622, 622, 622, 622, 622, 622, 622, 622, 622, 622, 622, 622, 622, 622, 622, 622, 622, 622, 622, 622, 622, 622, 622, 622, 622, 622, 622, 622, 622, 622, 622, 622, 622, 622, 622, 622, 622, 622, 622, 622, 622, 622, 622, 622, 622, 622, 622, 622, 622, 622, 622, 622, 622, 622, 622, 622, 622, 622, 622, 622, 622, 622, 622, 622, 622, 622, 622, 622, 622, 622, 622, 622, 622, 622, 622, 622, 622, 622, 622, 622, 622, 622, 622, 622, 622, 622, 622, 622, 622, 622, 622, 622, 622, 622, 622, 622, 622, 622, 622, 622, 622, 622, 622, 622, 622, 622, 622, 622, 622, 622, 622, 622, 622, 622, 622, 622, 622, 622, 622, 622, 622, 622, 622, 622, 622, 622, 622, 622, 622, 622, 622, 622, 622, 622, 622, 622, 622, 622, 622, 622, 622, 622, 622, 622, 622, 622, 622, 622, 622, 622, 622, 622, 622, 622, 622, 622, 622, 622, 622, 622, 622, 622, 622, 622, 622, 622, 622, 622, 622, 622, 622, 622, 622, 622, 622, 622, 622, 622, 622, 622, 622, 622, 622, 622, 622, 622, 622, 622, 622, 622, 622, 622, 622, 622, 622, 622, 622, 622, 622, 622, 622, 622, 622, 622, 622, 622, 622, 622, 622, 622, 622, 622, 622, 622, 622, 622, 622, 622, 622, 622, 622, 622, 622, 622, 622, 622, 622, 622, 622, 622, 622, 622, 622, 622, 622, 622, 622, 622, 622, 622, 622, 622, 622, 622, 622, 622, 622, 622, 622, 622, 622, 622, 622, 622, 622, 622, 622, 622, 622, 622, 622, 622, 622, 622, 622, 622, 622, 622, 622, 622, 622, 622, 622, 622, 622, 622, 622, 622, 622, 622, 622, 622, 622, 622, 622, 622, 622, 622, 622, 622, 622, 622, 622, 622, 622, 622, 622, 622, 622, 622, 622, 622, 622, 622, 622}, + // 320 + {621, 621, 621, 621, 621, 621, 621, 621, 621, 621, 621, 621, 621, 621, 621, 621, 621, 621, 621, 621, 621, 621, 621, 621, 621, 621, 621, 621, 621, 621, 621, 621, 621, 621, 621, 621, 621, 621, 621, 621, 621, 621, 621, 621, 621, 621, 621, 621, 621, 621, 621, 621, 621, 621, 621, 621, 621, 621, 621, 621, 621, 621, 621, 621, 621, 621, 621, 621, 621, 621, 621, 621, 621, 621, 621, 621, 621, 621, 621, 621, 621, 621, 621, 621, 621, 621, 621, 621, 621, 621, 621, 621, 621, 621, 621, 621, 621, 621, 621, 621, 621, 621, 621, 621, 621, 621, 621, 621, 621, 621, 621, 621, 621, 621, 621, 621, 621, 621, 621, 621, 621, 621, 621, 621, 621, 621, 621, 621, 621, 621, 621, 621, 621, 621, 621, 621, 621, 621, 621, 621, 621, 621, 621, 621, 621, 621, 621, 621, 621, 621, 621, 621, 621, 621, 621, 621, 621, 621, 621, 621, 621, 621, 621, 621, 621, 621, 621, 621, 621, 621, 621, 621, 621, 621, 621, 621, 621, 621, 621, 621, 621, 621, 621, 621, 621, 621, 621, 621, 621, 621, 621, 621, 621, 621, 621, 621, 621, 621, 621, 621, 621, 621, 621, 
621, 621, 621, 621, 621, 621, 621, 621, 621, 621, 621, 621, 621, 621, 621, 621, 621, 621, 621, 621, 621, 621, 621, 621, 621, 621, 621, 621, 621, 621, 621, 621, 621, 621, 621, 621, 621, 621, 621, 621, 621, 621, 621, 621, 621, 621, 621, 621, 621, 621, 621, 621, 621, 621, 621, 621, 621, 621, 621, 621, 621, 621, 621, 621, 621, 621, 621, 621, 621, 621, 621, 621, 621, 621, 621, 621, 621, 621, 621, 621, 621, 621, 621, 621, 621, 621, 621, 621, 621, 621, 621, 621, 621, 621, 621, 621, 621, 621, 621, 621, 621, 621, 621, 621, 621, 621, 621, 621, 621, 621, 621, 621, 621, 621, 621, 621, 621, 621, 621, 621, 621, 621, 621, 621, 621, 621, 621, 621, 621, 621, 621, 621, 621, 621, 621, 621, 621, 621, 621, 621, 621, 621, 621, 621, 621, 621, 621, 621, 621, 621, 621, 621, 621, 621, 621, 621, 621, 621, 621, 621, 621, 621, 621, 621, 621, 621, 621, 621, 621, 621, 621, 621, 621, 621, 621, 621, 621, 621, 621, 621, 621, 621, 621, 621, 621, 621, 621, 621, 621, 621, 621, 621, 621, 621, 621, 621, 621, 621, 621, 621, 621, 621, 621, 621, 621, 621, 621, 621, 621, 621, 621, 621, 621, 621, 621, 621, 621, 621, 621, 621, 621, 621, 621, 621, 621, 621, 621, 621, 621, 621, 621, 621, 621, 621, 621, 621, 621, 621, 621, 621, 621, 621, 621, 621, 621, 621, 621, 621, 621, 621, 621, 621, 621, 621, 621, 621, 621, 621, 621, 621, 621, 621, 621, 621, 621, 621, 621, 621, 621, 621, 621, 621, 621, 621, 621, 621, 621, 621, 621, 621, 621, 621, 621, 621, 621, 621, 621, 621, 621, 621, 621, 621, 621, 621, 621, 621, 621, 621, 621, 621, 621, 621, 621, 621, 621, 621, 621, 621, 621, 621, 621, 621, 621, 621, 621, 621, 621, 621, 621, 621}, + {620, 620, 620, 620, 620, 620, 620, 620, 620, 620, 620, 620, 620, 620, 620, 620, 620, 620, 620, 620, 620, 620, 620, 620, 620, 620, 620, 620, 620, 620, 620, 620, 620, 620, 620, 620, 620, 620, 620, 620, 620, 620, 620, 620, 620, 620, 620, 620, 620, 620, 620, 620, 620, 620, 620, 620, 620, 620, 620, 620, 620, 620, 620, 620, 620, 620, 620, 620, 620, 620, 620, 620, 620, 620, 620, 620, 620, 620, 620, 620, 620, 620, 620, 620, 620, 620, 620, 620, 620, 620, 620, 620, 620, 620, 620, 620, 620, 620, 620, 620, 620, 620, 620, 620, 620, 620, 620, 620, 620, 620, 620, 620, 620, 620, 620, 620, 620, 620, 620, 620, 620, 620, 620, 620, 620, 620, 620, 620, 620, 620, 620, 620, 620, 620, 620, 620, 620, 620, 620, 620, 620, 620, 620, 620, 620, 620, 620, 620, 620, 620, 620, 620, 620, 620, 620, 620, 620, 620, 620, 620, 620, 620, 620, 620, 620, 620, 620, 620, 620, 620, 620, 620, 620, 620, 620, 620, 620, 620, 620, 620, 620, 620, 620, 620, 620, 620, 620, 620, 620, 620, 620, 620, 620, 620, 620, 620, 620, 620, 620, 620, 620, 620, 620, 620, 620, 620, 620, 620, 620, 620, 620, 620, 620, 620, 620, 620, 620, 620, 620, 620, 620, 620, 620, 620, 620, 620, 620, 620, 620, 620, 620, 620, 620, 620, 620, 620, 620, 620, 620, 620, 620, 620, 620, 620, 620, 620, 620, 620, 620, 620, 620, 620, 620, 620, 620, 620, 620, 620, 620, 620, 620, 620, 620, 620, 620, 620, 620, 620, 620, 620, 620, 620, 620, 620, 620, 620, 620, 620, 620, 620, 620, 620, 620, 620, 620, 620, 620, 620, 620, 620, 620, 620, 620, 620, 620, 620, 620, 620, 620, 620, 620, 620, 620, 620, 620, 620, 620, 620, 620, 620, 620, 620, 620, 620, 620, 620, 620, 620, 620, 620, 620, 620, 620, 620, 620, 620, 620, 620, 620, 620, 620, 620, 620, 620, 620, 620, 620, 620, 620, 620, 620, 620, 620, 620, 620, 620, 620, 620, 620, 620, 620, 620, 620, 620, 620, 620, 620, 620, 620, 620, 620, 620, 620, 620, 620, 620, 620, 620, 620, 620, 620, 620, 620, 620, 620, 620, 620, 620, 620, 620, 620, 620, 620, 620, 620, 620, 620, 620, 620, 620, 
620, 620, 620, 620, 620, 620, 620, 620, 620, 620, 620, 620, 620, 620, 620, 620, 620, 620, 620, 620, 620, 620, 620, 620, 620, 620, 620, 620, 620, 620, 620, 620, 620, 620, 620, 620, 620, 620, 620, 620, 620, 620, 620, 620, 620, 620, 620, 620, 620, 620, 620, 620, 620, 620, 620, 620, 620, 620, 620, 620, 620, 620, 620, 620, 620, 620, 620, 620, 620, 620, 620, 620, 620, 620, 620, 620, 620, 620, 620, 620, 620, 620, 620, 620, 620, 620, 620, 620, 620, 620, 620, 620, 620, 620, 620, 620, 620, 620, 620, 620, 620, 620, 620, 620, 620, 620, 620, 620, 620, 620, 620, 620, 620, 620, 620, 620, 620, 620, 620, 620, 620, 620, 620, 620, 620, 620, 620, 620, 620, 620, 620, 620, 620}, + {619, 619, 619, 619, 619, 619, 619, 619, 619, 619, 619, 619, 619, 619, 619, 619, 619, 619, 619, 619, 619, 619, 619, 619, 619, 619, 619, 619, 619, 619, 619, 619, 619, 619, 619, 619, 619, 619, 619, 619, 619, 619, 619, 619, 619, 619, 619, 619, 619, 619, 619, 619, 619, 619, 619, 619, 619, 619, 619, 619, 619, 619, 619, 619, 619, 619, 619, 619, 619, 619, 619, 619, 619, 619, 619, 619, 619, 619, 619, 619, 619, 619, 619, 619, 619, 619, 619, 619, 619, 619, 619, 619, 619, 619, 619, 619, 619, 619, 619, 619, 619, 619, 619, 619, 619, 619, 619, 619, 619, 619, 619, 619, 619, 619, 619, 619, 619, 619, 619, 619, 619, 619, 619, 619, 619, 619, 619, 619, 619, 619, 619, 619, 619, 619, 619, 619, 619, 619, 619, 619, 619, 619, 619, 619, 619, 619, 619, 619, 619, 619, 619, 619, 619, 619, 619, 619, 619, 619, 619, 619, 619, 619, 619, 619, 619, 619, 619, 619, 619, 619, 619, 619, 619, 619, 619, 619, 619, 619, 619, 619, 619, 619, 619, 619, 619, 619, 619, 619, 619, 619, 619, 619, 619, 619, 619, 619, 619, 619, 619, 619, 619, 619, 619, 619, 619, 619, 619, 619, 619, 619, 619, 619, 619, 619, 619, 619, 619, 619, 619, 619, 619, 619, 619, 619, 619, 619, 619, 619, 619, 619, 619, 619, 619, 619, 619, 619, 619, 619, 619, 619, 619, 619, 619, 619, 619, 619, 619, 619, 619, 619, 619, 619, 619, 619, 619, 619, 619, 619, 619, 619, 619, 619, 619, 619, 619, 619, 619, 619, 619, 619, 619, 619, 619, 619, 619, 619, 619, 619, 619, 619, 619, 619, 619, 619, 619, 619, 619, 619, 619, 619, 619, 619, 619, 619, 619, 619, 619, 619, 619, 619, 619, 619, 619, 619, 619, 619, 619, 619, 619, 619, 619, 619, 619, 619, 619, 619, 619, 619, 619, 619, 619, 619, 619, 619, 619, 619, 619, 619, 619, 619, 619, 619, 619, 619, 619, 619, 619, 619, 619, 619, 619, 619, 619, 619, 619, 619, 619, 619, 619, 619, 619, 619, 619, 619, 619, 619, 619, 619, 619, 619, 619, 619, 619, 619, 619, 619, 619, 619, 619, 619, 619, 619, 619, 619, 619, 619, 619, 619, 619, 619, 619, 619, 619, 619, 619, 619, 619, 619, 619, 619, 619, 619, 619, 619, 619, 619, 619, 619, 619, 619, 619, 619, 619, 619, 619, 619, 619, 619, 619, 619, 619, 619, 619, 619, 619, 619, 619, 619, 619, 619, 619, 619, 619, 619, 619, 619, 619, 619, 619, 619, 619, 619, 619, 619, 619, 619, 619, 619, 619, 619, 619, 619, 619, 619, 619, 619, 619, 619, 619, 619, 619, 619, 619, 619, 619, 619, 619, 619, 619, 619, 619, 619, 619, 619, 619, 619, 619, 619, 619, 619, 619, 619, 619, 619, 619, 619, 619, 619, 619, 619, 619, 619, 619, 619, 619, 619, 619, 619, 619, 619, 619, 619, 619, 619, 619, 619, 619, 619, 619, 619, 619, 619, 619, 619, 619, 619, 619, 619, 619, 619, 619, 619, 619, 619, 619, 619, 619, 619, 619, 619, 619, 619, 619}, + {618, 618, 618, 618, 618, 618, 618, 618, 618, 618, 618, 618, 618, 618, 618, 618, 618, 618, 618, 618, 618, 618, 618, 618, 618, 618, 618, 618, 618, 618, 618, 618, 618, 618, 618, 618, 618, 618, 618, 618, 618, 618, 618, 618, 618, 618, 618, 618, 618, 618, 618, 618, 618, 
618, 618, 618, 618, 618, 618, 618, 618, 618, 618, 618, 618, 618, 618, 618, 618, 618, 618, 618, 618, 618, 618, 618, 618, 618, 618, 618, 618, 618, 618, 618, 618, 618, 618, 618, 618, 618, 618, 618, 618, 618, 618, 618, 618, 618, 618, 618, 618, 618, 618, 618, 618, 618, 618, 618, 618, 618, 618, 618, 618, 618, 618, 618, 618, 618, 618, 618, 618, 618, 618, 618, 618, 618, 618, 618, 618, 618, 618, 618, 618, 618, 618, 618, 618, 618, 618, 618, 618, 618, 618, 618, 618, 618, 618, 618, 618, 618, 618, 618, 618, 618, 618, 618, 618, 618, 618, 618, 618, 618, 618, 618, 618, 618, 618, 618, 618, 618, 618, 618, 618, 618, 618, 618, 618, 618, 618, 618, 618, 618, 618, 618, 618, 618, 618, 618, 618, 618, 618, 618, 618, 618, 618, 618, 618, 618, 618, 618, 618, 618, 618, 618, 618, 618, 618, 618, 618, 618, 618, 618, 618, 618, 618, 618, 618, 618, 618, 618, 618, 618, 618, 618, 618, 618, 618, 618, 618, 618, 618, 618, 618, 618, 618, 618, 618, 618, 618, 618, 618, 618, 618, 618, 618, 618, 618, 618, 618, 618, 618, 618, 618, 618, 618, 618, 618, 618, 618, 618, 618, 618, 618, 618, 618, 618, 618, 618, 618, 618, 618, 618, 618, 618, 618, 618, 618, 618, 618, 618, 618, 618, 618, 618, 618, 618, 618, 618, 618, 618, 618, 618, 618, 618, 618, 618, 618, 618, 618, 618, 618, 618, 618, 618, 618, 618, 618, 618, 618, 618, 618, 618, 618, 618, 618, 618, 618, 618, 618, 618, 618, 618, 618, 618, 618, 618, 618, 618, 618, 618, 618, 618, 618, 618, 618, 618, 618, 618, 618, 618, 618, 618, 618, 618, 618, 618, 618, 618, 618, 618, 618, 618, 618, 618, 618, 618, 618, 618, 618, 618, 618, 618, 618, 618, 618, 618, 618, 618, 618, 618, 618, 618, 618, 618, 618, 618, 618, 618, 618, 618, 618, 618, 618, 618, 618, 618, 618, 618, 618, 618, 618, 618, 618, 618, 618, 618, 618, 618, 618, 618, 618, 618, 618, 618, 618, 618, 618, 618, 618, 618, 618, 618, 618, 618, 618, 618, 618, 618, 618, 618, 618, 618, 618, 618, 618, 618, 618, 618, 618, 618, 618, 618, 618, 618, 618, 618, 618, 618, 618, 618, 618, 618, 618, 618, 618, 618, 618, 618, 618, 618, 618, 618, 618, 618, 618, 618, 618, 618, 618, 618, 618, 618, 618, 618, 618, 618, 618, 618, 618, 618, 618, 618, 618, 618, 618, 618, 618, 618, 618, 618, 618, 618, 618, 618, 618, 618, 618, 618, 618, 618, 618, 618, 618, 618, 618, 618, 618, 618, 618, 618, 618, 618, 618, 618, 618, 618, 618, 618, 618, 618, 618, 618, 618, 618, 618, 618, 618, 618, 618, 618, 618, 618, 618}, + {617, 617, 617, 617, 617, 617, 617, 617, 617, 617, 617, 617, 617, 617, 617, 617, 617, 617, 617, 617, 617, 617, 617, 617, 617, 617, 617, 617, 617, 617, 617, 617, 617, 617, 617, 617, 617, 617, 617, 617, 617, 617, 617, 617, 617, 617, 617, 617, 617, 617, 617, 617, 617, 617, 617, 617, 617, 617, 617, 617, 617, 617, 617, 617, 617, 617, 617, 617, 617, 617, 617, 617, 617, 617, 617, 617, 617, 617, 617, 617, 617, 617, 617, 617, 617, 617, 617, 617, 617, 617, 617, 617, 617, 617, 617, 617, 617, 617, 617, 617, 617, 617, 617, 617, 617, 617, 617, 617, 617, 617, 617, 617, 617, 617, 617, 617, 617, 617, 617, 617, 617, 617, 617, 617, 617, 617, 617, 617, 617, 617, 617, 617, 617, 617, 617, 617, 617, 617, 617, 617, 617, 617, 617, 617, 617, 617, 617, 617, 617, 617, 617, 617, 617, 617, 617, 617, 617, 617, 617, 617, 617, 617, 617, 617, 617, 617, 617, 617, 617, 617, 617, 617, 617, 617, 617, 617, 617, 617, 617, 617, 617, 617, 617, 617, 617, 617, 617, 617, 617, 617, 617, 617, 617, 617, 617, 617, 617, 617, 617, 617, 617, 617, 617, 617, 617, 617, 617, 617, 617, 617, 617, 617, 617, 617, 617, 617, 617, 617, 617, 617, 617, 617, 617, 617, 617, 617, 617, 617, 617, 617, 617, 617, 617, 617, 617, 617, 617, 617, 617, 617, 
617, 617, 617, 617, 617, 617, 617, 617, 617, 617, 617, 617, 617, 617, 617, 617, 617, 617, 617, 617, 617, 617, 617, 617, 617, 617, 617, 617, 617, 617, 617, 617, 617, 617, 617, 617, 617, 617, 617, 617, 617, 617, 617, 617, 617, 617, 617, 617, 617, 617, 617, 617, 617, 617, 617, 617, 617, 617, 617, 617, 617, 617, 617, 617, 617, 617, 617, 617, 617, 617, 617, 617, 617, 617, 617, 617, 617, 617, 617, 617, 617, 617, 617, 617, 617, 617, 617, 617, 617, 617, 617, 617, 617, 617, 617, 617, 617, 617, 617, 617, 617, 617, 617, 617, 617, 617, 617, 617, 617, 617, 617, 617, 617, 617, 617, 617, 617, 617, 617, 617, 617, 617, 617, 617, 617, 617, 617, 617, 617, 617, 617, 617, 617, 617, 617, 617, 617, 617, 617, 617, 617, 617, 617, 617, 617, 617, 617, 617, 617, 617, 617, 617, 617, 617, 617, 617, 617, 617, 617, 617, 617, 617, 617, 617, 617, 617, 617, 617, 617, 617, 617, 617, 617, 617, 617, 617, 617, 617, 617, 617, 617, 617, 617, 617, 617, 617, 617, 617, 617, 617, 617, 617, 617, 617, 617, 617, 617, 617, 617, 617, 617, 617, 617, 617, 617, 617, 617, 617, 617, 617, 617, 617, 617, 617, 617, 617, 617, 617, 617, 617, 617, 617, 617, 617, 617, 617, 617, 617, 617, 617, 617, 617, 617, 617, 617, 617, 617, 617, 617, 617, 617, 617, 617, 617, 617, 617, 617, 617, 617, 617, 617, 617, 617, 617, 617, 617, 617, 617, 617, 617, 617, 617, 617, 617, 617, 617, 617, 617, 617, 617, 617, 617, 617, 617, 617, 617, 617, 617, 617, 617, 617, 617, 617}, + // 325 + {616, 616, 616, 616, 616, 616, 616, 616, 616, 616, 616, 616, 616, 616, 616, 616, 616, 616, 616, 616, 616, 616, 616, 616, 616, 616, 616, 616, 616, 616, 616, 616, 616, 616, 616, 616, 616, 616, 616, 616, 616, 616, 616, 616, 616, 616, 616, 616, 616, 616, 616, 616, 616, 616, 616, 616, 616, 616, 616, 616, 616, 616, 616, 616, 616, 616, 616, 616, 616, 616, 616, 616, 616, 616, 616, 616, 616, 616, 616, 616, 616, 616, 616, 616, 616, 616, 616, 616, 616, 616, 616, 616, 616, 616, 616, 616, 616, 616, 616, 616, 616, 616, 616, 616, 616, 616, 616, 616, 616, 616, 616, 616, 616, 616, 616, 616, 616, 616, 616, 616, 616, 616, 616, 616, 616, 616, 616, 616, 616, 616, 616, 616, 616, 616, 616, 616, 616, 616, 616, 616, 616, 616, 616, 616, 616, 616, 616, 616, 616, 616, 616, 616, 616, 616, 616, 616, 616, 616, 616, 616, 616, 616, 616, 616, 616, 616, 616, 616, 616, 616, 616, 616, 616, 616, 616, 616, 616, 616, 616, 616, 616, 616, 616, 616, 616, 616, 616, 616, 616, 616, 616, 616, 616, 616, 616, 616, 616, 616, 616, 616, 616, 616, 616, 616, 616, 616, 616, 616, 616, 616, 616, 616, 616, 616, 616, 616, 616, 616, 616, 616, 616, 616, 616, 616, 616, 616, 616, 616, 616, 616, 616, 616, 616, 616, 616, 616, 616, 616, 616, 616, 616, 616, 616, 616, 616, 616, 616, 616, 616, 616, 616, 616, 616, 616, 616, 616, 616, 616, 616, 616, 616, 616, 616, 616, 616, 616, 616, 616, 616, 616, 616, 616, 616, 616, 616, 616, 616, 616, 616, 616, 616, 616, 616, 616, 616, 616, 616, 616, 616, 616, 616, 616, 616, 616, 616, 616, 616, 616, 616, 616, 616, 616, 616, 616, 616, 616, 616, 616, 616, 616, 616, 616, 616, 616, 616, 616, 616, 616, 616, 616, 616, 616, 616, 616, 616, 616, 616, 616, 616, 616, 616, 616, 616, 616, 616, 616, 616, 616, 616, 616, 616, 616, 616, 616, 616, 616, 616, 616, 616, 616, 616, 616, 616, 616, 616, 616, 616, 616, 616, 616, 616, 616, 616, 616, 616, 616, 616, 616, 616, 616, 616, 616, 616, 616, 616, 616, 616, 616, 616, 616, 616, 616, 616, 616, 616, 616, 616, 616, 616, 616, 616, 616, 616, 616, 616, 616, 616, 616, 616, 616, 616, 616, 616, 616, 616, 616, 616, 616, 616, 616, 616, 616, 616, 616, 616, 616, 616, 616, 616, 616, 616, 616, 616, 616, 616, 
616, 616, 616, 616, 616, 616, 616, 616, 616, 616, 616, 616, 616, 616, 616, 616, 616, 616, 616, 616, 616, 616, 616, 616, 616, 616, 616, 616, 616, 616, 616, 616, 616, 616, 616, 616, 616, 616, 616, 616, 616, 616, 616, 616, 616, 616, 616, 616, 616, 616, 616, 616, 616, 616, 616, 616, 616, 616, 616, 616, 616, 616, 616, 616, 616, 616, 616, 616, 616, 616, 616, 616, 616, 616, 616, 616, 616, 616, 616, 616, 616, 616, 616, 616, 616, 616, 616, 616, 616, 616, 616, 616, 616, 616, 616, 616, 616, 616}, + {615, 615, 615, 615, 615, 615, 615, 615, 615, 615, 615, 615, 615, 615, 615, 615, 615, 615, 615, 615, 615, 615, 615, 615, 615, 615, 615, 615, 615, 615, 615, 615, 615, 615, 615, 615, 615, 615, 615, 615, 615, 615, 615, 615, 615, 615, 615, 615, 615, 615, 615, 615, 615, 615, 615, 615, 615, 615, 615, 615, 615, 615, 615, 615, 615, 615, 615, 615, 615, 615, 615, 615, 615, 615, 615, 615, 615, 615, 615, 615, 615, 615, 615, 615, 615, 615, 615, 615, 615, 615, 615, 615, 615, 615, 615, 615, 615, 615, 615, 615, 615, 615, 615, 615, 615, 615, 615, 615, 615, 615, 615, 615, 615, 615, 615, 615, 615, 615, 615, 615, 615, 615, 615, 615, 615, 615, 615, 615, 615, 615, 615, 615, 615, 615, 615, 615, 615, 615, 615, 615, 615, 615, 615, 615, 615, 615, 615, 615, 615, 615, 615, 615, 615, 615, 615, 615, 615, 615, 615, 615, 615, 615, 615, 615, 615, 615, 615, 615, 615, 615, 615, 615, 615, 615, 615, 615, 615, 615, 615, 615, 615, 615, 615, 615, 615, 615, 615, 615, 615, 615, 615, 615, 615, 615, 615, 615, 615, 615, 615, 615, 615, 615, 615, 615, 615, 615, 615, 615, 615, 615, 615, 615, 615, 615, 615, 615, 615, 615, 615, 615, 615, 615, 615, 615, 615, 615, 615, 615, 615, 615, 615, 615, 615, 615, 615, 615, 615, 615, 615, 615, 615, 615, 615, 615, 615, 615, 615, 615, 615, 615, 615, 615, 615, 615, 615, 615, 615, 615, 615, 615, 615, 615, 615, 615, 615, 615, 615, 615, 615, 615, 615, 615, 615, 615, 615, 615, 615, 615, 615, 615, 615, 615, 615, 615, 615, 615, 615, 615, 615, 615, 615, 615, 615, 615, 615, 615, 615, 615, 615, 615, 615, 615, 615, 615, 615, 615, 615, 615, 615, 615, 615, 615, 615, 615, 615, 615, 615, 615, 615, 615, 615, 615, 615, 615, 615, 615, 615, 615, 615, 615, 615, 615, 615, 615, 615, 615, 615, 615, 615, 615, 615, 615, 615, 615, 615, 615, 615, 615, 615, 615, 615, 615, 615, 615, 615, 615, 615, 615, 615, 615, 615, 615, 615, 615, 615, 615, 615, 615, 615, 615, 615, 615, 615, 615, 615, 615, 615, 615, 615, 615, 615, 615, 615, 615, 615, 615, 615, 615, 615, 615, 615, 615, 615, 615, 615, 615, 615, 615, 615, 615, 615, 615, 615, 615, 615, 615, 615, 615, 615, 615, 615, 615, 615, 615, 615, 615, 615, 615, 615, 615, 615, 615, 615, 615, 615, 615, 615, 615, 615, 615, 615, 615, 615, 615, 615, 615, 615, 615, 615, 615, 615, 615, 615, 615, 615, 615, 615, 615, 615, 615, 615, 615, 615, 615, 615, 615, 615, 615, 615, 615, 615, 615, 615, 615, 615, 615, 615, 615, 615, 615, 615, 615, 615, 615, 615, 615, 615, 615, 615, 615, 615, 615, 615, 615, 615, 615, 615, 615, 615, 615, 615, 615, 615, 615, 615, 615, 615, 615, 615, 615, 615, 615, 615, 615, 615, 615, 615, 615, 615, 615, 615, 615, 615, 615, 615, 615, 615, 615, 615, 615, 615, 615, 615}, + {614, 614, 614, 614, 614, 614, 614, 614, 614, 614, 614, 614, 614, 614, 614, 614, 614, 614, 614, 614, 614, 614, 614, 614, 614, 614, 614, 614, 614, 614, 614, 614, 614, 614, 614, 614, 614, 614, 614, 614, 614, 614, 614, 614, 614, 614, 614, 614, 614, 614, 614, 614, 614, 614, 614, 614, 614, 614, 614, 614, 614, 614, 614, 614, 614, 614, 614, 614, 614, 614, 614, 614, 614, 614, 614, 614, 614, 614, 614, 614, 614, 614, 614, 614, 614, 614, 614, 614, 
614, 614, 614, 614, 614, 614, 614, 614, 614, 614, 614, 614, 614, 614, 614, 614, 614, 614, 614, 614, 614, 614, 614, 614, 614, 614, 614, 614, 614, 614, 614, 614, 614, 614, 614, 614, 614, 614, 614, 614, 614, 614, 614, 614, 614, 614, 614, 614, 614, 614, 614, 614, 614, 614, 614, 614, 614, 614, 614, 614, 614, 614, 614, 614, 614, 614, 614, 614, 614, 614, 614, 614, 614, 614, 614, 614, 614, 614, 614, 614, 614, 614, 614, 614, 614, 614, 614, 614, 614, 614, 614, 614, 614, 614, 614, 614, 614, 614, 614, 614, 614, 614, 614, 614, 614, 614, 614, 614, 614, 614, 614, 614, 614, 614, 614, 614, 614, 614, 614, 614, 614, 614, 614, 614, 614, 614, 614, 614, 614, 614, 614, 614, 614, 614, 614, 614, 614, 614, 614, 614, 614, 614, 614, 614, 614, 614, 614, 614, 614, 614, 614, 614, 614, 614, 614, 614, 614, 614, 614, 614, 614, 614, 614, 614, 614, 614, 614, 614, 614, 614, 614, 614, 614, 614, 614, 614, 614, 614, 614, 614, 614, 614, 614, 614, 614, 614, 614, 614, 614, 614, 614, 614, 614, 614, 614, 614, 614, 614, 614, 614, 614, 614, 614, 614, 614, 614, 614, 614, 614, 614, 614, 614, 614, 614, 614, 614, 614, 614, 614, 614, 614, 614, 614, 614, 614, 614, 614, 614, 614, 614, 614, 614, 614, 614, 614, 614, 614, 614, 614, 614, 614, 614, 614, 614, 614, 614, 614, 614, 614, 614, 614, 614, 614, 614, 614, 614, 614, 614, 614, 614, 614, 614, 614, 614, 614, 614, 614, 614, 614, 614, 614, 614, 614, 614, 614, 614, 614, 614, 614, 614, 614, 614, 614, 614, 614, 614, 614, 614, 614, 614, 614, 614, 614, 614, 614, 614, 614, 614, 614, 614, 614, 614, 614, 614, 614, 614, 614, 614, 614, 614, 614, 614, 614, 614, 614, 614, 614, 614, 614, 614, 614, 614, 614, 614, 614, 614, 614, 614, 614, 614, 614, 614, 614, 614, 614, 614, 614, 614, 614, 614, 614, 614, 614, 614, 614, 614, 614, 614, 614, 614, 614, 614, 614, 614, 614, 614, 614, 614, 614, 614, 614, 614, 614, 614, 614, 614, 614, 614, 614, 614, 614, 614, 614, 614, 614, 614, 614, 614, 614, 614, 614, 614, 614, 614, 614, 614, 614, 614, 614, 614, 614, 614, 614, 614, 614, 614, 614, 614, 614, 614, 614, 614, 614, 614, 614, 614, 614, 614, 614, 614, 614, 614, 614, 614, 614, 614, 614, 614, 614, 614, 614, 614, 614, 614, 614, 614, 614, 614, 614, 614, 614, 614, 614, 614, 614}, + {613, 613, 613, 613, 613, 613, 613, 613, 613, 613, 613, 613, 613, 613, 613, 613, 613, 613, 613, 613, 613, 613, 613, 613, 613, 613, 613, 613, 613, 613, 613, 613, 613, 613, 613, 613, 613, 613, 613, 613, 613, 613, 613, 613, 613, 613, 613, 613, 613, 613, 613, 613, 613, 613, 613, 613, 613, 613, 613, 613, 613, 613, 613, 613, 613, 613, 613, 613, 613, 613, 613, 613, 613, 613, 613, 613, 613, 613, 613, 613, 613, 613, 613, 613, 613, 613, 613, 613, 613, 613, 613, 613, 613, 613, 613, 613, 613, 613, 613, 613, 613, 613, 613, 613, 613, 613, 613, 613, 613, 613, 613, 613, 613, 613, 613, 613, 613, 613, 613, 613, 613, 613, 613, 613, 613, 613, 613, 613, 613, 613, 613, 613, 613, 613, 613, 613, 613, 613, 613, 613, 613, 613, 613, 613, 613, 613, 613, 613, 613, 613, 613, 613, 613, 613, 613, 613, 613, 613, 613, 613, 613, 613, 613, 613, 613, 613, 613, 613, 613, 613, 613, 613, 613, 613, 613, 613, 613, 613, 613, 613, 613, 613, 613, 613, 613, 613, 613, 613, 613, 613, 613, 613, 613, 613, 613, 613, 613, 613, 613, 613, 613, 613, 613, 613, 613, 613, 613, 613, 613, 613, 613, 613, 613, 613, 613, 613, 613, 613, 613, 613, 613, 613, 613, 613, 613, 613, 613, 613, 613, 613, 613, 613, 613, 613, 613, 613, 613, 613, 613, 613, 613, 613, 613, 613, 613, 613, 613, 613, 613, 613, 613, 613, 613, 613, 613, 613, 613, 613, 613, 613, 613, 613, 613, 613, 613, 613, 613, 613, 613, 613, 613, 613, 613, 613, 613, 
613, 613, 613, 613, 613, 613, 613, 613, 613, 613, 613, 613, 613, 613, 613, 613, 613, 613, 613, 613, 613, 613, 613, 613, 613, 613, 613, 613, 613, 613, 613, 613, 613, 613, 613, 613, 613, 613, 613, 613, 613, 613, 613, 613, 613, 613, 613, 613, 613, 613, 613, 613, 613, 613, 613, 613, 613, 613, 613, 613, 613, 613, 613, 613, 613, 613, 613, 613, 613, 613, 613, 613, 613, 613, 613, 613, 613, 613, 613, 613, 613, 613, 613, 613, 613, 613, 613, 613, 613, 613, 613, 613, 613, 613, 613, 613, 613, 613, 613, 613, 613, 613, 613, 613, 613, 613, 613, 613, 613, 613, 613, 613, 613, 613, 613, 613, 613, 613, 613, 613, 613, 613, 613, 613, 613, 613, 613, 613, 613, 613, 613, 613, 613, 613, 613, 613, 613, 613, 613, 613, 613, 613, 613, 613, 613, 613, 613, 613, 613, 613, 613, 613, 613, 613, 613, 613, 613, 613, 613, 613, 613, 613, 613, 613, 613, 613, 613, 613, 613, 613, 613, 613, 613, 613, 613, 613, 613, 613, 613, 613, 613, 613, 613, 613, 613, 613, 613, 613, 613, 613, 613, 613, 613, 613, 613, 613, 613, 613, 613, 613, 613, 613, 613, 613, 613, 613, 613, 613, 613, 613, 613, 613, 613, 613, 613, 613, 613, 613, 613, 613, 613, 613, 613, 613, 613, 613, 613, 613, 613, 613, 613, 613, 613, 613, 613, 613, 613, 613, 613, 613, 613, 613, 613, 613, 613, 613, 613, 613}, + {612, 612, 612, 612, 612, 612, 612, 612, 612, 612, 612, 612, 612, 612, 612, 612, 612, 612, 612, 612, 612, 612, 612, 612, 612, 612, 612, 612, 612, 612, 612, 612, 612, 612, 612, 612, 612, 612, 612, 612, 612, 612, 612, 612, 612, 612, 612, 612, 612, 612, 612, 612, 612, 612, 612, 612, 612, 612, 612, 612, 612, 612, 612, 612, 612, 612, 612, 612, 612, 612, 612, 612, 612, 612, 612, 612, 612, 612, 612, 612, 612, 612, 612, 612, 612, 612, 612, 612, 612, 612, 612, 612, 612, 612, 612, 612, 612, 612, 612, 612, 612, 612, 612, 612, 612, 612, 612, 612, 612, 612, 612, 612, 612, 612, 612, 612, 612, 612, 612, 612, 612, 612, 612, 612, 612, 612, 612, 612, 612, 612, 612, 612, 612, 612, 612, 612, 612, 612, 612, 612, 612, 612, 612, 612, 612, 612, 612, 612, 612, 612, 612, 612, 612, 612, 612, 612, 612, 612, 612, 612, 612, 612, 612, 612, 612, 612, 612, 612, 612, 612, 612, 612, 612, 612, 612, 612, 612, 612, 612, 612, 612, 612, 612, 612, 612, 612, 612, 612, 612, 612, 612, 612, 612, 612, 612, 612, 612, 612, 612, 612, 612, 612, 612, 612, 612, 612, 612, 612, 612, 612, 612, 612, 612, 612, 612, 612, 612, 612, 612, 612, 612, 612, 612, 612, 612, 612, 612, 612, 612, 612, 612, 612, 612, 612, 612, 612, 612, 612, 612, 612, 612, 612, 612, 612, 612, 612, 612, 612, 612, 612, 612, 612, 612, 612, 612, 612, 612, 612, 612, 612, 612, 612, 612, 612, 612, 612, 612, 612, 612, 612, 612, 612, 612, 612, 612, 612, 612, 612, 612, 612, 612, 612, 612, 612, 612, 612, 612, 612, 612, 612, 612, 612, 612, 612, 612, 612, 612, 612, 612, 612, 612, 612, 612, 612, 612, 612, 612, 612, 612, 612, 612, 612, 612, 612, 612, 612, 612, 612, 612, 612, 612, 612, 612, 612, 612, 612, 612, 612, 612, 612, 612, 612, 612, 612, 612, 612, 612, 612, 612, 612, 612, 612, 612, 612, 612, 612, 612, 612, 612, 612, 612, 612, 612, 612, 612, 612, 612, 612, 612, 612, 612, 612, 612, 612, 612, 612, 612, 612, 612, 612, 612, 612, 612, 612, 612, 612, 612, 612, 612, 612, 612, 612, 612, 612, 612, 612, 612, 612, 612, 612, 612, 612, 612, 612, 612, 612, 612, 612, 612, 612, 612, 612, 612, 612, 612, 612, 612, 612, 612, 612, 612, 612, 612, 612, 612, 612, 612, 612, 612, 612, 612, 612, 612, 612, 612, 612, 612, 612, 612, 612, 612, 612, 612, 612, 612, 612, 612, 612, 612, 612, 612, 612, 612, 612, 612, 612, 612, 612, 612, 612, 612, 612, 612, 612, 612, 612, 612, 612, 612, 612, 612, 612, 
612, 612, 612, 612, 612, 612, 612, 612, 612, 612, 612, 612, 612, 612, 612, 612, 612, 612, 612, 612, 612, 612, 612, 612, 612, 612, 612, 612, 612, 612, 612, 612, 612, 612, 612, 612, 612, 612, 612, 612, 612, 612, 612, 612, 612, 612, 612, 612, 612, 612, 612, 612, 612, 612, 612, 612, 612, 612, 612, 612, 612}, + // 330 + {611, 611, 611, 611, 611, 611, 611, 611, 611, 611, 611, 611, 611, 611, 611, 611, 611, 611, 611, 611, 611, 611, 611, 611, 611, 611, 611, 611, 611, 611, 611, 611, 611, 611, 611, 611, 611, 611, 611, 611, 611, 611, 611, 611, 611, 611, 611, 611, 611, 611, 611, 611, 611, 611, 611, 611, 611, 611, 611, 611, 611, 611, 611, 611, 611, 611, 611, 611, 611, 611, 611, 611, 611, 611, 611, 611, 611, 611, 611, 611, 611, 611, 611, 611, 611, 611, 611, 611, 611, 611, 611, 611, 611, 611, 611, 611, 611, 611, 611, 611, 611, 611, 611, 611, 611, 611, 611, 611, 611, 611, 611, 611, 611, 611, 611, 611, 611, 611, 611, 611, 611, 611, 611, 611, 611, 611, 611, 611, 611, 611, 611, 611, 611, 611, 611, 611, 611, 611, 611, 611, 611, 611, 611, 611, 611, 611, 611, 611, 611, 611, 611, 611, 611, 611, 611, 611, 611, 611, 611, 611, 611, 611, 611, 611, 611, 611, 611, 611, 611, 611, 611, 611, 611, 611, 611, 611, 611, 611, 611, 611, 611, 611, 611, 611, 611, 611, 611, 611, 611, 611, 611, 611, 611, 611, 611, 611, 611, 611, 611, 611, 611, 611, 611, 611, 611, 611, 611, 611, 611, 611, 611, 611, 611, 611, 611, 611, 611, 611, 611, 611, 611, 611, 611, 611, 611, 611, 611, 611, 611, 611, 611, 611, 611, 611, 611, 611, 611, 611, 611, 611, 611, 611, 611, 611, 611, 611, 611, 611, 611, 611, 611, 611, 611, 611, 611, 611, 611, 611, 611, 611, 611, 611, 611, 611, 611, 611, 611, 611, 611, 611, 611, 611, 611, 611, 611, 611, 611, 611, 611, 611, 611, 611, 611, 611, 611, 611, 611, 611, 611, 611, 611, 611, 611, 611, 611, 611, 611, 611, 611, 611, 611, 611, 611, 611, 611, 611, 611, 611, 611, 611, 611, 611, 611, 611, 611, 611, 611, 611, 611, 611, 611, 611, 611, 611, 611, 611, 611, 611, 611, 611, 611, 611, 611, 611, 611, 611, 611, 611, 611, 611, 611, 611, 611, 611, 611, 611, 611, 611, 611, 611, 611, 611, 611, 611, 611, 611, 611, 611, 611, 611, 611, 611, 611, 611, 611, 611, 611, 611, 611, 611, 611, 611, 611, 611, 611, 611, 611, 611, 611, 611, 611, 611, 611, 611, 611, 611, 611, 611, 611, 611, 611, 611, 611, 611, 611, 611, 611, 611, 611, 611, 611, 611, 611, 611, 611, 611, 611, 611, 611, 611, 611, 611, 611, 611, 611, 611, 611, 611, 611, 611, 611, 611, 611, 611, 611, 611, 611, 611, 611, 611, 611, 611, 611, 611, 611, 611, 611, 611, 611, 611, 611, 611, 611, 611, 611, 611, 611, 611, 611, 611, 611, 611, 611, 611, 611, 611, 611, 611, 611, 611, 611, 611, 611, 611, 611, 611, 611, 611, 611, 611, 611, 611, 611, 611, 611, 611, 611, 611, 611, 611, 611, 611, 611, 611, 611, 611, 611, 611, 611, 611, 611, 611, 611, 611, 611, 611, 611, 611, 611, 611, 611, 611, 611, 611, 611, 611, 611, 611, 611, 611, 611, 611, 611, 611, 611, 611, 611, 611, 611, 611, 611, 611, 611}, + {610, 610, 610, 610, 610, 610, 610, 610, 610, 610, 610, 610, 610, 610, 610, 610, 610, 610, 610, 610, 610, 610, 610, 610, 610, 610, 610, 610, 610, 610, 610, 610, 610, 610, 610, 610, 610, 610, 610, 610, 610, 610, 610, 610, 610, 610, 610, 610, 610, 610, 610, 610, 610, 610, 610, 610, 610, 610, 610, 610, 610, 610, 610, 610, 610, 610, 610, 610, 610, 610, 610, 610, 610, 610, 610, 610, 610, 610, 610, 610, 610, 610, 610, 610, 610, 610, 610, 610, 610, 610, 610, 610, 610, 610, 610, 610, 610, 610, 610, 610, 610, 610, 610, 610, 610, 610, 610, 610, 610, 610, 610, 610, 610, 610, 610, 610, 610, 610, 610, 610, 610, 610, 610, 
610, 610, 610, 610, 610, 610, 610, 610, 610, 610, 610, 610, 610, 610, 610, 610, 610, 610, 610, 610, 610, 610, 610, 610, 610, 610, 610, 610, 610, 610, 610, 610, 610, 610, 610, 610, 610, 610, 610, 610, 610, 610, 610, 610, 610, 610, 610, 610, 610, 610, 610, 610, 610, 610, 610, 610, 610, 610, 610, 610, 610, 610, 610, 610, 610, 610, 610, 610, 610, 610, 610, 610, 610, 610, 610, 610, 610, 610, 610, 610, 610, 610, 610, 610, 610, 610, 610, 610, 610, 610, 610, 610, 610, 610, 610, 610, 610, 610, 610, 610, 610, 610, 610, 610, 610, 610, 610, 610, 610, 610, 610, 610, 610, 610, 610, 610, 610, 610, 610, 610, 610, 610, 610, 610, 610, 610, 610, 610, 610, 610, 610, 610, 610, 610, 610, 610, 610, 610, 610, 610, 610, 610, 610, 610, 610, 610, 610, 610, 610, 610, 610, 610, 610, 610, 610, 610, 610, 610, 610, 610, 610, 610, 610, 610, 610, 610, 610, 610, 610, 610, 610, 610, 610, 610, 610, 610, 610, 610, 610, 610, 610, 610, 610, 610, 610, 610, 610, 610, 610, 610, 610, 610, 610, 610, 610, 610, 610, 610, 610, 610, 610, 610, 610, 610, 610, 610, 610, 610, 610, 610, 610, 610, 610, 610, 610, 610, 610, 610, 610, 610, 610, 610, 610, 610, 610, 610, 610, 610, 610, 610, 610, 610, 610, 610, 610, 610, 610, 610, 610, 610, 610, 610, 610, 610, 610, 610, 610, 610, 610, 610, 610, 610, 610, 610, 610, 610, 610, 610, 610, 610, 610, 610, 610, 610, 610, 610, 610, 610, 610, 610, 610, 610, 610, 610, 610, 610, 610, 610, 610, 610, 610, 610, 610, 610, 610, 610, 610, 610, 610, 610, 610, 610, 610, 610, 610, 610, 610, 610, 610, 610, 610, 610, 610, 610, 610, 610, 610, 610, 610, 610, 610, 610, 610, 610, 610, 610, 610, 610, 610, 610, 610, 610, 610, 610, 610, 610, 610, 610, 610, 610, 610, 610, 610, 610, 610, 610, 610, 610, 610, 610, 610, 610, 610, 610, 610, 610, 610, 610, 610, 610, 610, 610, 610, 610, 610, 610, 610, 610, 610, 610, 610, 610, 610, 610, 610, 610, 610, 610, 610, 610, 610, 610, 610, 610, 610, 610, 610, 610, 610, 610, 610, 610, 610, 610, 610, 610, 610, 610, 610, 610, 610, 610, 610, 610, 610, 610, 610, 610, 610, 610}, + {609, 609, 609, 609, 609, 609, 609, 609, 609, 609, 609, 609, 609, 609, 609, 609, 609, 609, 609, 609, 609, 609, 609, 609, 609, 609, 609, 609, 609, 609, 609, 609, 609, 609, 609, 609, 609, 609, 609, 609, 609, 609, 609, 609, 609, 609, 609, 609, 609, 609, 609, 609, 609, 609, 609, 609, 609, 609, 609, 609, 609, 609, 609, 609, 609, 609, 609, 609, 609, 609, 609, 609, 609, 609, 609, 609, 609, 609, 609, 609, 609, 609, 609, 609, 609, 609, 609, 609, 609, 609, 609, 609, 609, 609, 609, 609, 609, 609, 609, 609, 609, 609, 609, 609, 609, 609, 609, 609, 609, 609, 609, 609, 609, 609, 609, 609, 609, 609, 609, 609, 609, 609, 609, 609, 609, 609, 609, 609, 609, 609, 609, 609, 609, 609, 609, 609, 609, 609, 609, 609, 609, 609, 609, 609, 609, 609, 609, 609, 609, 609, 609, 609, 609, 609, 609, 609, 609, 609, 609, 609, 609, 609, 609, 609, 609, 609, 609, 609, 609, 609, 609, 609, 609, 609, 609, 609, 609, 609, 609, 609, 609, 609, 609, 609, 609, 609, 609, 609, 609, 609, 609, 609, 609, 609, 609, 609, 609, 609, 609, 609, 609, 609, 609, 609, 609, 609, 609, 609, 609, 609, 609, 609, 609, 609, 609, 609, 609, 609, 609, 609, 609, 609, 609, 609, 609, 609, 609, 609, 609, 609, 609, 609, 609, 609, 609, 609, 609, 609, 609, 609, 609, 609, 609, 609, 609, 609, 609, 609, 609, 609, 609, 609, 609, 609, 609, 609, 609, 609, 609, 609, 609, 609, 609, 609, 609, 609, 609, 609, 609, 609, 609, 609, 609, 609, 609, 609, 609, 609, 609, 609, 609, 609, 609, 609, 609, 609, 609, 609, 609, 609, 609, 609, 609, 609, 609, 609, 609, 609, 609, 609, 609, 609, 609, 609, 609, 609, 609, 609, 609, 609, 
609, 609, 609, 609, 609, 609, 609, 609, 609, 609, 609, 609, 609, 609, 609, 609, 609, 609, 609, 609, 609, 609, 609, 609, 609, 609, 609, 609, 609, 609, 609, 609, 609, 609, 609, 609, 609, 609, 609, 609, 609, 609, 609, 609, 609, 609, 609, 609, 609, 609, 609, 609, 609, 609, 609, 609, 609, 609, 609, 609, 609, 609, 609, 609, 609, 609, 609, 609, 609, 609, 609, 609, 609, 609, 609, 609, 609, 609, 609, 609, 609, 609, 609, 609, 609, 609, 609, 609, 609, 609, 609, 609, 609, 609, 609, 609, 609, 609, 609, 609, 609, 609, 609, 609, 609, 609, 609, 609, 609, 609, 609, 609, 609, 609, 609, 609, 609, 609, 609, 609, 609, 609, 609, 609, 609, 609, 609, 609, 609, 609, 609, 609, 609, 609, 609, 609, 609, 609, 609, 609, 609, 609, 609, 609, 609, 609, 609, 609, 609, 609, 609, 609, 609, 609, 609, 609, 609, 609, 609, 609, 609, 609, 609, 609, 609, 609, 609, 609, 609, 609, 609, 609, 609, 609, 609, 609, 609, 609, 609, 609, 609, 609, 609, 609, 609, 609, 609, 609, 609, 609, 609, 609, 609, 609, 609, 609, 609, 609, 609, 609, 609, 609, 609, 609, 609, 609, 609, 609, 609, 609, 609, 609, 609}, + {608, 608, 608, 608, 608, 608, 608, 608, 608, 608, 608, 608, 608, 608, 608, 608, 608, 608, 608, 608, 608, 608, 608, 608, 608, 608, 608, 608, 608, 608, 608, 608, 608, 608, 608, 608, 608, 608, 608, 608, 608, 608, 608, 608, 608, 608, 608, 608, 608, 608, 608, 608, 608, 608, 608, 608, 608, 608, 608, 608, 608, 608, 608, 608, 608, 608, 608, 608, 608, 608, 608, 608, 608, 608, 608, 608, 608, 608, 608, 608, 608, 608, 608, 608, 608, 608, 608, 608, 608, 608, 608, 608, 608, 608, 608, 608, 608, 608, 608, 608, 608, 608, 608, 608, 608, 608, 608, 608, 608, 608, 608, 608, 608, 608, 608, 608, 608, 608, 608, 608, 608, 608, 608, 608, 608, 608, 608, 608, 608, 608, 608, 608, 608, 608, 608, 608, 608, 608, 608, 608, 608, 608, 608, 608, 608, 608, 608, 608, 608, 608, 608, 608, 608, 608, 608, 608, 608, 608, 608, 608, 608, 608, 608, 608, 608, 608, 608, 608, 608, 608, 608, 608, 608, 608, 608, 608, 608, 608, 608, 608, 608, 608, 608, 608, 608, 608, 608, 608, 608, 608, 608, 608, 608, 608, 608, 608, 608, 608, 608, 608, 608, 608, 608, 608, 608, 608, 608, 608, 608, 608, 608, 608, 608, 608, 608, 608, 608, 608, 608, 608, 608, 608, 608, 608, 608, 608, 608, 608, 608, 608, 608, 608, 608, 608, 608, 608, 608, 608, 608, 608, 608, 608, 608, 608, 608, 608, 608, 608, 608, 608, 608, 608, 608, 608, 608, 608, 608, 608, 608, 608, 608, 608, 608, 608, 608, 608, 608, 608, 608, 608, 608, 608, 608, 608, 608, 608, 608, 608, 608, 608, 608, 608, 608, 608, 608, 608, 608, 608, 608, 608, 608, 608, 608, 608, 608, 608, 608, 608, 608, 608, 608, 608, 608, 608, 608, 608, 608, 608, 608, 608, 608, 608, 608, 608, 608, 608, 608, 608, 608, 608, 608, 608, 608, 608, 608, 608, 608, 608, 608, 608, 608, 608, 608, 608, 608, 608, 608, 608, 608, 608, 608, 608, 608, 608, 608, 608, 608, 608, 608, 608, 608, 608, 608, 608, 608, 608, 608, 608, 608, 608, 608, 608, 608, 608, 608, 608, 608, 608, 608, 608, 608, 608, 608, 608, 608, 608, 608, 608, 608, 608, 608, 608, 608, 608, 608, 608, 608, 608, 608, 608, 608, 608, 608, 608, 608, 608, 608, 608, 608, 608, 608, 608, 608, 608, 608, 608, 608, 608, 608, 608, 608, 608, 608, 608, 608, 608, 608, 608, 608, 608, 608, 608, 608, 608, 608, 608, 608, 608, 608, 608, 608, 608, 608, 608, 608, 608, 608, 608, 608, 608, 608, 608, 608, 608, 608, 608, 608, 608, 608, 608, 608, 608, 608, 608, 608, 608, 608, 608, 608, 608, 608, 608, 608, 608, 608, 608, 608, 608, 608, 608, 608, 608, 608, 608, 608, 608, 608, 608, 608, 608, 608, 608, 608, 608, 608, 608, 608, 608, 608, 608, 608, 608, 608, 608, 608, 608, 608, 
608, 608, 608, 608, 608, 608, 608, 608, 608, 608, 608, 608, 608, 608, 608, 608, 608, 608, 608, 608, 608, 608, 608, 608, 608, 608}, + {607, 607, 607, 607, 607, 607, 607, 607, 607, 607, 607, 607, 607, 607, 607, 607, 607, 607, 607, 607, 607, 607, 607, 607, 607, 607, 607, 607, 607, 607, 607, 607, 607, 607, 607, 607, 607, 607, 607, 607, 607, 607, 607, 607, 607, 607, 607, 607, 607, 607, 607, 607, 607, 607, 607, 607, 607, 607, 607, 607, 607, 607, 607, 607, 607, 607, 607, 607, 607, 607, 607, 607, 607, 607, 607, 607, 607, 607, 607, 607, 607, 607, 607, 607, 607, 607, 607, 607, 607, 607, 607, 607, 607, 607, 607, 607, 607, 607, 607, 607, 607, 607, 607, 607, 607, 607, 607, 607, 607, 607, 607, 607, 607, 607, 607, 607, 607, 607, 607, 607, 607, 607, 607, 607, 607, 607, 607, 607, 607, 607, 607, 607, 607, 607, 607, 607, 607, 607, 607, 607, 607, 607, 607, 607, 607, 607, 607, 607, 607, 607, 607, 607, 607, 607, 607, 607, 607, 607, 607, 607, 607, 607, 607, 607, 607, 607, 607, 607, 607, 607, 607, 607, 607, 607, 607, 607, 607, 607, 607, 607, 607, 607, 607, 607, 607, 607, 607, 607, 607, 607, 607, 607, 607, 607, 607, 607, 607, 607, 607, 607, 607, 607, 607, 607, 607, 607, 607, 607, 607, 607, 607, 607, 607, 607, 607, 607, 607, 607, 607, 607, 607, 607, 607, 607, 607, 607, 607, 607, 607, 607, 607, 607, 607, 607, 607, 607, 607, 607, 607, 607, 607, 607, 607, 607, 607, 607, 607, 607, 607, 607, 607, 607, 607, 607, 607, 607, 607, 607, 607, 607, 607, 607, 607, 607, 607, 607, 607, 607, 607, 607, 607, 607, 607, 607, 607, 607, 607, 607, 607, 607, 607, 607, 607, 607, 607, 607, 607, 607, 607, 607, 607, 607, 607, 607, 607, 607, 607, 607, 607, 607, 607, 607, 607, 607, 607, 607, 607, 607, 607, 607, 607, 607, 607, 607, 607, 607, 607, 607, 607, 607, 607, 607, 607, 607, 607, 607, 607, 607, 607, 607, 607, 607, 607, 607, 607, 607, 607, 607, 607, 607, 607, 607, 607, 607, 607, 607, 607, 607, 607, 607, 607, 607, 607, 607, 607, 607, 607, 607, 607, 607, 607, 607, 607, 607, 607, 607, 607, 607, 607, 607, 607, 607, 607, 607, 607, 607, 607, 607, 607, 607, 607, 607, 607, 607, 607, 607, 607, 607, 607, 607, 607, 607, 607, 607, 607, 607, 607, 607, 607, 607, 607, 607, 607, 607, 607, 607, 607, 607, 607, 607, 607, 607, 607, 607, 607, 607, 607, 607, 607, 607, 607, 607, 607, 607, 607, 607, 607, 607, 607, 607, 607, 607, 607, 607, 607, 607, 607, 607, 607, 607, 607, 607, 607, 607, 607, 607, 607, 607, 607, 607, 607, 607, 607, 607, 607, 607, 607, 607, 607, 607, 607, 607, 607, 607, 607, 607, 607, 607, 607, 607, 607, 607, 607, 607, 607, 607, 607, 607, 607, 607, 607, 607, 607, 607, 607, 607, 607, 607, 607, 607, 607, 607, 607, 607, 607, 607, 607, 607, 607, 607, 607, 607, 607, 607, 607, 607, 607, 607, 607, 607, 607, 607, 607, 607, 607, 607, 607, 607, 607, 607, 607, 607, 607}, + // 335 + {606, 606, 606, 606, 606, 606, 606, 606, 606, 606, 606, 606, 606, 606, 606, 606, 606, 606, 606, 606, 606, 606, 606, 606, 606, 606, 606, 606, 606, 606, 606, 606, 606, 606, 606, 606, 606, 606, 606, 606, 606, 606, 606, 606, 606, 606, 606, 606, 606, 606, 606, 606, 606, 606, 606, 606, 606, 606, 606, 606, 606, 606, 606, 606, 606, 606, 606, 606, 606, 606, 606, 606, 606, 606, 606, 606, 606, 606, 606, 606, 606, 606, 606, 606, 606, 606, 606, 606, 606, 606, 606, 606, 606, 606, 606, 606, 606, 606, 606, 606, 606, 606, 606, 606, 606, 606, 606, 606, 606, 606, 606, 606, 606, 606, 606, 606, 606, 606, 606, 606, 606, 606, 606, 606, 606, 606, 606, 606, 606, 606, 606, 606, 606, 606, 606, 606, 606, 606, 606, 606, 606, 606, 606, 606, 606, 606, 606, 606, 606, 606, 606, 606, 606, 606, 606, 606, 606, 606, 
606, 606, 606, 606, 606, 606, 606, 606, 606, 606, 606, 606, 606, 606, 606, 606, 606, 606, 606, 606, 606, 606, 606, 606, 606, 606, 606, 606, 606, 606, 606, 606, 606, 606, 606, 606, 606, 606, 606, 606, 606, 606, 606, 606, 606, 606, 606, 606, 606, 606, 606, 606, 606, 606, 606, 606, 606, 606, 606, 606, 606, 606, 606, 606, 606, 606, 606, 606, 606, 606, 606, 606, 606, 606, 606, 606, 606, 606, 606, 606, 606, 606, 606, 606, 606, 606, 606, 606, 606, 606, 606, 606, 606, 606, 606, 606, 606, 606, 606, 606, 606, 606, 606, 606, 606, 606, 606, 606, 606, 606, 606, 606, 606, 606, 606, 606, 606, 606, 606, 606, 606, 606, 606, 606, 606, 606, 606, 606, 606, 606, 606, 606, 606, 606, 606, 606, 606, 606, 606, 606, 606, 606, 606, 606, 606, 606, 606, 606, 606, 606, 606, 606, 606, 606, 606, 606, 606, 606, 606, 606, 606, 606, 606, 606, 606, 606, 606, 606, 606, 606, 606, 606, 606, 606, 606, 606, 606, 606, 606, 606, 606, 606, 606, 606, 606, 606, 606, 606, 606, 606, 606, 606, 606, 606, 606, 606, 606, 606, 606, 606, 606, 606, 606, 606, 606, 606, 606, 606, 606, 606, 606, 606, 606, 606, 606, 606, 606, 606, 606, 606, 606, 606, 606, 606, 606, 606, 606, 606, 606, 606, 606, 606, 606, 606, 606, 606, 606, 606, 606, 606, 606, 606, 606, 606, 606, 606, 606, 606, 606, 606, 606, 606, 606, 606, 606, 606, 606, 606, 606, 606, 606, 606, 606, 606, 606, 606, 606, 606, 606, 606, 606, 606, 606, 606, 606, 606, 606, 606, 606, 606, 606, 606, 606, 606, 606, 606, 606, 606, 606, 606, 606, 606, 606, 606, 606, 606, 606, 606, 606, 606, 606, 606, 606, 606, 606, 606, 606, 606, 606, 606, 606, 606, 606, 606, 606, 606, 606, 606, 606, 606, 606, 606, 606, 606, 606, 606, 606, 606, 606, 606, 606, 606, 606, 606, 606, 606, 606, 606, 606, 606, 606, 606, 606, 606, 606, 606, 606, 606, 606, 606, 606, 606, 606, 606, 606, 606, 606, 606, 606, 606, 606, 606, 606, 606, 606}, + {605, 605, 605, 605, 605, 605, 605, 605, 605, 605, 605, 605, 605, 605, 605, 605, 605, 605, 605, 605, 605, 605, 605, 605, 605, 605, 605, 605, 605, 605, 605, 605, 605, 605, 605, 605, 605, 605, 605, 605, 605, 605, 605, 605, 605, 605, 605, 605, 605, 605, 605, 605, 605, 605, 605, 605, 605, 605, 605, 605, 605, 605, 605, 605, 605, 605, 605, 605, 605, 605, 605, 605, 605, 605, 605, 605, 605, 605, 605, 605, 605, 605, 605, 605, 605, 605, 605, 605, 605, 605, 605, 605, 605, 605, 605, 605, 605, 605, 605, 605, 605, 605, 605, 605, 605, 605, 605, 605, 605, 605, 605, 605, 605, 605, 605, 605, 605, 605, 605, 605, 605, 605, 605, 605, 605, 605, 605, 605, 605, 605, 605, 605, 605, 605, 605, 605, 605, 605, 605, 605, 605, 605, 605, 605, 605, 605, 605, 605, 605, 605, 605, 605, 605, 605, 605, 605, 605, 605, 605, 605, 605, 605, 605, 605, 605, 605, 605, 605, 605, 605, 605, 605, 605, 605, 605, 605, 605, 605, 605, 605, 605, 605, 605, 605, 605, 605, 605, 605, 605, 605, 605, 605, 605, 605, 605, 605, 605, 605, 605, 605, 605, 605, 605, 605, 605, 605, 605, 605, 605, 605, 605, 605, 605, 605, 605, 605, 605, 605, 605, 605, 605, 605, 605, 605, 605, 605, 605, 605, 605, 605, 605, 605, 605, 605, 605, 605, 605, 605, 605, 605, 605, 605, 605, 605, 605, 605, 605, 605, 605, 605, 605, 605, 605, 605, 605, 605, 605, 605, 605, 605, 605, 605, 605, 605, 605, 605, 605, 605, 605, 605, 605, 605, 605, 605, 605, 605, 605, 605, 605, 605, 605, 605, 605, 605, 605, 605, 605, 605, 605, 605, 605, 605, 605, 605, 605, 605, 605, 605, 605, 605, 605, 605, 605, 605, 605, 605, 605, 605, 605, 605, 605, 605, 605, 605, 605, 605, 605, 605, 605, 605, 605, 605, 605, 605, 605, 605, 605, 605, 605, 605, 605, 605, 605, 605, 605, 605, 605, 605, 605, 605, 605, 605, 605, 605, 605, 
605, 605, 605, 605, 605, 605, 605, 605, 605, 605, 605, 605, 605, 605, 605, 605, 605, 605, 605, 605, 605, 605, 605, 605, 605, 605, 605, 605, 605, 605, 605, 605, 605, 605, 605, 605, 605, 605, 605, 605, 605, 605, 605, 605, 605, 605, 605, 605, 605, 605, 605, 605, 605, 605, 605, 605, 605, 605, 605, 605, 605, 605, 605, 605, 605, 605, 605, 605, 605, 605, 605, 605, 605, 605, 605, 605, 605, 605, 605, 605, 605, 605, 605, 605, 605, 605, 605, 605, 605, 605, 605, 605, 605, 605, 605, 605, 605, 605, 605, 605, 605, 605, 605, 605, 605, 605, 605, 605, 605, 605, 605, 605, 605, 605, 605, 605, 605, 605, 605, 605, 605, 605, 605, 605, 605, 605, 605, 605, 605, 605, 605, 605, 605, 605, 605, 605, 605, 605, 605, 605, 605, 605, 605, 605, 605, 605, 605, 605, 605, 605, 605, 605, 605, 605, 605, 605, 605, 605, 605, 605, 605, 605, 605, 605, 605, 605, 605, 605, 605, 605, 605, 605, 605, 605, 605, 605, 605, 605}, + {604, 604, 604, 604, 604, 604, 604, 604, 604, 604, 604, 604, 604, 604, 604, 604, 604, 604, 604, 604, 604, 604, 604, 604, 604, 604, 604, 604, 604, 604, 604, 604, 604, 604, 604, 604, 604, 604, 604, 604, 604, 604, 604, 604, 604, 604, 604, 604, 604, 604, 604, 604, 604, 604, 604, 604, 604, 604, 604, 604, 604, 604, 604, 604, 604, 604, 604, 604, 604, 604, 604, 604, 604, 604, 604, 604, 604, 604, 604, 604, 604, 604, 604, 604, 604, 604, 604, 604, 604, 604, 604, 604, 604, 604, 604, 604, 604, 604, 604, 604, 604, 604, 604, 604, 604, 604, 604, 604, 604, 604, 604, 604, 604, 604, 604, 604, 604, 604, 604, 604, 604, 604, 604, 604, 604, 604, 604, 604, 604, 604, 604, 604, 604, 604, 604, 604, 604, 604, 604, 604, 604, 604, 604, 604, 604, 604, 604, 604, 604, 604, 604, 604, 604, 604, 604, 604, 604, 604, 604, 604, 604, 604, 604, 604, 604, 604, 604, 604, 604, 604, 604, 604, 604, 604, 604, 604, 604, 604, 604, 604, 604, 604, 604, 604, 604, 604, 604, 604, 604, 604, 604, 604, 604, 604, 604, 604, 604, 604, 604, 604, 604, 604, 604, 604, 604, 604, 604, 604, 604, 604, 604, 604, 604, 604, 604, 604, 604, 604, 604, 604, 604, 604, 604, 604, 604, 604, 604, 604, 604, 604, 604, 604, 604, 604, 604, 604, 604, 604, 604, 604, 604, 604, 604, 604, 604, 604, 604, 604, 604, 604, 604, 604, 604, 604, 604, 604, 604, 604, 604, 604, 604, 604, 604, 604, 604, 604, 604, 604, 604, 604, 604, 604, 604, 604, 604, 604, 604, 604, 604, 604, 604, 604, 604, 604, 604, 604, 604, 604, 604, 604, 604, 604, 604, 604, 604, 604, 604, 604, 604, 604, 604, 604, 604, 604, 604, 604, 604, 604, 604, 604, 604, 604, 604, 604, 604, 604, 604, 604, 604, 604, 604, 604, 604, 604, 604, 604, 604, 604, 604, 604, 604, 604, 604, 604, 604, 604, 604, 604, 604, 604, 604, 604, 604, 604, 604, 604, 604, 604, 604, 604, 604, 604, 604, 604, 604, 604, 604, 604, 604, 604, 604, 604, 604, 604, 604, 604, 604, 604, 604, 604, 604, 604, 604, 604, 604, 604, 604, 604, 604, 604, 604, 604, 604, 604, 604, 604, 604, 604, 604, 604, 604, 604, 604, 604, 604, 604, 604, 604, 604, 604, 604, 604, 604, 604, 604, 604, 604, 604, 604, 604, 604, 604, 604, 604, 604, 604, 604, 604, 604, 604, 604, 604, 604, 604, 604, 604, 604, 604, 604, 604, 604, 604, 604, 604, 604, 604, 604, 604, 604, 604, 604, 604, 604, 604, 604, 604, 604, 604, 604, 604, 604, 604, 604, 604, 604, 604, 604, 604, 604, 604, 604, 604, 604, 604, 604, 604, 604, 604, 604, 604, 604, 604, 604, 604, 604, 604, 604, 604, 604, 604, 604, 604, 604, 604, 604, 604, 604, 604, 604, 604, 604, 604, 604, 604, 604, 604, 604, 604, 604, 604, 604, 604, 604, 604, 604, 604, 604, 604, 604, 604, 604, 604, 604, 604, 604, 604, 604, 604, 604, 604, 604, 604, 604}, + {603, 603, 603, 603, 603, 603, 603, 603, 
603, 603, 603, 603, 603, 603, 603, 603, 603, 603, 603, 603, 603, 603, 603, 603, 603, 603, 603, 603, 603, 603, 603, 603, 603, 603, 603, 603, 603, 603, 603, 603, 603, 603, 603, 603, 603, 603, 603, 603, 603, 603, 603, 603, 603, 603, 603, 603, 603, 603, 603, 603, 603, 603, 603, 603, 603, 603, 603, 603, 603, 603, 603, 603, 603, 603, 603, 603, 603, 603, 603, 603, 603, 603, 603, 603, 603, 603, 603, 603, 603, 603, 603, 603, 603, 603, 603, 603, 603, 603, 603, 603, 603, 603, 603, 603, 603, 603, 603, 603, 603, 603, 603, 603, 603, 603, 603, 603, 603, 603, 603, 603, 603, 603, 603, 603, 603, 603, 603, 603, 603, 603, 603, 603, 603, 603, 603, 603, 603, 603, 603, 603, 603, 603, 603, 603, 603, 603, 603, 603, 603, 603, 603, 603, 603, 603, 603, 603, 603, 603, 603, 603, 603, 603, 603, 603, 603, 603, 603, 603, 603, 603, 603, 603, 603, 603, 603, 603, 603, 603, 603, 603, 603, 603, 603, 603, 603, 603, 603, 603, 603, 603, 603, 603, 603, 603, 603, 603, 603, 603, 603, 603, 603, 603, 603, 603, 603, 603, 603, 603, 603, 603, 603, 603, 603, 603, 603, 603, 603, 603, 603, 603, 603, 603, 603, 603, 603, 603, 603, 603, 603, 603, 603, 603, 603, 603, 603, 603, 603, 603, 603, 603, 603, 603, 603, 603, 603, 603, 603, 603, 603, 603, 603, 603, 603, 603, 603, 603, 603, 603, 603, 603, 603, 603, 603, 603, 603, 603, 603, 603, 603, 603, 603, 603, 603, 603, 603, 603, 603, 603, 603, 603, 603, 603, 603, 603, 603, 603, 603, 603, 603, 603, 603, 603, 603, 603, 603, 603, 603, 603, 603, 603, 603, 603, 603, 603, 603, 603, 603, 603, 603, 603, 603, 603, 603, 603, 603, 603, 603, 603, 603, 603, 603, 603, 603, 603, 603, 603, 603, 603, 603, 603, 603, 603, 603, 603, 603, 603, 603, 603, 603, 603, 603, 603, 603, 603, 603, 603, 603, 603, 603, 603, 603, 603, 603, 603, 603, 603, 603, 603, 603, 603, 603, 603, 603, 603, 603, 603, 603, 603, 603, 603, 603, 603, 603, 603, 603, 603, 603, 603, 603, 603, 603, 603, 603, 603, 603, 603, 603, 603, 603, 603, 603, 603, 603, 603, 603, 603, 603, 603, 603, 603, 603, 603, 603, 603, 603, 603, 603, 603, 603, 603, 603, 603, 603, 603, 603, 603, 603, 603, 603, 603, 603, 603, 603, 603, 603, 603, 603, 603, 603, 603, 603, 603, 603, 603, 603, 603, 603, 603, 603, 603, 603, 603, 603, 603, 603, 603, 603, 603, 603, 603, 603, 603, 603, 603, 603, 603, 603, 603, 603, 603, 603, 603, 603, 603, 603, 603, 603, 603, 603, 603, 603, 603, 603, 603, 603, 603, 603, 603, 603, 603, 603, 603, 603, 603, 603, 603, 603, 603, 603, 603, 603, 603, 603, 603, 603, 603, 603, 603, 603, 603, 603, 603, 603, 603, 603, 603, 603, 603, 603, 603, 603, 603, 603, 603, 603, 603, 603, 603, 603, 603, 603, 603, 603}, + {602, 602, 602, 602, 602, 602, 602, 602, 602, 602, 602, 602, 602, 602, 602, 602, 602, 602, 602, 602, 602, 602, 602, 602, 602, 602, 602, 602, 602, 602, 602, 602, 602, 602, 602, 602, 602, 602, 602, 602, 602, 602, 602, 602, 602, 602, 602, 602, 602, 602, 602, 602, 602, 602, 602, 602, 602, 602, 602, 602, 602, 602, 602, 602, 602, 602, 602, 602, 602, 602, 602, 602, 602, 602, 602, 602, 602, 602, 602, 602, 602, 602, 602, 602, 602, 602, 602, 602, 602, 602, 602, 602, 602, 602, 602, 602, 602, 602, 602, 602, 602, 602, 602, 602, 602, 602, 602, 602, 602, 602, 602, 602, 602, 602, 602, 602, 602, 602, 602, 602, 602, 602, 602, 602, 602, 602, 602, 602, 602, 602, 602, 602, 602, 602, 602, 602, 602, 602, 602, 602, 602, 602, 602, 602, 602, 602, 602, 602, 602, 602, 602, 602, 602, 602, 602, 602, 602, 602, 602, 602, 602, 602, 602, 602, 602, 602, 602, 602, 602, 602, 602, 602, 602, 602, 602, 602, 602, 602, 602, 602, 602, 602, 602, 602, 602, 602, 602, 602, 602, 602, 602, 602, 602, 602, 602, 
602, 602, 602, 602, 602, 602, 602, 602, 602, 602, 602, 602, 602, 602, 602, 602, 602, 602, 602, 602, 602, 602, 602, 602, 602, 602, 602, 602, 602, 602, 602, 602, 602, 602, 602, 602, 602, 602, 602, 602, 602, 602, 602, 602, 602, 602, 602, 602, 602, 602, 602, 602, 602, 602, 602, 602, 602, 602, 602, 602, 602, 602, 602, 602, 602, 602, 602, 602, 602, 602, 602, 602, 602, 602, 602, 602, 602, 602, 602, 602, 602, 602, 602, 602, 602, 602, 602, 602, 602, 602, 602, 602, 602, 602, 602, 602, 602, 602, 602, 602, 602, 602, 602, 602, 602, 602, 602, 602, 602, 602, 602, 602, 602, 602, 602, 602, 602, 602, 602, 602, 602, 602, 602, 602, 602, 602, 602, 602, 602, 602, 602, 602, 602, 602, 602, 602, 602, 602, 602, 602, 602, 602, 602, 602, 602, 602, 602, 602, 602, 602, 602, 602, 602, 602, 602, 602, 602, 602, 602, 602, 602, 602, 602, 602, 602, 602, 602, 602, 602, 602, 602, 602, 602, 602, 602, 602, 602, 602, 602, 602, 602, 602, 602, 602, 602, 602, 602, 602, 602, 602, 602, 602, 602, 602, 602, 602, 602, 602, 602, 602, 602, 602, 602, 602, 602, 602, 602, 602, 602, 602, 602, 602, 602, 602, 602, 602, 602, 602, 602, 602, 602, 602, 602, 602, 602, 602, 602, 602, 602, 602, 602, 602, 602, 602, 602, 602, 602, 602, 602, 602, 602, 602, 602, 602, 602, 602, 602, 602, 602, 602, 602, 602, 602, 602, 602, 602, 602, 602, 602, 602, 602, 602, 602, 602, 602, 602, 602, 602, 602, 602, 602, 602, 602, 602, 602, 602, 602, 602, 602, 602, 602, 602, 602, 602, 602, 602, 602, 602, 602, 602, 602, 602, 602, 602, 602, 602, 602, 602, 602, 602, 602, 602, 602, 602, 602, 602, 602, 602, 602, 602, 602, 602, 602, 602, 602, 602, 602, 602, 602, 602, 602, 602, 602, 602, 602, 602, 602, 602}, + // 340 + {601, 601, 601, 601, 601, 601, 601, 601, 601, 601, 601, 601, 601, 601, 601, 601, 601, 601, 601, 601, 601, 601, 601, 601, 601, 601, 601, 601, 601, 601, 601, 601, 601, 601, 601, 601, 601, 601, 601, 601, 601, 601, 601, 601, 601, 601, 601, 601, 601, 601, 601, 601, 601, 601, 601, 601, 601, 601, 601, 601, 601, 601, 601, 601, 601, 601, 601, 601, 601, 601, 601, 601, 601, 601, 601, 601, 601, 601, 601, 601, 601, 601, 601, 601, 601, 601, 601, 601, 601, 601, 601, 601, 601, 601, 601, 601, 601, 601, 601, 601, 601, 601, 601, 601, 601, 601, 601, 601, 601, 601, 601, 601, 601, 601, 601, 601, 601, 601, 601, 601, 601, 601, 601, 601, 601, 601, 601, 601, 601, 601, 601, 601, 601, 601, 601, 601, 601, 601, 601, 601, 601, 601, 601, 601, 601, 601, 601, 601, 601, 601, 601, 601, 601, 601, 601, 601, 601, 601, 601, 601, 601, 601, 601, 601, 601, 601, 601, 601, 601, 601, 601, 601, 601, 601, 601, 601, 601, 601, 601, 601, 601, 601, 601, 601, 601, 601, 601, 601, 601, 601, 601, 601, 601, 601, 601, 601, 601, 601, 601, 601, 601, 601, 601, 601, 601, 601, 601, 601, 601, 601, 601, 601, 601, 601, 601, 601, 601, 601, 601, 601, 601, 601, 601, 601, 601, 601, 601, 601, 601, 601, 601, 601, 601, 601, 601, 601, 601, 601, 601, 601, 601, 601, 601, 601, 601, 601, 601, 601, 601, 601, 601, 601, 601, 601, 601, 601, 601, 601, 601, 601, 601, 601, 601, 601, 601, 601, 601, 601, 601, 601, 601, 601, 601, 601, 601, 601, 601, 601, 601, 601, 601, 601, 601, 601, 601, 601, 601, 601, 601, 601, 601, 601, 601, 601, 601, 601, 601, 601, 601, 601, 601, 601, 601, 601, 601, 601, 601, 601, 601, 601, 601, 601, 601, 601, 601, 601, 601, 601, 601, 601, 601, 601, 601, 601, 601, 601, 601, 601, 601, 601, 601, 601, 601, 601, 601, 601, 601, 601, 601, 601, 601, 601, 601, 601, 601, 601, 601, 601, 601, 601, 601, 601, 601, 601, 601, 601, 601, 601, 601, 601, 601, 601, 601, 601, 601, 601, 601, 601, 601, 601, 601, 601, 601, 601, 601, 601, 601, 601, 601, 601, 
601, 601, 601, 601, 601, 601, 601, 601, 601, 601, 601, 601, 601, 601, 601, 601, 601, 601, 601, 601, 601, 601, 601, 601, 601, 601, 601, 601, 601, 601, 601, 601, 601, 601, 601, 601, 601, 601, 601, 601, 601, 601, 601, 601, 601, 601, 601, 601, 601, 601, 601, 601, 601, 601, 601, 601, 601, 601, 601, 601, 601, 601, 601, 601, 601, 601, 601, 601, 601, 601, 601, 601, 601, 601, 601, 601, 601, 601, 601, 601, 601, 601, 601, 601, 601, 601, 601, 601, 601, 601, 601, 601, 601, 601, 601, 601, 601, 601, 601, 601, 601, 601, 601, 601, 601, 601, 601, 601, 601, 601, 601, 601, 601, 601, 601, 601, 601, 601, 601, 601, 601, 601, 601, 601, 601, 601, 601, 601, 601, 601, 601, 601, 601, 601, 601, 601, 601, 601, 601, 601, 601, 601, 601}, + {600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600}, + {599, 599, 599, 599, 599, 599, 599, 599, 599, 599, 599, 599, 599, 599, 599, 599, 599, 599, 599, 599, 599, 599, 599, 599, 599, 599, 599, 599, 599, 599, 599, 599, 599, 599, 599, 599, 599, 599, 599, 599, 599, 599, 599, 
599, 599, 599, 599, 599, 599, 599, 599, 599, 599, 599, 599, 599, 599, 599, 599, 599, 599, 599, 599, 599, 599, 599, 599, 599, 599, 599, 599, 599, 599, 599, 599, 599, 599, 599, 599, 599, 599, 599, 599, 599, 599, 599, 599, 599, 599, 599, 599, 599, 599, 599, 599, 599, 599, 599, 599, 599, 599, 599, 599, 599, 599, 599, 599, 599, 599, 599, 599, 599, 599, 599, 599, 599, 599, 599, 599, 599, 599, 599, 599, 599, 599, 599, 599, 599, 599, 599, 599, 599, 599, 599, 599, 599, 599, 599, 599, 599, 599, 599, 599, 599, 599, 599, 599, 599, 599, 599, 599, 599, 599, 599, 599, 599, 599, 599, 599, 599, 599, 599, 599, 599, 599, 599, 599, 599, 599, 599, 599, 599, 599, 599, 599, 599, 599, 599, 599, 599, 599, 599, 599, 599, 599, 599, 599, 599, 599, 599, 599, 599, 599, 599, 599, 599, 599, 599, 599, 599, 599, 599, 599, 599, 599, 599, 599, 599, 599, 599, 599, 599, 599, 599, 599, 599, 599, 599, 599, 599, 599, 599, 599, 599, 599, 599, 599, 599, 599, 599, 599, 599, 599, 599, 599, 599, 599, 599, 599, 599, 599, 599, 599, 599, 599, 599, 599, 599, 599, 599, 599, 599, 599, 599, 599, 599, 599, 599, 599, 599, 599, 599, 599, 599, 599, 599, 599, 599, 599, 599, 599, 599, 599, 599, 599, 599, 599, 599, 599, 599, 599, 599, 599, 599, 599, 599, 599, 599, 599, 599, 599, 599, 599, 599, 599, 599, 599, 599, 599, 599, 599, 599, 599, 599, 599, 599, 599, 599, 599, 599, 599, 599, 599, 599, 599, 599, 599, 599, 599, 599, 599, 599, 599, 599, 599, 599, 599, 599, 599, 599, 599, 599, 599, 599, 599, 599, 599, 599, 599, 599, 599, 599, 599, 599, 599, 599, 599, 599, 599, 599, 599, 599, 599, 599, 599, 599, 599, 599, 599, 599, 599, 599, 599, 599, 599, 599, 599, 599, 599, 599, 599, 599, 599, 599, 599, 599, 599, 599, 599, 599, 599, 599, 599, 599, 599, 599, 599, 599, 599, 599, 599, 599, 599, 599, 599, 599, 599, 599, 599, 599, 599, 599, 599, 599, 599, 599, 599, 599, 599, 599, 599, 599, 599, 599, 599, 599, 599, 599, 599, 599, 599, 599, 599, 599, 599, 599, 599, 599, 599, 599, 599, 599, 599, 599, 599, 599, 599, 599, 599, 599, 599, 599, 599, 599, 599, 599, 599, 599, 599, 599, 599, 599, 599, 599, 599, 599, 599, 599, 599, 599, 599, 599, 599, 599, 599, 599, 599, 599, 599, 599, 599, 599, 599, 599, 599, 599, 599, 599, 599, 599, 599, 599, 599, 599, 599, 599, 599, 599, 599, 599, 599, 599, 599, 599, 599, 599, 599, 599, 599, 599, 599, 599, 599, 599, 599, 599, 599, 599, 599, 599, 599, 599, 599, 599, 599, 599, 599, 599, 599, 599, 599, 599, 599}, + {598, 598, 598, 598, 598, 598, 598, 598, 598, 598, 598, 598, 598, 598, 598, 598, 598, 598, 598, 598, 598, 598, 598, 598, 598, 598, 598, 598, 598, 598, 598, 598, 598, 598, 598, 598, 598, 598, 598, 598, 598, 598, 598, 598, 598, 598, 598, 598, 598, 598, 598, 598, 598, 598, 598, 598, 598, 598, 598, 598, 598, 598, 598, 598, 598, 598, 598, 598, 598, 598, 598, 598, 598, 598, 598, 598, 598, 598, 598, 598, 598, 598, 598, 598, 598, 598, 598, 598, 598, 598, 598, 598, 598, 598, 598, 598, 598, 598, 598, 598, 598, 598, 598, 598, 598, 598, 598, 598, 598, 598, 598, 598, 598, 598, 598, 598, 598, 598, 598, 598, 598, 598, 598, 598, 598, 598, 598, 598, 598, 598, 598, 598, 598, 598, 598, 598, 598, 598, 598, 598, 598, 598, 598, 598, 598, 598, 598, 598, 598, 598, 598, 598, 598, 598, 598, 598, 598, 598, 598, 598, 598, 598, 598, 598, 598, 598, 598, 598, 598, 598, 598, 598, 598, 598, 598, 598, 598, 598, 598, 598, 598, 598, 598, 598, 598, 598, 598, 598, 598, 598, 598, 598, 598, 598, 598, 598, 598, 598, 598, 598, 598, 598, 598, 598, 598, 598, 598, 598, 598, 598, 598, 598, 598, 598, 598, 598, 598, 598, 598, 598, 598, 598, 598, 598, 598, 598, 598, 598, 598, 598, 
598, 598, 598, 598, 598, 598, 598, 598, 598, 598, 598, 598, 598, 598, 598, 598, 598, 598, 598, 598, 598, 598, 598, 598, 598, 598, 598, 598, 598, 598, 598, 598, 598, 598, 598, 598, 598, 598, 598, 598, 598, 598, 598, 598, 598, 598, 598, 598, 598, 598, 598, 598, 598, 598, 598, 598, 598, 598, 598, 598, 598, 598, 598, 598, 598, 598, 598, 598, 598, 598, 598, 598, 598, 598, 598, 598, 598, 598, 598, 598, 598, 598, 598, 598, 598, 598, 598, 598, 598, 598, 598, 598, 598, 598, 598, 598, 598, 598, 598, 598, 598, 598, 598, 598, 598, 598, 598, 598, 598, 598, 598, 598, 598, 598, 598, 598, 598, 598, 598, 598, 598, 598, 598, 598, 598, 598, 598, 598, 598, 598, 598, 598, 598, 598, 598, 598, 598, 598, 598, 598, 598, 598, 598, 598, 598, 598, 598, 598, 598, 598, 598, 598, 598, 598, 598, 598, 598, 598, 598, 598, 598, 598, 598, 598, 598, 598, 598, 598, 598, 598, 598, 598, 598, 598, 598, 598, 598, 598, 598, 598, 598, 598, 598, 598, 598, 598, 598, 598, 598, 598, 598, 598, 598, 598, 598, 598, 598, 598, 598, 598, 598, 598, 598, 598, 598, 598, 598, 598, 598, 598, 598, 598, 598, 598, 598, 598, 598, 598, 598, 598, 598, 598, 598, 598, 598, 598, 598, 598, 598, 598, 598, 598, 598, 598, 598, 598, 598, 598, 598, 598, 598, 598, 598, 598, 598, 598, 598, 598, 598, 598, 598, 598, 598, 598, 598, 598, 598, 598, 598, 598, 598, 598, 598, 598, 598, 598, 598, 598, 598, 598, 598, 598, 598, 598, 598, 598, 598, 598, 598, 598, 598, 598, 598, 598, 598, 598, 598, 598, 598, 598, 598, 598, 598}, + {597, 597, 597, 597, 597, 597, 597, 597, 597, 597, 597, 597, 597, 597, 597, 597, 597, 597, 597, 597, 597, 597, 597, 597, 597, 597, 597, 597, 597, 597, 597, 597, 597, 597, 597, 597, 597, 597, 597, 597, 597, 597, 597, 597, 597, 597, 597, 597, 597, 597, 597, 597, 597, 597, 597, 597, 597, 597, 597, 597, 597, 597, 597, 597, 597, 597, 597, 597, 597, 597, 597, 597, 597, 597, 597, 597, 597, 597, 597, 597, 597, 597, 597, 597, 597, 597, 597, 597, 597, 597, 597, 597, 597, 597, 597, 597, 597, 597, 597, 597, 597, 597, 597, 597, 597, 597, 597, 597, 597, 597, 597, 597, 597, 597, 597, 597, 597, 597, 597, 597, 597, 597, 597, 597, 597, 597, 597, 597, 597, 597, 597, 597, 597, 597, 597, 597, 597, 597, 597, 597, 597, 597, 597, 597, 597, 597, 597, 597, 597, 597, 597, 597, 597, 597, 597, 597, 597, 597, 597, 597, 597, 597, 597, 597, 597, 597, 597, 597, 597, 597, 597, 597, 597, 597, 597, 597, 597, 597, 597, 597, 597, 597, 597, 597, 597, 597, 597, 597, 597, 597, 597, 597, 597, 597, 597, 597, 597, 597, 597, 597, 597, 597, 597, 597, 597, 597, 597, 597, 597, 597, 597, 597, 597, 597, 597, 597, 597, 597, 597, 597, 597, 597, 597, 597, 597, 597, 597, 597, 597, 597, 597, 597, 597, 597, 597, 597, 597, 597, 597, 597, 597, 597, 597, 597, 597, 597, 597, 597, 597, 597, 597, 597, 597, 597, 597, 597, 597, 597, 597, 597, 597, 597, 597, 597, 597, 597, 597, 597, 597, 597, 597, 597, 597, 597, 597, 597, 597, 597, 597, 597, 597, 597, 597, 597, 597, 597, 597, 597, 597, 597, 597, 597, 597, 597, 597, 597, 597, 597, 597, 597, 597, 597, 597, 597, 597, 597, 597, 597, 597, 597, 597, 597, 597, 597, 597, 597, 597, 597, 597, 597, 597, 597, 597, 597, 597, 597, 597, 597, 597, 597, 597, 597, 597, 597, 597, 597, 597, 597, 597, 597, 597, 597, 597, 597, 597, 597, 597, 597, 597, 597, 597, 597, 597, 597, 597, 597, 597, 597, 597, 597, 597, 597, 597, 597, 597, 597, 597, 597, 597, 597, 597, 597, 597, 597, 597, 597, 597, 597, 597, 597, 597, 597, 597, 597, 597, 597, 597, 597, 597, 597, 597, 597, 597, 597, 597, 597, 597, 597, 597, 597, 597, 597, 597, 597, 597, 597, 597, 597, 597, 597, 597, 597, 597, 597, 597, 597, 597, 
597, 597, 597, 597, 597, 597, 597, 597, 597, 597, 597, 597, 597, 597, 597, 597, 597, 597, 597, 597, 597, 597, 597, 597, 597, 597, 597, 597, 597, 597, 597, 597, 597, 597, 597, 597, 597, 597, 597, 597, 597, 597, 597, 597, 597, 597, 597, 597, 597, 597, 597, 597, 597, 597, 597, 597, 597, 597, 597, 597, 597, 597, 597, 597, 597, 597, 597, 597, 597, 597, 597, 597, 597, 597, 597, 597, 597, 597, 597, 597, 597, 597, 597, 597, 597, 597, 597, 597, 597, 597, 597, 597, 597, 597, 597, 597, 597, 597, 597, 597, 597, 597, 597, 597, 597, 597}, + // 345 + {596, 596, 596, 596, 596, 596, 596, 596, 596, 596, 596, 596, 596, 596, 596, 596, 596, 596, 596, 596, 596, 596, 596, 596, 596, 596, 596, 596, 596, 596, 596, 596, 596, 596, 596, 596, 596, 596, 596, 596, 596, 596, 596, 596, 596, 596, 596, 596, 596, 596, 596, 596, 596, 596, 596, 596, 596, 596, 596, 596, 596, 596, 596, 596, 596, 596, 596, 596, 596, 596, 596, 596, 596, 596, 596, 596, 596, 596, 596, 596, 596, 596, 596, 596, 596, 596, 596, 596, 596, 596, 596, 596, 596, 596, 596, 596, 596, 596, 596, 596, 596, 596, 596, 596, 596, 596, 596, 596, 596, 596, 596, 596, 596, 596, 596, 596, 596, 596, 596, 596, 596, 596, 596, 596, 596, 596, 596, 596, 596, 596, 596, 596, 596, 596, 596, 596, 596, 596, 596, 596, 596, 596, 596, 596, 596, 596, 596, 596, 596, 596, 596, 596, 596, 596, 596, 596, 596, 596, 596, 596, 596, 596, 596, 596, 596, 596, 596, 596, 596, 596, 596, 596, 596, 596, 596, 596, 596, 596, 596, 596, 596, 596, 596, 596, 596, 596, 596, 596, 596, 596, 596, 596, 596, 596, 596, 596, 596, 596, 596, 596, 596, 596, 596, 596, 596, 596, 596, 596, 596, 596, 596, 596, 596, 596, 596, 596, 596, 596, 596, 596, 596, 596, 596, 596, 596, 596, 596, 596, 596, 596, 596, 596, 596, 596, 596, 596, 596, 596, 596, 596, 596, 596, 596, 596, 596, 596, 596, 596, 596, 596, 596, 596, 596, 596, 596, 596, 596, 596, 596, 596, 596, 596, 596, 596, 596, 596, 596, 596, 596, 596, 596, 596, 596, 596, 596, 596, 596, 596, 596, 596, 596, 596, 596, 596, 596, 596, 596, 596, 596, 596, 596, 596, 596, 596, 596, 596, 596, 596, 596, 596, 596, 596, 596, 596, 596, 596, 596, 596, 596, 596, 596, 596, 596, 596, 596, 596, 596, 596, 596, 596, 596, 596, 596, 596, 596, 596, 596, 596, 596, 596, 596, 596, 596, 596, 596, 596, 596, 596, 596, 596, 596, 596, 596, 596, 596, 596, 596, 596, 596, 596, 596, 596, 596, 596, 596, 596, 596, 596, 596, 596, 596, 596, 596, 596, 596, 596, 596, 596, 596, 596, 596, 596, 596, 596, 596, 596, 596, 596, 596, 596, 596, 596, 596, 596, 596, 596, 596, 596, 596, 596, 596, 596, 596, 596, 596, 596, 596, 596, 596, 596, 596, 596, 596, 596, 596, 596, 596, 596, 596, 596, 596, 596, 596, 596, 596, 596, 596, 596, 596, 596, 596, 596, 596, 596, 596, 596, 596, 596, 596, 596, 596, 596, 596, 596, 596, 596, 596, 596, 596, 596, 596, 596, 596, 596, 596, 596, 596, 596, 596, 596, 596, 596, 596, 596, 596, 596, 596, 596, 596, 596, 596, 596, 596, 596, 596, 596, 596, 596, 596, 596, 596, 596, 596, 596, 596, 596, 596, 596, 596, 596, 596, 596, 596, 596, 596, 596, 596, 596, 596, 596, 596, 596, 596, 596, 596, 596, 596, 596, 596, 596, 596, 596, 596, 596, 596, 596, 596, 596, 596, 596, 596, 596, 596, 596, 596, 596, 596, 596, 596, 596, 596, 596, 596}, + {595, 595, 595, 595, 595, 595, 595, 595, 595, 595, 595, 595, 595, 595, 595, 595, 595, 595, 595, 595, 595, 595, 595, 595, 595, 595, 595, 595, 595, 595, 595, 595, 595, 595, 595, 595, 595, 595, 595, 595, 595, 595, 595, 595, 595, 595, 595, 595, 595, 595, 595, 595, 595, 595, 595, 595, 595, 595, 595, 595, 595, 595, 595, 595, 595, 595, 595, 595, 595, 595, 595, 595, 595, 595, 595, 595, 595, 595, 
595, 595, 595, 595, 595, 595, 595, 595, 595, 595, 595, 595, 595, 595, 595, 595, 595, 595, 595, 595, 595, 595, 595, 595, 595, 595, 595, 595, 595, 595, 595, 595, 595, 595, 595, 595, 595, 595, 595, 595, 595, 595, 595, 595, 595, 595, 595, 595, 595, 595, 595, 595, 595, 595, 595, 595, 595, 595, 595, 595, 595, 595, 595, 595, 595, 595, 595, 595, 595, 595, 595, 595, 595, 595, 595, 595, 595, 595, 595, 595, 595, 595, 595, 595, 595, 595, 595, 595, 595, 595, 595, 595, 595, 595, 595, 595, 595, 595, 595, 595, 595, 595, 595, 595, 595, 595, 595, 595, 595, 595, 595, 595, 595, 595, 595, 595, 595, 595, 595, 595, 595, 595, 595, 595, 595, 595, 595, 595, 595, 595, 595, 595, 595, 595, 595, 595, 595, 595, 595, 595, 595, 595, 595, 595, 595, 595, 595, 595, 595, 595, 595, 595, 595, 595, 595, 595, 595, 595, 595, 595, 595, 595, 595, 595, 595, 595, 595, 595, 595, 595, 595, 595, 595, 595, 595, 595, 595, 595, 595, 595, 595, 595, 595, 595, 595, 595, 595, 595, 595, 595, 595, 595, 595, 595, 595, 595, 595, 595, 595, 595, 595, 595, 595, 595, 595, 595, 595, 595, 595, 595, 595, 595, 595, 595, 595, 595, 595, 595, 595, 595, 595, 595, 595, 595, 595, 595, 595, 595, 595, 595, 595, 595, 595, 595, 595, 595, 595, 595, 595, 595, 595, 595, 595, 595, 595, 595, 595, 595, 595, 595, 595, 595, 595, 595, 595, 595, 595, 595, 595, 595, 595, 595, 595, 595, 595, 595, 595, 595, 595, 595, 595, 595, 595, 595, 595, 595, 595, 595, 595, 595, 595, 595, 595, 595, 595, 595, 595, 595, 595, 595, 595, 595, 595, 595, 595, 595, 595, 595, 595, 595, 595, 595, 595, 595, 595, 595, 595, 595, 595, 595, 595, 595, 595, 595, 595, 595, 595, 595, 595, 595, 595, 595, 595, 595, 595, 595, 595, 595, 595, 595, 595, 595, 595, 595, 595, 595, 595, 595, 595, 595, 595, 595, 595, 595, 595, 595, 595, 595, 595, 595, 595, 595, 595, 595, 595, 595, 595, 595, 595, 595, 595, 595, 595, 595, 595, 595, 595, 595, 595, 595, 595, 595, 595, 595, 595, 595, 595, 595, 595, 595, 595, 595, 595, 595, 595, 595, 595, 595, 595, 595, 595, 595, 595, 595, 595, 595, 595, 595, 595, 595, 595, 595, 595, 595, 595, 595, 595, 595, 595, 595, 595, 595, 595, 595, 595, 595, 595, 595, 595, 595, 595, 595, 595, 595, 595, 595, 595, 595, 595, 595, 595, 595, 595, 595, 595, 595, 595, 595, 595, 595, 595, 595, 595, 595, 595}, + {594, 594, 594, 594, 594, 594, 594, 594, 594, 594, 594, 594, 594, 594, 594, 594, 594, 594, 594, 594, 594, 594, 594, 594, 594, 594, 594, 594, 594, 594, 594, 594, 594, 594, 594, 594, 594, 594, 594, 594, 594, 594, 594, 594, 594, 594, 594, 594, 594, 594, 594, 594, 594, 594, 594, 594, 594, 594, 594, 594, 594, 594, 594, 594, 594, 594, 594, 594, 594, 594, 594, 594, 594, 594, 594, 594, 594, 594, 594, 594, 594, 594, 594, 594, 594, 594, 594, 594, 594, 594, 594, 594, 594, 594, 594, 594, 594, 594, 594, 594, 594, 594, 594, 594, 594, 594, 594, 594, 594, 594, 594, 594, 594, 594, 594, 594, 594, 594, 594, 594, 594, 594, 594, 594, 594, 594, 594, 594, 594, 594, 594, 594, 594, 594, 594, 594, 594, 594, 594, 594, 594, 594, 594, 594, 594, 594, 594, 594, 594, 594, 594, 594, 594, 594, 594, 594, 594, 594, 594, 594, 594, 594, 594, 594, 594, 594, 594, 594, 594, 594, 594, 594, 594, 594, 594, 594, 594, 594, 594, 594, 594, 594, 594, 594, 594, 594, 594, 594, 594, 594, 594, 594, 594, 594, 594, 594, 594, 594, 594, 594, 594, 594, 594, 594, 594, 594, 594, 594, 594, 594, 594, 594, 594, 594, 594, 594, 594, 594, 594, 594, 594, 594, 594, 594, 594, 594, 594, 594, 594, 594, 594, 594, 594, 594, 594, 594, 594, 594, 594, 594, 594, 594, 594, 594, 594, 594, 594, 594, 594, 594, 594, 594, 594, 594, 594, 594, 594, 594, 594, 594, 594, 594, 594, 594, 594, 
594, 594, 594, 594, 594, 594, 594, 594, 594, 594, 594, 594, 594, 594, 594, 594, 594, 594, 594, 594, 594, 594, 594, 594, 594, 594, 594, 594, 594, 594, 594, 594, 594, 594, 594, 594, 594, 594, 594, 594, 594, 594, 594, 594, 594, 594, 594, 594, 594, 594, 594, 594, 594, 594, 594, 594, 594, 594, 594, 594, 594, 594, 594, 594, 594, 594, 594, 594, 594, 594, 594, 594, 594, 594, 594, 594, 594, 594, 594, 594, 594, 594, 594, 594, 594, 594, 594, 594, 594, 594, 594, 594, 594, 594, 594, 594, 594, 594, 594, 594, 594, 594, 594, 594, 594, 594, 594, 594, 594, 594, 594, 594, 594, 594, 594, 594, 594, 594, 594, 594, 594, 594, 594, 594, 594, 594, 594, 594, 594, 594, 594, 594, 594, 594, 594, 594, 594, 594, 594, 594, 594, 594, 594, 594, 594, 594, 594, 594, 594, 594, 594, 594, 594, 594, 594, 594, 594, 594, 594, 594, 594, 594, 594, 594, 594, 594, 594, 594, 594, 594, 594, 594, 594, 594, 594, 594, 594, 594, 594, 594, 594, 594, 594, 594, 594, 594, 594, 594, 594, 594, 594, 594, 594, 594, 594, 594, 594, 594, 594, 594, 594, 594, 594, 594, 594, 594, 594, 594, 594, 594, 594, 594, 594, 594, 594, 594, 594, 594, 594, 594, 594, 594, 594, 594, 594, 594, 594, 594, 594, 594, 594, 594, 594, 594, 594, 594, 594, 594, 594, 594, 594, 594, 594, 594, 594, 594, 594, 594, 594, 594, 594, 594, 594, 594, 594, 594, 594, 594}, + {593, 593, 593, 593, 593, 593, 593, 593, 593, 593, 593, 593, 593, 593, 593, 593, 593, 593, 593, 593, 593, 593, 593, 593, 593, 593, 593, 593, 593, 593, 593, 593, 593, 593, 593, 593, 593, 593, 593, 593, 593, 593, 593, 593, 593, 593, 593, 593, 593, 593, 593, 593, 593, 593, 593, 593, 593, 593, 593, 593, 593, 593, 593, 593, 593, 593, 593, 593, 593, 593, 593, 593, 593, 593, 593, 593, 593, 593, 593, 593, 593, 593, 593, 593, 593, 593, 593, 593, 593, 593, 593, 593, 593, 593, 593, 593, 593, 593, 593, 593, 593, 593, 593, 593, 593, 593, 593, 593, 593, 593, 593, 593, 593, 593, 593, 593, 593, 593, 593, 593, 593, 593, 593, 593, 593, 593, 593, 593, 593, 593, 593, 593, 593, 593, 593, 593, 593, 593, 593, 593, 593, 593, 593, 593, 593, 593, 593, 593, 593, 593, 593, 593, 593, 593, 593, 593, 593, 593, 593, 593, 593, 593, 593, 593, 593, 593, 593, 593, 593, 593, 593, 593, 593, 593, 593, 593, 593, 593, 593, 593, 593, 593, 593, 593, 593, 593, 593, 593, 593, 593, 593, 593, 593, 593, 593, 593, 593, 593, 593, 593, 593, 593, 593, 593, 593, 593, 593, 593, 593, 593, 593, 593, 593, 593, 593, 593, 593, 593, 593, 593, 593, 593, 593, 593, 593, 593, 593, 593, 593, 593, 593, 593, 593, 593, 593, 593, 593, 593, 593, 593, 593, 593, 593, 593, 593, 593, 593, 593, 593, 593, 593, 593, 593, 593, 593, 593, 593, 593, 593, 593, 593, 593, 593, 593, 593, 593, 593, 593, 593, 593, 593, 593, 593, 593, 593, 593, 593, 593, 593, 593, 593, 593, 593, 593, 593, 593, 593, 593, 593, 593, 593, 593, 593, 593, 593, 593, 593, 593, 593, 593, 593, 593, 593, 593, 593, 593, 593, 593, 593, 593, 593, 593, 593, 593, 593, 593, 593, 593, 593, 593, 593, 593, 593, 593, 593, 593, 593, 593, 593, 593, 593, 593, 593, 593, 593, 593, 593, 593, 593, 593, 593, 593, 593, 593, 593, 593, 593, 593, 593, 593, 593, 593, 593, 593, 593, 593, 593, 593, 593, 593, 593, 593, 593, 593, 593, 593, 593, 593, 593, 593, 593, 593, 593, 593, 593, 593, 593, 593, 593, 593, 593, 593, 593, 593, 593, 593, 593, 593, 593, 593, 593, 593, 593, 593, 593, 593, 593, 593, 593, 593, 593, 593, 593, 593, 593, 593, 593, 593, 593, 593, 593, 593, 593, 593, 593, 593, 593, 593, 593, 593, 593, 593, 593, 593, 593, 593, 593, 593, 593, 593, 593, 593, 593, 593, 593, 593, 593, 593, 593, 593, 593, 593, 593, 593, 593, 593, 593, 593, 593, 593, 593, 593, 
593, 593, 593, 593, 593, 593, 593, 593, 593, 593, 593, 593, 593, 593, 593, 593, 593, 593, 593, 593, 593, 593, 593, 593, 593, 593, 593, 593, 593, 593, 593, 593, 593, 593, 593, 593, 593, 593, 593, 593, 593, 593, 593, 593, 593, 593, 593, 593, 593, 593, 593, 593, 593, 593, 593, 593, 593, 593, 593, 593, 593, 593, 593, 593, 593, 593, 593, 593, 593, 593, 593}, + {592, 592, 592, 592, 592, 592, 592, 592, 592, 592, 592, 592, 592, 592, 592, 592, 592, 592, 592, 592, 592, 592, 592, 592, 592, 592, 592, 592, 592, 592, 592, 592, 592, 592, 592, 592, 592, 592, 592, 592, 592, 592, 592, 592, 592, 592, 592, 592, 592, 592, 592, 592, 592, 592, 592, 592, 592, 592, 592, 592, 592, 592, 592, 592, 592, 592, 592, 592, 592, 592, 592, 592, 592, 592, 592, 592, 592, 592, 592, 592, 592, 592, 592, 592, 592, 592, 592, 592, 592, 592, 592, 592, 592, 592, 592, 592, 592, 592, 592, 592, 592, 592, 592, 592, 592, 592, 592, 592, 592, 592, 592, 592, 592, 592, 592, 592, 592, 592, 592, 592, 592, 592, 592, 592, 592, 592, 592, 592, 592, 592, 592, 592, 592, 592, 592, 592, 592, 592, 592, 592, 592, 592, 592, 592, 592, 592, 592, 592, 592, 592, 592, 592, 592, 592, 592, 592, 592, 592, 592, 592, 592, 592, 592, 592, 592, 592, 592, 592, 592, 592, 592, 592, 592, 592, 592, 592, 592, 592, 592, 592, 592, 592, 592, 592, 592, 592, 592, 592, 592, 592, 592, 592, 592, 592, 592, 592, 592, 592, 592, 592, 592, 592, 592, 592, 592, 592, 592, 592, 592, 592, 592, 592, 592, 592, 592, 592, 592, 592, 592, 592, 592, 592, 592, 592, 592, 592, 592, 592, 592, 592, 592, 592, 592, 592, 592, 592, 592, 592, 592, 592, 592, 592, 592, 592, 592, 592, 592, 592, 592, 592, 592, 592, 592, 592, 592, 592, 592, 592, 592, 592, 592, 592, 592, 592, 592, 592, 592, 592, 592, 592, 592, 592, 592, 592, 592, 592, 592, 592, 592, 592, 592, 592, 592, 592, 592, 592, 592, 592, 592, 592, 592, 592, 592, 592, 592, 592, 592, 592, 592, 592, 592, 592, 592, 592, 592, 592, 592, 592, 592, 592, 592, 592, 592, 592, 592, 592, 592, 592, 592, 592, 592, 592, 592, 592, 592, 592, 592, 592, 592, 592, 592, 592, 592, 592, 592, 592, 592, 592, 592, 592, 592, 592, 592, 592, 592, 592, 592, 592, 592, 592, 592, 592, 592, 592, 592, 592, 592, 592, 592, 592, 592, 592, 592, 592, 592, 592, 592, 592, 592, 592, 592, 592, 592, 592, 592, 592, 592, 592, 592, 592, 592, 592, 592, 592, 592, 592, 592, 592, 592, 592, 592, 592, 592, 592, 592, 592, 592, 592, 592, 592, 592, 592, 592, 592, 592, 592, 592, 592, 592, 592, 592, 592, 592, 592, 592, 592, 592, 592, 592, 592, 592, 592, 592, 592, 592, 592, 592, 592, 592, 592, 592, 592, 592, 592, 592, 592, 592, 592, 592, 592, 592, 592, 592, 592, 592, 592, 592, 592, 592, 592, 592, 592, 592, 592, 592, 592, 592, 592, 592, 592, 592, 592, 592, 592, 592, 592, 592, 592, 592, 592, 592, 592, 592, 592, 592, 592, 592, 592, 592, 592, 592, 592, 592, 592, 592, 592, 592, 592, 592, 592, 592, 592, 592, 592, 592, 592, 592, 592, 592, 592, 592, 592, 592, 592, 592, 592, 592, 592, 592, 592, 592, 592, 592, 592, 592, 592, 592, 592, 592, 592, 592, 592, 592}, + // 350 + {591, 591, 591, 591, 591, 591, 591, 591, 591, 591, 591, 591, 591, 591, 591, 591, 591, 591, 591, 591, 591, 591, 591, 591, 591, 591, 591, 591, 591, 591, 591, 591, 591, 591, 591, 591, 591, 591, 591, 591, 591, 591, 591, 591, 591, 591, 591, 591, 591, 591, 591, 591, 591, 591, 591, 591, 591, 591, 591, 591, 591, 591, 591, 591, 591, 591, 591, 591, 591, 591, 591, 591, 591, 591, 591, 591, 591, 591, 591, 591, 591, 591, 591, 591, 591, 591, 591, 591, 591, 591, 591, 591, 591, 591, 591, 591, 591, 591, 591, 591, 591, 591, 591, 591, 591, 591, 591, 591, 591, 591, 591, 591, 591, 
591, 591, 591, 591, 591, 591, 591, 591, 591, 591, 591, 591, 591, 591, 591, 591, 591, 591, 591, 591, 591, 591, 591, 591, 591, 591, 591, 591, 591, 591, 591, 591, 591, 591, 591, 591, 591, 591, 591, 591, 591, 591, 591, 591, 591, 591, 591, 591, 591, 591, 591, 591, 591, 591, 591, 591, 591, 591, 591, 591, 591, 591, 591, 591, 591, 591, 591, 591, 591, 591, 591, 591, 591, 591, 591, 591, 591, 591, 591, 591, 591, 591, 591, 591, 591, 591, 591, 591, 591, 591, 591, 591, 591, 591, 591, 591, 591, 591, 591, 591, 591, 591, 591, 591, 591, 591, 591, 591, 591, 591, 591, 591, 591, 591, 591, 591, 591, 591, 591, 591, 591, 591, 591, 591, 591, 591, 591, 591, 591, 591, 591, 591, 591, 591, 591, 591, 591, 591, 591, 591, 591, 591, 591, 591, 591, 591, 591, 591, 591, 591, 591, 591, 591, 591, 591, 591, 591, 591, 591, 591, 591, 591, 591, 591, 591, 591, 591, 591, 591, 591, 591, 591, 591, 591, 591, 591, 591, 591, 591, 591, 591, 591, 591, 591, 591, 591, 591, 591, 591, 591, 591, 591, 591, 591, 591, 591, 591, 591, 591, 591, 591, 591, 591, 591, 591, 591, 591, 591, 591, 591, 591, 591, 591, 591, 591, 591, 591, 591, 591, 591, 591, 591, 591, 591, 591, 591, 591, 591, 591, 591, 591, 591, 591, 591, 591, 591, 591, 591, 591, 591, 591, 591, 591, 591, 591, 591, 591, 591, 591, 591, 591, 591, 591, 591, 591, 591, 591, 591, 591, 591, 591, 591, 591, 591, 591, 591, 591, 591, 591, 591, 591, 591, 591, 591, 591, 591, 591, 591, 591, 591, 591, 591, 591, 591, 591, 591, 591, 591, 591, 591, 591, 591, 591, 591, 591, 591, 591, 591, 591, 591, 591, 591, 591, 591, 591, 591, 591, 591, 591, 591, 591, 591, 591, 591, 591, 591, 591, 591, 591, 591, 591, 591, 591, 591, 591, 591, 591, 591, 591, 591, 591, 591, 591, 591, 591, 591, 591, 591, 591, 591, 591, 591, 591, 591, 591, 591, 591, 591, 591, 591, 591, 591, 591, 591, 591, 591, 591, 591, 591, 591, 591, 591, 591, 591, 591, 591, 591, 591, 591, 591, 591, 591, 591, 591, 591, 591, 591, 591, 591, 591, 591, 591, 591, 591, 591, 591, 591, 591, 591, 591, 591, 591, 591, 591, 591, 591, 591, 591, 591, 591, 591, 591, 591, 591, 591, 591, 591, 591, 591, 591}, + {590, 590, 590, 590, 590, 590, 590, 590, 590, 590, 590, 590, 590, 590, 590, 590, 590, 590, 590, 590, 590, 590, 590, 590, 590, 590, 590, 590, 590, 590, 590, 590, 590, 590, 590, 590, 590, 590, 590, 590, 590, 590, 590, 590, 590, 590, 590, 590, 590, 590, 590, 590, 590, 590, 590, 590, 590, 590, 590, 590, 590, 590, 590, 590, 590, 590, 590, 590, 590, 590, 590, 590, 590, 590, 590, 590, 590, 590, 590, 590, 590, 590, 590, 590, 590, 590, 590, 590, 590, 590, 590, 590, 590, 590, 590, 590, 590, 590, 590, 590, 590, 590, 590, 590, 590, 590, 590, 590, 590, 590, 590, 590, 590, 590, 590, 590, 590, 590, 590, 590, 590, 590, 590, 590, 590, 590, 590, 590, 590, 590, 590, 590, 590, 590, 590, 590, 590, 590, 590, 590, 590, 590, 590, 590, 590, 590, 590, 590, 590, 590, 590, 590, 590, 590, 590, 590, 590, 590, 590, 590, 590, 590, 590, 590, 590, 590, 590, 590, 590, 590, 590, 590, 590, 590, 590, 590, 590, 590, 590, 590, 590, 590, 590, 590, 590, 590, 590, 590, 590, 590, 590, 590, 590, 590, 590, 590, 590, 590, 590, 590, 590, 590, 590, 590, 590, 590, 590, 590, 590, 590, 590, 590, 590, 590, 590, 590, 590, 590, 590, 590, 590, 590, 590, 590, 590, 590, 590, 590, 590, 590, 590, 590, 590, 590, 590, 590, 590, 590, 590, 590, 590, 590, 590, 590, 590, 590, 590, 590, 590, 590, 590, 590, 590, 590, 590, 590, 590, 590, 590, 590, 590, 590, 590, 590, 590, 590, 590, 590, 590, 590, 590, 590, 590, 590, 590, 590, 590, 590, 590, 590, 590, 590, 590, 590, 590, 590, 590, 590, 590, 590, 590, 590, 590, 590, 590, 590, 590, 590, 590, 590, 
590, 590, 590, 590, 590, 590, 590, 590, 590, 590, 590, 590, 590, 590, 590, 590, 590, 590, 590, 590, 590, 590, 590, 590, 590, 590, 590, 590, 590, 590, 590, 590, 590, 590, 590, 590, 590, 590, 590, 590, 590, 590, 590, 590, 590, 590, 590, 590, 590, 590, 590, 590, 590, 590, 590, 590, 590, 590, 590, 590, 590, 590, 590, 590, 590, 590, 590, 590, 590, 590, 590, 590, 590, 590, 590, 590, 590, 590, 590, 590, 590, 590, 590, 590, 590, 590, 590, 590, 590, 590, 590, 590, 590, 590, 590, 590, 590, 590, 590, 590, 590, 590, 590, 590, 590, 590, 590, 590, 590, 590, 590, 590, 590, 590, 590, 590, 590, 590, 590, 590, 590, 590, 590, 590, 590, 590, 590, 590, 590, 590, 590, 590, 590, 590, 590, 590, 590, 590, 590, 590, 590, 590, 590, 590, 590, 590, 590, 590, 590, 590, 590, 590, 590, 590, 590, 590, 590, 590, 590, 590, 590, 590, 590, 590, 590, 590, 590, 590, 590, 590, 590, 590, 590, 590, 590, 590, 590, 590, 590, 590, 590, 590, 590, 590, 590, 590, 590, 590, 590, 590, 590, 590, 590, 590, 590, 590, 590, 590, 590, 590, 590, 590, 590, 590, 590, 590, 590, 590, 590, 590, 590, 590, 590, 590, 590, 590, 590, 590, 590, 590, 590, 590, 590}, + {589, 589, 589, 589, 589, 589, 589, 589, 589, 589, 589, 589, 589, 589, 589, 589, 589, 589, 589, 589, 589, 589, 589, 589, 589, 589, 589, 589, 589, 589, 589, 589, 589, 589, 589, 589, 589, 589, 589, 589, 589, 589, 589, 589, 589, 589, 589, 589, 589, 589, 589, 589, 589, 589, 589, 589, 589, 589, 589, 589, 589, 589, 589, 589, 589, 589, 589, 589, 589, 589, 589, 589, 589, 589, 589, 589, 589, 589, 589, 589, 589, 589, 589, 589, 589, 589, 589, 589, 589, 589, 589, 589, 589, 589, 589, 589, 589, 589, 589, 589, 589, 589, 589, 589, 589, 589, 589, 589, 589, 589, 589, 589, 589, 589, 589, 589, 589, 589, 589, 589, 589, 589, 589, 589, 589, 589, 589, 589, 589, 589, 589, 589, 589, 589, 589, 589, 589, 589, 589, 589, 589, 589, 589, 589, 589, 589, 589, 589, 589, 589, 589, 589, 589, 589, 589, 589, 589, 589, 589, 589, 589, 589, 589, 589, 589, 589, 589, 589, 589, 589, 589, 589, 589, 589, 589, 589, 589, 589, 589, 589, 589, 589, 589, 589, 589, 589, 589, 589, 589, 589, 589, 589, 589, 589, 589, 589, 589, 589, 589, 589, 589, 589, 589, 589, 589, 589, 589, 589, 589, 589, 589, 589, 589, 589, 589, 589, 589, 589, 589, 589, 589, 589, 589, 589, 589, 589, 589, 589, 589, 589, 589, 589, 589, 589, 589, 589, 589, 589, 589, 589, 589, 589, 589, 589, 589, 589, 589, 589, 589, 589, 589, 589, 589, 589, 589, 589, 589, 589, 589, 589, 589, 589, 589, 589, 589, 589, 589, 589, 589, 589, 589, 589, 589, 589, 589, 589, 589, 589, 589, 589, 589, 589, 589, 589, 589, 589, 589, 589, 589, 589, 589, 589, 589, 589, 589, 589, 589, 589, 589, 589, 589, 589, 589, 589, 589, 589, 589, 589, 589, 589, 589, 589, 589, 589, 589, 589, 589, 589, 589, 589, 589, 589, 589, 589, 589, 589, 589, 589, 589, 589, 589, 589, 589, 589, 589, 589, 589, 589, 589, 589, 589, 589, 589, 589, 589, 589, 589, 589, 589, 589, 589, 589, 589, 589, 589, 589, 589, 589, 589, 589, 589, 589, 589, 589, 589, 589, 589, 589, 589, 589, 589, 589, 589, 589, 589, 589, 589, 589, 589, 589, 589, 589, 589, 589, 589, 589, 589, 589, 589, 589, 589, 589, 589, 589, 589, 589, 589, 589, 589, 589, 589, 589, 589, 589, 589, 589, 589, 589, 589, 589, 589, 589, 589, 589, 589, 589, 589, 589, 589, 589, 589, 589, 589, 589, 589, 589, 589, 589, 589, 589, 589, 589, 589, 589, 589, 589, 589, 589, 589, 589, 589, 589, 589, 589, 589, 589, 589, 589, 589, 589, 589, 589, 589, 589, 589, 589, 589, 589, 589, 589, 589, 589, 589, 589, 589, 589, 589, 589, 589, 589, 589, 589, 589, 589, 589, 589, 589, 589, 589, 589, 589, 589, 589, 589, 589, 589, 589, 
589, 589, 589, 589, 589, 589, 589, 589, 589, 589, 589, 589, 589, 589, 589, 589, 589, 589, 589, 589, 589, 589, 589, 589, 589, 589, 589, 589, 589, 589, 589, 589, 589, 589, 589, 589}, + {588, 588, 588, 588, 588, 588, 588, 588, 588, 588, 588, 588, 588, 588, 588, 588, 588, 588, 588, 588, 588, 588, 588, 588, 588, 588, 588, 588, 588, 588, 588, 588, 588, 588, 588, 588, 588, 588, 588, 588, 588, 588, 588, 588, 588, 588, 588, 588, 588, 588, 588, 588, 588, 588, 588, 588, 588, 588, 588, 588, 588, 588, 588, 588, 588, 588, 588, 588, 588, 588, 588, 588, 588, 588, 588, 588, 588, 588, 588, 588, 588, 588, 588, 588, 588, 588, 588, 588, 588, 588, 588, 588, 588, 588, 588, 588, 588, 588, 588, 588, 588, 588, 588, 588, 588, 588, 588, 588, 588, 588, 588, 588, 588, 588, 588, 588, 588, 588, 588, 588, 588, 588, 588, 588, 588, 588, 588, 588, 588, 588, 588, 588, 588, 588, 588, 588, 588, 588, 588, 588, 588, 588, 588, 588, 588, 588, 588, 588, 588, 588, 588, 588, 588, 588, 588, 588, 588, 588, 588, 588, 588, 588, 588, 588, 588, 588, 588, 588, 588, 588, 588, 588, 588, 588, 588, 588, 588, 588, 588, 588, 588, 588, 588, 588, 588, 588, 588, 588, 588, 588, 588, 588, 588, 588, 588, 588, 588, 588, 588, 588, 588, 588, 588, 588, 588, 588, 588, 588, 588, 588, 588, 588, 588, 588, 588, 588, 588, 588, 588, 588, 588, 588, 588, 588, 588, 588, 588, 588, 588, 588, 588, 588, 588, 588, 588, 588, 588, 588, 588, 588, 588, 588, 588, 588, 588, 588, 588, 588, 588, 588, 588, 588, 588, 588, 588, 588, 588, 588, 588, 588, 588, 588, 588, 588, 588, 588, 588, 588, 588, 588, 588, 588, 588, 588, 588, 588, 588, 588, 588, 588, 588, 588, 588, 588, 588, 588, 588, 588, 588, 588, 588, 588, 588, 588, 588, 588, 588, 588, 588, 588, 588, 588, 588, 588, 588, 588, 588, 588, 588, 588, 588, 588, 588, 588, 588, 588, 588, 588, 588, 588, 588, 588, 588, 588, 588, 588, 588, 588, 588, 588, 588, 588, 588, 588, 588, 588, 588, 588, 588, 588, 588, 588, 588, 588, 588, 588, 588, 588, 588, 588, 588, 588, 588, 588, 588, 588, 588, 588, 588, 588, 588, 588, 588, 588, 588, 588, 588, 588, 588, 588, 588, 588, 588, 588, 588, 588, 588, 588, 588, 588, 588, 588, 588, 588, 588, 588, 588, 588, 588, 588, 588, 588, 588, 588, 588, 588, 588, 588, 588, 588, 588, 588, 588, 588, 588, 588, 588, 588, 588, 588, 588, 588, 588, 588, 588, 588, 588, 588, 588, 588, 588, 588, 588, 588, 588, 588, 588, 588, 588, 588, 588, 588, 588, 588, 588, 588, 588, 588, 588, 588, 588, 588, 588, 588, 588, 588, 588, 588, 588, 588, 588, 588, 588, 588, 588, 588, 588, 588, 588, 588, 588, 588, 588, 588, 588, 588, 588, 588, 588, 588, 588, 588, 588, 588, 588, 588, 588, 588, 588, 588, 588, 588, 588, 588, 588, 588, 588, 588, 588, 588, 588, 588, 588, 588, 588, 588, 588, 588, 588, 588, 588, 588, 588, 588, 588, 588, 588, 588, 588, 588, 588, 588, 588, 588, 588, 588, 588, 588, 588, 588, 588, 588, 588}, + {587, 587, 587, 587, 587, 587, 587, 587, 587, 587, 587, 587, 587, 587, 587, 587, 587, 587, 587, 587, 587, 587, 587, 587, 587, 587, 587, 587, 587, 587, 587, 587, 587, 587, 587, 587, 587, 587, 587, 587, 587, 587, 587, 587, 587, 587, 587, 587, 587, 587, 587, 587, 587, 587, 587, 587, 587, 587, 587, 587, 587, 587, 587, 587, 587, 587, 587, 587, 587, 587, 587, 587, 587, 587, 587, 587, 587, 587, 587, 587, 587, 587, 587, 587, 587, 587, 587, 587, 587, 587, 587, 587, 587, 587, 587, 587, 587, 587, 587, 587, 587, 587, 587, 587, 587, 587, 587, 587, 587, 587, 587, 587, 587, 587, 587, 587, 587, 587, 587, 587, 587, 587, 587, 587, 587, 587, 587, 587, 587, 587, 587, 587, 587, 587, 587, 587, 587, 587, 587, 587, 587, 587, 587, 587, 587, 587, 587, 587, 587, 587, 
587, 587, 587, 587, 587, 587, 587, 587, 587, 587, 587, 587, 587, 587, 587, 587, 587, 587, 587, 587, 587, 587, 587, 587, 587, 587, 587, 587, 587, 587, 587, 587, 587, 587, 587, 587, 587, 587, 587, 587, 587, 587, 587, 587, 587, 587, 587, 587, 587, 587, 587, 587, 587, 587, 587, 587, 587, 587, 587, 587, 587, 587, 587, 587, 587, 587, 587, 587, 587, 587, 587, 587, 587, 587, 587, 587, 587, 587, 587, 587, 587, 587, 587, 587, 587, 587, 587, 587, 587, 587, 587, 587, 587, 587, 587, 587, 587, 587, 587, 587, 587, 587, 587, 587, 587, 587, 587, 587, 587, 587, 587, 587, 587, 587, 587, 587, 587, 587, 587, 587, 587, 587, 587, 587, 587, 587, 587, 587, 587, 587, 587, 587, 587, 587, 587, 587, 587, 587, 587, 587, 587, 587, 587, 587, 587, 587, 587, 587, 587, 587, 587, 587, 587, 587, 587, 587, 587, 587, 587, 587, 587, 587, 587, 587, 587, 587, 587, 587, 587, 587, 587, 587, 587, 587, 587, 587, 587, 587, 587, 587, 587, 587, 587, 587, 587, 587, 587, 587, 587, 587, 587, 587, 587, 587, 587, 587, 587, 587, 587, 587, 587, 587, 587, 587, 587, 587, 587, 587, 587, 587, 587, 587, 587, 587, 587, 587, 587, 587, 587, 587, 587, 587, 587, 587, 587, 587, 587, 587, 587, 587, 587, 587, 587, 587, 587, 587, 587, 587, 587, 587, 587, 587, 587, 587, 587, 587, 587, 587, 587, 587, 587, 587, 587, 587, 587, 587, 587, 587, 587, 587, 587, 587, 587, 587, 587, 587, 587, 587, 587, 587, 587, 587, 587, 587, 587, 587, 587, 587, 587, 587, 587, 587, 587, 587, 587, 587, 587, 587, 587, 587, 587, 587, 587, 587, 587, 587, 587, 587, 587, 587, 587, 587, 587, 587, 587, 587, 587, 587, 587, 587, 587, 587, 587, 587, 587, 587, 587, 587, 587, 587, 587, 587, 587, 587, 587, 587, 587, 587, 587, 587, 587, 587, 587, 587, 587, 587, 587, 587, 587, 587, 587, 587, 587, 587, 587, 587, 587, 587, 587, 587, 587, 587, 587, 587, 587, 587, 587, 587, 587, 587, 587, 587, 587, 587, 587, 587, 587, 587, 587, 587, 587, 587, 587}, + // 355 + {586, 586, 586, 586, 586, 586, 586, 586, 586, 586, 586, 586, 586, 586, 586, 586, 586, 586, 586, 586, 586, 586, 586, 586, 586, 586, 586, 586, 586, 586, 586, 586, 586, 586, 586, 586, 586, 586, 586, 586, 586, 586, 586, 586, 586, 586, 586, 586, 586, 586, 586, 586, 586, 586, 586, 586, 586, 586, 586, 586, 586, 586, 586, 586, 586, 586, 586, 586, 586, 586, 586, 586, 586, 586, 586, 586, 586, 586, 586, 586, 586, 586, 586, 586, 586, 586, 586, 586, 586, 586, 586, 586, 586, 586, 586, 586, 586, 586, 586, 586, 586, 586, 586, 586, 586, 586, 586, 586, 586, 586, 586, 586, 586, 586, 586, 586, 586, 586, 586, 586, 586, 586, 586, 586, 586, 586, 586, 586, 586, 586, 586, 586, 586, 586, 586, 586, 586, 586, 586, 586, 586, 586, 586, 586, 586, 586, 586, 586, 586, 586, 586, 586, 586, 586, 586, 586, 586, 586, 586, 586, 586, 586, 586, 586, 586, 586, 586, 586, 586, 586, 586, 586, 586, 586, 586, 586, 586, 586, 586, 586, 586, 586, 586, 586, 586, 586, 586, 586, 586, 586, 586, 586, 586, 586, 586, 586, 586, 586, 586, 586, 586, 586, 586, 586, 586, 586, 586, 586, 586, 586, 586, 586, 586, 586, 586, 586, 586, 586, 586, 586, 586, 586, 586, 586, 586, 586, 586, 586, 586, 586, 586, 586, 586, 586, 586, 586, 586, 586, 586, 586, 586, 586, 586, 586, 586, 586, 586, 586, 586, 586, 586, 586, 586, 586, 586, 586, 586, 586, 586, 586, 586, 586, 586, 586, 586, 586, 586, 586, 586, 586, 586, 586, 586, 586, 586, 586, 586, 586, 586, 586, 586, 586, 586, 586, 586, 586, 586, 586, 586, 586, 586, 586, 586, 586, 586, 586, 586, 586, 586, 586, 586, 586, 586, 586, 586, 586, 586, 586, 586, 586, 586, 586, 586, 586, 586, 586, 586, 586, 586, 586, 586, 586, 586, 586, 586, 586, 586, 586, 586, 586, 586, 586, 586, 586, 586, 
586, 586, 586, 586, 586, 586, 586, 586, 586, 586, 586, 586, 586, 586, 586, 586, 586, 586, 586, 586, 586, 586, 586, 586, 586, 586, 586, 586, 586, 586, 586, 586, 586, 586, 586, 586, 586, 586, 586, 586, 586, 586, 586, 586, 586, 586, 586, 586, 586, 586, 586, 586, 586, 586, 586, 586, 586, 586, 586, 586, 586, 586, 586, 586, 586, 586, 586, 586, 586, 586, 586, 586, 586, 586, 586, 586, 586, 586, 586, 586, 586, 586, 586, 586, 586, 586, 586, 586, 586, 586, 586, 586, 586, 586, 586, 586, 586, 586, 586, 586, 586, 586, 586, 586, 586, 586, 586, 586, 586, 586, 586, 586, 586, 586, 586, 586, 586, 586, 586, 586, 586, 586, 586, 586, 586, 586, 586, 586, 586, 586, 586, 586, 586, 586, 586, 586, 586, 586, 586, 586, 586, 586, 586, 586, 586, 586, 586, 586, 586, 586, 586, 586, 586, 586, 586, 586, 586, 586, 586, 586, 586, 586, 586, 586, 586, 586, 586, 586, 586, 586, 586, 586, 586, 586, 586, 586, 586, 586, 586, 586, 586, 586, 586, 586, 586, 586, 586, 586}, + {585, 585, 585, 585, 585, 585, 585, 585, 585, 585, 585, 585, 585, 585, 585, 585, 585, 585, 585, 585, 585, 585, 585, 585, 585, 585, 585, 585, 585, 585, 585, 585, 585, 585, 585, 585, 585, 585, 585, 585, 585, 585, 585, 585, 585, 585, 585, 585, 585, 585, 585, 585, 585, 585, 585, 585, 585, 585, 585, 585, 585, 585, 585, 585, 585, 585, 585, 585, 585, 585, 585, 585, 585, 585, 585, 585, 585, 585, 585, 585, 585, 585, 585, 585, 585, 585, 585, 585, 585, 585, 585, 585, 585, 585, 585, 585, 585, 585, 585, 585, 585, 585, 585, 585, 585, 585, 585, 585, 585, 585, 585, 585, 585, 585, 585, 585, 585, 585, 585, 585, 585, 585, 585, 585, 585, 585, 585, 585, 585, 585, 585, 585, 585, 585, 585, 585, 585, 585, 585, 585, 585, 585, 585, 585, 585, 585, 585, 585, 585, 585, 585, 585, 585, 585, 585, 585, 585, 585, 585, 585, 585, 585, 585, 585, 585, 585, 585, 585, 585, 585, 585, 585, 585, 585, 585, 585, 585, 585, 585, 585, 585, 585, 585, 585, 585, 585, 585, 585, 585, 585, 585, 585, 585, 585, 585, 585, 585, 585, 585, 585, 585, 585, 585, 585, 585, 585, 585, 585, 585, 585, 585, 585, 585, 585, 585, 585, 585, 585, 585, 585, 585, 585, 585, 585, 585, 585, 585, 585, 585, 585, 585, 585, 585, 585, 585, 585, 585, 585, 585, 585, 585, 585, 585, 585, 585, 585, 585, 585, 585, 585, 585, 585, 585, 585, 585, 585, 585, 585, 585, 585, 585, 585, 585, 585, 585, 585, 585, 585, 585, 585, 585, 585, 585, 585, 585, 585, 585, 585, 585, 585, 585, 585, 585, 585, 585, 585, 585, 585, 585, 585, 585, 585, 585, 585, 585, 585, 585, 585, 585, 585, 585, 585, 585, 585, 585, 585, 585, 585, 585, 585, 585, 585, 585, 585, 585, 585, 585, 585, 585, 585, 585, 585, 585, 585, 585, 585, 585, 585, 585, 585, 585, 585, 585, 585, 585, 585, 585, 585, 585, 585, 585, 585, 585, 585, 585, 585, 585, 585, 585, 585, 585, 585, 585, 585, 585, 585, 585, 585, 585, 585, 585, 585, 585, 585, 585, 585, 585, 585, 585, 585, 585, 585, 585, 585, 585, 585, 585, 585, 585, 585, 585, 585, 585, 585, 585, 585, 585, 585, 585, 585, 585, 585, 585, 585, 585, 585, 585, 585, 585, 585, 585, 585, 585, 585, 585, 585, 585, 585, 585, 585, 585, 585, 585, 585, 585, 585, 585, 585, 585, 585, 585, 585, 585, 585, 585, 585, 585, 585, 585, 585, 585, 585, 585, 585, 585, 585, 585, 585, 585, 585, 585, 585, 585, 585, 585, 585, 585, 585, 585, 585, 585, 585, 585, 585, 585, 585, 585, 585, 585, 585, 585, 585, 585, 585, 585, 585, 585, 585, 585, 585, 585, 585, 585, 585, 585, 585, 585, 585, 585, 585, 585, 585, 585, 585, 585, 585, 585, 585, 585, 585, 585, 585, 585, 585, 585, 585, 585, 585, 585, 585, 585, 585, 585, 585, 585, 585, 585, 585, 585, 585, 585, 585, 585, 585, 585, 585, 585, 585, 585, 585, 585, 585, 
585}, + {584, 584, 584, 584, 584, 584, 584, 584, 584, 584, 584, 584, 584, 584, 584, 584, 584, 584, 584, 584, 584, 584, 584, 584, 584, 584, 584, 584, 584, 584, 584, 584, 584, 584, 584, 584, 584, 584, 584, 584, 584, 584, 584, 584, 584, 584, 584, 584, 584, 584, 584, 584, 584, 584, 584, 584, 584, 584, 584, 584, 584, 584, 584, 584, 584, 584, 584, 584, 584, 584, 584, 584, 584, 584, 584, 584, 584, 584, 584, 584, 584, 584, 584, 584, 584, 584, 584, 584, 584, 584, 584, 584, 584, 584, 584, 584, 584, 584, 584, 584, 584, 584, 584, 584, 584, 584, 584, 584, 584, 584, 584, 584, 584, 584, 584, 584, 584, 584, 584, 584, 584, 584, 584, 584, 584, 584, 584, 584, 584, 584, 584, 584, 584, 584, 584, 584, 584, 584, 584, 584, 584, 584, 584, 584, 584, 584, 584, 584, 584, 584, 584, 584, 584, 584, 584, 584, 584, 584, 584, 584, 584, 584, 584, 584, 584, 584, 584, 584, 584, 584, 584, 584, 584, 584, 584, 584, 584, 584, 584, 584, 584, 584, 584, 584, 584, 584, 584, 584, 584, 584, 584, 584, 584, 584, 584, 584, 584, 584, 584, 584, 584, 584, 584, 584, 584, 584, 584, 584, 584, 584, 584, 584, 584, 584, 584, 584, 584, 584, 584, 584, 584, 584, 584, 584, 584, 584, 584, 584, 584, 584, 584, 584, 584, 584, 584, 584, 584, 584, 584, 584, 584, 584, 584, 584, 584, 584, 584, 584, 584, 584, 584, 584, 584, 584, 584, 584, 584, 584, 584, 584, 584, 584, 584, 584, 584, 584, 584, 584, 584, 584, 584, 584, 584, 584, 584, 584, 584, 584, 584, 584, 584, 584, 584, 584, 584, 584, 584, 584, 584, 584, 584, 584, 584, 584, 584, 584, 584, 584, 584, 584, 584, 584, 584, 584, 584, 584, 584, 584, 584, 584, 584, 584, 584, 584, 584, 584, 584, 584, 584, 584, 584, 584, 584, 584, 584, 584, 584, 584, 584, 584, 584, 584, 584, 584, 584, 584, 584, 584, 584, 584, 584, 584, 584, 584, 584, 584, 584, 584, 584, 584, 584, 584, 584, 584, 584, 584, 584, 584, 584, 584, 584, 584, 584, 584, 584, 584, 584, 584, 584, 584, 584, 584, 584, 584, 584, 584, 584, 584, 584, 584, 584, 584, 584, 584, 584, 584, 584, 584, 584, 584, 584, 584, 584, 584, 584, 584, 584, 584, 584, 584, 584, 584, 584, 584, 584, 584, 584, 584, 584, 584, 584, 584, 584, 584, 584, 584, 584, 584, 584, 584, 584, 584, 584, 584, 584, 584, 584, 584, 584, 584, 584, 584, 584, 584, 584, 584, 584, 584, 584, 584, 584, 584, 584, 584, 584, 584, 584, 584, 584, 584, 584, 584, 584, 584, 584, 584, 584, 584, 584, 584, 584, 584, 584, 584, 584, 584, 584, 584, 584, 584, 584, 584, 584, 584, 584, 584, 584, 584, 584, 584, 584, 584, 584, 584, 584, 584, 584, 584, 584, 584, 584, 584, 584, 584, 584, 584, 584, 584, 584, 584, 584, 584, 584, 584, 584, 584, 584, 584, 584, 584, 584, 584, 584, 584, 584, 584, 584, 584, 584, 584, 584, 584, 584}, + {583, 583, 583, 583, 583, 583, 583, 583, 583, 583, 583, 583, 583, 583, 583, 583, 583, 583, 583, 583, 583, 583, 583, 583, 583, 583, 583, 583, 583, 583, 583, 583, 583, 583, 583, 583, 583, 583, 583, 583, 583, 583, 583, 583, 583, 583, 583, 583, 583, 583, 583, 583, 583, 583, 583, 583, 583, 583, 583, 583, 583, 583, 583, 583, 583, 583, 583, 583, 583, 583, 583, 583, 583, 583, 583, 583, 583, 583, 583, 583, 583, 583, 583, 583, 583, 583, 583, 583, 583, 583, 583, 583, 583, 583, 583, 583, 583, 583, 583, 583, 583, 583, 583, 583, 583, 583, 583, 583, 583, 583, 583, 583, 583, 583, 583, 583, 583, 583, 583, 583, 583, 583, 583, 583, 583, 583, 583, 583, 583, 583, 583, 583, 583, 583, 583, 583, 583, 583, 583, 583, 583, 583, 583, 583, 583, 583, 583, 583, 583, 583, 583, 583, 583, 583, 583, 583, 583, 583, 583, 583, 583, 583, 583, 583, 583, 583, 583, 583, 583, 583, 583, 583, 583, 583, 583, 583, 583, 583, 583, 583, 583, 583, 583, 583, 583, 
583, 583, 583, 583, 583, 583, 583, 583, 583, 583, 583, 583, 583, 583, 583, 583, 583, 583, 583, 583, 583, 583, 583, 583, 583, 583, 583, 583, 583, 583, 583, 583, 583, 583, 583, 583, 583, 583, 583, 583, 583, 583, 583, 583, 583, 583, 583, 583, 583, 583, 583, 583, 583, 583, 583, 583, 583, 583, 583, 583, 583, 583, 583, 583, 583, 583, 583, 583, 583, 583, 583, 583, 583, 583, 583, 583, 583, 583, 583, 583, 583, 583, 583, 583, 583, 583, 583, 583, 583, 583, 583, 583, 583, 583, 583, 583, 583, 583, 583, 583, 583, 583, 583, 583, 583, 583, 583, 583, 583, 583, 583, 583, 583, 583, 583, 583, 583, 583, 583, 583, 583, 583, 583, 583, 583, 583, 583, 583, 583, 583, 583, 583, 583, 583, 583, 583, 583, 583, 583, 583, 583, 583, 583, 583, 583, 583, 583, 583, 583, 583, 583, 583, 583, 583, 583, 583, 583, 583, 583, 583, 583, 583, 583, 583, 583, 583, 583, 583, 583, 583, 583, 583, 583, 583, 583, 583, 583, 583, 583, 583, 583, 583, 583, 583, 583, 583, 583, 583, 583, 583, 583, 583, 583, 583, 583, 583, 583, 583, 583, 583, 583, 583, 583, 583, 583, 583, 583, 583, 583, 583, 583, 583, 583, 583, 583, 583, 583, 583, 583, 583, 583, 583, 583, 583, 583, 583, 583, 583, 583, 583, 583, 583, 583, 583, 583, 583, 583, 583, 583, 583, 583, 583, 583, 583, 583, 583, 583, 583, 583, 583, 583, 583, 583, 583, 583, 583, 583, 583, 583, 583, 583, 583, 583, 583, 583, 583, 583, 583, 583, 583, 583, 583, 583, 583, 583, 583, 583, 583, 583, 583, 583, 583, 583, 583, 583, 583, 583, 583, 583, 583, 583, 583, 583, 583, 583, 583, 583, 583, 583, 583, 583, 583, 583, 583, 583, 583, 583, 583, 583, 583, 583, 583, 583, 583, 583, 583, 583, 583, 583, 583, 583, 583, 583, 583, 583, 583, 583, 583, 583, 583, 583, 583, 583, 583, 583, 583, 583, 583}, + {582, 582, 582, 582, 582, 582, 582, 582, 582, 582, 582, 582, 582, 582, 582, 582, 582, 582, 582, 582, 582, 582, 582, 582, 582, 582, 582, 582, 582, 582, 582, 582, 582, 582, 582, 582, 582, 582, 582, 582, 582, 582, 582, 582, 582, 582, 582, 582, 582, 582, 582, 582, 582, 582, 582, 582, 582, 582, 582, 582, 582, 582, 582, 582, 582, 582, 582, 582, 582, 582, 582, 582, 582, 582, 582, 582, 582, 582, 582, 582, 582, 582, 582, 582, 582, 582, 582, 582, 582, 582, 582, 582, 582, 582, 582, 582, 582, 582, 582, 582, 582, 582, 582, 582, 582, 582, 582, 582, 582, 582, 582, 582, 582, 582, 582, 582, 582, 582, 582, 582, 582, 582, 582, 582, 582, 582, 582, 582, 582, 582, 582, 582, 582, 582, 582, 582, 582, 582, 582, 582, 582, 582, 582, 582, 582, 582, 582, 582, 582, 582, 582, 582, 582, 582, 582, 582, 582, 582, 582, 582, 582, 582, 582, 582, 582, 582, 582, 582, 582, 582, 582, 582, 582, 582, 582, 582, 582, 582, 582, 582, 582, 582, 582, 582, 582, 582, 582, 582, 582, 582, 582, 582, 582, 582, 582, 582, 582, 582, 582, 582, 582, 582, 582, 582, 582, 582, 582, 582, 582, 582, 582, 582, 582, 582, 582, 582, 582, 582, 582, 582, 582, 582, 582, 582, 582, 582, 582, 582, 582, 582, 582, 582, 582, 582, 582, 582, 582, 582, 582, 582, 582, 582, 582, 582, 582, 582, 582, 582, 582, 582, 582, 582, 582, 582, 582, 582, 582, 582, 582, 582, 582, 582, 582, 582, 582, 582, 582, 582, 582, 582, 582, 582, 582, 582, 582, 582, 582, 582, 582, 582, 582, 582, 582, 582, 582, 582, 582, 582, 582, 582, 582, 582, 582, 582, 582, 582, 582, 582, 582, 582, 582, 582, 582, 582, 582, 582, 582, 582, 582, 582, 582, 582, 582, 582, 582, 582, 582, 582, 582, 582, 582, 582, 582, 582, 582, 582, 582, 582, 582, 582, 582, 582, 582, 582, 582, 582, 582, 582, 582, 582, 582, 582, 582, 582, 582, 582, 582, 582, 582, 582, 582, 582, 582, 582, 582, 582, 582, 582, 582, 582, 582, 582, 582, 582, 582, 582, 582, 582, 582, 582, 582, 582, 
582, 582, 582, 582, 582, 582, 582, 582, 582, 582, 582, 582, 582, 582, 582, 582, 582, 582, 582, 582, 582, 582, 582, 582, 582, 582, 582, 582, 582, 582, 582, 582, 582, 582, 582, 582, 582, 582, 582, 582, 582, 582, 582, 582, 582, 582, 582, 582, 582, 582, 582, 582, 582, 582, 582, 582, 582, 582, 582, 582, 582, 582, 582, 582, 582, 582, 582, 582, 582, 582, 582, 582, 582, 582, 582, 582, 582, 582, 582, 582, 582, 582, 582, 582, 582, 582, 582, 582, 582, 582, 582, 582, 582, 582, 582, 582, 582, 582, 582, 582, 582, 582, 582, 582, 582, 582, 582, 582, 582, 582, 582, 582, 582, 582, 582, 582, 582, 582, 582, 582, 582, 582, 582, 582, 582, 582, 582, 582, 582, 582, 582, 582, 582, 582, 582, 582, 582, 582, 582, 582, 582, 582, 582, 582, 582, 582, 582, 582, 582, 582, 582}, + // 360 + {581, 581, 581, 581, 581, 581, 581, 581, 581, 581, 581, 581, 581, 581, 581, 581, 581, 581, 581, 581, 581, 581, 581, 581, 581, 581, 581, 581, 581, 581, 581, 581, 581, 581, 581, 581, 581, 581, 581, 581, 581, 581, 581, 581, 581, 581, 581, 581, 581, 581, 581, 581, 581, 581, 581, 581, 581, 581, 581, 581, 581, 581, 581, 581, 581, 581, 581, 581, 581, 581, 581, 581, 581, 581, 581, 581, 581, 581, 581, 581, 581, 581, 581, 581, 581, 581, 581, 581, 581, 581, 581, 581, 581, 581, 581, 581, 581, 581, 581, 581, 581, 581, 581, 581, 581, 581, 581, 581, 581, 581, 581, 581, 581, 581, 581, 581, 581, 581, 581, 581, 581, 581, 581, 581, 581, 581, 581, 581, 581, 581, 581, 581, 581, 581, 581, 581, 581, 581, 581, 581, 581, 581, 581, 581, 581, 581, 581, 581, 581, 581, 581, 581, 581, 581, 581, 581, 581, 581, 581, 581, 581, 581, 581, 581, 581, 581, 581, 581, 581, 581, 581, 581, 581, 581, 581, 581, 581, 581, 581, 581, 581, 581, 581, 581, 581, 581, 581, 581, 581, 581, 581, 581, 581, 581, 581, 581, 581, 581, 581, 581, 581, 581, 581, 581, 581, 581, 581, 581, 581, 581, 581, 581, 581, 581, 581, 581, 581, 581, 581, 581, 581, 581, 581, 581, 581, 581, 581, 581, 581, 581, 581, 581, 581, 581, 581, 581, 581, 581, 581, 581, 581, 581, 581, 581, 581, 581, 581, 581, 581, 581, 581, 581, 581, 581, 581, 581, 581, 581, 581, 581, 581, 581, 581, 581, 581, 581, 581, 581, 581, 581, 581, 581, 581, 581, 581, 581, 581, 581, 581, 581, 581, 581, 581, 581, 581, 581, 581, 581, 581, 581, 581, 581, 581, 581, 581, 581, 581, 581, 581, 581, 581, 581, 581, 581, 581, 581, 581, 581, 581, 581, 581, 581, 581, 581, 581, 581, 581, 581, 581, 581, 581, 581, 581, 581, 581, 581, 581, 581, 581, 581, 581, 581, 581, 581, 581, 581, 581, 581, 581, 581, 581, 581, 581, 581, 581, 581, 581, 581, 581, 581, 581, 581, 581, 581, 581, 581, 581, 581, 581, 581, 581, 581, 581, 581, 581, 581, 581, 581, 581, 581, 581, 581, 581, 581, 581, 581, 581, 581, 581, 581, 581, 581, 581, 581, 581, 581, 581, 581, 581, 581, 581, 581, 581, 581, 581, 581, 581, 581, 581, 581, 581, 581, 581, 581, 581, 581, 581, 581, 581, 581, 581, 581, 581, 581, 581, 581, 581, 581, 581, 581, 581, 581, 581, 581, 581, 581, 581, 581, 581, 581, 581, 581, 581, 581, 581, 581, 581, 581, 581, 581, 581, 581, 581, 581, 581, 581, 581, 581, 581, 581, 581, 581, 581, 581, 581, 581, 581, 581, 581, 581, 581, 581, 581, 581, 581, 581, 581, 581, 581, 581, 581, 581, 581, 581, 581, 581, 581, 581, 581, 581, 581, 581, 581, 581, 581, 581, 581, 581, 581, 581, 581, 581, 581, 581, 581, 581, 581, 581, 581, 581, 581, 581, 581, 581, 581, 581, 581, 581, 581, 581, 581, 581, 581, 581, 581, 581, 581, 581, 581, 581, 581, 581, 581}, + {580, 580, 580, 580, 580, 580, 580, 580, 580, 580, 580, 580, 580, 580, 580, 580, 580, 580, 580, 580, 580, 580, 580, 580, 580, 580, 580, 580, 580, 580, 580, 580, 580, 
580, 580, 580, 580, 580, 580, 580, 580, 580, 580, 580, 580, 580, 580, 580, 580, 580, 580, 580, 580, 580, 580, 580, 580, 580, 580, 580, 580, 580, 580, 580, 580, 580, 580, 580, 580, 580, 580, 580, 580, 580, 580, 580, 580, 580, 580, 580, 580, 580, 580, 580, 580, 580, 580, 580, 580, 580, 580, 580, 580, 580, 580, 580, 580, 580, 580, 580, 580, 580, 580, 580, 580, 580, 580, 580, 580, 580, 580, 580, 580, 580, 580, 580, 580, 580, 580, 580, 580, 580, 580, 580, 580, 580, 580, 580, 580, 580, 580, 580, 580, 580, 580, 580, 580, 580, 580, 580, 580, 580, 580, 580, 580, 580, 580, 580, 580, 580, 580, 580, 580, 580, 580, 580, 580, 580, 580, 580, 580, 580, 580, 580, 580, 580, 580, 580, 580, 580, 580, 580, 580, 580, 580, 580, 580, 580, 580, 580, 580, 580, 580, 580, 580, 580, 580, 580, 580, 580, 580, 580, 580, 580, 580, 580, 580, 580, 580, 580, 580, 580, 580, 580, 580, 580, 580, 580, 580, 580, 580, 580, 580, 580, 580, 580, 580, 580, 580, 580, 580, 580, 580, 580, 580, 580, 580, 580, 580, 580, 580, 580, 580, 580, 580, 580, 580, 580, 580, 580, 580, 580, 580, 580, 580, 580, 580, 580, 580, 580, 580, 580, 580, 580, 580, 580, 580, 580, 580, 580, 580, 580, 580, 580, 580, 580, 580, 580, 580, 580, 580, 580, 580, 580, 580, 580, 580, 580, 580, 580, 580, 580, 580, 580, 580, 580, 580, 580, 580, 580, 580, 580, 580, 580, 580, 580, 580, 580, 580, 580, 580, 580, 580, 580, 580, 580, 580, 580, 580, 580, 580, 580, 580, 580, 580, 580, 580, 580, 580, 580, 580, 580, 580, 580, 580, 580, 580, 580, 580, 580, 580, 580, 580, 580, 580, 580, 580, 580, 580, 580, 580, 580, 580, 580, 580, 580, 580, 580, 580, 580, 580, 580, 580, 580, 580, 580, 580, 580, 580, 580, 580, 580, 580, 580, 580, 580, 580, 580, 580, 580, 580, 580, 580, 580, 580, 580, 580, 580, 580, 580, 580, 580, 580, 580, 580, 580, 580, 580, 580, 580, 580, 580, 580, 580, 580, 580, 580, 580, 580, 580, 580, 580, 580, 580, 580, 580, 580, 580, 580, 580, 580, 580, 580, 580, 580, 580, 580, 580, 580, 580, 580, 580, 580, 580, 580, 580, 580, 580, 580, 580, 580, 580, 580, 580, 580, 580, 580, 580, 580, 580, 580, 580, 580, 580, 580, 580, 580, 580, 580, 580, 580, 580, 580, 580, 580, 580, 580, 580, 580, 580, 580, 580, 580, 580, 580, 580, 580, 580, 580, 580, 580, 580, 580, 580, 580, 580, 580, 580, 580, 580, 580, 580, 580, 580, 580, 580, 580, 580, 580, 580, 580, 580, 580, 580, 580, 580, 580, 580, 580, 580, 580, 580, 580, 580, 580, 580, 580, 580, 580, 580, 580, 580, 580, 580, 580, 580, 580, 580, 580, 580, 580, 580, 580}, + {579, 579, 579, 579, 579, 579, 579, 579, 579, 579, 579, 579, 579, 579, 579, 579, 579, 579, 579, 579, 579, 579, 579, 579, 579, 579, 579, 579, 579, 579, 579, 579, 579, 579, 579, 579, 579, 579, 579, 579, 579, 579, 579, 579, 579, 579, 579, 579, 579, 579, 579, 579, 579, 579, 579, 579, 579, 579, 579, 579, 579, 579, 579, 579, 579, 579, 579, 579, 579, 579, 579, 579, 579, 579, 579, 579, 579, 579, 579, 579, 579, 579, 579, 579, 579, 579, 579, 579, 579, 579, 579, 579, 579, 579, 579, 579, 579, 579, 579, 579, 579, 579, 579, 579, 579, 579, 579, 579, 579, 579, 579, 579, 579, 579, 579, 579, 579, 579, 579, 579, 579, 579, 579, 579, 579, 579, 579, 579, 579, 579, 579, 579, 579, 579, 579, 579, 579, 579, 579, 579, 579, 579, 579, 579, 579, 579, 579, 579, 579, 579, 579, 579, 579, 579, 579, 579, 579, 579, 579, 579, 579, 579, 579, 579, 579, 579, 579, 579, 579, 579, 579, 579, 579, 579, 579, 579, 579, 579, 579, 579, 579, 579, 579, 579, 579, 579, 579, 579, 579, 579, 579, 579, 579, 579, 579, 579, 579, 579, 579, 579, 579, 579, 579, 579, 579, 579, 579, 579, 579, 579, 579, 579, 579, 579, 579, 579, 579, 579, 579, 579, 
579, 579, 579, 579, 579, 579, 579, 579, 579, 579, 579, 579, 579, 579, 579, 579, 579, 579, 579, 579, 579, 579, 579, 579, 579, 579, 579, 579, 579, 579, 579, 579, 579, 579, 579, 579, 579, 579, 579, 579, 579, 579, 579, 579, 579, 579, 579, 579, 579, 579, 579, 579, 579, 579, 579, 579, 579, 579, 579, 579, 579, 579, 579, 579, 579, 579, 579, 579, 579, 579, 579, 579, 579, 579, 579, 579, 579, 579, 579, 579, 579, 579, 579, 579, 579, 579, 579, 579, 579, 579, 579, 579, 579, 579, 579, 579, 579, 579, 579, 579, 579, 579, 579, 579, 579, 579, 579, 579, 579, 579, 579, 579, 579, 579, 579, 579, 579, 579, 579, 579, 579, 579, 579, 579, 579, 579, 579, 579, 579, 579, 579, 579, 579, 579, 579, 579, 579, 579, 579, 579, 579, 579, 579, 579, 579, 579, 579, 579, 579, 579, 579, 579, 579, 579, 579, 579, 579, 579, 579, 579, 579, 579, 579, 579, 579, 579, 579, 579, 579, 579, 579, 579, 579, 579, 579, 579, 579, 579, 579, 579, 579, 579, 579, 579, 579, 579, 579, 579, 579, 579, 579, 579, 579, 579, 579, 579, 579, 579, 579, 579, 579, 579, 579, 579, 579, 579, 579, 579, 579, 579, 579, 579, 579, 579, 579, 579, 579, 579, 579, 579, 579, 579, 579, 579, 579, 579, 579, 579, 579, 579, 579, 579, 579, 579, 579, 579, 579, 579, 579, 579, 579, 579, 579, 579, 579, 579, 579, 579, 579, 579, 579, 579, 579, 579, 579, 579, 579, 579, 579, 579, 579, 579, 579, 579, 579, 579, 579, 579, 579, 579, 579, 579, 579, 579, 579, 579, 579, 579, 579, 579, 579, 579, 579, 579, 579, 579, 579, 579, 579, 579, 579, 579, 579, 579, 579, 579, 579, 579, 579, 579, 579, 579, 579}, + {578, 578, 578, 578, 578, 578, 578, 578, 578, 578, 578, 578, 578, 578, 578, 578, 578, 578, 578, 578, 578, 578, 578, 578, 578, 578, 578, 578, 578, 578, 578, 578, 578, 578, 578, 578, 578, 578, 578, 578, 578, 578, 578, 578, 578, 578, 578, 578, 578, 578, 578, 578, 578, 578, 578, 578, 578, 578, 578, 578, 578, 578, 578, 578, 578, 578, 578, 578, 578, 578, 578, 578, 578, 578, 578, 578, 578, 578, 578, 578, 578, 578, 578, 578, 578, 578, 578, 578, 578, 578, 578, 578, 578, 578, 578, 578, 578, 578, 578, 578, 578, 578, 578, 578, 578, 578, 578, 578, 578, 578, 578, 578, 578, 578, 578, 578, 578, 578, 578, 578, 578, 578, 578, 578, 578, 578, 578, 578, 578, 578, 578, 578, 578, 578, 578, 578, 578, 578, 578, 578, 578, 578, 578, 578, 578, 578, 578, 578, 578, 578, 578, 578, 578, 578, 578, 578, 578, 578, 578, 578, 578, 578, 578, 578, 578, 578, 578, 578, 578, 578, 578, 578, 578, 578, 578, 578, 578, 578, 578, 578, 578, 578, 578, 578, 578, 578, 578, 578, 578, 578, 578, 578, 578, 578, 578, 578, 578, 578, 578, 578, 578, 578, 578, 578, 578, 578, 578, 578, 578, 578, 578, 578, 578, 578, 578, 578, 578, 578, 578, 578, 578, 578, 578, 578, 578, 578, 578, 578, 578, 578, 578, 578, 578, 578, 578, 578, 578, 578, 578, 578, 578, 578, 578, 578, 578, 578, 578, 578, 578, 578, 578, 578, 578, 578, 578, 578, 578, 578, 578, 578, 578, 578, 578, 578, 578, 578, 578, 578, 578, 578, 578, 578, 578, 578, 578, 578, 578, 578, 578, 578, 578, 578, 578, 578, 578, 578, 578, 578, 578, 578, 578, 578, 578, 578, 578, 578, 578, 578, 578, 578, 578, 578, 578, 578, 578, 578, 578, 578, 578, 578, 578, 578, 578, 578, 578, 578, 578, 578, 578, 578, 578, 578, 578, 578, 578, 578, 578, 578, 578, 578, 578, 578, 578, 578, 578, 578, 578, 578, 578, 578, 578, 578, 578, 578, 578, 578, 578, 578, 578, 578, 578, 578, 578, 578, 578, 578, 578, 578, 578, 578, 578, 578, 578, 578, 578, 578, 578, 578, 578, 578, 578, 578, 578, 578, 578, 578, 578, 578, 578, 578, 578, 578, 578, 578, 578, 578, 578, 578, 578, 578, 578, 578, 578, 578, 578, 578, 578, 578, 578, 578, 578, 578, 578, 578, 578, 578, 578, 
578, 578, 578, 578, 578, 578, 578, 578, 578, 578, 578, 578, 578, 578, 578, 578, 578, 578, 578, 578, 578, 578, 578, 578, 578, 578, 578, 578, 578, 578, 578, 578, 578, 578, 578, 578, 578, 578, 578, 578, 578, 578, 578, 578, 578, 578, 578, 578, 578, 578, 578, 578, 578, 578, 578, 578, 578, 578, 578, 578, 578, 578, 578, 578, 578, 578, 578, 578, 578, 578, 578, 578, 578, 578, 578, 578, 578, 578, 578, 578, 578, 578, 578, 578, 578, 578, 578, 578, 578, 578, 578, 578, 578, 578, 578, 578, 578, 578, 578, 578, 578, 578, 578, 578, 578, 578, 578, 578, 578, 578, 578, 578, 578, 578, 578, 578}, + {577, 577, 577, 577, 577, 577, 577, 577, 577, 577, 577, 577, 577, 577, 577, 577, 577, 577, 577, 577, 577, 577, 577, 577, 577, 577, 577, 577, 577, 577, 577, 577, 577, 577, 577, 577, 577, 577, 577, 577, 577, 577, 577, 577, 577, 577, 577, 577, 577, 577, 577, 577, 577, 577, 577, 577, 577, 577, 577, 577, 577, 577, 577, 577, 577, 577, 577, 577, 577, 577, 577, 577, 577, 577, 577, 577, 577, 577, 577, 577, 577, 577, 577, 577, 577, 577, 577, 577, 577, 577, 577, 577, 577, 577, 577, 577, 577, 577, 577, 577, 577, 577, 577, 577, 577, 577, 577, 577, 577, 577, 577, 577, 577, 577, 577, 577, 577, 577, 577, 577, 577, 577, 577, 577, 577, 577, 577, 577, 577, 577, 577, 577, 577, 577, 577, 577, 577, 577, 577, 577, 577, 577, 577, 577, 577, 577, 577, 577, 577, 577, 577, 577, 577, 577, 577, 577, 577, 577, 577, 577, 577, 577, 577, 577, 577, 577, 577, 577, 577, 577, 577, 577, 577, 577, 577, 577, 577, 577, 577, 577, 577, 577, 577, 577, 577, 577, 577, 577, 577, 577, 577, 577, 577, 577, 577, 577, 577, 577, 577, 577, 577, 577, 577, 577, 577, 577, 577, 577, 577, 577, 577, 577, 577, 577, 577, 577, 577, 577, 577, 577, 577, 577, 577, 577, 577, 577, 577, 577, 577, 577, 577, 577, 577, 577, 577, 577, 577, 577, 577, 577, 577, 577, 577, 577, 577, 577, 577, 577, 577, 577, 577, 577, 577, 577, 577, 577, 577, 577, 577, 577, 577, 577, 577, 577, 577, 577, 577, 577, 577, 577, 577, 577, 577, 577, 577, 577, 577, 577, 577, 577, 577, 577, 577, 577, 577, 577, 577, 577, 577, 577, 577, 577, 577, 577, 577, 577, 577, 577, 577, 577, 577, 577, 577, 577, 577, 577, 577, 577, 577, 577, 577, 577, 577, 577, 577, 577, 577, 577, 577, 577, 577, 577, 577, 577, 577, 577, 577, 577, 577, 577, 577, 577, 577, 577, 577, 577, 577, 577, 577, 577, 577, 577, 577, 577, 577, 577, 577, 577, 577, 577, 577, 577, 577, 577, 577, 577, 577, 577, 577, 577, 577, 577, 577, 577, 577, 577, 577, 577, 577, 577, 577, 577, 577, 577, 577, 577, 577, 577, 577, 577, 577, 577, 577, 577, 577, 577, 577, 577, 577, 577, 577, 577, 577, 577, 577, 577, 577, 577, 577, 577, 577, 577, 577, 577, 577, 577, 577, 577, 577, 577, 577, 577, 577, 577, 577, 577, 577, 577, 577, 577, 577, 577, 577, 577, 577, 577, 577, 577, 577, 577, 577, 577, 577, 577, 577, 577, 577, 577, 577, 577, 577, 577, 577, 577, 577, 577, 577, 577, 577, 577, 577, 577, 577, 577, 577, 577, 577, 577, 577, 577, 577, 577, 577, 577, 577, 577, 577, 577, 577, 577, 577, 577, 577, 577, 577, 577, 577, 577, 577, 577, 577, 577, 577, 577, 577, 577, 577, 577, 577, 577, 577, 577, 577, 577, 577, 577, 577, 577, 577, 577, 577, 577, 577, 577, 577, 577, 577, 577, 577, 577, 577, 577, 577, 577, 577, 577, 577, 577, 577, 577, 577, 577, 577}, + // 365 + {576, 576, 576, 576, 576, 576, 576, 576, 576, 576, 576, 576, 576, 576, 576, 576, 576, 576, 576, 576, 576, 576, 576, 576, 576, 576, 576, 576, 576, 576, 576, 576, 576, 576, 576, 576, 576, 576, 576, 576, 576, 576, 576, 576, 576, 576, 576, 576, 576, 576, 576, 576, 576, 576, 576, 576, 576, 576, 576, 576, 576, 576, 576, 576, 576, 576, 576, 576, 
576, 576, 576, 576, 576, 576, 576, 576, 576, 576, 576, 576, 576, 576, 576, 576, 576, 576, 576, 576, 576, 576, 576, 576, 576, 576, 576, 576, 576, 576, 576, 576, 576, 576, 576, 576, 576, 576, 576, 576, 576, 576, 576, 576, 576, 576, 576, 576, 576, 576, 576, 576, 576, 576, 576, 576, 576, 576, 576, 576, 576, 576, 576, 576, 576, 576, 576, 576, 576, 576, 576, 576, 576, 576, 576, 576, 576, 576, 576, 576, 576, 576, 576, 576, 576, 576, 576, 576, 576, 576, 576, 576, 576, 576, 576, 576, 576, 576, 576, 576, 576, 576, 576, 576, 576, 576, 576, 576, 576, 576, 576, 576, 576, 576, 576, 576, 576, 576, 576, 576, 576, 576, 576, 576, 576, 576, 576, 576, 576, 576, 576, 576, 576, 576, 576, 576, 576, 576, 576, 576, 576, 576, 576, 576, 576, 576, 576, 576, 576, 576, 576, 576, 576, 576, 576, 576, 576, 576, 576, 576, 576, 576, 576, 576, 576, 576, 576, 576, 576, 576, 576, 576, 576, 576, 576, 576, 576, 576, 576, 576, 576, 576, 576, 576, 576, 576, 576, 576, 576, 576, 576, 576, 576, 576, 576, 576, 576, 576, 576, 576, 576, 576, 576, 576, 576, 576, 576, 576, 576, 576, 576, 576, 576, 576, 576, 576, 576, 576, 576, 576, 576, 576, 576, 576, 576, 576, 576, 576, 576, 576, 576, 576, 576, 576, 576, 576, 576, 576, 576, 576, 576, 576, 576, 576, 576, 576, 576, 576, 576, 576, 576, 576, 576, 576, 576, 576, 576, 576, 576, 576, 576, 576, 576, 576, 576, 576, 576, 576, 576, 576, 576, 576, 576, 576, 576, 576, 576, 576, 576, 576, 576, 576, 576, 576, 576, 576, 576, 576, 576, 576, 576, 576, 576, 576, 576, 576, 576, 576, 576, 576, 576, 576, 576, 576, 576, 576, 576, 576, 576, 576, 576, 576, 576, 576, 576, 576, 576, 576, 576, 576, 576, 576, 576, 576, 576, 576, 576, 576, 576, 576, 576, 576, 576, 576, 576, 576, 576, 576, 576, 576, 576, 576, 576, 576, 576, 576, 576, 576, 576, 576, 576, 576, 576, 576, 576, 576, 576, 576, 576, 576, 576, 576, 576, 576, 576, 576, 576, 576, 576, 576, 576, 576, 576, 576, 576, 576, 576, 576, 576, 576, 576, 576, 576, 576, 576, 576, 576, 576, 576, 576, 576, 576, 576, 576, 576, 576, 576, 576, 576, 576, 576, 576, 576, 576, 576, 576, 576, 576, 576, 576, 576, 576, 576, 576, 576, 576, 576, 576, 576, 576, 576, 576, 576, 576, 576, 576, 576, 576, 576, 576, 576, 576, 576, 576, 576, 576, 576, 576, 576, 576, 576, 576, 576, 576, 576, 576, 576, 576, 576, 576, 576, 576, 576, 576, 576}, + {575, 575, 575, 575, 575, 575, 575, 575, 575, 575, 575, 575, 575, 575, 575, 575, 575, 575, 575, 575, 575, 575, 575, 575, 575, 575, 575, 575, 575, 575, 575, 575, 575, 575, 575, 575, 575, 575, 575, 575, 575, 575, 575, 575, 575, 575, 575, 575, 575, 575, 575, 575, 575, 575, 575, 575, 575, 575, 575, 575, 575, 575, 575, 575, 575, 575, 575, 575, 575, 575, 575, 575, 575, 575, 575, 575, 575, 575, 575, 575, 575, 575, 575, 575, 575, 575, 575, 575, 575, 575, 575, 575, 575, 575, 575, 575, 575, 575, 575, 575, 575, 575, 575, 575, 575, 575, 575, 575, 575, 575, 575, 575, 575, 575, 575, 575, 575, 575, 575, 575, 575, 575, 575, 575, 575, 575, 575, 575, 575, 575, 575, 575, 575, 575, 575, 575, 575, 575, 575, 575, 575, 575, 575, 575, 575, 575, 575, 575, 575, 575, 575, 575, 575, 575, 575, 575, 575, 575, 575, 575, 575, 575, 575, 575, 575, 575, 575, 575, 575, 575, 575, 575, 575, 575, 575, 575, 575, 575, 575, 575, 575, 575, 575, 575, 575, 575, 575, 575, 575, 575, 575, 575, 575, 575, 575, 575, 575, 575, 575, 575, 575, 575, 575, 575, 575, 575, 575, 575, 575, 575, 575, 575, 575, 575, 575, 575, 575, 575, 575, 575, 575, 575, 575, 575, 575, 575, 575, 575, 575, 575, 575, 575, 575, 575, 575, 575, 575, 575, 575, 575, 575, 575, 575, 575, 575, 575, 575, 575, 575, 575, 575, 575, 575, 575, 575, 
575, 575, 575, 575, 575, 575, 575, 575, 575, 575, 575, 575, 575, 575, 575, 575, 575, 575, 575, 575, 575, 575, 575, 575, 575, 575, 575, 575, 575, 575, 575, 575, 575, 575, 575, 575, 575, 575, 575, 575, 575, 575, 575, 575, 575, 575, 575, 575, 575, 575, 575, 575, 575, 575, 575, 575, 575, 575, 575, 575, 575, 575, 575, 575, 575, 575, 575, 575, 575, 575, 575, 575, 575, 575, 575, 575, 575, 575, 575, 575, 575, 575, 575, 575, 575, 575, 575, 575, 575, 575, 575, 575, 575, 575, 575, 575, 575, 575, 575, 575, 575, 575, 575, 575, 575, 575, 575, 575, 575, 575, 575, 575, 575, 575, 575, 575, 575, 575, 575, 575, 575, 575, 575, 575, 575, 575, 575, 575, 575, 575, 575, 575, 575, 575, 575, 575, 575, 575, 575, 575, 575, 575, 575, 575, 575, 575, 575, 575, 575, 575, 575, 575, 575, 575, 575, 575, 575, 575, 575, 575, 575, 575, 575, 575, 575, 575, 575, 575, 575, 575, 575, 575, 575, 575, 575, 575, 575, 575, 575, 575, 575, 575, 575, 575, 575, 575, 575, 575, 575, 575, 575, 575, 575, 575, 575, 575, 575, 575, 575, 575, 575, 575, 575, 575, 575, 575, 575, 575, 575, 575, 575, 575, 575, 575, 575, 575, 575, 575, 575, 575, 575, 575, 575, 575, 575, 575, 575, 575, 575, 575, 575, 575, 575, 575, 575, 575, 575, 575, 575, 575, 575, 575, 575, 575, 575, 575, 575, 575, 575, 575, 575, 575, 575, 575, 575, 575, 575, 575, 575, 575, 575, 575, 575, 575, 575, 575, 575, 575}, + {574, 574, 574, 574, 574, 574, 574, 574, 574, 574, 574, 574, 574, 574, 574, 574, 574, 574, 574, 574, 574, 574, 574, 574, 574, 574, 574, 574, 574, 574, 574, 574, 574, 574, 574, 574, 574, 574, 574, 574, 574, 574, 574, 574, 574, 574, 574, 574, 574, 574, 574, 574, 574, 574, 574, 574, 574, 574, 574, 574, 574, 574, 574, 574, 574, 574, 574, 574, 574, 574, 574, 574, 574, 574, 574, 574, 574, 574, 574, 574, 574, 574, 574, 574, 574, 574, 574, 574, 574, 574, 574, 574, 574, 574, 574, 574, 574, 574, 574, 574, 574, 574, 574, 574, 574, 574, 574, 574, 574, 574, 574, 574, 574, 574, 574, 574, 574, 574, 574, 574, 574, 574, 574, 574, 574, 574, 574, 574, 574, 574, 574, 574, 574, 574, 574, 574, 574, 574, 574, 574, 574, 574, 574, 574, 574, 574, 574, 574, 574, 574, 574, 574, 574, 574, 574, 574, 574, 574, 574, 574, 574, 574, 574, 574, 574, 574, 574, 574, 574, 574, 574, 574, 574, 574, 574, 574, 574, 574, 574, 574, 574, 574, 574, 574, 574, 574, 574, 574, 574, 574, 574, 574, 574, 574, 574, 574, 574, 574, 574, 574, 574, 574, 574, 574, 574, 574, 574, 574, 574, 574, 574, 574, 574, 574, 574, 574, 574, 574, 574, 574, 574, 574, 574, 574, 574, 574, 574, 574, 574, 574, 574, 574, 574, 574, 574, 574, 574, 574, 574, 574, 574, 574, 574, 574, 574, 574, 574, 574, 574, 574, 574, 574, 574, 574, 574, 574, 574, 574, 574, 574, 574, 574, 574, 574, 574, 574, 574, 574, 574, 574, 574, 574, 574, 574, 574, 574, 574, 574, 574, 574, 574, 574, 574, 574, 574, 574, 574, 574, 574, 574, 574, 574, 574, 574, 574, 574, 574, 574, 574, 574, 574, 574, 574, 574, 574, 574, 574, 574, 574, 574, 574, 574, 574, 574, 574, 574, 574, 574, 574, 574, 574, 574, 574, 574, 574, 574, 574, 574, 574, 574, 574, 574, 574, 574, 574, 574, 574, 574, 574, 574, 574, 574, 574, 574, 574, 574, 574, 574, 574, 574, 574, 574, 574, 574, 574, 574, 574, 574, 574, 574, 574, 574, 574, 574, 574, 574, 574, 574, 574, 574, 574, 574, 574, 574, 574, 574, 574, 574, 574, 574, 574, 574, 574, 574, 574, 574, 574, 574, 574, 574, 574, 574, 574, 574, 574, 574, 574, 574, 574, 574, 574, 574, 574, 574, 574, 574, 574, 574, 574, 574, 574, 574, 574, 574, 574, 574, 574, 574, 574, 574, 574, 574, 574, 574, 574, 574, 574, 574, 574, 574, 574, 574, 574, 574, 574, 574, 574, 574, 574, 574, 574, 574, 
574, 574, 574, 574, 574, 574, 574, 574, 574, 574, 574, 574, 574, 574, 574, 574, 574, 574, 574, 574, 574, 574, 574, 574, 574, 574, 574, 574, 574, 574, 574, 574, 574, 574, 574, 574, 574, 574, 574, 574, 574, 574, 574, 574, 574, 574, 574, 574, 574, 574, 574, 574, 574, 574, 574, 574, 574, 574, 574, 574, 574, 574, 574, 574, 574, 574, 574, 574, 574, 574, 574, 574, 574, 574, 574, 574, 574, 574, 574, 574, 574}, + {573, 573, 573, 573, 573, 573, 573, 573, 573, 573, 573, 573, 573, 573, 573, 573, 573, 573, 573, 573, 573, 573, 573, 573, 573, 573, 573, 573, 573, 573, 573, 573, 573, 573, 573, 573, 573, 573, 573, 573, 573, 573, 573, 573, 573, 573, 573, 573, 573, 573, 573, 573, 573, 573, 573, 573, 573, 573, 573, 573, 573, 573, 573, 573, 573, 573, 573, 573, 573, 573, 573, 573, 573, 573, 573, 573, 573, 573, 573, 573, 573, 573, 573, 573, 573, 573, 573, 573, 573, 573, 573, 573, 573, 573, 573, 573, 573, 573, 573, 573, 573, 573, 573, 573, 573, 573, 573, 573, 573, 573, 573, 573, 573, 573, 573, 573, 573, 573, 573, 573, 573, 573, 573, 573, 573, 573, 573, 573, 573, 573, 573, 573, 573, 573, 573, 573, 573, 573, 573, 573, 573, 573, 573, 573, 573, 573, 573, 573, 573, 573, 573, 573, 573, 573, 573, 573, 573, 573, 573, 573, 573, 573, 573, 573, 573, 573, 573, 573, 573, 573, 573, 573, 573, 573, 573, 573, 573, 573, 573, 573, 573, 573, 573, 573, 573, 573, 573, 573, 573, 573, 573, 573, 573, 573, 573, 573, 573, 573, 573, 573, 573, 573, 573, 573, 573, 573, 573, 573, 573, 573, 573, 573, 573, 573, 573, 573, 573, 573, 573, 573, 573, 573, 573, 573, 573, 573, 573, 573, 573, 573, 573, 573, 573, 573, 573, 573, 573, 573, 573, 573, 573, 573, 573, 573, 573, 573, 573, 573, 573, 573, 573, 573, 573, 573, 573, 573, 573, 573, 573, 573, 573, 573, 573, 573, 573, 573, 573, 573, 573, 573, 573, 573, 573, 573, 573, 573, 573, 573, 573, 573, 573, 573, 573, 573, 573, 573, 573, 573, 573, 573, 573, 573, 573, 573, 573, 573, 573, 573, 573, 573, 573, 573, 573, 573, 573, 573, 573, 573, 573, 573, 573, 573, 573, 573, 573, 573, 573, 573, 573, 573, 573, 573, 573, 573, 573, 573, 573, 573, 573, 573, 573, 573, 573, 573, 573, 573, 573, 573, 573, 573, 573, 573, 573, 573, 573, 573, 573, 573, 573, 573, 573, 573, 573, 573, 573, 573, 573, 573, 573, 573, 573, 573, 573, 573, 573, 573, 573, 573, 573, 573, 573, 573, 573, 573, 573, 573, 573, 573, 573, 573, 573, 573, 573, 573, 573, 573, 573, 573, 573, 573, 573, 573, 573, 573, 573, 573, 573, 573, 573, 573, 573, 573, 573, 573, 573, 573, 573, 573, 573, 573, 573, 573, 573, 573, 573, 573, 573, 573, 573, 573, 573, 573, 573, 573, 573, 573, 573, 573, 573, 573, 573, 573, 573, 573, 573, 573, 573, 573, 573, 573, 573, 573, 573, 573, 573, 573, 573, 573, 573, 573, 573, 573, 573, 573, 573, 573, 573, 573, 573, 573, 573, 573, 573, 573, 573, 573, 573, 573, 573, 573, 573, 573, 573, 573, 573, 573, 573, 573, 573, 573, 573, 573, 573, 573, 573, 573, 573, 573, 573, 573, 573, 573, 573, 573, 573, 573, 573, 573, 573, 573, 573, 573, 573, 573, 573, 573, 573, 573, 573, 573, 573, 573, 573, 573, 573, 573, 573, 573, 573, 573, 573, 573, 573}, + {572, 572, 572, 572, 572, 572, 572, 572, 572, 572, 572, 572, 572, 572, 572, 572, 572, 572, 572, 572, 572, 572, 572, 572, 572, 572, 572, 572, 572, 572, 572, 572, 572, 572, 572, 572, 572, 572, 572, 572, 572, 572, 572, 572, 572, 572, 572, 572, 572, 572, 572, 572, 572, 572, 572, 572, 572, 572, 572, 572, 572, 572, 572, 572, 572, 572, 572, 572, 572, 572, 572, 572, 572, 572, 572, 572, 572, 572, 572, 572, 572, 572, 572, 572, 572, 572, 572, 572, 572, 572, 572, 572, 572, 572, 572, 572, 572, 572, 572, 572, 572, 572, 572, 572, 572, 
572, 572, 572, 572, 572, 572, 572, 572, 572, 572, 572, 572, 572, 572, 572, 572, 572, 572, 572, 572, 572, 572, 572, 572, 572, 572, 572, 572, 572, 572, 572, 572, 572, 572, 572, 572, 572, 572, 572, 572, 572, 572, 572, 572, 572, 572, 572, 572, 572, 572, 572, 572, 572, 572, 572, 572, 572, 572, 572, 572, 572, 572, 572, 572, 572, 572, 572, 572, 572, 572, 572, 572, 572, 572, 572, 572, 572, 572, 572, 572, 572, 572, 572, 572, 572, 572, 572, 572, 572, 572, 572, 572, 572, 572, 572, 572, 572, 572, 572, 572, 572, 572, 572, 572, 572, 572, 572, 572, 572, 572, 572, 572, 572, 572, 572, 572, 572, 572, 572, 572, 572, 572, 572, 572, 572, 572, 572, 572, 572, 572, 572, 572, 572, 572, 572, 572, 572, 572, 572, 572, 572, 572, 572, 572, 572, 572, 572, 572, 572, 572, 572, 572, 572, 572, 572, 572, 572, 572, 572, 572, 572, 572, 572, 572, 572, 572, 572, 572, 572, 572, 572, 572, 572, 572, 572, 572, 572, 572, 572, 572, 572, 572, 572, 572, 572, 572, 572, 572, 572, 572, 572, 572, 572, 572, 572, 572, 572, 572, 572, 572, 572, 572, 572, 572, 572, 572, 572, 572, 572, 572, 572, 572, 572, 572, 572, 572, 572, 572, 572, 572, 572, 572, 572, 572, 572, 572, 572, 572, 572, 572, 572, 572, 572, 572, 572, 572, 572, 572, 572, 572, 572, 572, 572, 572, 572, 572, 572, 572, 572, 572, 572, 572, 572, 572, 572, 572, 572, 572, 572, 572, 572, 572, 572, 572, 572, 572, 572, 572, 572, 572, 572, 572, 572, 572, 572, 572, 572, 572, 572, 572, 572, 572, 572, 572, 572, 572, 572, 572, 572, 572, 572, 572, 572, 572, 572, 572, 572, 572, 572, 572, 572, 572, 572, 572, 572, 572, 572, 572, 572, 572, 572, 572, 572, 572, 572, 572, 572, 572, 572, 572, 572, 572, 572, 572, 572, 572, 572, 572, 572, 572, 572, 572, 572, 572, 572, 572, 572, 572, 572, 572, 572, 572, 572, 572, 572, 572, 572, 572, 572, 572, 572, 572, 572, 572, 572, 572, 572, 572, 572, 572, 572, 572, 572, 572, 572, 572, 572, 572, 572, 572, 572, 572, 572, 572, 572, 572, 572, 572, 572, 572, 572, 572, 572, 572, 572, 572, 572, 572, 572, 572, 572, 572, 572, 572, 572, 572, 572, 572, 572, 572, 572, 572, 572, 572, 572, 572, 572, 572, 572, 572, 572, 572, 572, 572, 572, 572, 572, 572}, + // 370 + {571, 571, 571, 571, 571, 571, 571, 571, 571, 571, 571, 571, 571, 571, 571, 571, 571, 571, 571, 571, 571, 571, 571, 571, 571, 571, 571, 571, 571, 571, 571, 571, 571, 571, 571, 571, 571, 571, 571, 571, 571, 571, 571, 571, 571, 571, 571, 571, 571, 571, 571, 571, 571, 571, 571, 571, 571, 571, 571, 571, 571, 571, 571, 571, 571, 571, 571, 571, 571, 571, 571, 571, 571, 571, 571, 571, 571, 571, 571, 571, 571, 571, 571, 571, 571, 571, 571, 571, 571, 571, 571, 571, 571, 571, 571, 571, 571, 571, 571, 571, 571, 571, 571, 571, 571, 571, 571, 571, 571, 571, 571, 571, 571, 571, 571, 571, 571, 571, 571, 571, 571, 571, 571, 571, 571, 571, 571, 571, 571, 571, 571, 571, 571, 571, 571, 571, 571, 571, 571, 571, 571, 571, 571, 571, 571, 571, 571, 571, 571, 571, 571, 571, 571, 571, 571, 571, 571, 571, 571, 571, 571, 571, 571, 571, 571, 571, 571, 571, 571, 571, 571, 571, 571, 571, 571, 571, 571, 571, 571, 571, 571, 571, 571, 571, 571, 571, 571, 571, 571, 571, 571, 571, 571, 571, 571, 571, 571, 571, 571, 571, 571, 571, 571, 571, 571, 571, 571, 571, 571, 571, 571, 571, 571, 571, 571, 571, 571, 571, 571, 571, 571, 571, 571, 571, 571, 571, 571, 571, 571, 571, 571, 571, 571, 571, 571, 571, 571, 571, 571, 571, 571, 571, 571, 571, 571, 571, 571, 571, 571, 571, 571, 571, 571, 571, 571, 571, 571, 571, 571, 571, 571, 571, 571, 571, 571, 571, 571, 571, 571, 571, 571, 571, 571, 571, 571, 571, 571, 571, 571, 571, 571, 571, 571, 571, 571, 571, 571, 571, 571, 571, 
571, 571, 571, 571, 571, 571, 571, 571, 571, 571, 571, 571, 571, 571, 571, 571, 571, 571, 571, 571, 571, 571, 571, 571, 571, 571, 571, 571, 571, 571, 571, 571, 571, 571, 571, 571, 571, 571, 571, 571, 571, 571, 571, 571, 571, 571, 571, 571, 571, 571, 571, 571, 571, 571, 571, 571, 571, 571, 571, 571, 571, 571, 571, 571, 571, 571, 571, 571, 571, 571, 571, 571, 571, 571, 571, 571, 571, 571, 571, 571, 571, 571, 571, 571, 571, 571, 571, 571, 571, 571, 571, 571, 571, 571, 571, 571, 571, 571, 571, 571, 571, 571, 571, 571, 571, 571, 571, 571, 571, 571, 571, 571, 571, 571, 571, 571, 571, 571, 571, 571, 571, 571, 571, 571, 571, 571, 571, 571, 571, 571, 571, 571, 571, 571, 571, 571, 571, 571, 571, 571, 571, 571, 571, 571, 571, 571, 571, 571, 571, 571, 571, 571, 571, 571, 571, 571, 571, 571, 571, 571, 571, 571, 571, 571, 571, 571, 571, 571, 571, 571, 571, 571, 571, 571, 571, 571, 571, 571, 571, 571, 571, 571, 571, 571, 571, 571, 571, 571, 571, 571, 571, 571, 571, 571, 571, 571, 571, 571, 571, 571, 571, 571, 571, 571, 571, 571, 571, 571, 571, 571, 571, 571, 571, 571, 571, 571, 571, 571, 571, 571, 571, 571, 571, 571, 571, 571, 571, 571, 571, 571, 571, 571, 571}, + {570, 570, 570, 570, 570, 570, 570, 570, 570, 570, 570, 570, 570, 570, 570, 570, 570, 570, 570, 570, 570, 570, 570, 570, 570, 570, 570, 570, 570, 570, 570, 570, 570, 570, 570, 570, 570, 570, 570, 570, 570, 570, 570, 570, 570, 570, 570, 570, 570, 570, 570, 570, 570, 570, 570, 570, 570, 570, 570, 570, 570, 570, 570, 570, 570, 570, 570, 570, 570, 570, 570, 570, 570, 570, 570, 570, 570, 570, 570, 570, 570, 570, 570, 570, 570, 570, 570, 570, 570, 570, 570, 570, 570, 570, 570, 570, 570, 570, 570, 570, 570, 570, 570, 570, 570, 570, 570, 570, 570, 570, 570, 570, 570, 570, 570, 570, 570, 570, 570, 570, 570, 570, 570, 570, 570, 570, 570, 570, 570, 570, 570, 570, 570, 570, 570, 570, 570, 570, 570, 570, 570, 570, 570, 570, 570, 570, 570, 570, 570, 570, 570, 570, 570, 570, 570, 570, 570, 570, 570, 570, 570, 570, 570, 570, 570, 570, 570, 570, 570, 570, 570, 570, 570, 570, 570, 570, 570, 570, 570, 570, 570, 570, 570, 570, 570, 570, 570, 570, 570, 570, 570, 570, 570, 570, 570, 570, 570, 570, 570, 570, 570, 570, 570, 570, 570, 570, 570, 570, 570, 570, 570, 570, 570, 570, 570, 570, 570, 570, 570, 570, 570, 570, 570, 570, 570, 570, 570, 570, 570, 570, 570, 570, 570, 570, 570, 570, 570, 570, 570, 570, 570, 570, 570, 570, 570, 570, 570, 570, 570, 570, 570, 570, 570, 570, 570, 570, 570, 570, 570, 570, 570, 570, 570, 570, 570, 570, 570, 570, 570, 570, 570, 570, 570, 570, 570, 570, 570, 570, 570, 570, 570, 570, 570, 570, 570, 570, 570, 570, 570, 570, 570, 570, 570, 570, 570, 570, 570, 570, 570, 570, 570, 570, 570, 570, 570, 570, 570, 570, 570, 570, 570, 570, 570, 570, 570, 570, 570, 570, 570, 570, 570, 570, 570, 570, 570, 570, 570, 570, 570, 570, 570, 570, 570, 570, 570, 570, 570, 570, 570, 570, 570, 570, 570, 570, 570, 570, 570, 570, 570, 570, 570, 570, 570, 570, 570, 570, 570, 570, 570, 570, 570, 570, 570, 570, 570, 570, 570, 570, 570, 570, 570, 570, 570, 570, 570, 570, 570, 570, 570, 570, 570, 570, 570, 570, 570, 570, 570, 570, 570, 570, 570, 570, 570, 570, 570, 570, 570, 570, 570, 570, 570, 570, 570, 570, 570, 570, 570, 570, 570, 570, 570, 570, 570, 570, 570, 570, 570, 570, 570, 570, 570, 570, 570, 570, 570, 570, 570, 570, 570, 570, 570, 570, 570, 570, 570, 570, 570, 570, 570, 570, 570, 570, 570, 570, 570, 570, 570, 570, 570, 570, 570, 570, 570, 570, 570, 570, 570, 570, 570, 570, 570, 570, 570, 570, 570, 570, 570, 570, 570, 570, 570, 570, 570, 570, 570, 570, 570, 
570, 570, 570, 570, 570, 570, 570, 570, 570, 570, 570, 570, 570, 570, 570, 570, 570, 570, 570, 570, 570, 570, 570, 570, 570, 570, 570, 570, 570, 570, 570, 570, 570, 570, 570, 570, 570, 570, 570, 570, 570, 570, 570, 570, 570, 570}, + {569, 569, 569, 569, 569, 569, 569, 569, 569, 569, 569, 569, 569, 569, 569, 569, 569, 569, 569, 569, 569, 569, 569, 569, 569, 569, 569, 569, 569, 569, 569, 569, 569, 569, 569, 569, 569, 569, 569, 569, 569, 569, 569, 569, 569, 569, 569, 569, 569, 569, 569, 569, 569, 569, 569, 569, 569, 569, 569, 569, 569, 569, 569, 569, 569, 569, 569, 569, 569, 569, 569, 569, 569, 569, 569, 569, 569, 569, 569, 569, 569, 569, 569, 569, 569, 569, 569, 569, 569, 569, 569, 569, 569, 569, 569, 569, 569, 569, 569, 569, 569, 569, 569, 569, 569, 569, 569, 569, 569, 569, 569, 569, 569, 569, 569, 569, 569, 569, 569, 569, 569, 569, 569, 569, 569, 569, 569, 569, 569, 569, 569, 569, 569, 569, 569, 569, 569, 569, 569, 569, 569, 569, 569, 569, 569, 569, 569, 569, 569, 569, 569, 569, 569, 569, 569, 569, 569, 569, 569, 569, 569, 569, 569, 569, 569, 569, 569, 569, 569, 569, 569, 569, 569, 569, 569, 569, 569, 569, 569, 569, 569, 569, 569, 569, 569, 569, 569, 569, 569, 569, 569, 569, 569, 569, 569, 569, 569, 569, 569, 569, 569, 569, 569, 569, 569, 569, 569, 569, 569, 569, 569, 569, 569, 569, 569, 569, 569, 569, 569, 569, 569, 569, 569, 569, 569, 569, 569, 569, 569, 569, 569, 569, 569, 569, 569, 569, 569, 569, 569, 569, 569, 569, 569, 569, 569, 569, 569, 569, 569, 569, 569, 569, 569, 569, 569, 569, 569, 569, 569, 569, 569, 569, 569, 569, 569, 569, 569, 569, 569, 569, 569, 569, 569, 569, 569, 569, 569, 569, 569, 569, 569, 569, 569, 569, 569, 569, 569, 569, 569, 569, 569, 569, 569, 569, 569, 569, 569, 569, 569, 569, 569, 569, 569, 569, 569, 569, 569, 569, 569, 569, 569, 569, 569, 569, 569, 569, 569, 569, 569, 569, 569, 569, 569, 569, 569, 569, 569, 569, 569, 569, 569, 569, 569, 569, 569, 569, 569, 569, 569, 569, 569, 569, 569, 569, 569, 569, 569, 569, 569, 569, 569, 569, 569, 569, 569, 569, 569, 569, 569, 569, 569, 569, 569, 569, 569, 569, 569, 569, 569, 569, 569, 569, 569, 569, 569, 569, 569, 569, 569, 569, 569, 569, 569, 569, 569, 569, 569, 569, 569, 569, 569, 569, 569, 569, 569, 569, 569, 569, 569, 569, 569, 569, 569, 569, 569, 569, 569, 569, 569, 569, 569, 569, 569, 569, 569, 569, 569, 569, 569, 569, 569, 569, 569, 569, 569, 569, 569, 569, 569, 569, 569, 569, 569, 569, 569, 569, 569, 569, 569, 569, 569, 569, 569, 569, 569, 569, 569, 569, 569, 569, 569, 569, 569, 569, 569, 569, 569, 569, 569, 569, 569, 569, 569, 569, 569, 569, 569, 569, 569, 569, 569, 569, 569, 569, 569, 569, 569, 569, 569, 569, 569, 569, 569, 569, 569, 569, 569, 569, 569, 569, 569, 569, 569, 569, 569, 569, 569, 569, 569, 569, 569, 569, 569, 569, 569, 569, 569, 569, 569, 569, 569, 569, 569, 569, 569, 569, 569, 569, 569, 569, 569, 569, 569}, + {568, 568, 568, 568, 568, 568, 568, 568, 568, 568, 568, 568, 568, 568, 568, 568, 568, 568, 568, 568, 568, 568, 568, 568, 568, 568, 568, 568, 568, 568, 568, 568, 568, 568, 568, 568, 568, 568, 568, 568, 568, 568, 568, 568, 568, 568, 568, 568, 568, 568, 568, 568, 568, 568, 568, 568, 568, 568, 568, 568, 568, 568, 568, 568, 568, 568, 568, 568, 568, 568, 568, 568, 568, 568, 568, 568, 568, 568, 568, 568, 568, 568, 568, 568, 568, 568, 568, 568, 568, 568, 568, 568, 568, 568, 568, 568, 568, 568, 568, 568, 568, 568, 568, 568, 568, 568, 568, 568, 568, 568, 568, 568, 568, 568, 568, 568, 568, 568, 568, 568, 568, 568, 568, 568, 568, 568, 568, 568, 568, 568, 568, 568, 568, 568, 568, 568, 568, 568, 568, 568, 
568, 568, 568, 568, 568, 568, 568, 568, 568, 568, 568, 568, 568, 568, 568, 568, 568, 568, 568, 568, 568, 568, 568, 568, 568, 568, 568, 568, 568, 568, 568, 568, 568, 568, 568, 568, 568, 568, 568, 568, 568, 568, 568, 568, 568, 568, 568, 568, 568, 568, 568, 568, 568, 568, 568, 568, 568, 568, 568, 568, 568, 568, 568, 568, 568, 568, 568, 568, 568, 568, 568, 568, 568, 568, 568, 568, 568, 568, 568, 568, 568, 568, 568, 568, 568, 568, 568, 568, 568, 568, 568, 568, 568, 568, 568, 568, 568, 568, 568, 568, 568, 568, 568, 568, 568, 568, 568, 568, 568, 568, 568, 568, 568, 568, 568, 568, 568, 568, 568, 568, 568, 568, 568, 568, 568, 568, 568, 568, 568, 568, 568, 568, 568, 568, 568, 568, 568, 568, 568, 568, 568, 568, 568, 568, 568, 568, 568, 568, 568, 568, 568, 568, 568, 568, 568, 568, 568, 568, 568, 568, 568, 568, 568, 568, 568, 568, 568, 568, 568, 568, 568, 568, 568, 568, 568, 568, 568, 568, 568, 568, 568, 568, 568, 568, 568, 568, 568, 568, 568, 568, 568, 568, 568, 568, 568, 568, 568, 568, 568, 568, 568, 568, 568, 568, 568, 568, 568, 568, 568, 568, 568, 568, 568, 568, 568, 568, 568, 568, 568, 568, 568, 568, 568, 568, 568, 568, 568, 568, 568, 568, 568, 568, 568, 568, 568, 568, 568, 568, 568, 568, 568, 568, 568, 568, 568, 568, 568, 568, 568, 568, 568, 568, 568, 568, 568, 568, 568, 568, 568, 568, 568, 568, 568, 568, 568, 568, 568, 568, 568, 568, 568, 568, 568, 568, 568, 568, 568, 568, 568, 568, 568, 568, 568, 568, 568, 568, 568, 568, 568, 568, 568, 568, 568, 568, 568, 568, 568, 568, 568, 568, 568, 568, 568, 568, 568, 568, 568, 568, 568, 568, 568, 568, 568, 568, 568, 568, 568, 568, 568, 568, 568, 568, 568, 568, 568, 568, 568, 568, 568, 568, 568, 568, 568, 568, 568, 568, 568, 568, 568, 568, 568, 568, 568, 568, 568, 568, 568, 568, 568, 568, 568, 568, 568, 568, 568, 568, 568, 568, 568, 568, 568, 568, 568, 568, 568, 568, 568, 568, 568, 568, 568, 568, 568, 568, 568, 568, 568, 568, 568, 568, 568, 568, 568}, + {567, 567, 567, 567, 567, 567, 567, 567, 567, 567, 567, 567, 567, 567, 567, 567, 567, 567, 567, 567, 567, 567, 567, 567, 567, 567, 567, 567, 567, 567, 567, 567, 567, 567, 567, 567, 567, 567, 567, 567, 567, 567, 567, 567, 567, 567, 567, 567, 567, 567, 567, 567, 567, 567, 567, 567, 567, 567, 567, 567, 567, 567, 567, 567, 567, 567, 567, 567, 567, 567, 567, 567, 567, 567, 567, 567, 567, 567, 567, 567, 567, 567, 567, 567, 567, 567, 567, 567, 567, 567, 567, 567, 567, 567, 567, 567, 567, 567, 567, 567, 567, 567, 567, 567, 567, 567, 567, 567, 567, 567, 567, 567, 567, 567, 567, 567, 567, 567, 567, 567, 567, 567, 567, 567, 567, 567, 567, 567, 567, 567, 567, 567, 567, 567, 567, 567, 567, 567, 567, 567, 567, 567, 567, 567, 567, 567, 567, 567, 567, 567, 567, 567, 567, 567, 567, 567, 567, 567, 567, 567, 567, 567, 567, 567, 567, 567, 567, 567, 567, 567, 567, 567, 567, 567, 567, 567, 567, 567, 567, 567, 567, 567, 567, 567, 567, 567, 567, 567, 567, 567, 567, 567, 567, 567, 567, 567, 567, 567, 567, 567, 567, 567, 567, 567, 567, 567, 567, 567, 567, 567, 567, 567, 567, 567, 567, 567, 567, 567, 567, 567, 567, 567, 567, 567, 567, 567, 567, 567, 567, 567, 567, 567, 567, 567, 567, 567, 567, 567, 567, 567, 567, 567, 567, 567, 567, 567, 567, 567, 567, 567, 567, 567, 567, 567, 567, 567, 567, 567, 567, 567, 567, 567, 567, 567, 567, 567, 567, 567, 567, 567, 567, 567, 567, 567, 567, 567, 567, 567, 567, 567, 567, 567, 567, 567, 567, 567, 567, 567, 567, 567, 567, 567, 567, 567, 567, 567, 567, 567, 567, 567, 567, 567, 567, 567, 567, 567, 567, 567, 567, 567, 567, 567, 567, 567, 567, 567, 567, 567, 567, 567, 567, 567, 567, 567, 567, 567, 567, 
567, 567, 567, 567, 567, 567, 567, 567, 567, 567, 567, 567, 567, 567, 567, 567, 567, 567, 567, 567, 567, 567, 567, 567, 567, 567, 567, 567, 567, 567, 567, 567, 567, 567, 567, 567, 567, 567, 567, 567, 567, 567, 567, 567, 567, 567, 567, 567, 567, 567, 567, 567, 567, 567, 567, 567, 567, 567, 567, 567, 567, 567, 567, 567, 567, 567, 567, 567, 567, 567, 567, 567, 567, 567, 567, 567, 567, 567, 567, 567, 567, 567, 567, 567, 567, 567, 567, 567, 567, 567, 567, 567, 567, 567, 567, 567, 567, 567, 567, 567, 567, 567, 567, 567, 567, 567, 567, 567, 567, 567, 567, 567, 567, 567, 567, 567, 567, 567, 567, 567, 567, 567, 567, 567, 567, 567, 567, 567, 567, 567, 567, 567, 567, 567, 567, 567, 567, 567, 567, 567, 567, 567, 567, 567, 567, 567, 567, 567, 567, 567, 567, 567, 567, 567, 567, 567, 567, 567, 567, 567, 567, 567, 567, 567, 567, 567, 567, 567, 567, 567, 567, 567, 567, 567, 567, 567, 567, 567, 567, 567, 567, 567, 567, 567, 567, 567, 567, 567, 567, 567, 567, 567, 567, 567, 567, 567}, + // 375 + {566, 566, 566, 566, 566, 566, 566, 566, 566, 566, 566, 566, 566, 566, 566, 566, 566, 566, 566, 566, 566, 566, 566, 566, 566, 566, 566, 566, 566, 566, 566, 566, 566, 566, 566, 566, 566, 566, 566, 566, 566, 566, 566, 566, 566, 566, 566, 566, 566, 566, 566, 566, 566, 566, 566, 566, 566, 566, 566, 566, 566, 566, 566, 566, 566, 566, 566, 566, 566, 566, 566, 566, 566, 566, 566, 566, 566, 566, 566, 566, 566, 566, 566, 566, 566, 566, 566, 566, 566, 566, 566, 566, 566, 566, 566, 566, 566, 566, 566, 566, 566, 566, 566, 566, 566, 566, 566, 566, 566, 566, 566, 566, 566, 566, 566, 566, 566, 566, 566, 566, 566, 566, 566, 566, 566, 566, 566, 566, 566, 566, 566, 566, 566, 566, 566, 566, 566, 566, 566, 566, 566, 566, 566, 566, 566, 566, 566, 566, 566, 566, 566, 566, 566, 566, 566, 566, 566, 566, 566, 566, 566, 566, 566, 566, 566, 566, 566, 566, 566, 566, 566, 566, 566, 566, 566, 566, 566, 566, 566, 566, 566, 566, 566, 566, 566, 566, 566, 566, 566, 566, 566, 566, 566, 566, 566, 566, 566, 566, 566, 566, 566, 566, 566, 566, 566, 566, 566, 566, 566, 566, 566, 566, 566, 566, 566, 566, 566, 566, 566, 566, 566, 566, 566, 566, 566, 566, 566, 566, 566, 566, 566, 566, 566, 566, 566, 566, 566, 566, 566, 566, 566, 566, 566, 566, 566, 566, 566, 566, 566, 566, 566, 566, 566, 566, 566, 566, 566, 566, 566, 566, 566, 566, 566, 566, 566, 566, 566, 566, 566, 566, 566, 566, 566, 566, 566, 566, 566, 566, 566, 566, 566, 566, 566, 566, 566, 566, 566, 566, 566, 566, 566, 566, 566, 566, 566, 566, 566, 566, 566, 566, 566, 566, 566, 566, 566, 566, 566, 566, 566, 566, 566, 566, 566, 566, 566, 566, 566, 566, 566, 566, 566, 566, 566, 566, 566, 566, 566, 566, 566, 566, 566, 566, 566, 566, 566, 566, 566, 566, 566, 566, 566, 566, 566, 566, 566, 566, 566, 566, 566, 566, 566, 566, 566, 566, 566, 566, 566, 566, 566, 566, 566, 566, 566, 566, 566, 566, 566, 566, 566, 566, 566, 566, 566, 566, 566, 566, 566, 566, 566, 566, 566, 566, 566, 566, 566, 566, 566, 566, 566, 566, 566, 566, 566, 566, 566, 566, 566, 566, 566, 566, 566, 566, 566, 566, 566, 566, 566, 566, 566, 566, 566, 566, 566, 566, 566, 566, 566, 566, 566, 566, 566, 566, 566, 566, 566, 566, 566, 566, 566, 566, 566, 566, 566, 566, 566, 566, 566, 566, 566, 566, 566, 566, 566, 566, 566, 566, 566, 566, 566, 566, 566, 566, 566, 566, 566, 566, 566, 566, 566, 566, 566, 566, 566, 566, 566, 566, 566, 566, 566, 566, 566, 566, 566, 566, 566, 566, 566, 566, 566, 566, 566, 566, 566, 566, 566, 566, 566, 566, 566, 566, 566, 566, 566, 566, 566, 566, 566, 566, 566, 566, 566, 566, 566, 566, 566, 566, 566, 566, 566, 566, 566, 566, 
566, 566, 566, 566, 566, 566, 566, 566, 566, 566, 566}, + {565, 565, 565, 565, 565, 565, 565, 565, 565, 565, 565, 565, 565, 565, 565, 565, 565, 565, 565, 565, 565, 565, 565, 565, 565, 565, 565, 565, 565, 565, 565, 565, 565, 565, 565, 565, 565, 565, 565, 565, 565, 565, 565, 565, 565, 565, 565, 565, 565, 565, 565, 565, 565, 565, 565, 565, 565, 565, 565, 565, 565, 565, 565, 565, 565, 565, 565, 565, 565, 565, 565, 565, 565, 565, 565, 565, 565, 565, 565, 565, 565, 565, 565, 565, 565, 565, 565, 565, 565, 565, 565, 565, 565, 565, 565, 565, 565, 565, 565, 565, 565, 565, 565, 565, 565, 565, 565, 565, 565, 565, 565, 565, 565, 565, 565, 565, 565, 565, 565, 565, 565, 565, 565, 565, 565, 565, 565, 565, 565, 565, 565, 565, 565, 565, 565, 565, 565, 565, 565, 565, 565, 565, 565, 565, 565, 565, 565, 565, 565, 565, 565, 565, 565, 565, 565, 565, 565, 565, 565, 565, 565, 565, 565, 565, 565, 565, 565, 565, 565, 565, 565, 565, 565, 565, 565, 565, 565, 565, 565, 565, 565, 565, 565, 565, 565, 565, 565, 565, 565, 565, 565, 565, 565, 565, 565, 565, 565, 565, 565, 565, 565, 565, 565, 565, 565, 565, 565, 565, 565, 565, 565, 565, 565, 565, 565, 565, 565, 565, 565, 565, 565, 565, 565, 565, 565, 565, 565, 565, 565, 565, 565, 565, 565, 565, 565, 565, 565, 565, 565, 565, 565, 565, 565, 565, 565, 565, 565, 565, 565, 565, 565, 565, 565, 565, 565, 565, 565, 565, 565, 565, 565, 565, 565, 565, 565, 565, 565, 565, 565, 565, 565, 565, 565, 565, 565, 565, 565, 565, 565, 565, 565, 565, 565, 565, 565, 565, 565, 565, 565, 565, 565, 565, 565, 565, 565, 565, 565, 565, 565, 565, 565, 565, 565, 565, 565, 565, 565, 565, 565, 565, 565, 565, 565, 565, 565, 565, 565, 565, 565, 565, 565, 565, 565, 565, 565, 565, 565, 565, 565, 565, 565, 565, 565, 565, 565, 565, 565, 565, 565, 565, 565, 565, 565, 565, 565, 565, 565, 565, 565, 565, 565, 565, 565, 565, 565, 565, 565, 565, 565, 565, 565, 565, 565, 565, 565, 565, 565, 565, 565, 565, 565, 565, 565, 565, 565, 565, 565, 565, 565, 565, 565, 565, 565, 565, 565, 565, 565, 565, 565, 565, 565, 565, 565, 565, 565, 565, 565, 565, 565, 565, 565, 565, 565, 565, 565, 565, 565, 565, 565, 565, 565, 565, 565, 565, 565, 565, 565, 565, 565, 565, 565, 565, 565, 565, 565, 565, 565, 565, 565, 565, 565, 565, 565, 565, 565, 565, 565, 565, 565, 565, 565, 565, 565, 565, 565, 565, 565, 565, 565, 565, 565, 565, 565, 565, 565, 565, 565, 565, 565, 565, 565, 565, 565, 565, 565, 565, 565, 565, 565, 565, 565, 565, 565, 565, 565, 565, 565, 565, 565, 565, 565, 565, 565, 565, 565, 565, 565, 565, 565, 565, 565, 565, 565, 565, 565, 565, 565, 565, 565, 565, 565, 565, 565, 565, 565, 565, 565, 565, 565, 565, 565, 565, 565, 565, 565, 565, 565, 565, 565, 565, 565, 565, 565}, + {564, 564, 564, 564, 564, 564, 564, 564, 564, 564, 564, 564, 564, 564, 564, 564, 564, 564, 564, 564, 564, 564, 564, 564, 564, 564, 564, 564, 564, 564, 564, 564, 564, 564, 564, 564, 564, 564, 564, 564, 564, 564, 564, 564, 564, 564, 564, 564, 564, 564, 564, 564, 564, 564, 564, 564, 564, 564, 564, 564, 564, 564, 564, 564, 564, 564, 564, 564, 564, 564, 564, 564, 564, 564, 564, 564, 564, 564, 564, 564, 564, 564, 564, 564, 564, 564, 564, 564, 564, 564, 564, 564, 564, 564, 564, 564, 564, 564, 564, 564, 564, 564, 564, 564, 564, 564, 564, 564, 564, 564, 564, 564, 564, 564, 564, 564, 564, 564, 564, 564, 564, 564, 564, 564, 564, 564, 564, 564, 564, 564, 564, 564, 564, 564, 564, 564, 564, 564, 564, 564, 564, 564, 564, 564, 564, 564, 564, 564, 564, 564, 564, 564, 564, 564, 564, 564, 564, 564, 564, 564, 564, 564, 564, 564, 564, 564, 564, 564, 564, 564, 564, 564, 564, 564, 564, 
564, 564, 564, 564, 564, 564, 564, 564, 564, 564, 564, 564, 564, 564, 564, 564, 564, 564, 564, 564, 564, 564, 564, 564, 564, 564, 564, 564, 564, 564, 564, 564, 564, 564, 564, 564, 564, 564, 564, 564, 564, 564, 564, 564, 564, 564, 564, 564, 564, 564, 564, 564, 564, 564, 564, 564, 564, 564, 564, 564, 564, 564, 564, 564, 564, 564, 564, 564, 564, 564, 564, 564, 564, 564, 564, 564, 564, 564, 564, 564, 564, 564, 564, 564, 564, 564, 564, 564, 564, 564, 564, 564, 564, 564, 564, 564, 564, 564, 564, 564, 564, 564, 564, 564, 564, 564, 564, 564, 564, 564, 564, 564, 564, 564, 564, 564, 564, 564, 564, 564, 564, 564, 564, 564, 564, 564, 564, 564, 564, 564, 564, 564, 564, 564, 564, 564, 564, 564, 564, 564, 564, 564, 564, 564, 564, 564, 564, 564, 564, 564, 564, 564, 564, 564, 564, 564, 564, 564, 564, 564, 564, 564, 564, 564, 564, 564, 564, 564, 564, 564, 564, 564, 564, 564, 564, 564, 564, 564, 564, 564, 564, 564, 564, 564, 564, 564, 564, 564, 564, 564, 564, 564, 564, 564, 564, 564, 564, 564, 564, 564, 564, 564, 564, 564, 564, 564, 564, 564, 564, 564, 564, 564, 564, 564, 564, 564, 564, 564, 564, 564, 564, 564, 564, 564, 564, 564, 564, 564, 564, 564, 564, 564, 564, 564, 564, 564, 564, 564, 564, 564, 564, 564, 564, 564, 564, 564, 564, 564, 564, 564, 564, 564, 564, 564, 564, 564, 564, 564, 564, 564, 564, 564, 564, 564, 564, 564, 564, 564, 564, 564, 564, 564, 564, 564, 564, 564, 564, 564, 564, 564, 564, 564, 564, 564, 564, 564, 564, 564, 564, 564, 564, 564, 564, 564, 564, 564, 564, 564, 564, 564, 564, 564, 564, 564, 564, 564, 564, 564, 564, 564, 564, 564, 564, 564, 564, 564, 564, 564, 564, 564, 564, 564, 564, 564, 564, 564, 564, 564, 564, 564, 564, 564, 564, 564, 564, 564, 564, 564, 564, 564, 564, 564, 564, 564, 564, 564, 564, 564}, + {563, 563, 563, 563, 563, 563, 563, 563, 563, 563, 563, 563, 563, 563, 563, 563, 563, 563, 563, 563, 563, 563, 563, 563, 563, 563, 563, 563, 563, 563, 563, 563, 563, 563, 563, 563, 563, 563, 563, 563, 563, 563, 563, 563, 563, 563, 563, 563, 563, 563, 563, 563, 563, 563, 563, 563, 563, 563, 563, 563, 563, 563, 563, 563, 563, 563, 563, 563, 563, 563, 563, 563, 563, 563, 563, 563, 563, 563, 563, 563, 563, 563, 563, 563, 563, 563, 563, 563, 563, 563, 563, 563, 563, 563, 563, 563, 563, 563, 563, 563, 563, 563, 563, 563, 563, 563, 563, 563, 563, 563, 563, 563, 563, 563, 563, 563, 563, 563, 563, 563, 563, 563, 563, 563, 563, 563, 563, 563, 563, 563, 563, 563, 563, 563, 563, 563, 563, 563, 563, 563, 563, 563, 563, 563, 563, 563, 563, 563, 563, 563, 563, 563, 563, 563, 563, 563, 563, 563, 563, 563, 563, 563, 563, 563, 563, 563, 563, 563, 563, 563, 563, 563, 563, 563, 563, 563, 563, 563, 563, 563, 563, 563, 563, 563, 563, 563, 563, 563, 563, 563, 563, 563, 563, 563, 563, 563, 563, 563, 563, 563, 563, 563, 563, 563, 563, 563, 563, 563, 563, 563, 563, 563, 563, 563, 563, 563, 563, 563, 563, 563, 563, 563, 563, 563, 563, 563, 563, 563, 563, 563, 563, 563, 563, 563, 563, 563, 563, 563, 563, 563, 563, 563, 563, 563, 563, 563, 563, 563, 563, 563, 563, 563, 563, 563, 563, 563, 563, 563, 563, 563, 563, 563, 563, 563, 563, 563, 563, 563, 563, 563, 563, 563, 563, 563, 563, 563, 563, 563, 563, 563, 563, 563, 563, 563, 563, 563, 563, 563, 563, 563, 563, 563, 563, 563, 563, 563, 563, 563, 563, 563, 563, 563, 563, 563, 563, 563, 563, 563, 563, 563, 563, 563, 563, 563, 563, 563, 563, 563, 563, 563, 563, 563, 563, 563, 563, 563, 563, 563, 563, 563, 563, 563, 563, 563, 563, 563, 563, 563, 563, 563, 563, 563, 563, 563, 563, 563, 563, 563, 563, 563, 563, 563, 563, 563, 563, 563, 563, 563, 563, 563, 563, 563, 
563, 563, 563, 563, 563, 563, 563, 563, 563, 563, 563, 563, 563, 563, 563, 563, 563, 563, 563, 563, 563, 563, 563, 563, 563, 563, 563, 563, 563, 563, 563, 563, 563, 563, 563, 563, 563, 563, 563, 563, 563, 563, 563, 563, 563, 563, 563, 563, 563, 563, 563, 563, 563, 563, 563, 563, 563, 563, 563, 563, 563, 563, 563, 563, 563, 563, 563, 563, 563, 563, 563, 563, 563, 563, 563, 563, 563, 563, 563, 563, 563, 563, 563, 563, 563, 563, 563, 563, 563, 563, 563, 563, 563, 563, 563, 563, 563, 563, 563, 563, 563, 563, 563, 563, 563, 563, 563, 563, 563, 563, 563, 563, 563, 563, 563, 563, 563, 563, 563, 563, 563, 563, 563, 563, 563, 563, 563, 563, 563, 563, 563, 563, 563, 563, 563, 563, 563, 563, 563, 563, 563, 563, 563, 563, 563, 563, 563, 563, 563, 563, 563, 563, 563, 563, 563, 563, 563, 563, 563, 563, 563}, + {562, 562, 562, 562, 562, 562, 562, 562, 562, 562, 562, 562, 562, 562, 562, 562, 562, 562, 562, 562, 562, 562, 562, 562, 562, 562, 562, 562, 562, 562, 562, 562, 562, 562, 562, 562, 562, 562, 562, 562, 562, 562, 562, 562, 562, 562, 562, 562, 562, 562, 562, 562, 562, 562, 562, 562, 562, 562, 562, 562, 562, 562, 562, 562, 562, 562, 562, 562, 562, 562, 562, 562, 562, 562, 562, 562, 562, 562, 562, 562, 562, 562, 562, 562, 562, 562, 562, 562, 562, 562, 562, 562, 562, 562, 562, 562, 562, 562, 562, 562, 562, 562, 562, 562, 562, 562, 562, 562, 562, 562, 562, 562, 562, 562, 562, 562, 562, 562, 562, 562, 562, 562, 562, 562, 562, 562, 562, 562, 562, 562, 562, 562, 562, 562, 562, 562, 562, 562, 562, 562, 562, 562, 562, 562, 562, 562, 562, 562, 562, 562, 562, 562, 562, 562, 562, 562, 562, 562, 562, 562, 562, 562, 562, 562, 562, 562, 562, 562, 562, 562, 562, 562, 562, 562, 562, 562, 562, 562, 562, 562, 562, 562, 562, 562, 562, 562, 562, 562, 562, 562, 562, 562, 562, 562, 562, 562, 562, 562, 562, 562, 562, 562, 562, 562, 562, 562, 562, 562, 562, 562, 562, 562, 562, 562, 562, 562, 562, 562, 562, 562, 562, 562, 562, 562, 562, 562, 562, 562, 562, 562, 562, 562, 562, 562, 562, 562, 562, 562, 562, 562, 562, 562, 562, 562, 562, 562, 562, 562, 562, 562, 562, 562, 562, 562, 562, 562, 562, 562, 562, 562, 562, 562, 562, 562, 562, 562, 562, 562, 562, 562, 562, 562, 562, 562, 562, 562, 562, 562, 562, 562, 562, 562, 562, 562, 562, 562, 562, 562, 562, 562, 562, 562, 562, 562, 562, 562, 562, 562, 562, 562, 562, 562, 562, 562, 562, 562, 562, 562, 562, 562, 562, 562, 562, 562, 562, 562, 562, 562, 562, 562, 562, 562, 562, 562, 562, 562, 562, 562, 562, 562, 562, 562, 562, 562, 562, 562, 562, 562, 562, 562, 562, 562, 562, 562, 562, 562, 562, 562, 562, 562, 562, 562, 562, 562, 562, 562, 562, 562, 562, 562, 562, 562, 562, 562, 562, 562, 562, 562, 562, 562, 562, 562, 562, 562, 562, 562, 562, 562, 562, 562, 562, 562, 562, 562, 562, 562, 562, 562, 562, 562, 562, 562, 562, 562, 562, 562, 562, 562, 562, 562, 562, 562, 562, 562, 562, 562, 562, 562, 562, 562, 562, 562, 562, 562, 562, 562, 562, 562, 562, 562, 562, 562, 562, 562, 562, 562, 562, 562, 562, 562, 562, 562, 562, 562, 562, 562, 562, 562, 562, 562, 562, 562, 562, 562, 562, 562, 562, 562, 562, 562, 562, 562, 562, 562, 562, 562, 562, 562, 562, 562, 562, 562, 562, 562, 562, 562, 562, 562, 562, 562, 562, 562, 562, 562, 562, 562, 562, 562, 562, 562, 562, 562, 562, 562, 562, 562, 562, 562, 562, 562, 562, 562, 562, 562, 562, 562, 562, 562, 562, 562, 562, 562, 562, 562, 562, 562, 562, 562, 562, 562, 562, 562, 562, 562, 562, 562, 562, 562, 562, 562, 562, 562, 562}, + // 380 + {561, 561, 561, 561, 561, 561, 561, 561, 561, 561, 561, 561, 561, 561, 561, 561, 561, 561, 561, 561, 561, 561, 561, 
561, 561, 561, 561, 561, 561, 561, 561, 561, 561, 561, 561, 561, 561, 561, 561, 561, 561, 561, 561, 561, 561, 561, 561, 561, 561, 561, 561, 561, 561, 561, 561, 561, 561, 561, 561, 561, 561, 561, 561, 561, 561, 561, 561, 561, 561, 561, 561, 561, 561, 561, 561, 561, 561, 561, 561, 561, 561, 561, 561, 561, 561, 561, 561, 561, 561, 561, 561, 561, 561, 561, 561, 561, 561, 561, 561, 561, 561, 561, 561, 561, 561, 561, 561, 561, 561, 561, 561, 561, 561, 561, 561, 561, 561, 561, 561, 561, 561, 561, 561, 561, 561, 561, 561, 561, 561, 561, 561, 561, 561, 561, 561, 561, 561, 561, 561, 561, 561, 561, 561, 561, 561, 561, 561, 561, 561, 561, 561, 561, 561, 561, 561, 561, 561, 561, 561, 561, 561, 561, 561, 561, 561, 561, 561, 561, 561, 561, 561, 561, 561, 561, 561, 561, 561, 561, 561, 561, 561, 561, 561, 561, 561, 561, 561, 561, 561, 561, 561, 561, 561, 561, 561, 561, 561, 561, 561, 561, 561, 561, 561, 561, 561, 561, 561, 561, 561, 561, 561, 561, 561, 561, 561, 561, 561, 561, 561, 561, 561, 561, 561, 561, 561, 561, 561, 561, 561, 561, 561, 561, 561, 561, 561, 561, 561, 561, 561, 561, 561, 561, 561, 561, 561, 561, 561, 561, 561, 561, 561, 561, 561, 561, 561, 561, 561, 561, 561, 561, 561, 561, 561, 561, 561, 561, 561, 561, 561, 561, 561, 561, 561, 561, 561, 561, 561, 561, 561, 561, 561, 561, 561, 561, 561, 561, 561, 561, 561, 561, 561, 561, 561, 561, 561, 561, 561, 561, 561, 561, 561, 561, 561, 561, 561, 561, 561, 561, 561, 561, 561, 561, 561, 561, 561, 561, 561, 561, 561, 561, 561, 561, 561, 561, 561, 561, 561, 561, 561, 561, 561, 561, 561, 561, 561, 561, 561, 561, 561, 561, 561, 561, 561, 561, 561, 561, 561, 561, 561, 561, 561, 561, 561, 561, 561, 561, 561, 561, 561, 561, 561, 561, 561, 561, 561, 561, 561, 561, 561, 561, 561, 561, 561, 561, 561, 561, 561, 561, 561, 561, 561, 561, 561, 561, 561, 561, 561, 561, 561, 561, 561, 561, 561, 561, 561, 561, 561, 561, 561, 561, 561, 561, 561, 561, 561, 561, 561, 561, 561, 561, 561, 561, 561, 561, 561, 561, 561, 561, 561, 561, 561, 561, 561, 561, 561, 561, 561, 561, 561, 561, 561, 561, 561, 561, 561, 561, 561, 561, 561, 561, 561, 561, 561, 561, 561, 561, 561, 561, 561, 561, 561, 561, 561, 561, 561, 561, 561, 561, 561, 561, 561, 561, 561, 561, 561, 561, 561, 561, 561, 561, 561, 561, 561, 561, 561, 561, 561, 561, 561, 561, 561, 561, 561, 561, 561, 561, 561, 561, 561, 561, 561, 561, 561, 561, 561, 561, 561, 561, 561, 561, 561, 561, 561, 561, 561, 561, 561, 561, 561, 561, 561, 561, 561, 561, 561, 561, 561, 561, 561, 561, 561, 561, 561}, + {560, 560, 560, 560, 560, 560, 560, 560, 560, 560, 560, 560, 560, 560, 560, 560, 560, 560, 560, 560, 560, 560, 560, 560, 560, 560, 560, 560, 560, 560, 560, 560, 560, 560, 560, 560, 560, 560, 560, 560, 560, 560, 560, 560, 560, 560, 560, 560, 560, 560, 560, 560, 560, 560, 560, 560, 560, 560, 560, 560, 560, 560, 560, 560, 560, 560, 560, 560, 560, 560, 560, 560, 560, 560, 560, 560, 560, 560, 560, 560, 560, 560, 560, 560, 560, 560, 560, 560, 560, 560, 560, 560, 560, 560, 560, 560, 560, 560, 560, 560, 560, 560, 560, 560, 560, 560, 560, 560, 560, 560, 560, 560, 560, 560, 560, 560, 560, 560, 560, 560, 560, 560, 560, 560, 560, 560, 560, 560, 560, 560, 560, 560, 560, 560, 560, 560, 560, 560, 560, 560, 560, 560, 560, 560, 560, 560, 560, 560, 560, 560, 560, 560, 560, 560, 560, 560, 560, 560, 560, 560, 560, 560, 560, 560, 560, 560, 560, 560, 560, 560, 560, 560, 560, 560, 560, 560, 560, 560, 560, 560, 560, 560, 560, 560, 560, 560, 560, 560, 560, 560, 560, 560, 560, 560, 560, 560, 560, 560, 560, 560, 560, 560, 560, 560, 560, 560, 560, 560, 560, 560, 
560, 560, 560, 560, 560, 560, 560, 560, 560, 560, 560, 560, 560, 560, 560, 560, 560, 560, 560, 560, 560, 560, 560, 560, 560, 560, 560, 560, 560, 560, 560, 560, 560, 560, 560, 560, 560, 560, 560, 560, 560, 560, 560, 560, 560, 560, 560, 560, 560, 560, 560, 560, 560, 560, 560, 560, 560, 560, 560, 560, 560, 560, 560, 560, 560, 560, 560, 560, 560, 560, 560, 560, 560, 560, 560, 560, 560, 560, 560, 560, 560, 560, 560, 560, 560, 560, 560, 560, 560, 560, 560, 560, 560, 560, 560, 560, 560, 560, 560, 560, 560, 560, 560, 560, 560, 560, 560, 560, 560, 560, 560, 560, 560, 560, 560, 560, 560, 560, 560, 560, 560, 560, 560, 560, 560, 560, 560, 560, 560, 560, 560, 560, 560, 560, 560, 560, 560, 560, 560, 560, 560, 560, 560, 560, 560, 560, 560, 560, 560, 560, 560, 560, 560, 560, 560, 560, 560, 560, 560, 560, 560, 560, 560, 560, 560, 560, 560, 560, 560, 560, 560, 560, 560, 560, 560, 560, 560, 560, 560, 560, 560, 560, 560, 560, 560, 560, 560, 560, 560, 560, 560, 560, 560, 560, 560, 560, 560, 560, 560, 560, 560, 560, 560, 560, 560, 560, 560, 560, 560, 560, 560, 560, 560, 560, 560, 560, 560, 560, 560, 560, 560, 560, 560, 560, 560, 560, 560, 560, 560, 560, 560, 560, 560, 560, 560, 560, 560, 560, 560, 560, 560, 560, 560, 560, 560, 560, 560, 560, 560, 560, 560, 560, 560, 560, 560, 560, 560, 560, 560, 560, 560, 560, 560, 560, 560, 560, 560, 560, 560, 560, 560, 560, 560, 560, 560, 560, 560, 560, 560, 560, 560, 560, 560, 560, 560, 560, 560, 560, 560, 560, 560, 560, 560, 560, 560, 560, 560, 560, 560, 560, 560, 560, 560, 560, 560, 560, 560, 560, 560, 560, 560, 560, 560}, + {559, 559, 559, 559, 559, 559, 559, 559, 559, 559, 559, 559, 559, 559, 559, 559, 559, 559, 559, 559, 559, 559, 559, 559, 559, 559, 559, 559, 559, 559, 559, 559, 559, 559, 559, 559, 559, 559, 559, 559, 559, 559, 559, 559, 559, 559, 559, 559, 559, 559, 559, 559, 559, 559, 559, 559, 559, 559, 559, 559, 559, 559, 559, 559, 559, 559, 559, 559, 559, 559, 559, 559, 559, 559, 559, 559, 559, 559, 559, 559, 559, 559, 559, 559, 559, 559, 559, 559, 559, 559, 559, 559, 559, 559, 559, 559, 559, 559, 559, 559, 559, 559, 559, 559, 559, 559, 559, 559, 559, 559, 559, 559, 559, 559, 559, 559, 559, 559, 559, 559, 559, 559, 559, 559, 559, 559, 559, 559, 559, 559, 559, 559, 559, 559, 559, 559, 559, 559, 559, 559, 559, 559, 559, 559, 559, 559, 559, 559, 559, 559, 559, 559, 559, 559, 559, 559, 559, 559, 559, 559, 559, 559, 559, 559, 559, 559, 559, 559, 559, 559, 559, 559, 559, 559, 559, 559, 559, 559, 559, 559, 559, 559, 559, 559, 559, 559, 559, 559, 559, 559, 559, 559, 559, 559, 559, 559, 559, 559, 559, 559, 559, 559, 559, 559, 559, 559, 559, 559, 559, 559, 559, 559, 559, 559, 559, 559, 559, 559, 559, 559, 559, 559, 559, 559, 559, 559, 559, 559, 559, 559, 559, 559, 559, 559, 559, 559, 559, 559, 559, 559, 559, 559, 559, 559, 559, 559, 559, 559, 559, 559, 559, 559, 559, 559, 559, 559, 559, 559, 559, 559, 559, 559, 559, 559, 559, 559, 559, 559, 559, 559, 559, 559, 559, 559, 559, 559, 559, 559, 559, 559, 559, 559, 559, 559, 559, 559, 559, 559, 559, 559, 559, 559, 559, 559, 559, 559, 559, 559, 559, 559, 559, 559, 559, 559, 559, 559, 559, 559, 559, 559, 559, 559, 559, 559, 559, 559, 559, 559, 559, 559, 559, 559, 559, 559, 559, 559, 559, 559, 559, 559, 559, 559, 559, 559, 559, 559, 559, 559, 559, 559, 559, 559, 559, 559, 559, 559, 559, 559, 559, 559, 559, 559, 559, 559, 559, 559, 559, 559, 559, 559, 559, 559, 559, 559, 559, 559, 559, 559, 559, 559, 559, 559, 559, 559, 559, 559, 559, 559, 559, 559, 559, 559, 559, 559, 559, 559, 559, 559, 559, 559, 559, 559, 559, 559, 559, 559, 559, 
559, 559, 559, 559, 559, 559, 559, 559, 559, 559, 559, 559, 559, 559, 559, 559, 559, 559, 559, 559, 559, 559, 559, 559, 559, 559, 559, 559, 559, 559, 559, 559, 559, 559, 559, 559, 559, 559, 559, 559, 559, 559, 559, 559, 559, 559, 559, 559, 559, 559, 559, 559, 559, 559, 559, 559, 559, 559, 559, 559, 559, 559, 559, 559, 559, 559, 559, 559, 559, 559, 559, 559, 559, 559, 559, 559, 559, 559, 559, 559, 559, 559, 559, 559, 559, 559, 559, 559, 559, 559, 559, 559, 559, 559, 559, 559, 559, 559, 559, 559, 559, 559, 559, 559, 559, 559, 559, 559, 559, 559, 559, 559, 559, 559, 559, 559, 559, 559, 559, 559, 559, 559, 559, 559, 559, 559}, + {558, 558, 558, 558, 558, 558, 558, 558, 558, 558, 558, 558, 558, 558, 558, 558, 558, 558, 558, 558, 558, 558, 558, 558, 558, 558, 558, 558, 558, 558, 558, 558, 558, 558, 558, 558, 558, 558, 558, 558, 558, 558, 558, 558, 558, 558, 558, 558, 558, 558, 558, 558, 558, 558, 558, 558, 558, 558, 558, 558, 558, 558, 558, 558, 558, 558, 558, 558, 558, 558, 558, 558, 558, 558, 558, 558, 558, 558, 558, 558, 558, 558, 558, 558, 558, 558, 558, 558, 558, 558, 558, 558, 558, 558, 558, 558, 558, 558, 558, 558, 558, 558, 558, 558, 558, 558, 558, 558, 558, 558, 558, 558, 558, 558, 558, 558, 558, 558, 558, 558, 558, 558, 558, 558, 558, 558, 558, 558, 558, 558, 558, 558, 558, 558, 558, 558, 558, 558, 558, 558, 558, 558, 558, 558, 558, 558, 558, 558, 558, 558, 558, 558, 558, 558, 558, 558, 558, 558, 558, 558, 558, 558, 558, 558, 558, 558, 558, 558, 558, 558, 558, 558, 558, 558, 558, 558, 558, 558, 558, 558, 558, 558, 558, 558, 558, 558, 558, 558, 558, 558, 558, 558, 558, 558, 558, 558, 558, 558, 558, 558, 558, 558, 558, 558, 558, 558, 558, 558, 558, 558, 558, 558, 558, 558, 558, 558, 558, 558, 558, 558, 558, 558, 558, 558, 558, 558, 558, 558, 558, 558, 558, 558, 558, 558, 558, 558, 558, 558, 558, 558, 558, 558, 558, 558, 558, 558, 558, 558, 558, 558, 558, 558, 558, 558, 558, 558, 558, 558, 558, 558, 558, 558, 558, 558, 558, 558, 558, 558, 558, 558, 558, 558, 558, 558, 558, 558, 558, 558, 558, 558, 558, 558, 558, 558, 558, 558, 558, 558, 558, 558, 558, 558, 558, 558, 558, 558, 558, 558, 558, 558, 558, 558, 558, 558, 558, 558, 558, 558, 558, 558, 558, 558, 558, 558, 558, 558, 558, 558, 558, 558, 558, 558, 558, 558, 558, 558, 558, 558, 558, 558, 558, 558, 558, 558, 558, 558, 558, 558, 558, 558, 558, 558, 558, 558, 558, 558, 558, 558, 558, 558, 558, 558, 558, 558, 558, 558, 558, 558, 558, 558, 558, 558, 558, 558, 558, 558, 558, 558, 558, 558, 558, 558, 558, 558, 558, 558, 558, 558, 558, 558, 558, 558, 558, 558, 558, 558, 558, 558, 558, 558, 558, 558, 558, 558, 558, 558, 558, 558, 558, 558, 558, 558, 558, 558, 558, 558, 558, 558, 558, 558, 558, 558, 558, 558, 558, 558, 558, 558, 558, 558, 558, 558, 558, 558, 558, 558, 558, 558, 558, 558, 558, 558, 558, 558, 558, 558, 558, 558, 558, 558, 558, 558, 558, 558, 558, 558, 558, 558, 558, 558, 558, 558, 558, 558, 558, 558, 558, 558, 558, 558, 558, 558, 558, 558, 558, 558, 558, 558, 558, 558, 558, 558, 558, 558, 558, 558, 558, 558, 558, 558, 558, 558, 558, 558, 558, 558, 558, 558, 558, 558, 558, 558, 558, 558, 558, 558, 558, 558, 558, 558, 558, 558, 558, 558, 558, 558, 558, 558, 558, 558, 558, 558, 558, 558, 558, 558, 558, 558, 558, 558, 558, 558, 558}, + {557, 557, 557, 557, 557, 557, 557, 557, 557, 557, 557, 557, 557, 557, 557, 557, 557, 557, 557, 557, 557, 557, 557, 557, 557, 557, 557, 557, 557, 557, 557, 557, 557, 557, 557, 557, 557, 557, 557, 557, 557, 557, 557, 557, 557, 557, 557, 557, 557, 557, 557, 557, 557, 557, 557, 557, 557, 557, 557, 557, 
557, 557, 557, 557, 557, 557, 557, 557, 557, 557, 557, 557, 557, 557, 557, 557, 557, 557, 557, 557, 557, 557, 557, 557, 557, 557, 557, 557, 557, 557, 557, 557, 557, 557, 557, 557, 557, 557, 557, 557, 557, 557, 557, 557, 557, 557, 557, 557, 557, 557, 557, 557, 557, 557, 557, 557, 557, 557, 557, 557, 557, 557, 557, 557, 557, 557, 557, 557, 557, 557, 557, 557, 557, 557, 557, 557, 557, 557, 557, 557, 557, 557, 557, 557, 557, 557, 557, 557, 557, 557, 557, 557, 557, 557, 557, 557, 557, 557, 557, 557, 557, 557, 557, 557, 557, 557, 557, 557, 557, 557, 557, 557, 557, 557, 557, 557, 557, 557, 557, 557, 557, 557, 557, 557, 557, 557, 557, 557, 557, 557, 557, 557, 557, 557, 557, 557, 557, 557, 557, 557, 557, 557, 557, 557, 557, 557, 557, 557, 557, 557, 557, 557, 557, 557, 557, 557, 557, 557, 557, 557, 557, 557, 557, 557, 557, 557, 557, 557, 557, 557, 557, 557, 557, 557, 557, 557, 557, 557, 557, 557, 557, 557, 557, 557, 557, 557, 557, 557, 557, 557, 557, 557, 557, 557, 557, 557, 557, 557, 557, 557, 557, 557, 557, 557, 557, 557, 557, 557, 557, 557, 557, 557, 557, 557, 557, 557, 557, 557, 557, 557, 557, 557, 557, 557, 557, 557, 557, 557, 557, 557, 557, 557, 557, 557, 557, 557, 557, 557, 557, 557, 557, 557, 557, 557, 557, 557, 557, 557, 557, 557, 557, 557, 557, 557, 557, 557, 557, 557, 557, 557, 557, 557, 557, 557, 557, 557, 557, 557, 557, 557, 557, 557, 557, 557, 557, 557, 557, 557, 557, 557, 557, 557, 557, 557, 557, 557, 557, 557, 557, 557, 557, 557, 557, 557, 557, 557, 557, 557, 557, 557, 557, 557, 557, 557, 557, 557, 557, 557, 557, 557, 557, 557, 557, 557, 557, 557, 557, 557, 557, 557, 557, 557, 557, 557, 557, 557, 557, 557, 557, 557, 557, 557, 557, 557, 557, 557, 557, 557, 557, 557, 557, 557, 557, 557, 557, 557, 557, 557, 557, 557, 557, 557, 557, 557, 557, 557, 557, 557, 557, 557, 557, 557, 557, 557, 557, 557, 557, 557, 557, 557, 557, 557, 557, 557, 557, 557, 557, 557, 557, 557, 557, 557, 557, 557, 557, 557, 557, 557, 557, 557, 557, 557, 557, 557, 557, 557, 557, 557, 557, 557, 557, 557, 557, 557, 557, 557, 557, 557, 557, 557, 557, 557, 557, 557, 557, 557, 557, 557, 557, 557, 557, 557, 557, 557, 557, 557, 557, 557, 557, 557, 557, 557, 557, 557, 557, 557, 557, 557, 557, 557, 557, 557, 557, 557, 557, 557, 557, 557, 557, 557, 557, 557, 557, 557, 557, 557, 557, 557, 557, 557, 557, 557, 557}, + // 385 + {556, 556, 556, 556, 556, 556, 556, 556, 556, 556, 556, 556, 556, 556, 556, 556, 556, 556, 556, 556, 556, 556, 556, 556, 556, 556, 556, 556, 556, 556, 556, 556, 556, 556, 556, 556, 556, 556, 556, 556, 556, 556, 556, 556, 556, 556, 556, 556, 556, 556, 556, 556, 556, 556, 556, 556, 556, 556, 556, 556, 556, 556, 556, 556, 556, 556, 556, 556, 556, 556, 556, 556, 556, 556, 556, 556, 556, 556, 556, 556, 556, 556, 556, 556, 556, 556, 556, 556, 556, 556, 556, 556, 556, 556, 556, 556, 556, 556, 556, 556, 556, 556, 556, 556, 556, 556, 556, 556, 556, 556, 556, 556, 556, 556, 556, 556, 556, 556, 556, 556, 556, 556, 556, 556, 556, 556, 556, 556, 556, 556, 556, 556, 556, 556, 556, 556, 556, 556, 556, 556, 556, 556, 556, 556, 556, 556, 556, 556, 556, 556, 556, 556, 556, 556, 556, 556, 556, 556, 556, 556, 556, 556, 556, 556, 556, 556, 556, 556, 556, 556, 556, 556, 556, 556, 556, 556, 556, 556, 556, 556, 556, 556, 556, 556, 556, 556, 556, 556, 556, 556, 556, 556, 556, 556, 556, 556, 556, 556, 556, 556, 556, 556, 556, 556, 556, 556, 556, 556, 556, 556, 556, 556, 556, 556, 556, 556, 556, 556, 556, 556, 556, 556, 556, 556, 556, 556, 556, 556, 556, 556, 556, 556, 556, 556, 556, 556, 556, 556, 556, 556, 556, 556, 556, 556, 556, 
556, 556, 556, 556, 556, 556, 556, 556, 556, 556, 556, 556, 556, 556, 556, 556, 556, 556, 556, 556, 556, 556, 556, 556, 556, 556, 556, 556, 556, 556, 556, 556, 556, 556, 556, 556, 556, 556, 556, 556, 556, 556, 556, 556, 556, 556, 556, 556, 556, 556, 556, 556, 556, 556, 556, 556, 556, 556, 556, 556, 556, 556, 556, 556, 556, 556, 556, 556, 556, 556, 556, 556, 556, 556, 556, 556, 556, 556, 556, 556, 556, 556, 556, 556, 556, 556, 556, 556, 556, 556, 556, 556, 556, 556, 556, 556, 556, 556, 556, 556, 556, 556, 556, 556, 556, 556, 556, 556, 556, 556, 556, 556, 556, 556, 556, 556, 556, 556, 556, 556, 556, 556, 556, 556, 556, 556, 556, 556, 556, 556, 556, 556, 556, 556, 556, 556, 556, 556, 556, 556, 556, 556, 556, 556, 556, 556, 556, 556, 556, 556, 556, 556, 556, 556, 556, 556, 556, 556, 556, 556, 556, 556, 556, 556, 556, 556, 556, 556, 556, 556, 556, 556, 556, 556, 556, 556, 556, 556, 556, 556, 556, 556, 556, 556, 556, 556, 556, 556, 556, 556, 556, 556, 556, 556, 556, 556, 556, 556, 556, 556, 556, 556, 556, 556, 556, 556, 556, 556, 556, 556, 556, 556, 556, 556, 556, 556, 556, 556, 556, 556, 556, 556, 556, 556, 556, 556, 556, 556, 556, 556, 556, 556, 556, 556, 556, 556, 556, 556, 556, 556, 556, 556, 556, 556, 556, 556, 556, 556, 556, 556, 556, 556, 556, 556, 556, 556, 556, 556, 556, 556, 556, 556, 556, 556, 556, 556, 556, 556, 556, 556, 556, 556, 556, 556, 556, 556, 556, 556}, + {555, 555, 555, 555, 555, 555, 555, 555, 555, 555, 555, 555, 555, 555, 555, 555, 555, 555, 555, 555, 555, 555, 555, 555, 555, 555, 555, 555, 555, 555, 555, 555, 555, 555, 555, 555, 555, 555, 555, 555, 555, 555, 555, 555, 555, 555, 555, 555, 555, 555, 555, 555, 555, 555, 555, 555, 555, 555, 555, 555, 555, 555, 555, 555, 555, 555, 555, 555, 555, 555, 555, 555, 555, 555, 555, 555, 555, 555, 555, 555, 555, 555, 555, 555, 555, 555, 555, 555, 555, 555, 555, 555, 555, 555, 555, 555, 555, 555, 555, 555, 555, 555, 555, 555, 555, 555, 555, 555, 555, 555, 555, 555, 555, 555, 555, 555, 555, 555, 555, 555, 555, 555, 555, 555, 555, 555, 555, 555, 555, 555, 555, 555, 555, 555, 555, 555, 555, 555, 555, 555, 555, 555, 555, 555, 555, 555, 555, 555, 555, 555, 555, 555, 555, 555, 555, 555, 555, 555, 555, 555, 555, 555, 555, 555, 555, 555, 555, 555, 555, 555, 555, 555, 555, 555, 555, 555, 555, 555, 555, 555, 555, 555, 555, 555, 555, 555, 555, 555, 555, 555, 555, 555, 555, 555, 555, 555, 555, 555, 555, 555, 555, 555, 555, 555, 555, 555, 555, 555, 555, 555, 555, 555, 555, 555, 555, 555, 555, 555, 555, 555, 555, 555, 555, 555, 555, 555, 555, 555, 555, 555, 555, 555, 555, 555, 555, 555, 555, 555, 555, 555, 555, 555, 555, 555, 555, 555, 555, 555, 555, 555, 555, 555, 555, 555, 555, 555, 555, 555, 555, 555, 555, 555, 555, 555, 555, 555, 555, 555, 555, 555, 555, 555, 555, 555, 555, 555, 555, 555, 555, 555, 555, 555, 555, 555, 555, 555, 555, 555, 555, 555, 555, 555, 555, 555, 555, 555, 555, 555, 555, 555, 555, 555, 555, 555, 555, 555, 555, 555, 555, 555, 555, 555, 555, 555, 555, 555, 555, 555, 555, 555, 555, 555, 555, 555, 555, 555, 555, 555, 555, 555, 555, 555, 555, 555, 555, 555, 555, 555, 555, 555, 555, 555, 555, 555, 555, 555, 555, 555, 555, 555, 555, 555, 555, 555, 555, 555, 555, 555, 555, 555, 555, 555, 555, 555, 555, 555, 555, 555, 555, 555, 555, 555, 555, 555, 555, 555, 555, 555, 555, 555, 555, 555, 555, 555, 555, 555, 555, 555, 555, 555, 555, 555, 555, 555, 555, 555, 555, 555, 555, 555, 555, 555, 555, 555, 555, 555, 555, 555, 555, 555, 555, 555, 555, 555, 555, 555, 555, 555, 555, 555, 555, 555, 555, 555, 555, 555, 555, 555, 555, 555, 555, 555, 
555, 555, 555, 555, 555, 555, 555, 555, 555, 555, 555, 555, 555, 555, 555, 555, 555, 555, 555, 555, 555, 555, 555, 555, 555, 555, 555, 555, 555, 555, 555, 555, 555, 555, 555, 555, 555, 555, 555, 555, 555, 555, 555, 555, 555, 555, 555, 555, 555, 555, 555, 555, 555, 555, 555, 555, 555, 555, 555, 555, 555, 555, 555, 555, 555, 555, 555, 555, 555, 555, 555, 555, 555, 555, 555, 555, 555, 555, 555, 555, 555, 555, 555, 555, 555, 555, 555, 555, 555, 555, 555}, + {554, 554, 554, 554, 554, 554, 554, 554, 554, 554, 554, 554, 554, 554, 554, 554, 554, 554, 554, 554, 554, 554, 554, 554, 554, 554, 554, 554, 554, 554, 554, 554, 554, 554, 554, 554, 554, 554, 554, 554, 554, 554, 554, 554, 554, 554, 554, 554, 554, 554, 554, 554, 554, 554, 554, 554, 554, 554, 554, 554, 554, 554, 554, 554, 554, 554, 554, 554, 554, 554, 554, 554, 554, 554, 554, 554, 554, 554, 554, 554, 554, 554, 554, 554, 554, 554, 554, 554, 554, 554, 554, 554, 554, 554, 554, 554, 554, 554, 554, 554, 554, 554, 554, 554, 554, 554, 554, 554, 554, 554, 554, 554, 554, 554, 554, 554, 554, 554, 554, 554, 554, 554, 554, 554, 554, 554, 554, 554, 554, 554, 554, 554, 554, 554, 554, 554, 554, 554, 554, 554, 554, 554, 554, 554, 554, 554, 554, 554, 554, 554, 554, 554, 554, 554, 554, 554, 554, 554, 554, 554, 554, 554, 554, 554, 554, 554, 554, 554, 554, 554, 554, 554, 554, 554, 554, 554, 554, 554, 554, 554, 554, 554, 554, 554, 554, 554, 554, 554, 554, 554, 554, 554, 554, 554, 554, 554, 554, 554, 554, 554, 554, 554, 554, 554, 554, 554, 554, 554, 554, 554, 554, 554, 554, 554, 554, 554, 554, 554, 554, 554, 554, 554, 554, 554, 554, 554, 554, 554, 554, 554, 554, 554, 554, 554, 554, 554, 554, 554, 554, 554, 554, 554, 554, 554, 554, 554, 554, 554, 554, 554, 554, 554, 554, 554, 554, 554, 554, 554, 554, 554, 554, 554, 554, 554, 554, 554, 554, 554, 554, 554, 554, 554, 554, 554, 554, 554, 554, 554, 554, 554, 554, 554, 554, 554, 554, 554, 554, 554, 554, 554, 554, 554, 554, 554, 554, 554, 554, 554, 554, 554, 554, 554, 554, 554, 554, 554, 554, 554, 554, 554, 554, 554, 554, 554, 554, 554, 554, 554, 554, 554, 554, 554, 554, 554, 554, 554, 554, 554, 554, 554, 554, 554, 554, 554, 554, 554, 554, 554, 554, 554, 554, 554, 554, 554, 554, 554, 554, 554, 554, 554, 554, 554, 554, 554, 554, 554, 554, 554, 554, 554, 554, 554, 554, 554, 554, 554, 554, 554, 554, 554, 554, 554, 554, 554, 554, 554, 554, 554, 554, 554, 554, 554, 554, 554, 554, 554, 554, 554, 554, 554, 554, 554, 554, 554, 554, 554, 554, 554, 554, 554, 554, 554, 554, 554, 554, 554, 554, 554, 554, 554, 554, 554, 554, 554, 554, 554, 554, 554, 554, 554, 554, 554, 554, 554, 554, 554, 554, 554, 554, 554, 554, 554, 554, 554, 554, 554, 554, 554, 554, 554, 554, 554, 554, 554, 554, 554, 554, 554, 554, 554, 554, 554, 554, 554, 554, 554, 554, 554, 554, 554, 554, 554, 554, 554, 554, 554, 554, 554, 554, 554, 554, 554, 554, 554, 554, 554, 554, 554, 554, 554, 554, 554, 554, 554, 554, 554, 554, 554, 554, 554, 554, 554, 554, 554, 554, 554, 554, 554, 554, 554, 554, 554, 554, 554, 554, 554, 554, 554, 554, 554, 554, 554, 554, 554, 554, 554, 554, 554, 554, 554, 554, 554, 554}, + {553, 553, 553, 553, 553, 553, 553, 553, 553, 553, 553, 553, 553, 553, 553, 553, 553, 553, 553, 553, 553, 553, 553, 553, 553, 553, 553, 553, 553, 553, 553, 553, 553, 553, 553, 553, 553, 553, 553, 553, 553, 553, 553, 553, 553, 553, 553, 553, 553, 553, 553, 553, 553, 553, 553, 553, 553, 553, 553, 553, 553, 553, 553, 553, 553, 553, 553, 553, 553, 553, 553, 553, 553, 553, 553, 553, 553, 553, 553, 553, 553, 553, 553, 553, 553, 553, 553, 553, 553, 553, 553, 553, 553, 553, 553, 
553, 553, 553, 553, 553, 553, 553, 553, 553, 553, 553, 553, 553, 553, 553, 553, 553, 553, 553, 553, 553, 553, 553, 553, 553, 553, 553, 553, 553, 553, 553, 553, 553, 553, 553, 553, 553, 553, 553, 553, 553, 553, 553, 553, 553, 553, 553, 553, 553, 553, 553, 553, 553, 553, 553, 553, 553, 553, 553, 553, 553, 553, 553, 553, 553, 553, 553, 553, 553, 553, 553, 553, 553, 553, 553, 553, 553, 553, 553, 553, 553, 553, 553, 553, 553, 553, 553, 553, 553, 553, 553, 553, 553, 553, 553, 553, 553, 553, 553, 553, 553, 553, 553, 553, 553, 553, 553, 553, 553, 553, 553, 553, 553, 553, 553, 553, 553, 553, 553, 553, 553, 553, 553, 553, 553, 553, 553, 553, 553, 553, 553, 553, 553, 553, 553, 553, 553, 553, 553, 553, 553, 553, 553, 553, 553, 553, 553, 553, 553, 553, 553, 553, 553, 553, 553, 553, 553, 553, 553, 553, 553, 553, 553, 553, 553, 553, 553, 553, 553, 553, 553, 553, 553, 553, 553, 553, 553, 553, 553, 553, 553, 553, 553, 553, 553, 553, 553, 553, 553, 553, 553, 553, 553, 553, 553, 553, 553, 553, 553, 553, 553, 553, 553, 553, 553, 553, 553, 553, 553, 553, 553, 553, 553, 553, 553, 553, 553, 553, 553, 553, 553, 553, 553, 553, 553, 553, 553, 553, 553, 553, 553, 553, 553, 553, 553, 553, 553, 553, 553, 553, 553, 553, 553, 553, 553, 553, 553, 553, 553, 553, 553, 553, 553, 553, 553, 553, 553, 553, 553, 553, 553, 553, 553, 553, 553, 553, 553, 553, 553, 553, 553, 553, 553, 553, 553, 553, 553, 553, 553, 553, 553, 553, 553, 553, 553, 553, 553, 553, 553, 553, 553, 553, 553, 553, 553, 553, 553, 553, 553, 553, 553, 553, 553, 553, 553, 553, 553, 553, 553, 553, 553, 553, 553, 553, 553, 553, 553, 553, 553, 553, 553, 553, 553, 553, 553, 553, 553, 553, 553, 553, 553, 553, 553, 553, 553, 553, 553, 553, 553, 553, 553, 553, 553, 553, 553, 553, 553, 553, 553, 553, 553, 553, 553, 553, 553, 553, 553, 553, 553, 553, 553, 553, 553, 553, 553, 553, 553, 553, 553, 553, 553, 553, 553, 553, 553, 553, 553, 553, 553, 553, 553, 553, 553, 553, 553, 553, 553, 553, 553, 553, 553, 553, 553, 553, 553, 553, 553, 553, 553, 553, 553, 553, 553, 553, 553, 553, 553, 553, 553, 553, 553, 553, 553, 553, 553, 553, 553, 553, 553, 553, 553, 553, 553, 553, 553, 553, 553, 553}, + {552, 552, 552, 552, 552, 552, 552, 552, 552, 552, 552, 552, 552, 552, 552, 552, 552, 552, 552, 552, 552, 552, 552, 552, 552, 552, 552, 552, 552, 552, 552, 552, 552, 552, 552, 552, 552, 552, 552, 552, 552, 552, 552, 552, 552, 552, 552, 552, 552, 552, 552, 552, 552, 552, 552, 552, 552, 552, 552, 552, 552, 552, 552, 552, 552, 552, 552, 552, 552, 552, 552, 552, 552, 552, 552, 552, 552, 552, 552, 552, 552, 552, 552, 552, 552, 552, 552, 552, 552, 552, 552, 552, 552, 552, 552, 552, 552, 552, 552, 552, 552, 552, 552, 552, 552, 552, 552, 552, 552, 552, 552, 552, 552, 552, 552, 552, 552, 552, 552, 552, 552, 552, 552, 552, 552, 552, 552, 552, 552, 552, 552, 552, 552, 552, 552, 552, 552, 552, 552, 552, 552, 552, 552, 552, 552, 552, 552, 552, 552, 552, 552, 552, 552, 552, 552, 552, 552, 552, 552, 552, 552, 552, 552, 552, 552, 552, 552, 552, 552, 552, 552, 552, 552, 552, 552, 552, 552, 552, 552, 552, 552, 552, 552, 552, 552, 552, 552, 552, 552, 552, 552, 552, 552, 552, 552, 552, 552, 552, 552, 552, 552, 552, 552, 552, 552, 552, 552, 552, 552, 552, 552, 552, 552, 552, 552, 552, 552, 552, 552, 552, 552, 552, 552, 552, 552, 552, 552, 552, 552, 552, 552, 552, 552, 552, 552, 552, 552, 552, 552, 552, 552, 552, 552, 552, 552, 552, 552, 552, 552, 552, 552, 552, 552, 552, 552, 552, 552, 552, 552, 552, 552, 552, 552, 552, 552, 552, 552, 552, 552, 552, 552, 552, 552, 552, 552, 552, 552, 552, 552, 552, 552, 552, 
552, 552, 552, 552, 552, 552, 552, 552, 552, 552, 552, 552, 552, 552, 552, 552, 552, 552, 552, 552, 552, 552, 552, 552, 552, 552, 552, 552, 552, 552, 552, 552, 552, 552, 552, 552, 552, 552, 552, 552, 552, 552, 552, 552, 552, 552, 552, 552, 552, 552, 552, 552, 552, 552, 552, 552, 552, 552, 552, 552, 552, 552, 552, 552, 552, 552, 552, 552, 552, 552, 552, 552, 552, 552, 552, 552, 552, 552, 552, 552, 552, 552, 552, 552, 552, 552, 552, 552, 552, 552, 552, 552, 552, 552, 552, 552, 552, 552, 552, 552, 552, 552, 552, 552, 552, 552, 552, 552, 552, 552, 552, 552, 552, 552, 552, 552, 552, 552, 552, 552, 552, 552, 552, 552, 552, 552, 552, 552, 552, 552, 552, 552, 552, 552, 552, 552, 552, 552, 552, 552, 552, 552, 552, 552, 552, 552, 552, 552, 552, 552, 552, 552, 552, 552, 552, 552, 552, 552, 552, 552, 552, 552, 552, 552, 552, 552, 552, 552, 552, 552, 552, 552, 552, 552, 552, 552, 552, 552, 552, 552, 552, 552, 552, 552, 552, 552, 552, 552, 552, 552, 552, 552, 552, 552, 552, 552, 552, 552, 552, 552, 552, 552, 552, 552, 552, 552, 552, 552, 552, 552, 552, 552, 552, 552, 552, 552, 552, 552, 552, 552, 552, 552, 552, 552, 552, 552, 552, 552, 552, 552, 552, 552, 552, 552, 552, 552, 552, 552, 552, 552, 552}, + // 390 + {551, 551, 551, 551, 551, 551, 551, 551, 551, 551, 551, 551, 551, 551, 551, 551, 551, 551, 551, 551, 551, 551, 551, 551, 551, 551, 551, 551, 551, 551, 551, 551, 551, 551, 551, 551, 551, 551, 551, 551, 551, 551, 551, 551, 551, 551, 551, 551, 551, 551, 551, 551, 551, 551, 551, 551, 551, 551, 551, 551, 551, 551, 551, 551, 551, 551, 551, 551, 551, 551, 551, 551, 551, 551, 551, 551, 551, 551, 551, 551, 551, 551, 551, 551, 551, 551, 551, 551, 551, 551, 551, 551, 551, 551, 551, 551, 551, 551, 551, 551, 551, 551, 551, 551, 551, 551, 551, 551, 551, 551, 551, 551, 551, 551, 551, 551, 551, 551, 551, 551, 551, 551, 551, 551, 551, 551, 551, 551, 551, 551, 551, 551, 551, 551, 551, 551, 551, 551, 551, 551, 551, 551, 551, 551, 551, 551, 551, 551, 551, 551, 551, 551, 551, 551, 551, 551, 551, 551, 551, 551, 551, 551, 551, 551, 551, 551, 551, 551, 551, 551, 551, 551, 551, 551, 551, 551, 551, 551, 551, 551, 551, 551, 551, 551, 551, 551, 551, 551, 551, 551, 551, 551, 551, 551, 551, 551, 551, 551, 551, 551, 551, 551, 551, 551, 551, 551, 551, 551, 551, 551, 551, 551, 551, 551, 551, 551, 551, 551, 551, 551, 551, 551, 551, 551, 551, 551, 551, 551, 551, 551, 551, 551, 551, 551, 551, 551, 551, 551, 551, 551, 551, 551, 551, 551, 551, 551, 551, 551, 551, 551, 551, 551, 551, 551, 551, 551, 551, 551, 551, 551, 551, 551, 551, 551, 551, 551, 551, 551, 551, 551, 551, 551, 551, 551, 551, 551, 551, 551, 551, 551, 551, 551, 551, 551, 551, 551, 551, 551, 551, 551, 551, 551, 551, 551, 551, 551, 551, 551, 551, 551, 551, 551, 551, 551, 551, 551, 551, 551, 551, 551, 551, 551, 551, 551, 551, 551, 551, 551, 551, 551, 551, 551, 551, 551, 551, 551, 551, 551, 551, 551, 551, 551, 551, 551, 551, 551, 551, 551, 551, 551, 551, 551, 551, 551, 551, 551, 551, 551, 551, 551, 551, 551, 551, 551, 551, 551, 551, 551, 551, 551, 551, 551, 551, 551, 551, 551, 551, 551, 551, 551, 551, 551, 551, 551, 551, 551, 551, 551, 551, 551, 551, 551, 551, 551, 551, 551, 551, 551, 551, 551, 551, 551, 551, 551, 551, 551, 551, 551, 551, 551, 551, 551, 551, 551, 551, 551, 551, 551, 551, 551, 551, 551, 551, 551, 551, 551, 551, 551, 551, 551, 551, 551, 551, 551, 551, 551, 551, 551, 551, 551, 551, 551, 551, 551, 551, 551, 551, 551, 551, 551, 551, 551, 551, 551, 551, 551, 551, 551, 551, 551, 551, 551, 551, 551, 551, 551, 551, 551, 551, 551, 551, 551, 551, 551, 551, 551, 551, 
551, 551, 551, 551, 551, 551, 551, 551, 551, 551, 551, 551, 551, 551, 551, 551, 551, 551, 551, 551, 551, 551, 551, 551, 551, 551, 551, 551, 551, 551, 551, 551, 551, 551, 551, 551, 551, 551, 551, 551, 551, 551, 551, 551, 551, 551, 551, 551, 551, 551, 551, 551, 551, 551, 551, 551}, + {550, 550, 550, 550, 550, 550, 550, 550, 550, 550, 550, 550, 550, 550, 550, 550, 550, 550, 550, 550, 550, 550, 550, 550, 550, 550, 550, 550, 550, 550, 550, 550, 550, 550, 550, 550, 550, 550, 550, 550, 550, 550, 550, 550, 550, 550, 550, 550, 550, 550, 550, 550, 550, 550, 550, 550, 550, 550, 550, 550, 550, 550, 550, 550, 550, 550, 550, 550, 550, 550, 550, 550, 550, 550, 550, 550, 550, 550, 550, 550, 550, 550, 550, 550, 550, 550, 550, 550, 550, 550, 550, 550, 550, 550, 550, 550, 550, 550, 550, 550, 550, 550, 550, 550, 550, 550, 550, 550, 550, 550, 550, 550, 550, 550, 550, 550, 550, 550, 550, 550, 550, 550, 550, 550, 550, 550, 550, 550, 550, 550, 550, 550, 550, 550, 550, 550, 550, 550, 550, 550, 550, 550, 550, 550, 550, 550, 550, 550, 550, 550, 550, 550, 550, 550, 550, 550, 550, 550, 550, 550, 550, 550, 550, 550, 550, 550, 550, 550, 550, 550, 550, 550, 550, 550, 550, 550, 550, 550, 550, 550, 550, 550, 550, 550, 550, 550, 550, 550, 550, 550, 550, 550, 550, 550, 550, 550, 550, 550, 550, 550, 550, 550, 550, 550, 550, 550, 550, 550, 550, 550, 550, 550, 550, 550, 550, 550, 550, 550, 550, 550, 550, 550, 550, 550, 550, 550, 550, 550, 550, 550, 550, 550, 550, 550, 550, 550, 550, 550, 550, 550, 550, 550, 550, 550, 550, 550, 550, 550, 550, 550, 550, 550, 550, 550, 550, 550, 550, 550, 550, 550, 550, 550, 550, 550, 550, 550, 550, 550, 550, 550, 550, 550, 550, 550, 550, 550, 550, 550, 550, 550, 550, 550, 550, 550, 550, 550, 550, 550, 550, 550, 550, 550, 550, 550, 550, 550, 550, 550, 550, 550, 550, 550, 550, 550, 550, 550, 550, 550, 550, 550, 550, 550, 550, 550, 550, 550, 550, 550, 550, 550, 550, 550, 550, 550, 550, 550, 550, 550, 550, 550, 550, 550, 550, 550, 550, 550, 550, 550, 550, 550, 550, 550, 550, 550, 550, 550, 550, 550, 550, 550, 550, 550, 550, 550, 550, 550, 550, 550, 550, 550, 550, 550, 550, 550, 550, 550, 550, 550, 550, 550, 550, 550, 550, 550, 550, 550, 550, 550, 550, 550, 550, 550, 550, 550, 550, 550, 550, 550, 550, 550, 550, 550, 550, 550, 550, 550, 550, 550, 550, 550, 550, 550, 550, 550, 550, 550, 550, 550, 550, 550, 550, 550, 550, 550, 550, 550, 550, 550, 550, 550, 550, 550, 550, 550, 550, 550, 550, 550, 550, 550, 550, 550, 550, 550, 550, 550, 550, 550, 550, 550, 550, 550, 550, 550, 550, 550, 550, 550, 550, 550, 550, 550, 550, 550, 550, 550, 550, 550, 550, 550, 550, 550, 550, 550, 550, 550, 550, 550, 550, 550, 550, 550, 550, 550, 550, 550, 550, 550, 550, 550, 550, 550, 550, 550, 550, 550, 550, 550, 550, 550, 550, 550, 550, 550, 550, 550, 550, 550, 550, 550, 550, 550, 550, 550, 550, 550, 550, 550, 550, 550, 550, 550, 550, 550, 550, 550, 550, 550, 550, 550, 550, 550, 550}, + {549, 549, 549, 549, 549, 549, 549, 549, 549, 549, 549, 549, 549, 549, 549, 549, 549, 549, 549, 549, 549, 549, 549, 549, 549, 549, 549, 549, 549, 549, 549, 549, 549, 549, 549, 549, 549, 549, 549, 549, 549, 549, 549, 549, 549, 549, 549, 549, 549, 549, 549, 549, 549, 549, 549, 549, 549, 549, 549, 549, 549, 549, 549, 549, 549, 549, 549, 549, 549, 549, 549, 549, 549, 549, 549, 549, 549, 549, 549, 549, 549, 549, 549, 549, 549, 549, 549, 549, 549, 549, 549, 549, 549, 549, 549, 549, 549, 549, 549, 549, 549, 549, 549, 549, 549, 549, 549, 549, 549, 549, 549, 549, 549, 549, 549, 549, 549, 549, 549, 549, 549, 549, 549, 549, 549, 549, 549, 549, 549, 549, 
549, 549, 549, 549, 549, 549, 549, 549, 549, 549, 549, 549, 549, 549, 549, 549, 549, 549, 549, 549, 549, 549, 549, 549, 549, 549, 549, 549, 549, 549, 549, 549, 549, 549, 549, 549, 549, 549, 549, 549, 549, 549, 549, 549, 549, 549, 549, 549, 549, 549, 549, 549, 549, 549, 549, 549, 549, 549, 549, 549, 549, 549, 549, 549, 549, 549, 549, 549, 549, 549, 549, 549, 549, 549, 549, 549, 549, 549, 549, 549, 549, 549, 549, 549, 549, 549, 549, 549, 549, 549, 549, 549, 549, 549, 549, 549, 549, 549, 549, 549, 549, 549, 549, 549, 549, 549, 549, 549, 549, 549, 549, 549, 549, 549, 549, 549, 549, 549, 549, 549, 549, 549, 549, 549, 549, 549, 549, 549, 549, 549, 549, 549, 549, 549, 549, 549, 549, 549, 549, 549, 549, 549, 549, 549, 549, 549, 549, 549, 549, 549, 549, 549, 549, 549, 549, 549, 549, 549, 549, 549, 549, 549, 549, 549, 549, 549, 549, 549, 549, 549, 549, 549, 549, 549, 549, 549, 549, 549, 549, 549, 549, 549, 549, 549, 549, 549, 549, 549, 549, 549, 549, 549, 549, 549, 549, 549, 549, 549, 549, 549, 549, 549, 549, 549, 549, 549, 549, 549, 549, 549, 549, 549, 549, 549, 549, 549, 549, 549, 549, 549, 549, 549, 549, 549, 549, 549, 549, 549, 549, 549, 549, 549, 549, 549, 549, 549, 549, 549, 549, 549, 549, 549, 549, 549, 549, 549, 549, 549, 549, 549, 549, 549, 549, 549, 549, 549, 549, 549, 549, 549, 549, 549, 549, 549, 549, 549, 549, 549, 549, 549, 549, 549, 549, 549, 549, 549, 549, 549, 549, 549, 549, 549, 549, 549, 549, 549, 549, 549, 549, 549, 549, 549, 549, 549, 549, 549, 549, 549, 549, 549, 549, 549, 549, 549, 549, 549, 549, 549, 549, 549, 549, 549, 549, 549, 549, 549, 549, 549, 549, 549, 549, 549, 549, 549, 549, 549, 549, 549, 549, 549, 549, 549, 549, 549, 549, 549, 549, 549, 549, 549, 549, 549, 549, 549, 549, 549, 549, 549, 549, 549, 549, 549, 549, 549, 549, 549, 549, 549, 549, 549, 549, 549, 549, 549, 549, 549, 549, 549, 549, 549, 549, 549, 549, 549, 549, 549, 549, 549, 549, 549, 549, 549, 549, 549, 549, 549, 549, 549, 549, 549, 549, 549, 549}, + {548, 548, 548, 548, 548, 548, 548, 548, 548, 548, 548, 548, 548, 548, 548, 548, 548, 548, 548, 548, 548, 548, 548, 548, 548, 548, 548, 548, 548, 548, 548, 548, 548, 548, 548, 548, 548, 548, 548, 548, 548, 548, 548, 548, 548, 548, 548, 548, 548, 548, 548, 548, 548, 548, 548, 548, 548, 548, 548, 548, 548, 548, 548, 548, 548, 548, 548, 548, 548, 548, 548, 548, 548, 548, 548, 548, 548, 548, 548, 548, 548, 548, 548, 548, 548, 548, 548, 548, 548, 548, 548, 548, 548, 548, 548, 548, 548, 548, 548, 548, 548, 548, 548, 548, 548, 548, 548, 548, 548, 548, 548, 548, 548, 548, 548, 548, 548, 548, 548, 548, 548, 548, 548, 548, 548, 548, 548, 548, 548, 548, 548, 548, 548, 548, 548, 548, 548, 548, 548, 548, 548, 548, 548, 548, 548, 548, 548, 548, 548, 548, 548, 548, 548, 548, 548, 548, 548, 548, 548, 548, 548, 548, 548, 548, 548, 548, 548, 548, 548, 548, 548, 548, 548, 548, 548, 548, 548, 548, 548, 548, 548, 548, 548, 548, 548, 548, 548, 548, 548, 548, 548, 548, 548, 548, 548, 548, 548, 548, 548, 548, 548, 548, 548, 548, 548, 548, 548, 548, 548, 548, 548, 548, 548, 548, 548, 548, 548, 548, 548, 548, 548, 548, 548, 548, 548, 548, 548, 548, 548, 548, 548, 548, 548, 548, 548, 548, 548, 548, 548, 548, 548, 548, 548, 548, 548, 548, 548, 548, 548, 548, 548, 548, 548, 548, 548, 548, 548, 548, 548, 548, 548, 548, 548, 548, 548, 548, 548, 548, 548, 548, 548, 548, 548, 548, 548, 548, 548, 548, 548, 548, 548, 548, 548, 548, 548, 548, 548, 548, 548, 548, 548, 548, 548, 548, 548, 548, 548, 548, 548, 548, 548, 548, 548, 548, 548, 548, 548, 548, 548, 548, 548, 548, 548, 548, 548, 548, 548, 
548, 548, 548, 548, 548, 548, 548, 548, 548, 548, 548, 548, 548, 548, 548, 548, 548, 548, 548, 548, 548, 548, 548, 548, 548, 548, 548, 548, 548, 548, 548, 548, 548, 548, 548, 548, 548, 548, 548, 548, 548, 548, 548, 548, 548, 548, 548, 548, 548, 548, 548, 548, 548, 548, 548, 548, 548, 548, 548, 548, 548, 548, 548, 548, 548, 548, 548, 548, 548, 548, 548, 548, 548, 548, 548, 548, 548, 548, 548, 548, 548, 548, 548, 548, 548, 548, 548, 548, 548, 548, 548, 548, 548, 548, 548, 548, 548, 548, 548, 548, 548, 548, 548, 548, 548, 548, 548, 548, 548, 548, 548, 548, 548, 548, 548, 548, 548, 548, 548, 548, 548, 548, 548, 548, 548, 548, 548, 548, 548, 548, 548, 548, 548, 548, 548, 548, 548, 548, 548, 548, 548, 548, 548, 548, 548, 548, 548, 548, 548, 548, 548, 548, 548, 548, 548, 548, 548, 548, 548, 548, 548, 548, 548, 548, 548, 548, 548, 548, 548, 548, 548, 548, 548, 548, 548, 548, 548, 548, 548, 548, 548, 548, 548, 548, 548, 548, 548, 548, 548, 548, 548, 548, 548, 548, 548, 548, 548, 548, 548, 548, 548, 548, 548, 548, 548, 548}, + {547, 547, 547, 547, 547, 547, 547, 547, 547, 547, 547, 547, 547, 547, 547, 547, 547, 547, 547, 547, 547, 547, 547, 547, 547, 547, 547, 547, 547, 547, 547, 547, 547, 547, 547, 547, 547, 547, 547, 547, 547, 547, 547, 547, 547, 547, 547, 547, 547, 547, 547, 547, 547, 547, 547, 547, 547, 547, 547, 547, 547, 547, 547, 547, 547, 547, 547, 547, 547, 547, 547, 547, 547, 547, 547, 547, 547, 547, 547, 547, 547, 547, 547, 547, 547, 547, 547, 547, 547, 547, 547, 547, 547, 547, 547, 547, 547, 547, 547, 547, 547, 547, 547, 547, 547, 547, 547, 547, 547, 547, 547, 547, 547, 547, 547, 547, 547, 547, 547, 547, 547, 547, 547, 547, 547, 547, 547, 547, 547, 547, 547, 547, 547, 547, 547, 547, 547, 547, 547, 547, 547, 547, 547, 547, 547, 547, 547, 547, 547, 547, 547, 547, 547, 547, 547, 547, 547, 547, 547, 547, 547, 547, 547, 547, 547, 547, 547, 547, 547, 547, 547, 547, 547, 547, 547, 547, 547, 547, 547, 547, 547, 547, 547, 547, 547, 547, 547, 547, 547, 547, 547, 547, 547, 547, 547, 547, 547, 547, 547, 547, 547, 547, 547, 547, 547, 547, 547, 547, 547, 547, 547, 547, 547, 547, 547, 547, 547, 547, 547, 547, 547, 547, 547, 547, 547, 547, 547, 547, 547, 547, 547, 547, 547, 547, 547, 547, 547, 547, 547, 547, 547, 547, 547, 547, 547, 547, 547, 547, 547, 547, 547, 547, 547, 547, 547, 547, 547, 547, 547, 547, 547, 547, 547, 547, 547, 547, 547, 547, 547, 547, 547, 547, 547, 547, 547, 547, 547, 547, 547, 547, 547, 547, 547, 547, 547, 547, 547, 547, 547, 547, 547, 547, 547, 547, 547, 547, 547, 547, 547, 547, 547, 547, 547, 547, 547, 547, 547, 547, 547, 547, 547, 547, 547, 547, 547, 547, 547, 547, 547, 547, 547, 547, 547, 547, 547, 547, 547, 547, 547, 547, 547, 547, 547, 547, 547, 547, 547, 547, 547, 547, 547, 547, 547, 547, 547, 547, 547, 547, 547, 547, 547, 547, 547, 547, 547, 547, 547, 547, 547, 547, 547, 547, 547, 547, 547, 547, 547, 547, 547, 547, 547, 547, 547, 547, 547, 547, 547, 547, 547, 547, 547, 547, 547, 547, 547, 547, 547, 547, 547, 547, 547, 547, 547, 547, 547, 547, 547, 547, 547, 547, 547, 547, 547, 547, 547, 547, 547, 547, 547, 547, 547, 547, 547, 547, 547, 547, 547, 547, 547, 547, 547, 547, 547, 547, 547, 547, 547, 547, 547, 547, 547, 547, 547, 547, 547, 547, 547, 547, 547, 547, 547, 547, 547, 547, 547, 547, 547, 547, 547, 547, 547, 547, 547, 547, 547, 547, 547, 547, 547, 547, 547, 547, 547, 547, 547, 547, 547, 547, 547, 547, 547, 547, 547, 547, 547, 547, 547, 547, 547, 547, 547, 547, 547, 547, 547, 547, 547, 547, 547, 547, 547, 547, 547, 547, 547, 547, 547, 547, 547, 547, 547, 547, 547, 547, 
547, 547, 547, 547, 547, 547, 547, 547, 547, 547, 547, 547, 547, 547, 547, 547, 547, 547, 547}, + // 395 + {546, 546, 546, 546, 546, 546, 546, 546, 546, 546, 546, 546, 546, 546, 546, 546, 546, 546, 546, 546, 546, 546, 546, 546, 546, 546, 546, 546, 546, 546, 546, 546, 546, 546, 546, 546, 546, 546, 546, 546, 546, 546, 546, 546, 546, 546, 546, 546, 546, 546, 546, 546, 546, 546, 546, 546, 546, 546, 546, 546, 546, 546, 546, 546, 546, 546, 546, 546, 546, 546, 546, 546, 546, 546, 546, 546, 546, 546, 546, 546, 546, 546, 546, 546, 546, 546, 546, 546, 546, 546, 546, 546, 546, 546, 546, 546, 546, 546, 546, 546, 546, 546, 546, 546, 546, 546, 546, 546, 546, 546, 546, 546, 546, 546, 546, 546, 546, 546, 546, 546, 546, 546, 546, 546, 546, 546, 546, 546, 546, 546, 546, 546, 546, 546, 546, 546, 546, 546, 546, 546, 546, 546, 546, 546, 546, 546, 546, 546, 546, 546, 546, 546, 546, 546, 546, 546, 546, 546, 546, 546, 546, 546, 546, 546, 546, 546, 546, 546, 546, 546, 546, 546, 546, 546, 546, 546, 546, 546, 546, 546, 546, 546, 546, 546, 546, 546, 546, 546, 546, 546, 546, 546, 546, 546, 546, 546, 546, 546, 546, 546, 546, 546, 546, 546, 546, 546, 546, 546, 546, 546, 546, 546, 546, 546, 546, 546, 546, 546, 546, 546, 546, 546, 546, 546, 546, 546, 546, 546, 546, 546, 546, 546, 546, 546, 546, 546, 546, 546, 546, 546, 546, 546, 546, 546, 546, 546, 546, 546, 546, 546, 546, 546, 546, 546, 546, 546, 546, 546, 546, 546, 546, 546, 546, 546, 546, 546, 546, 546, 546, 546, 546, 546, 546, 546, 546, 546, 546, 546, 546, 546, 546, 546, 546, 546, 546, 546, 546, 546, 546, 546, 546, 546, 546, 546, 546, 546, 546, 546, 546, 546, 546, 546, 546, 546, 546, 546, 546, 546, 546, 546, 546, 546, 546, 546, 546, 546, 546, 546, 546, 546, 546, 546, 546, 546, 546, 546, 546, 546, 546, 546, 546, 546, 546, 546, 546, 546, 546, 546, 546, 546, 546, 546, 546, 546, 546, 546, 546, 546, 546, 546, 546, 546, 546, 546, 546, 546, 546, 546, 546, 546, 546, 546, 546, 546, 546, 546, 546, 546, 546, 546, 546, 546, 546, 546, 546, 546, 546, 546, 546, 546, 546, 546, 546, 546, 546, 546, 546, 546, 546, 546, 546, 546, 546, 546, 546, 546, 546, 546, 546, 546, 546, 546, 546, 546, 546, 546, 546, 546, 546, 546, 546, 546, 546, 546, 546, 546, 546, 546, 546, 546, 546, 546, 546, 546, 546, 546, 546, 546, 546, 546, 546, 546, 546, 546, 546, 546, 546, 546, 546, 546, 546, 546, 546, 546, 546, 546, 546, 546, 546, 546, 546, 546, 546, 546, 546, 546, 546, 546, 546, 546, 546, 546, 546, 546, 546, 546, 546, 546, 546, 546, 546, 546, 546, 546, 546, 546, 546, 546, 546, 546, 546, 546, 546, 546, 546, 546, 546, 546, 546, 546, 546, 546, 546, 546, 546, 546, 546, 546, 546, 546, 546, 546, 546, 546, 546, 546, 546, 546, 546, 546, 546, 546, 546, 546, 546, 546, 546, 546, 546, 546, 546, 546, 546}, + {545, 545, 545, 545, 545, 545, 545, 545, 545, 545, 545, 545, 545, 545, 545, 545, 545, 545, 545, 545, 545, 545, 545, 545, 545, 545, 545, 545, 545, 545, 545, 545, 545, 545, 545, 545, 545, 545, 545, 545, 545, 545, 545, 545, 545, 545, 545, 545, 545, 545, 545, 545, 545, 545, 545, 545, 545, 545, 545, 545, 545, 545, 545, 545, 545, 545, 545, 545, 545, 545, 545, 545, 545, 545, 545, 545, 545, 545, 545, 545, 545, 545, 545, 545, 545, 545, 545, 545, 545, 545, 545, 545, 545, 545, 545, 545, 545, 545, 545, 545, 545, 545, 545, 545, 545, 545, 545, 545, 545, 545, 545, 545, 545, 545, 545, 545, 545, 545, 545, 545, 545, 545, 545, 545, 545, 545, 545, 545, 545, 545, 545, 545, 545, 545, 545, 545, 545, 545, 545, 545, 545, 545, 545, 545, 545, 545, 545, 545, 545, 545, 545, 545, 545, 545, 545, 545, 545, 545, 545, 545, 545, 545, 545, 545, 545, 
545, 545, 545, 545, 545, 545, 545, 545, 545, 545, 545, 545, 545, 545, 545, 545, 545, 545, 545, 545, 545, 545, 545, 545, 545, 545, 545, 545, 545, 545, 545, 545, 545, 545, 545, 545, 545, 545, 545, 545, 545, 545, 545, 545, 545, 545, 545, 545, 545, 545, 545, 545, 545, 545, 545, 545, 545, 545, 545, 545, 545, 545, 545, 545, 545, 545, 545, 545, 545, 545, 545, 545, 545, 545, 545, 545, 545, 545, 545, 545, 545, 545, 545, 545, 545, 545, 545, 545, 545, 545, 545, 545, 545, 545, 545, 545, 545, 545, 545, 545, 545, 545, 545, 545, 545, 545, 545, 545, 545, 545, 545, 545, 545, 545, 545, 545, 545, 545, 545, 545, 545, 545, 545, 545, 545, 545, 545, 545, 545, 545, 545, 545, 545, 545, 545, 545, 545, 545, 545, 545, 545, 545, 545, 545, 545, 545, 545, 545, 545, 545, 545, 545, 545, 545, 545, 545, 545, 545, 545, 545, 545, 545, 545, 545, 545, 545, 545, 545, 545, 545, 545, 545, 545, 545, 545, 545, 545, 545, 545, 545, 545, 545, 545, 545, 545, 545, 545, 545, 545, 545, 545, 545, 545, 545, 545, 545, 545, 545, 545, 545, 545, 545, 545, 545, 545, 545, 545, 545, 545, 545, 545, 545, 545, 545, 545, 545, 545, 545, 545, 545, 545, 545, 545, 545, 545, 545, 545, 545, 545, 545, 545, 545, 545, 545, 545, 545, 545, 545, 545, 545, 545, 545, 545, 545, 545, 545, 545, 545, 545, 545, 545, 545, 545, 545, 545, 545, 545, 545, 545, 545, 545, 545, 545, 545, 545, 545, 545, 545, 545, 545, 545, 545, 545, 545, 545, 545, 545, 545, 545, 545, 545, 545, 545, 545, 545, 545, 545, 545, 545, 545, 545, 545, 545, 545, 545, 545, 545, 545, 545, 545, 545, 545, 545, 545, 545, 545, 545, 545, 545, 545, 545, 545, 545, 545, 545, 545, 545, 545, 545, 545, 545, 545, 545, 545, 545, 545, 545, 545, 545, 545, 545, 545, 545, 545, 545, 545, 545, 545, 545, 545, 545, 545, 545, 545, 545, 545, 545, 545, 545, 545, 545, 545, 545, 545, 545, 545, 545, 545}, + {544, 544, 544, 544, 544, 544, 544, 544, 544, 544, 544, 544, 544, 544, 544, 544, 544, 544, 544, 544, 544, 544, 544, 544, 544, 544, 544, 544, 544, 544, 544, 544, 544, 544, 544, 544, 544, 544, 544, 544, 544, 544, 544, 544, 544, 544, 544, 544, 544, 544, 544, 544, 544, 544, 544, 544, 544, 544, 544, 544, 544, 544, 544, 544, 544, 544, 544, 544, 544, 544, 544, 544, 544, 544, 544, 544, 544, 544, 544, 544, 544, 544, 544, 544, 544, 544, 544, 544, 544, 544, 544, 544, 544, 544, 544, 544, 544, 544, 544, 544, 544, 544, 544, 544, 544, 544, 544, 544, 544, 544, 544, 544, 544, 544, 544, 544, 544, 544, 544, 544, 544, 544, 544, 544, 544, 544, 544, 544, 544, 544, 544, 544, 544, 544, 544, 544, 544, 544, 544, 544, 544, 544, 544, 544, 544, 544, 544, 544, 544, 544, 544, 544, 544, 544, 544, 544, 544, 544, 544, 544, 544, 544, 544, 544, 544, 544, 544, 544, 544, 544, 544, 544, 544, 544, 544, 544, 544, 544, 544, 544, 544, 544, 544, 544, 544, 544, 544, 544, 544, 544, 544, 544, 544, 544, 544, 544, 544, 544, 544, 544, 544, 544, 544, 544, 544, 544, 544, 544, 544, 544, 544, 544, 544, 544, 544, 544, 544, 544, 544, 544, 544, 544, 544, 544, 544, 544, 544, 544, 544, 544, 544, 544, 544, 544, 544, 544, 544, 544, 544, 544, 544, 544, 544, 544, 544, 544, 544, 544, 544, 544, 544, 544, 544, 544, 544, 544, 544, 544, 544, 544, 544, 544, 544, 544, 544, 544, 544, 544, 544, 544, 544, 544, 544, 544, 544, 544, 544, 544, 544, 544, 544, 544, 544, 544, 544, 544, 544, 544, 544, 544, 544, 544, 544, 544, 544, 544, 544, 544, 544, 544, 544, 544, 544, 544, 544, 544, 544, 544, 544, 544, 544, 544, 544, 544, 544, 544, 544, 544, 544, 544, 544, 544, 544, 544, 544, 544, 544, 544, 544, 544, 544, 544, 544, 544, 544, 544, 544, 544, 544, 544, 544, 544, 544, 544, 544, 544, 544, 544, 544, 544, 544, 544, 
544, 544, 544, 544, 544, 544, 544, 544, 544, 544, 544, 544, 544, 544, 544, 544, 544, 544, 544, 544, 544, 544, 544, 544, 544, 544, 544, 544, 544, 544, 544, 544, 544, 544, 544, 544, 544, 544, 544, 544, 544, 544, 544, 544, 544, 544, 544, 544, 544, 544, 544, 544, 544, 544, 544, 544, 544, 544, 544, 544, 544, 544, 544, 544, 544, 544, 544, 544, 544, 544, 544, 544, 544, 544, 544, 544, 544, 544, 544, 544, 544, 544, 544, 544, 544, 544, 544, 544, 544, 544, 544, 544, 544, 544, 544, 544, 544, 544, 544, 544, 544, 544, 544, 544, 544, 544, 544, 544, 544, 544, 544, 544, 544, 544, 544, 544, 544, 544, 544, 544, 544, 544, 544, 544, 544, 544, 544, 544, 544, 544, 544, 544, 544, 544, 544, 544, 544, 544, 544, 544, 544, 544, 544, 544, 544, 544, 544, 544, 544, 544, 544, 544, 544, 544, 544, 544, 544, 544, 544, 544, 544, 544, 544, 544, 544, 544, 544, 544, 544, 544, 544}, + {543, 543, 543, 543, 543, 543, 543, 543, 543, 543, 543, 543, 543, 543, 543, 543, 543, 543, 543, 543, 543, 543, 543, 543, 543, 543, 543, 543, 543, 543, 543, 543, 543, 543, 543, 543, 543, 543, 543, 543, 543, 543, 543, 543, 543, 543, 543, 543, 543, 543, 543, 543, 543, 543, 543, 543, 543, 543, 543, 543, 543, 543, 543, 543, 543, 543, 543, 543, 543, 543, 543, 543, 543, 543, 543, 543, 543, 543, 543, 543, 543, 543, 543, 543, 543, 543, 543, 543, 543, 543, 543, 543, 543, 543, 543, 543, 543, 543, 543, 543, 543, 543, 543, 543, 543, 543, 543, 543, 543, 543, 543, 543, 543, 543, 543, 543, 543, 543, 543, 543, 543, 543, 543, 543, 543, 543, 543, 543, 543, 543, 543, 543, 543, 543, 543, 543, 543, 543, 543, 543, 543, 543, 543, 543, 543, 543, 543, 543, 543, 543, 543, 543, 543, 543, 543, 543, 543, 543, 543, 543, 543, 543, 543, 543, 543, 543, 543, 543, 543, 543, 543, 543, 543, 543, 543, 543, 543, 543, 543, 543, 543, 543, 543, 543, 543, 543, 543, 543, 543, 543, 543, 543, 543, 543, 543, 543, 543, 543, 543, 543, 543, 543, 543, 543, 543, 543, 543, 543, 543, 543, 543, 543, 543, 543, 543, 543, 543, 543, 543, 543, 543, 543, 543, 543, 543, 543, 543, 543, 543, 543, 543, 543, 543, 543, 543, 543, 543, 543, 543, 543, 543, 543, 543, 543, 543, 543, 543, 543, 543, 543, 543, 543, 543, 543, 543, 543, 543, 543, 543, 543, 543, 543, 543, 543, 543, 543, 543, 543, 543, 543, 543, 543, 543, 543, 543, 543, 543, 543, 543, 543, 543, 543, 543, 543, 543, 543, 543, 543, 543, 543, 543, 543, 543, 543, 543, 543, 543, 543, 543, 543, 543, 543, 543, 543, 543, 543, 543, 543, 543, 543, 543, 543, 543, 543, 543, 543, 543, 543, 543, 543, 543, 543, 543, 543, 543, 543, 543, 543, 543, 543, 543, 543, 543, 543, 543, 543, 543, 543, 543, 543, 543, 543, 543, 543, 543, 543, 543, 543, 543, 543, 543, 543, 543, 543, 543, 543, 543, 543, 543, 543, 543, 543, 543, 543, 543, 543, 543, 543, 543, 543, 543, 543, 543, 543, 543, 543, 543, 543, 543, 543, 543, 543, 543, 543, 543, 543, 543, 543, 543, 543, 543, 543, 543, 543, 543, 543, 543, 543, 543, 543, 543, 543, 543, 543, 543, 543, 543, 543, 543, 543, 543, 543, 543, 543, 543, 543, 543, 543, 543, 543, 543, 543, 543, 543, 543, 543, 543, 543, 543, 543, 543, 543, 543, 543, 543, 543, 543, 543, 543, 543, 543, 543, 543, 543, 543, 543, 543, 543, 543, 543, 543, 543, 543, 543, 543, 543, 543, 543, 543, 543, 543, 543, 543, 543, 543, 543, 543, 543, 543, 543, 543, 543, 543, 543, 543, 543, 543, 543, 543, 543, 543, 543, 543, 543, 543, 543, 543, 543, 543, 543, 543, 543, 543, 543, 543, 543, 543, 543, 543, 543, 543, 543, 543, 543, 543, 543, 543, 543, 543, 543, 543, 543, 543, 543, 543, 543, 543, 543, 543, 543, 543, 543, 543}, + {542, 542, 542, 542, 542, 542, 542, 542, 542, 542, 542, 542, 542, 542, 542, 
542, 542, 542, 542, 542, 542, 542, 542, 542, 542, 542, 542, 542, 542, 542, 542, 542, 542, 542, 542, 542, 542, 542, 542, 542, 542, 542, 542, 542, 542, 542, 542, 542, 542, 542, 542, 542, 542, 542, 542, 542, 542, 542, 542, 542, 542, 542, 542, 542, 542, 542, 542, 542, 542, 542, 542, 542, 542, 542, 542, 542, 542, 542, 542, 542, 542, 542, 542, 542, 542, 542, 542, 542, 542, 542, 542, 542, 542, 542, 542, 542, 542, 542, 542, 542, 542, 542, 542, 542, 542, 542, 542, 542, 542, 542, 542, 542, 542, 542, 542, 542, 542, 542, 542, 542, 542, 542, 542, 542, 542, 542, 542, 542, 542, 542, 542, 542, 542, 542, 542, 542, 542, 542, 542, 542, 542, 542, 542, 542, 542, 542, 542, 542, 542, 542, 542, 542, 542, 542, 542, 542, 542, 542, 542, 542, 542, 542, 542, 542, 542, 542, 542, 542, 542, 542, 542, 542, 542, 542, 542, 542, 542, 542, 542, 542, 542, 542, 542, 542, 542, 542, 542, 542, 542, 542, 542, 542, 542, 542, 542, 542, 542, 542, 542, 542, 542, 542, 542, 542, 542, 542, 542, 542, 542, 542, 542, 542, 542, 542, 542, 542, 542, 542, 542, 542, 542, 542, 542, 542, 542, 542, 542, 542, 542, 542, 542, 542, 542, 542, 542, 542, 542, 542, 542, 542, 542, 542, 542, 542, 542, 542, 542, 542, 542, 542, 542, 542, 542, 542, 542, 542, 542, 542, 542, 542, 542, 542, 542, 542, 542, 542, 542, 542, 542, 542, 542, 542, 542, 542, 542, 542, 542, 542, 542, 542, 542, 542, 542, 542, 542, 542, 542, 542, 542, 542, 542, 542, 542, 542, 542, 542, 542, 542, 542, 542, 542, 542, 542, 542, 542, 542, 542, 542, 542, 542, 542, 542, 542, 542, 542, 542, 542, 542, 542, 542, 542, 542, 542, 542, 542, 542, 542, 542, 542, 542, 542, 542, 542, 542, 542, 542, 542, 542, 542, 542, 542, 542, 542, 542, 542, 542, 542, 542, 542, 542, 542, 542, 542, 542, 542, 542, 542, 542, 542, 542, 542, 542, 542, 542, 542, 542, 542, 542, 542, 542, 542, 542, 542, 542, 542, 542, 542, 542, 542, 542, 542, 542, 542, 542, 542, 542, 542, 542, 542, 542, 542, 542, 542, 542, 542, 542, 542, 542, 542, 542, 542, 542, 542, 542, 542, 542, 542, 542, 542, 542, 542, 542, 542, 542, 542, 542, 542, 542, 542, 542, 542, 542, 542, 542, 542, 542, 542, 542, 542, 542, 542, 542, 542, 542, 542, 542, 542, 542, 542, 542, 542, 542, 542, 542, 542, 542, 542, 542, 542, 542, 542, 542, 542, 542, 542, 542, 542, 542, 542, 542, 542, 542, 542, 542, 542, 542, 542, 542, 542, 542, 542, 542, 542, 542, 542, 542, 542, 542, 542, 542, 542, 542, 542, 542, 542, 542, 542, 542, 542, 542, 542, 542, 542, 542, 542, 542, 542, 542, 542, 542, 542, 542, 542, 542, 542, 542, 542, 542, 542, 542, 542, 542, 542, 542, 542, 542, 542, 542, 542, 542, 542, 542, 542}, + // 400 + {541, 541, 541, 541, 541, 541, 541, 541, 541, 541, 541, 541, 541, 541, 541, 541, 541, 541, 541, 541, 541, 541, 541, 541, 541, 541, 541, 541, 541, 541, 541, 541, 541, 541, 541, 541, 541, 541, 541, 541, 541, 541, 541, 541, 541, 541, 541, 541, 541, 541, 541, 541, 541, 541, 541, 541, 541, 541, 541, 541, 541, 541, 541, 541, 541, 541, 541, 541, 541, 541, 541, 541, 541, 541, 541, 541, 541, 541, 541, 541, 541, 541, 541, 541, 541, 541, 541, 541, 541, 541, 541, 541, 541, 541, 541, 541, 541, 541, 541, 541, 541, 541, 541, 541, 541, 541, 541, 541, 541, 541, 541, 541, 541, 541, 541, 541, 541, 541, 541, 541, 541, 541, 541, 541, 541, 541, 541, 541, 541, 541, 541, 541, 541, 541, 541, 541, 541, 541, 541, 541, 541, 541, 541, 541, 541, 541, 541, 541, 541, 541, 541, 541, 541, 541, 541, 541, 541, 541, 541, 541, 541, 541, 541, 541, 541, 541, 541, 541, 541, 541, 541, 541, 541, 541, 541, 541, 541, 541, 541, 541, 541, 541, 541, 541, 541, 541, 541, 541, 541, 541, 541, 541, 541, 541, 541, 541, 541, 541, 541, 541, 
541, 541, 541, 541, 541, 541, 541, 541, 541, 541, 541, 541, 541, 541, 541, 541, 541, 541, 541, 541, 541, 541, 541, 541, 541, 541, 541, 541, 541, 541, 541, 541, 541, 541, 541, 541, 541, 541, 541, 541, 541, 541, 541, 541, 541, 541, 541, 541, 541, 541, 541, 541, 541, 541, 541, 541, 541, 541, 541, 541, 541, 541, 541, 541, 541, 541, 541, 541, 541, 541, 541, 541, 541, 541, 541, 541, 541, 541, 541, 541, 541, 541, 541, 541, 541, 541, 541, 541, 541, 541, 541, 541, 541, 541, 541, 541, 541, 541, 541, 541, 541, 541, 541, 541, 541, 541, 541, 541, 541, 541, 541, 541, 541, 541, 541, 541, 541, 541, 541, 541, 541, 541, 541, 541, 541, 541, 541, 541, 541, 541, 541, 541, 541, 541, 541, 541, 541, 541, 541, 541, 541, 541, 541, 541, 541, 541, 541, 541, 541, 541, 541, 541, 541, 541, 541, 541, 541, 541, 541, 541, 541, 541, 541, 541, 541, 541, 541, 541, 541, 541, 541, 541, 541, 541, 541, 541, 541, 541, 541, 541, 541, 541, 541, 541, 541, 541, 541, 541, 541, 541, 541, 541, 541, 541, 541, 541, 541, 541, 541, 541, 541, 541, 541, 541, 541, 541, 541, 541, 541, 541, 541, 541, 541, 541, 541, 541, 541, 541, 541, 541, 541, 541, 541, 541, 541, 541, 541, 541, 541, 541, 541, 541, 541, 541, 541, 541, 541, 541, 541, 541, 541, 541, 541, 541, 541, 541, 541, 541, 541, 541, 541, 541, 541, 541, 541, 541, 541, 541, 541, 541, 541, 541, 541, 541, 541, 541, 541, 541, 541, 541, 541, 541, 541, 541, 541, 541, 541, 541, 541, 541, 541, 541, 541, 541, 541, 541, 541, 541, 541, 541, 541, 541, 541, 541, 541, 541, 541, 541, 541, 541, 541, 541, 541, 541, 541, 541, 541, 541, 541, 541, 541, 541, 541, 541, 541, 541, 541, 541, 541, 541, 541, 541, 541}, + {540, 540, 540, 540, 540, 540, 540, 540, 540, 540, 540, 540, 540, 540, 540, 540, 540, 540, 540, 540, 540, 540, 540, 540, 540, 540, 540, 540, 540, 540, 540, 540, 540, 540, 540, 540, 540, 540, 540, 540, 540, 540, 540, 540, 540, 540, 540, 540, 540, 540, 540, 540, 540, 540, 540, 540, 540, 540, 540, 540, 540, 540, 540, 540, 540, 540, 540, 540, 540, 540, 540, 540, 540, 540, 540, 540, 540, 540, 540, 540, 540, 540, 540, 540, 540, 540, 540, 540, 540, 540, 540, 540, 540, 540, 540, 540, 540, 540, 540, 540, 540, 540, 540, 540, 540, 540, 540, 540, 540, 540, 540, 540, 540, 540, 540, 540, 540, 540, 540, 540, 540, 540, 540, 540, 540, 540, 540, 540, 540, 540, 540, 540, 540, 540, 540, 540, 540, 540, 540, 540, 540, 540, 540, 540, 540, 540, 540, 540, 540, 540, 540, 540, 540, 540, 540, 540, 540, 540, 540, 540, 540, 540, 540, 540, 540, 540, 540, 540, 540, 540, 540, 540, 540, 540, 540, 540, 540, 540, 540, 540, 540, 540, 540, 540, 540, 540, 540, 540, 540, 540, 540, 540, 540, 540, 540, 540, 540, 540, 540, 540, 540, 540, 540, 540, 540, 540, 540, 540, 540, 540, 540, 540, 540, 540, 540, 540, 540, 540, 540, 540, 540, 540, 540, 540, 540, 540, 540, 540, 540, 540, 540, 540, 540, 540, 540, 540, 540, 540, 540, 540, 540, 540, 540, 540, 540, 540, 540, 540, 540, 540, 540, 540, 540, 540, 540, 540, 540, 540, 540, 540, 540, 540, 540, 540, 540, 540, 540, 540, 540, 540, 540, 540, 540, 540, 540, 540, 540, 540, 540, 540, 540, 540, 540, 540, 540, 540, 540, 540, 540, 540, 540, 540, 540, 540, 540, 540, 540, 540, 540, 540, 540, 540, 540, 540, 540, 540, 540, 540, 540, 540, 540, 540, 540, 540, 540, 540, 540, 540, 540, 540, 540, 540, 540, 540, 540, 540, 540, 540, 540, 540, 540, 540, 540, 540, 540, 540, 540, 540, 540, 540, 540, 540, 540, 540, 540, 540, 540, 540, 540, 540, 540, 540, 540, 540, 540, 540, 540, 540, 540, 540, 540, 540, 540, 540, 540, 540, 540, 540, 540, 540, 540, 540, 540, 540, 540, 540, 540, 540, 540, 540, 540, 540, 540, 540, 540, 540, 540, 
540, 540, 540, 540, 540, 540, 540, 540, 540, 540, 540, 540, 540, 540, 540, 540, 540, 540, 540, 540, 540, 540, 540, 540, 540, 540, 540, 540, 540, 540, 540, 540, 540, 540, 540, 540, 540, 540, 540, 540, 540, 540, 540, 540, 540, 540, 540, 540, 540, 540, 540, 540, 540, 540, 540, 540, 540, 540, 540, 540, 540, 540, 540, 540, 540, 540, 540, 540, 540, 540, 540, 540, 540, 540, 540, 540, 540, 540, 540, 540, 540, 540, 540, 540, 540, 540, 540, 540, 540, 540, 540, 540, 540, 540, 540, 540, 540, 540, 540, 540, 540, 540, 540, 540, 540, 540, 540, 540, 540, 540, 540, 540, 540, 540, 540, 540, 540, 540, 540, 540, 540, 540, 540, 540, 540, 540, 540, 540, 540, 540, 540, 540, 540, 540, 540, 540}, + {539, 539, 539, 539, 539, 539, 539, 539, 539, 539, 539, 539, 539, 539, 539, 539, 539, 539, 539, 539, 539, 539, 539, 539, 539, 539, 539, 539, 539, 539, 539, 539, 539, 539, 539, 539, 539, 539, 539, 539, 539, 539, 539, 539, 539, 539, 539, 539, 539, 539, 539, 539, 539, 539, 539, 539, 539, 539, 539, 539, 539, 539, 539, 539, 539, 539, 539, 539, 539, 539, 539, 539, 539, 539, 539, 539, 539, 539, 539, 539, 539, 539, 539, 539, 539, 539, 539, 539, 539, 539, 539, 539, 539, 539, 539, 539, 539, 539, 539, 539, 539, 539, 539, 539, 539, 539, 539, 539, 539, 539, 539, 539, 539, 539, 539, 539, 539, 539, 539, 539, 539, 539, 539, 539, 539, 539, 539, 539, 539, 539, 539, 539, 539, 539, 539, 539, 539, 539, 539, 539, 539, 539, 539, 539, 539, 539, 539, 539, 539, 539, 539, 539, 539, 539, 539, 539, 539, 539, 539, 539, 539, 539, 539, 539, 539, 539, 539, 539, 539, 539, 539, 539, 539, 539, 539, 539, 539, 539, 539, 539, 539, 539, 539, 539, 539, 539, 539, 539, 539, 539, 539, 539, 539, 539, 539, 539, 539, 539, 539, 539, 539, 539, 539, 539, 539, 539, 539, 539, 539, 539, 539, 539, 539, 539, 539, 539, 539, 539, 539, 539, 539, 539, 539, 539, 539, 539, 539, 539, 539, 539, 539, 539, 539, 539, 539, 539, 539, 539, 539, 539, 539, 539, 539, 539, 539, 539, 539, 539, 539, 539, 539, 539, 539, 539, 539, 539, 539, 539, 539, 539, 539, 539, 539, 539, 539, 539, 539, 539, 539, 539, 539, 539, 539, 539, 539, 539, 539, 539, 539, 539, 539, 539, 539, 539, 539, 539, 539, 539, 539, 539, 539, 539, 539, 539, 539, 539, 539, 539, 539, 539, 539, 539, 539, 539, 539, 539, 539, 539, 539, 539, 539, 539, 539, 539, 539, 539, 539, 539, 539, 539, 539, 539, 539, 539, 539, 539, 539, 539, 539, 539, 539, 539, 539, 539, 539, 539, 539, 539, 539, 539, 539, 539, 539, 539, 539, 539, 539, 539, 539, 539, 539, 539, 539, 539, 539, 539, 539, 539, 539, 539, 539, 539, 539, 539, 539, 539, 539, 539, 539, 539, 539, 539, 539, 539, 539, 539, 539, 539, 539, 539, 539, 539, 539, 539, 539, 539, 539, 539, 539, 539, 539, 539, 539, 539, 539, 539, 539, 539, 539, 539, 539, 539, 539, 539, 539, 539, 539, 539, 539, 539, 539, 539, 539, 539, 539, 539, 539, 539, 539, 539, 539, 539, 539, 539, 539, 539, 539, 539, 539, 539, 539, 539, 539, 539, 539, 539, 539, 539, 539, 539, 539, 539, 539, 539, 539, 539, 539, 539, 539, 539, 539, 539, 539, 539, 539, 539, 539, 539, 539, 539, 539, 539, 539, 539, 539, 539, 539, 539, 539, 539, 539, 539, 539, 539, 539, 539, 539, 539, 539, 539, 539, 539, 539, 539, 539, 539, 539, 539, 539, 539, 539, 539, 539, 539, 539, 539, 539, 539, 539, 539, 539, 539, 539, 539, 539, 539, 539, 539, 539, 539, 539, 539, 539, 539, 539, 539, 539, 539, 539, 539, 539, 539, 539}, + {538, 538, 538, 538, 538, 538, 538, 538, 538, 538, 538, 538, 538, 538, 538, 538, 538, 538, 538, 538, 538, 538, 538, 538, 538, 538, 538, 538, 538, 538, 538, 538, 538, 538, 538, 538, 538, 538, 538, 538, 538, 538, 538, 538, 538, 538, 538, 538, 538, 538, 
538, 538, 538, 538, 538, 538, 538, 538, 538, 538, 538, 538, 538, 538, 538, 538, 538, 538, 538, 538, 538, 538, 538, 538, 538, 538, 538, 538, 538, 538, 538, 538, 538, 538, 538, 538, 538, 538, 538, 538, 538, 538, 538, 538, 538, 538, 538, 538, 538, 538, 538, 538, 538, 538, 538, 538, 538, 538, 538, 538, 538, 538, 538, 538, 538, 538, 538, 538, 538, 538, 538, 538, 538, 538, 538, 538, 538, 538, 538, 538, 538, 538, 538, 538, 538, 538, 538, 538, 538, 538, 538, 538, 538, 538, 538, 538, 538, 538, 538, 538, 538, 538, 538, 538, 538, 538, 538, 538, 538, 538, 538, 538, 538, 538, 538, 538, 538, 538, 538, 538, 538, 538, 538, 538, 538, 538, 538, 538, 538, 538, 538, 538, 538, 538, 538, 538, 538, 538, 538, 538, 538, 538, 538, 538, 538, 538, 538, 538, 538, 538, 538, 538, 538, 538, 538, 538, 538, 538, 538, 538, 538, 538, 538, 538, 538, 538, 538, 538, 538, 538, 538, 538, 538, 538, 538, 538, 538, 538, 538, 538, 538, 538, 538, 538, 538, 538, 538, 538, 538, 538, 538, 538, 538, 538, 538, 538, 538, 538, 538, 538, 538, 538, 538, 538, 538, 538, 538, 538, 538, 538, 538, 538, 538, 538, 538, 538, 538, 538, 538, 538, 538, 538, 538, 538, 538, 538, 538, 538, 538, 538, 538, 538, 538, 538, 538, 538, 538, 538, 538, 538, 538, 538, 538, 538, 538, 538, 538, 538, 538, 538, 538, 538, 538, 538, 538, 538, 538, 538, 538, 538, 538, 538, 538, 538, 538, 538, 538, 538, 538, 538, 538, 538, 538, 538, 538, 538, 538, 538, 538, 538, 538, 538, 538, 538, 538, 538, 538, 538, 538, 538, 538, 538, 538, 538, 538, 538, 538, 538, 538, 538, 538, 538, 538, 538, 538, 538, 538, 538, 538, 538, 538, 538, 538, 538, 538, 538, 538, 538, 538, 538, 538, 538, 538, 538, 538, 538, 538, 538, 538, 538, 538, 538, 538, 538, 538, 538, 538, 538, 538, 538, 538, 538, 538, 538, 538, 538, 538, 538, 538, 538, 538, 538, 538, 538, 538, 538, 538, 538, 538, 538, 538, 538, 538, 538, 538, 538, 538, 538, 538, 538, 538, 538, 538, 538, 538, 538, 538, 538, 538, 538, 538, 538, 538, 538, 538, 538, 538, 538, 538, 538, 538, 538, 538, 538, 538, 538, 538, 538, 538, 538, 538, 538, 538, 538, 538, 538, 538, 538, 538, 538, 538, 538, 538, 538, 538, 538, 538, 538, 538, 538, 538, 538, 538, 538, 538, 538, 538, 538, 538, 538, 538, 538, 538, 538, 538, 538, 538, 538, 538, 538, 538, 538, 538, 538, 538, 538, 538, 538, 538, 538, 538, 538, 538, 538, 538, 538, 538, 538, 538, 538, 538, 538, 538, 538, 538, 538, 538, 538, 538, 538, 538, 538, 538}, + {537, 537, 537, 537, 537, 537, 537, 537, 537, 537, 537, 537, 537, 537, 537, 537, 537, 537, 537, 537, 537, 537, 537, 537, 537, 537, 537, 537, 537, 537, 537, 537, 537, 537, 537, 537, 537, 537, 537, 537, 537, 537, 537, 537, 537, 537, 537, 537, 537, 537, 537, 537, 537, 537, 537, 537, 537, 537, 537, 537, 537, 537, 537, 537, 537, 537, 537, 537, 537, 537, 537, 537, 537, 537, 537, 537, 537, 537, 537, 537, 537, 537, 537, 537, 537, 537, 537, 537, 537, 537, 537, 537, 537, 537, 537, 537, 537, 537, 537, 537, 537, 537, 537, 537, 537, 537, 537, 537, 537, 537, 537, 537, 537, 537, 537, 537, 537, 537, 537, 537, 537, 537, 537, 537, 537, 537, 537, 537, 537, 537, 537, 537, 537, 537, 537, 537, 537, 537, 537, 537, 537, 537, 537, 537, 537, 537, 537, 537, 537, 537, 537, 537, 537, 537, 537, 537, 537, 537, 537, 537, 537, 537, 537, 537, 537, 537, 537, 537, 537, 537, 537, 537, 537, 537, 537, 537, 537, 537, 537, 537, 537, 537, 537, 537, 537, 537, 537, 537, 537, 537, 537, 537, 537, 537, 537, 537, 537, 537, 537, 537, 537, 537, 537, 537, 537, 537, 537, 537, 537, 537, 537, 537, 537, 537, 537, 537, 537, 537, 537, 537, 537, 537, 537, 537, 537, 537, 537, 537, 537, 537, 537, 537, 537, 537, 537, 537, 537, 
537, 537, 537, 537, 537, 537, 537, 537, 537, 537, 537, 537, 537, 537, 537, 537, 537, 537, 537, 537, 537, 537, 537, 537, 537, 537, 537, 537, 537, 537, 537, 537, 537, 537, 537, 537, 537, 537, 537, 537, 537, 537, 537, 537, 537, 537, 537, 537, 537, 537, 537, 537, 537, 537, 537, 537, 537, 537, 537, 537, 537, 537, 537, 537, 537, 537, 537, 537, 537, 537, 537, 537, 537, 537, 537, 537, 537, 537, 537, 537, 537, 537, 537, 537, 537, 537, 537, 537, 537, 537, 537, 537, 537, 537, 537, 537, 537, 537, 537, 537, 537, 537, 537, 537, 537, 537, 537, 537, 537, 537, 537, 537, 537, 537, 537, 537, 537, 537, 537, 537, 537, 537, 537, 537, 537, 537, 537, 537, 537, 537, 537, 537, 537, 537, 537, 537, 537, 537, 537, 537, 537, 537, 537, 537, 537, 537, 537, 537, 537, 537, 537, 537, 537, 537, 537, 537, 537, 537, 537, 537, 537, 537, 537, 537, 537, 537, 537, 537, 537, 537, 537, 537, 537, 537, 537, 537, 537, 537, 537, 537, 537, 537, 537, 537, 537, 537, 537, 537, 537, 537, 537, 537, 537, 537, 537, 537, 537, 537, 537, 537, 537, 537, 537, 537, 537, 537, 537, 537, 537, 537, 537, 537, 537, 537, 537, 537, 537, 537, 537, 537, 537, 537, 537, 537, 537, 537, 537, 537, 537, 537, 537, 537, 537, 537, 537, 537, 537, 537, 537, 537, 537, 537, 537, 537, 537, 537, 537, 537, 537, 537, 537, 537, 537, 537, 537, 537, 537, 537, 537, 537, 537, 537, 537, 537, 537, 537, 537, 537, 537, 537, 537, 537, 537, 537, 537, 537, 537, 537, 537, 537, 537, 537, 537, 537, 537, 537}, + // 405 + {536, 536, 536, 536, 536, 536, 536, 536, 536, 536, 536, 536, 536, 536, 536, 536, 536, 536, 536, 536, 536, 536, 536, 536, 536, 536, 536, 536, 536, 536, 536, 536, 536, 536, 536, 536, 536, 536, 536, 536, 536, 536, 536, 536, 536, 536, 536, 536, 536, 536, 536, 536, 536, 536, 536, 536, 536, 536, 536, 536, 536, 536, 536, 536, 536, 536, 536, 536, 536, 536, 536, 536, 536, 536, 536, 536, 536, 536, 536, 536, 536, 536, 536, 536, 536, 536, 536, 536, 536, 536, 536, 536, 536, 536, 536, 536, 536, 536, 536, 536, 536, 536, 536, 536, 536, 536, 536, 536, 536, 536, 536, 536, 536, 536, 536, 536, 536, 536, 536, 536, 536, 536, 536, 536, 536, 536, 536, 536, 536, 536, 536, 536, 536, 536, 536, 536, 536, 536, 536, 536, 536, 536, 536, 536, 536, 536, 536, 536, 536, 536, 536, 536, 536, 536, 536, 536, 536, 536, 536, 536, 536, 536, 536, 536, 536, 536, 536, 536, 536, 536, 536, 536, 536, 536, 536, 536, 536, 536, 536, 536, 536, 536, 536, 536, 536, 536, 536, 536, 536, 536, 536, 536, 536, 536, 536, 536, 536, 536, 536, 536, 536, 536, 536, 536, 536, 536, 536, 536, 536, 536, 536, 536, 536, 536, 536, 536, 536, 536, 536, 536, 536, 536, 536, 536, 536, 536, 536, 536, 536, 536, 536, 536, 536, 536, 536, 536, 536, 536, 536, 536, 536, 536, 536, 536, 536, 536, 536, 536, 536, 536, 536, 536, 536, 536, 536, 536, 536, 536, 536, 536, 536, 536, 536, 536, 536, 536, 536, 536, 536, 536, 536, 536, 536, 536, 536, 536, 536, 536, 536, 536, 536, 536, 536, 536, 536, 536, 536, 536, 536, 536, 536, 536, 536, 536, 536, 536, 536, 536, 536, 536, 536, 536, 536, 536, 536, 536, 536, 536, 536, 536, 536, 536, 536, 536, 536, 536, 536, 536, 536, 536, 536, 536, 536, 536, 536, 536, 536, 536, 536, 536, 536, 536, 536, 536, 536, 536, 536, 536, 536, 536, 536, 536, 536, 536, 536, 536, 536, 536, 536, 536, 536, 536, 536, 536, 536, 536, 536, 536, 536, 536, 536, 536, 536, 536, 536, 536, 536, 536, 536, 536, 536, 536, 536, 536, 536, 536, 536, 536, 536, 536, 536, 536, 536, 536, 536, 536, 536, 536, 536, 536, 536, 536, 536, 536, 536, 536, 536, 536, 536, 536, 536, 536, 536, 536, 536, 536, 536, 536, 536, 536, 536, 536, 536, 536, 536, 536, 536, 536, 536, 536, 536, 536, 
536, 536, 536, 536, 536, 536, 536, 536, 536, 536, 536, 536, 536, 536, 536, 536, 536, 536, 536, 536, 536, 536, 536, 536, 536, 536, 536, 536, 536, 536, 536, 536, 536, 536, 536, 536, 536, 536, 536, 536, 536, 536, 536, 536, 536, 536, 536, 536, 536, 536, 536, 536, 536, 536, 536, 536, 536, 536, 536, 536, 536, 536, 536, 536, 536, 536, 536, 536, 536, 536, 536, 536, 536, 536, 536, 536, 536, 536, 536, 536, 536, 536, 536, 536, 536, 536, 536, 536, 536, 536, 536, 536, 536, 536, 536, 536, 536, 536, 536, 536, 536}, + {535, 535, 535, 535, 535, 535, 535, 535, 535, 535, 535, 535, 535, 535, 535, 535, 535, 535, 535, 535, 535, 535, 535, 535, 535, 535, 535, 535, 535, 535, 535, 535, 535, 535, 535, 535, 535, 535, 535, 535, 535, 535, 535, 535, 535, 535, 535, 535, 535, 535, 535, 535, 535, 535, 535, 535, 535, 535, 535, 535, 535, 535, 535, 535, 535, 535, 535, 535, 535, 535, 535, 535, 535, 535, 535, 535, 535, 535, 535, 535, 535, 535, 535, 535, 535, 535, 535, 535, 535, 535, 535, 535, 535, 535, 535, 535, 535, 535, 535, 535, 535, 535, 535, 535, 535, 535, 535, 535, 535, 535, 535, 535, 535, 535, 535, 535, 535, 535, 535, 535, 535, 535, 535, 535, 535, 535, 535, 535, 535, 535, 535, 535, 535, 535, 535, 535, 535, 535, 535, 535, 535, 535, 535, 535, 535, 535, 535, 535, 535, 535, 535, 535, 535, 535, 535, 535, 535, 535, 535, 535, 535, 535, 535, 535, 535, 535, 535, 535, 535, 535, 535, 535, 535, 535, 535, 535, 535, 535, 535, 535, 535, 535, 535, 535, 535, 535, 535, 535, 535, 535, 535, 535, 535, 535, 535, 535, 535, 535, 535, 535, 535, 535, 535, 535, 535, 535, 535, 535, 535, 535, 535, 535, 535, 535, 535, 535, 535, 535, 535, 535, 535, 535, 535, 535, 535, 535, 535, 535, 535, 535, 535, 535, 535, 535, 535, 535, 535, 535, 535, 535, 535, 535, 535, 535, 535, 535, 535, 535, 535, 535, 535, 535, 535, 535, 535, 535, 535, 535, 535, 535, 535, 535, 535, 535, 535, 535, 535, 535, 535, 535, 535, 535, 535, 535, 535, 535, 535, 535, 535, 535, 535, 535, 535, 535, 535, 535, 535, 535, 535, 535, 535, 535, 535, 535, 535, 535, 535, 535, 535, 535, 535, 535, 535, 535, 535, 535, 535, 535, 535, 535, 535, 535, 535, 535, 535, 535, 535, 535, 535, 535, 535, 535, 535, 535, 535, 535, 535, 535, 535, 535, 535, 535, 535, 535, 535, 535, 535, 535, 535, 535, 535, 535, 535, 535, 535, 535, 535, 535, 535, 535, 535, 535, 535, 535, 535, 535, 535, 535, 535, 535, 535, 535, 535, 535, 535, 535, 535, 535, 535, 535, 535, 535, 535, 535, 535, 535, 535, 535, 535, 535, 535, 535, 535, 535, 535, 535, 535, 535, 535, 535, 535, 535, 535, 535, 535, 535, 535, 535, 535, 535, 535, 535, 535, 535, 535, 535, 535, 535, 535, 535, 535, 535, 535, 535, 535, 535, 535, 535, 535, 535, 535, 535, 535, 535, 535, 535, 535, 535, 535, 535, 535, 535, 535, 535, 535, 535, 535, 535, 535, 535, 535, 535, 535, 535, 535, 535, 535, 535, 535, 535, 535, 535, 535, 535, 535, 535, 535, 535, 535, 535, 535, 535, 535, 535, 535, 535, 535, 535, 535, 535, 535, 535, 535, 535, 535, 535, 535, 535, 535, 535, 535, 535, 535, 535, 535, 535, 535, 535, 535, 535, 535, 535, 535, 535, 535, 535, 535, 535, 535, 535, 535, 535, 535, 535, 535, 535, 535, 535, 535, 535, 535, 535, 535, 535, 535, 535, 535, 535, 535, 535, 535, 535, 535}, + {534, 534, 534, 534, 534, 534, 534, 534, 534, 534, 534, 534, 534, 534, 534, 534, 534, 534, 534, 534, 534, 534, 534, 534, 534, 534, 534, 534, 534, 534, 534, 534, 534, 534, 534, 534, 534, 534, 534, 534, 534, 534, 534, 534, 534, 534, 534, 534, 534, 534, 534, 534, 534, 534, 534, 534, 534, 534, 534, 534, 534, 534, 534, 534, 534, 534, 534, 534, 534, 534, 534, 534, 534, 534, 534, 534, 534, 534, 534, 534, 534, 534, 534, 534, 534, 
534, 534, 534, 534, 534, 534, 534, 534, 534, 534, 534, 534, 534, 534, 534, 534, 534, 534, 534, 534, 534, 534, 534, 534, 534, 534, 534, 534, 534, 534, 534, 534, 534, 534, 534, 534, 534, 534, 534, 534, 534, 534, 534, 534, 534, 534, 534, 534, 534, 534, 534, 534, 534, 534, 534, 534, 534, 534, 534, 534, 534, 534, 534, 534, 534, 534, 534, 534, 534, 534, 534, 534, 534, 534, 534, 534, 534, 534, 534, 534, 534, 534, 534, 534, 534, 534, 534, 534, 534, 534, 534, 534, 534, 534, 534, 534, 534, 534, 534, 534, 534, 534, 534, 534, 534, 534, 534, 534, 534, 534, 534, 534, 534, 534, 534, 534, 534, 534, 534, 534, 534, 534, 534, 534, 534, 534, 534, 534, 534, 534, 534, 534, 534, 534, 534, 534, 534, 534, 534, 534, 534, 534, 534, 534, 534, 534, 534, 534, 534, 534, 534, 534, 534, 534, 534, 534, 534, 534, 534, 534, 534, 534, 534, 534, 534, 534, 534, 534, 534, 534, 534, 534, 534, 534, 534, 534, 534, 534, 534, 534, 534, 534, 534, 534, 534, 534, 534, 534, 534, 534, 534, 534, 534, 534, 534, 534, 534, 534, 534, 534, 534, 534, 534, 534, 534, 534, 534, 534, 534, 534, 534, 534, 534, 534, 534, 534, 534, 534, 534, 534, 534, 534, 534, 534, 534, 534, 534, 534, 534, 534, 534, 534, 534, 534, 534, 534, 534, 534, 534, 534, 534, 534, 534, 534, 534, 534, 534, 534, 534, 534, 534, 534, 534, 534, 534, 534, 534, 534, 534, 534, 534, 534, 534, 534, 534, 534, 534, 534, 534, 534, 534, 534, 534, 534, 534, 534, 534, 534, 534, 534, 534, 534, 534, 534, 534, 534, 534, 534, 534, 534, 534, 534, 534, 534, 534, 534, 534, 534, 534, 534, 534, 534, 534, 534, 534, 534, 534, 534, 534, 534, 534, 534, 534, 534, 534, 534, 534, 534, 534, 534, 534, 534, 534, 534, 534, 534, 534, 534, 534, 534, 534, 534, 534, 534, 534, 534, 534, 534, 534, 534, 534, 534, 534, 534, 534, 534, 534, 534, 534, 534, 534, 534, 534, 534, 534, 534, 534, 534, 534, 534, 534, 534, 534, 534, 534, 534, 534, 534, 534, 534, 534, 534, 534, 534, 534, 534, 534, 534, 534, 534, 534, 534, 534, 534, 534, 534, 534, 534, 534, 534, 534, 534, 534, 534, 534, 534, 534, 534, 534, 534, 534, 534, 534, 534, 534, 534, 534, 534, 534, 534, 534, 534, 534, 534, 534, 534, 534, 534, 534, 534, 534, 534, 534, 534, 534, 534, 534, 534, 534, 534, 534, 534, 534, 534, 534, 534, 534, 534}, + {533, 533, 533, 533, 533, 533, 533, 533, 533, 533, 533, 533, 533, 533, 533, 533, 533, 533, 533, 533, 533, 533, 533, 533, 533, 533, 533, 533, 533, 533, 533, 533, 533, 533, 533, 533, 533, 533, 533, 533, 533, 533, 533, 533, 533, 533, 533, 533, 533, 533, 533, 533, 533, 533, 533, 533, 533, 533, 533, 533, 533, 533, 533, 533, 533, 533, 533, 533, 533, 533, 533, 533, 533, 533, 533, 533, 533, 533, 533, 533, 533, 533, 533, 533, 533, 533, 533, 533, 533, 533, 533, 533, 533, 533, 533, 533, 533, 533, 533, 533, 533, 533, 533, 533, 533, 533, 533, 533, 533, 533, 533, 533, 533, 533, 533, 533, 533, 533, 533, 533, 533, 533, 533, 533, 533, 533, 533, 533, 533, 533, 533, 533, 533, 533, 533, 533, 533, 533, 533, 533, 533, 533, 533, 533, 533, 533, 533, 533, 533, 533, 533, 533, 533, 533, 533, 533, 533, 533, 533, 533, 533, 533, 533, 533, 533, 533, 533, 533, 533, 533, 533, 533, 533, 533, 533, 533, 533, 533, 533, 533, 533, 533, 533, 533, 533, 533, 533, 533, 533, 533, 533, 533, 533, 533, 533, 533, 533, 533, 533, 533, 533, 533, 533, 533, 533, 533, 533, 533, 533, 533, 533, 533, 533, 533, 533, 533, 533, 533, 533, 533, 533, 533, 533, 533, 533, 533, 533, 533, 533, 533, 533, 533, 533, 533, 533, 533, 533, 533, 533, 533, 533, 533, 533, 533, 533, 533, 533, 533, 533, 533, 533, 533, 533, 533, 533, 533, 533, 533, 533, 533, 533, 533, 533, 533, 533, 533, 533, 533, 533, 533, 533, 533, 
533, 533, 533, 533, 533, 533, 533, 533, 533, 533, 533, 533, 533, 533, 533, 533, 533, 533, 533, 533, 533, 533, 533, 533, 533, 533, 533, 533, 533, 533, 533, 533, 533, 533, 533, 533, 533, 533, 533, 533, 533, 533, 533, 533, 533, 533, 533, 533, 533, 533, 533, 533, 533, 533, 533, 533, 533, 533, 533, 533, 533, 533, 533, 533, 533, 533, 533, 533, 533, 533, 533, 533, 533, 533, 533, 533, 533, 533, 533, 533, 533, 533, 533, 533, 533, 533, 533, 533, 533, 533, 533, 533, 533, 533, 533, 533, 533, 533, 533, 533, 533, 533, 533, 533, 533, 533, 533, 533, 533, 533, 533, 533, 533, 533, 533, 533, 533, 533, 533, 533, 533, 533, 533, 533, 533, 533, 533, 533, 533, 533, 533, 533, 533, 533, 533, 533, 533, 533, 533, 533, 533, 533, 533, 533, 533, 533, 533, 533, 533, 533, 533, 533, 533, 533, 533, 533, 533, 533, 533, 533, 533, 533, 533, 533, 533, 533, 533, 533, 533, 533, 533, 533, 533, 533, 533, 533, 533, 533, 533, 533, 533, 533, 533, 533, 533, 533, 533, 533, 533, 533, 533, 533, 533, 533, 533, 533, 533, 533, 533, 533, 533, 533, 533, 533, 533, 533, 533, 533, 533, 533, 533, 533, 533, 533, 533, 533, 533, 533, 533, 533, 533, 533, 533, 533, 533, 533, 533, 533, 533, 533, 533, 533, 533, 533, 533, 533, 533, 533, 533, 533, 533, 533, 533, 533, 533, 533, 533, 533, 533, 533, 533}, + {532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 
532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532}, + // 410 + {531, 531, 531, 531, 531, 531, 531, 531, 531, 531, 531, 531, 531, 531, 531, 531, 531, 531, 531, 531, 531, 531, 531, 531, 531, 531, 531, 531, 531, 531, 531, 531, 531, 531, 531, 531, 531, 531, 531, 531, 531, 531, 531, 531, 531, 531, 531, 531, 531, 531, 531, 531, 531, 531, 531, 531, 531, 531, 531, 531, 531, 531, 531, 531, 531, 531, 531, 531, 531, 531, 531, 531, 531, 531, 531, 531, 531, 531, 531, 531, 531, 531, 531, 531, 531, 531, 531, 531, 531, 531, 531, 531, 531, 531, 531, 531, 531, 531, 531, 531, 531, 531, 531, 531, 531, 531, 531, 531, 531, 531, 531, 531, 531, 531, 531, 531, 531, 531, 531, 531, 531, 531, 531, 531, 531, 531, 531, 531, 531, 531, 531, 531, 531, 531, 531, 531, 531, 531, 531, 531, 531, 531, 531, 531, 531, 531, 531, 531, 531, 531, 531, 531, 531, 531, 531, 531, 531, 531, 531, 531, 531, 531, 531, 531, 531, 531, 531, 531, 531, 531, 531, 531, 531, 531, 531, 531, 531, 531, 531, 531, 531, 531, 531, 531, 531, 531, 531, 531, 531, 531, 531, 531, 531, 531, 531, 531, 531, 531, 531, 531, 531, 531, 531, 531, 531, 531, 531, 531, 531, 531, 531, 531, 531, 531, 531, 531, 531, 531, 531, 531, 531, 531, 531, 531, 531, 531, 531, 531, 531, 531, 531, 531, 531, 531, 531, 531, 531, 531, 531, 531, 531, 531, 531, 531, 531, 531, 531, 531, 531, 531, 531, 531, 531, 531, 531, 531, 531, 531, 531, 531, 531, 531, 531, 531, 531, 531, 531, 531, 531, 531, 531, 531, 531, 531, 531, 531, 531, 531, 531, 531, 531, 531, 531, 531, 531, 531, 531, 531, 531, 531, 531, 531, 531, 531, 531, 531, 531, 531, 531, 531, 531, 531, 531, 531, 531, 531, 531, 531, 531, 531, 531, 531, 531, 531, 531, 531, 531, 531, 531, 531, 531, 531, 531, 531, 531, 531, 531, 531, 531, 531, 531, 531, 531, 531, 531, 531, 531, 531, 531, 531, 531, 531, 531, 531, 531, 531, 531, 531, 531, 531, 531, 531, 531, 531, 531, 531, 531, 531, 531, 531, 531, 531, 531, 531, 531, 531, 531, 531, 531, 531, 531, 531, 531, 531, 531, 531, 531, 531, 531, 531, 531, 531, 531, 531, 531, 531, 531, 531, 531, 531, 531, 531, 531, 531, 531, 531, 531, 531, 531, 531, 531, 531, 531, 531, 531, 531, 531, 531, 531, 531, 531, 531, 531, 531, 531, 531, 531, 531, 531, 531, 531, 531, 531, 531, 531, 531, 531, 531, 531, 531, 531, 531, 531, 531, 531, 531, 531, 531, 531, 531, 531, 531, 531, 531, 531, 531, 531, 531, 531, 531, 531, 531, 531, 531, 531, 531, 531, 531, 531, 531, 531, 531, 531, 531, 531, 531, 531, 531, 531, 531, 531, 531, 531, 531, 531, 531, 531, 531, 531, 531, 531, 531, 531, 531, 531, 531, 531, 531, 531, 531, 531, 531, 531, 531, 531, 531, 531, 531, 531, 531, 531, 531, 531, 531, 531, 531, 531, 531, 531, 531, 531, 531, 531, 531, 531, 531, 531, 531, 531, 531, 531, 531, 531}, + {530, 530, 530, 530, 530, 530, 530, 530, 530, 530, 530, 530, 530, 530, 530, 530, 530, 530, 530, 530, 530, 530, 530, 530, 530, 530, 530, 530, 530, 530, 530, 530, 530, 530, 530, 530, 530, 530, 530, 530, 530, 530, 530, 530, 530, 530, 530, 530, 530, 530, 530, 530, 530, 530, 530, 530, 530, 530, 530, 530, 530, 530, 530, 530, 530, 530, 530, 530, 530, 530, 530, 530, 530, 530, 530, 530, 530, 530, 530, 530, 530, 530, 530, 530, 530, 530, 530, 530, 530, 530, 530, 530, 530, 530, 530, 530, 530, 530, 530, 530, 530, 530, 530, 530, 530, 530, 530, 530, 530, 530, 530, 530, 530, 530, 530, 530, 530, 530, 530, 530, 
530, 530, 530, 530, 530, 530, 530, 530, 530, 530, 530, 530, 530, 530, 530, 530, 530, 530, 530, 530, 530, 530, 530, 530, 530, 530, 530, 530, 530, 530, 530, 530, 530, 530, 530, 530, 530, 530, 530, 530, 530, 530, 530, 530, 530, 530, 530, 530, 530, 530, 530, 530, 530, 530, 530, 530, 530, 530, 530, 530, 530, 530, 530, 530, 530, 530, 530, 530, 530, 530, 530, 530, 530, 530, 530, 530, 530, 530, 530, 530, 530, 530, 530, 530, 530, 530, 530, 530, 530, 530, 530, 530, 530, 530, 530, 530, 530, 530, 530, 530, 530, 530, 530, 530, 530, 530, 530, 530, 530, 530, 530, 530, 530, 530, 530, 530, 530, 530, 530, 530, 530, 530, 530, 530, 530, 530, 530, 530, 530, 530, 530, 530, 530, 530, 530, 530, 530, 530, 530, 530, 530, 530, 530, 530, 530, 530, 530, 530, 530, 530, 530, 530, 530, 530, 530, 530, 530, 530, 530, 530, 530, 530, 530, 530, 530, 530, 530, 530, 530, 530, 530, 530, 530, 530, 530, 530, 530, 530, 530, 530, 530, 530, 530, 530, 530, 530, 530, 530, 530, 530, 530, 530, 530, 530, 530, 530, 530, 530, 530, 530, 530, 530, 530, 530, 530, 530, 530, 530, 530, 530, 530, 530, 530, 530, 530, 530, 530, 530, 530, 530, 530, 530, 530, 530, 530, 530, 530, 530, 530, 530, 530, 530, 530, 530, 530, 530, 530, 530, 530, 530, 530, 530, 530, 530, 530, 530, 530, 530, 530, 530, 530, 530, 530, 530, 530, 530, 530, 530, 530, 530, 530, 530, 530, 530, 530, 530, 530, 530, 530, 530, 530, 530, 530, 530, 530, 530, 530, 530, 530, 530, 530, 530, 530, 530, 530, 530, 530, 530, 530, 530, 530, 530, 530, 530, 530, 530, 530, 530, 530, 530, 530, 530, 530, 530, 530, 530, 530, 530, 530, 530, 530, 530, 530, 530, 530, 530, 530, 530, 530, 530, 530, 530, 530, 530, 530, 530, 530, 530, 530, 530, 530, 530, 530, 530, 530, 530, 530, 530, 530, 530, 530, 530, 530, 530, 530, 530, 530, 530, 530, 530, 530, 530, 530, 530, 530, 530, 530, 530, 530, 530, 530, 530, 530, 530, 530, 530, 530, 530, 530, 530, 530, 530, 530, 530, 530, 530, 530, 530, 530, 530, 530, 530, 530, 530, 530, 530, 530, 530, 530, 530, 530, 530, 530, 530, 530, 530, 530, 530, 530, 530, 530, 530, 530}, + {529, 529, 529, 529, 529, 529, 529, 529, 529, 529, 529, 529, 529, 529, 529, 529, 529, 529, 529, 529, 529, 529, 529, 529, 529, 529, 529, 529, 529, 529, 529, 529, 529, 529, 529, 529, 529, 529, 529, 529, 529, 529, 529, 529, 529, 529, 529, 529, 529, 529, 529, 529, 529, 529, 529, 529, 529, 529, 529, 529, 529, 529, 529, 529, 529, 529, 529, 529, 529, 529, 529, 529, 529, 529, 529, 529, 529, 529, 529, 529, 529, 529, 529, 529, 529, 529, 529, 529, 529, 529, 529, 529, 529, 529, 529, 529, 529, 529, 529, 529, 529, 529, 529, 529, 529, 529, 529, 529, 529, 529, 529, 529, 529, 529, 529, 529, 529, 529, 529, 529, 529, 529, 529, 529, 529, 529, 529, 529, 529, 529, 529, 529, 529, 529, 529, 529, 529, 529, 529, 529, 529, 529, 529, 529, 529, 529, 529, 529, 529, 529, 529, 529, 529, 529, 529, 529, 529, 529, 529, 529, 529, 529, 529, 529, 529, 529, 529, 529, 529, 529, 529, 529, 529, 529, 529, 529, 529, 529, 529, 529, 529, 529, 529, 529, 529, 529, 529, 529, 529, 529, 529, 529, 529, 529, 529, 529, 529, 529, 529, 529, 529, 529, 529, 529, 529, 529, 529, 529, 529, 529, 529, 529, 529, 529, 529, 529, 529, 529, 529, 529, 529, 529, 529, 529, 529, 529, 529, 529, 529, 529, 529, 529, 529, 529, 529, 529, 529, 529, 529, 529, 529, 529, 529, 529, 529, 529, 529, 529, 529, 529, 529, 529, 529, 529, 529, 529, 529, 529, 529, 529, 529, 529, 529, 529, 529, 529, 529, 529, 529, 529, 529, 529, 529, 529, 529, 529, 529, 529, 529, 529, 529, 529, 529, 529, 529, 529, 529, 529, 529, 529, 529, 529, 529, 529, 529, 529, 529, 529, 529, 529, 529, 529, 529, 529, 529, 529, 529, 
529, 529, 529, 529, 529, 529, 529, 529, 529, 529, 529, 529, 529, 529, 529, 529, 529, 529, 529, 529, 529, 529, 529, 529, 529, 529, 529, 529, 529, 529, 529, 529, 529, 529, 529, 529, 529, 529, 529, 529, 529, 529, 529, 529, 529, 529, 529, 529, 529, 529, 529, 529, 529, 529, 529, 529, 529, 529, 529, 529, 529, 529, 529, 529, 529, 529, 529, 529, 529, 529, 529, 529, 529, 529, 529, 529, 529, 529, 529, 529, 529, 529, 529, 529, 529, 529, 529, 529, 529, 529, 529, 529, 529, 529, 529, 529, 529, 529, 529, 529, 529, 529, 529, 529, 529, 529, 529, 529, 529, 529, 529, 529, 529, 529, 529, 529, 529, 529, 529, 529, 529, 529, 529, 529, 529, 529, 529, 529, 529, 529, 529, 529, 529, 529, 529, 529, 529, 529, 529, 529, 529, 529, 529, 529, 529, 529, 529, 529, 529, 529, 529, 529, 529, 529, 529, 529, 529, 529, 529, 529, 529, 529, 529, 529, 529, 529, 529, 529, 529, 529, 529, 529, 529, 529, 529, 529, 529, 529, 529, 529, 529, 529, 529, 529, 529, 529, 529, 529, 529, 529, 529, 529, 529, 529, 529, 529, 529, 529, 529, 529, 529, 529, 529, 529, 529, 529, 529, 529, 529, 529, 529, 529, 529, 529, 529, 529}, + {528, 528, 528, 528, 528, 528, 528, 528, 528, 528, 528, 528, 528, 528, 528, 528, 528, 528, 528, 528, 528, 528, 528, 528, 528, 528, 528, 528, 528, 528, 528, 528, 528, 528, 528, 528, 528, 528, 528, 528, 528, 528, 528, 528, 528, 528, 528, 528, 528, 528, 528, 528, 528, 528, 528, 528, 528, 528, 528, 528, 528, 528, 528, 528, 528, 528, 528, 528, 528, 528, 528, 528, 528, 528, 528, 528, 528, 528, 528, 528, 528, 528, 528, 528, 528, 528, 528, 528, 528, 528, 528, 528, 528, 528, 528, 528, 528, 528, 528, 528, 528, 528, 528, 528, 528, 528, 528, 528, 528, 528, 528, 528, 528, 528, 528, 528, 528, 528, 528, 528, 528, 528, 528, 528, 528, 528, 528, 528, 528, 528, 528, 528, 528, 528, 528, 528, 528, 528, 528, 528, 528, 528, 528, 528, 528, 528, 528, 528, 528, 528, 528, 528, 528, 528, 528, 528, 528, 528, 528, 528, 528, 528, 528, 528, 528, 528, 528, 528, 528, 528, 528, 528, 528, 528, 528, 528, 528, 528, 528, 528, 528, 528, 528, 528, 528, 528, 528, 528, 528, 528, 528, 528, 528, 528, 528, 528, 528, 528, 528, 528, 528, 528, 528, 528, 528, 528, 528, 528, 528, 528, 528, 528, 528, 528, 528, 528, 528, 528, 528, 528, 528, 528, 528, 528, 528, 528, 528, 528, 528, 528, 528, 528, 528, 528, 528, 528, 528, 528, 528, 528, 528, 528, 528, 528, 528, 528, 528, 528, 528, 528, 528, 528, 528, 528, 528, 528, 528, 528, 528, 528, 528, 528, 528, 528, 528, 528, 528, 528, 528, 528, 528, 528, 528, 528, 528, 528, 528, 528, 528, 528, 528, 528, 528, 528, 528, 528, 528, 528, 528, 528, 528, 528, 528, 528, 528, 528, 528, 528, 528, 528, 528, 528, 528, 528, 528, 528, 528, 528, 528, 528, 528, 528, 528, 528, 528, 528, 528, 528, 528, 528, 528, 528, 528, 528, 528, 528, 528, 528, 528, 528, 528, 528, 528, 528, 528, 528, 528, 528, 528, 528, 528, 528, 528, 528, 528, 528, 528, 528, 528, 528, 528, 528, 528, 528, 528, 528, 528, 528, 528, 528, 528, 528, 528, 528, 528, 528, 528, 528, 528, 528, 528, 528, 528, 528, 528, 528, 528, 528, 528, 528, 528, 528, 528, 528, 528, 528, 528, 528, 528, 528, 528, 528, 528, 528, 528, 528, 528, 528, 528, 528, 528, 528, 528, 528, 528, 528, 528, 528, 528, 528, 528, 528, 528, 528, 528, 528, 528, 528, 528, 528, 528, 528, 528, 528, 528, 528, 528, 528, 528, 528, 528, 528, 528, 528, 528, 528, 528, 528, 528, 528, 528, 528, 528, 528, 528, 528, 528, 528, 528, 528, 528, 528, 528, 528, 528, 528, 528, 528, 528, 528, 528, 528, 528, 528, 528, 528, 528, 528, 528, 528, 528, 528, 528, 528, 528, 528, 528, 528, 528, 528, 528, 528, 528, 528, 528, 528, 528, 528, 528, 528, 528, 528, 528, 528, 
528, 528, 528, 528, 528, 528, 528, 528, 528, 528, 528, 528, 528, 528, 528, 528, 528, 528, 528, 528, 528, 528, 528, 528, 528, 528, 528, 528, 528}, + {527, 527, 527, 527, 527, 527, 527, 527, 527, 527, 527, 527, 527, 527, 527, 527, 527, 527, 527, 527, 527, 527, 527, 527, 527, 527, 527, 527, 527, 527, 527, 527, 527, 527, 527, 527, 527, 527, 527, 527, 527, 527, 527, 527, 527, 527, 527, 527, 527, 527, 527, 527, 527, 527, 527, 527, 527, 527, 527, 527, 527, 527, 527, 527, 527, 527, 527, 527, 527, 527, 527, 527, 527, 527, 527, 527, 527, 527, 527, 527, 527, 527, 527, 527, 527, 527, 527, 527, 527, 527, 527, 527, 527, 527, 527, 527, 527, 527, 527, 527, 527, 527, 527, 527, 527, 527, 527, 527, 527, 527, 527, 527, 527, 527, 527, 527, 527, 527, 527, 527, 527, 527, 527, 527, 527, 527, 527, 527, 527, 527, 527, 527, 527, 527, 527, 527, 527, 527, 527, 527, 527, 527, 527, 527, 527, 527, 527, 527, 527, 527, 527, 527, 527, 527, 527, 527, 527, 527, 527, 527, 527, 527, 527, 527, 527, 527, 527, 527, 527, 527, 527, 527, 527, 527, 527, 527, 527, 527, 527, 527, 527, 527, 527, 527, 527, 527, 527, 527, 527, 527, 527, 527, 527, 527, 527, 527, 527, 527, 527, 527, 527, 527, 527, 527, 527, 527, 527, 527, 527, 527, 527, 527, 527, 527, 527, 527, 527, 527, 527, 527, 527, 527, 527, 527, 527, 527, 527, 527, 527, 527, 527, 527, 527, 527, 527, 527, 527, 527, 527, 527, 527, 527, 527, 527, 527, 527, 527, 527, 527, 527, 527, 527, 527, 527, 527, 527, 527, 527, 527, 527, 527, 527, 527, 527, 527, 527, 527, 527, 527, 527, 527, 527, 527, 527, 527, 527, 527, 527, 527, 527, 527, 527, 527, 527, 527, 527, 527, 527, 527, 527, 527, 527, 527, 527, 527, 527, 527, 527, 527, 527, 527, 527, 527, 527, 527, 527, 527, 527, 527, 527, 527, 527, 527, 527, 527, 527, 527, 527, 527, 527, 527, 527, 527, 527, 527, 527, 527, 527, 527, 527, 527, 527, 527, 527, 527, 527, 527, 527, 527, 527, 527, 527, 527, 527, 527, 527, 527, 527, 527, 527, 527, 527, 527, 527, 527, 527, 527, 527, 527, 527, 527, 527, 527, 527, 527, 527, 527, 527, 527, 527, 527, 527, 527, 527, 527, 527, 527, 527, 527, 527, 527, 527, 527, 527, 527, 527, 527, 527, 527, 527, 527, 527, 527, 527, 527, 527, 527, 527, 527, 527, 527, 527, 527, 527, 527, 527, 527, 527, 527, 527, 527, 527, 527, 527, 527, 527, 527, 527, 527, 527, 527, 527, 527, 527, 527, 527, 527, 527, 527, 527, 527, 527, 527, 527, 527, 527, 527, 527, 527, 527, 527, 527, 527, 527, 527, 527, 527, 527, 527, 527, 527, 527, 527, 527, 527, 527, 527, 527, 527, 527, 527, 527, 527, 527, 527, 527, 527, 527, 527, 527, 527, 527, 527, 527, 527, 527, 527, 527, 527, 527, 527, 527, 527, 527, 527, 527, 527, 527, 527, 527, 527, 527, 527, 527, 527, 527, 527, 527, 527, 527, 527, 527, 527, 527, 527, 527, 527, 527, 527, 527, 527, 527, 527, 527, 527, 527, 527, 527, 527, 527, 527, 527, 527}, + // 415 + {526, 526, 526, 526, 526, 526, 526, 526, 526, 526, 526, 526, 526, 526, 526, 526, 526, 526, 526, 526, 526, 526, 526, 526, 526, 526, 526, 526, 526, 526, 526, 526, 526, 526, 526, 526, 526, 526, 526, 526, 526, 526, 526, 526, 526, 526, 526, 526, 526, 526, 526, 526, 526, 526, 526, 526, 526, 526, 526, 526, 526, 526, 526, 526, 526, 526, 526, 526, 526, 526, 526, 526, 526, 526, 526, 526, 526, 526, 526, 526, 526, 526, 526, 526, 526, 526, 526, 526, 526, 526, 526, 526, 526, 526, 526, 526, 526, 526, 526, 526, 526, 526, 526, 526, 526, 526, 526, 526, 526, 526, 526, 526, 526, 526, 526, 526, 526, 526, 526, 526, 526, 526, 526, 526, 526, 526, 526, 526, 526, 526, 526, 526, 526, 526, 526, 526, 526, 526, 526, 526, 526, 526, 526, 526, 526, 526, 526, 526, 526, 526, 526, 526, 526, 526, 526, 
526, 526, 526, 526, 526, 526, 526, 526, 526, 526, 526, 526, 526, 526, 526, 526, 526, 526, 526, 526, 526, 526, 526, 526, 526, 526, 526, 526, 526, 526, 526, 526, 526, 526, 526, 526, 526, 526, 526, 526, 526, 526, 526, 526, 526, 526, 526, 526, 526, 526, 526, 526, 526, 526, 526, 526, 526, 526, 526, 526, 526, 526, 526, 526, 526, 526, 526, 526, 526, 526, 526, 526, 526, 526, 526, 526, 526, 526, 526, 526, 526, 526, 526, 526, 526, 526, 526, 526, 526, 526, 526, 526, 526, 526, 526, 526, 526, 526, 526, 526, 526, 526, 526, 526, 526, 526, 526, 526, 526, 526, 526, 526, 526, 526, 526, 526, 526, 526, 526, 526, 526, 526, 526, 526, 526, 526, 526, 526, 526, 526, 526, 526, 526, 526, 526, 526, 526, 526, 526, 526, 526, 526, 526, 526, 526, 526, 526, 526, 526, 526, 526, 526, 526, 526, 526, 526, 526, 526, 526, 526, 526, 526, 526, 526, 526, 526, 526, 526, 526, 526, 526, 526, 526, 526, 526, 526, 526, 526, 526, 526, 526, 526, 526, 526, 526, 526, 526, 526, 526, 526, 526, 526, 526, 526, 526, 526, 526, 526, 526, 526, 526, 526, 526, 526, 526, 526, 526, 526, 526, 526, 526, 526, 526, 526, 526, 526, 526, 526, 526, 526, 526, 526, 526, 526, 526, 526, 526, 526, 526, 526, 526, 526, 526, 526, 526, 526, 526, 526, 526, 526, 526, 526, 526, 526, 526, 526, 526, 526, 526, 526, 526, 526, 526, 526, 526, 526, 526, 526, 526, 526, 526, 526, 526, 526, 526, 526, 526, 526, 526, 526, 526, 526, 526, 526, 526, 526, 526, 526, 526, 526, 526, 526, 526, 526, 526, 526, 526, 526, 526, 526, 526, 526, 526, 526, 526, 526, 526, 526, 526, 526, 526, 526, 526, 526, 526, 526, 526, 526, 526, 526, 526, 526, 526, 526, 526, 526, 526, 526, 526, 526, 526, 526, 526, 526, 526, 526, 526, 526, 526, 526, 526, 526, 526, 526, 526, 526, 526, 526, 526, 526, 526, 526, 526, 526, 526, 526, 526, 526, 526, 526, 526, 526, 526, 526, 526, 526, 526, 526, 526, 526, 526, 526, 526, 526, 526, 526, 526, 526}, + {525, 525, 525, 525, 525, 525, 525, 525, 525, 525, 525, 525, 525, 525, 525, 525, 525, 525, 525, 525, 525, 525, 525, 525, 525, 525, 525, 525, 525, 525, 525, 525, 525, 525, 525, 525, 525, 525, 525, 525, 525, 525, 525, 525, 525, 525, 525, 525, 525, 525, 525, 525, 525, 525, 525, 525, 525, 525, 525, 525, 525, 525, 525, 525, 525, 525, 525, 525, 525, 525, 525, 525, 525, 525, 525, 525, 525, 525, 525, 525, 525, 525, 525, 525, 525, 525, 525, 525, 525, 525, 525, 525, 525, 525, 525, 525, 525, 525, 525, 525, 525, 525, 525, 525, 525, 525, 525, 525, 525, 525, 525, 525, 525, 525, 525, 525, 525, 525, 525, 525, 525, 525, 525, 525, 525, 525, 525, 525, 525, 525, 525, 525, 525, 525, 525, 525, 525, 525, 525, 525, 525, 525, 525, 525, 525, 525, 525, 525, 525, 525, 525, 525, 525, 525, 525, 525, 525, 525, 525, 525, 525, 525, 525, 525, 525, 525, 525, 525, 525, 525, 525, 525, 525, 525, 525, 525, 525, 525, 525, 525, 525, 525, 525, 525, 525, 525, 525, 525, 525, 525, 525, 525, 525, 525, 525, 525, 525, 525, 525, 525, 525, 525, 525, 525, 525, 525, 525, 525, 525, 525, 525, 525, 525, 525, 525, 525, 525, 525, 525, 525, 525, 525, 525, 525, 525, 525, 525, 525, 525, 525, 525, 525, 525, 525, 525, 525, 525, 525, 525, 525, 525, 525, 525, 525, 525, 525, 525, 525, 525, 525, 525, 525, 525, 525, 525, 525, 525, 525, 525, 525, 525, 525, 525, 525, 525, 525, 525, 525, 525, 525, 525, 525, 525, 525, 525, 525, 525, 525, 525, 525, 525, 525, 525, 525, 525, 525, 525, 525, 525, 525, 525, 525, 525, 525, 525, 525, 525, 525, 525, 525, 525, 525, 525, 525, 525, 525, 525, 525, 525, 525, 525, 525, 525, 525, 525, 525, 525, 525, 525, 525, 525, 525, 525, 525, 525, 525, 525, 525, 525, 525, 525, 525, 525, 525, 525, 525, 525, 525, 525, 525, 525, 525, 
525, 525, 525, 525, 525, 525, 525, 525, 525, 525, 525, 525, 525, 525, 525, 525, 525, 525, 525, 525, 525, 525, 525, 525, 525, 525, 525, 525, 525, 525, 525, 525, 525, 525, 525, 525, 525, 525, 525, 525, 525, 525, 525, 525, 525, 525, 525, 525, 525, 525, 525, 525, 525, 525, 525, 525, 525, 525, 525, 525, 525, 525, 525, 525, 525, 525, 525, 525, 525, 525, 525, 525, 525, 525, 525, 525, 525, 525, 525, 525, 525, 525, 525, 525, 525, 525, 525, 525, 525, 525, 525, 525, 525, 525, 525, 525, 525, 525, 525, 525, 525, 525, 525, 525, 525, 525, 525, 525, 525, 525, 525, 525, 525, 525, 525, 525, 525, 525, 525, 525, 525, 525, 525, 525, 525, 525, 525, 525, 525, 525, 525, 525, 525, 525, 525, 525, 525, 525, 525, 525, 525, 525, 525, 525, 525, 525, 525, 525, 525, 525, 525, 525, 525, 525, 525, 525, 525, 525, 525, 525, 525, 525, 525, 525, 525, 525, 525, 525, 525, 525, 525, 525, 525, 525, 525, 525, 525, 525, 525, 525, 525}, + {524, 524, 524, 524, 524, 524, 524, 524, 524, 524, 524, 524, 524, 524, 524, 524, 524, 524, 524, 524, 524, 524, 524, 524, 524, 524, 524, 524, 524, 524, 524, 524, 524, 524, 524, 524, 524, 524, 524, 524, 524, 524, 524, 524, 524, 524, 524, 524, 524, 524, 524, 524, 524, 524, 524, 524, 524, 524, 524, 524, 524, 524, 524, 524, 524, 524, 524, 524, 524, 524, 524, 524, 524, 524, 524, 524, 524, 524, 524, 524, 524, 524, 524, 524, 524, 524, 524, 524, 524, 524, 524, 524, 524, 524, 524, 524, 524, 524, 524, 524, 524, 524, 524, 524, 524, 524, 524, 524, 524, 524, 524, 524, 524, 524, 524, 524, 524, 524, 524, 524, 524, 524, 524, 524, 524, 524, 524, 524, 524, 524, 524, 524, 524, 524, 524, 524, 524, 524, 524, 524, 524, 524, 524, 524, 524, 524, 524, 524, 524, 524, 524, 524, 524, 524, 524, 524, 524, 524, 524, 524, 524, 524, 524, 524, 524, 524, 524, 524, 524, 524, 524, 524, 524, 524, 524, 524, 524, 524, 524, 524, 524, 524, 524, 524, 524, 524, 524, 524, 524, 524, 524, 524, 524, 524, 524, 524, 524, 524, 524, 524, 524, 524, 524, 524, 524, 524, 524, 524, 524, 524, 524, 524, 524, 524, 524, 524, 524, 524, 524, 524, 524, 524, 524, 524, 524, 524, 524, 524, 524, 524, 524, 524, 524, 524, 524, 524, 524, 524, 524, 524, 524, 524, 524, 524, 524, 524, 524, 524, 524, 524, 524, 524, 524, 524, 524, 524, 524, 524, 524, 524, 524, 524, 524, 524, 524, 524, 524, 524, 524, 524, 524, 524, 524, 524, 524, 524, 524, 524, 524, 524, 524, 524, 524, 524, 524, 524, 524, 524, 524, 524, 524, 524, 524, 524, 524, 524, 524, 524, 524, 524, 524, 524, 524, 524, 524, 524, 524, 524, 524, 524, 524, 524, 524, 524, 524, 524, 524, 524, 524, 524, 524, 524, 524, 524, 524, 524, 524, 524, 524, 524, 524, 524, 524, 524, 524, 524, 524, 524, 524, 524, 524, 524, 524, 524, 524, 524, 524, 524, 524, 524, 524, 524, 524, 524, 524, 524, 524, 524, 524, 524, 524, 524, 524, 524, 524, 524, 524, 524, 524, 524, 524, 524, 524, 524, 524, 524, 524, 524, 524, 524, 524, 524, 524, 524, 524, 524, 524, 524, 524, 524, 524, 524, 524, 524, 524, 524, 524, 524, 524, 524, 524, 524, 524, 524, 524, 524, 524, 524, 524, 524, 524, 524, 524, 524, 524, 524, 524, 524, 524, 524, 524, 524, 524, 524, 524, 524, 524, 524, 524, 524, 524, 524, 524, 524, 524, 524, 524, 524, 524, 524, 524, 524, 524, 524, 524, 524, 524, 524, 524, 524, 524, 524, 524, 524, 524, 524, 524, 524, 524, 524, 524, 524, 524, 524, 524, 524, 524, 524, 524, 524, 524, 524, 524, 524, 524, 524, 524, 524, 524, 524, 524, 524, 524, 524, 524, 524, 524, 524, 524, 524, 524, 524, 524, 524, 524, 524, 524, 524, 524, 524, 524, 524, 524, 524, 524, 524, 524, 524, 524, 524, 524, 524, 524, 524, 524, 524, 524, 524, 524, 524, 524, 524, 524}, + {523, 523, 523, 523, 523, 
523, 523, 523, 523, 523, 523, 523, 523, 523, 523, 523, 523, 523, 523, 523, 523, 523, 523, 523, 523, 523, 523, 523, 523, 523, 523, 523, 523, 523, 523, 523, 523, 523, 523, 523, 523, 523, 523, 523, 523, 523, 523, 523, 523, 523, 523, 523, 523, 523, 523, 523, 523, 523, 523, 523, 523, 523, 523, 523, 523, 523, 523, 523, 523, 523, 523, 523, 523, 523, 523, 523, 523, 523, 523, 523, 523, 523, 523, 523, 523, 523, 523, 523, 523, 523, 523, 523, 523, 523, 523, 523, 523, 523, 523, 523, 523, 523, 523, 523, 523, 523, 523, 523, 523, 523, 523, 523, 523, 523, 523, 523, 523, 523, 523, 523, 523, 523, 523, 523, 523, 523, 523, 523, 523, 523, 523, 523, 523, 523, 523, 523, 523, 523, 523, 523, 523, 523, 523, 523, 523, 523, 523, 523, 523, 523, 523, 523, 523, 523, 523, 523, 523, 523, 523, 523, 523, 523, 523, 523, 523, 523, 523, 523, 523, 523, 523, 523, 523, 523, 523, 523, 523, 523, 523, 523, 523, 523, 523, 523, 523, 523, 523, 523, 523, 523, 523, 523, 523, 523, 523, 523, 523, 523, 523, 523, 523, 523, 523, 523, 523, 523, 523, 523, 523, 523, 523, 523, 523, 523, 523, 523, 523, 523, 523, 523, 523, 523, 523, 523, 523, 523, 523, 523, 523, 523, 523, 523, 523, 523, 523, 523, 523, 523, 523, 523, 523, 523, 523, 523, 523, 523, 523, 523, 523, 523, 523, 523, 523, 523, 523, 523, 523, 523, 523, 523, 523, 523, 523, 523, 523, 523, 523, 523, 523, 523, 523, 523, 523, 523, 523, 523, 523, 523, 523, 523, 523, 523, 523, 523, 523, 523, 523, 523, 523, 523, 523, 523, 523, 523, 523, 523, 523, 523, 523, 523, 523, 523, 523, 523, 523, 523, 523, 523, 523, 523, 523, 523, 523, 523, 523, 523, 523, 523, 523, 523, 523, 523, 523, 523, 523, 523, 523, 523, 523, 523, 523, 523, 523, 523, 523, 523, 523, 523, 523, 523, 523, 523, 523, 523, 523, 523, 523, 523, 523, 523, 523, 523, 523, 523, 523, 523, 523, 523, 523, 523, 523, 523, 523, 523, 523, 523, 523, 523, 523, 523, 523, 523, 523, 523, 523, 523, 523, 523, 523, 523, 523, 523, 523, 523, 523, 523, 523, 523, 523, 523, 523, 523, 523, 523, 523, 523, 523, 523, 523, 523, 523, 523, 523, 523, 523, 523, 523, 523, 523, 523, 523, 523, 523, 523, 523, 523, 523, 523, 523, 523, 523, 523, 523, 523, 523, 523, 523, 523, 523, 523, 523, 523, 523, 523, 523, 523, 523, 523, 523, 523, 523, 523, 523, 523, 523, 523, 523, 523, 523, 523, 523, 523, 523, 523, 523, 523, 523, 523, 523, 523, 523, 523, 523, 523, 523, 523, 523, 523, 523, 523, 523, 523, 523, 523, 523, 523, 523, 523, 523, 523, 523, 523, 523, 523, 523, 523, 523, 523, 523, 523, 523, 523, 523, 523, 523, 523, 523, 523, 523, 523, 523, 523, 523, 523, 523, 523, 523, 523, 523, 523, 523, 523, 523, 523, 523, 523, 523, 523, 523, 523, 523, 523, 523}, + {522, 522, 522, 522, 522, 522, 522, 522, 522, 522, 522, 522, 522, 522, 522, 522, 522, 522, 522, 522, 522, 522, 522, 522, 522, 522, 522, 522, 522, 522, 522, 522, 522, 522, 522, 522, 522, 522, 522, 522, 522, 522, 522, 522, 522, 522, 522, 522, 522, 522, 522, 522, 522, 522, 522, 522, 522, 522, 522, 522, 522, 522, 522, 522, 522, 522, 522, 522, 522, 522, 522, 522, 522, 522, 522, 522, 522, 522, 522, 522, 522, 522, 522, 522, 522, 522, 522, 522, 522, 522, 522, 522, 522, 522, 522, 522, 522, 522, 522, 522, 522, 522, 522, 522, 522, 522, 522, 522, 522, 522, 522, 522, 522, 522, 522, 522, 522, 522, 522, 522, 522, 522, 522, 522, 522, 522, 522, 522, 522, 522, 522, 522, 522, 522, 522, 522, 522, 522, 522, 522, 522, 522, 522, 522, 522, 522, 522, 522, 522, 522, 522, 522, 522, 522, 522, 522, 522, 522, 522, 522, 522, 522, 522, 522, 522, 522, 522, 522, 522, 522, 522, 522, 522, 522, 522, 522, 522, 522, 522, 522, 522, 522, 522, 522, 522, 522, 522, 522, 522, 522, 522, 522, 
522, 522, 522, 522, 522, 522, 522, 522, 522, 522, 522, 522, 522, 522, 522, 522, 522, 522, 522, 522, 522, 522, 522, 522, 522, 522, 522, 522, 522, 522, 522, 522, 522, 522, 522, 522, 522, 522, 522, 522, 522, 522, 522, 522, 522, 522, 522, 522, 522, 522, 522, 522, 522, 522, 522, 522, 522, 522, 522, 522, 522, 522, 522, 522, 522, 522, 522, 522, 522, 522, 522, 522, 522, 522, 522, 522, 522, 522, 522, 522, 522, 522, 522, 522, 522, 522, 522, 522, 522, 522, 522, 522, 522, 522, 522, 522, 522, 522, 522, 522, 522, 522, 522, 522, 522, 522, 522, 522, 522, 522, 522, 522, 522, 522, 522, 522, 522, 522, 522, 522, 522, 522, 522, 522, 522, 522, 522, 522, 522, 522, 522, 522, 522, 522, 522, 522, 522, 522, 522, 522, 522, 522, 522, 522, 522, 522, 522, 522, 522, 522, 522, 522, 522, 522, 522, 522, 522, 522, 522, 522, 522, 522, 522, 522, 522, 522, 522, 522, 522, 522, 522, 522, 522, 522, 522, 522, 522, 522, 522, 522, 522, 522, 522, 522, 522, 522, 522, 522, 522, 522, 522, 522, 522, 522, 522, 522, 522, 522, 522, 522, 522, 522, 522, 522, 522, 522, 522, 522, 522, 522, 522, 522, 522, 522, 522, 522, 522, 522, 522, 522, 522, 522, 522, 522, 522, 522, 522, 522, 522, 522, 522, 522, 522, 522, 522, 522, 522, 522, 522, 522, 522, 522, 522, 522, 522, 522, 522, 522, 522, 522, 522, 522, 522, 522, 522, 522, 522, 522, 522, 522, 522, 522, 522, 522, 522, 522, 522, 522, 522, 522, 522, 522, 522, 522, 522, 522, 522, 522, 522, 522, 522, 522, 522, 522, 522, 522, 522, 522, 522, 522, 522, 522, 522, 522, 522, 522, 522, 522, 522, 522, 522, 522, 522, 522, 522, 522, 522, 522, 522, 522, 522, 522, 522, 522, 522, 522, 522, 522, 522, 522, 522, 522, 522, 522, 522, 522, 522, 522, 522, 522, 522}, + // 420 + {521, 521, 521, 521, 521, 521, 521, 521, 521, 521, 521, 521, 521, 521, 521, 521, 521, 521, 521, 521, 521, 521, 521, 521, 521, 521, 521, 521, 521, 521, 521, 521, 521, 521, 521, 521, 521, 521, 521, 521, 521, 521, 521, 521, 521, 521, 521, 521, 521, 521, 521, 521, 521, 521, 521, 521, 521, 521, 521, 521, 521, 521, 521, 521, 521, 521, 521, 521, 521, 521, 521, 521, 521, 521, 521, 521, 521, 521, 521, 521, 521, 521, 521, 521, 521, 521, 521, 521, 521, 521, 521, 521, 521, 521, 521, 521, 521, 521, 521, 521, 521, 521, 521, 521, 521, 521, 521, 521, 521, 521, 521, 521, 521, 521, 521, 521, 521, 521, 521, 521, 521, 521, 521, 521, 521, 521, 521, 521, 521, 521, 521, 521, 521, 521, 521, 521, 521, 521, 521, 521, 521, 521, 521, 521, 521, 521, 521, 521, 521, 521, 521, 521, 521, 521, 521, 521, 521, 521, 521, 521, 521, 521, 521, 521, 521, 521, 521, 521, 521, 521, 521, 521, 521, 521, 521, 521, 521, 521, 521, 521, 521, 521, 521, 521, 521, 521, 521, 521, 521, 521, 521, 521, 521, 521, 521, 521, 521, 521, 521, 521, 521, 521, 521, 521, 521, 521, 521, 521, 521, 521, 521, 521, 521, 521, 521, 521, 521, 521, 521, 521, 521, 521, 521, 521, 521, 521, 521, 521, 521, 521, 521, 521, 521, 521, 521, 521, 521, 521, 521, 521, 521, 521, 521, 521, 521, 521, 521, 521, 521, 521, 521, 521, 521, 521, 521, 521, 521, 521, 521, 521, 521, 521, 521, 521, 521, 521, 521, 521, 521, 521, 521, 521, 521, 521, 521, 521, 521, 521, 521, 521, 521, 521, 521, 521, 521, 521, 521, 521, 521, 521, 521, 521, 521, 521, 521, 521, 521, 521, 521, 521, 521, 521, 521, 521, 521, 521, 521, 521, 521, 521, 521, 521, 521, 521, 521, 521, 521, 521, 521, 521, 521, 521, 521, 521, 521, 521, 521, 521, 521, 521, 521, 521, 521, 521, 521, 521, 521, 521, 521, 521, 521, 521, 521, 521, 521, 521, 521, 521, 521, 521, 521, 521, 521, 521, 521, 521, 521, 521, 521, 521, 521, 521, 521, 521, 521, 521, 521, 521, 521, 521, 521, 521, 521, 521, 521, 521, 521, 
521, 521, 521, 521, 521, 521, 521, 521, 521, 521, 521, 521, 521, 521, 521, 521, 521, 521, 521, 521, 521, 521, 521, 521, 521, 521, 521, 521, 521, 521, 521, 521, 521, 521, 521, 521, 521, 521, 521, 521, 521, 521, 521, 521, 521, 521, 521, 521, 521, 521, 521, 521, 521, 521, 521, 521, 521, 521, 521, 521, 521, 521, 521, 521, 521, 521, 521, 521, 521, 521, 521, 521, 521, 521, 521, 521, 521, 521, 521, 521, 521, 521, 521, 521, 521, 521, 521, 521, 521, 521, 521, 521, 521, 521, 521, 521, 521, 521, 521, 521, 521, 521, 521, 521, 521, 521, 521, 521, 521, 521, 521, 521, 521, 521, 521, 521, 521, 521, 521, 521, 521, 521, 521, 521, 521, 521, 521, 521, 521, 521, 521, 521, 521, 521, 521, 521, 521, 521, 521, 521, 521, 521, 521, 521, 521, 521}, + {520, 520, 520, 520, 520, 520, 520, 520, 520, 520, 520, 520, 520, 520, 520, 520, 520, 520, 520, 520, 520, 520, 520, 520, 520, 520, 520, 520, 520, 520, 520, 520, 520, 520, 520, 520, 520, 520, 520, 520, 520, 520, 520, 520, 520, 520, 520, 520, 520, 520, 520, 520, 520, 520, 520, 520, 520, 520, 520, 520, 520, 520, 520, 520, 520, 520, 520, 520, 520, 520, 520, 520, 520, 520, 520, 520, 520, 520, 520, 520, 520, 520, 520, 520, 520, 520, 520, 520, 520, 520, 520, 520, 520, 520, 520, 520, 520, 520, 520, 520, 520, 520, 520, 520, 520, 520, 520, 520, 520, 520, 520, 520, 520, 520, 520, 520, 520, 520, 520, 520, 520, 520, 520, 520, 520, 520, 520, 520, 520, 520, 520, 520, 520, 520, 520, 520, 520, 520, 520, 520, 520, 520, 520, 520, 520, 520, 520, 520, 520, 520, 520, 520, 520, 520, 520, 520, 520, 520, 520, 520, 520, 520, 520, 520, 520, 520, 520, 520, 520, 520, 520, 520, 520, 520, 520, 520, 520, 520, 520, 520, 520, 520, 520, 520, 520, 520, 520, 520, 520, 520, 520, 520, 520, 520, 520, 520, 520, 520, 520, 520, 520, 520, 520, 520, 520, 520, 520, 520, 520, 520, 520, 520, 520, 520, 520, 520, 520, 520, 520, 520, 520, 520, 520, 520, 520, 520, 520, 520, 520, 520, 520, 520, 520, 520, 520, 520, 520, 520, 520, 520, 520, 520, 520, 520, 520, 520, 520, 520, 520, 520, 520, 520, 520, 520, 520, 520, 520, 520, 520, 520, 520, 520, 520, 520, 520, 520, 520, 520, 520, 520, 520, 520, 520, 520, 520, 520, 520, 520, 520, 520, 520, 520, 520, 520, 520, 520, 520, 520, 520, 520, 520, 520, 520, 520, 520, 520, 520, 520, 520, 520, 520, 520, 520, 520, 520, 520, 520, 520, 520, 520, 520, 520, 520, 520, 520, 520, 520, 520, 520, 520, 520, 520, 520, 520, 520, 520, 520, 520, 520, 520, 520, 520, 520, 520, 520, 520, 520, 520, 520, 520, 520, 520, 520, 520, 520, 520, 520, 520, 520, 520, 520, 520, 520, 520, 520, 520, 520, 520, 520, 520, 520, 520, 520, 520, 520, 520, 520, 520, 520, 520, 520, 520, 520, 520, 520, 520, 520, 520, 520, 520, 520, 520, 520, 520, 520, 520, 520, 520, 520, 520, 520, 520, 520, 520, 520, 520, 520, 520, 520, 520, 520, 520, 520, 520, 520, 520, 520, 520, 520, 520, 520, 520, 520, 520, 520, 520, 520, 520, 520, 520, 520, 520, 520, 520, 520, 520, 520, 520, 520, 520, 520, 520, 520, 520, 520, 520, 520, 520, 520, 520, 520, 520, 520, 520, 520, 520, 520, 520, 520, 520, 520, 520, 520, 520, 520, 520, 520, 520, 520, 520, 520, 520, 520, 520, 520, 520, 520, 520, 520, 520, 520, 520, 520, 520, 520, 520, 520, 520, 520, 520, 520, 520, 520, 520, 520, 520, 520, 520, 520, 520, 520, 520, 520, 520, 520, 520, 520, 520, 520, 520, 520, 520, 520, 520, 520, 520, 520, 520, 520, 520, 520, 520, 520, 520, 520, 520, 520, 520, 520, 520, 520, 520, 520}, + {6: 4, 4}, + {118, 118, 118, 118, 118, 118, 10: 118, 118, 118, 118, 118, 118, 118, 118, 118, 118, 118, 118, 118, 118, 118, 118, 118, 118, 118, 118, 118, 118, 118, 118, 118, 118, 118, 118, 118, 118, 118, 
118, 118, 118, 118, 118, 118, 118, 118, 118, 118, 118, 118, 118, 118, 118, 118, 118, 118, 118, 118, 118, 118, 118, 118, 118, 118, 118, 118, 118, 118, 118, 118, 118, 118, 118, 118, 118, 118, 118, 118, 118, 118, 118, 118, 118, 118, 118, 118, 118, 118, 118, 118, 118, 118, 118, 118, 118, 118, 118, 118, 118, 118, 118, 118, 118, 118, 118, 118, 118, 118, 118, 118, 118, 118, 118, 118, 118, 118, 118, 118, 118, 118, 118, 118, 118, 118, 118, 118, 118, 118, 118, 118, 118, 118, 118, 118, 118, 118, 118, 118, 118, 118, 118, 118, 118, 118, 118, 118, 118, 118, 118, 118, 118, 118, 118, 118, 118, 118, 118, 118, 118, 118, 118, 118, 118, 118, 118, 118, 118, 118, 118, 118, 118, 118, 118, 118, 118, 118, 118, 118, 118, 118, 118, 118, 118, 118, 118, 118, 118, 118, 118, 118, 118, 118, 118, 118, 118, 118, 118, 118, 118, 118, 118, 118, 118, 118, 118, 118, 118, 118, 118, 118, 118, 118, 118, 118, 118, 118, 118, 118, 118, 118, 118, 118, 118, 118, 118, 118, 118, 118, 118, 118, 118, 118, 118, 118, 118, 118, 118, 118, 118, 118, 118, 118, 118, 118, 118, 118, 118, 118, 118, 118, 118, 118, 118, 118, 118, 118, 118, 118, 118, 118, 118, 118, 118, 118, 118, 118, 118, 118, 118, 118, 118, 118, 118, 118, 118, 118, 118, 118, 118, 118, 118, 118, 118, 118, 118, 118, 118, 118, 118, 118, 118, 118, 118, 118, 118, 118, 118, 118, 118, 118, 118, 118, 118, 118, 118, 118, 118, 118, 118, 118, 118, 118, 118, 118, 118, 118, 118, 118, 118, 118, 118, 118, 118, 118, 118, 118, 118, 118, 118, 118, 118, 118, 118, 118, 118, 118, 118, 118, 118, 118, 118, 118, 118, 118, 118, 118, 118, 118, 118, 118, 118, 118, 118, 118, 118, 118, 118, 118, 118, 118, 118, 118, 118, 118, 118, 118, 118, 118, 118}, + {1309, 1332, 1217, 1442, 1436, 1426, 10: 1280, 1229, 1477, 1511, 1504, 1497, 1507, 1500, 1499, 1501, 1517, 1509, 1503, 1515, 1516, 1513, 1514, 1502, 1498, 1505, 1506, 1508, 1512, 1510, 1547, 1453, 1451, 1452, 1314, 1216, 1226, 1441, 1244, 1288, 1246, 1225, 1260, 1263, 1434, 1299, 1335, 1522, 1521, 1270, 1338, 1298, 1476, 1221, 1231, 1340, 1439, 1341, 1257, 1518, 1519, 1438, 1326, 1350, 1273, 1278, 1430, 1431, 1283, 1289, 1384, 1296, 1432, 1433, 1219, 1222, 1224, 1223, 1238, 1237, 1482, 1427, 1243, 1249, 1261, 1262, 1250, 1485, 1405, 1318, 1319, 1279, 1450, 1290, 1293, 1292, 1415, 1295, 1300, 1301, 1402, 1214, 1529, 1215, 1218, 1460, 1387, 1304, 1220, 1310, 1348, 1349, 1345, 1530, 1531, 1532, 1406, 1576, 1478, 1479, 1467, 1480, 1227, 1394, 1533, 1312, 1396, 1228, 1381, 1481, 1360, 1308, 1230, 1329, 1232, 1233, 1313, 1311, 1234, 1408, 1534, 1535, 1404, 1235, 1536, 1468, 1236, 1537, 1538, 1239, 1240, 1388, 1324, 1483, 1417, 1241, 1484, 1242, 1245, 1247, 1248, 1251, 1386, 1351, 1252, 1577, 1435, 1356, 1253, 1461, 1401, 1574, 1254, 1539, 1411, 1255, 1256, 1580, 1258, 1259, 1346, 1540, 1322, 1541, 1418, 1459, 1264, 1307, 1210, 1462, 1403, 1337, 1542, 1265, 1543, 1544, 1389, 1407, 1412, 1325, 1398, 1486, 1457, 1268, 1266, 1334, 1419, 1267, 1456, 1458, 1315, 1546, 1473, 1472, 1376, 1377, 1316, 1378, 1379, 1390, 1365, 1545, 1317, 1366, 1463, 1302, 1361, 1269, 1400, 1573, 1344, 1466, 1469, 1420, 1487, 1488, 1464, 1465, 1353, 1470, 1548, 1454, 1354, 1331, 1285, 1524, 1575, 1410, 1422, 1425, 1352, 1271, 1475, 1474, 1525, 1367, 1550, 1368, 1272, 1343, 1362, 1363, 1364, 1489, 1321, 1370, 1369, 1274, 1549, 1395, 1275, 1528, 1527, 1383, 1424, 1276, 1437, 1327, 1455, 1380, 1328, 1342, 1277, 1385, 1359, 1320, 1490, 1371, 1429, 1393, 1372, 1471, 1333, 1373, 1374, 1281, 1423, 1382, 1375, 1282, 1305, 1414, 1523, 1416, 1336, 1339, 1443, 1444, 1445, 1446, 1447, 1448, 1449, 1578, 1491, 
1358, 1494, 1495, 1493, 1492, 1357, 1428, 1284, 1554, 1555, 1556, 1557, 1579, 1551, 1397, 1287, 1286, 1552, 1553, 1355, 1413, 1409, 1421, 1440, 1391, 1291, 1496, 1561, 1562, 1563, 1564, 1565, 1566, 1568, 1567, 1569, 1570, 1571, 1520, 1294, 1323, 1572, 1297, 1330, 1392, 1306, 1558, 1559, 1560, 1347, 1303, 1526, 1399, 523: 1584, 1212, 1213, 1211, 555: 1585}, + // 425 + {338, 338, 338, 338, 338, 338, 338, 338, 338, 338, 338, 338, 338, 338, 338, 338, 338, 338, 338, 338, 338, 338, 338, 338, 338, 338, 338, 338, 338, 338, 338, 338, 338, 338, 338, 338, 338, 338, 338, 338, 338, 338, 338, 338, 338, 338, 338, 338, 338, 338, 338, 338, 338, 338, 338, 338, 338, 338, 338, 338, 338, 338, 338, 338, 338, 338, 338, 338, 338, 338, 338, 338, 338, 338, 338, 338, 338, 338, 338, 338, 338, 338, 338, 338, 338, 338, 338, 338, 338, 338, 338, 338, 338, 338, 338, 338, 338, 338, 338, 338, 338, 338, 338, 338, 338, 338, 338, 338, 338, 338, 338, 338, 338, 338, 338, 338, 338, 338, 338, 338, 338, 338, 338, 338, 338, 338, 338, 338, 338, 338, 338, 338, 338, 338, 338, 338, 338, 338, 338, 338, 338, 338, 338, 338, 338, 338, 338, 338, 338, 338, 338, 338, 338, 338, 338, 338, 338, 338, 338, 338, 338, 338, 338, 338, 338, 338, 338, 338, 338, 338, 338, 338, 338, 338, 338, 338, 338, 338, 338, 338, 338, 338, 338, 338, 338, 338, 338, 338, 338, 338, 338, 338, 338, 338, 338, 338, 338, 338, 338, 338, 338, 338, 338, 338, 338, 338, 338, 338, 338, 338, 338, 338, 338, 338, 338, 338, 338, 338, 338, 338, 338, 338, 338, 338, 338, 338, 338, 338, 338, 338, 338, 338, 338, 338, 338, 338, 338, 338, 338, 338, 338, 338, 338, 338, 338, 338, 338, 338, 338, 338, 338, 338, 338, 338, 338, 338, 338, 338, 338, 338, 338, 338, 338, 338, 338, 338, 338, 338, 338, 338, 338, 338, 338, 338, 338, 338, 338, 338, 338, 338, 338, 338, 338, 338, 338, 338, 338, 338, 338, 338, 338, 338, 338, 338, 338, 338, 338, 338, 338, 338, 338, 338, 338, 338, 338, 338, 338, 338, 338, 338, 338, 338, 338, 338, 338, 338, 338, 338, 338, 338, 338, 338, 338, 338, 338, 338, 338, 338, 338, 338, 338, 338, 338, 338, 338, 338, 338, 338, 338, 338, 338, 338, 338, 338, 338, 338, 338, 338, 338, 338, 338, 338, 338, 338, 338, 338, 338, 338, 338, 338, 338, 338, 338, 338, 338, 338, 338, 338, 338, 338, 338, 338, 373: 338, 375: 338, 384: 338, 386: 338, 394: 338, 399: 338, 1586, 402: 338, 429: 338, 473: 338, 338, 476: 338, 338, 338, 338, 338, 338, 483: 338, 338, 338, 487: 338, 491: 338, 494: 338, 497: 338, 508: 338, 518: 338}, + {6: 117, 117}, + {1309, 1332, 1217, 1442, 1436, 1426, 10: 1280, 1229, 1477, 1511, 1504, 1497, 1507, 1500, 1499, 1501, 1517, 1509, 1503, 1515, 1516, 1513, 1514, 1502, 1498, 1505, 1506, 1508, 1512, 1510, 1547, 1453, 1451, 1452, 1314, 1216, 1226, 1441, 1244, 1288, 1246, 1225, 1260, 1263, 1434, 1299, 1335, 1522, 1521, 1270, 1338, 1298, 1476, 1221, 1231, 1340, 1439, 1341, 1257, 1518, 1519, 1438, 1326, 1350, 1273, 1278, 1430, 1431, 1283, 1289, 1384, 1296, 1432, 1433, 1219, 1222, 1224, 1223, 1238, 1237, 1482, 1427, 1243, 1249, 1261, 1262, 1250, 1485, 1405, 1318, 1319, 1279, 1450, 1290, 1293, 1292, 1415, 1295, 1300, 1301, 1402, 1214, 1529, 1215, 1218, 1460, 1387, 1304, 1220, 1310, 1348, 1349, 1345, 1530, 1531, 1532, 1406, 1576, 1478, 1479, 1467, 1480, 1227, 1394, 1533, 1312, 1396, 1228, 1381, 1481, 1360, 1308, 1230, 1329, 1232, 1233, 1313, 1311, 1234, 1408, 1534, 1535, 1404, 1235, 1536, 1468, 1236, 1537, 1538, 1239, 1240, 1388, 1324, 1483, 1417, 1241, 1484, 1242, 1245, 1247, 1248, 1251, 1386, 1351, 1252, 1577, 1435, 1356, 1253, 1461, 1401, 1574, 1254, 1539, 1411, 1255, 1256, 1580, 1258, 1259, 
1346, 1540, 1322, 1541, 1418, 1459, 1264, 1307, 1210, 1462, 1403, 1337, 1542, 1265, 1543, 1544, 1389, 1407, 1412, 1325, 1398, 1486, 1457, 1268, 1266, 1334, 1419, 1267, 1456, 1458, 1315, 1546, 1473, 1472, 1376, 1377, 1316, 1378, 1379, 1390, 1365, 1545, 1317, 1366, 1463, 1302, 1361, 1269, 1400, 1573, 1344, 1466, 1469, 1420, 1487, 1488, 1464, 1465, 1353, 1470, 1548, 1454, 1354, 1331, 1285, 1524, 1575, 1410, 1422, 1425, 1352, 1271, 1475, 1474, 1525, 1367, 1550, 1368, 1272, 1343, 1362, 1363, 1364, 1489, 1321, 1370, 1369, 1274, 1549, 1395, 1275, 1528, 1527, 1383, 1424, 1276, 1437, 1327, 1455, 1380, 1328, 1342, 1277, 1385, 1359, 1320, 1490, 1371, 1429, 1393, 1372, 1471, 1333, 1373, 1374, 1281, 1423, 1382, 1375, 1282, 1305, 1414, 1523, 1416, 1336, 1339, 1443, 1444, 1445, 1446, 1447, 1448, 1449, 1578, 1491, 1358, 1494, 1495, 1493, 1492, 1357, 1428, 1284, 1554, 1555, 1556, 1557, 1579, 1551, 1397, 1287, 1286, 1552, 1553, 1355, 1413, 1409, 1421, 1440, 1391, 1291, 1496, 1561, 1562, 1563, 1564, 1565, 1566, 1568, 1567, 1569, 1570, 1571, 1520, 1294, 1323, 1572, 1297, 1330, 1392, 1306, 1558, 1559, 1560, 1347, 1303, 1526, 1399, 523: 1587, 1212, 1213, 1211}, + {337, 337, 337, 337, 337, 337, 337, 337, 337, 337, 337, 337, 337, 337, 337, 337, 337, 337, 337, 337, 337, 337, 337, 337, 337, 337, 337, 337, 337, 337, 337, 337, 337, 337, 337, 337, 337, 337, 337, 337, 337, 337, 337, 337, 337, 337, 337, 337, 337, 337, 337, 337, 337, 337, 337, 337, 337, 337, 337, 337, 337, 337, 337, 337, 337, 337, 337, 337, 337, 337, 337, 337, 337, 337, 337, 337, 337, 337, 337, 337, 337, 337, 337, 337, 337, 337, 337, 337, 337, 337, 337, 337, 337, 337, 337, 337, 337, 337, 337, 337, 337, 337, 337, 337, 337, 337, 337, 337, 337, 337, 337, 337, 337, 337, 337, 337, 337, 337, 337, 337, 337, 337, 337, 337, 337, 337, 337, 337, 337, 337, 337, 337, 337, 337, 337, 337, 337, 337, 337, 337, 337, 337, 337, 337, 337, 337, 337, 337, 337, 337, 337, 337, 337, 337, 337, 337, 337, 337, 337, 337, 337, 337, 337, 337, 337, 337, 337, 337, 337, 337, 337, 337, 337, 337, 337, 337, 337, 337, 337, 337, 337, 337, 337, 337, 337, 337, 337, 337, 337, 337, 337, 337, 337, 337, 337, 337, 337, 337, 337, 337, 337, 337, 337, 337, 337, 337, 337, 337, 337, 337, 337, 337, 337, 337, 337, 337, 337, 337, 337, 337, 337, 337, 337, 337, 337, 337, 337, 337, 337, 337, 337, 337, 337, 337, 337, 337, 337, 337, 337, 337, 337, 337, 337, 337, 337, 337, 337, 337, 337, 337, 337, 337, 337, 337, 337, 337, 337, 337, 337, 337, 337, 337, 337, 337, 337, 337, 337, 337, 337, 337, 337, 337, 337, 337, 337, 337, 337, 337, 337, 337, 337, 337, 337, 337, 337, 337, 337, 337, 337, 337, 337, 337, 337, 337, 337, 337, 337, 337, 337, 337, 337, 337, 337, 337, 337, 337, 337, 337, 337, 337, 337, 337, 337, 337, 337, 337, 337, 337, 337, 337, 337, 337, 337, 337, 337, 337, 337, 337, 337, 337, 337, 337, 337, 337, 337, 337, 337, 337, 337, 337, 337, 337, 337, 337, 337, 337, 337, 337, 337, 337, 337, 337, 337, 337, 337, 337, 337, 337, 337, 337, 337, 337, 337, 337, 337, 337, 337, 337, 337, 337, 337, 337, 373: 337, 375: 337, 384: 337, 386: 337, 394: 337, 399: 337, 402: 337, 429: 337, 473: 337, 337, 476: 337, 337, 337, 337, 337, 337, 483: 337, 337, 337, 487: 337, 491: 337, 494: 337, 497: 337, 508: 337, 518: 337}, + {6: 166, 166, 473: 1615, 782: 1614}, + // 430 + {433: 1607, 573: 1606}, + {43: 1600, 54: 1599}, + {6: 171, 171, 473: 171}, + {6: 169, 169, 473: 169}, + {6: 168, 168, 473: 168}, + // 435 + {50: 1598}, + {50: 163}, + {50: 162}, + {43: 160, 54: 160}, + {6: 167, 167, 473: 167}, + // 440 + {6: 177, 177}, + {6: 159, 159, 
398: 1601, 432: 1602, 473: 159, 744: 1604, 781: 1603}, + {173, 173, 173, 173, 173, 173, 10: 173, 173, 173, 173, 173, 173, 173, 173, 173, 173, 173, 173, 173, 173, 173, 173, 173, 173, 173, 173, 173, 173, 173, 173, 173, 173, 173, 173, 173, 173, 173, 173, 173, 173, 173, 173, 173, 173, 173, 173, 173, 173, 173, 173, 173, 173, 173, 173, 173, 173, 173, 173, 173, 173, 173, 173, 173, 173, 173, 173, 173, 173, 173, 173, 173, 173, 173, 173, 173, 173, 173, 173, 173, 173, 173, 173, 173, 173, 173, 173, 173, 173, 173, 173, 173, 173, 173, 173, 173, 173, 173, 173, 173, 173, 173, 173, 173, 173, 173, 173, 173, 173, 173, 173, 173, 173, 173, 173, 173, 173, 173, 173, 173, 173, 173, 173, 173, 173, 173, 173, 173, 173, 173, 173, 173, 173, 173, 173, 173, 173, 173, 173, 173, 173, 173, 173, 173, 173, 173, 173, 173, 173, 173, 173, 173, 173, 173, 173, 173, 173, 173, 173, 173, 173, 173, 173, 173, 173, 173, 173, 173, 173, 173, 173, 173, 173, 173, 173, 173, 173, 173, 173, 173, 173, 173, 173, 173, 173, 173, 173, 173, 173, 173, 173, 173, 173, 173, 173, 173, 173, 173, 173, 173, 173, 173, 173, 173, 173, 173, 173, 173, 173, 173, 173, 173, 173, 173, 173, 173, 173, 173, 173, 173, 173, 173, 173, 173, 173, 173, 173, 173, 173, 173, 173, 173, 173, 173, 173, 173, 173, 173, 173, 173, 173, 173, 173, 173, 173, 173, 173, 173, 173, 173, 173, 173, 173, 173, 173, 173, 173, 173, 173, 173, 173, 173, 173, 173, 173, 173, 173, 173, 173, 173, 173, 173, 173, 173, 173, 173, 173, 173, 173, 173, 173, 173, 173, 173, 173, 173, 173, 173, 173, 173, 173, 173, 173, 173, 173, 173, 173, 173, 173, 173, 173, 173, 173, 173, 173, 173, 173, 173, 173, 173, 173, 173, 173, 173, 173, 173, 173, 173, 173, 173, 173, 173, 173, 173, 173, 173, 173, 173, 173, 173, 173, 173, 173, 173, 173, 173, 173, 173, 173, 173, 173, 173, 173, 173, 173, 173, 173, 173, 173, 173, 173, 173, 173, 173, 173, 173, 173, 173, 173, 173, 173, 173, 173, 173, 173, 173, 173, 173, 173}, + {172, 172, 172, 172, 172, 172, 10: 172, 172, 172, 172, 172, 172, 172, 172, 172, 172, 172, 172, 172, 172, 172, 172, 172, 172, 172, 172, 172, 172, 172, 172, 172, 172, 172, 172, 172, 172, 172, 172, 172, 172, 172, 172, 172, 172, 172, 172, 172, 172, 172, 172, 172, 172, 172, 172, 172, 172, 172, 172, 172, 172, 172, 172, 172, 172, 172, 172, 172, 172, 172, 172, 172, 172, 172, 172, 172, 172, 172, 172, 172, 172, 172, 172, 172, 172, 172, 172, 172, 172, 172, 172, 172, 172, 172, 172, 172, 172, 172, 172, 172, 172, 172, 172, 172, 172, 172, 172, 172, 172, 172, 172, 172, 172, 172, 172, 172, 172, 172, 172, 172, 172, 172, 172, 172, 172, 172, 172, 172, 172, 172, 172, 172, 172, 172, 172, 172, 172, 172, 172, 172, 172, 172, 172, 172, 172, 172, 172, 172, 172, 172, 172, 172, 172, 172, 172, 172, 172, 172, 172, 172, 172, 172, 172, 172, 172, 172, 172, 172, 172, 172, 172, 172, 172, 172, 172, 172, 172, 172, 172, 172, 172, 172, 172, 172, 172, 172, 172, 172, 172, 172, 172, 172, 172, 172, 172, 172, 172, 172, 172, 172, 172, 172, 172, 172, 172, 172, 172, 172, 172, 172, 172, 172, 172, 172, 172, 172, 172, 172, 172, 172, 172, 172, 172, 172, 172, 172, 172, 172, 172, 172, 172, 172, 172, 172, 172, 172, 172, 172, 172, 172, 172, 172, 172, 172, 172, 172, 172, 172, 172, 172, 172, 172, 172, 172, 172, 172, 172, 172, 172, 172, 172, 172, 172, 172, 172, 172, 172, 172, 172, 172, 172, 172, 172, 172, 172, 172, 172, 172, 172, 172, 172, 172, 172, 172, 172, 172, 172, 172, 172, 172, 172, 172, 172, 172, 172, 172, 172, 172, 172, 172, 172, 172, 172, 172, 172, 172, 172, 172, 172, 172, 172, 172, 172, 172, 172, 172, 172, 172, 172, 172, 172, 172, 172, 172, 172, 172, 172, 172, 172, 172, 
172, 172, 172, 172, 172, 172, 172, 172, 172, 172, 172, 172, 172, 172, 172, 172, 172, 172, 172, 172, 172, 172, 172, 172, 172, 172, 172, 172, 172, 172, 172, 172, 172, 172, 172, 172, 172, 172, 172}, + {6: 170, 170, 473: 170}, + // 445 + {1309, 1332, 1217, 1442, 1436, 1426, 10: 1280, 1229, 1477, 1511, 1504, 1497, 1507, 1500, 1499, 1501, 1517, 1509, 1503, 1515, 1516, 1513, 1514, 1502, 1498, 1505, 1506, 1508, 1512, 1510, 1547, 1453, 1451, 1452, 1314, 1216, 1226, 1441, 1244, 1288, 1246, 1225, 1260, 1263, 1434, 1299, 1335, 1522, 1521, 1270, 1338, 1298, 1476, 1221, 1231, 1340, 1439, 1341, 1257, 1518, 1519, 1438, 1326, 1350, 1273, 1278, 1430, 1431, 1283, 1289, 1384, 1296, 1432, 1433, 1219, 1222, 1224, 1223, 1238, 1237, 1482, 1427, 1243, 1249, 1261, 1262, 1250, 1485, 1405, 1318, 1319, 1279, 1450, 1290, 1293, 1292, 1415, 1295, 1300, 1301, 1402, 1214, 1529, 1215, 1218, 1460, 1387, 1304, 1220, 1310, 1348, 1349, 1345, 1530, 1531, 1532, 1406, 1576, 1478, 1479, 1467, 1480, 1227, 1394, 1533, 1312, 1396, 1228, 1381, 1481, 1360, 1308, 1230, 1329, 1232, 1233, 1313, 1311, 1234, 1408, 1534, 1535, 1404, 1235, 1536, 1468, 1236, 1537, 1538, 1239, 1240, 1388, 1324, 1483, 1417, 1241, 1484, 1242, 1245, 1247, 1248, 1251, 1386, 1351, 1252, 1577, 1435, 1356, 1253, 1461, 1401, 1574, 1254, 1539, 1411, 1255, 1256, 1580, 1258, 1259, 1346, 1540, 1322, 1541, 1418, 1459, 1264, 1307, 1210, 1462, 1403, 1337, 1542, 1265, 1543, 1544, 1389, 1407, 1412, 1325, 1398, 1486, 1457, 1268, 1266, 1334, 1419, 1267, 1456, 1458, 1315, 1546, 1473, 1472, 1376, 1377, 1316, 1378, 1379, 1390, 1365, 1545, 1317, 1366, 1463, 1302, 1361, 1269, 1400, 1573, 1344, 1466, 1469, 1420, 1487, 1488, 1464, 1465, 1353, 1470, 1548, 1454, 1354, 1331, 1285, 1524, 1575, 1410, 1422, 1425, 1352, 1271, 1475, 1474, 1525, 1367, 1550, 1368, 1272, 1343, 1362, 1363, 1364, 1489, 1321, 1370, 1369, 1274, 1549, 1395, 1275, 1528, 1527, 1383, 1424, 1276, 1437, 1327, 1455, 1380, 1328, 1342, 1277, 1385, 1359, 1320, 1490, 1371, 1429, 1393, 1372, 1471, 1333, 1373, 1374, 1281, 1423, 1382, 1375, 1282, 1305, 1414, 1523, 1416, 1336, 1339, 1443, 1444, 1445, 1446, 1447, 1448, 1449, 1578, 1491, 1358, 1494, 1495, 1493, 1492, 1357, 1428, 1284, 1554, 1555, 1556, 1557, 1579, 1551, 1397, 1287, 1286, 1552, 1553, 1355, 1413, 1409, 1421, 1440, 1391, 1291, 1496, 1561, 1562, 1563, 1564, 1565, 1566, 1568, 1567, 1569, 1570, 1571, 1520, 1294, 1323, 1572, 1297, 1330, 1392, 1306, 1558, 1559, 1560, 1347, 1303, 1526, 1399, 523: 1209, 1212, 1213, 1211, 592: 1605}, + {6: 158, 158, 473: 158}, + {1309, 1332, 1217, 1442, 1436, 1426, 10: 1280, 1229, 1477, 1511, 1504, 1497, 1507, 1500, 1499, 1501, 1517, 1509, 1503, 1515, 1516, 1513, 1514, 1502, 1498, 1505, 1506, 1508, 1512, 1510, 1547, 1453, 1451, 1452, 1314, 1216, 1226, 1441, 1244, 1288, 1246, 1225, 1260, 1263, 1434, 1299, 1335, 1522, 1521, 1270, 1338, 1298, 1476, 1221, 1231, 1340, 1439, 1341, 1257, 1518, 1519, 1438, 1326, 1350, 1273, 1278, 1430, 1431, 1283, 1289, 1384, 1296, 1432, 1433, 1219, 1222, 1224, 1223, 1238, 1237, 1482, 1427, 1243, 1249, 1261, 1262, 1250, 1485, 1405, 1318, 1319, 1279, 1450, 1290, 1293, 1292, 1415, 1295, 1300, 1301, 1402, 1214, 1529, 1215, 1218, 1460, 1387, 1304, 1220, 1310, 1348, 1349, 1345, 1530, 1531, 1532, 1406, 1576, 1478, 1479, 1467, 1480, 1227, 1394, 1533, 1312, 1396, 1228, 1381, 1481, 1360, 1308, 1230, 1329, 1232, 1233, 1313, 1311, 1234, 1408, 1534, 1535, 1404, 1235, 1536, 1468, 1236, 1537, 1538, 1239, 1240, 1388, 1324, 1483, 1417, 1241, 1484, 1242, 1245, 1247, 1248, 1251, 1386, 1351, 1252, 1577, 1435, 1356, 1253, 1461, 1401, 1574, 
1254, 1539, 1411, 1255, 1256, 1580, 1258, 1259, 1346, 1540, 1322, 1541, 1418, 1459, 1264, 1307, 1210, 1462, 1403, 1337, 1542, 1265, 1543, 1544, 1389, 1407, 1412, 1325, 1398, 1486, 1457, 1268, 1266, 1334, 1419, 1267, 1456, 1458, 1315, 1546, 1473, 1472, 1376, 1377, 1316, 1378, 1379, 1390, 1365, 1545, 1317, 1366, 1463, 1302, 1361, 1269, 1400, 1573, 1344, 1466, 1469, 1420, 1487, 1488, 1464, 1465, 1353, 1470, 1548, 1454, 1354, 1331, 1285, 1524, 1575, 1410, 1422, 1425, 1352, 1271, 1475, 1474, 1525, 1367, 1550, 1368, 1272, 1343, 1362, 1363, 1364, 1489, 1321, 1370, 1369, 1274, 1549, 1395, 1275, 1528, 1527, 1383, 1424, 1276, 1437, 1327, 1455, 1380, 1328, 1342, 1277, 1385, 1359, 1320, 1490, 1371, 1429, 1393, 1372, 1471, 1333, 1373, 1374, 1281, 1423, 1382, 1375, 1282, 1305, 1414, 1523, 1416, 1336, 1339, 1443, 1444, 1445, 1446, 1447, 1448, 1449, 1578, 1491, 1358, 1494, 1495, 1493, 1492, 1357, 1428, 1284, 1554, 1555, 1556, 1557, 1579, 1551, 1397, 1287, 1286, 1552, 1553, 1355, 1413, 1409, 1421, 1440, 1391, 1291, 1496, 1561, 1562, 1563, 1564, 1565, 1566, 1568, 1567, 1569, 1570, 1571, 1520, 1294, 1323, 1572, 1297, 1330, 1392, 1306, 1558, 1559, 1560, 1347, 1303, 1526, 1399, 523: 1584, 1212, 1213, 1211, 555: 1613}, + {913, 913, 913, 913, 913, 913, 10: 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 405: 1608, 587: 1609}, + {372: 1611}, + // 450 + {1309, 1332, 1217, 1442, 1436, 1426, 10: 1280, 1229, 1477, 1511, 1504, 1497, 1507, 1500, 1499, 1501, 1517, 1509, 1503, 1515, 1516, 1513, 1514, 1502, 1498, 1505, 1506, 1508, 1512, 1510, 1547, 1453, 1451, 1452, 1314, 1216, 1226, 1441, 1244, 1288, 1246, 1225, 1260, 1263, 1434, 1299, 1335, 1522, 1521, 1270, 1338, 1298, 1476, 1221, 1231, 1340, 1439, 1341, 1257, 1518, 1519, 1438, 1326, 1350, 1273, 1278, 1430, 1431, 1283, 1289, 1384, 1296, 
1432, 1433, 1219, 1222, 1224, 1223, 1238, 1237, 1482, 1427, 1243, 1249, 1261, 1262, 1250, 1485, 1405, 1318, 1319, 1279, 1450, 1290, 1293, 1292, 1415, 1295, 1300, 1301, 1402, 1214, 1529, 1215, 1218, 1460, 1387, 1304, 1220, 1310, 1348, 1349, 1345, 1530, 1531, 1532, 1406, 1576, 1478, 1479, 1467, 1480, 1227, 1394, 1533, 1312, 1396, 1228, 1381, 1481, 1360, 1308, 1230, 1329, 1232, 1233, 1313, 1311, 1234, 1408, 1534, 1535, 1404, 1235, 1536, 1468, 1236, 1537, 1538, 1239, 1240, 1388, 1324, 1483, 1417, 1241, 1484, 1242, 1245, 1247, 1248, 1251, 1386, 1351, 1252, 1577, 1435, 1356, 1253, 1461, 1401, 1574, 1254, 1539, 1411, 1255, 1256, 1580, 1258, 1259, 1346, 1540, 1322, 1541, 1418, 1459, 1264, 1307, 1210, 1462, 1403, 1337, 1542, 1265, 1543, 1544, 1389, 1407, 1412, 1325, 1398, 1486, 1457, 1268, 1266, 1334, 1419, 1267, 1456, 1458, 1315, 1546, 1473, 1472, 1376, 1377, 1316, 1378, 1379, 1390, 1365, 1545, 1317, 1366, 1463, 1302, 1361, 1269, 1400, 1573, 1344, 1466, 1469, 1420, 1487, 1488, 1464, 1465, 1353, 1470, 1548, 1454, 1354, 1331, 1285, 1524, 1575, 1410, 1422, 1425, 1352, 1271, 1475, 1474, 1525, 1367, 1550, 1368, 1272, 1343, 1362, 1363, 1364, 1489, 1321, 1370, 1369, 1274, 1549, 1395, 1275, 1528, 1527, 1383, 1424, 1276, 1437, 1327, 1455, 1380, 1328, 1342, 1277, 1385, 1359, 1320, 1490, 1371, 1429, 1393, 1372, 1471, 1333, 1373, 1374, 1281, 1423, 1382, 1375, 1282, 1305, 1414, 1523, 1416, 1336, 1339, 1443, 1444, 1445, 1446, 1447, 1448, 1449, 1578, 1491, 1358, 1494, 1495, 1493, 1492, 1357, 1428, 1284, 1554, 1555, 1556, 1557, 1579, 1551, 1397, 1287, 1286, 1552, 1553, 1355, 1413, 1409, 1421, 1440, 1391, 1291, 1496, 1561, 1562, 1563, 1564, 1565, 1566, 1568, 1567, 1569, 1570, 1571, 1520, 1294, 1323, 1572, 1297, 1330, 1392, 1306, 1558, 1559, 1560, 1347, 1303, 1526, 1399, 523: 1209, 1212, 1213, 1211, 592: 1610}, + {6: 178, 178}, + {671: 1612}, + {912, 912, 912, 912, 912, 912, 10: 912, 912, 912, 912, 912, 912, 912, 912, 912, 912, 912, 912, 912, 912, 912, 912, 912, 912, 912, 912, 912, 912, 912, 912, 912, 912, 912, 912, 912, 912, 912, 912, 912, 912, 912, 912, 912, 912, 912, 912, 912, 912, 912, 912, 912, 912, 912, 912, 912, 912, 912, 912, 912, 912, 912, 912, 912, 912, 912, 912, 912, 912, 912, 912, 912, 912, 912, 912, 912, 912, 912, 912, 912, 912, 912, 912, 912, 912, 912, 912, 912, 912, 912, 912, 912, 912, 912, 912, 912, 912, 912, 912, 912, 912, 912, 912, 912, 912, 912, 912, 912, 912, 912, 912, 912, 912, 912, 912, 912, 912, 912, 912, 912, 912, 912, 912, 912, 912, 912, 912, 912, 912, 912, 912, 912, 912, 912, 912, 912, 912, 912, 912, 912, 912, 912, 912, 912, 912, 912, 912, 912, 912, 912, 912, 912, 912, 912, 912, 912, 912, 912, 912, 912, 912, 912, 912, 912, 912, 912, 912, 912, 912, 912, 912, 912, 912, 912, 912, 912, 912, 912, 912, 912, 912, 912, 912, 912, 912, 912, 912, 912, 912, 912, 912, 912, 912, 912, 912, 912, 912, 912, 912, 912, 912, 912, 912, 912, 912, 912, 912, 912, 912, 912, 912, 912, 912, 912, 912, 912, 912, 912, 912, 912, 912, 912, 912, 912, 912, 912, 912, 912, 912, 912, 912, 912, 912, 912, 912, 912, 912, 912, 912, 912, 912, 912, 912, 912, 912, 912, 912, 912, 912, 912, 912, 912, 912, 912, 912, 912, 912, 912, 912, 912, 912, 912, 912, 912, 912, 912, 912, 912, 912, 912, 912, 912, 912, 912, 912, 912, 912, 912, 912, 912, 912, 912, 912, 912, 912, 912, 912, 912, 912, 912, 912, 912, 912, 912, 912, 912, 912, 912, 912, 912, 912, 912, 912, 912, 912, 912, 912, 912, 912, 912, 912, 912, 912, 912, 912, 912, 912, 912, 912, 912, 912, 912, 912, 912, 912, 912, 912, 912, 912, 912, 912, 912, 912, 912, 912, 912, 912, 912, 912, 912, 
912, 912, 912, 912, 912, 912, 912, 912, 912, 912, 912, 912, 912, 912, 912, 912, 912, 912, 912, 912, 912, 912, 912, 912, 912, 912, 912, 912, 912, 373: 912, 391: 912}, + {6: 179, 179}, + // 455 + {6: 180, 180}, + {1309, 1332, 1217, 1442, 1436, 1426, 10: 1280, 1625, 1477, 1511, 1504, 1497, 1507, 1500, 1499, 1501, 1517, 1509, 1503, 1515, 1516, 1513, 1514, 1502, 1498, 1505, 1506, 1508, 1512, 1510, 1547, 1453, 1451, 1452, 1314, 1623, 1226, 1441, 1244, 1288, 1246, 1225, 1628, 1263, 1434, 1299, 1335, 1522, 1521, 1270, 1338, 1298, 1476, 1221, 1231, 1340, 1439, 1341, 1257, 1518, 1519, 1438, 1326, 1350, 1273, 1278, 1430, 1431, 1283, 1289, 1384, 1632, 1432, 1433, 1219, 1222, 1224, 1223, 1238, 1626, 1482, 1427, 1243, 1249, 1261, 1262, 1250, 1485, 1405, 1318, 1319, 1279, 1450, 1290, 1631, 1630, 1415, 1295, 1300, 1633, 1402, 1214, 1529, 1215, 1218, 1460, 1387, 1304, 1624, 1310, 1348, 1349, 1345, 1530, 1531, 1532, 1406, 1576, 1478, 1479, 1467, 1480, 1227, 1394, 1533, 1312, 1396, 1228, 1381, 1481, 1640, 1636, 1230, 1329, 1232, 1233, 1313, 1311, 1234, 1408, 1534, 1535, 1404, 1235, 1536, 1468, 1236, 1537, 1538, 1627, 1240, 1388, 1324, 1483, 1417, 1241, 1484, 1242, 1245, 1247, 1248, 1251, 1386, 1351, 1252, 1577, 1435, 1356, 1253, 1461, 1401, 1574, 1254, 1539, 1411, 1255, 1256, 1580, 1258, 1259, 1346, 1540, 1322, 1541, 1418, 1459, 1629, 1307, 1622, 1462, 1403, 1337, 1542, 1265, 1543, 1544, 1389, 1407, 1412, 1325, 1398, 1486, 1457, 1268, 1266, 1334, 1419, 1267, 1456, 1458, 1315, 1546, 1473, 1472, 1376, 1377, 1316, 1378, 1379, 1390, 1642, 1545, 1317, 1643, 1463, 1302, 1641, 1269, 1400, 1573, 1344, 1466, 1469, 1420, 1487, 1488, 1464, 1465, 1353, 1470, 1548, 1454, 1354, 1331, 1285, 1524, 1575, 1410, 1422, 1425, 1352, 1271, 1475, 1474, 1525, 1367, 1550, 1368, 1272, 1343, 1362, 1363, 1364, 1489, 1637, 1370, 1369, 1274, 1549, 1395, 1275, 1528, 1527, 1383, 1424, 1276, 1437, 1327, 1455, 1380, 1328, 1638, 1277, 1385, 1639, 1320, 1490, 1644, 1429, 1393, 1372, 1471, 1333, 1373, 1374, 1281, 1423, 1382, 1375, 1282, 1305, 1414, 1523, 1416, 1336, 1339, 1443, 1444, 1445, 1446, 1447, 1448, 1449, 1578, 1491, 1358, 1494, 1495, 1493, 1492, 1357, 1428, 1284, 1554, 1555, 1556, 1557, 1579, 1551, 1397, 1287, 1286, 1552, 1553, 1355, 1413, 1409, 1421, 1440, 1391, 1291, 1496, 1561, 1562, 1563, 1564, 1565, 1566, 1568, 1567, 1569, 1570, 1571, 1520, 1294, 1323, 1572, 1297, 1330, 1392, 1635, 1558, 1559, 1560, 1347, 1634, 1526, 1399, 1618, 1671, 1674, 376: 1646, 378: 1655, 1669, 1668, 1699, 400: 1658, 404: 1616, 1677, 1650, 410: 1682, 419: 1645, 1647, 429: 1675, 1649, 1648, 433: 1676, 435: 1654, 1680, 1689, 1711, 1653, 1690, 1691, 1652, 1666, 1667, 1705, 1697, 1700, 1706, 1707, 1702, 1703, 1708, 1701, 1704, 1695, 1673, 1685, 1686, 1688, 1684, 1678, 1679, 1670, 1681, 1683, 1672, 1687, 1692, 1693, 523: 1657, 1212, 1213, 1211, 1663, 1659, 1651, 1662, 1660, 1661, 1694, 1698, 1696, 1656, 1665, 1709, 1710, 1664, 1621, 1620, 1619, 1617}, + {184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 
184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 375: 184, 377: 184, 184, 184, 184, 184, 384: 184, 386: 184, 391: 184, 184, 184, 184, 184, 184, 184, 184, 184, 401: 184, 184, 184, 407: 184, 184, 184, 411: 184, 184, 184, 184, 184, 184, 184, 184, 421: 184, 184, 184, 184, 184, 184, 184, 184, 432: 184, 434: 184, 482: 1899}, + {6: 165, 165, 392: 1720, 1719, 395: 1718, 1717, 1715, 547: 1716, 1714}, + {1309, 1332, 1217, 1442, 1436, 1426, 10: 1280, 1625, 1477, 1511, 1504, 1497, 1507, 1500, 1499, 1501, 1517, 1509, 1503, 1515, 1516, 1513, 1514, 1502, 1498, 1505, 1506, 1508, 1512, 1510, 1547, 1453, 1451, 1452, 1314, 1623, 1226, 1441, 1244, 1288, 1246, 1225, 1628, 1263, 1434, 1299, 1335, 1522, 1521, 1270, 1338, 1298, 1476, 1221, 1231, 1340, 1439, 1341, 1257, 1518, 1519, 1438, 1326, 1350, 1273, 1278, 1430, 1431, 1283, 1289, 1384, 1632, 1432, 1433, 1219, 1222, 1224, 1223, 1238, 1626, 1482, 1427, 1243, 1249, 1261, 1262, 1250, 1485, 1405, 1318, 1319, 1279, 1450, 1290, 1631, 1630, 1415, 1295, 1300, 1633, 1402, 1214, 1529, 1215, 1218, 1460, 1387, 1304, 1624, 1310, 1348, 1349, 1345, 1530, 1531, 1532, 1406, 1576, 1478, 1479, 1467, 1480, 1227, 1394, 1533, 1312, 1396, 1228, 1381, 1481, 1640, 1636, 1230, 1329, 1232, 1233, 1313, 1311, 1234, 1408, 1534, 1535, 1404, 1235, 1536, 1468, 1236, 1537, 1538, 1627, 1240, 1388, 1324, 1483, 1417, 1241, 1484, 1242, 1245, 1247, 1248, 1251, 1386, 1351, 1252, 1577, 1435, 1356, 1253, 1461, 1401, 1574, 1254, 1539, 1411, 1255, 1256, 1580, 1258, 1259, 1346, 1540, 1322, 1541, 1418, 1459, 1629, 1307, 1622, 1462, 1403, 1337, 1542, 1265, 1543, 1544, 1389, 1407, 1412, 1325, 1398, 1486, 1457, 1268, 1266, 1334, 1419, 1267, 1456, 1458, 1315, 1546, 1473, 1472, 1376, 1377, 1316, 1378, 1379, 1390, 1642, 1545, 1317, 1643, 1463, 1302, 1641, 1269, 1400, 1573, 1344, 1466, 1469, 1420, 1487, 1488, 1464, 1465, 1353, 1470, 1548, 1454, 1354, 1331, 1285, 1524, 1575, 1410, 1422, 1425, 1352, 1271, 1475, 1474, 1525, 1367, 1550, 1368, 1272, 1343, 1362, 1363, 1364, 1489, 1637, 1370, 1369, 1274, 1549, 1395, 1275, 1528, 1527, 1383, 1424, 1276, 1437, 1327, 1455, 1380, 1328, 1638, 1277, 1385, 1639, 1320, 1490, 1644, 1429, 1393, 1372, 1471, 1333, 1373, 1374, 1281, 1423, 1382, 1375, 1282, 1305, 1414, 1523, 1416, 1336, 1339, 1443, 1444, 1445, 1446, 1447, 1448, 1449, 1578, 1491, 1358, 1494, 1495, 1493, 1492, 1357, 1428, 1284, 1554, 1555, 1556, 
1557, 1579, 1551, 1397, 1287, 1286, 1552, 1553, 1355, 1413, 1409, 1421, 1440, 1391, 1291, 1496, 1561, 1562, 1563, 1564, 1565, 1566, 1568, 1567, 1569, 1570, 1571, 1520, 1294, 1323, 1572, 1297, 1330, 1392, 1635, 1558, 1559, 1560, 1347, 1634, 1526, 1399, 1618, 1671, 1674, 376: 1646, 378: 1655, 1669, 1668, 1699, 400: 1658, 404: 1616, 1677, 1650, 410: 1682, 419: 1645, 1647, 429: 1675, 1649, 1648, 433: 1676, 435: 1654, 1680, 1689, 1711, 1653, 1690, 1691, 1652, 1666, 1667, 1705, 1697, 1700, 1706, 1707, 1702, 1703, 1708, 1701, 1704, 1695, 1673, 1685, 1686, 1688, 1684, 1678, 1679, 1670, 1681, 1683, 1672, 1687, 1692, 1693, 523: 1657, 1212, 1213, 1211, 1663, 1659, 1651, 1662, 1660, 1661, 1694, 1698, 1696, 1656, 1665, 1709, 1710, 1664, 1621, 1620, 1619, 1898}, + // 460 + {969, 969, 969, 969, 969, 969, 969, 969, 969, 969, 969, 969, 969, 969, 969, 969, 969, 969, 969, 969, 969, 969, 969, 969, 969, 969, 969, 969, 969, 969, 969, 969, 969, 969, 969, 969, 969, 969, 969, 969, 969, 969, 969, 969, 969, 969, 969, 969, 969, 969, 969, 969, 969, 969, 969, 969, 969, 969, 969, 969, 969, 969, 969, 969, 969, 969, 969, 969, 969, 969, 969, 969, 969, 969, 969, 969, 969, 969, 969, 969, 969, 969, 969, 969, 969, 969, 969, 969, 969, 969, 969, 969, 969, 969, 969, 969, 969, 969, 969, 969, 969, 969, 969, 969, 969, 969, 969, 969, 969, 969, 969, 969, 969, 969, 969, 969, 969, 969, 969, 969, 969, 969, 969, 969, 969, 969, 969, 969, 969, 969, 969, 969, 969, 969, 969, 969, 969, 969, 969, 969, 969, 969, 969, 969, 969, 969, 969, 969, 969, 969, 969, 969, 969, 969, 969, 969, 969, 969, 969, 969, 969, 969, 969, 969, 969, 969, 969, 969, 969, 969, 969, 969, 969, 969, 969, 969, 969, 969, 969, 969, 969, 969, 969, 969, 969, 969, 969, 969, 969, 969, 969, 969, 969, 969, 969, 969, 969, 969, 969, 969, 969, 969, 969, 969, 969, 969, 969, 969, 969, 969, 969, 969, 969, 969, 969, 969, 969, 969, 969, 969, 969, 969, 969, 969, 969, 969, 969, 969, 969, 969, 969, 969, 969, 969, 969, 969, 969, 969, 969, 969, 969, 969, 969, 969, 969, 969, 969, 969, 969, 969, 969, 969, 969, 969, 969, 969, 969, 969, 969, 969, 969, 969, 969, 969, 969, 969, 969, 969, 969, 969, 969, 969, 969, 969, 969, 969, 969, 969, 969, 969, 969, 969, 969, 969, 969, 969, 969, 969, 969, 969, 969, 969, 969, 969, 969, 969, 969, 969, 969, 969, 969, 969, 969, 969, 969, 969, 969, 969, 969, 969, 969, 969, 969, 969, 969, 969, 969, 969, 969, 969, 969, 969, 969, 969, 969, 969, 969, 969, 969, 969, 969, 969, 969, 969, 969, 969, 969, 969, 969, 969, 969, 969, 969, 969, 969, 969, 969, 969, 969, 969, 969, 969, 969, 969, 969, 969, 969, 969, 969, 969, 969, 969, 969, 969, 969, 969, 969, 969, 969, 969, 969, 969, 375: 969, 378: 969, 384: 969, 386: 969, 391: 969, 969, 969, 969, 969, 969, 969, 969, 969, 402: 969, 1888, 407: 969, 969, 969, 411: 1885, 1883, 1882, 1890, 1884, 1886, 1887, 1889, 725: 1881, 758: 1880}, + {954, 954, 954, 954, 954, 954, 954, 954, 954, 954, 954, 954, 954, 954, 954, 954, 954, 954, 954, 954, 954, 954, 954, 954, 954, 954, 954, 954, 954, 954, 954, 954, 954, 954, 954, 954, 954, 954, 954, 954, 954, 954, 954, 954, 954, 954, 954, 954, 954, 954, 954, 954, 954, 954, 954, 954, 954, 954, 954, 954, 954, 954, 954, 954, 954, 954, 954, 954, 954, 954, 954, 954, 954, 954, 954, 954, 954, 954, 954, 954, 954, 954, 954, 954, 954, 954, 954, 954, 954, 954, 954, 954, 954, 954, 954, 954, 954, 954, 954, 954, 954, 954, 954, 954, 954, 954, 954, 954, 954, 954, 954, 954, 954, 954, 954, 954, 954, 954, 954, 954, 954, 954, 954, 954, 954, 954, 954, 954, 954, 954, 954, 954, 954, 954, 954, 954, 954, 954, 954, 954, 954, 954, 954, 
954, 954, 954, 954, 954, 954, 954, 954, 954, 954, 954, 954, 954, 954, 954, 954, 954, 954, 954, 954, 954, 954, 954, 954, 954, 954, 954, 954, 954, 954, 954, 954, 954, 954, 954, 954, 954, 954, 954, 954, 954, 954, 954, 954, 954, 954, 954, 954, 954, 954, 954, 954, 954, 954, 954, 954, 954, 954, 954, 954, 954, 954, 954, 954, 954, 954, 954, 954, 954, 954, 954, 954, 954, 954, 954, 954, 954, 954, 954, 954, 954, 954, 954, 954, 954, 954, 954, 954, 954, 954, 954, 954, 954, 954, 954, 954, 954, 954, 954, 954, 954, 954, 954, 954, 954, 954, 954, 954, 954, 954, 954, 954, 954, 954, 954, 954, 954, 954, 954, 954, 954, 954, 954, 954, 954, 954, 954, 954, 954, 954, 954, 954, 954, 954, 954, 954, 954, 954, 954, 954, 954, 954, 954, 954, 954, 954, 954, 954, 954, 954, 954, 954, 954, 954, 954, 954, 954, 954, 954, 954, 954, 954, 954, 954, 954, 954, 954, 954, 954, 954, 954, 954, 954, 954, 954, 954, 954, 954, 954, 954, 954, 954, 954, 954, 954, 954, 954, 954, 954, 954, 954, 954, 954, 954, 954, 954, 954, 954, 954, 954, 954, 954, 954, 954, 954, 954, 954, 954, 954, 954, 954, 954, 954, 954, 954, 954, 954, 954, 954, 954, 954, 954, 954, 954, 954, 954, 954, 954, 954, 375: 954, 378: 954, 384: 954, 386: 954, 391: 954, 954, 954, 954, 954, 954, 954, 954, 954, 402: 954, 954, 407: 954, 954, 954, 411: 954, 954, 954, 954, 954, 954, 954, 954}, + {934, 934, 934, 934, 934, 934, 934, 934, 934, 934, 934, 934, 934, 934, 934, 934, 934, 934, 934, 934, 934, 934, 934, 934, 934, 934, 934, 934, 934, 934, 934, 934, 934, 934, 934, 934, 934, 934, 934, 934, 934, 934, 934, 934, 934, 934, 934, 934, 934, 934, 934, 934, 934, 934, 934, 934, 934, 934, 934, 934, 934, 934, 934, 934, 934, 934, 934, 934, 934, 934, 934, 934, 934, 934, 934, 934, 934, 934, 934, 934, 934, 934, 934, 934, 934, 934, 934, 934, 934, 934, 934, 934, 934, 934, 934, 934, 934, 934, 934, 934, 934, 934, 934, 934, 934, 934, 934, 934, 934, 934, 934, 934, 934, 934, 934, 934, 934, 934, 934, 934, 934, 934, 934, 934, 934, 934, 934, 934, 934, 934, 934, 934, 934, 934, 934, 934, 934, 934, 934, 934, 934, 934, 934, 934, 934, 934, 934, 934, 934, 934, 934, 934, 934, 934, 934, 934, 934, 934, 934, 934, 934, 934, 934, 934, 934, 934, 934, 934, 934, 934, 934, 934, 934, 934, 934, 934, 934, 934, 934, 934, 934, 934, 934, 934, 934, 934, 934, 934, 934, 934, 934, 934, 934, 934, 934, 934, 934, 934, 934, 934, 934, 934, 934, 934, 934, 934, 934, 934, 934, 934, 934, 934, 934, 934, 934, 934, 934, 934, 934, 934, 934, 934, 934, 934, 934, 934, 934, 934, 934, 934, 934, 934, 934, 934, 934, 934, 934, 934, 934, 934, 934, 934, 934, 934, 934, 934, 934, 934, 934, 934, 934, 934, 934, 934, 934, 934, 934, 934, 934, 934, 934, 934, 934, 934, 934, 934, 934, 934, 934, 934, 934, 934, 934, 934, 934, 934, 934, 934, 934, 934, 934, 934, 934, 934, 934, 934, 934, 934, 934, 934, 934, 934, 934, 934, 934, 934, 934, 934, 934, 934, 934, 934, 934, 934, 934, 934, 934, 934, 934, 934, 934, 934, 934, 934, 934, 934, 934, 934, 934, 934, 934, 934, 934, 934, 934, 934, 934, 934, 934, 934, 934, 934, 934, 934, 934, 934, 934, 934, 934, 934, 934, 934, 934, 934, 934, 934, 934, 934, 934, 934, 934, 934, 934, 934, 934, 934, 934, 934, 934, 934, 934, 934, 934, 934, 934, 934, 934, 934, 934, 934, 934, 934, 1868, 375: 934, 378: 934, 1763, 1764, 1769, 384: 934, 386: 934, 391: 934, 934, 934, 934, 934, 934, 934, 934, 934, 401: 1765, 934, 934, 407: 934, 934, 934, 411: 934, 934, 934, 934, 934, 934, 934, 934, 421: 1767, 1760, 1766, 1770, 1759, 1768, 1761, 1762, 432: 1869, 434: 1867, 716: 1871, 756: 1870}, + {890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 
890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 1864, 375: 890, 377: 890, 890, 890, 890, 890, 384: 890, 386: 890, 391: 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 890, 407: 890, 890, 890, 411: 890, 890, 890, 890, 890, 890, 890, 890, 421: 890, 890, 890, 890, 890, 890, 890, 890, 432: 890, 434: 890}, + {884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 
884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 428, 375: 884, 377: 884, 884, 884, 884, 884, 384: 884, 386: 884, 391: 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 884, 407: 884, 884, 884, 411: 884, 884, 884, 884, 884, 884, 884, 884, 421: 884, 884, 884, 884, 884, 884, 884, 884, 432: 884, 434: 884}, + // 465 + {880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 1861, 375: 880, 377: 880, 880, 880, 880, 880, 384: 880, 386: 880, 391: 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 880, 407: 880, 880, 880, 411: 880, 880, 880, 880, 880, 880, 880, 880, 421: 880, 880, 880, 880, 880, 880, 880, 880, 432: 880, 434: 880}, + {871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 
871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 427, 375: 871, 377: 871, 871, 871, 871, 871, 384: 871, 386: 871, 391: 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 871, 407: 871, 871, 871, 411: 871, 871, 871, 871, 871, 871, 871, 871, 421: 871, 871, 871, 871, 871, 871, 871, 871, 432: 871, 434: 871}, + {863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 424, 375: 863, 377: 863, 863, 863, 863, 863, 384: 863, 386: 863, 391: 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 863, 407: 863, 863, 863, 411: 863, 863, 863, 863, 863, 863, 863, 863, 421: 863, 863, 863, 863, 863, 863, 863, 863, 432: 863, 434: 863}, + {861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 
861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 422, 375: 861, 377: 861, 861, 861, 861, 861, 384: 861, 386: 861, 391: 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 861, 407: 861, 861, 861, 411: 861, 861, 861, 861, 861, 861, 861, 861, 421: 861, 861, 861, 861, 861, 861, 861, 861, 432: 861, 434: 861}, + {840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 
840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 418, 375: 840, 377: 840, 840, 840, 840, 840, 384: 840, 386: 840, 391: 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 840, 407: 840, 840, 840, 411: 840, 840, 840, 840, 840, 840, 840, 840, 421: 840, 840, 840, 840, 840, 840, 840, 840, 432: 840, 434: 840}, + // 470 + {836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 421, 375: 836, 377: 836, 836, 836, 836, 836, 384: 836, 386: 836, 391: 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 836, 407: 836, 836, 836, 411: 836, 836, 836, 836, 836, 836, 836, 836, 421: 836, 836, 836, 836, 836, 836, 836, 836, 432: 836, 434: 836}, + {808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 
808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 405, 375: 808, 377: 808, 808, 808, 808, 808, 384: 808, 386: 808, 391: 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 808, 407: 808, 808, 808, 411: 808, 808, 808, 808, 808, 808, 808, 808, 421: 808, 808, 808, 808, 808, 808, 808, 808, 432: 808, 434: 808}, + {807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 404, 375: 807, 377: 807, 807, 807, 807, 807, 384: 807, 386: 807, 391: 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 807, 407: 807, 807, 807, 411: 807, 807, 807, 807, 807, 807, 807, 807, 421: 807, 807, 807, 807, 807, 807, 807, 807, 432: 807, 434: 807}, + {804, 804, 804, 804, 804, 804, 804, 
804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 403, 375: 804, 377: 804, 804, 804, 804, 804, 384: 804, 386: 804, 391: 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 804, 407: 804, 804, 804, 411: 804, 804, 804, 804, 804, 804, 804, 804, 421: 804, 804, 804, 804, 804, 804, 804, 804, 432: 804, 434: 804}, + {799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 
799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 400, 375: 799, 377: 799, 799, 799, 799, 799, 384: 799, 386: 799, 391: 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 799, 407: 799, 799, 799, 411: 799, 799, 799, 799, 799, 799, 799, 799, 421: 799, 799, 799, 799, 799, 799, 799, 799, 432: 799, 434: 799}, + // 475 + {797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 401, 375: 797, 377: 797, 797, 797, 797, 797, 384: 797, 386: 797, 391: 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, 407: 797, 797, 797, 411: 797, 797, 797, 797, 797, 797, 797, 797, 421: 797, 797, 797, 797, 797, 797, 797, 797, 432: 797, 434: 797}, + {794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 
794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 402, 375: 794, 377: 794, 794, 794, 794, 794, 384: 794, 386: 794, 391: 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 794, 407: 794, 794, 794, 411: 794, 794, 794, 794, 794, 794, 794, 794, 421: 794, 794, 794, 794, 794, 794, 794, 794, 432: 794, 434: 794}, + {792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 425, 375: 792, 377: 792, 792, 792, 792, 792, 384: 792, 386: 792, 391: 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, 407: 792, 792, 792, 411: 792, 792, 792, 792, 792, 792, 792, 792, 421: 792, 792, 792, 792, 792, 792, 792, 792, 432: 792, 434: 792}, + {779, 779, 779, 
779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 412, 375: 779, 377: 779, 779, 779, 779, 779, 384: 779, 386: 779, 391: 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 779, 407: 779, 779, 779, 411: 779, 779, 779, 779, 779, 779, 779, 779, 421: 779, 779, 779, 779, 779, 779, 779, 779, 432: 779, 434: 779}, + {758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 
758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 409, 375: 758, 377: 758, 758, 758, 758, 758, 384: 758, 386: 758, 391: 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 758, 407: 758, 758, 758, 411: 758, 758, 758, 758, 758, 758, 758, 758, 421: 758, 758, 758, 758, 758, 758, 758, 758, 432: 758, 434: 758}, + // 480 + {741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 407, 375: 741, 377: 741, 741, 741, 741, 741, 384: 741, 386: 741, 391: 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 741, 407: 741, 741, 741, 411: 741, 741, 741, 741, 741, 741, 741, 741, 421: 741, 741, 741, 741, 741, 741, 741, 741, 432: 741, 434: 741}, + {740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 
740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 426, 375: 740, 377: 740, 740, 740, 740, 740, 384: 740, 386: 740, 391: 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 740, 407: 740, 740, 740, 411: 740, 740, 740, 740, 740, 740, 740, 740, 421: 740, 740, 740, 740, 740, 740, 740, 740, 432: 740, 434: 740}, + {739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 414, 375: 739, 377: 739, 739, 739, 739, 739, 384: 739, 386: 739, 391: 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 739, 407: 739, 739, 739, 411: 739, 739, 739, 739, 739, 739, 739, 739, 421: 739, 739, 739, 739, 739, 739, 739, 739, 432: 739, 434: 739}, 
+ {735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 416, 375: 735, 377: 735, 735, 735, 735, 735, 384: 735, 386: 735, 391: 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 735, 407: 735, 735, 735, 411: 735, 735, 735, 735, 735, 735, 735, 735, 421: 735, 735, 735, 735, 735, 735, 735, 735, 432: 735, 434: 735}, + {734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 
734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 415, 375: 734, 377: 734, 734, 734, 734, 734, 384: 734, 386: 734, 391: 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 734, 407: 734, 734, 734, 411: 734, 734, 734, 734, 734, 734, 734, 734, 421: 734, 734, 734, 734, 734, 734, 734, 734, 432: 734, 434: 734}, + // 485 + {729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 406, 375: 729, 377: 729, 729, 729, 729, 729, 384: 729, 386: 729, 391: 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 729, 407: 729, 729, 729, 411: 729, 729, 729, 729, 729, 729, 729, 729, 421: 729, 729, 729, 729, 729, 729, 729, 729, 432: 729, 434: 729}, + {493, 493, 493, 493, 493, 493, 493, 493, 493, 493, 493, 493, 493, 493, 493, 493, 493, 493, 493, 493, 493, 493, 493, 493, 493, 493, 493, 493, 493, 493, 493, 493, 493, 493, 493, 493, 493, 493, 493, 493, 493, 493, 493, 493, 493, 493, 493, 493, 493, 493, 493, 493, 493, 493, 493, 493, 493, 493, 493, 493, 493, 493, 493, 493, 493, 493, 493, 493, 493, 493, 493, 493, 493, 493, 493, 493, 493, 493, 493, 493, 493, 493, 493, 493, 493, 493, 493, 493, 493, 493, 493, 493, 493, 493, 493, 493, 493, 493, 493, 493, 493, 493, 493, 493, 493, 493, 493, 493, 493, 493, 493, 493, 493, 493, 493, 493, 493, 493, 493, 493, 493, 493, 493, 493, 493, 493, 493, 493, 493, 493, 493, 493, 493, 493, 493, 493, 493, 493, 
493, 493, 493, 493, 493, 493, 493, 493, 493, 493, 493, 493, 493, 493, 493, 493, 493, 493, 493, 493, 493, 493, 493, 493, 493, 493, 493, 493, 493, 493, 493, 493, 493, 493, 493, 493, 493, 493, 493, 493, 493, 493, 493, 493, 493, 493, 493, 493, 493, 493, 493, 493, 493, 493, 493, 493, 493, 493, 493, 493, 493, 493, 493, 493, 493, 493, 493, 493, 493, 493, 493, 493, 493, 493, 493, 493, 493, 493, 493, 493, 493, 493, 493, 493, 493, 493, 493, 493, 493, 493, 493, 493, 493, 493, 493, 493, 493, 493, 493, 493, 493, 493, 493, 493, 493, 493, 493, 493, 493, 493, 493, 493, 493, 493, 493, 493, 493, 493, 493, 493, 493, 493, 493, 493, 493, 493, 493, 493, 493, 493, 493, 493, 493, 493, 493, 493, 493, 493, 493, 493, 493, 493, 493, 493, 493, 493, 493, 493, 493, 493, 493, 493, 493, 493, 493, 493, 493, 493, 493, 493, 493, 493, 493, 493, 493, 493, 493, 493, 493, 493, 493, 493, 493, 493, 493, 493, 493, 493, 493, 493, 493, 493, 493, 493, 493, 493, 493, 493, 493, 493, 493, 493, 493, 493, 493, 493, 493, 493, 493, 493, 493, 493, 493, 493, 493, 493, 493, 493, 493, 493, 493, 493, 493, 493, 493, 493, 493, 493, 493, 493, 493, 493, 493, 493, 493, 493, 493, 493, 493, 493, 493, 493, 493, 493, 493, 374: 493, 493, 493, 493, 493, 493, 493, 493, 493, 493, 493, 493, 493, 493, 493, 493, 493, 493, 493, 493, 493, 493, 493, 493, 493, 493, 401: 493, 493, 493, 407: 493, 493, 493, 411: 493, 493, 493, 493, 493, 493, 493, 493, 421: 493, 493, 493, 493, 493, 493, 493, 493, 432: 493, 434: 493}, + {492, 492, 492, 492, 492, 492, 492, 492, 492, 492, 492, 492, 492, 492, 492, 492, 492, 492, 492, 492, 492, 492, 492, 492, 492, 492, 492, 492, 492, 492, 492, 492, 492, 492, 492, 492, 492, 492, 492, 492, 492, 492, 492, 492, 492, 492, 492, 492, 492, 492, 492, 492, 492, 492, 492, 492, 492, 492, 492, 492, 492, 492, 492, 492, 492, 492, 492, 492, 492, 492, 492, 492, 492, 492, 492, 492, 492, 492, 492, 492, 492, 492, 492, 492, 492, 492, 492, 492, 492, 492, 492, 492, 492, 492, 492, 492, 492, 492, 492, 492, 492, 492, 492, 492, 492, 492, 492, 492, 492, 492, 492, 492, 492, 492, 492, 492, 492, 492, 492, 492, 492, 492, 492, 492, 492, 492, 492, 492, 492, 492, 492, 492, 492, 492, 492, 492, 492, 492, 492, 492, 492, 492, 492, 492, 492, 492, 492, 492, 492, 492, 492, 492, 492, 492, 492, 492, 492, 492, 492, 492, 492, 492, 492, 492, 492, 492, 492, 492, 492, 492, 492, 492, 492, 492, 492, 492, 492, 492, 492, 492, 492, 492, 492, 492, 492, 492, 492, 492, 492, 492, 492, 492, 492, 492, 492, 492, 492, 492, 492, 492, 492, 492, 492, 492, 492, 492, 492, 492, 492, 492, 492, 492, 492, 492, 492, 492, 492, 492, 492, 492, 492, 492, 492, 492, 492, 492, 492, 492, 492, 492, 492, 492, 492, 492, 492, 492, 492, 492, 492, 492, 492, 492, 492, 492, 492, 492, 492, 492, 492, 492, 492, 492, 492, 492, 492, 492, 492, 492, 492, 492, 492, 492, 492, 492, 492, 492, 492, 492, 492, 492, 492, 492, 492, 492, 492, 492, 492, 492, 492, 492, 492, 492, 492, 492, 492, 492, 492, 492, 492, 492, 492, 492, 492, 492, 492, 492, 492, 492, 492, 492, 492, 492, 492, 492, 492, 492, 492, 492, 492, 492, 492, 492, 492, 492, 492, 492, 492, 492, 492, 492, 492, 492, 492, 492, 492, 492, 492, 492, 492, 492, 492, 492, 492, 492, 492, 492, 492, 492, 492, 492, 492, 492, 492, 492, 492, 492, 492, 492, 492, 492, 492, 492, 492, 492, 492, 492, 492, 492, 492, 492, 492, 492, 492, 492, 492, 492, 492, 492, 492, 492, 492, 492, 492, 374: 492, 492, 492, 492, 492, 492, 492, 492, 492, 492, 492, 492, 492, 492, 492, 492, 492, 492, 492, 492, 492, 492, 492, 492, 492, 492, 401: 492, 492, 492, 407: 492, 492, 492, 411: 492, 492, 492, 492, 492, 492, 492, 492, 421: 
492, 492, 492, 492, 492, 492, 492, 492, 432: 492, 434: 492}, + {491, 491, 491, 491, 491, 491, 491, 491, 491, 491, 491, 491, 491, 491, 491, 491, 491, 491, 491, 491, 491, 491, 491, 491, 491, 491, 491, 491, 491, 491, 491, 491, 491, 491, 491, 491, 491, 491, 491, 491, 491, 491, 491, 491, 491, 491, 491, 491, 491, 491, 491, 491, 491, 491, 491, 491, 491, 491, 491, 491, 491, 491, 491, 491, 491, 491, 491, 491, 491, 491, 491, 491, 491, 491, 491, 491, 491, 491, 491, 491, 491, 491, 491, 491, 491, 491, 491, 491, 491, 491, 491, 491, 491, 491, 491, 491, 491, 491, 491, 491, 491, 491, 491, 491, 491, 491, 491, 491, 491, 491, 491, 491, 491, 491, 491, 491, 491, 491, 491, 491, 491, 491, 491, 491, 491, 491, 491, 491, 491, 491, 491, 491, 491, 491, 491, 491, 491, 491, 491, 491, 491, 491, 491, 491, 491, 491, 491, 491, 491, 491, 491, 491, 491, 491, 491, 491, 491, 491, 491, 491, 491, 491, 491, 491, 491, 491, 491, 491, 491, 491, 491, 491, 491, 491, 491, 491, 491, 491, 491, 491, 491, 491, 491, 491, 491, 491, 491, 491, 491, 491, 491, 491, 491, 491, 491, 491, 491, 491, 491, 491, 491, 491, 491, 491, 491, 491, 491, 491, 491, 491, 491, 491, 491, 491, 491, 491, 491, 491, 491, 491, 491, 491, 491, 491, 491, 491, 491, 491, 491, 491, 491, 491, 491, 491, 491, 491, 491, 491, 491, 491, 491, 491, 491, 491, 491, 491, 491, 491, 491, 491, 491, 491, 491, 491, 491, 491, 491, 491, 491, 491, 491, 491, 491, 491, 491, 491, 491, 491, 491, 491, 491, 491, 491, 491, 491, 491, 491, 491, 491, 491, 491, 491, 491, 491, 491, 491, 491, 491, 491, 491, 491, 491, 491, 491, 491, 491, 491, 491, 491, 491, 491, 491, 491, 491, 491, 491, 491, 491, 491, 491, 491, 491, 491, 491, 491, 491, 491, 491, 491, 491, 491, 491, 491, 491, 491, 491, 491, 491, 491, 491, 491, 491, 491, 491, 491, 491, 491, 491, 491, 491, 491, 491, 491, 491, 491, 491, 491, 491, 491, 491, 491, 491, 491, 491, 491, 491, 491, 491, 491, 491, 491, 491, 491, 491, 491, 491, 491, 491, 491, 491, 491, 491, 491, 374: 491, 491, 491, 491, 491, 491, 491, 491, 491, 491, 491, 491, 491, 491, 491, 491, 491, 491, 491, 491, 491, 491, 491, 491, 491, 491, 401: 491, 491, 491, 407: 491, 491, 491, 411: 491, 491, 491, 491, 491, 491, 491, 491, 421: 491, 491, 491, 491, 491, 491, 491, 491, 432: 491, 434: 491}, + {490, 490, 490, 490, 490, 490, 490, 490, 490, 490, 490, 490, 490, 490, 490, 490, 490, 490, 490, 490, 490, 490, 490, 490, 490, 490, 490, 490, 490, 490, 490, 490, 490, 490, 490, 490, 490, 490, 490, 490, 490, 490, 490, 490, 490, 490, 490, 490, 490, 490, 490, 490, 490, 490, 490, 490, 490, 490, 490, 490, 490, 490, 490, 490, 490, 490, 490, 490, 490, 490, 490, 490, 490, 490, 490, 490, 490, 490, 490, 490, 490, 490, 490, 490, 490, 490, 490, 490, 490, 490, 490, 490, 490, 490, 490, 490, 490, 490, 490, 490, 490, 490, 490, 490, 490, 490, 490, 490, 490, 490, 490, 490, 490, 490, 490, 490, 490, 490, 490, 490, 490, 490, 490, 490, 490, 490, 490, 490, 490, 490, 490, 490, 490, 490, 490, 490, 490, 490, 490, 490, 490, 490, 490, 490, 490, 490, 490, 490, 490, 490, 490, 490, 490, 490, 490, 490, 490, 490, 490, 490, 490, 490, 490, 490, 490, 490, 490, 490, 490, 490, 490, 490, 490, 490, 490, 490, 490, 490, 490, 490, 490, 490, 490, 490, 490, 490, 490, 490, 490, 490, 490, 490, 490, 490, 490, 490, 490, 490, 490, 490, 490, 490, 490, 490, 490, 490, 490, 490, 490, 490, 490, 490, 490, 490, 490, 490, 490, 490, 490, 490, 490, 490, 490, 490, 490, 490, 490, 490, 490, 490, 490, 490, 490, 490, 490, 490, 490, 490, 490, 490, 490, 490, 490, 490, 490, 490, 490, 490, 490, 490, 490, 490, 490, 490, 490, 490, 490, 490, 490, 490, 490, 490, 490, 490, 490, 490, 490, 
490, 490, 490, 490, 490, 490, 490, 490, 490, 490, 490, 490, 490, 490, 490, 490, 490, 490, 490, 490, 490, 490, 490, 490, 490, 490, 490, 490, 490, 490, 490, 490, 490, 490, 490, 490, 490, 490, 490, 490, 490, 490, 490, 490, 490, 490, 490, 490, 490, 490, 490, 490, 490, 490, 490, 490, 490, 490, 490, 490, 490, 490, 490, 490, 490, 490, 490, 490, 490, 490, 490, 490, 490, 490, 490, 490, 490, 490, 490, 490, 490, 490, 490, 490, 490, 490, 490, 490, 490, 490, 490, 490, 490, 490, 490, 490, 490, 490, 490, 490, 490, 490, 490, 490, 490, 490, 374: 490, 490, 490, 490, 490, 490, 490, 490, 490, 490, 490, 490, 490, 490, 490, 490, 490, 490, 490, 490, 490, 490, 490, 490, 490, 490, 401: 490, 490, 490, 407: 490, 490, 490, 411: 490, 490, 490, 490, 490, 490, 490, 490, 421: 490, 490, 490, 490, 490, 490, 490, 490, 432: 490, 434: 490}, + // 490 + {489, 489, 489, 489, 489, 489, 489, 489, 489, 489, 489, 489, 489, 489, 489, 489, 489, 489, 489, 489, 489, 489, 489, 489, 489, 489, 489, 489, 489, 489, 489, 489, 489, 489, 489, 489, 489, 489, 489, 489, 489, 489, 489, 489, 489, 489, 489, 489, 489, 489, 489, 489, 489, 489, 489, 489, 489, 489, 489, 489, 489, 489, 489, 489, 489, 489, 489, 489, 489, 489, 489, 489, 489, 489, 489, 489, 489, 489, 489, 489, 489, 489, 489, 489, 489, 489, 489, 489, 489, 489, 489, 489, 489, 489, 489, 489, 489, 489, 489, 489, 489, 489, 489, 489, 489, 489, 489, 489, 489, 489, 489, 489, 489, 489, 489, 489, 489, 489, 489, 489, 489, 489, 489, 489, 489, 489, 489, 489, 489, 489, 489, 489, 489, 489, 489, 489, 489, 489, 489, 489, 489, 489, 489, 489, 489, 489, 489, 489, 489, 489, 489, 489, 489, 489, 489, 489, 489, 489, 489, 489, 489, 489, 489, 489, 489, 489, 489, 489, 489, 489, 489, 489, 489, 489, 489, 489, 489, 489, 489, 489, 489, 489, 489, 489, 489, 489, 489, 489, 489, 489, 489, 489, 489, 489, 489, 489, 489, 489, 489, 489, 489, 489, 489, 489, 489, 489, 489, 489, 489, 489, 489, 489, 489, 489, 489, 489, 489, 489, 489, 489, 489, 489, 489, 489, 489, 489, 489, 489, 489, 489, 489, 489, 489, 489, 489, 489, 489, 489, 489, 489, 489, 489, 489, 489, 489, 489, 489, 489, 489, 489, 489, 489, 489, 489, 489, 489, 489, 489, 489, 489, 489, 489, 489, 489, 489, 489, 489, 489, 489, 489, 489, 489, 489, 489, 489, 489, 489, 489, 489, 489, 489, 489, 489, 489, 489, 489, 489, 489, 489, 489, 489, 489, 489, 489, 489, 489, 489, 489, 489, 489, 489, 489, 489, 489, 489, 489, 489, 489, 489, 489, 489, 489, 489, 489, 489, 489, 489, 489, 489, 489, 489, 489, 489, 489, 489, 489, 489, 489, 489, 489, 489, 489, 489, 489, 489, 489, 489, 489, 489, 489, 489, 489, 489, 489, 489, 489, 489, 489, 489, 489, 489, 489, 489, 489, 489, 489, 489, 489, 489, 489, 489, 489, 489, 489, 489, 489, 489, 489, 489, 489, 489, 489, 489, 374: 489, 489, 489, 489, 489, 489, 489, 489, 489, 489, 489, 489, 489, 489, 489, 489, 489, 489, 489, 489, 489, 489, 489, 489, 489, 489, 401: 489, 489, 489, 407: 489, 489, 489, 411: 489, 489, 489, 489, 489, 489, 489, 489, 421: 489, 489, 489, 489, 489, 489, 489, 489, 432: 489, 434: 489}, + {488, 488, 488, 488, 488, 488, 488, 488, 488, 488, 488, 488, 488, 488, 488, 488, 488, 488, 488, 488, 488, 488, 488, 488, 488, 488, 488, 488, 488, 488, 488, 488, 488, 488, 488, 488, 488, 488, 488, 488, 488, 488, 488, 488, 488, 488, 488, 488, 488, 488, 488, 488, 488, 488, 488, 488, 488, 488, 488, 488, 488, 488, 488, 488, 488, 488, 488, 488, 488, 488, 488, 488, 488, 488, 488, 488, 488, 488, 488, 488, 488, 488, 488, 488, 488, 488, 488, 488, 488, 488, 488, 488, 488, 488, 488, 488, 488, 488, 488, 488, 488, 488, 488, 488, 488, 488, 488, 488, 488, 488, 488, 488, 488, 488, 
488, 488, 488, 488, 488, 488, 488, 488, 488, 488, 488, 488, 488, 488, 488, 488, 488, 488, 488, 488, 488, 488, 488, 488, 488, 488, 488, 488, 488, 488, 488, 488, 488, 488, 488, 488, 488, 488, 488, 488, 488, 488, 488, 488, 488, 488, 488, 488, 488, 488, 488, 488, 488, 488, 488, 488, 488, 488, 488, 488, 488, 488, 488, 488, 488, 488, 488, 488, 488, 488, 488, 488, 488, 488, 488, 488, 488, 488, 488, 488, 488, 488, 488, 488, 488, 488, 488, 488, 488, 488, 488, 488, 488, 488, 488, 488, 488, 488, 488, 488, 488, 488, 488, 488, 488, 488, 488, 488, 488, 488, 488, 488, 488, 488, 488, 488, 488, 488, 488, 488, 488, 488, 488, 488, 488, 488, 488, 488, 488, 488, 488, 488, 488, 488, 488, 488, 488, 488, 488, 488, 488, 488, 488, 488, 488, 488, 488, 488, 488, 488, 488, 488, 488, 488, 488, 488, 488, 488, 488, 488, 488, 488, 488, 488, 488, 488, 488, 488, 488, 488, 488, 488, 488, 488, 488, 488, 488, 488, 488, 488, 488, 488, 488, 488, 488, 488, 488, 488, 488, 488, 488, 488, 488, 488, 488, 488, 488, 488, 488, 488, 488, 488, 488, 488, 488, 488, 488, 488, 488, 488, 488, 488, 488, 488, 488, 488, 488, 488, 488, 488, 488, 488, 488, 488, 488, 488, 488, 488, 488, 488, 488, 488, 488, 488, 488, 488, 488, 488, 488, 488, 488, 488, 488, 488, 488, 488, 488, 488, 488, 488, 488, 488, 488, 488, 488, 488, 488, 488, 488, 374: 488, 488, 488, 488, 488, 488, 488, 488, 488, 488, 488, 488, 488, 488, 488, 488, 488, 488, 488, 488, 488, 488, 488, 488, 488, 488, 401: 488, 488, 488, 407: 488, 488, 488, 411: 488, 488, 488, 488, 488, 488, 488, 488, 421: 488, 488, 488, 488, 488, 488, 488, 488, 432: 488, 434: 488}, + {487, 487, 487, 487, 487, 487, 487, 487, 487, 487, 487, 487, 487, 487, 487, 487, 487, 487, 487, 487, 487, 487, 487, 487, 487, 487, 487, 487, 487, 487, 487, 487, 487, 487, 487, 487, 487, 487, 487, 487, 487, 487, 487, 487, 487, 487, 487, 487, 487, 487, 487, 487, 487, 487, 487, 487, 487, 487, 487, 487, 487, 487, 487, 487, 487, 487, 487, 487, 487, 487, 487, 487, 487, 487, 487, 487, 487, 487, 487, 487, 487, 487, 487, 487, 487, 487, 487, 487, 487, 487, 487, 487, 487, 487, 487, 487, 487, 487, 487, 487, 487, 487, 487, 487, 487, 487, 487, 487, 487, 487, 487, 487, 487, 487, 487, 487, 487, 487, 487, 487, 487, 487, 487, 487, 487, 487, 487, 487, 487, 487, 487, 487, 487, 487, 487, 487, 487, 487, 487, 487, 487, 487, 487, 487, 487, 487, 487, 487, 487, 487, 487, 487, 487, 487, 487, 487, 487, 487, 487, 487, 487, 487, 487, 487, 487, 487, 487, 487, 487, 487, 487, 487, 487, 487, 487, 487, 487, 487, 487, 487, 487, 487, 487, 487, 487, 487, 487, 487, 487, 487, 487, 487, 487, 487, 487, 487, 487, 487, 487, 487, 487, 487, 487, 487, 487, 487, 487, 487, 487, 487, 487, 487, 487, 487, 487, 487, 487, 487, 487, 487, 487, 487, 487, 487, 487, 487, 487, 487, 487, 487, 487, 487, 487, 487, 487, 487, 487, 487, 487, 487, 487, 487, 487, 487, 487, 487, 487, 487, 487, 487, 487, 487, 487, 487, 487, 487, 487, 487, 487, 487, 487, 487, 487, 487, 487, 487, 487, 487, 487, 487, 487, 487, 487, 487, 487, 487, 487, 487, 487, 487, 487, 487, 487, 487, 487, 487, 487, 487, 487, 487, 487, 487, 487, 487, 487, 487, 487, 487, 487, 487, 487, 487, 487, 487, 487, 487, 487, 487, 487, 487, 487, 487, 487, 487, 487, 487, 487, 487, 487, 487, 487, 487, 487, 487, 487, 487, 487, 487, 487, 487, 487, 487, 487, 487, 487, 487, 487, 487, 487, 487, 487, 487, 487, 487, 487, 487, 487, 487, 487, 487, 487, 487, 487, 487, 487, 487, 487, 487, 487, 487, 487, 487, 487, 487, 487, 487, 487, 487, 487, 487, 487, 487, 487, 374: 487, 487, 487, 487, 1860, 487, 487, 487, 487, 487, 487, 487, 487, 487, 487, 487, 487, 487, 487, 487, 
487, 487, 487, 487, 487, 487, 401: 487, 487, 487, 407: 487, 487, 487, 411: 487, 487, 487, 487, 487, 487, 487, 487, 421: 487, 487, 487, 487, 487, 487, 487, 487, 432: 487, 434: 487}, + {378: 1859}, + {485, 485, 485, 485, 485, 485, 485, 485, 485, 485, 485, 485, 485, 485, 485, 485, 485, 485, 485, 485, 485, 485, 485, 485, 485, 485, 485, 485, 485, 485, 485, 485, 485, 485, 485, 485, 485, 485, 485, 485, 485, 485, 485, 485, 485, 485, 485, 485, 485, 485, 485, 485, 485, 485, 485, 485, 485, 485, 485, 485, 485, 485, 485, 485, 485, 485, 485, 485, 485, 485, 485, 485, 485, 485, 485, 485, 485, 485, 485, 485, 485, 485, 485, 485, 485, 485, 485, 485, 485, 485, 485, 485, 485, 485, 485, 485, 485, 485, 485, 485, 485, 485, 485, 485, 485, 485, 485, 485, 485, 485, 485, 485, 485, 485, 485, 485, 485, 485, 485, 485, 485, 485, 485, 485, 485, 485, 485, 485, 485, 485, 485, 485, 485, 485, 485, 485, 485, 485, 485, 485, 485, 485, 485, 485, 485, 485, 485, 485, 485, 485, 485, 485, 485, 485, 485, 485, 485, 485, 485, 485, 485, 485, 485, 485, 485, 485, 485, 485, 485, 485, 485, 485, 485, 485, 485, 485, 485, 485, 485, 485, 485, 485, 485, 485, 485, 485, 485, 485, 485, 485, 485, 485, 485, 485, 485, 485, 485, 485, 485, 485, 485, 485, 485, 485, 485, 485, 485, 485, 485, 485, 485, 485, 485, 485, 485, 485, 485, 485, 485, 485, 485, 485, 485, 485, 485, 485, 485, 485, 485, 485, 485, 485, 485, 485, 485, 485, 485, 485, 485, 485, 485, 485, 485, 485, 485, 485, 485, 485, 485, 485, 485, 485, 485, 485, 485, 485, 485, 485, 485, 485, 485, 485, 485, 485, 485, 485, 485, 485, 485, 485, 485, 485, 485, 485, 485, 485, 485, 485, 485, 485, 485, 485, 485, 485, 485, 485, 485, 485, 485, 485, 485, 485, 485, 485, 485, 485, 485, 485, 485, 485, 485, 485, 485, 485, 485, 485, 485, 485, 485, 485, 485, 485, 485, 485, 485, 485, 485, 485, 485, 485, 485, 485, 485, 485, 485, 485, 485, 485, 485, 485, 485, 485, 485, 485, 485, 485, 485, 485, 485, 485, 485, 485, 485, 485, 485, 485, 485, 485, 485, 485, 485, 485, 485, 485, 485, 485, 485, 485, 485, 485, 485, 485, 485, 485, 485, 485, 485, 485, 485, 485, 485, 485, 485, 374: 485, 485, 485, 485, 485, 485, 485, 485, 485, 485, 485, 485, 485, 485, 485, 485, 485, 485, 485, 485, 485, 485, 485, 485, 485, 485, 401: 485, 485, 485, 407: 485, 485, 485, 411: 485, 485, 485, 485, 485, 485, 485, 485, 421: 485, 485, 485, 485, 485, 485, 485, 485, 432: 485, 434: 485}, + // 495 + {484, 484, 484, 484, 484, 484, 484, 484, 484, 484, 484, 484, 484, 484, 484, 484, 484, 484, 484, 484, 484, 484, 484, 484, 484, 484, 484, 484, 484, 484, 484, 484, 484, 484, 484, 484, 484, 484, 484, 484, 484, 484, 484, 484, 484, 484, 484, 484, 484, 484, 484, 484, 484, 484, 484, 484, 484, 484, 484, 484, 484, 484, 484, 484, 484, 484, 484, 484, 484, 484, 484, 484, 484, 484, 484, 484, 484, 484, 484, 484, 484, 484, 484, 484, 484, 484, 484, 484, 484, 484, 484, 484, 484, 484, 484, 484, 484, 484, 484, 484, 484, 484, 484, 484, 484, 484, 484, 484, 484, 484, 484, 484, 484, 484, 484, 484, 484, 484, 484, 484, 484, 484, 484, 484, 484, 484, 484, 484, 484, 484, 484, 484, 484, 484, 484, 484, 484, 484, 484, 484, 484, 484, 484, 484, 484, 484, 484, 484, 484, 484, 484, 484, 484, 484, 484, 484, 484, 484, 484, 484, 484, 484, 484, 484, 484, 484, 484, 484, 484, 484, 484, 484, 484, 484, 484, 484, 484, 484, 484, 484, 484, 484, 484, 484, 484, 484, 484, 484, 484, 484, 484, 484, 484, 484, 484, 484, 484, 484, 484, 484, 484, 484, 484, 484, 484, 484, 484, 484, 484, 484, 484, 484, 484, 484, 484, 484, 484, 484, 484, 484, 484, 484, 484, 484, 484, 484, 484, 484, 484, 484, 484, 484, 484, 484, 484, 484, 484, 484, 
484, 484, 484, 484, 484, 484, 484, 484, 484, 484, 484, 484, 484, 484, 484, 484, 484, 484, 484, 484, 484, 484, 484, 484, 484, 484, 484, 484, 484, 484, 484, 484, 484, 484, 484, 484, 484, 484, 484, 484, 484, 484, 484, 484, 484, 484, 484, 484, 484, 484, 484, 484, 484, 484, 484, 484, 484, 484, 484, 484, 484, 484, 484, 484, 484, 484, 484, 484, 484, 484, 484, 484, 484, 484, 484, 484, 484, 484, 484, 484, 484, 484, 484, 484, 484, 484, 484, 484, 484, 484, 484, 484, 484, 484, 484, 484, 484, 484, 484, 484, 484, 484, 484, 484, 484, 484, 484, 484, 484, 484, 484, 484, 484, 484, 484, 484, 484, 484, 484, 484, 484, 484, 484, 484, 484, 484, 484, 484, 484, 484, 484, 484, 484, 484, 484, 374: 484, 484, 484, 484, 484, 484, 484, 484, 484, 484, 484, 484, 484, 484, 484, 484, 484, 484, 484, 484, 484, 484, 484, 484, 484, 484, 401: 484, 484, 484, 407: 484, 484, 484, 411: 484, 484, 484, 484, 484, 484, 484, 484, 421: 484, 484, 484, 484, 484, 484, 484, 484, 432: 484, 434: 484}, + {483, 483, 483, 483, 483, 483, 483, 483, 483, 483, 483, 483, 483, 483, 483, 483, 483, 483, 483, 483, 483, 483, 483, 483, 483, 483, 483, 483, 483, 483, 483, 483, 483, 483, 483, 483, 483, 483, 483, 483, 483, 483, 483, 483, 483, 483, 483, 483, 483, 483, 483, 483, 483, 483, 483, 483, 483, 483, 483, 483, 483, 483, 483, 483, 483, 483, 483, 483, 483, 483, 483, 483, 483, 483, 483, 483, 483, 483, 483, 483, 483, 483, 483, 483, 483, 483, 483, 483, 483, 483, 483, 483, 483, 483, 483, 483, 483, 483, 483, 483, 483, 483, 483, 483, 483, 483, 483, 483, 483, 483, 483, 483, 483, 483, 483, 483, 483, 483, 483, 483, 483, 483, 483, 483, 483, 483, 483, 483, 483, 483, 483, 483, 483, 483, 483, 483, 483, 483, 483, 483, 483, 483, 483, 483, 483, 483, 483, 483, 483, 483, 483, 483, 483, 483, 483, 483, 483, 483, 483, 483, 483, 483, 483, 483, 483, 483, 483, 483, 483, 483, 483, 483, 483, 483, 483, 483, 483, 483, 483, 483, 483, 483, 483, 483, 483, 483, 483, 483, 483, 483, 483, 483, 483, 483, 483, 483, 483, 483, 483, 483, 483, 483, 483, 483, 483, 483, 483, 483, 483, 483, 483, 483, 483, 483, 483, 483, 483, 483, 483, 483, 483, 483, 483, 483, 483, 483, 483, 483, 483, 483, 483, 483, 483, 483, 483, 483, 483, 483, 483, 483, 483, 483, 483, 483, 483, 483, 483, 483, 483, 483, 483, 483, 483, 483, 483, 483, 483, 483, 483, 483, 483, 483, 483, 483, 483, 483, 483, 483, 483, 483, 483, 483, 483, 483, 483, 483, 483, 483, 483, 483, 483, 483, 483, 483, 483, 483, 483, 483, 483, 483, 483, 483, 483, 483, 483, 483, 483, 483, 483, 483, 483, 483, 483, 483, 483, 483, 483, 483, 483, 483, 483, 483, 483, 483, 483, 483, 483, 483, 483, 483, 483, 483, 483, 483, 483, 483, 483, 483, 483, 483, 483, 483, 483, 483, 483, 483, 483, 483, 483, 483, 483, 483, 483, 483, 483, 483, 483, 483, 483, 483, 483, 483, 483, 483, 483, 483, 483, 483, 483, 483, 483, 483, 483, 483, 483, 483, 483, 483, 483, 483, 483, 483, 483, 374: 483, 483, 483, 483, 483, 483, 483, 483, 483, 483, 483, 483, 483, 483, 483, 483, 483, 483, 483, 483, 483, 483, 483, 483, 483, 483, 401: 483, 483, 483, 407: 483, 483, 483, 411: 483, 483, 483, 483, 483, 483, 483, 483, 421: 483, 483, 483, 483, 483, 483, 483, 483, 432: 483, 434: 483}, + {460, 460, 460, 460, 460, 460, 460, 460, 460, 460, 460, 460, 460, 460, 460, 460, 460, 460, 460, 460, 460, 460, 460, 460, 460, 460, 460, 460, 460, 460, 460, 460, 460, 460, 460, 460, 460, 460, 460, 460, 460, 460, 460, 460, 460, 460, 460, 460, 460, 460, 460, 460, 460, 460, 460, 460, 460, 460, 460, 460, 460, 460, 460, 460, 460, 460, 460, 460, 460, 460, 460, 460, 460, 460, 460, 460, 460, 460, 460, 460, 460, 460, 460, 460, 460, 460, 460, 
460, 460, 460, 460, 460, 460, 460, 460, 460, 460, 460, 460, 460, 460, 460, 460, 460, 460, 460, 460, 460, 460, 460, 460, 460, 460, 460, 460, 460, 460, 460, 460, 460, 460, 460, 460, 460, 460, 460, 460, 460, 460, 460, 460, 460, 460, 460, 460, 460, 460, 460, 460, 460, 460, 460, 460, 460, 460, 460, 460, 460, 460, 460, 460, 460, 460, 460, 460, 460, 460, 460, 460, 460, 460, 460, 460, 460, 460, 460, 460, 460, 460, 460, 460, 460, 460, 460, 460, 460, 460, 460, 460, 460, 460, 460, 460, 460, 460, 460, 460, 460, 460, 460, 460, 460, 460, 460, 460, 460, 460, 460, 460, 460, 460, 460, 460, 460, 460, 460, 460, 460, 460, 460, 460, 460, 460, 460, 460, 460, 460, 460, 460, 460, 460, 460, 460, 460, 460, 460, 460, 460, 460, 460, 460, 460, 460, 460, 460, 460, 460, 460, 460, 460, 460, 460, 460, 460, 460, 460, 460, 460, 460, 460, 460, 460, 460, 460, 460, 460, 460, 460, 460, 460, 460, 460, 460, 460, 460, 460, 460, 460, 460, 460, 460, 460, 460, 460, 460, 460, 460, 460, 460, 460, 460, 460, 460, 460, 460, 460, 460, 460, 460, 460, 460, 460, 460, 460, 460, 460, 460, 460, 460, 460, 460, 460, 460, 460, 460, 460, 460, 460, 460, 460, 460, 460, 460, 460, 460, 460, 460, 460, 460, 460, 460, 460, 460, 460, 460, 460, 460, 460, 460, 460, 460, 460, 460, 460, 460, 460, 460, 460, 460, 460, 460, 460, 460, 460, 460, 460, 460, 460, 460, 460, 460, 460, 460, 460, 460, 460, 460, 460, 460, 460, 460, 460, 460, 460, 460, 460, 460, 460, 460, 460, 460, 460, 460, 375: 460, 377: 1846, 460, 460, 460, 460, 384: 460, 386: 460, 391: 460, 460, 460, 460, 460, 460, 460, 460, 460, 401: 460, 460, 460, 407: 460, 460, 460, 411: 460, 460, 460, 460, 460, 460, 460, 460, 421: 460, 460, 460, 460, 460, 460, 460, 460, 432: 460, 434: 460}, + {459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 
459, 375: 459, 377: 459, 459, 459, 459, 459, 384: 459, 386: 459, 391: 459, 459, 459, 459, 459, 459, 459, 459, 459, 1855, 459, 459, 459, 407: 459, 459, 459, 411: 459, 459, 459, 459, 459, 459, 459, 459, 421: 459, 459, 459, 459, 459, 459, 459, 459, 432: 459, 434: 459}, + {1309, 1332, 1217, 1442, 1436, 1426, 10: 1280, 1229, 1477, 1511, 1504, 1497, 1507, 1500, 1499, 1501, 1517, 1509, 1503, 1515, 1516, 1513, 1514, 1502, 1498, 1505, 1506, 1508, 1512, 1510, 1547, 1453, 1451, 1452, 1314, 1216, 1226, 1441, 1244, 1288, 1246, 1225, 1260, 1263, 1434, 1299, 1335, 1522, 1521, 1270, 1338, 1298, 1476, 1221, 1231, 1340, 1439, 1341, 1257, 1518, 1519, 1438, 1326, 1350, 1273, 1278, 1430, 1431, 1283, 1289, 1384, 1296, 1432, 1433, 1219, 1222, 1224, 1223, 1238, 1237, 1482, 1427, 1243, 1249, 1261, 1262, 1250, 1485, 1405, 1318, 1319, 1279, 1450, 1290, 1293, 1292, 1415, 1295, 1300, 1301, 1402, 1214, 1529, 1215, 1218, 1460, 1387, 1304, 1220, 1310, 1348, 1349, 1345, 1530, 1531, 1532, 1406, 1576, 1478, 1479, 1467, 1480, 1227, 1394, 1533, 1312, 1396, 1228, 1381, 1481, 1360, 1308, 1230, 1329, 1232, 1233, 1313, 1311, 1234, 1408, 1534, 1535, 1404, 1235, 1536, 1468, 1236, 1537, 1538, 1239, 1240, 1388, 1324, 1483, 1417, 1241, 1484, 1242, 1245, 1247, 1248, 1251, 1386, 1351, 1252, 1577, 1435, 1356, 1253, 1461, 1401, 1574, 1254, 1539, 1411, 1255, 1256, 1580, 1258, 1259, 1346, 1540, 1322, 1541, 1418, 1459, 1264, 1307, 1210, 1462, 1403, 1337, 1542, 1265, 1543, 1544, 1389, 1407, 1412, 1325, 1398, 1486, 1457, 1268, 1266, 1334, 1419, 1267, 1456, 1458, 1315, 1546, 1473, 1472, 1376, 1377, 1316, 1378, 1379, 1390, 1365, 1545, 1317, 1366, 1463, 1302, 1361, 1269, 1400, 1573, 1344, 1466, 1469, 1420, 1487, 1488, 1464, 1465, 1353, 1470, 1548, 1454, 1354, 1331, 1285, 1524, 1575, 1410, 1422, 1425, 1352, 1271, 1475, 1474, 1525, 1367, 1550, 1368, 1272, 1343, 1362, 1363, 1364, 1489, 1321, 1370, 1369, 1274, 1549, 1395, 1275, 1528, 1527, 1383, 1424, 1276, 1437, 1327, 1455, 1380, 1328, 1342, 1277, 1385, 1359, 1320, 1490, 1371, 1429, 1393, 1372, 1471, 1333, 1373, 1374, 1281, 1423, 1382, 1375, 1282, 1305, 1414, 1523, 1416, 1336, 1339, 1443, 1444, 1445, 1446, 1447, 1448, 1449, 1578, 1491, 1358, 1494, 1495, 1493, 1492, 1357, 1428, 1284, 1554, 1555, 1556, 1557, 1579, 1551, 1397, 1287, 1286, 1552, 1553, 1355, 1413, 1409, 1421, 1440, 1391, 1291, 1496, 1561, 1562, 1563, 1564, 1565, 1566, 1568, 1567, 1569, 1570, 1571, 1520, 1294, 1323, 1572, 1297, 1330, 1392, 1306, 1558, 1559, 1560, 1347, 1303, 1526, 1399, 523: 1852, 1212, 1213, 1211}, + // 500 + {455, 455, 455, 455, 455, 455, 455, 455, 455, 455, 455, 455, 455, 455, 455, 455, 455, 455, 455, 455, 455, 455, 455, 455, 455, 455, 455, 455, 455, 455, 455, 455, 455, 455, 455, 455, 455, 455, 455, 455, 455, 455, 455, 455, 455, 455, 455, 455, 455, 455, 455, 455, 455, 455, 455, 455, 455, 455, 455, 455, 455, 455, 455, 455, 455, 455, 455, 455, 455, 455, 455, 455, 455, 455, 455, 455, 455, 455, 455, 455, 455, 455, 455, 455, 455, 455, 455, 455, 455, 455, 455, 455, 455, 455, 455, 455, 455, 455, 455, 455, 455, 455, 455, 455, 455, 455, 455, 455, 455, 455, 455, 455, 455, 455, 455, 455, 455, 455, 455, 455, 455, 455, 455, 455, 455, 455, 455, 455, 455, 455, 455, 455, 455, 455, 455, 455, 455, 455, 455, 455, 455, 455, 455, 455, 455, 455, 455, 455, 455, 455, 455, 455, 455, 455, 455, 455, 455, 455, 455, 455, 455, 455, 455, 455, 455, 455, 455, 455, 455, 455, 455, 455, 455, 455, 455, 455, 455, 455, 455, 455, 455, 455, 455, 455, 455, 455, 455, 455, 455, 455, 455, 455, 455, 455, 455, 455, 455, 455, 455, 455, 455, 455, 455, 455, 455, 455, 
455, 455, 455, 455, 455, 455, 455, 455, 455, 455, 455, 455, 455, 455, 455, 455, 455, 455, 455, 455, 455, 455, 455, 455, 455, 455, 455, 455, 455, 455, 455, 455, 455, 455, 455, 455, 455, 455, 455, 455, 455, 455, 455, 455, 455, 455, 455, 455, 455, 455, 455, 455, 455, 455, 455, 455, 455, 455, 455, 455, 455, 455, 455, 455, 455, 455, 455, 455, 455, 455, 455, 455, 455, 455, 455, 455, 455, 455, 455, 455, 455, 455, 455, 455, 455, 455, 455, 455, 455, 455, 455, 455, 455, 455, 455, 455, 455, 455, 455, 455, 455, 455, 455, 455, 455, 455, 455, 455, 455, 455, 455, 455, 455, 455, 455, 455, 455, 455, 455, 455, 455, 455, 455, 455, 455, 455, 455, 455, 455, 455, 455, 455, 455, 455, 455, 455, 455, 455, 455, 455, 455, 455, 455, 455, 455, 455, 455, 455, 455, 455, 455, 455, 455, 455, 455, 455, 455, 455, 455, 455, 455, 455, 455, 455, 455, 455, 455, 375: 455, 377: 455, 455, 455, 455, 455, 384: 455, 386: 455, 391: 455, 455, 455, 455, 455, 455, 455, 455, 455, 401: 455, 455, 455, 407: 455, 455, 455, 411: 455, 455, 455, 455, 455, 455, 455, 455, 421: 455, 455, 455, 455, 455, 455, 455, 455, 432: 455, 434: 455}, + {454, 454, 454, 454, 454, 454, 454, 454, 454, 454, 454, 454, 454, 454, 454, 454, 454, 454, 454, 454, 454, 454, 454, 454, 454, 454, 454, 454, 454, 454, 454, 454, 454, 454, 454, 454, 454, 454, 454, 454, 454, 454, 454, 454, 454, 454, 454, 454, 454, 454, 454, 454, 454, 454, 454, 454, 454, 454, 454, 454, 454, 454, 454, 454, 454, 454, 454, 454, 454, 454, 454, 454, 454, 454, 454, 454, 454, 454, 454, 454, 454, 454, 454, 454, 454, 454, 454, 454, 454, 454, 454, 454, 454, 454, 454, 454, 454, 454, 454, 454, 454, 454, 454, 454, 454, 454, 454, 454, 454, 454, 454, 454, 454, 454, 454, 454, 454, 454, 454, 454, 454, 454, 454, 454, 454, 454, 454, 454, 454, 454, 454, 454, 454, 454, 454, 454, 454, 454, 454, 454, 454, 454, 454, 454, 454, 454, 454, 454, 454, 454, 454, 454, 454, 454, 454, 454, 454, 454, 454, 454, 454, 454, 454, 454, 454, 454, 454, 454, 454, 454, 454, 454, 454, 454, 454, 454, 454, 454, 454, 454, 454, 454, 454, 454, 454, 454, 454, 454, 454, 454, 454, 454, 454, 454, 454, 454, 454, 454, 454, 454, 454, 454, 454, 454, 454, 454, 454, 454, 454, 454, 454, 454, 454, 454, 454, 454, 454, 454, 454, 454, 454, 454, 454, 454, 454, 454, 454, 454, 454, 454, 454, 454, 454, 454, 454, 454, 454, 454, 454, 454, 454, 454, 454, 454, 454, 454, 454, 454, 454, 454, 454, 454, 454, 454, 454, 454, 454, 454, 454, 454, 454, 454, 454, 454, 454, 454, 454, 454, 454, 454, 454, 454, 454, 454, 454, 454, 454, 454, 454, 454, 454, 454, 454, 454, 454, 454, 454, 454, 454, 454, 454, 454, 454, 454, 454, 454, 454, 454, 454, 454, 454, 454, 454, 454, 454, 454, 454, 454, 454, 454, 454, 454, 454, 454, 454, 454, 454, 454, 454, 454, 454, 454, 454, 454, 454, 454, 454, 454, 454, 454, 454, 454, 454, 454, 454, 454, 454, 454, 454, 454, 454, 454, 454, 454, 454, 454, 454, 454, 454, 454, 454, 454, 454, 454, 454, 454, 454, 454, 454, 454, 454, 454, 454, 454, 454, 454, 454, 454, 454, 454, 454, 454, 454, 375: 454, 377: 454, 454, 454, 454, 454, 384: 454, 386: 454, 391: 454, 454, 454, 454, 454, 454, 454, 454, 454, 401: 454, 454, 454, 407: 454, 454, 454, 411: 454, 454, 454, 454, 454, 454, 454, 454, 421: 454, 454, 454, 454, 454, 454, 454, 454, 432: 454, 434: 454}, + {453, 453, 453, 453, 453, 453, 453, 453, 453, 453, 453, 453, 453, 453, 453, 453, 453, 453, 453, 453, 453, 453, 453, 453, 453, 453, 453, 453, 453, 453, 453, 453, 453, 453, 453, 453, 453, 453, 453, 453, 453, 453, 453, 453, 453, 453, 453, 453, 453, 453, 453, 453, 453, 453, 453, 453, 453, 453, 453, 453, 453, 453, 453, 453, 453, 
453, 453, 453, 453, 453, 453, 453, 453, 453, 453, 453, 453, 453, 453, 453, 453, 453, 453, 453, 453, 453, 453, 453, 453, 453, 453, 453, 453, 453, 453, 453, 453, 453, 453, 453, 453, 453, 453, 453, 453, 453, 453, 453, 453, 453, 453, 453, 453, 453, 453, 453, 453, 453, 453, 453, 453, 453, 453, 453, 453, 453, 453, 453, 453, 453, 453, 453, 453, 453, 453, 453, 453, 453, 453, 453, 453, 453, 453, 453, 453, 453, 453, 453, 453, 453, 453, 453, 453, 453, 453, 453, 453, 453, 453, 453, 453, 453, 453, 453, 453, 453, 453, 453, 453, 453, 453, 453, 453, 453, 453, 453, 453, 453, 453, 453, 453, 453, 453, 453, 453, 453, 453, 453, 453, 453, 453, 453, 453, 453, 453, 453, 453, 453, 453, 453, 453, 453, 453, 453, 453, 453, 453, 453, 453, 453, 453, 453, 453, 453, 453, 453, 453, 453, 453, 453, 453, 453, 453, 453, 453, 453, 453, 453, 453, 453, 453, 453, 453, 453, 453, 453, 453, 453, 453, 453, 453, 453, 453, 453, 453, 453, 453, 453, 453, 453, 453, 453, 453, 453, 453, 453, 453, 453, 453, 453, 453, 453, 453, 453, 453, 453, 453, 453, 453, 453, 453, 453, 453, 453, 453, 453, 453, 453, 453, 453, 453, 453, 453, 453, 453, 453, 453, 453, 453, 453, 453, 453, 453, 453, 453, 453, 453, 453, 453, 453, 453, 453, 453, 453, 453, 453, 453, 453, 453, 453, 453, 453, 453, 453, 453, 453, 453, 453, 453, 453, 453, 453, 453, 453, 453, 453, 453, 453, 453, 453, 453, 453, 453, 453, 453, 453, 453, 453, 453, 453, 453, 453, 453, 453, 453, 453, 453, 453, 453, 453, 453, 453, 453, 453, 453, 453, 453, 453, 453, 453, 453, 453, 453, 453, 453, 453, 453, 453, 453, 453, 453, 453, 453, 375: 453, 377: 453, 453, 453, 453, 453, 384: 453, 386: 453, 391: 453, 453, 453, 453, 453, 453, 453, 453, 453, 401: 453, 453, 453, 407: 453, 453, 453, 411: 453, 453, 453, 453, 453, 453, 453, 453, 421: 453, 453, 453, 453, 453, 453, 453, 453, 432: 453, 434: 453}, + {452, 452, 452, 452, 452, 452, 452, 452, 452, 452, 452, 452, 452, 452, 452, 452, 452, 452, 452, 452, 452, 452, 452, 452, 452, 452, 452, 452, 452, 452, 452, 452, 452, 452, 452, 452, 452, 452, 452, 452, 452, 452, 452, 452, 452, 452, 452, 452, 452, 452, 452, 452, 452, 452, 452, 452, 452, 452, 452, 452, 452, 452, 452, 452, 452, 452, 452, 452, 452, 452, 452, 452, 452, 452, 452, 452, 452, 452, 452, 452, 452, 452, 452, 452, 452, 452, 452, 452, 452, 452, 452, 452, 452, 452, 452, 452, 452, 452, 452, 452, 452, 452, 452, 452, 452, 452, 452, 452, 452, 452, 452, 452, 452, 452, 452, 452, 452, 452, 452, 452, 452, 452, 452, 452, 452, 452, 452, 452, 452, 452, 452, 452, 452, 452, 452, 452, 452, 452, 452, 452, 452, 452, 452, 452, 452, 452, 452, 452, 452, 452, 452, 452, 452, 452, 452, 452, 452, 452, 452, 452, 452, 452, 452, 452, 452, 452, 452, 452, 452, 452, 452, 452, 452, 452, 452, 452, 452, 452, 452, 452, 452, 452, 452, 452, 452, 452, 452, 452, 452, 452, 452, 452, 452, 452, 452, 452, 452, 452, 452, 452, 452, 452, 452, 452, 452, 452, 452, 452, 452, 452, 452, 452, 452, 452, 452, 452, 452, 452, 452, 452, 452, 452, 452, 452, 452, 452, 452, 452, 452, 452, 452, 452, 452, 452, 452, 452, 452, 452, 452, 452, 452, 452, 452, 452, 452, 452, 452, 452, 452, 452, 452, 452, 452, 452, 452, 452, 452, 452, 452, 452, 452, 452, 452, 452, 452, 452, 452, 452, 452, 452, 452, 452, 452, 452, 452, 452, 452, 452, 452, 452, 452, 452, 452, 452, 452, 452, 452, 452, 452, 452, 452, 452, 452, 452, 452, 452, 452, 452, 452, 452, 452, 452, 452, 452, 452, 452, 452, 452, 452, 452, 452, 452, 452, 452, 452, 452, 452, 452, 452, 452, 452, 452, 452, 452, 452, 452, 452, 452, 452, 452, 452, 452, 452, 452, 452, 452, 452, 452, 452, 452, 452, 452, 452, 452, 452, 452, 452, 452, 452, 452, 
452, 452, 452, 452, 452, 452, 452, 452, 452, 452, 452, 452, 452, 452, 452, 452, 452, 452, 452, 452, 452, 452, 452, 375: 452, 377: 452, 452, 452, 452, 452, 384: 452, 386: 452, 391: 452, 452, 452, 452, 452, 452, 452, 452, 452, 401: 452, 452, 452, 407: 452, 452, 452, 411: 452, 452, 452, 452, 452, 452, 452, 452, 421: 452, 452, 452, 452, 452, 452, 452, 452, 432: 452, 434: 452}, + {450, 450, 450, 450, 450, 450, 450, 450, 450, 450, 450, 450, 450, 450, 450, 450, 450, 450, 450, 450, 450, 450, 450, 450, 450, 450, 450, 450, 450, 450, 450, 450, 450, 450, 450, 450, 450, 450, 450, 450, 450, 450, 450, 450, 450, 450, 450, 450, 450, 450, 450, 450, 450, 450, 450, 450, 450, 450, 450, 450, 450, 450, 450, 450, 450, 450, 450, 450, 450, 450, 450, 450, 450, 450, 450, 450, 450, 450, 450, 450, 450, 450, 450, 450, 450, 450, 450, 450, 450, 450, 450, 450, 450, 450, 450, 450, 450, 450, 450, 450, 450, 450, 450, 450, 450, 450, 450, 450, 450, 450, 450, 450, 450, 450, 450, 450, 450, 450, 450, 450, 450, 450, 450, 450, 450, 450, 450, 450, 450, 450, 450, 450, 450, 450, 450, 450, 450, 450, 450, 450, 450, 450, 450, 450, 450, 450, 450, 450, 450, 450, 450, 450, 450, 450, 450, 450, 450, 450, 450, 450, 450, 450, 450, 450, 450, 450, 450, 450, 450, 450, 450, 450, 450, 450, 450, 450, 450, 450, 450, 450, 450, 450, 450, 450, 450, 450, 450, 450, 450, 450, 450, 450, 450, 450, 450, 450, 450, 450, 450, 450, 450, 450, 450, 450, 450, 450, 450, 450, 450, 450, 450, 450, 450, 450, 450, 450, 450, 450, 450, 450, 450, 450, 450, 450, 450, 450, 450, 450, 450, 450, 450, 450, 450, 450, 450, 450, 450, 450, 450, 450, 450, 450, 450, 450, 450, 450, 450, 450, 450, 450, 450, 450, 450, 450, 450, 450, 450, 450, 450, 450, 450, 450, 450, 450, 450, 450, 450, 450, 450, 450, 450, 450, 450, 450, 450, 450, 450, 450, 450, 450, 450, 450, 450, 450, 450, 450, 450, 450, 450, 450, 450, 450, 450, 450, 450, 450, 450, 450, 450, 450, 450, 450, 450, 450, 450, 450, 450, 450, 450, 450, 450, 450, 450, 450, 450, 450, 450, 450, 450, 450, 450, 450, 450, 450, 450, 450, 450, 450, 450, 450, 450, 450, 450, 450, 450, 450, 450, 450, 450, 450, 450, 450, 450, 450, 450, 450, 450, 450, 450, 450, 450, 450, 450, 450, 450, 450, 450, 450, 450, 450, 450, 450, 450, 450, 450, 450, 450, 450, 450, 450, 450, 450, 450, 375: 450, 377: 450, 450, 450, 450, 450, 384: 450, 386: 450, 391: 450, 450, 450, 450, 450, 450, 450, 450, 450, 401: 450, 450, 450, 407: 450, 450, 450, 411: 450, 450, 450, 450, 450, 450, 450, 450, 421: 450, 450, 450, 450, 450, 450, 450, 450, 432: 450, 434: 450}, + // 505 + {449, 449, 449, 449, 449, 449, 449, 449, 449, 449, 449, 449, 449, 449, 449, 449, 449, 449, 449, 449, 449, 449, 449, 449, 449, 449, 449, 449, 449, 449, 449, 449, 449, 449, 449, 449, 449, 449, 449, 449, 449, 449, 449, 449, 449, 449, 449, 449, 449, 449, 449, 449, 449, 449, 449, 449, 449, 449, 449, 449, 449, 449, 449, 449, 449, 449, 449, 449, 449, 449, 449, 449, 449, 449, 449, 449, 449, 449, 449, 449, 449, 449, 449, 449, 449, 449, 449, 449, 449, 449, 449, 449, 449, 449, 449, 449, 449, 449, 449, 449, 449, 449, 449, 449, 449, 449, 449, 449, 449, 449, 449, 449, 449, 449, 449, 449, 449, 449, 449, 449, 449, 449, 449, 449, 449, 449, 449, 449, 449, 449, 449, 449, 449, 449, 449, 449, 449, 449, 449, 449, 449, 449, 449, 449, 449, 449, 449, 449, 449, 449, 449, 449, 449, 449, 449, 449, 449, 449, 449, 449, 449, 449, 449, 449, 449, 449, 449, 449, 449, 449, 449, 449, 449, 449, 449, 449, 449, 449, 449, 449, 449, 449, 449, 449, 449, 449, 449, 449, 449, 449, 449, 449, 449, 449, 449, 449, 449, 449, 449, 449, 449, 449, 449, 449, 449, 449, 449, 
449, 449, 449, 449, 449, 449, 449, 449, 449, 449, 449, 449, 449, 449, 449, 449, 449, 449, 449, 449, 449, 449, 449, 449, 449, 449, 449, 449, 449, 449, 449, 449, 449, 449, 449, 449, 449, 449, 449, 449, 449, 449, 449, 449, 449, 449, 449, 449, 449, 449, 449, 449, 449, 449, 449, 449, 449, 449, 449, 449, 449, 449, 449, 449, 449, 449, 449, 449, 449, 449, 449, 449, 449, 449, 449, 449, 449, 449, 449, 449, 449, 449, 449, 449, 449, 449, 449, 449, 449, 449, 449, 449, 449, 449, 449, 449, 449, 449, 449, 449, 449, 449, 449, 449, 449, 449, 449, 449, 449, 449, 449, 449, 449, 449, 449, 449, 449, 449, 449, 449, 449, 449, 449, 449, 449, 449, 449, 449, 449, 449, 449, 449, 449, 449, 449, 449, 449, 449, 449, 449, 449, 449, 449, 449, 449, 449, 449, 449, 449, 449, 449, 449, 449, 449, 449, 449, 449, 449, 449, 449, 449, 449, 449, 449, 449, 449, 375: 449, 377: 449, 449, 449, 449, 449, 384: 449, 386: 449, 391: 449, 449, 449, 449, 449, 449, 449, 449, 449, 401: 449, 449, 449, 407: 449, 449, 449, 411: 449, 449, 449, 449, 449, 449, 449, 449, 421: 449, 449, 449, 449, 449, 449, 449, 449, 432: 449, 434: 449}, + {448, 448, 448, 448, 448, 448, 448, 448, 448, 448, 448, 448, 448, 448, 448, 448, 448, 448, 448, 448, 448, 448, 448, 448, 448, 448, 448, 448, 448, 448, 448, 448, 448, 448, 448, 448, 448, 448, 448, 448, 448, 448, 448, 448, 448, 448, 448, 448, 448, 448, 448, 448, 448, 448, 448, 448, 448, 448, 448, 448, 448, 448, 448, 448, 448, 448, 448, 448, 448, 448, 448, 448, 448, 448, 448, 448, 448, 448, 448, 448, 448, 448, 448, 448, 448, 448, 448, 448, 448, 448, 448, 448, 448, 448, 448, 448, 448, 448, 448, 448, 448, 448, 448, 448, 448, 448, 448, 448, 448, 448, 448, 448, 448, 448, 448, 448, 448, 448, 448, 448, 448, 448, 448, 448, 448, 448, 448, 448, 448, 448, 448, 448, 448, 448, 448, 448, 448, 448, 448, 448, 448, 448, 448, 448, 448, 448, 448, 448, 448, 448, 448, 448, 448, 448, 448, 448, 448, 448, 448, 448, 448, 448, 448, 448, 448, 448, 448, 448, 448, 448, 448, 448, 448, 448, 448, 448, 448, 448, 448, 448, 448, 448, 448, 448, 448, 448, 448, 448, 448, 448, 448, 448, 448, 448, 448, 448, 448, 448, 448, 448, 448, 448, 448, 448, 448, 448, 448, 448, 448, 448, 448, 448, 448, 448, 448, 448, 448, 448, 448, 448, 448, 448, 448, 448, 448, 448, 448, 448, 448, 448, 448, 448, 448, 448, 448, 448, 448, 448, 448, 448, 448, 448, 448, 448, 448, 448, 448, 448, 448, 448, 448, 448, 448, 448, 448, 448, 448, 448, 448, 448, 448, 448, 448, 448, 448, 448, 448, 448, 448, 448, 448, 448, 448, 448, 448, 448, 448, 448, 448, 448, 448, 448, 448, 448, 448, 448, 448, 448, 448, 448, 448, 448, 448, 448, 448, 448, 448, 448, 448, 448, 448, 448, 448, 448, 448, 448, 448, 448, 448, 448, 448, 448, 448, 448, 448, 448, 448, 448, 448, 448, 448, 448, 448, 448, 448, 448, 448, 448, 448, 448, 448, 448, 448, 448, 448, 448, 448, 448, 448, 448, 448, 448, 448, 448, 448, 448, 448, 448, 448, 448, 448, 448, 448, 448, 448, 448, 448, 448, 448, 448, 448, 448, 448, 448, 448, 448, 448, 448, 448, 448, 448, 448, 448, 375: 448, 377: 448, 448, 448, 448, 448, 384: 448, 386: 448, 391: 448, 448, 448, 448, 448, 448, 448, 448, 448, 401: 448, 448, 448, 407: 448, 448, 448, 411: 448, 448, 448, 448, 448, 448, 448, 448, 421: 448, 448, 448, 448, 448, 448, 448, 448, 432: 448, 434: 448}, + {1309, 1332, 1217, 1442, 1436, 1426, 10: 1280, 1625, 1477, 1511, 1504, 1497, 1507, 1500, 1499, 1501, 1517, 1509, 1503, 1515, 1516, 1513, 1514, 1502, 1498, 1505, 1506, 1508, 1512, 1510, 1547, 1453, 1451, 1452, 1314, 1623, 1226, 1441, 1244, 1288, 1246, 1225, 1628, 1263, 1434, 1299, 1335, 1522, 1521, 1270, 1338, 1298, 1476, 1221, 
1231, 1340, 1439, 1341, 1257, 1518, 1519, 1438, 1326, 1350, 1273, 1278, 1430, 1431, 1283, 1289, 1384, 1632, 1432, 1433, 1219, 1222, 1224, 1223, 1238, 1626, 1482, 1427, 1243, 1249, 1261, 1262, 1250, 1485, 1405, 1318, 1319, 1279, 1450, 1290, 1631, 1630, 1415, 1295, 1300, 1633, 1402, 1214, 1529, 1215, 1218, 1460, 1387, 1304, 1624, 1310, 1348, 1349, 1345, 1530, 1531, 1532, 1406, 1576, 1478, 1479, 1467, 1480, 1227, 1394, 1533, 1312, 1396, 1228, 1381, 1481, 1640, 1636, 1230, 1329, 1232, 1233, 1313, 1311, 1234, 1408, 1534, 1535, 1404, 1235, 1536, 1468, 1236, 1537, 1538, 1627, 1240, 1388, 1324, 1483, 1417, 1241, 1484, 1242, 1245, 1247, 1248, 1251, 1386, 1351, 1252, 1577, 1435, 1356, 1253, 1461, 1401, 1574, 1254, 1539, 1411, 1255, 1256, 1580, 1258, 1259, 1346, 1540, 1322, 1541, 1418, 1459, 1629, 1307, 1622, 1462, 1403, 1337, 1542, 1265, 1543, 1544, 1389, 1407, 1412, 1325, 1398, 1486, 1457, 1268, 1266, 1334, 1419, 1267, 1456, 1458, 1315, 1546, 1473, 1472, 1376, 1377, 1316, 1378, 1379, 1390, 1642, 1545, 1317, 1643, 1463, 1302, 1641, 1269, 1400, 1573, 1344, 1466, 1469, 1420, 1487, 1488, 1464, 1465, 1353, 1470, 1548, 1454, 1354, 1331, 1285, 1524, 1575, 1410, 1422, 1425, 1352, 1271, 1475, 1474, 1525, 1367, 1550, 1368, 1272, 1343, 1362, 1363, 1364, 1489, 1637, 1370, 1369, 1274, 1549, 1395, 1275, 1528, 1527, 1383, 1424, 1276, 1437, 1327, 1455, 1380, 1328, 1638, 1277, 1385, 1639, 1320, 1490, 1644, 1429, 1393, 1372, 1471, 1333, 1373, 1374, 1281, 1423, 1382, 1375, 1282, 1305, 1414, 1523, 1416, 1336, 1339, 1443, 1444, 1445, 1446, 1447, 1448, 1449, 1578, 1491, 1358, 1494, 1495, 1493, 1492, 1357, 1428, 1284, 1554, 1555, 1556, 1557, 1579, 1551, 1397, 1287, 1286, 1552, 1553, 1355, 1413, 1409, 1421, 1440, 1391, 1291, 1496, 1561, 1562, 1563, 1564, 1565, 1566, 1568, 1567, 1569, 1570, 1571, 1520, 1294, 1323, 1572, 1297, 1330, 1392, 1635, 1558, 1559, 1560, 1347, 1634, 1526, 1399, 373: 1671, 1674, 376: 1646, 378: 1655, 1669, 1668, 1699, 400: 1658, 404: 1758, 1677, 1650, 410: 1682, 419: 1645, 1647, 429: 1675, 1649, 1648, 433: 1676, 435: 1654, 1680, 1689, 1711, 1653, 1690, 1691, 1652, 1666, 1667, 1705, 1697, 1700, 1706, 1707, 1702, 1703, 1708, 1701, 1704, 1695, 1673, 1685, 1686, 1688, 1684, 1678, 1679, 1670, 1681, 1683, 1672, 1687, 1692, 1693, 523: 1657, 1212, 1213, 1211, 1663, 1659, 1651, 1662, 1660, 1661, 1694, 1698, 1696, 1851, 1665, 1709, 1710, 1664}, + {1309, 1332, 1217, 1442, 1436, 1426, 10: 1280, 1625, 1477, 1511, 1504, 1497, 1507, 1500, 1499, 1501, 1517, 1509, 1503, 1515, 1516, 1513, 1514, 1502, 1498, 1505, 1506, 1508, 1512, 1510, 1547, 1453, 1451, 1452, 1314, 1623, 1226, 1441, 1244, 1288, 1246, 1225, 1628, 1263, 1434, 1299, 1335, 1522, 1521, 1270, 1338, 1298, 1476, 1221, 1231, 1340, 1439, 1341, 1257, 1518, 1519, 1438, 1326, 1350, 1273, 1278, 1430, 1431, 1283, 1289, 1384, 1632, 1432, 1433, 1219, 1222, 1224, 1223, 1238, 1626, 1482, 1427, 1243, 1249, 1261, 1262, 1250, 1485, 1405, 1318, 1319, 1279, 1450, 1290, 1631, 1630, 1415, 1295, 1300, 1633, 1402, 1214, 1529, 1215, 1218, 1460, 1387, 1304, 1624, 1310, 1348, 1349, 1345, 1530, 1531, 1532, 1406, 1576, 1478, 1479, 1467, 1480, 1227, 1394, 1533, 1312, 1396, 1228, 1381, 1481, 1640, 1636, 1230, 1329, 1232, 1233, 1313, 1311, 1234, 1408, 1534, 1535, 1404, 1235, 1536, 1468, 1236, 1537, 1538, 1627, 1240, 1388, 1324, 1483, 1417, 1241, 1484, 1242, 1245, 1247, 1248, 1251, 1386, 1351, 1252, 1577, 1435, 1356, 1253, 1461, 1401, 1574, 1254, 1539, 1411, 1255, 1256, 1580, 1258, 1259, 1346, 1540, 1322, 1541, 1418, 1459, 1629, 1307, 1622, 1462, 1403, 1337, 1542, 1265, 1543, 1544, 1389, 
1407, 1412, 1325, 1398, 1486, 1457, 1268, 1266, 1334, 1419, 1267, 1456, 1458, 1315, 1546, 1473, 1472, 1376, 1377, 1316, 1378, 1379, 1390, 1642, 1545, 1317, 1643, 1463, 1302, 1641, 1269, 1400, 1573, 1344, 1466, 1469, 1420, 1487, 1488, 1464, 1465, 1353, 1470, 1548, 1454, 1354, 1331, 1285, 1524, 1575, 1410, 1422, 1425, 1352, 1271, 1475, 1474, 1525, 1367, 1550, 1368, 1272, 1343, 1362, 1363, 1364, 1489, 1637, 1370, 1369, 1274, 1549, 1395, 1275, 1528, 1527, 1383, 1424, 1276, 1437, 1327, 1455, 1380, 1328, 1638, 1277, 1385, 1639, 1320, 1490, 1644, 1429, 1393, 1372, 1471, 1333, 1373, 1374, 1281, 1423, 1382, 1375, 1282, 1305, 1414, 1523, 1416, 1336, 1339, 1443, 1444, 1445, 1446, 1447, 1448, 1449, 1578, 1491, 1358, 1494, 1495, 1493, 1492, 1357, 1428, 1284, 1554, 1555, 1556, 1557, 1579, 1551, 1397, 1287, 1286, 1552, 1553, 1355, 1413, 1409, 1421, 1440, 1391, 1291, 1496, 1561, 1562, 1563, 1564, 1565, 1566, 1568, 1567, 1569, 1570, 1571, 1520, 1294, 1323, 1572, 1297, 1330, 1392, 1635, 1558, 1559, 1560, 1347, 1634, 1526, 1399, 373: 1671, 1674, 376: 1646, 378: 1655, 1669, 1668, 1699, 400: 1658, 404: 1758, 1677, 1650, 410: 1682, 419: 1645, 1647, 429: 1675, 1649, 1648, 433: 1676, 435: 1654, 1680, 1689, 1711, 1653, 1690, 1691, 1652, 1666, 1667, 1705, 1697, 1700, 1706, 1707, 1702, 1703, 1708, 1701, 1704, 1695, 1673, 1685, 1686, 1688, 1684, 1678, 1679, 1670, 1681, 1683, 1672, 1687, 1692, 1693, 523: 1657, 1212, 1213, 1211, 1663, 1659, 1651, 1662, 1660, 1661, 1694, 1698, 1696, 1850, 1665, 1709, 1710, 1664}, + {1309, 1332, 1217, 1442, 1436, 1426, 10: 1280, 1625, 1477, 1511, 1504, 1497, 1507, 1500, 1499, 1501, 1517, 1509, 1503, 1515, 1516, 1513, 1514, 1502, 1498, 1505, 1506, 1508, 1512, 1510, 1547, 1453, 1451, 1452, 1314, 1623, 1226, 1441, 1244, 1288, 1246, 1225, 1628, 1263, 1434, 1299, 1335, 1522, 1521, 1270, 1338, 1298, 1476, 1221, 1231, 1340, 1439, 1341, 1257, 1518, 1519, 1438, 1326, 1350, 1273, 1278, 1430, 1431, 1283, 1289, 1384, 1632, 1432, 1433, 1219, 1222, 1224, 1223, 1238, 1626, 1482, 1427, 1243, 1249, 1261, 1262, 1250, 1485, 1405, 1318, 1319, 1279, 1450, 1290, 1631, 1630, 1415, 1295, 1300, 1633, 1402, 1214, 1529, 1215, 1218, 1460, 1387, 1304, 1624, 1310, 1348, 1349, 1345, 1530, 1531, 1532, 1406, 1576, 1478, 1479, 1467, 1480, 1227, 1394, 1533, 1312, 1396, 1228, 1381, 1481, 1640, 1636, 1230, 1329, 1232, 1233, 1313, 1311, 1234, 1408, 1534, 1535, 1404, 1235, 1536, 1468, 1236, 1537, 1538, 1627, 1240, 1388, 1324, 1483, 1417, 1241, 1484, 1242, 1245, 1247, 1248, 1251, 1386, 1351, 1252, 1577, 1435, 1356, 1253, 1461, 1401, 1574, 1254, 1539, 1411, 1255, 1256, 1580, 1258, 1259, 1346, 1540, 1322, 1541, 1418, 1459, 1629, 1307, 1622, 1462, 1403, 1337, 1542, 1265, 1543, 1544, 1389, 1407, 1412, 1325, 1398, 1486, 1457, 1268, 1266, 1334, 1419, 1267, 1456, 1458, 1315, 1546, 1473, 1472, 1376, 1377, 1316, 1378, 1379, 1390, 1642, 1545, 1317, 1643, 1463, 1302, 1641, 1269, 1400, 1573, 1344, 1466, 1469, 1420, 1487, 1488, 1464, 1465, 1353, 1470, 1548, 1454, 1354, 1331, 1285, 1524, 1575, 1410, 1422, 1425, 1352, 1271, 1475, 1474, 1525, 1367, 1550, 1368, 1272, 1343, 1362, 1363, 1364, 1489, 1637, 1370, 1369, 1274, 1549, 1395, 1275, 1528, 1527, 1383, 1424, 1276, 1437, 1327, 1455, 1380, 1328, 1638, 1277, 1385, 1639, 1320, 1490, 1644, 1429, 1393, 1372, 1471, 1333, 1373, 1374, 1281, 1423, 1382, 1375, 1282, 1305, 1414, 1523, 1416, 1336, 1339, 1443, 1444, 1445, 1446, 1447, 1448, 1449, 1578, 1491, 1358, 1494, 1495, 1493, 1492, 1357, 1428, 1284, 1554, 1555, 1556, 1557, 1579, 1551, 1397, 1287, 1286, 1552, 1553, 1355, 1413, 1409, 1421, 1440, 1391, 
1291, 1496, 1561, 1562, 1563, 1564, 1565, 1566, 1568, 1567, 1569, 1570, 1571, 1520, 1294, 1323, 1572, 1297, 1330, 1392, 1635, 1558, 1559, 1560, 1347, 1634, 1526, 1399, 373: 1671, 1674, 376: 1646, 378: 1655, 1669, 1668, 1699, 400: 1658, 404: 1758, 1677, 1650, 410: 1682, 419: 1645, 1647, 429: 1675, 1649, 1648, 433: 1676, 435: 1654, 1680, 1689, 1711, 1653, 1690, 1691, 1652, 1666, 1667, 1705, 1697, 1700, 1706, 1707, 1702, 1703, 1708, 1701, 1704, 1695, 1673, 1685, 1686, 1688, 1684, 1678, 1679, 1670, 1681, 1683, 1672, 1687, 1692, 1693, 523: 1657, 1212, 1213, 1211, 1663, 1659, 1651, 1662, 1660, 1661, 1694, 1698, 1696, 1849, 1665, 1709, 1710, 1664}, + // 510 + {1309, 1332, 1217, 1442, 1436, 1426, 10: 1280, 1625, 1477, 1511, 1504, 1497, 1507, 1500, 1499, 1501, 1517, 1509, 1503, 1515, 1516, 1513, 1514, 1502, 1498, 1505, 1506, 1508, 1512, 1510, 1547, 1453, 1451, 1452, 1314, 1623, 1226, 1441, 1244, 1288, 1246, 1225, 1628, 1263, 1434, 1299, 1335, 1522, 1521, 1270, 1338, 1298, 1476, 1221, 1231, 1340, 1439, 1341, 1257, 1518, 1519, 1438, 1326, 1350, 1273, 1278, 1430, 1431, 1283, 1289, 1384, 1632, 1432, 1433, 1219, 1222, 1224, 1223, 1238, 1626, 1482, 1427, 1243, 1249, 1261, 1262, 1250, 1485, 1405, 1318, 1319, 1279, 1450, 1290, 1631, 1630, 1415, 1295, 1300, 1633, 1402, 1214, 1529, 1215, 1218, 1460, 1387, 1304, 1624, 1310, 1348, 1349, 1345, 1530, 1531, 1532, 1406, 1576, 1478, 1479, 1467, 1480, 1227, 1394, 1533, 1312, 1396, 1228, 1381, 1481, 1640, 1636, 1230, 1329, 1232, 1233, 1313, 1311, 1234, 1408, 1534, 1535, 1404, 1235, 1536, 1468, 1236, 1537, 1538, 1627, 1240, 1388, 1324, 1483, 1417, 1241, 1484, 1242, 1245, 1247, 1248, 1251, 1386, 1351, 1252, 1577, 1435, 1356, 1253, 1461, 1401, 1574, 1254, 1539, 1411, 1255, 1256, 1580, 1258, 1259, 1346, 1540, 1322, 1541, 1418, 1459, 1629, 1307, 1622, 1462, 1403, 1337, 1542, 1265, 1543, 1544, 1389, 1407, 1412, 1325, 1398, 1486, 1457, 1268, 1266, 1334, 1419, 1267, 1456, 1458, 1315, 1546, 1473, 1472, 1376, 1377, 1316, 1378, 1379, 1390, 1642, 1545, 1317, 1643, 1463, 1302, 1641, 1269, 1400, 1573, 1344, 1466, 1469, 1420, 1487, 1488, 1464, 1465, 1353, 1470, 1548, 1454, 1354, 1331, 1285, 1524, 1575, 1410, 1422, 1425, 1352, 1271, 1475, 1474, 1525, 1367, 1550, 1368, 1272, 1343, 1362, 1363, 1364, 1489, 1637, 1370, 1369, 1274, 1549, 1395, 1275, 1528, 1527, 1383, 1424, 1276, 1437, 1327, 1455, 1380, 1328, 1638, 1277, 1385, 1639, 1320, 1490, 1644, 1429, 1393, 1372, 1471, 1333, 1373, 1374, 1281, 1423, 1382, 1375, 1282, 1305, 1414, 1523, 1416, 1336, 1339, 1443, 1444, 1445, 1446, 1447, 1448, 1449, 1578, 1491, 1358, 1494, 1495, 1493, 1492, 1357, 1428, 1284, 1554, 1555, 1556, 1557, 1579, 1551, 1397, 1287, 1286, 1552, 1553, 1355, 1413, 1409, 1421, 1440, 1391, 1291, 1496, 1561, 1562, 1563, 1564, 1565, 1566, 1568, 1567, 1569, 1570, 1571, 1520, 1294, 1323, 1572, 1297, 1330, 1392, 1635, 1558, 1559, 1560, 1347, 1634, 1526, 1399, 373: 1671, 1674, 376: 1646, 378: 1655, 1669, 1668, 1699, 400: 1658, 404: 1758, 1677, 1650, 410: 1682, 419: 1645, 1647, 429: 1675, 1649, 1648, 433: 1676, 435: 1654, 1680, 1689, 1711, 1653, 1690, 1691, 1652, 1666, 1667, 1705, 1697, 1700, 1706, 1707, 1702, 1703, 1708, 1701, 1704, 1695, 1673, 1685, 1686, 1688, 1684, 1678, 1679, 1670, 1681, 1683, 1672, 1687, 1692, 1693, 523: 1657, 1212, 1213, 1211, 1663, 1659, 1651, 1662, 1660, 1661, 1694, 1698, 1696, 1848, 1665, 1709, 1710, 1664}, + {1309, 1332, 1217, 1442, 1436, 1426, 10: 1280, 1625, 1477, 1511, 1504, 1497, 1507, 1500, 1499, 1501, 1517, 1509, 1503, 1515, 1516, 1513, 1514, 1502, 1498, 1505, 1506, 1508, 1512, 1510, 1547, 1453, 
1451, 1452, 1314, 1623, 1226, 1441, 1244, 1288, 1246, 1225, 1628, 1263, 1434, 1299, 1335, 1522, 1521, 1270, 1338, 1298, 1476, 1221, 1231, 1340, 1439, 1341, 1257, 1518, 1519, 1438, 1326, 1350, 1273, 1278, 1430, 1431, 1283, 1289, 1384, 1632, 1432, 1433, 1219, 1222, 1224, 1223, 1238, 1626, 1482, 1427, 1243, 1249, 1261, 1262, 1250, 1485, 1405, 1318, 1319, 1279, 1450, 1290, 1631, 1630, 1415, 1295, 1300, 1633, 1402, 1214, 1529, 1215, 1218, 1460, 1387, 1304, 1624, 1310, 1348, 1349, 1345, 1530, 1531, 1532, 1406, 1576, 1478, 1479, 1467, 1480, 1227, 1394, 1533, 1312, 1396, 1228, 1381, 1481, 1640, 1636, 1230, 1329, 1232, 1233, 1313, 1311, 1234, 1408, 1534, 1535, 1404, 1235, 1536, 1468, 1236, 1537, 1538, 1627, 1240, 1388, 1324, 1483, 1417, 1241, 1484, 1242, 1245, 1247, 1248, 1251, 1386, 1351, 1252, 1577, 1435, 1356, 1253, 1461, 1401, 1574, 1254, 1539, 1411, 1255, 1256, 1580, 1258, 1259, 1346, 1540, 1322, 1541, 1418, 1459, 1629, 1307, 1622, 1462, 1403, 1337, 1542, 1265, 1543, 1544, 1389, 1407, 1412, 1325, 1398, 1486, 1457, 1268, 1266, 1334, 1419, 1267, 1456, 1458, 1315, 1546, 1473, 1472, 1376, 1377, 1316, 1378, 1379, 1390, 1642, 1545, 1317, 1643, 1463, 1302, 1641, 1269, 1400, 1573, 1344, 1466, 1469, 1420, 1487, 1488, 1464, 1465, 1353, 1470, 1548, 1454, 1354, 1331, 1285, 1524, 1575, 1410, 1422, 1425, 1352, 1271, 1475, 1474, 1525, 1367, 1550, 1368, 1272, 1343, 1362, 1363, 1364, 1489, 1637, 1370, 1369, 1274, 1549, 1395, 1275, 1528, 1527, 1383, 1424, 1276, 1437, 1327, 1455, 1380, 1328, 1638, 1277, 1385, 1639, 1320, 1490, 1644, 1429, 1393, 1372, 1471, 1333, 1373, 1374, 1281, 1423, 1382, 1375, 1282, 1305, 1414, 1523, 1416, 1336, 1339, 1443, 1444, 1445, 1446, 1447, 1448, 1449, 1578, 1491, 1358, 1494, 1495, 1493, 1492, 1357, 1428, 1284, 1554, 1555, 1556, 1557, 1579, 1551, 1397, 1287, 1286, 1552, 1553, 1355, 1413, 1409, 1421, 1440, 1391, 1291, 1496, 1561, 1562, 1563, 1564, 1565, 1566, 1568, 1567, 1569, 1570, 1571, 1520, 1294, 1323, 1572, 1297, 1330, 1392, 1635, 1558, 1559, 1560, 1347, 1634, 1526, 1399, 373: 1671, 1674, 376: 1646, 378: 1655, 1669, 1668, 1699, 400: 1658, 404: 1758, 1677, 1650, 410: 1682, 419: 1645, 1647, 429: 1675, 1649, 1648, 433: 1676, 435: 1654, 1680, 1689, 1711, 1653, 1690, 1691, 1652, 1666, 1667, 1705, 1697, 1700, 1706, 1707, 1702, 1703, 1708, 1701, 1704, 1695, 1673, 1685, 1686, 1688, 1684, 1678, 1679, 1670, 1681, 1683, 1672, 1687, 1692, 1693, 523: 1657, 1212, 1213, 1211, 1663, 1659, 1651, 1662, 1660, 1661, 1694, 1698, 1696, 1845, 1665, 1709, 1710, 1664}, + {1309, 1332, 1217, 1442, 1436, 1426, 10: 1280, 1625, 1477, 1511, 1504, 1497, 1507, 1500, 1499, 1501, 1517, 1509, 1503, 1515, 1516, 1513, 1514, 1502, 1498, 1505, 1506, 1508, 1512, 1510, 1547, 1453, 1451, 1452, 1314, 1623, 1226, 1441, 1244, 1288, 1246, 1225, 1628, 1263, 1434, 1299, 1335, 1522, 1521, 1270, 1338, 1298, 1476, 1221, 1231, 1340, 1439, 1341, 1257, 1518, 1519, 1438, 1326, 1350, 1273, 1278, 1430, 1431, 1283, 1289, 1384, 1632, 1432, 1433, 1219, 1222, 1224, 1223, 1238, 1626, 1482, 1427, 1243, 1249, 1261, 1262, 1250, 1485, 1405, 1318, 1319, 1279, 1450, 1290, 1631, 1630, 1415, 1295, 1300, 1633, 1402, 1214, 1529, 1215, 1218, 1460, 1387, 1304, 1624, 1310, 1348, 1349, 1345, 1530, 1531, 1532, 1406, 1576, 1478, 1479, 1467, 1480, 1227, 1394, 1533, 1312, 1396, 1228, 1381, 1481, 1640, 1636, 1230, 1329, 1232, 1233, 1313, 1311, 1234, 1408, 1534, 1535, 1404, 1235, 1536, 1468, 1236, 1537, 1538, 1627, 1240, 1388, 1324, 1483, 1417, 1241, 1484, 1242, 1245, 1247, 1248, 1251, 1386, 1351, 1252, 1577, 1435, 1356, 1253, 1461, 1401, 1574, 1254, 1539, 1411, 
1255, 1256, 1580, 1258, 1259, 1346, 1540, 1322, 1541, 1418, 1459, 1629, 1307, 1622, 1462, 1403, 1337, 1542, 1265, 1543, 1544, 1389, 1407, 1412, 1325, 1398, 1486, 1457, 1268, 1266, 1334, 1419, 1267, 1456, 1458, 1315, 1546, 1473, 1472, 1376, 1377, 1316, 1378, 1379, 1390, 1642, 1545, 1317, 1643, 1463, 1302, 1641, 1269, 1400, 1573, 1344, 1466, 1469, 1420, 1487, 1488, 1464, 1465, 1353, 1470, 1548, 1454, 1354, 1331, 1285, 1524, 1575, 1410, 1422, 1425, 1352, 1271, 1475, 1474, 1525, 1367, 1550, 1368, 1272, 1343, 1362, 1363, 1364, 1489, 1637, 1370, 1369, 1274, 1549, 1395, 1275, 1528, 1527, 1383, 1424, 1276, 1437, 1327, 1455, 1380, 1328, 1638, 1277, 1385, 1639, 1320, 1490, 1644, 1429, 1393, 1372, 1471, 1333, 1373, 1374, 1281, 1423, 1382, 1375, 1282, 1305, 1414, 1523, 1416, 1336, 1339, 1443, 1444, 1445, 1446, 1447, 1448, 1449, 1578, 1491, 1358, 1494, 1495, 1493, 1492, 1357, 1428, 1284, 1554, 1555, 1556, 1557, 1579, 1551, 1397, 1287, 1286, 1552, 1553, 1355, 1413, 1409, 1421, 1440, 1391, 1291, 1496, 1561, 1562, 1563, 1564, 1565, 1566, 1568, 1567, 1569, 1570, 1571, 1520, 1294, 1323, 1572, 1297, 1330, 1392, 1635, 1558, 1559, 1560, 1347, 1634, 1526, 1399, 1618, 1671, 1674, 376: 1646, 378: 1655, 1669, 1668, 1699, 400: 1658, 404: 1616, 1677, 1650, 410: 1682, 419: 1645, 1647, 429: 1675, 1649, 1648, 433: 1676, 435: 1654, 1680, 1689, 1711, 1653, 1690, 1691, 1652, 1666, 1667, 1705, 1697, 1700, 1706, 1707, 1702, 1703, 1708, 1701, 1704, 1695, 1673, 1685, 1686, 1688, 1684, 1678, 1679, 1670, 1681, 1683, 1672, 1687, 1692, 1693, 523: 1657, 1212, 1213, 1211, 1663, 1659, 1651, 1662, 1660, 1661, 1694, 1698, 1696, 1656, 1665, 1709, 1710, 1664, 1621, 1620, 1619, 1839, 586: 1840}, + {373: 1834}, + {373: 1825}, + // 515 + {373: 1822}, + {373: 1819}, + {373: 423}, + {373: 420}, + {373: 419}, + // 520 + {373: 417}, + {373: 413}, + {373: 411}, + {373: 410}, + {373: 408}, + // 525 + {397, 397, 397, 397, 397, 397, 397, 397, 397, 397, 397, 397, 397, 397, 397, 397, 397, 397, 397, 397, 397, 397, 397, 397, 397, 397, 397, 397, 397, 397, 397, 397, 397, 397, 397, 397, 397, 397, 397, 397, 397, 397, 397, 397, 397, 397, 397, 397, 397, 397, 397, 397, 397, 397, 397, 397, 397, 397, 397, 397, 397, 397, 397, 397, 397, 397, 397, 397, 397, 397, 397, 397, 397, 397, 397, 397, 397, 397, 397, 397, 397, 397, 397, 397, 397, 397, 397, 397, 397, 397, 397, 397, 397, 397, 397, 397, 397, 397, 397, 397, 397, 397, 397, 397, 397, 397, 397, 397, 397, 397, 397, 397, 397, 397, 397, 397, 397, 397, 397, 397, 397, 397, 397, 397, 397, 397, 397, 397, 397, 397, 397, 397, 397, 397, 397, 397, 397, 397, 397, 397, 397, 397, 397, 397, 397, 397, 397, 397, 397, 397, 397, 397, 397, 397, 397, 397, 397, 397, 397, 397, 397, 397, 397, 397, 397, 397, 397, 397, 397, 397, 397, 397, 397, 397, 397, 397, 397, 397, 397, 397, 397, 397, 397, 397, 397, 397, 397, 397, 397, 397, 397, 397, 397, 397, 397, 397, 397, 397, 397, 397, 397, 397, 397, 397, 397, 397, 397, 397, 397, 397, 397, 397, 397, 397, 397, 397, 397, 397, 397, 397, 397, 397, 397, 397, 397, 397, 397, 397, 397, 397, 397, 397, 397, 397, 397, 397, 397, 397, 397, 397, 397, 397, 397, 397, 397, 397, 397, 397, 397, 397, 397, 397, 397, 397, 397, 397, 397, 397, 397, 397, 397, 397, 397, 397, 397, 397, 397, 397, 397, 397, 397, 397, 397, 397, 397, 397, 397, 397, 397, 397, 397, 397, 397, 397, 397, 397, 397, 397, 397, 397, 397, 397, 397, 397, 397, 397, 397, 397, 397, 397, 397, 397, 397, 397, 397, 397, 397, 397, 397, 397, 397, 397, 397, 397, 397, 397, 397, 397, 397, 397, 397, 397, 397, 397, 397, 397, 397, 397, 397, 397, 397, 397, 397, 397, 397, 
397, 397, 397, 397, 397, 397, 397, 397, 397, 397, 397, 397, 397, 397, 397, 397, 397, 397, 397, 397, 397, 397, 397, 397, 397, 397, 397, 397, 397, 397, 397, 397, 397, 397, 397, 397, 397, 397, 397, 375: 397, 377: 397, 397, 397, 397, 397, 384: 397, 386: 397, 391: 397, 397, 397, 397, 397, 397, 397, 397, 397, 401: 397, 397, 397, 407: 397, 397, 397, 411: 397, 397, 397, 397, 397, 397, 397, 397, 421: 397, 397, 397, 397, 397, 397, 397, 397, 432: 397, 434: 397}, + {396, 396, 396, 396, 396, 396, 396, 396, 396, 396, 396, 396, 396, 396, 396, 396, 396, 396, 396, 396, 396, 396, 396, 396, 396, 396, 396, 396, 396, 396, 396, 396, 396, 396, 396, 396, 396, 396, 396, 396, 396, 396, 396, 396, 396, 396, 396, 396, 396, 396, 396, 396, 396, 396, 396, 396, 396, 396, 396, 396, 396, 396, 396, 396, 396, 396, 396, 396, 396, 396, 396, 396, 396, 396, 396, 396, 396, 396, 396, 396, 396, 396, 396, 396, 396, 396, 396, 396, 396, 396, 396, 396, 396, 396, 396, 396, 396, 396, 396, 396, 396, 396, 396, 396, 396, 396, 396, 396, 396, 396, 396, 396, 396, 396, 396, 396, 396, 396, 396, 396, 396, 396, 396, 396, 396, 396, 396, 396, 396, 396, 396, 396, 396, 396, 396, 396, 396, 396, 396, 396, 396, 396, 396, 396, 396, 396, 396, 396, 396, 396, 396, 396, 396, 396, 396, 396, 396, 396, 396, 396, 396, 396, 396, 396, 396, 396, 396, 396, 396, 396, 396, 396, 396, 396, 396, 396, 396, 396, 396, 396, 396, 396, 396, 396, 396, 396, 396, 396, 396, 396, 396, 396, 396, 396, 396, 396, 396, 396, 396, 396, 396, 396, 396, 396, 396, 396, 396, 396, 396, 396, 396, 396, 396, 396, 396, 396, 396, 396, 396, 396, 396, 396, 396, 396, 396, 396, 396, 396, 396, 396, 396, 396, 396, 396, 396, 396, 396, 396, 396, 396, 396, 396, 396, 396, 396, 396, 396, 396, 396, 396, 396, 396, 396, 396, 396, 396, 396, 396, 396, 396, 396, 396, 396, 396, 396, 396, 396, 396, 396, 396, 396, 396, 396, 396, 396, 396, 396, 396, 396, 396, 396, 396, 396, 396, 396, 396, 396, 396, 396, 396, 396, 396, 396, 396, 396, 396, 396, 396, 396, 396, 396, 396, 396, 396, 396, 396, 396, 396, 396, 396, 396, 396, 396, 396, 396, 396, 396, 396, 396, 396, 396, 396, 396, 396, 396, 396, 396, 396, 396, 396, 396, 396, 396, 396, 396, 396, 396, 396, 396, 396, 396, 396, 396, 396, 396, 396, 396, 396, 396, 396, 396, 396, 396, 396, 396, 396, 396, 396, 396, 396, 396, 396, 396, 396, 396, 396, 396, 396, 396, 396, 396, 396, 396, 396, 375: 396, 377: 396, 396, 396, 396, 396, 384: 396, 386: 396, 391: 396, 396, 396, 396, 396, 396, 396, 396, 396, 401: 396, 396, 396, 407: 396, 396, 396, 411: 396, 396, 396, 396, 396, 396, 396, 396, 421: 396, 396, 396, 396, 396, 396, 396, 396, 432: 396, 434: 396}, + {395, 395, 395, 395, 395, 395, 395, 395, 395, 395, 395, 395, 395, 395, 395, 395, 395, 395, 395, 395, 395, 395, 395, 395, 395, 395, 395, 395, 395, 395, 395, 395, 395, 395, 395, 395, 395, 395, 395, 395, 395, 395, 395, 395, 395, 395, 395, 395, 395, 395, 395, 395, 395, 395, 395, 395, 395, 395, 395, 395, 395, 395, 395, 395, 395, 395, 395, 395, 395, 395, 395, 395, 395, 395, 395, 395, 395, 395, 395, 395, 395, 395, 395, 395, 395, 395, 395, 395, 395, 395, 395, 395, 395, 395, 395, 395, 395, 395, 395, 395, 395, 395, 395, 395, 395, 395, 395, 395, 395, 395, 395, 395, 395, 395, 395, 395, 395, 395, 395, 395, 395, 395, 395, 395, 395, 395, 395, 395, 395, 395, 395, 395, 395, 395, 395, 395, 395, 395, 395, 395, 395, 395, 395, 395, 395, 395, 395, 395, 395, 395, 395, 395, 395, 395, 395, 395, 395, 395, 395, 395, 395, 395, 395, 395, 395, 395, 395, 395, 395, 395, 395, 395, 395, 395, 395, 395, 395, 395, 395, 395, 395, 395, 395, 395, 395, 395, 395, 395, 395, 395, 395, 395, 
395, 395, 395, 395, 395, 395, 395, 395, 395, 395, 395, 395, 395, 395, 395, 395, 395, 395, 395, 395, 395, 395, 395, 395, 395, 395, 395, 395, 395, 395, 395, 395, 395, 395, 395, 395, 395, 395, 395, 395, 395, 395, 395, 395, 395, 395, 395, 395, 395, 395, 395, 395, 395, 395, 395, 395, 395, 395, 395, 395, 395, 395, 395, 395, 395, 395, 395, 395, 395, 395, 395, 395, 395, 395, 395, 395, 395, 395, 395, 395, 395, 395, 395, 395, 395, 395, 395, 395, 395, 395, 395, 395, 395, 395, 395, 395, 395, 395, 395, 395, 395, 395, 395, 395, 395, 395, 395, 395, 395, 395, 395, 395, 395, 395, 395, 395, 395, 395, 395, 395, 395, 395, 395, 395, 395, 395, 395, 395, 395, 395, 395, 395, 395, 395, 395, 395, 395, 395, 395, 395, 395, 395, 395, 395, 395, 395, 395, 395, 395, 395, 395, 395, 395, 395, 395, 395, 395, 395, 395, 395, 395, 395, 395, 395, 395, 395, 395, 395, 395, 395, 395, 395, 395, 395, 395, 395, 395, 395, 395, 395, 395, 395, 375: 395, 377: 395, 395, 395, 395, 395, 384: 395, 386: 395, 391: 395, 395, 395, 395, 395, 395, 395, 395, 395, 401: 395, 395, 395, 407: 395, 395, 395, 411: 395, 395, 395, 395, 395, 395, 395, 395, 421: 395, 395, 395, 395, 395, 395, 395, 395, 432: 395, 434: 395}, + {394, 394, 394, 394, 394, 394, 394, 394, 394, 394, 394, 394, 394, 394, 394, 394, 394, 394, 394, 394, 394, 394, 394, 394, 394, 394, 394, 394, 394, 394, 394, 394, 394, 394, 394, 394, 394, 394, 394, 394, 394, 394, 394, 394, 394, 394, 394, 394, 394, 394, 394, 394, 394, 394, 394, 394, 394, 394, 394, 394, 394, 394, 394, 394, 394, 394, 394, 394, 394, 394, 394, 394, 394, 394, 394, 394, 394, 394, 394, 394, 394, 394, 394, 394, 394, 394, 394, 394, 394, 394, 394, 394, 394, 394, 394, 394, 394, 394, 394, 394, 394, 394, 394, 394, 394, 394, 394, 394, 394, 394, 394, 394, 394, 394, 394, 394, 394, 394, 394, 394, 394, 394, 394, 394, 394, 394, 394, 394, 394, 394, 394, 394, 394, 394, 394, 394, 394, 394, 394, 394, 394, 394, 394, 394, 394, 394, 394, 394, 394, 394, 394, 394, 394, 394, 394, 394, 394, 394, 394, 394, 394, 394, 394, 394, 394, 394, 394, 394, 394, 394, 394, 394, 394, 394, 394, 394, 394, 394, 394, 394, 394, 394, 394, 394, 394, 394, 394, 394, 394, 394, 394, 394, 394, 394, 394, 394, 394, 394, 394, 394, 394, 394, 394, 394, 394, 394, 394, 394, 394, 394, 394, 394, 394, 394, 394, 394, 394, 394, 394, 394, 394, 394, 394, 394, 394, 394, 394, 394, 394, 394, 394, 394, 394, 394, 394, 394, 394, 394, 394, 394, 394, 394, 394, 394, 394, 394, 394, 394, 394, 394, 394, 394, 394, 394, 394, 394, 394, 394, 394, 394, 394, 394, 394, 394, 394, 394, 394, 394, 394, 394, 394, 394, 394, 394, 394, 394, 394, 394, 394, 394, 394, 394, 394, 394, 394, 394, 394, 394, 394, 394, 394, 394, 394, 394, 394, 394, 394, 394, 394, 394, 394, 394, 394, 394, 394, 394, 394, 394, 394, 394, 394, 394, 394, 394, 394, 394, 394, 394, 394, 394, 394, 394, 394, 394, 394, 394, 394, 394, 394, 394, 394, 394, 394, 394, 394, 394, 394, 394, 394, 394, 394, 394, 394, 394, 394, 394, 394, 394, 394, 394, 394, 394, 394, 394, 394, 394, 394, 394, 394, 394, 394, 394, 394, 394, 394, 394, 394, 394, 394, 394, 394, 394, 394, 394, 375: 394, 377: 394, 394, 394, 394, 394, 384: 394, 386: 394, 391: 394, 394, 394, 394, 394, 394, 394, 394, 394, 401: 394, 394, 394, 407: 394, 394, 394, 411: 394, 394, 394, 394, 394, 394, 394, 394, 421: 394, 394, 394, 394, 394, 394, 394, 394, 432: 394, 434: 394}, + {393, 393, 393, 393, 393, 393, 393, 393, 393, 393, 393, 393, 393, 393, 393, 393, 393, 393, 393, 393, 393, 393, 393, 393, 393, 393, 393, 393, 393, 393, 393, 393, 393, 393, 393, 393, 393, 393, 393, 393, 393, 393, 393, 393, 393, 393, 393, 393, 393, 
393, 393, 393, 393, 393, 393, 393, 393, 393, 393, 393, 393, 393, 393, 393, 393, 393, 393, 393, 393, 393, 393, 393, 393, 393, 393, 393, 393, 393, 393, 393, 393, 393, 393, 393, 393, 393, 393, 393, 393, 393, 393, 393, 393, 393, 393, 393, 393, 393, 393, 393, 393, 393, 393, 393, 393, 393, 393, 393, 393, 393, 393, 393, 393, 393, 393, 393, 393, 393, 393, 393, 393, 393, 393, 393, 393, 393, 393, 393, 393, 393, 393, 393, 393, 393, 393, 393, 393, 393, 393, 393, 393, 393, 393, 393, 393, 393, 393, 393, 393, 393, 393, 393, 393, 393, 393, 393, 393, 393, 393, 393, 393, 393, 393, 393, 393, 393, 393, 393, 393, 393, 393, 393, 393, 393, 393, 393, 393, 393, 393, 393, 393, 393, 393, 393, 393, 393, 393, 393, 393, 393, 393, 393, 393, 393, 393, 393, 393, 393, 393, 393, 393, 393, 393, 393, 393, 393, 393, 393, 393, 393, 393, 393, 393, 393, 393, 393, 393, 393, 393, 393, 393, 393, 393, 393, 393, 393, 393, 393, 393, 393, 393, 393, 393, 393, 393, 393, 393, 393, 393, 393, 393, 393, 393, 393, 393, 393, 393, 393, 393, 393, 393, 393, 393, 393, 393, 393, 393, 393, 393, 393, 393, 393, 393, 393, 393, 393, 393, 393, 393, 393, 393, 393, 393, 393, 393, 393, 393, 393, 393, 393, 393, 393, 393, 393, 393, 393, 393, 393, 393, 393, 393, 393, 393, 393, 393, 393, 393, 393, 393, 393, 393, 393, 393, 393, 393, 393, 393, 393, 393, 393, 393, 393, 393, 393, 393, 393, 393, 393, 393, 393, 393, 393, 393, 393, 393, 393, 393, 393, 393, 393, 393, 393, 393, 393, 393, 393, 393, 393, 393, 393, 393, 393, 393, 393, 393, 393, 393, 393, 393, 393, 393, 393, 393, 393, 393, 393, 393, 393, 393, 393, 393, 393, 393, 393, 393, 393, 393, 393, 393, 393, 393, 393, 393, 393, 375: 393, 377: 393, 393, 393, 393, 393, 384: 393, 386: 393, 391: 393, 393, 393, 393, 393, 393, 393, 393, 393, 401: 393, 393, 393, 407: 393, 393, 393, 411: 393, 393, 393, 393, 393, 393, 393, 393, 421: 393, 393, 393, 393, 393, 393, 393, 393, 432: 393, 434: 393}, + // 530 + {392, 392, 392, 392, 392, 392, 392, 392, 392, 392, 392, 392, 392, 392, 392, 392, 392, 392, 392, 392, 392, 392, 392, 392, 392, 392, 392, 392, 392, 392, 392, 392, 392, 392, 392, 392, 392, 392, 392, 392, 392, 392, 392, 392, 392, 392, 392, 392, 392, 392, 392, 392, 392, 392, 392, 392, 392, 392, 392, 392, 392, 392, 392, 392, 392, 392, 392, 392, 392, 392, 392, 392, 392, 392, 392, 392, 392, 392, 392, 392, 392, 392, 392, 392, 392, 392, 392, 392, 392, 392, 392, 392, 392, 392, 392, 392, 392, 392, 392, 392, 392, 392, 392, 392, 392, 392, 392, 392, 392, 392, 392, 392, 392, 392, 392, 392, 392, 392, 392, 392, 392, 392, 392, 392, 392, 392, 392, 392, 392, 392, 392, 392, 392, 392, 392, 392, 392, 392, 392, 392, 392, 392, 392, 392, 392, 392, 392, 392, 392, 392, 392, 392, 392, 392, 392, 392, 392, 392, 392, 392, 392, 392, 392, 392, 392, 392, 392, 392, 392, 392, 392, 392, 392, 392, 392, 392, 392, 392, 392, 392, 392, 392, 392, 392, 392, 392, 392, 392, 392, 392, 392, 392, 392, 392, 392, 392, 392, 392, 392, 392, 392, 392, 392, 392, 392, 392, 392, 392, 392, 392, 392, 392, 392, 392, 392, 392, 392, 392, 392, 392, 392, 392, 392, 392, 392, 392, 392, 392, 392, 392, 392, 392, 392, 392, 392, 392, 392, 392, 392, 392, 392, 392, 392, 392, 392, 392, 392, 392, 392, 392, 392, 392, 392, 392, 392, 392, 392, 392, 392, 392, 392, 392, 392, 392, 392, 392, 392, 392, 392, 392, 392, 392, 392, 392, 392, 392, 392, 392, 392, 392, 392, 392, 392, 392, 392, 392, 392, 392, 392, 392, 392, 392, 392, 392, 392, 392, 392, 392, 392, 392, 392, 392, 392, 392, 392, 392, 392, 392, 392, 392, 392, 392, 392, 392, 392, 392, 392, 392, 392, 392, 392, 392, 392, 392, 392, 392, 392, 392, 392, 392, 392, 
392, 392, 392, 392, 392, 392, 392, 392, 392, 392, 392, 392, 392, 392, 392, 392, 392, 392, 392, 392, 392, 392, 392, 392, 392, 392, 392, 392, 392, 392, 392, 392, 392, 392, 392, 392, 392, 392, 392, 392, 392, 392, 392, 375: 392, 377: 392, 392, 392, 392, 392, 384: 392, 386: 392, 391: 392, 392, 392, 392, 392, 392, 392, 392, 392, 401: 392, 392, 392, 407: 392, 392, 392, 411: 392, 392, 392, 392, 392, 392, 392, 392, 421: 392, 392, 392, 392, 392, 392, 392, 392, 432: 392, 434: 392}, + {391, 391, 391, 391, 391, 391, 391, 391, 391, 391, 391, 391, 391, 391, 391, 391, 391, 391, 391, 391, 391, 391, 391, 391, 391, 391, 391, 391, 391, 391, 391, 391, 391, 391, 391, 391, 391, 391, 391, 391, 391, 391, 391, 391, 391, 391, 391, 391, 391, 391, 391, 391, 391, 391, 391, 391, 391, 391, 391, 391, 391, 391, 391, 391, 391, 391, 391, 391, 391, 391, 391, 391, 391, 391, 391, 391, 391, 391, 391, 391, 391, 391, 391, 391, 391, 391, 391, 391, 391, 391, 391, 391, 391, 391, 391, 391, 391, 391, 391, 391, 391, 391, 391, 391, 391, 391, 391, 391, 391, 391, 391, 391, 391, 391, 391, 391, 391, 391, 391, 391, 391, 391, 391, 391, 391, 391, 391, 391, 391, 391, 391, 391, 391, 391, 391, 391, 391, 391, 391, 391, 391, 391, 391, 391, 391, 391, 391, 391, 391, 391, 391, 391, 391, 391, 391, 391, 391, 391, 391, 391, 391, 391, 391, 391, 391, 391, 391, 391, 391, 391, 391, 391, 391, 391, 391, 391, 391, 391, 391, 391, 391, 391, 391, 391, 391, 391, 391, 391, 391, 391, 391, 391, 391, 391, 391, 391, 391, 391, 391, 391, 391, 391, 391, 391, 391, 391, 391, 391, 391, 391, 391, 391, 391, 391, 391, 391, 391, 391, 391, 391, 391, 391, 391, 391, 391, 391, 391, 391, 391, 391, 391, 391, 391, 391, 391, 391, 391, 391, 391, 391, 391, 391, 391, 391, 391, 391, 391, 391, 391, 391, 391, 391, 391, 391, 391, 391, 391, 391, 391, 391, 391, 391, 391, 391, 391, 391, 391, 391, 391, 391, 391, 391, 391, 391, 391, 391, 391, 391, 391, 391, 391, 391, 391, 391, 391, 391, 391, 391, 391, 391, 391, 391, 391, 391, 391, 391, 391, 391, 391, 391, 391, 391, 391, 391, 391, 391, 391, 391, 391, 391, 391, 391, 391, 391, 391, 391, 391, 391, 391, 391, 391, 391, 391, 391, 391, 391, 391, 391, 391, 391, 391, 391, 391, 391, 391, 391, 391, 391, 391, 391, 391, 391, 391, 391, 391, 391, 391, 391, 391, 391, 391, 391, 391, 391, 391, 391, 391, 391, 391, 391, 391, 391, 391, 391, 391, 391, 391, 391, 391, 391, 391, 391, 391, 391, 375: 391, 377: 391, 391, 391, 391, 391, 384: 391, 386: 391, 391: 391, 391, 391, 391, 391, 391, 391, 391, 391, 401: 391, 391, 391, 407: 391, 391, 391, 411: 391, 391, 391, 391, 391, 391, 391, 391, 421: 391, 391, 391, 391, 391, 391, 391, 391, 432: 391, 434: 391}, + {390, 390, 390, 390, 390, 390, 390, 390, 390, 390, 390, 390, 390, 390, 390, 390, 390, 390, 390, 390, 390, 390, 390, 390, 390, 390, 390, 390, 390, 390, 390, 390, 390, 390, 390, 390, 390, 390, 390, 390, 390, 390, 390, 390, 390, 390, 390, 390, 390, 390, 390, 390, 390, 390, 390, 390, 390, 390, 390, 390, 390, 390, 390, 390, 390, 390, 390, 390, 390, 390, 390, 390, 390, 390, 390, 390, 390, 390, 390, 390, 390, 390, 390, 390, 390, 390, 390, 390, 390, 390, 390, 390, 390, 390, 390, 390, 390, 390, 390, 390, 390, 390, 390, 390, 390, 390, 390, 390, 390, 390, 390, 390, 390, 390, 390, 390, 390, 390, 390, 390, 390, 390, 390, 390, 390, 390, 390, 390, 390, 390, 390, 390, 390, 390, 390, 390, 390, 390, 390, 390, 390, 390, 390, 390, 390, 390, 390, 390, 390, 390, 390, 390, 390, 390, 390, 390, 390, 390, 390, 390, 390, 390, 390, 390, 390, 390, 390, 390, 390, 390, 390, 390, 390, 390, 390, 390, 390, 390, 390, 390, 390, 390, 390, 390, 390, 390, 390, 390, 
390, 390, 390, 390, 390, 390, 390, 390, 390, 390, 390, 390, 390, 390, 390, 390, 390, 390, 390, 390, 390, 390, 390, 390, 390, 390, 390, 390, 390, 390, 390, 390, 390, 390, 390, 390, 390, 390, 390, 390, 390, 390, 390, 390, 390, 390, 390, 390, 390, 390, 390, 390, 390, 390, 390, 390, 390, 390, 390, 390, 390, 390, 390, 390, 390, 390, 390, 390, 390, 390, 390, 390, 390, 390, 390, 390, 390, 390, 390, 390, 390, 390, 390, 390, 390, 390, 390, 390, 390, 390, 390, 390, 390, 390, 390, 390, 390, 390, 390, 390, 390, 390, 390, 390, 390, 390, 390, 390, 390, 390, 390, 390, 390, 390, 390, 390, 390, 390, 390, 390, 390, 390, 390, 390, 390, 390, 390, 390, 390, 390, 390, 390, 390, 390, 390, 390, 390, 390, 390, 390, 390, 390, 390, 390, 390, 390, 390, 390, 390, 390, 390, 390, 390, 390, 390, 390, 390, 390, 390, 390, 390, 390, 390, 390, 390, 390, 390, 390, 390, 390, 390, 390, 390, 390, 390, 390, 390, 390, 390, 390, 390, 390, 390, 390, 390, 390, 375: 390, 377: 390, 390, 390, 390, 390, 384: 390, 386: 390, 391: 390, 390, 390, 390, 390, 390, 390, 390, 390, 401: 390, 390, 390, 407: 390, 390, 390, 411: 390, 390, 390, 390, 390, 390, 390, 390, 421: 390, 390, 390, 390, 390, 390, 390, 390, 432: 390, 434: 390}, + {389, 389, 389, 389, 389, 389, 389, 389, 389, 389, 389, 389, 389, 389, 389, 389, 389, 389, 389, 389, 389, 389, 389, 389, 389, 389, 389, 389, 389, 389, 389, 389, 389, 389, 389, 389, 389, 389, 389, 389, 389, 389, 389, 389, 389, 389, 389, 389, 389, 389, 389, 389, 389, 389, 389, 389, 389, 389, 389, 389, 389, 389, 389, 389, 389, 389, 389, 389, 389, 389, 389, 389, 389, 389, 389, 389, 389, 389, 389, 389, 389, 389, 389, 389, 389, 389, 389, 389, 389, 389, 389, 389, 389, 389, 389, 389, 389, 389, 389, 389, 389, 389, 389, 389, 389, 389, 389, 389, 389, 389, 389, 389, 389, 389, 389, 389, 389, 389, 389, 389, 389, 389, 389, 389, 389, 389, 389, 389, 389, 389, 389, 389, 389, 389, 389, 389, 389, 389, 389, 389, 389, 389, 389, 389, 389, 389, 389, 389, 389, 389, 389, 389, 389, 389, 389, 389, 389, 389, 389, 389, 389, 389, 389, 389, 389, 389, 389, 389, 389, 389, 389, 389, 389, 389, 389, 389, 389, 389, 389, 389, 389, 389, 389, 389, 389, 389, 389, 389, 389, 389, 389, 389, 389, 389, 389, 389, 389, 389, 389, 389, 389, 389, 389, 389, 389, 389, 389, 389, 389, 389, 389, 389, 389, 389, 389, 389, 389, 389, 389, 389, 389, 389, 389, 389, 389, 389, 389, 389, 389, 389, 389, 389, 389, 389, 389, 389, 389, 389, 389, 389, 389, 389, 389, 389, 389, 389, 389, 389, 389, 389, 389, 389, 389, 389, 389, 389, 389, 389, 389, 389, 389, 389, 389, 389, 389, 389, 389, 389, 389, 389, 389, 389, 389, 389, 389, 389, 389, 389, 389, 389, 389, 389, 389, 389, 389, 389, 389, 389, 389, 389, 389, 389, 389, 389, 389, 389, 389, 389, 389, 389, 389, 389, 389, 389, 389, 389, 389, 389, 389, 389, 389, 389, 389, 389, 389, 389, 389, 389, 389, 389, 389, 389, 389, 389, 389, 389, 389, 389, 389, 389, 389, 389, 389, 389, 389, 389, 389, 389, 389, 389, 389, 389, 389, 389, 389, 389, 389, 389, 389, 389, 389, 389, 389, 389, 389, 389, 389, 389, 389, 389, 389, 389, 389, 389, 389, 389, 389, 389, 389, 389, 389, 389, 389, 389, 375: 389, 377: 389, 389, 389, 389, 389, 384: 389, 386: 389, 391: 389, 389, 389, 389, 389, 389, 389, 389, 389, 401: 389, 389, 389, 407: 389, 389, 389, 411: 389, 389, 389, 389, 389, 389, 389, 389, 421: 389, 389, 389, 389, 389, 389, 389, 389, 432: 389, 434: 389}, + {388, 388, 388, 388, 388, 388, 388, 388, 388, 388, 388, 388, 388, 388, 388, 388, 388, 388, 388, 388, 388, 388, 388, 388, 388, 388, 388, 388, 388, 388, 388, 388, 388, 388, 388, 388, 388, 388, 388, 388, 388, 388, 388, 388, 388, 
388, 388, 388, 388, 388, 388, 388, 388, 388, 388, 388, 388, 388, 388, 388, 388, 388, 388, 388, 388, 388, 388, 388, 388, 388, 388, 388, 388, 388, 388, 388, 388, 388, 388, 388, 388, 388, 388, 388, 388, 388, 388, 388, 388, 388, 388, 388, 388, 388, 388, 388, 388, 388, 388, 388, 388, 388, 388, 388, 388, 388, 388, 388, 388, 388, 388, 388, 388, 388, 388, 388, 388, 388, 388, 388, 388, 388, 388, 388, 388, 388, 388, 388, 388, 388, 388, 388, 388, 388, 388, 388, 388, 388, 388, 388, 388, 388, 388, 388, 388, 388, 388, 388, 388, 388, 388, 388, 388, 388, 388, 388, 388, 388, 388, 388, 388, 388, 388, 388, 388, 388, 388, 388, 388, 388, 388, 388, 388, 388, 388, 388, 388, 388, 388, 388, 388, 388, 388, 388, 388, 388, 388, 388, 388, 388, 388, 388, 388, 388, 388, 388, 388, 388, 388, 388, 388, 388, 388, 388, 388, 388, 388, 388, 388, 388, 388, 388, 388, 388, 388, 388, 388, 388, 388, 388, 388, 388, 388, 388, 388, 388, 388, 388, 388, 388, 388, 388, 388, 388, 388, 388, 388, 388, 388, 388, 388, 388, 388, 388, 388, 388, 388, 388, 388, 388, 388, 388, 388, 388, 388, 388, 388, 388, 388, 388, 388, 388, 388, 388, 388, 388, 388, 388, 388, 388, 388, 388, 388, 388, 388, 388, 388, 388, 388, 388, 388, 388, 388, 388, 388, 388, 388, 388, 388, 388, 388, 388, 388, 388, 388, 388, 388, 388, 388, 388, 388, 388, 388, 388, 388, 388, 388, 388, 388, 388, 388, 388, 388, 388, 388, 388, 388, 388, 388, 388, 388, 388, 388, 388, 388, 388, 388, 388, 388, 388, 388, 388, 388, 388, 388, 388, 388, 388, 388, 388, 388, 388, 388, 388, 388, 388, 388, 388, 388, 388, 388, 388, 388, 388, 388, 388, 388, 388, 388, 388, 388, 388, 388, 388, 388, 388, 388, 388, 388, 388, 388, 388, 388, 388, 375: 388, 377: 388, 388, 388, 388, 388, 384: 388, 386: 388, 391: 388, 388, 388, 388, 388, 388, 388, 388, 388, 401: 388, 388, 388, 407: 388, 388, 388, 411: 388, 388, 388, 388, 388, 388, 388, 388, 421: 388, 388, 388, 388, 388, 388, 388, 388, 432: 388, 434: 388}, + // 535 + {373: 1816}, + {373: 1809}, + {399, 399, 399, 399, 399, 399, 399, 399, 399, 399, 399, 399, 399, 399, 399, 399, 399, 399, 399, 399, 399, 399, 399, 399, 399, 399, 399, 399, 399, 399, 399, 399, 399, 399, 399, 399, 399, 399, 399, 399, 399, 399, 399, 399, 399, 399, 399, 399, 399, 399, 399, 399, 399, 399, 399, 399, 399, 399, 399, 399, 399, 399, 399, 399, 399, 399, 399, 399, 399, 399, 399, 399, 399, 399, 399, 399, 399, 399, 399, 399, 399, 399, 399, 399, 399, 399, 399, 399, 399, 399, 399, 399, 399, 399, 399, 399, 399, 399, 399, 399, 399, 399, 399, 399, 399, 399, 399, 399, 399, 399, 399, 399, 399, 399, 399, 399, 399, 399, 399, 399, 399, 399, 399, 399, 399, 399, 399, 399, 399, 399, 399, 399, 399, 399, 399, 399, 399, 399, 399, 399, 399, 399, 399, 399, 399, 399, 399, 399, 399, 399, 399, 399, 399, 399, 399, 399, 399, 399, 399, 399, 399, 399, 399, 399, 399, 399, 399, 399, 399, 399, 399, 399, 399, 399, 399, 399, 399, 399, 399, 399, 399, 399, 399, 399, 399, 399, 399, 399, 399, 399, 399, 399, 399, 399, 399, 399, 399, 399, 399, 399, 399, 399, 399, 399, 399, 399, 399, 399, 399, 399, 399, 399, 399, 399, 399, 399, 399, 399, 399, 399, 399, 399, 399, 399, 399, 399, 399, 399, 399, 399, 399, 399, 399, 399, 399, 399, 399, 399, 399, 399, 399, 399, 399, 399, 399, 399, 399, 399, 399, 399, 399, 399, 399, 399, 399, 399, 399, 399, 399, 399, 399, 399, 399, 399, 399, 399, 399, 399, 399, 399, 399, 399, 399, 399, 399, 399, 399, 399, 399, 399, 399, 399, 399, 399, 399, 399, 399, 399, 399, 399, 399, 399, 399, 399, 399, 399, 399, 399, 399, 399, 399, 399, 399, 399, 399, 399, 399, 399, 399, 399, 399, 399, 399, 399, 399, 399, 399, 399, 399, 399, 399, 
399, 399, 399, 399, 399, 399, 399, 399, 399, 399, 399, 399, 399, 399, 399, 399, 399, 399, 399, 399, 399, 399, 399, 399, 399, 399, 399, 399, 399, 399, 399, 399, 399, 399, 399, 399, 399, 399, 399, 399, 399, 399, 399, 399, 399, 399, 399, 399, 399, 399, 399, 399, 1806, 375: 399, 377: 399, 399, 399, 399, 399, 384: 399, 386: 399, 391: 399, 399, 399, 399, 399, 399, 399, 399, 399, 401: 399, 399, 399, 407: 399, 399, 399, 411: 399, 399, 399, 399, 399, 399, 399, 399, 421: 399, 399, 399, 399, 399, 399, 399, 399, 432: 399, 434: 399, 767: 1807}, + {373: 1804}, + {359, 359, 359, 359, 359, 359, 359, 359, 359, 359, 359, 359, 359, 359, 359, 359, 359, 359, 359, 359, 359, 359, 359, 359, 359, 359, 359, 359, 359, 359, 359, 359, 359, 359, 359, 359, 359, 359, 359, 359, 359, 359, 359, 359, 359, 359, 359, 359, 359, 359, 359, 359, 359, 359, 359, 359, 359, 359, 359, 359, 359, 359, 359, 359, 359, 359, 359, 359, 359, 359, 359, 359, 359, 359, 359, 359, 359, 359, 359, 359, 359, 359, 359, 359, 359, 359, 359, 359, 359, 359, 359, 359, 359, 359, 359, 359, 359, 359, 359, 359, 359, 359, 359, 359, 359, 359, 359, 359, 359, 359, 359, 359, 359, 359, 359, 359, 359, 359, 359, 359, 359, 359, 359, 359, 359, 359, 359, 359, 359, 359, 359, 359, 359, 359, 359, 359, 359, 359, 359, 359, 359, 359, 359, 359, 359, 359, 359, 359, 359, 359, 359, 359, 359, 359, 359, 359, 359, 359, 359, 359, 359, 359, 359, 359, 359, 359, 359, 359, 359, 359, 359, 359, 359, 359, 359, 359, 359, 359, 359, 359, 359, 359, 359, 359, 359, 359, 359, 359, 359, 359, 359, 359, 359, 359, 359, 359, 359, 359, 359, 359, 359, 359, 359, 359, 359, 359, 359, 359, 359, 359, 359, 359, 359, 359, 359, 359, 359, 359, 359, 359, 359, 359, 359, 359, 359, 359, 359, 359, 359, 359, 359, 359, 359, 359, 359, 359, 359, 359, 359, 359, 359, 359, 359, 359, 359, 359, 359, 359, 359, 359, 359, 359, 359, 359, 359, 359, 359, 359, 359, 359, 359, 359, 359, 359, 359, 359, 359, 359, 359, 359, 359, 359, 359, 359, 359, 359, 359, 359, 359, 359, 359, 359, 359, 359, 359, 359, 359, 359, 359, 359, 359, 359, 359, 359, 359, 359, 359, 359, 359, 359, 359, 359, 359, 359, 359, 359, 359, 359, 359, 359, 359, 359, 359, 359, 359, 359, 359, 359, 359, 359, 359, 359, 359, 359, 359, 359, 359, 359, 359, 359, 359, 359, 359, 359, 359, 359, 359, 359, 359, 359, 359, 359, 359, 359, 359, 359, 359, 359, 359, 359, 359, 359, 359, 359, 359, 359, 359, 359, 359, 359, 359, 359, 359, 359, 359, 359, 359, 359, 359, 359, 359, 359, 359, 1800, 375: 359, 377: 359, 359, 359, 359, 359, 384: 359, 386: 359, 391: 359, 359, 359, 359, 359, 359, 359, 359, 359, 401: 359, 359, 359, 407: 359, 359, 359, 411: 359, 359, 359, 359, 359, 359, 359, 359, 421: 359, 359, 359, 359, 359, 359, 359, 359, 432: 359, 434: 359, 745: 1799}, + // 540 + {373: 1794}, + {373: 1791}, + {373: 1786}, + {373: 1756}, + {373: 1742}, + // 545 + {373: 1736}, + {373: 1731}, + {373: 1728}, + {373: 1725}, + {373: 1712}, + // 550 + {187, 187, 187, 187, 187, 187, 187, 187, 187, 187, 187, 187, 187, 187, 187, 187, 187, 187, 187, 187, 187, 187, 187, 187, 187, 187, 187, 187, 187, 187, 187, 187, 187, 187, 187, 187, 187, 187, 187, 187, 187, 187, 187, 187, 187, 187, 187, 187, 187, 187, 187, 187, 187, 187, 187, 187, 187, 187, 187, 187, 187, 187, 187, 187, 187, 187, 187, 187, 187, 187, 187, 187, 187, 187, 187, 187, 187, 187, 187, 187, 187, 187, 187, 187, 187, 187, 187, 187, 187, 187, 187, 187, 187, 187, 187, 187, 187, 187, 187, 187, 187, 187, 187, 187, 187, 187, 187, 187, 187, 187, 187, 187, 187, 187, 187, 187, 187, 187, 187, 187, 187, 187, 187, 187, 187, 187, 187, 187, 187, 187, 187, 187, 187, 187, 187, 
187, 187, 187, 187, 187, 187, 187, 187, 187, 187, 187, 187, 187, 187, 187, 187, 187, 187, 187, 187, 187, 187, 187, 187, 187, 187, 187, 187, 187, 187, 187, 187, 187, 187, 187, 187, 187, 187, 187, 187, 187, 187, 187, 187, 187, 187, 187, 187, 187, 187, 187, 187, 187, 187, 187, 187, 187, 187, 187, 187, 187, 187, 187, 187, 187, 187, 187, 187, 187, 187, 187, 187, 187, 187, 187, 187, 187, 187, 187, 187, 187, 187, 187, 187, 187, 187, 187, 187, 187, 187, 187, 187, 187, 187, 187, 187, 187, 187, 187, 187, 187, 187, 187, 187, 187, 187, 187, 187, 187, 187, 187, 187, 187, 187, 187, 187, 187, 187, 187, 187, 187, 187, 187, 187, 187, 187, 187, 187, 187, 187, 187, 187, 187, 187, 187, 187, 187, 187, 187, 187, 187, 187, 187, 187, 187, 187, 187, 187, 187, 187, 187, 187, 187, 187, 187, 187, 187, 187, 187, 187, 187, 187, 187, 187, 187, 187, 187, 187, 187, 187, 187, 187, 187, 187, 187, 187, 187, 187, 187, 187, 187, 187, 187, 187, 187, 187, 187, 187, 187, 187, 187, 187, 187, 187, 187, 187, 187, 187, 187, 187, 187, 187, 187, 187, 187, 187, 187, 187, 187, 187, 187, 187, 187, 187, 187, 187, 187, 187, 187, 187, 187, 187, 187, 187, 187, 187, 187, 187, 187, 187, 187, 187, 187, 187, 187, 187, 187, 187, 375: 187, 377: 187, 187, 187, 187, 187, 384: 187, 386: 187, 391: 187, 187, 187, 187, 187, 187, 187, 187, 187, 401: 187, 187, 187, 407: 187, 187, 187, 411: 187, 187, 187, 187, 187, 187, 187, 187, 421: 187, 187, 187, 187, 187, 187, 187, 187, 432: 187, 434: 187}, + {186, 186, 186, 186, 186, 186, 186, 186, 186, 186, 186, 186, 186, 186, 186, 186, 186, 186, 186, 186, 186, 186, 186, 186, 186, 186, 186, 186, 186, 186, 186, 186, 186, 186, 186, 186, 186, 186, 186, 186, 186, 186, 186, 186, 186, 186, 186, 186, 186, 186, 186, 186, 186, 186, 186, 186, 186, 186, 186, 186, 186, 186, 186, 186, 186, 186, 186, 186, 186, 186, 186, 186, 186, 186, 186, 186, 186, 186, 186, 186, 186, 186, 186, 186, 186, 186, 186, 186, 186, 186, 186, 186, 186, 186, 186, 186, 186, 186, 186, 186, 186, 186, 186, 186, 186, 186, 186, 186, 186, 186, 186, 186, 186, 186, 186, 186, 186, 186, 186, 186, 186, 186, 186, 186, 186, 186, 186, 186, 186, 186, 186, 186, 186, 186, 186, 186, 186, 186, 186, 186, 186, 186, 186, 186, 186, 186, 186, 186, 186, 186, 186, 186, 186, 186, 186, 186, 186, 186, 186, 186, 186, 186, 186, 186, 186, 186, 186, 186, 186, 186, 186, 186, 186, 186, 186, 186, 186, 186, 186, 186, 186, 186, 186, 186, 186, 186, 186, 186, 186, 186, 186, 186, 186, 186, 186, 186, 186, 186, 186, 186, 186, 186, 186, 186, 186, 186, 186, 186, 186, 186, 186, 186, 186, 186, 186, 186, 186, 186, 186, 186, 186, 186, 186, 186, 186, 186, 186, 186, 186, 186, 186, 186, 186, 186, 186, 186, 186, 186, 186, 186, 186, 186, 186, 186, 186, 186, 186, 186, 186, 186, 186, 186, 186, 186, 186, 186, 186, 186, 186, 186, 186, 186, 186, 186, 186, 186, 186, 186, 186, 186, 186, 186, 186, 186, 186, 186, 186, 186, 186, 186, 186, 186, 186, 186, 186, 186, 186, 186, 186, 186, 186, 186, 186, 186, 186, 186, 186, 186, 186, 186, 186, 186, 186, 186, 186, 186, 186, 186, 186, 186, 186, 186, 186, 186, 186, 186, 186, 186, 186, 186, 186, 186, 186, 186, 186, 186, 186, 186, 186, 186, 186, 186, 186, 186, 186, 186, 186, 186, 186, 186, 186, 186, 186, 186, 186, 186, 186, 186, 186, 186, 186, 186, 186, 186, 186, 186, 186, 186, 186, 186, 186, 186, 186, 186, 186, 186, 186, 186, 186, 186, 186, 186, 186, 375: 186, 377: 186, 186, 186, 186, 186, 384: 186, 386: 186, 391: 186, 186, 186, 186, 186, 186, 186, 186, 186, 401: 186, 186, 186, 407: 186, 186, 186, 411: 186, 186, 186, 186, 186, 186, 186, 186, 421: 186, 186, 186, 186, 186, 186, 186, 
186, 432: 186, 434: 186}, + {185, 185, 185, 185, 185, 185, 185, 185, 185, 185, 185, 185, 185, 185, 185, 185, 185, 185, 185, 185, 185, 185, 185, 185, 185, 185, 185, 185, 185, 185, 185, 185, 185, 185, 185, 185, 185, 185, 185, 185, 185, 185, 185, 185, 185, 185, 185, 185, 185, 185, 185, 185, 185, 185, 185, 185, 185, 185, 185, 185, 185, 185, 185, 185, 185, 185, 185, 185, 185, 185, 185, 185, 185, 185, 185, 185, 185, 185, 185, 185, 185, 185, 185, 185, 185, 185, 185, 185, 185, 185, 185, 185, 185, 185, 185, 185, 185, 185, 185, 185, 185, 185, 185, 185, 185, 185, 185, 185, 185, 185, 185, 185, 185, 185, 185, 185, 185, 185, 185, 185, 185, 185, 185, 185, 185, 185, 185, 185, 185, 185, 185, 185, 185, 185, 185, 185, 185, 185, 185, 185, 185, 185, 185, 185, 185, 185, 185, 185, 185, 185, 185, 185, 185, 185, 185, 185, 185, 185, 185, 185, 185, 185, 185, 185, 185, 185, 185, 185, 185, 185, 185, 185, 185, 185, 185, 185, 185, 185, 185, 185, 185, 185, 185, 185, 185, 185, 185, 185, 185, 185, 185, 185, 185, 185, 185, 185, 185, 185, 185, 185, 185, 185, 185, 185, 185, 185, 185, 185, 185, 185, 185, 185, 185, 185, 185, 185, 185, 185, 185, 185, 185, 185, 185, 185, 185, 185, 185, 185, 185, 185, 185, 185, 185, 185, 185, 185, 185, 185, 185, 185, 185, 185, 185, 185, 185, 185, 185, 185, 185, 185, 185, 185, 185, 185, 185, 185, 185, 185, 185, 185, 185, 185, 185, 185, 185, 185, 185, 185, 185, 185, 185, 185, 185, 185, 185, 185, 185, 185, 185, 185, 185, 185, 185, 185, 185, 185, 185, 185, 185, 185, 185, 185, 185, 185, 185, 185, 185, 185, 185, 185, 185, 185, 185, 185, 185, 185, 185, 185, 185, 185, 185, 185, 185, 185, 185, 185, 185, 185, 185, 185, 185, 185, 185, 185, 185, 185, 185, 185, 185, 185, 185, 185, 185, 185, 185, 185, 185, 185, 185, 185, 185, 185, 185, 185, 185, 185, 185, 185, 185, 185, 185, 185, 185, 185, 185, 185, 185, 185, 185, 185, 185, 185, 185, 185, 185, 185, 185, 185, 185, 185, 185, 185, 185, 375: 185, 377: 185, 185, 185, 185, 185, 384: 185, 386: 185, 391: 185, 185, 185, 185, 185, 185, 185, 185, 185, 401: 185, 185, 185, 407: 185, 185, 185, 411: 185, 185, 185, 185, 185, 185, 185, 185, 421: 185, 185, 185, 185, 185, 185, 185, 185, 432: 185, 434: 185}, + {1309, 1332, 1217, 1442, 1436, 1426, 10: 1280, 1625, 1477, 1511, 1504, 1497, 1507, 1500, 1499, 1501, 1517, 1509, 1503, 1515, 1516, 1513, 1514, 1502, 1498, 1505, 1506, 1508, 1512, 1510, 1547, 1453, 1451, 1452, 1314, 1623, 1226, 1441, 1244, 1288, 1246, 1225, 1628, 1263, 1434, 1299, 1335, 1522, 1521, 1270, 1338, 1298, 1476, 1221, 1231, 1340, 1439, 1341, 1257, 1518, 1519, 1438, 1326, 1350, 1273, 1278, 1430, 1431, 1283, 1289, 1384, 1632, 1432, 1433, 1219, 1222, 1224, 1223, 1238, 1626, 1482, 1427, 1243, 1249, 1261, 1262, 1250, 1485, 1405, 1318, 1319, 1279, 1450, 1290, 1631, 1630, 1415, 1295, 1300, 1633, 1402, 1214, 1529, 1215, 1218, 1460, 1387, 1304, 1624, 1310, 1348, 1349, 1345, 1530, 1531, 1532, 1406, 1576, 1478, 1479, 1467, 1480, 1227, 1394, 1533, 1312, 1396, 1228, 1381, 1481, 1640, 1636, 1230, 1329, 1232, 1233, 1313, 1311, 1234, 1408, 1534, 1535, 1404, 1235, 1536, 1468, 1236, 1537, 1538, 1627, 1240, 1388, 1324, 1483, 1417, 1241, 1484, 1242, 1245, 1247, 1248, 1251, 1386, 1351, 1252, 1577, 1435, 1356, 1253, 1461, 1401, 1574, 1254, 1539, 1411, 1255, 1256, 1580, 1258, 1259, 1346, 1540, 1322, 1541, 1418, 1459, 1629, 1307, 1622, 1462, 1403, 1337, 1542, 1265, 1543, 1544, 1389, 1407, 1412, 1325, 1398, 1486, 1457, 1268, 1266, 1334, 1419, 1267, 1456, 1458, 1315, 1546, 1473, 1472, 1376, 1377, 1316, 1378, 1379, 1390, 1642, 1545, 1317, 1643, 1463, 1302, 1641, 1269, 1400, 1573, 1344, 1466, 
1469, 1420, 1487, 1488, 1464, 1465, 1353, 1470, 1548, 1454, 1354, 1331, 1285, 1524, 1575, 1410, 1422, 1425, 1352, 1271, 1475, 1474, 1525, 1367, 1550, 1368, 1272, 1343, 1362, 1363, 1364, 1489, 1637, 1370, 1369, 1274, 1549, 1395, 1275, 1528, 1527, 1383, 1424, 1276, 1437, 1327, 1455, 1380, 1328, 1638, 1277, 1385, 1639, 1320, 1490, 1644, 1429, 1393, 1372, 1471, 1333, 1373, 1374, 1281, 1423, 1382, 1375, 1282, 1305, 1414, 1523, 1416, 1336, 1339, 1443, 1444, 1445, 1446, 1447, 1448, 1449, 1578, 1491, 1358, 1494, 1495, 1493, 1492, 1357, 1428, 1284, 1554, 1555, 1556, 1557, 1579, 1551, 1397, 1287, 1286, 1552, 1553, 1355, 1413, 1409, 1421, 1440, 1391, 1291, 1496, 1561, 1562, 1563, 1564, 1565, 1566, 1568, 1567, 1569, 1570, 1571, 1520, 1294, 1323, 1572, 1297, 1330, 1392, 1635, 1558, 1559, 1560, 1347, 1634, 1526, 1399, 1618, 1671, 1674, 376: 1646, 378: 1655, 1669, 1668, 1699, 400: 1658, 404: 1616, 1677, 1650, 410: 1682, 419: 1645, 1647, 429: 1675, 1649, 1648, 433: 1676, 435: 1654, 1680, 1689, 1711, 1653, 1690, 1691, 1652, 1666, 1667, 1705, 1697, 1700, 1706, 1707, 1702, 1703, 1708, 1701, 1704, 1695, 1673, 1685, 1686, 1688, 1684, 1678, 1679, 1670, 1681, 1683, 1672, 1687, 1692, 1693, 523: 1657, 1212, 1213, 1211, 1663, 1659, 1651, 1662, 1660, 1661, 1694, 1698, 1696, 1656, 1665, 1709, 1710, 1664, 1621, 1620, 1619, 1713}, + {8: 1721, 392: 1720, 1719, 395: 1718, 1717, 1715, 547: 1716, 1714}, + // 555 + {1309, 1332, 1217, 1442, 1436, 1426, 10: 1280, 1625, 1477, 1511, 1504, 1497, 1507, 1500, 1499, 1501, 1517, 1509, 1503, 1515, 1516, 1513, 1514, 1502, 1498, 1505, 1506, 1508, 1512, 1510, 1547, 1453, 1451, 1452, 1314, 1623, 1226, 1441, 1244, 1288, 1246, 1225, 1628, 1263, 1434, 1299, 1335, 1522, 1521, 1270, 1338, 1298, 1476, 1221, 1231, 1340, 1439, 1341, 1257, 1518, 1519, 1438, 1326, 1350, 1273, 1278, 1430, 1431, 1283, 1289, 1384, 1632, 1432, 1433, 1219, 1222, 1224, 1223, 1238, 1626, 1482, 1427, 1243, 1249, 1261, 1262, 1250, 1485, 1405, 1318, 1319, 1279, 1450, 1290, 1631, 1630, 1415, 1295, 1300, 1633, 1402, 1214, 1529, 1215, 1218, 1460, 1387, 1304, 1624, 1310, 1348, 1349, 1345, 1530, 1531, 1532, 1406, 1576, 1478, 1479, 1467, 1480, 1227, 1394, 1533, 1312, 1396, 1228, 1381, 1481, 1640, 1636, 1230, 1329, 1232, 1233, 1313, 1311, 1234, 1408, 1534, 1535, 1404, 1235, 1536, 1468, 1236, 1537, 1538, 1627, 1240, 1388, 1324, 1483, 1417, 1241, 1484, 1242, 1245, 1247, 1248, 1251, 1386, 1351, 1252, 1577, 1435, 1356, 1253, 1461, 1401, 1574, 1254, 1539, 1411, 1255, 1256, 1580, 1258, 1259, 1346, 1540, 1322, 1541, 1418, 1459, 1629, 1307, 1622, 1462, 1403, 1337, 1542, 1265, 1543, 1544, 1389, 1407, 1412, 1325, 1398, 1486, 1457, 1268, 1266, 1334, 1419, 1267, 1456, 1458, 1315, 1546, 1473, 1472, 1376, 1377, 1316, 1378, 1379, 1390, 1642, 1545, 1317, 1643, 1463, 1302, 1641, 1269, 1400, 1573, 1344, 1466, 1469, 1420, 1487, 1488, 1464, 1465, 1353, 1470, 1548, 1454, 1354, 1331, 1285, 1524, 1575, 1410, 1422, 1425, 1352, 1271, 1475, 1474, 1525, 1367, 1550, 1368, 1272, 1343, 1362, 1363, 1364, 1489, 1637, 1370, 1369, 1274, 1549, 1395, 1275, 1528, 1527, 1383, 1424, 1276, 1437, 1327, 1455, 1380, 1328, 1638, 1277, 1385, 1639, 1320, 1490, 1644, 1429, 1393, 1372, 1471, 1333, 1373, 1374, 1281, 1423, 1382, 1375, 1282, 1305, 1414, 1523, 1416, 1336, 1339, 1443, 1444, 1445, 1446, 1447, 1448, 1449, 1578, 1491, 1358, 1494, 1495, 1493, 1492, 1357, 1428, 1284, 1554, 1555, 1556, 1557, 1579, 1551, 1397, 1287, 1286, 1552, 1553, 1355, 1413, 1409, 1421, 1440, 1391, 1291, 1496, 1561, 1562, 1563, 1564, 1565, 1566, 1568, 1567, 1569, 1570, 1571, 1520, 1294, 1323, 1572, 
1297, 1330, 1392, 1635, 1558, 1559, 1560, 1347, 1634, 1526, 1399, 1618, 1671, 1674, 376: 1646, 378: 1655, 1669, 1668, 1699, 400: 1658, 404: 1616, 1677, 1650, 410: 1682, 419: 1645, 1647, 429: 1675, 1649, 1648, 433: 1676, 435: 1654, 1680, 1689, 1711, 1653, 1690, 1691, 1652, 1666, 1667, 1705, 1697, 1700, 1706, 1707, 1702, 1703, 1708, 1701, 1704, 1695, 1673, 1685, 1686, 1688, 1684, 1678, 1679, 1670, 1681, 1683, 1672, 1687, 1692, 1693, 523: 1657, 1212, 1213, 1211, 1663, 1659, 1651, 1662, 1660, 1661, 1694, 1698, 1696, 1656, 1665, 1709, 1710, 1664, 1621, 1620, 1619, 1724}, + {1309, 1332, 1217, 1442, 1436, 1426, 10: 1280, 1625, 1477, 1511, 1504, 1497, 1507, 1500, 1499, 1501, 1517, 1509, 1503, 1515, 1516, 1513, 1514, 1502, 1498, 1505, 1506, 1508, 1512, 1510, 1547, 1453, 1451, 1452, 1314, 1623, 1226, 1441, 1244, 1288, 1246, 1225, 1628, 1263, 1434, 1299, 1335, 1522, 1521, 1270, 1338, 1298, 1476, 1221, 1231, 1340, 1439, 1341, 1257, 1518, 1519, 1438, 1326, 1350, 1273, 1278, 1430, 1431, 1283, 1289, 1384, 1632, 1432, 1433, 1219, 1222, 1224, 1223, 1238, 1626, 1482, 1427, 1243, 1249, 1261, 1262, 1250, 1485, 1405, 1318, 1319, 1279, 1450, 1290, 1631, 1630, 1415, 1295, 1300, 1633, 1402, 1214, 1529, 1215, 1218, 1460, 1387, 1304, 1624, 1310, 1348, 1349, 1345, 1530, 1531, 1532, 1406, 1576, 1478, 1479, 1467, 1480, 1227, 1394, 1533, 1312, 1396, 1228, 1381, 1481, 1640, 1636, 1230, 1329, 1232, 1233, 1313, 1311, 1234, 1408, 1534, 1535, 1404, 1235, 1536, 1468, 1236, 1537, 1538, 1627, 1240, 1388, 1324, 1483, 1417, 1241, 1484, 1242, 1245, 1247, 1248, 1251, 1386, 1351, 1252, 1577, 1435, 1356, 1253, 1461, 1401, 1574, 1254, 1539, 1411, 1255, 1256, 1580, 1258, 1259, 1346, 1540, 1322, 1541, 1418, 1459, 1629, 1307, 1622, 1462, 1403, 1337, 1542, 1265, 1543, 1544, 1389, 1407, 1412, 1325, 1398, 1486, 1457, 1268, 1266, 1334, 1419, 1267, 1456, 1458, 1315, 1546, 1473, 1472, 1376, 1377, 1316, 1378, 1379, 1390, 1642, 1545, 1317, 1643, 1463, 1302, 1641, 1269, 1400, 1573, 1344, 1466, 1469, 1420, 1487, 1488, 1464, 1465, 1353, 1470, 1548, 1454, 1354, 1331, 1285, 1524, 1575, 1410, 1422, 1425, 1352, 1271, 1475, 1474, 1525, 1367, 1550, 1368, 1272, 1343, 1362, 1363, 1364, 1489, 1637, 1370, 1369, 1274, 1549, 1395, 1275, 1528, 1527, 1383, 1424, 1276, 1437, 1327, 1455, 1380, 1328, 1638, 1277, 1385, 1639, 1320, 1490, 1644, 1429, 1393, 1372, 1471, 1333, 1373, 1374, 1281, 1423, 1382, 1375, 1282, 1305, 1414, 1523, 1416, 1336, 1339, 1443, 1444, 1445, 1446, 1447, 1448, 1449, 1578, 1491, 1358, 1494, 1495, 1493, 1492, 1357, 1428, 1284, 1554, 1555, 1556, 1557, 1579, 1551, 1397, 1287, 1286, 1552, 1553, 1355, 1413, 1409, 1421, 1440, 1391, 1291, 1496, 1561, 1562, 1563, 1564, 1565, 1566, 1568, 1567, 1569, 1570, 1571, 1520, 1294, 1323, 1572, 1297, 1330, 1392, 1635, 1558, 1559, 1560, 1347, 1634, 1526, 1399, 1618, 1671, 1674, 376: 1646, 378: 1655, 1669, 1668, 1699, 400: 1658, 404: 1616, 1677, 1650, 410: 1682, 419: 1645, 1647, 429: 1675, 1649, 1648, 433: 1676, 435: 1654, 1680, 1689, 1711, 1653, 1690, 1691, 1652, 1666, 1667, 1705, 1697, 1700, 1706, 1707, 1702, 1703, 1708, 1701, 1704, 1695, 1673, 1685, 1686, 1688, 1684, 1678, 1679, 1670, 1681, 1683, 1672, 1687, 1692, 1693, 523: 1657, 1212, 1213, 1211, 1663, 1659, 1651, 1662, 1660, 1661, 1694, 1698, 1696, 1656, 1665, 1709, 1710, 1664, 1621, 1620, 1619, 1723}, + {1309, 1332, 1217, 1442, 1436, 1426, 10: 1280, 1625, 1477, 1511, 1504, 1497, 1507, 1500, 1499, 1501, 1517, 1509, 1503, 1515, 1516, 1513, 1514, 1502, 1498, 1505, 1506, 1508, 1512, 1510, 1547, 1453, 1451, 1452, 1314, 1623, 1226, 1441, 1244, 1288, 1246, 1225, 
1628, 1263, 1434, 1299, 1335, 1522, 1521, 1270, 1338, 1298, 1476, 1221, 1231, 1340, 1439, 1341, 1257, 1518, 1519, 1438, 1326, 1350, 1273, 1278, 1430, 1431, 1283, 1289, 1384, 1632, 1432, 1433, 1219, 1222, 1224, 1223, 1238, 1626, 1482, 1427, 1243, 1249, 1261, 1262, 1250, 1485, 1405, 1318, 1319, 1279, 1450, 1290, 1631, 1630, 1415, 1295, 1300, 1633, 1402, 1214, 1529, 1215, 1218, 1460, 1387, 1304, 1624, 1310, 1348, 1349, 1345, 1530, 1531, 1532, 1406, 1576, 1478, 1479, 1467, 1480, 1227, 1394, 1533, 1312, 1396, 1228, 1381, 1481, 1640, 1636, 1230, 1329, 1232, 1233, 1313, 1311, 1234, 1408, 1534, 1535, 1404, 1235, 1536, 1468, 1236, 1537, 1538, 1627, 1240, 1388, 1324, 1483, 1417, 1241, 1484, 1242, 1245, 1247, 1248, 1251, 1386, 1351, 1252, 1577, 1435, 1356, 1253, 1461, 1401, 1574, 1254, 1539, 1411, 1255, 1256, 1580, 1258, 1259, 1346, 1540, 1322, 1541, 1418, 1459, 1629, 1307, 1622, 1462, 1403, 1337, 1542, 1265, 1543, 1544, 1389, 1407, 1412, 1325, 1398, 1486, 1457, 1268, 1266, 1334, 1419, 1267, 1456, 1458, 1315, 1546, 1473, 1472, 1376, 1377, 1316, 1378, 1379, 1390, 1642, 1545, 1317, 1643, 1463, 1302, 1641, 1269, 1400, 1573, 1344, 1466, 1469, 1420, 1487, 1488, 1464, 1465, 1353, 1470, 1548, 1454, 1354, 1331, 1285, 1524, 1575, 1410, 1422, 1425, 1352, 1271, 1475, 1474, 1525, 1367, 1550, 1368, 1272, 1343, 1362, 1363, 1364, 1489, 1637, 1370, 1369, 1274, 1549, 1395, 1275, 1528, 1527, 1383, 1424, 1276, 1437, 1327, 1455, 1380, 1328, 1638, 1277, 1385, 1639, 1320, 1490, 1644, 1429, 1393, 1372, 1471, 1333, 1373, 1374, 1281, 1423, 1382, 1375, 1282, 1305, 1414, 1523, 1416, 1336, 1339, 1443, 1444, 1445, 1446, 1447, 1448, 1449, 1578, 1491, 1358, 1494, 1495, 1493, 1492, 1357, 1428, 1284, 1554, 1555, 1556, 1557, 1579, 1551, 1397, 1287, 1286, 1552, 1553, 1355, 1413, 1409, 1421, 1440, 1391, 1291, 1496, 1561, 1562, 1563, 1564, 1565, 1566, 1568, 1567, 1569, 1570, 1571, 1520, 1294, 1323, 1572, 1297, 1330, 1392, 1635, 1558, 1559, 1560, 1347, 1634, 1526, 1399, 1618, 1671, 1674, 376: 1646, 378: 1655, 1669, 1668, 1699, 400: 1658, 404: 1616, 1677, 1650, 410: 1682, 419: 1645, 1647, 429: 1675, 1649, 1648, 433: 1676, 435: 1654, 1680, 1689, 1711, 1653, 1690, 1691, 1652, 1666, 1667, 1705, 1697, 1700, 1706, 1707, 1702, 1703, 1708, 1701, 1704, 1695, 1673, 1685, 1686, 1688, 1684, 1678, 1679, 1670, 1681, 1683, 1672, 1687, 1692, 1693, 523: 1657, 1212, 1213, 1211, 1663, 1659, 1651, 1662, 1660, 1661, 1694, 1698, 1696, 1656, 1665, 1709, 1710, 1664, 1621, 1620, 1619, 1722}, + {968, 968, 968, 968, 968, 968, 10: 968, 968, 968, 968, 968, 968, 968, 968, 968, 968, 968, 968, 968, 968, 968, 968, 968, 968, 968, 968, 968, 968, 968, 968, 968, 968, 968, 968, 968, 968, 968, 968, 968, 968, 968, 968, 968, 968, 968, 968, 968, 968, 968, 968, 968, 968, 968, 968, 968, 968, 968, 968, 968, 968, 968, 968, 968, 968, 968, 968, 968, 968, 968, 968, 968, 968, 968, 968, 968, 968, 968, 968, 968, 968, 968, 968, 968, 968, 968, 968, 968, 968, 968, 968, 968, 968, 968, 968, 968, 968, 968, 968, 968, 968, 968, 968, 968, 968, 968, 968, 968, 968, 968, 968, 968, 968, 968, 968, 968, 968, 968, 968, 968, 968, 968, 968, 968, 968, 968, 968, 968, 968, 968, 968, 968, 968, 968, 968, 968, 968, 968, 968, 968, 968, 968, 968, 968, 968, 968, 968, 968, 968, 968, 968, 968, 968, 968, 968, 968, 968, 968, 968, 968, 968, 968, 968, 968, 968, 968, 968, 968, 968, 968, 968, 968, 968, 968, 968, 968, 968, 968, 968, 968, 968, 968, 968, 968, 968, 968, 968, 968, 968, 968, 968, 968, 968, 968, 968, 968, 968, 968, 968, 968, 968, 968, 968, 968, 968, 968, 968, 968, 968, 968, 968, 968, 968, 968, 968, 968, 968, 968, 
968, 968, 968, 968, 968, 968, 968, 968, 968, 968, 968, 968, 968, 968, 968, 968, 968, 968, 968, 968, 968, 968, 968, 968, 968, 968, 968, 968, 968, 968, 968, 968, 968, 968, 968, 968, 968, 968, 968, 968, 968, 968, 968, 968, 968, 968, 968, 968, 968, 968, 968, 968, 968, 968, 968, 968, 968, 968, 968, 968, 968, 968, 968, 968, 968, 968, 968, 968, 968, 968, 968, 968, 968, 968, 968, 968, 968, 968, 968, 968, 968, 968, 968, 968, 968, 968, 968, 968, 968, 968, 968, 968, 968, 968, 968, 968, 968, 968, 968, 968, 968, 968, 968, 968, 968, 968, 968, 968, 968, 968, 968, 968, 968, 968, 968, 968, 968, 968, 968, 968, 968, 968, 968, 968, 968, 968, 968, 968, 968, 968, 968, 968, 968, 968, 968, 968, 968, 968, 968, 968, 968, 968, 968, 968, 968, 968, 968, 968, 968, 968, 968, 968, 968, 968, 376: 968, 378: 968, 968, 968, 968, 400: 968, 404: 968, 968, 968, 410: 968, 419: 968, 968, 429: 968, 968, 968, 433: 968, 435: 968, 968, 968, 968, 968, 968, 968, 968, 968, 968, 968, 968, 968, 968, 968, 968, 968, 968, 968, 968, 968, 968, 968, 968, 968, 968, 968, 968, 968, 968, 968, 968, 968, 968, 968}, + {967, 967, 967, 967, 967, 967, 10: 967, 967, 967, 967, 967, 967, 967, 967, 967, 967, 967, 967, 967, 967, 967, 967, 967, 967, 967, 967, 967, 967, 967, 967, 967, 967, 967, 967, 967, 967, 967, 967, 967, 967, 967, 967, 967, 967, 967, 967, 967, 967, 967, 967, 967, 967, 967, 967, 967, 967, 967, 967, 967, 967, 967, 967, 967, 967, 967, 967, 967, 967, 967, 967, 967, 967, 967, 967, 967, 967, 967, 967, 967, 967, 967, 967, 967, 967, 967, 967, 967, 967, 967, 967, 967, 967, 967, 967, 967, 967, 967, 967, 967, 967, 967, 967, 967, 967, 967, 967, 967, 967, 967, 967, 967, 967, 967, 967, 967, 967, 967, 967, 967, 967, 967, 967, 967, 967, 967, 967, 967, 967, 967, 967, 967, 967, 967, 967, 967, 967, 967, 967, 967, 967, 967, 967, 967, 967, 967, 967, 967, 967, 967, 967, 967, 967, 967, 967, 967, 967, 967, 967, 967, 967, 967, 967, 967, 967, 967, 967, 967, 967, 967, 967, 967, 967, 967, 967, 967, 967, 967, 967, 967, 967, 967, 967, 967, 967, 967, 967, 967, 967, 967, 967, 967, 967, 967, 967, 967, 967, 967, 967, 967, 967, 967, 967, 967, 967, 967, 967, 967, 967, 967, 967, 967, 967, 967, 967, 967, 967, 967, 967, 967, 967, 967, 967, 967, 967, 967, 967, 967, 967, 967, 967, 967, 967, 967, 967, 967, 967, 967, 967, 967, 967, 967, 967, 967, 967, 967, 967, 967, 967, 967, 967, 967, 967, 967, 967, 967, 967, 967, 967, 967, 967, 967, 967, 967, 967, 967, 967, 967, 967, 967, 967, 967, 967, 967, 967, 967, 967, 967, 967, 967, 967, 967, 967, 967, 967, 967, 967, 967, 967, 967, 967, 967, 967, 967, 967, 967, 967, 967, 967, 967, 967, 967, 967, 967, 967, 967, 967, 967, 967, 967, 967, 967, 967, 967, 967, 967, 967, 967, 967, 967, 967, 967, 967, 967, 967, 967, 967, 967, 967, 967, 967, 967, 967, 967, 967, 967, 967, 967, 967, 967, 967, 967, 967, 967, 967, 967, 967, 967, 967, 967, 967, 967, 967, 967, 967, 967, 967, 967, 967, 967, 967, 967, 967, 967, 967, 967, 967, 967, 967, 967, 967, 967, 376: 967, 378: 967, 967, 967, 967, 400: 967, 404: 967, 967, 967, 410: 967, 419: 967, 967, 429: 967, 967, 967, 433: 967, 435: 967, 967, 967, 967, 967, 967, 967, 967, 967, 967, 967, 967, 967, 967, 967, 967, 967, 967, 967, 967, 967, 967, 967, 967, 967, 967, 967, 967, 967, 967, 967, 967, 967, 967, 967}, + // 560 + {966, 966, 966, 966, 966, 966, 10: 966, 966, 966, 966, 966, 966, 966, 966, 966, 966, 966, 966, 966, 966, 966, 966, 966, 966, 966, 966, 966, 966, 966, 966, 966, 966, 966, 966, 966, 966, 966, 966, 966, 966, 966, 966, 966, 966, 966, 966, 966, 966, 966, 966, 966, 966, 966, 966, 966, 966, 966, 966, 966, 966, 966, 
966, 966, 966, 966, 966, 966, 966, 966, 966, 966, 966, 966, 966, 966, 966, 966, 966, 966, 966, 966, 966, 966, 966, 966, 966, 966, 966, 966, 966, 966, 966, 966, 966, 966, 966, 966, 966, 966, 966, 966, 966, 966, 966, 966, 966, 966, 966, 966, 966, 966, 966, 966, 966, 966, 966, 966, 966, 966, 966, 966, 966, 966, 966, 966, 966, 966, 966, 966, 966, 966, 966, 966, 966, 966, 966, 966, 966, 966, 966, 966, 966, 966, 966, 966, 966, 966, 966, 966, 966, 966, 966, 966, 966, 966, 966, 966, 966, 966, 966, 966, 966, 966, 966, 966, 966, 966, 966, 966, 966, 966, 966, 966, 966, 966, 966, 966, 966, 966, 966, 966, 966, 966, 966, 966, 966, 966, 966, 966, 966, 966, 966, 966, 966, 966, 966, 966, 966, 966, 966, 966, 966, 966, 966, 966, 966, 966, 966, 966, 966, 966, 966, 966, 966, 966, 966, 966, 966, 966, 966, 966, 966, 966, 966, 966, 966, 966, 966, 966, 966, 966, 966, 966, 966, 966, 966, 966, 966, 966, 966, 966, 966, 966, 966, 966, 966, 966, 966, 966, 966, 966, 966, 966, 966, 966, 966, 966, 966, 966, 966, 966, 966, 966, 966, 966, 966, 966, 966, 966, 966, 966, 966, 966, 966, 966, 966, 966, 966, 966, 966, 966, 966, 966, 966, 966, 966, 966, 966, 966, 966, 966, 966, 966, 966, 966, 966, 966, 966, 966, 966, 966, 966, 966, 966, 966, 966, 966, 966, 966, 966, 966, 966, 966, 966, 966, 966, 966, 966, 966, 966, 966, 966, 966, 966, 966, 966, 966, 966, 966, 966, 966, 966, 966, 966, 966, 966, 966, 966, 966, 966, 966, 966, 966, 966, 966, 966, 966, 966, 966, 966, 966, 966, 966, 966, 966, 966, 966, 966, 966, 966, 966, 966, 966, 966, 966, 966, 966, 966, 966, 966, 966, 376: 966, 378: 966, 966, 966, 966, 400: 966, 404: 966, 966, 966, 410: 966, 419: 966, 966, 429: 966, 966, 966, 433: 966, 435: 966, 966, 966, 966, 966, 966, 966, 966, 966, 966, 966, 966, 966, 966, 966, 966, 966, 966, 966, 966, 966, 966, 966, 966, 966, 966, 966, 966, 966, 966, 966, 966, 966, 966, 966}, + {965, 965, 965, 965, 965, 965, 10: 965, 965, 965, 965, 965, 965, 965, 965, 965, 965, 965, 965, 965, 965, 965, 965, 965, 965, 965, 965, 965, 965, 965, 965, 965, 965, 965, 965, 965, 965, 965, 965, 965, 965, 965, 965, 965, 965, 965, 965, 965, 965, 965, 965, 965, 965, 965, 965, 965, 965, 965, 965, 965, 965, 965, 965, 965, 965, 965, 965, 965, 965, 965, 965, 965, 965, 965, 965, 965, 965, 965, 965, 965, 965, 965, 965, 965, 965, 965, 965, 965, 965, 965, 965, 965, 965, 965, 965, 965, 965, 965, 965, 965, 965, 965, 965, 965, 965, 965, 965, 965, 965, 965, 965, 965, 965, 965, 965, 965, 965, 965, 965, 965, 965, 965, 965, 965, 965, 965, 965, 965, 965, 965, 965, 965, 965, 965, 965, 965, 965, 965, 965, 965, 965, 965, 965, 965, 965, 965, 965, 965, 965, 965, 965, 965, 965, 965, 965, 965, 965, 965, 965, 965, 965, 965, 965, 965, 965, 965, 965, 965, 965, 965, 965, 965, 965, 965, 965, 965, 965, 965, 965, 965, 965, 965, 965, 965, 965, 965, 965, 965, 965, 965, 965, 965, 965, 965, 965, 965, 965, 965, 965, 965, 965, 965, 965, 965, 965, 965, 965, 965, 965, 965, 965, 965, 965, 965, 965, 965, 965, 965, 965, 965, 965, 965, 965, 965, 965, 965, 965, 965, 965, 965, 965, 965, 965, 965, 965, 965, 965, 965, 965, 965, 965, 965, 965, 965, 965, 965, 965, 965, 965, 965, 965, 965, 965, 965, 965, 965, 965, 965, 965, 965, 965, 965, 965, 965, 965, 965, 965, 965, 965, 965, 965, 965, 965, 965, 965, 965, 965, 965, 965, 965, 965, 965, 965, 965, 965, 965, 965, 965, 965, 965, 965, 965, 965, 965, 965, 965, 965, 965, 965, 965, 965, 965, 965, 965, 965, 965, 965, 965, 965, 965, 965, 965, 965, 965, 965, 965, 965, 965, 965, 965, 965, 965, 965, 965, 965, 965, 965, 965, 965, 965, 965, 965, 965, 965, 965, 965, 965, 965, 965, 965, 
965, 965, 965, 965, 965, 965, 965, 965, 965, 965, 965, 965, 965, 965, 965, 965, 965, 965, 965, 965, 965, 965, 965, 965, 965, 965, 965, 965, 965, 965, 965, 965, 376: 965, 378: 965, 965, 965, 965, 400: 965, 404: 965, 965, 965, 410: 965, 419: 965, 965, 429: 965, 965, 965, 433: 965, 435: 965, 965, 965, 965, 965, 965, 965, 965, 965, 965, 965, 965, 965, 965, 965, 965, 965, 965, 965, 965, 965, 965, 965, 965, 965, 965, 965, 965, 965, 965, 965, 965, 965, 965, 965}, + {363, 363, 363, 363, 363, 363, 363, 363, 363, 363, 363, 363, 363, 363, 363, 363, 363, 363, 363, 363, 363, 363, 363, 363, 363, 363, 363, 363, 363, 363, 363, 363, 363, 363, 363, 363, 363, 363, 363, 363, 363, 363, 363, 363, 363, 363, 363, 363, 363, 363, 363, 363, 363, 363, 363, 363, 363, 363, 363, 363, 363, 363, 363, 363, 363, 363, 363, 363, 363, 363, 363, 363, 363, 363, 363, 363, 363, 363, 363, 363, 363, 363, 363, 363, 363, 363, 363, 363, 363, 363, 363, 363, 363, 363, 363, 363, 363, 363, 363, 363, 363, 363, 363, 363, 363, 363, 363, 363, 363, 363, 363, 363, 363, 363, 363, 363, 363, 363, 363, 363, 363, 363, 363, 363, 363, 363, 363, 363, 363, 363, 363, 363, 363, 363, 363, 363, 363, 363, 363, 363, 363, 363, 363, 363, 363, 363, 363, 363, 363, 363, 363, 363, 363, 363, 363, 363, 363, 363, 363, 363, 363, 363, 363, 363, 363, 363, 363, 363, 363, 363, 363, 363, 363, 363, 363, 363, 363, 363, 363, 363, 363, 363, 363, 363, 363, 363, 363, 363, 363, 363, 363, 363, 363, 363, 363, 363, 363, 363, 363, 363, 363, 363, 363, 363, 363, 363, 363, 363, 363, 363, 363, 363, 363, 363, 363, 363, 363, 363, 363, 363, 363, 363, 363, 363, 363, 363, 363, 363, 363, 363, 363, 363, 363, 363, 363, 363, 363, 363, 363, 363, 363, 363, 363, 363, 363, 363, 363, 363, 363, 363, 363, 363, 363, 363, 363, 363, 363, 363, 363, 363, 363, 363, 363, 363, 363, 363, 363, 363, 363, 363, 363, 363, 363, 363, 363, 363, 363, 363, 363, 363, 363, 363, 363, 363, 363, 363, 363, 363, 363, 363, 363, 363, 363, 363, 363, 363, 363, 363, 363, 363, 363, 363, 363, 363, 363, 363, 363, 363, 363, 363, 363, 363, 363, 363, 363, 363, 363, 363, 363, 363, 363, 363, 363, 363, 363, 363, 363, 363, 363, 363, 363, 363, 363, 363, 363, 363, 363, 363, 363, 363, 363, 363, 363, 363, 363, 363, 363, 363, 363, 363, 363, 363, 363, 363, 363, 363, 363, 363, 363, 363, 363, 363, 363, 363, 363, 363, 363, 363, 363, 363, 363, 363, 363, 375: 363, 377: 363, 363, 363, 363, 363, 384: 363, 386: 363, 391: 363, 363, 363, 363, 363, 363, 363, 363, 363, 401: 363, 363, 363, 407: 363, 363, 363, 411: 363, 363, 363, 363, 363, 363, 363, 363, 421: 363, 363, 363, 363, 363, 363, 363, 363, 432: 363, 434: 363}, + {972, 972, 972, 972, 972, 972, 972, 972, 972, 972, 972, 972, 972, 972, 972, 972, 972, 972, 972, 972, 972, 972, 972, 972, 972, 972, 972, 972, 972, 972, 972, 972, 972, 972, 972, 972, 972, 972, 972, 972, 972, 972, 972, 972, 972, 972, 972, 972, 972, 972, 972, 972, 972, 972, 972, 972, 972, 972, 972, 972, 972, 972, 972, 972, 972, 972, 972, 972, 972, 972, 972, 972, 972, 972, 972, 972, 972, 972, 972, 972, 972, 972, 972, 972, 972, 972, 972, 972, 972, 972, 972, 972, 972, 972, 972, 972, 972, 972, 972, 972, 972, 972, 972, 972, 972, 972, 972, 972, 972, 972, 972, 972, 972, 972, 972, 972, 972, 972, 972, 972, 972, 972, 972, 972, 972, 972, 972, 972, 972, 972, 972, 972, 972, 972, 972, 972, 972, 972, 972, 972, 972, 972, 972, 972, 972, 972, 972, 972, 972, 972, 972, 972, 972, 972, 972, 972, 972, 972, 972, 972, 972, 972, 972, 972, 972, 972, 972, 972, 972, 972, 972, 972, 972, 972, 972, 972, 972, 972, 972, 972, 972, 972, 972, 972, 972, 972, 972, 972, 972, 972, 972, 972, 
972, 972, 972, 972, 972, 972, 972, 972, 972, 972, 972, 972, 972, 972, 972, 972, 972, 972, 972, 972, 972, 972, 972, 972, 972, 972, 972, 972, 972, 972, 972, 972, 972, 972, 972, 972, 972, 972, 972, 972, 972, 972, 972, 972, 972, 972, 972, 972, 972, 972, 972, 972, 972, 972, 972, 972, 972, 972, 972, 972, 972, 972, 972, 972, 972, 972, 972, 972, 972, 972, 972, 972, 972, 972, 972, 972, 972, 972, 972, 972, 972, 972, 972, 972, 972, 972, 972, 972, 972, 972, 972, 972, 972, 972, 972, 972, 972, 972, 972, 972, 972, 972, 972, 972, 972, 972, 972, 972, 972, 972, 972, 972, 972, 972, 972, 972, 972, 972, 972, 972, 972, 972, 972, 972, 972, 972, 972, 972, 972, 972, 972, 972, 972, 972, 972, 972, 972, 972, 972, 972, 972, 972, 972, 972, 972, 972, 972, 972, 972, 972, 972, 972, 972, 972, 972, 972, 972, 972, 972, 972, 972, 972, 972, 972, 972, 972, 972, 972, 972, 972, 972, 972, 972, 972, 972, 972, 972, 972, 972, 972, 375: 972, 378: 972, 384: 972, 386: 972, 391: 972, 972, 972, 972, 972, 972, 972, 972, 972, 402: 972, 407: 972, 972, 972, 547: 1716, 1714}, + {973, 973, 973, 973, 973, 973, 973, 973, 973, 973, 973, 973, 973, 973, 973, 973, 973, 973, 973, 973, 973, 973, 973, 973, 973, 973, 973, 973, 973, 973, 973, 973, 973, 973, 973, 973, 973, 973, 973, 973, 973, 973, 973, 973, 973, 973, 973, 973, 973, 973, 973, 973, 973, 973, 973, 973, 973, 973, 973, 973, 973, 973, 973, 973, 973, 973, 973, 973, 973, 973, 973, 973, 973, 973, 973, 973, 973, 973, 973, 973, 973, 973, 973, 973, 973, 973, 973, 973, 973, 973, 973, 973, 973, 973, 973, 973, 973, 973, 973, 973, 973, 973, 973, 973, 973, 973, 973, 973, 973, 973, 973, 973, 973, 973, 973, 973, 973, 973, 973, 973, 973, 973, 973, 973, 973, 973, 973, 973, 973, 973, 973, 973, 973, 973, 973, 973, 973, 973, 973, 973, 973, 973, 973, 973, 973, 973, 973, 973, 973, 973, 973, 973, 973, 973, 973, 973, 973, 973, 973, 973, 973, 973, 973, 973, 973, 973, 973, 973, 973, 973, 973, 973, 973, 973, 973, 973, 973, 973, 973, 973, 973, 973, 973, 973, 973, 973, 973, 973, 973, 973, 973, 973, 973, 973, 973, 973, 973, 973, 973, 973, 973, 973, 973, 973, 973, 973, 973, 973, 973, 973, 973, 973, 973, 973, 973, 973, 973, 973, 973, 973, 973, 973, 973, 973, 973, 973, 973, 973, 973, 973, 973, 973, 973, 973, 973, 973, 973, 973, 973, 973, 973, 973, 973, 973, 973, 973, 973, 973, 973, 973, 973, 973, 973, 973, 973, 973, 973, 973, 973, 973, 973, 973, 973, 973, 973, 973, 973, 973, 973, 973, 973, 973, 973, 973, 973, 973, 973, 973, 973, 973, 973, 973, 973, 973, 973, 973, 973, 973, 973, 973, 973, 973, 973, 973, 973, 973, 973, 973, 973, 973, 973, 973, 973, 973, 973, 973, 973, 973, 973, 973, 973, 973, 973, 973, 973, 973, 973, 973, 973, 973, 973, 973, 973, 973, 973, 973, 973, 973, 973, 973, 973, 973, 973, 973, 973, 973, 973, 973, 973, 973, 973, 973, 973, 973, 973, 973, 973, 973, 973, 973, 973, 973, 973, 973, 973, 973, 973, 973, 973, 973, 973, 973, 973, 973, 973, 973, 973, 973, 973, 973, 973, 973, 375: 973, 378: 973, 384: 973, 386: 973, 391: 973, 1720, 1719, 973, 973, 973, 973, 973, 973, 402: 973, 407: 973, 973, 973, 547: 1716, 1714}, + // 565 + {974, 974, 974, 974, 974, 974, 974, 974, 974, 974, 974, 974, 974, 974, 974, 974, 974, 974, 974, 974, 974, 974, 974, 974, 974, 974, 974, 974, 974, 974, 974, 974, 974, 974, 974, 974, 974, 974, 974, 974, 974, 974, 974, 974, 974, 974, 974, 974, 974, 974, 974, 974, 974, 974, 974, 974, 974, 974, 974, 974, 974, 974, 974, 974, 974, 974, 974, 974, 974, 974, 974, 974, 974, 974, 974, 974, 974, 974, 974, 974, 974, 974, 974, 974, 974, 974, 974, 974, 974, 974, 974, 974, 974, 974, 974, 974, 974, 974, 974, 974, 
974, 974, 974, 974, 974, 974, 974, 974, 974, 974, 974, 974, 974, 974, 974, 974, 974, 974, 974, 974, 974, 974, 974, 974, 974, 974, 974, 974, 974, 974, 974, 974, 974, 974, 974, 974, 974, 974, 974, 974, 974, 974, 974, 974, 974, 974, 974, 974, 974, 974, 974, 974, 974, 974, 974, 974, 974, 974, 974, 974, 974, 974, 974, 974, 974, 974, 974, 974, 974, 974, 974, 974, 974, 974, 974, 974, 974, 974, 974, 974, 974, 974, 974, 974, 974, 974, 974, 974, 974, 974, 974, 974, 974, 974, 974, 974, 974, 974, 974, 974, 974, 974, 974, 974, 974, 974, 974, 974, 974, 974, 974, 974, 974, 974, 974, 974, 974, 974, 974, 974, 974, 974, 974, 974, 974, 974, 974, 974, 974, 974, 974, 974, 974, 974, 974, 974, 974, 974, 974, 974, 974, 974, 974, 974, 974, 974, 974, 974, 974, 974, 974, 974, 974, 974, 974, 974, 974, 974, 974, 974, 974, 974, 974, 974, 974, 974, 974, 974, 974, 974, 974, 974, 974, 974, 974, 974, 974, 974, 974, 974, 974, 974, 974, 974, 974, 974, 974, 974, 974, 974, 974, 974, 974, 974, 974, 974, 974, 974, 974, 974, 974, 974, 974, 974, 974, 974, 974, 974, 974, 974, 974, 974, 974, 974, 974, 974, 974, 974, 974, 974, 974, 974, 974, 974, 974, 974, 974, 974, 974, 974, 974, 974, 974, 974, 974, 974, 974, 974, 974, 974, 974, 974, 974, 974, 974, 974, 974, 974, 974, 974, 974, 974, 974, 974, 974, 974, 974, 974, 974, 974, 974, 974, 974, 974, 974, 974, 974, 974, 974, 974, 974, 974, 375: 974, 378: 974, 384: 974, 386: 974, 391: 974, 1720, 1719, 974, 974, 974, 1715, 974, 974, 402: 974, 407: 974, 974, 974, 547: 1716, 1714}, + {1309, 1332, 1217, 1442, 1436, 1426, 10: 1280, 1625, 1477, 1511, 1504, 1497, 1507, 1500, 1499, 1501, 1517, 1509, 1503, 1515, 1516, 1513, 1514, 1502, 1498, 1505, 1506, 1508, 1512, 1510, 1547, 1453, 1451, 1452, 1314, 1623, 1226, 1441, 1244, 1288, 1246, 1225, 1628, 1263, 1434, 1299, 1335, 1522, 1521, 1270, 1338, 1298, 1476, 1221, 1231, 1340, 1439, 1341, 1257, 1518, 1519, 1438, 1326, 1350, 1273, 1278, 1430, 1431, 1283, 1289, 1384, 1632, 1432, 1433, 1219, 1222, 1224, 1223, 1238, 1626, 1482, 1427, 1243, 1249, 1261, 1262, 1250, 1485, 1405, 1318, 1319, 1279, 1450, 1290, 1631, 1630, 1415, 1295, 1300, 1633, 1402, 1214, 1529, 1215, 1218, 1460, 1387, 1304, 1624, 1310, 1348, 1349, 1345, 1530, 1531, 1532, 1406, 1576, 1478, 1479, 1467, 1480, 1227, 1394, 1533, 1312, 1396, 1228, 1381, 1481, 1640, 1636, 1230, 1329, 1232, 1233, 1313, 1311, 1234, 1408, 1534, 1535, 1404, 1235, 1536, 1468, 1236, 1537, 1538, 1627, 1240, 1388, 1324, 1483, 1417, 1241, 1484, 1242, 1245, 1247, 1248, 1251, 1386, 1351, 1252, 1577, 1435, 1356, 1253, 1461, 1401, 1574, 1254, 1539, 1411, 1255, 1256, 1580, 1258, 1259, 1346, 1540, 1322, 1541, 1418, 1459, 1629, 1307, 1622, 1462, 1403, 1337, 1542, 1265, 1543, 1544, 1389, 1407, 1412, 1325, 1398, 1486, 1457, 1268, 1266, 1334, 1419, 1267, 1456, 1458, 1315, 1546, 1473, 1472, 1376, 1377, 1316, 1378, 1379, 1390, 1642, 1545, 1317, 1643, 1463, 1302, 1641, 1269, 1400, 1573, 1344, 1466, 1469, 1420, 1487, 1488, 1464, 1465, 1353, 1470, 1548, 1454, 1354, 1331, 1285, 1524, 1575, 1410, 1422, 1425, 1352, 1271, 1475, 1474, 1525, 1367, 1550, 1368, 1272, 1343, 1362, 1363, 1364, 1489, 1637, 1370, 1369, 1274, 1549, 1395, 1275, 1528, 1527, 1383, 1424, 1276, 1437, 1327, 1455, 1380, 1328, 1638, 1277, 1385, 1639, 1320, 1490, 1644, 1429, 1393, 1372, 1471, 1333, 1373, 1374, 1281, 1423, 1382, 1375, 1282, 1305, 1414, 1523, 1416, 1336, 1339, 1443, 1444, 1445, 1446, 1447, 1448, 1449, 1578, 1491, 1358, 1494, 1495, 1493, 1492, 1357, 1428, 1284, 1554, 1555, 1556, 1557, 1579, 1551, 1397, 1287, 1286, 1552, 1553, 1355, 1413, 1409, 1421, 1440, 1391, 1291, 
1496, 1561, 1562, 1563, 1564, 1565, 1566, 1568, 1567, 1569, 1570, 1571, 1520, 1294, 1323, 1572, 1297, 1330, 1392, 1635, 1558, 1559, 1560, 1347, 1634, 1526, 1399, 1618, 1671, 1674, 376: 1646, 378: 1655, 1669, 1668, 1699, 400: 1658, 404: 1616, 1677, 1650, 410: 1682, 419: 1645, 1647, 429: 1675, 1649, 1648, 433: 1676, 435: 1654, 1680, 1689, 1711, 1653, 1690, 1691, 1652, 1666, 1667, 1705, 1697, 1700, 1706, 1707, 1702, 1703, 1708, 1701, 1704, 1695, 1673, 1685, 1686, 1688, 1684, 1678, 1679, 1670, 1681, 1683, 1672, 1687, 1692, 1693, 523: 1657, 1212, 1213, 1211, 1663, 1659, 1651, 1662, 1660, 1661, 1694, 1698, 1696, 1656, 1665, 1709, 1710, 1664, 1621, 1620, 1619, 1726}, + {8: 1727, 392: 1720, 1719, 395: 1718, 1717, 1715, 547: 1716, 1714}, + {364, 364, 364, 364, 364, 364, 364, 364, 364, 364, 364, 364, 364, 364, 364, 364, 364, 364, 364, 364, 364, 364, 364, 364, 364, 364, 364, 364, 364, 364, 364, 364, 364, 364, 364, 364, 364, 364, 364, 364, 364, 364, 364, 364, 364, 364, 364, 364, 364, 364, 364, 364, 364, 364, 364, 364, 364, 364, 364, 364, 364, 364, 364, 364, 364, 364, 364, 364, 364, 364, 364, 364, 364, 364, 364, 364, 364, 364, 364, 364, 364, 364, 364, 364, 364, 364, 364, 364, 364, 364, 364, 364, 364, 364, 364, 364, 364, 364, 364, 364, 364, 364, 364, 364, 364, 364, 364, 364, 364, 364, 364, 364, 364, 364, 364, 364, 364, 364, 364, 364, 364, 364, 364, 364, 364, 364, 364, 364, 364, 364, 364, 364, 364, 364, 364, 364, 364, 364, 364, 364, 364, 364, 364, 364, 364, 364, 364, 364, 364, 364, 364, 364, 364, 364, 364, 364, 364, 364, 364, 364, 364, 364, 364, 364, 364, 364, 364, 364, 364, 364, 364, 364, 364, 364, 364, 364, 364, 364, 364, 364, 364, 364, 364, 364, 364, 364, 364, 364, 364, 364, 364, 364, 364, 364, 364, 364, 364, 364, 364, 364, 364, 364, 364, 364, 364, 364, 364, 364, 364, 364, 364, 364, 364, 364, 364, 364, 364, 364, 364, 364, 364, 364, 364, 364, 364, 364, 364, 364, 364, 364, 364, 364, 364, 364, 364, 364, 364, 364, 364, 364, 364, 364, 364, 364, 364, 364, 364, 364, 364, 364, 364, 364, 364, 364, 364, 364, 364, 364, 364, 364, 364, 364, 364, 364, 364, 364, 364, 364, 364, 364, 364, 364, 364, 364, 364, 364, 364, 364, 364, 364, 364, 364, 364, 364, 364, 364, 364, 364, 364, 364, 364, 364, 364, 364, 364, 364, 364, 364, 364, 364, 364, 364, 364, 364, 364, 364, 364, 364, 364, 364, 364, 364, 364, 364, 364, 364, 364, 364, 364, 364, 364, 364, 364, 364, 364, 364, 364, 364, 364, 364, 364, 364, 364, 364, 364, 364, 364, 364, 364, 364, 364, 364, 364, 364, 364, 364, 364, 364, 364, 364, 364, 364, 364, 364, 364, 364, 364, 364, 364, 364, 364, 364, 364, 364, 364, 364, 364, 364, 364, 364, 364, 364, 364, 375: 364, 377: 364, 364, 364, 364, 364, 384: 364, 386: 364, 391: 364, 364, 364, 364, 364, 364, 364, 364, 364, 401: 364, 364, 364, 407: 364, 364, 364, 411: 364, 364, 364, 364, 364, 364, 364, 364, 421: 364, 364, 364, 364, 364, 364, 364, 364, 432: 364, 434: 364}, + {1309, 1332, 1217, 1442, 1436, 1426, 10: 1280, 1625, 1477, 1511, 1504, 1497, 1507, 1500, 1499, 1501, 1517, 1509, 1503, 1515, 1516, 1513, 1514, 1502, 1498, 1505, 1506, 1508, 1512, 1510, 1547, 1453, 1451, 1452, 1314, 1623, 1226, 1441, 1244, 1288, 1246, 1225, 1628, 1263, 1434, 1299, 1335, 1522, 1521, 1270, 1338, 1298, 1476, 1221, 1231, 1340, 1439, 1341, 1257, 1518, 1519, 1438, 1326, 1350, 1273, 1278, 1430, 1431, 1283, 1289, 1384, 1632, 1432, 1433, 1219, 1222, 1224, 1223, 1238, 1626, 1482, 1427, 1243, 1249, 1261, 1262, 1250, 1485, 1405, 1318, 1319, 1279, 1450, 1290, 1631, 1630, 1415, 1295, 1300, 1633, 1402, 1214, 1529, 1215, 1218, 1460, 1387, 1304, 1624, 1310, 1348, 1349, 1345, 
1530, 1531, 1532, 1406, 1576, 1478, 1479, 1467, 1480, 1227, 1394, 1533, 1312, 1396, 1228, 1381, 1481, 1640, 1636, 1230, 1329, 1232, 1233, 1313, 1311, 1234, 1408, 1534, 1535, 1404, 1235, 1536, 1468, 1236, 1537, 1538, 1627, 1240, 1388, 1324, 1483, 1417, 1241, 1484, 1242, 1245, 1247, 1248, 1251, 1386, 1351, 1252, 1577, 1435, 1356, 1253, 1461, 1401, 1574, 1254, 1539, 1411, 1255, 1256, 1580, 1258, 1259, 1346, 1540, 1322, 1541, 1418, 1459, 1629, 1307, 1622, 1462, 1403, 1337, 1542, 1265, 1543, 1544, 1389, 1407, 1412, 1325, 1398, 1486, 1457, 1268, 1266, 1334, 1419, 1267, 1456, 1458, 1315, 1546, 1473, 1472, 1376, 1377, 1316, 1378, 1379, 1390, 1642, 1545, 1317, 1643, 1463, 1302, 1641, 1269, 1400, 1573, 1344, 1466, 1469, 1420, 1487, 1488, 1464, 1465, 1353, 1470, 1548, 1454, 1354, 1331, 1285, 1524, 1575, 1410, 1422, 1425, 1352, 1271, 1475, 1474, 1525, 1367, 1550, 1368, 1272, 1343, 1362, 1363, 1364, 1489, 1637, 1370, 1369, 1274, 1549, 1395, 1275, 1528, 1527, 1383, 1424, 1276, 1437, 1327, 1455, 1380, 1328, 1638, 1277, 1385, 1639, 1320, 1490, 1644, 1429, 1393, 1372, 1471, 1333, 1373, 1374, 1281, 1423, 1382, 1375, 1282, 1305, 1414, 1523, 1416, 1336, 1339, 1443, 1444, 1445, 1446, 1447, 1448, 1449, 1578, 1491, 1358, 1494, 1495, 1493, 1492, 1357, 1428, 1284, 1554, 1555, 1556, 1557, 1579, 1551, 1397, 1287, 1286, 1552, 1553, 1355, 1413, 1409, 1421, 1440, 1391, 1291, 1496, 1561, 1562, 1563, 1564, 1565, 1566, 1568, 1567, 1569, 1570, 1571, 1520, 1294, 1323, 1572, 1297, 1330, 1392, 1635, 1558, 1559, 1560, 1347, 1634, 1526, 1399, 1618, 1671, 1674, 376: 1646, 378: 1655, 1669, 1668, 1699, 400: 1658, 404: 1616, 1677, 1650, 410: 1682, 419: 1645, 1647, 429: 1675, 1649, 1648, 433: 1676, 435: 1654, 1680, 1689, 1711, 1653, 1690, 1691, 1652, 1666, 1667, 1705, 1697, 1700, 1706, 1707, 1702, 1703, 1708, 1701, 1704, 1695, 1673, 1685, 1686, 1688, 1684, 1678, 1679, 1670, 1681, 1683, 1672, 1687, 1692, 1693, 523: 1657, 1212, 1213, 1211, 1663, 1659, 1651, 1662, 1660, 1661, 1694, 1698, 1696, 1656, 1665, 1709, 1710, 1664, 1621, 1620, 1619, 1729}, + // 570 + {8: 1730, 392: 1720, 1719, 395: 1718, 1717, 1715, 547: 1716, 1714}, + {365, 365, 365, 365, 365, 365, 365, 365, 365, 365, 365, 365, 365, 365, 365, 365, 365, 365, 365, 365, 365, 365, 365, 365, 365, 365, 365, 365, 365, 365, 365, 365, 365, 365, 365, 365, 365, 365, 365, 365, 365, 365, 365, 365, 365, 365, 365, 365, 365, 365, 365, 365, 365, 365, 365, 365, 365, 365, 365, 365, 365, 365, 365, 365, 365, 365, 365, 365, 365, 365, 365, 365, 365, 365, 365, 365, 365, 365, 365, 365, 365, 365, 365, 365, 365, 365, 365, 365, 365, 365, 365, 365, 365, 365, 365, 365, 365, 365, 365, 365, 365, 365, 365, 365, 365, 365, 365, 365, 365, 365, 365, 365, 365, 365, 365, 365, 365, 365, 365, 365, 365, 365, 365, 365, 365, 365, 365, 365, 365, 365, 365, 365, 365, 365, 365, 365, 365, 365, 365, 365, 365, 365, 365, 365, 365, 365, 365, 365, 365, 365, 365, 365, 365, 365, 365, 365, 365, 365, 365, 365, 365, 365, 365, 365, 365, 365, 365, 365, 365, 365, 365, 365, 365, 365, 365, 365, 365, 365, 365, 365, 365, 365, 365, 365, 365, 365, 365, 365, 365, 365, 365, 365, 365, 365, 365, 365, 365, 365, 365, 365, 365, 365, 365, 365, 365, 365, 365, 365, 365, 365, 365, 365, 365, 365, 365, 365, 365, 365, 365, 365, 365, 365, 365, 365, 365, 365, 365, 365, 365, 365, 365, 365, 365, 365, 365, 365, 365, 365, 365, 365, 365, 365, 365, 365, 365, 365, 365, 365, 365, 365, 365, 365, 365, 365, 365, 365, 365, 365, 365, 365, 365, 365, 365, 365, 365, 365, 365, 365, 365, 365, 365, 365, 365, 365, 365, 365, 365, 365, 365, 365, 365, 365, 365, 365, 365, 365, 365, 
365, 365, 365, 365, 365, 365, 365, 365, 365, 365, 365, 365, 365, 365, 365, 365, 365, 365, 365, 365, 365, 365, 365, 365, 365, 365, 365, 365, 365, 365, 365, 365, 365, 365, 365, 365, 365, 365, 365, 365, 365, 365, 365, 365, 365, 365, 365, 365, 365, 365, 365, 365, 365, 365, 365, 365, 365, 365, 365, 365, 365, 365, 365, 365, 365, 365, 365, 365, 365, 365, 365, 365, 365, 365, 365, 365, 365, 365, 365, 365, 365, 365, 365, 365, 365, 365, 375: 365, 377: 365, 365, 365, 365, 365, 384: 365, 386: 365, 391: 365, 365, 365, 365, 365, 365, 365, 365, 365, 401: 365, 365, 365, 407: 365, 365, 365, 411: 365, 365, 365, 365, 365, 365, 365, 365, 421: 365, 365, 365, 365, 365, 365, 365, 365, 432: 365, 434: 365}, + {1309, 1332, 1217, 1442, 1436, 1426, 10: 1280, 1625, 1477, 1511, 1504, 1497, 1507, 1500, 1499, 1501, 1517, 1509, 1503, 1515, 1516, 1513, 1514, 1502, 1498, 1505, 1506, 1508, 1512, 1510, 1547, 1453, 1451, 1452, 1314, 1623, 1226, 1441, 1244, 1288, 1246, 1225, 1628, 1263, 1434, 1299, 1335, 1522, 1521, 1270, 1338, 1298, 1476, 1221, 1231, 1340, 1439, 1341, 1257, 1518, 1519, 1438, 1326, 1350, 1273, 1278, 1430, 1431, 1283, 1289, 1384, 1632, 1432, 1433, 1219, 1222, 1224, 1223, 1238, 1626, 1482, 1427, 1243, 1249, 1261, 1262, 1250, 1485, 1405, 1318, 1319, 1279, 1450, 1290, 1631, 1630, 1415, 1295, 1300, 1633, 1402, 1214, 1529, 1215, 1218, 1460, 1387, 1304, 1624, 1310, 1348, 1349, 1345, 1530, 1531, 1532, 1406, 1576, 1478, 1479, 1467, 1480, 1227, 1394, 1533, 1312, 1396, 1228, 1381, 1481, 1640, 1636, 1230, 1329, 1232, 1233, 1313, 1311, 1234, 1408, 1534, 1535, 1404, 1235, 1536, 1468, 1236, 1537, 1538, 1627, 1240, 1388, 1324, 1483, 1417, 1241, 1484, 1242, 1245, 1247, 1248, 1251, 1386, 1351, 1252, 1577, 1435, 1356, 1253, 1461, 1401, 1574, 1254, 1539, 1411, 1255, 1256, 1580, 1258, 1259, 1346, 1540, 1322, 1541, 1418, 1459, 1629, 1307, 1622, 1462, 1403, 1337, 1542, 1265, 1543, 1544, 1389, 1407, 1412, 1325, 1398, 1486, 1457, 1268, 1266, 1334, 1419, 1267, 1456, 1458, 1315, 1546, 1473, 1472, 1376, 1377, 1316, 1378, 1379, 1390, 1642, 1545, 1317, 1643, 1463, 1302, 1641, 1269, 1400, 1573, 1344, 1466, 1469, 1420, 1487, 1488, 1464, 1465, 1353, 1470, 1548, 1454, 1354, 1331, 1285, 1524, 1575, 1410, 1422, 1425, 1352, 1271, 1475, 1474, 1525, 1367, 1550, 1368, 1272, 1343, 1362, 1363, 1364, 1489, 1637, 1370, 1369, 1274, 1549, 1395, 1275, 1528, 1527, 1383, 1424, 1276, 1437, 1327, 1455, 1380, 1328, 1638, 1277, 1385, 1639, 1320, 1490, 1644, 1429, 1393, 1372, 1471, 1333, 1373, 1374, 1281, 1423, 1382, 1375, 1282, 1305, 1414, 1523, 1416, 1336, 1339, 1443, 1444, 1445, 1446, 1447, 1448, 1449, 1578, 1491, 1358, 1494, 1495, 1493, 1492, 1357, 1428, 1284, 1554, 1555, 1556, 1557, 1579, 1551, 1397, 1287, 1286, 1552, 1553, 1355, 1413, 1409, 1421, 1440, 1391, 1291, 1496, 1561, 1562, 1563, 1564, 1565, 1566, 1568, 1567, 1569, 1570, 1571, 1520, 1294, 1323, 1572, 1297, 1330, 1392, 1635, 1558, 1559, 1560, 1347, 1634, 1526, 1399, 1618, 1671, 1674, 376: 1646, 378: 1655, 1669, 1668, 1699, 400: 1658, 1733, 404: 1616, 1677, 1650, 410: 1682, 419: 1645, 1647, 429: 1675, 1649, 1648, 433: 1676, 435: 1654, 1680, 1689, 1711, 1653, 1690, 1691, 1652, 1666, 1667, 1705, 1697, 1700, 1706, 1707, 1702, 1703, 1708, 1701, 1704, 1695, 1673, 1685, 1686, 1688, 1684, 1678, 1679, 1670, 1681, 1683, 1672, 1687, 1692, 1693, 523: 1657, 1212, 1213, 1211, 1663, 1659, 1651, 1662, 1660, 1661, 1694, 1698, 1696, 1656, 1665, 1709, 1710, 1664, 1621, 1620, 1619, 1732}, + {8: 1735, 392: 1720, 1719, 395: 1718, 1717, 1715, 547: 1716, 1714}, + {8: 1734}, + // 575 + {366, 366, 366, 366, 366, 366, 366, 366, 
366, 366, 366, 366, 366, 366, 366, 366, 366, 366, 366, 366, 366, 366, 366, 366, 366, 366, 366, 366, 366, 366, 366, 366, 366, 366, 366, 366, 366, 366, 366, 366, 366, 366, 366, 366, 366, 366, 366, 366, 366, 366, 366, 366, 366, 366, 366, 366, 366, 366, 366, 366, 366, 366, 366, 366, 366, 366, 366, 366, 366, 366, 366, 366, 366, 366, 366, 366, 366, 366, 366, 366, 366, 366, 366, 366, 366, 366, 366, 366, 366, 366, 366, 366, 366, 366, 366, 366, 366, 366, 366, 366, 366, 366, 366, 366, 366, 366, 366, 366, 366, 366, 366, 366, 366, 366, 366, 366, 366, 366, 366, 366, 366, 366, 366, 366, 366, 366, 366, 366, 366, 366, 366, 366, 366, 366, 366, 366, 366, 366, 366, 366, 366, 366, 366, 366, 366, 366, 366, 366, 366, 366, 366, 366, 366, 366, 366, 366, 366, 366, 366, 366, 366, 366, 366, 366, 366, 366, 366, 366, 366, 366, 366, 366, 366, 366, 366, 366, 366, 366, 366, 366, 366, 366, 366, 366, 366, 366, 366, 366, 366, 366, 366, 366, 366, 366, 366, 366, 366, 366, 366, 366, 366, 366, 366, 366, 366, 366, 366, 366, 366, 366, 366, 366, 366, 366, 366, 366, 366, 366, 366, 366, 366, 366, 366, 366, 366, 366, 366, 366, 366, 366, 366, 366, 366, 366, 366, 366, 366, 366, 366, 366, 366, 366, 366, 366, 366, 366, 366, 366, 366, 366, 366, 366, 366, 366, 366, 366, 366, 366, 366, 366, 366, 366, 366, 366, 366, 366, 366, 366, 366, 366, 366, 366, 366, 366, 366, 366, 366, 366, 366, 366, 366, 366, 366, 366, 366, 366, 366, 366, 366, 366, 366, 366, 366, 366, 366, 366, 366, 366, 366, 366, 366, 366, 366, 366, 366, 366, 366, 366, 366, 366, 366, 366, 366, 366, 366, 366, 366, 366, 366, 366, 366, 366, 366, 366, 366, 366, 366, 366, 366, 366, 366, 366, 366, 366, 366, 366, 366, 366, 366, 366, 366, 366, 366, 366, 366, 366, 366, 366, 366, 366, 366, 366, 366, 366, 366, 366, 366, 366, 366, 366, 366, 366, 366, 366, 366, 366, 366, 366, 366, 366, 366, 366, 366, 375: 366, 377: 366, 366, 366, 366, 366, 384: 366, 386: 366, 391: 366, 366, 366, 366, 366, 366, 366, 366, 366, 401: 366, 366, 366, 407: 366, 366, 366, 411: 366, 366, 366, 366, 366, 366, 366, 366, 421: 366, 366, 366, 366, 366, 366, 366, 366, 432: 366, 434: 366}, + {367, 367, 367, 367, 367, 367, 367, 367, 367, 367, 367, 367, 367, 367, 367, 367, 367, 367, 367, 367, 367, 367, 367, 367, 367, 367, 367, 367, 367, 367, 367, 367, 367, 367, 367, 367, 367, 367, 367, 367, 367, 367, 367, 367, 367, 367, 367, 367, 367, 367, 367, 367, 367, 367, 367, 367, 367, 367, 367, 367, 367, 367, 367, 367, 367, 367, 367, 367, 367, 367, 367, 367, 367, 367, 367, 367, 367, 367, 367, 367, 367, 367, 367, 367, 367, 367, 367, 367, 367, 367, 367, 367, 367, 367, 367, 367, 367, 367, 367, 367, 367, 367, 367, 367, 367, 367, 367, 367, 367, 367, 367, 367, 367, 367, 367, 367, 367, 367, 367, 367, 367, 367, 367, 367, 367, 367, 367, 367, 367, 367, 367, 367, 367, 367, 367, 367, 367, 367, 367, 367, 367, 367, 367, 367, 367, 367, 367, 367, 367, 367, 367, 367, 367, 367, 367, 367, 367, 367, 367, 367, 367, 367, 367, 367, 367, 367, 367, 367, 367, 367, 367, 367, 367, 367, 367, 367, 367, 367, 367, 367, 367, 367, 367, 367, 367, 367, 367, 367, 367, 367, 367, 367, 367, 367, 367, 367, 367, 367, 367, 367, 367, 367, 367, 367, 367, 367, 367, 367, 367, 367, 367, 367, 367, 367, 367, 367, 367, 367, 367, 367, 367, 367, 367, 367, 367, 367, 367, 367, 367, 367, 367, 367, 367, 367, 367, 367, 367, 367, 367, 367, 367, 367, 367, 367, 367, 367, 367, 367, 367, 367, 367, 367, 367, 367, 367, 367, 367, 367, 367, 367, 367, 367, 367, 367, 367, 367, 367, 367, 367, 367, 367, 367, 367, 367, 367, 367, 367, 367, 367, 367, 367, 367, 367, 367, 367, 367, 367, 367, 367, 367, 367, 367, 367, 
367, 367, 367, 367, 367, 367, 367, 367, 367, 367, 367, 367, 367, 367, 367, 367, 367, 367, 367, 367, 367, 367, 367, 367, 367, 367, 367, 367, 367, 367, 367, 367, 367, 367, 367, 367, 367, 367, 367, 367, 367, 367, 367, 367, 367, 367, 367, 367, 367, 367, 367, 367, 367, 367, 367, 367, 367, 367, 367, 367, 367, 367, 367, 367, 367, 367, 367, 367, 367, 367, 367, 367, 367, 367, 367, 367, 367, 367, 367, 367, 375: 367, 377: 367, 367, 367, 367, 367, 384: 367, 386: 367, 391: 367, 367, 367, 367, 367, 367, 367, 367, 367, 401: 367, 367, 367, 407: 367, 367, 367, 411: 367, 367, 367, 367, 367, 367, 367, 367, 421: 367, 367, 367, 367, 367, 367, 367, 367, 432: 367, 434: 367}, + {1309, 1332, 1217, 1442, 1436, 1426, 10: 1280, 1625, 1477, 1511, 1504, 1497, 1507, 1500, 1499, 1501, 1517, 1509, 1503, 1515, 1516, 1513, 1514, 1502, 1498, 1505, 1506, 1508, 1512, 1510, 1547, 1453, 1451, 1452, 1314, 1623, 1226, 1441, 1244, 1288, 1246, 1225, 1628, 1263, 1434, 1299, 1335, 1522, 1521, 1270, 1338, 1298, 1476, 1221, 1231, 1340, 1439, 1341, 1257, 1518, 1519, 1438, 1326, 1350, 1273, 1278, 1430, 1431, 1283, 1289, 1384, 1632, 1432, 1433, 1219, 1222, 1224, 1223, 1238, 1626, 1482, 1427, 1243, 1249, 1261, 1262, 1250, 1485, 1405, 1318, 1319, 1279, 1450, 1290, 1631, 1630, 1415, 1295, 1300, 1633, 1402, 1214, 1529, 1215, 1218, 1460, 1387, 1304, 1624, 1310, 1348, 1349, 1345, 1530, 1531, 1532, 1406, 1576, 1478, 1479, 1467, 1480, 1227, 1394, 1533, 1312, 1396, 1228, 1381, 1481, 1640, 1636, 1230, 1329, 1232, 1233, 1313, 1311, 1234, 1408, 1534, 1535, 1404, 1235, 1536, 1468, 1236, 1537, 1538, 1627, 1240, 1388, 1324, 1483, 1417, 1241, 1484, 1242, 1245, 1247, 1248, 1251, 1386, 1351, 1252, 1577, 1435, 1356, 1253, 1461, 1401, 1574, 1254, 1539, 1411, 1255, 1256, 1580, 1258, 1259, 1346, 1540, 1322, 1541, 1418, 1459, 1629, 1307, 1622, 1462, 1403, 1337, 1542, 1265, 1543, 1544, 1389, 1407, 1412, 1325, 1398, 1486, 1457, 1268, 1266, 1334, 1419, 1267, 1456, 1458, 1315, 1546, 1473, 1472, 1376, 1377, 1316, 1378, 1379, 1390, 1642, 1545, 1317, 1643, 1463, 1302, 1641, 1269, 1400, 1573, 1344, 1466, 1469, 1420, 1487, 1488, 1464, 1465, 1353, 1470, 1548, 1454, 1354, 1331, 1285, 1524, 1575, 1410, 1422, 1425, 1352, 1271, 1475, 1474, 1525, 1367, 1550, 1368, 1272, 1343, 1362, 1363, 1364, 1489, 1637, 1370, 1369, 1274, 1549, 1395, 1275, 1528, 1527, 1383, 1424, 1276, 1437, 1327, 1455, 1380, 1328, 1638, 1277, 1385, 1639, 1320, 1490, 1644, 1429, 1393, 1372, 1471, 1333, 1373, 1374, 1281, 1423, 1382, 1375, 1282, 1305, 1414, 1523, 1416, 1336, 1339, 1443, 1444, 1445, 1446, 1447, 1448, 1449, 1578, 1491, 1358, 1494, 1495, 1493, 1492, 1357, 1428, 1284, 1554, 1555, 1556, 1557, 1579, 1551, 1397, 1287, 1286, 1552, 1553, 1355, 1413, 1409, 1421, 1440, 1391, 1291, 1496, 1561, 1562, 1563, 1564, 1565, 1566, 1568, 1567, 1569, 1570, 1571, 1520, 1294, 1323, 1572, 1297, 1330, 1392, 1635, 1558, 1559, 1560, 1347, 1634, 1526, 1399, 1618, 1671, 1674, 376: 1646, 378: 1655, 1669, 1668, 1699, 400: 1658, 404: 1616, 1677, 1650, 410: 1682, 419: 1645, 1647, 429: 1675, 1649, 1648, 433: 1676, 435: 1654, 1680, 1689, 1711, 1653, 1690, 1691, 1652, 1666, 1667, 1705, 1697, 1700, 1706, 1707, 1702, 1703, 1708, 1701, 1704, 1695, 1673, 1685, 1686, 1688, 1684, 1678, 1679, 1670, 1681, 1683, 1672, 1687, 1692, 1693, 523: 1657, 1212, 1213, 1211, 1663, 1659, 1651, 1662, 1660, 1661, 1694, 1698, 1696, 1656, 1665, 1709, 1710, 1664, 1621, 1620, 1619, 1737}, + {8: 1738, 392: 1720, 1719, 395: 1718, 1717, 1715, 1739, 547: 1716, 1714}, + {374, 374, 374, 374, 374, 374, 374, 374, 374, 374, 374, 374, 374, 374, 374, 374, 374, 374, 
374, 374, 374, 374, 374, 374, 374, 374, 374, 374, 374, 374, 374, 374, 374, 374, 374, 374, 374, 374, 374, 374, 374, 374, 374, 374, 374, 374, 374, 374, 374, 374, 374, 374, 374, 374, 374, 374, 374, 374, 374, 374, 374, 374, 374, 374, 374, 374, 374, 374, 374, 374, 374, 374, 374, 374, 374, 374, 374, 374, 374, 374, 374, 374, 374, 374, 374, 374, 374, 374, 374, 374, 374, 374, 374, 374, 374, 374, 374, 374, 374, 374, 374, 374, 374, 374, 374, 374, 374, 374, 374, 374, 374, 374, 374, 374, 374, 374, 374, 374, 374, 374, 374, 374, 374, 374, 374, 374, 374, 374, 374, 374, 374, 374, 374, 374, 374, 374, 374, 374, 374, 374, 374, 374, 374, 374, 374, 374, 374, 374, 374, 374, 374, 374, 374, 374, 374, 374, 374, 374, 374, 374, 374, 374, 374, 374, 374, 374, 374, 374, 374, 374, 374, 374, 374, 374, 374, 374, 374, 374, 374, 374, 374, 374, 374, 374, 374, 374, 374, 374, 374, 374, 374, 374, 374, 374, 374, 374, 374, 374, 374, 374, 374, 374, 374, 374, 374, 374, 374, 374, 374, 374, 374, 374, 374, 374, 374, 374, 374, 374, 374, 374, 374, 374, 374, 374, 374, 374, 374, 374, 374, 374, 374, 374, 374, 374, 374, 374, 374, 374, 374, 374, 374, 374, 374, 374, 374, 374, 374, 374, 374, 374, 374, 374, 374, 374, 374, 374, 374, 374, 374, 374, 374, 374, 374, 374, 374, 374, 374, 374, 374, 374, 374, 374, 374, 374, 374, 374, 374, 374, 374, 374, 374, 374, 374, 374, 374, 374, 374, 374, 374, 374, 374, 374, 374, 374, 374, 374, 374, 374, 374, 374, 374, 374, 374, 374, 374, 374, 374, 374, 374, 374, 374, 374, 374, 374, 374, 374, 374, 374, 374, 374, 374, 374, 374, 374, 374, 374, 374, 374, 374, 374, 374, 374, 374, 374, 374, 374, 374, 374, 374, 374, 374, 374, 374, 374, 374, 374, 374, 374, 374, 374, 374, 374, 374, 374, 374, 374, 374, 374, 374, 374, 374, 374, 374, 374, 374, 374, 374, 374, 374, 374, 374, 374, 374, 375: 374, 377: 374, 374, 374, 374, 374, 384: 374, 386: 374, 391: 374, 374, 374, 374, 374, 374, 374, 374, 374, 401: 374, 374, 374, 407: 374, 374, 374, 411: 374, 374, 374, 374, 374, 374, 374, 374, 421: 374, 374, 374, 374, 374, 374, 374, 374, 432: 374, 434: 374}, + // 580 + {1309, 1332, 1217, 1442, 1436, 1426, 10: 1280, 1625, 1477, 1511, 1504, 1497, 1507, 1500, 1499, 1501, 1517, 1509, 1503, 1515, 1516, 1513, 1514, 1502, 1498, 1505, 1506, 1508, 1512, 1510, 1547, 1453, 1451, 1452, 1314, 1623, 1226, 1441, 1244, 1288, 1246, 1225, 1628, 1263, 1434, 1299, 1335, 1522, 1521, 1270, 1338, 1298, 1476, 1221, 1231, 1340, 1439, 1341, 1257, 1518, 1519, 1438, 1326, 1350, 1273, 1278, 1430, 1431, 1283, 1289, 1384, 1632, 1432, 1433, 1219, 1222, 1224, 1223, 1238, 1626, 1482, 1427, 1243, 1249, 1261, 1262, 1250, 1485, 1405, 1318, 1319, 1279, 1450, 1290, 1631, 1630, 1415, 1295, 1300, 1633, 1402, 1214, 1529, 1215, 1218, 1460, 1387, 1304, 1624, 1310, 1348, 1349, 1345, 1530, 1531, 1532, 1406, 1576, 1478, 1479, 1467, 1480, 1227, 1394, 1533, 1312, 1396, 1228, 1381, 1481, 1640, 1636, 1230, 1329, 1232, 1233, 1313, 1311, 1234, 1408, 1534, 1535, 1404, 1235, 1536, 1468, 1236, 1537, 1538, 1627, 1240, 1388, 1324, 1483, 1417, 1241, 1484, 1242, 1245, 1247, 1248, 1251, 1386, 1351, 1252, 1577, 1435, 1356, 1253, 1461, 1401, 1574, 1254, 1539, 1411, 1255, 1256, 1580, 1258, 1259, 1346, 1540, 1322, 1541, 1418, 1459, 1629, 1307, 1622, 1462, 1403, 1337, 1542, 1265, 1543, 1544, 1389, 1407, 1412, 1325, 1398, 1486, 1457, 1268, 1266, 1334, 1419, 1267, 1456, 1458, 1315, 1546, 1473, 1472, 1376, 1377, 1316, 1378, 1379, 1390, 1642, 1545, 1317, 1643, 1463, 1302, 1641, 1269, 1400, 1573, 1344, 1466, 1469, 1420, 1487, 1488, 1464, 1465, 1353, 1470, 1548, 1454, 1354, 1331, 1285, 1524, 1575, 1410, 1422, 1425, 
1352, 1271, 1475, 1474, 1525, 1367, 1550, 1368, 1272, 1343, 1362, 1363, 1364, 1489, 1637, 1370, 1369, 1274, 1549, 1395, 1275, 1528, 1527, 1383, 1424, 1276, 1437, 1327, 1455, 1380, 1328, 1638, 1277, 1385, 1639, 1320, 1490, 1644, 1429, 1393, 1372, 1471, 1333, 1373, 1374, 1281, 1423, 1382, 1375, 1282, 1305, 1414, 1523, 1416, 1336, 1339, 1443, 1444, 1445, 1446, 1447, 1448, 1449, 1578, 1491, 1358, 1494, 1495, 1493, 1492, 1357, 1428, 1284, 1554, 1555, 1556, 1557, 1579, 1551, 1397, 1287, 1286, 1552, 1553, 1355, 1413, 1409, 1421, 1440, 1391, 1291, 1496, 1561, 1562, 1563, 1564, 1565, 1566, 1568, 1567, 1569, 1570, 1571, 1520, 1294, 1323, 1572, 1297, 1330, 1392, 1635, 1558, 1559, 1560, 1347, 1634, 1526, 1399, 1618, 1671, 1674, 376: 1646, 378: 1655, 1669, 1668, 1699, 400: 1658, 404: 1616, 1677, 1650, 410: 1682, 419: 1645, 1647, 429: 1675, 1649, 1648, 433: 1676, 435: 1654, 1680, 1689, 1711, 1653, 1690, 1691, 1652, 1666, 1667, 1705, 1697, 1700, 1706, 1707, 1702, 1703, 1708, 1701, 1704, 1695, 1673, 1685, 1686, 1688, 1684, 1678, 1679, 1670, 1681, 1683, 1672, 1687, 1692, 1693, 523: 1657, 1212, 1213, 1211, 1663, 1659, 1651, 1662, 1660, 1661, 1694, 1698, 1696, 1656, 1665, 1709, 1710, 1664, 1621, 1620, 1619, 1740}, + {8: 1741, 392: 1720, 1719, 395: 1718, 1717, 1715, 547: 1716, 1714}, + {373, 373, 373, 373, 373, 373, 373, 373, 373, 373, 373, 373, 373, 373, 373, 373, 373, 373, 373, 373, 373, 373, 373, 373, 373, 373, 373, 373, 373, 373, 373, 373, 373, 373, 373, 373, 373, 373, 373, 373, 373, 373, 373, 373, 373, 373, 373, 373, 373, 373, 373, 373, 373, 373, 373, 373, 373, 373, 373, 373, 373, 373, 373, 373, 373, 373, 373, 373, 373, 373, 373, 373, 373, 373, 373, 373, 373, 373, 373, 373, 373, 373, 373, 373, 373, 373, 373, 373, 373, 373, 373, 373, 373, 373, 373, 373, 373, 373, 373, 373, 373, 373, 373, 373, 373, 373, 373, 373, 373, 373, 373, 373, 373, 373, 373, 373, 373, 373, 373, 373, 373, 373, 373, 373, 373, 373, 373, 373, 373, 373, 373, 373, 373, 373, 373, 373, 373, 373, 373, 373, 373, 373, 373, 373, 373, 373, 373, 373, 373, 373, 373, 373, 373, 373, 373, 373, 373, 373, 373, 373, 373, 373, 373, 373, 373, 373, 373, 373, 373, 373, 373, 373, 373, 373, 373, 373, 373, 373, 373, 373, 373, 373, 373, 373, 373, 373, 373, 373, 373, 373, 373, 373, 373, 373, 373, 373, 373, 373, 373, 373, 373, 373, 373, 373, 373, 373, 373, 373, 373, 373, 373, 373, 373, 373, 373, 373, 373, 373, 373, 373, 373, 373, 373, 373, 373, 373, 373, 373, 373, 373, 373, 373, 373, 373, 373, 373, 373, 373, 373, 373, 373, 373, 373, 373, 373, 373, 373, 373, 373, 373, 373, 373, 373, 373, 373, 373, 373, 373, 373, 373, 373, 373, 373, 373, 373, 373, 373, 373, 373, 373, 373, 373, 373, 373, 373, 373, 373, 373, 373, 373, 373, 373, 373, 373, 373, 373, 373, 373, 373, 373, 373, 373, 373, 373, 373, 373, 373, 373, 373, 373, 373, 373, 373, 373, 373, 373, 373, 373, 373, 373, 373, 373, 373, 373, 373, 373, 373, 373, 373, 373, 373, 373, 373, 373, 373, 373, 373, 373, 373, 373, 373, 373, 373, 373, 373, 373, 373, 373, 373, 373, 373, 373, 373, 373, 373, 373, 373, 373, 373, 373, 373, 373, 373, 373, 373, 373, 373, 373, 373, 373, 373, 373, 373, 373, 373, 373, 373, 373, 373, 373, 373, 373, 373, 375: 373, 377: 373, 373, 373, 373, 373, 384: 373, 386: 373, 391: 373, 373, 373, 373, 373, 373, 373, 373, 373, 401: 373, 373, 373, 407: 373, 373, 373, 411: 373, 373, 373, 373, 373, 373, 373, 373, 421: 373, 373, 373, 373, 373, 373, 373, 373, 432: 373, 434: 373}, + {1309, 1332, 1217, 1442, 1436, 1426, 10: 1280, 1625, 1477, 1511, 1504, 1497, 1507, 1500, 1499, 1501, 1517, 1509, 1503, 1515, 1516, 1513, 
1514, 1502, 1498, 1505, 1506, 1508, 1512, 1510, 1547, 1453, 1451, 1452, 1314, 1623, 1226, 1441, 1244, 1288, 1246, 1225, 1628, 1263, 1434, 1299, 1335, 1522, 1521, 1270, 1338, 1298, 1476, 1221, 1231, 1340, 1439, 1341, 1257, 1518, 1519, 1438, 1326, 1350, 1273, 1278, 1430, 1431, 1283, 1289, 1384, 1632, 1432, 1433, 1219, 1222, 1224, 1223, 1238, 1626, 1482, 1427, 1243, 1249, 1261, 1262, 1250, 1485, 1405, 1318, 1319, 1279, 1450, 1290, 1631, 1630, 1415, 1295, 1300, 1633, 1402, 1214, 1529, 1215, 1218, 1460, 1387, 1304, 1624, 1310, 1348, 1349, 1345, 1530, 1531, 1532, 1406, 1576, 1478, 1479, 1467, 1480, 1227, 1394, 1533, 1312, 1396, 1228, 1381, 1481, 1640, 1636, 1230, 1329, 1232, 1233, 1313, 1311, 1234, 1408, 1534, 1535, 1404, 1235, 1536, 1468, 1236, 1537, 1538, 1627, 1240, 1388, 1324, 1483, 1417, 1241, 1484, 1242, 1245, 1247, 1248, 1251, 1386, 1351, 1252, 1577, 1435, 1356, 1253, 1461, 1401, 1574, 1254, 1539, 1411, 1255, 1256, 1580, 1258, 1259, 1346, 1540, 1322, 1541, 1418, 1459, 1629, 1307, 1622, 1462, 1403, 1337, 1542, 1265, 1543, 1544, 1389, 1407, 1412, 1325, 1398, 1486, 1457, 1268, 1266, 1334, 1419, 1267, 1456, 1458, 1315, 1546, 1473, 1472, 1376, 1377, 1316, 1378, 1379, 1390, 1642, 1545, 1317, 1643, 1463, 1302, 1641, 1269, 1400, 1573, 1344, 1466, 1469, 1420, 1487, 1488, 1464, 1465, 1353, 1470, 1548, 1454, 1354, 1331, 1285, 1524, 1575, 1410, 1422, 1425, 1352, 1271, 1475, 1474, 1525, 1367, 1550, 1368, 1272, 1343, 1362, 1363, 1364, 1489, 1637, 1370, 1369, 1274, 1549, 1395, 1275, 1528, 1527, 1383, 1424, 1276, 1437, 1327, 1455, 1380, 1328, 1638, 1277, 1385, 1639, 1320, 1490, 1644, 1429, 1393, 1372, 1471, 1333, 1373, 1374, 1281, 1423, 1382, 1375, 1282, 1305, 1414, 1523, 1416, 1336, 1339, 1443, 1444, 1445, 1446, 1447, 1448, 1449, 1578, 1491, 1358, 1494, 1495, 1493, 1492, 1357, 1428, 1284, 1554, 1555, 1556, 1557, 1579, 1551, 1397, 1287, 1286, 1552, 1553, 1355, 1413, 1409, 1421, 1440, 1391, 1291, 1496, 1561, 1562, 1563, 1564, 1565, 1566, 1568, 1567, 1569, 1570, 1571, 1520, 1294, 1323, 1572, 1297, 1330, 1392, 1635, 1558, 1559, 1560, 1347, 1634, 1526, 1399, 1618, 1671, 1674, 376: 1646, 378: 1655, 1669, 1668, 1699, 400: 1658, 404: 1616, 1677, 1650, 410: 1682, 419: 1645, 1647, 429: 1675, 1649, 1648, 433: 1676, 435: 1654, 1680, 1689, 1711, 1653, 1690, 1691, 1652, 1666, 1667, 1705, 1697, 1700, 1706, 1707, 1702, 1703, 1708, 1701, 1704, 1695, 1673, 1685, 1686, 1688, 1684, 1678, 1679, 1670, 1681, 1683, 1672, 1687, 1692, 1693, 523: 1657, 1212, 1213, 1211, 1663, 1659, 1651, 1662, 1660, 1661, 1694, 1698, 1696, 1656, 1665, 1709, 1710, 1664, 1621, 1620, 1619, 1743}, + {9: 1744, 392: 1720, 1719, 395: 1718, 1717, 1715, 1745, 547: 1716, 1714}, + // 585 + {1309, 1332, 1217, 1442, 1436, 1426, 10: 1280, 1625, 1477, 1511, 1504, 1497, 1507, 1500, 1499, 1501, 1517, 1509, 1503, 1515, 1516, 1513, 1514, 1502, 1498, 1505, 1506, 1508, 1512, 1510, 1547, 1453, 1451, 1452, 1314, 1623, 1226, 1441, 1244, 1288, 1246, 1225, 1628, 1263, 1434, 1299, 1335, 1522, 1521, 1270, 1338, 1298, 1476, 1221, 1231, 1340, 1439, 1341, 1257, 1518, 1519, 1438, 1326, 1350, 1273, 1278, 1430, 1431, 1283, 1289, 1384, 1632, 1432, 1433, 1219, 1222, 1224, 1223, 1238, 1626, 1482, 1427, 1243, 1249, 1261, 1262, 1250, 1485, 1405, 1318, 1319, 1279, 1450, 1290, 1631, 1630, 1415, 1295, 1300, 1633, 1402, 1214, 1529, 1215, 1218, 1460, 1387, 1304, 1624, 1310, 1348, 1349, 1345, 1530, 1531, 1532, 1406, 1576, 1478, 1479, 1467, 1480, 1227, 1394, 1533, 1312, 1396, 1228, 1381, 1481, 1640, 1636, 1230, 1329, 1232, 1233, 1313, 1311, 1234, 1408, 1534, 1535, 1404, 1235, 1536, 1468, 
1236, 1537, 1538, 1627, 1240, 1388, 1324, 1483, 1417, 1241, 1484, 1242, 1245, 1247, 1248, 1251, 1386, 1351, 1252, 1577, 1435, 1356, 1253, 1461, 1401, 1574, 1254, 1539, 1411, 1255, 1256, 1580, 1258, 1259, 1346, 1540, 1322, 1541, 1418, 1459, 1629, 1307, 1622, 1462, 1403, 1337, 1542, 1265, 1543, 1544, 1389, 1407, 1412, 1325, 1398, 1486, 1457, 1268, 1266, 1334, 1419, 1267, 1456, 1458, 1315, 1546, 1473, 1472, 1376, 1377, 1316, 1378, 1379, 1390, 1642, 1545, 1317, 1643, 1463, 1302, 1641, 1269, 1400, 1573, 1344, 1466, 1469, 1420, 1487, 1488, 1464, 1465, 1353, 1470, 1548, 1454, 1354, 1331, 1285, 1524, 1575, 1410, 1422, 1425, 1352, 1271, 1475, 1474, 1525, 1367, 1550, 1368, 1272, 1343, 1362, 1363, 1364, 1489, 1637, 1370, 1369, 1274, 1549, 1395, 1275, 1528, 1527, 1383, 1424, 1276, 1437, 1327, 1455, 1380, 1328, 1638, 1277, 1385, 1639, 1320, 1490, 1644, 1429, 1393, 1372, 1471, 1333, 1373, 1374, 1281, 1423, 1382, 1375, 1282, 1305, 1414, 1523, 1416, 1336, 1339, 1443, 1444, 1445, 1446, 1447, 1448, 1449, 1578, 1491, 1358, 1494, 1495, 1493, 1492, 1357, 1428, 1284, 1554, 1555, 1556, 1557, 1579, 1551, 1397, 1287, 1286, 1552, 1553, 1355, 1413, 1409, 1421, 1440, 1391, 1291, 1496, 1561, 1562, 1563, 1564, 1565, 1566, 1568, 1567, 1569, 1570, 1571, 1520, 1294, 1323, 1572, 1297, 1330, 1392, 1635, 1558, 1559, 1560, 1347, 1634, 1526, 1399, 1618, 1671, 1674, 376: 1646, 378: 1655, 1669, 1668, 1699, 400: 1658, 404: 1616, 1677, 1650, 410: 1682, 419: 1645, 1647, 429: 1675, 1649, 1648, 433: 1676, 435: 1654, 1680, 1689, 1711, 1653, 1690, 1691, 1652, 1666, 1667, 1705, 1697, 1700, 1706, 1707, 1702, 1703, 1708, 1701, 1704, 1695, 1673, 1685, 1686, 1688, 1684, 1678, 1679, 1670, 1681, 1683, 1672, 1687, 1692, 1693, 523: 1657, 1212, 1213, 1211, 1663, 1659, 1651, 1662, 1660, 1661, 1694, 1698, 1696, 1656, 1665, 1709, 1710, 1664, 1621, 1620, 1619, 1751}, + {1309, 1332, 1217, 1442, 1436, 1426, 10: 1280, 1625, 1477, 1511, 1504, 1497, 1507, 1500, 1499, 1501, 1517, 1509, 1503, 1515, 1516, 1513, 1514, 1502, 1498, 1505, 1506, 1508, 1512, 1510, 1547, 1453, 1451, 1452, 1314, 1623, 1226, 1441, 1244, 1288, 1246, 1225, 1628, 1263, 1434, 1299, 1335, 1522, 1521, 1270, 1338, 1298, 1476, 1221, 1231, 1340, 1439, 1341, 1257, 1518, 1519, 1438, 1326, 1350, 1273, 1278, 1430, 1431, 1283, 1289, 1384, 1632, 1432, 1433, 1219, 1222, 1224, 1223, 1238, 1626, 1482, 1427, 1243, 1249, 1261, 1262, 1250, 1485, 1405, 1318, 1319, 1279, 1450, 1290, 1631, 1630, 1415, 1295, 1300, 1633, 1402, 1214, 1529, 1215, 1218, 1460, 1387, 1304, 1624, 1310, 1348, 1349, 1345, 1530, 1531, 1532, 1406, 1576, 1478, 1479, 1467, 1480, 1227, 1394, 1533, 1312, 1396, 1228, 1381, 1481, 1640, 1636, 1230, 1329, 1232, 1233, 1313, 1311, 1234, 1408, 1534, 1535, 1404, 1235, 1536, 1468, 1236, 1537, 1538, 1627, 1240, 1388, 1324, 1483, 1417, 1241, 1484, 1242, 1245, 1247, 1248, 1251, 1386, 1351, 1252, 1577, 1435, 1356, 1253, 1461, 1401, 1574, 1254, 1539, 1411, 1255, 1256, 1580, 1258, 1259, 1346, 1540, 1322, 1541, 1418, 1459, 1629, 1307, 1622, 1462, 1403, 1337, 1542, 1265, 1543, 1544, 1389, 1407, 1412, 1325, 1398, 1486, 1457, 1268, 1266, 1334, 1419, 1267, 1456, 1458, 1315, 1546, 1473, 1472, 1376, 1377, 1316, 1378, 1379, 1390, 1642, 1545, 1317, 1643, 1463, 1302, 1641, 1269, 1400, 1573, 1344, 1466, 1469, 1420, 1487, 1488, 1464, 1465, 1353, 1470, 1548, 1454, 1354, 1331, 1285, 1524, 1575, 1410, 1422, 1425, 1352, 1271, 1475, 1474, 1525, 1367, 1550, 1368, 1272, 1343, 1362, 1363, 1364, 1489, 1637, 1370, 1369, 1274, 1549, 1395, 1275, 1528, 1527, 1383, 1424, 1276, 1437, 1327, 1455, 1380, 1328, 1638, 1277, 1385, 
1639, 1320, 1490, 1644, 1429, 1393, 1372, 1471, 1333, 1373, 1374, 1281, 1423, 1382, 1375, 1282, 1305, 1414, 1523, 1416, 1336, 1339, 1443, 1444, 1445, 1446, 1447, 1448, 1449, 1578, 1491, 1358, 1494, 1495, 1493, 1492, 1357, 1428, 1284, 1554, 1555, 1556, 1557, 1579, 1551, 1397, 1287, 1286, 1552, 1553, 1355, 1413, 1409, 1421, 1440, 1391, 1291, 1496, 1561, 1562, 1563, 1564, 1565, 1566, 1568, 1567, 1569, 1570, 1571, 1520, 1294, 1323, 1572, 1297, 1330, 1392, 1635, 1558, 1559, 1560, 1347, 1634, 1526, 1399, 1618, 1671, 1674, 376: 1646, 378: 1655, 1669, 1668, 1699, 400: 1658, 404: 1616, 1677, 1650, 410: 1682, 419: 1645, 1647, 429: 1675, 1649, 1648, 433: 1676, 435: 1654, 1680, 1689, 1711, 1653, 1690, 1691, 1652, 1666, 1667, 1705, 1697, 1700, 1706, 1707, 1702, 1703, 1708, 1701, 1704, 1695, 1673, 1685, 1686, 1688, 1684, 1678, 1679, 1670, 1681, 1683, 1672, 1687, 1692, 1693, 523: 1657, 1212, 1213, 1211, 1663, 1659, 1651, 1662, 1660, 1661, 1694, 1698, 1696, 1656, 1665, 1709, 1710, 1664, 1621, 1620, 1619, 1746}, + {8: 1747, 392: 1720, 1719, 395: 1718, 1717, 1715, 409: 1748, 547: 1716, 1714}, + {377, 377, 377, 377, 377, 377, 377, 377, 377, 377, 377, 377, 377, 377, 377, 377, 377, 377, 377, 377, 377, 377, 377, 377, 377, 377, 377, 377, 377, 377, 377, 377, 377, 377, 377, 377, 377, 377, 377, 377, 377, 377, 377, 377, 377, 377, 377, 377, 377, 377, 377, 377, 377, 377, 377, 377, 377, 377, 377, 377, 377, 377, 377, 377, 377, 377, 377, 377, 377, 377, 377, 377, 377, 377, 377, 377, 377, 377, 377, 377, 377, 377, 377, 377, 377, 377, 377, 377, 377, 377, 377, 377, 377, 377, 377, 377, 377, 377, 377, 377, 377, 377, 377, 377, 377, 377, 377, 377, 377, 377, 377, 377, 377, 377, 377, 377, 377, 377, 377, 377, 377, 377, 377, 377, 377, 377, 377, 377, 377, 377, 377, 377, 377, 377, 377, 377, 377, 377, 377, 377, 377, 377, 377, 377, 377, 377, 377, 377, 377, 377, 377, 377, 377, 377, 377, 377, 377, 377, 377, 377, 377, 377, 377, 377, 377, 377, 377, 377, 377, 377, 377, 377, 377, 377, 377, 377, 377, 377, 377, 377, 377, 377, 377, 377, 377, 377, 377, 377, 377, 377, 377, 377, 377, 377, 377, 377, 377, 377, 377, 377, 377, 377, 377, 377, 377, 377, 377, 377, 377, 377, 377, 377, 377, 377, 377, 377, 377, 377, 377, 377, 377, 377, 377, 377, 377, 377, 377, 377, 377, 377, 377, 377, 377, 377, 377, 377, 377, 377, 377, 377, 377, 377, 377, 377, 377, 377, 377, 377, 377, 377, 377, 377, 377, 377, 377, 377, 377, 377, 377, 377, 377, 377, 377, 377, 377, 377, 377, 377, 377, 377, 377, 377, 377, 377, 377, 377, 377, 377, 377, 377, 377, 377, 377, 377, 377, 377, 377, 377, 377, 377, 377, 377, 377, 377, 377, 377, 377, 377, 377, 377, 377, 377, 377, 377, 377, 377, 377, 377, 377, 377, 377, 377, 377, 377, 377, 377, 377, 377, 377, 377, 377, 377, 377, 377, 377, 377, 377, 377, 377, 377, 377, 377, 377, 377, 377, 377, 377, 377, 377, 377, 377, 377, 377, 377, 377, 377, 377, 377, 377, 377, 377, 377, 377, 377, 377, 377, 377, 377, 377, 377, 377, 377, 377, 377, 377, 377, 377, 377, 377, 377, 377, 377, 377, 375: 377, 377: 377, 377, 377, 377, 377, 384: 377, 386: 377, 391: 377, 377, 377, 377, 377, 377, 377, 377, 377, 401: 377, 377, 377, 407: 377, 377, 377, 411: 377, 377, 377, 377, 377, 377, 377, 377, 421: 377, 377, 377, 377, 377, 377, 377, 377, 432: 377, 434: 377}, + {1309, 1332, 1217, 1442, 1436, 1426, 10: 1280, 1625, 1477, 1511, 1504, 1497, 1507, 1500, 1499, 1501, 1517, 1509, 1503, 1515, 1516, 1513, 1514, 1502, 1498, 1505, 1506, 1508, 1512, 1510, 1547, 1453, 1451, 1452, 1314, 1623, 1226, 1441, 1244, 1288, 1246, 1225, 1628, 1263, 1434, 1299, 1335, 1522, 1521, 1270, 1338, 1298, 1476, 1221, 
1231, 1340, 1439, 1341, 1257, 1518, 1519, 1438, 1326, 1350, 1273, 1278, 1430, 1431, 1283, 1289, 1384, 1632, 1432, 1433, 1219, 1222, 1224, 1223, 1238, 1626, 1482, 1427, 1243, 1249, 1261, 1262, 1250, 1485, 1405, 1318, 1319, 1279, 1450, 1290, 1631, 1630, 1415, 1295, 1300, 1633, 1402, 1214, 1529, 1215, 1218, 1460, 1387, 1304, 1624, 1310, 1348, 1349, 1345, 1530, 1531, 1532, 1406, 1576, 1478, 1479, 1467, 1480, 1227, 1394, 1533, 1312, 1396, 1228, 1381, 1481, 1640, 1636, 1230, 1329, 1232, 1233, 1313, 1311, 1234, 1408, 1534, 1535, 1404, 1235, 1536, 1468, 1236, 1537, 1538, 1627, 1240, 1388, 1324, 1483, 1417, 1241, 1484, 1242, 1245, 1247, 1248, 1251, 1386, 1351, 1252, 1577, 1435, 1356, 1253, 1461, 1401, 1574, 1254, 1539, 1411, 1255, 1256, 1580, 1258, 1259, 1346, 1540, 1322, 1541, 1418, 1459, 1629, 1307, 1622, 1462, 1403, 1337, 1542, 1265, 1543, 1544, 1389, 1407, 1412, 1325, 1398, 1486, 1457, 1268, 1266, 1334, 1419, 1267, 1456, 1458, 1315, 1546, 1473, 1472, 1376, 1377, 1316, 1378, 1379, 1390, 1642, 1545, 1317, 1643, 1463, 1302, 1641, 1269, 1400, 1573, 1344, 1466, 1469, 1420, 1487, 1488, 1464, 1465, 1353, 1470, 1548, 1454, 1354, 1331, 1285, 1524, 1575, 1410, 1422, 1425, 1352, 1271, 1475, 1474, 1525, 1367, 1550, 1368, 1272, 1343, 1362, 1363, 1364, 1489, 1637, 1370, 1369, 1274, 1549, 1395, 1275, 1528, 1527, 1383, 1424, 1276, 1437, 1327, 1455, 1380, 1328, 1638, 1277, 1385, 1639, 1320, 1490, 1644, 1429, 1393, 1372, 1471, 1333, 1373, 1374, 1281, 1423, 1382, 1375, 1282, 1305, 1414, 1523, 1416, 1336, 1339, 1443, 1444, 1445, 1446, 1447, 1448, 1449, 1578, 1491, 1358, 1494, 1495, 1493, 1492, 1357, 1428, 1284, 1554, 1555, 1556, 1557, 1579, 1551, 1397, 1287, 1286, 1552, 1553, 1355, 1413, 1409, 1421, 1440, 1391, 1291, 1496, 1561, 1562, 1563, 1564, 1565, 1566, 1568, 1567, 1569, 1570, 1571, 1520, 1294, 1323, 1572, 1297, 1330, 1392, 1635, 1558, 1559, 1560, 1347, 1634, 1526, 1399, 1618, 1671, 1674, 376: 1646, 378: 1655, 1669, 1668, 1699, 400: 1658, 404: 1616, 1677, 1650, 410: 1682, 419: 1645, 1647, 429: 1675, 1649, 1648, 433: 1676, 435: 1654, 1680, 1689, 1711, 1653, 1690, 1691, 1652, 1666, 1667, 1705, 1697, 1700, 1706, 1707, 1702, 1703, 1708, 1701, 1704, 1695, 1673, 1685, 1686, 1688, 1684, 1678, 1679, 1670, 1681, 1683, 1672, 1687, 1692, 1693, 523: 1657, 1212, 1213, 1211, 1663, 1659, 1651, 1662, 1660, 1661, 1694, 1698, 1696, 1656, 1665, 1709, 1710, 1664, 1621, 1620, 1619, 1749}, + // 590 + {8: 1750, 392: 1720, 1719, 395: 1718, 1717, 1715, 547: 1716, 1714}, + {375, 375, 375, 375, 375, 375, 375, 375, 375, 375, 375, 375, 375, 375, 375, 375, 375, 375, 375, 375, 375, 375, 375, 375, 375, 375, 375, 375, 375, 375, 375, 375, 375, 375, 375, 375, 375, 375, 375, 375, 375, 375, 375, 375, 375, 375, 375, 375, 375, 375, 375, 375, 375, 375, 375, 375, 375, 375, 375, 375, 375, 375, 375, 375, 375, 375, 375, 375, 375, 375, 375, 375, 375, 375, 375, 375, 375, 375, 375, 375, 375, 375, 375, 375, 375, 375, 375, 375, 375, 375, 375, 375, 375, 375, 375, 375, 375, 375, 375, 375, 375, 375, 375, 375, 375, 375, 375, 375, 375, 375, 375, 375, 375, 375, 375, 375, 375, 375, 375, 375, 375, 375, 375, 375, 375, 375, 375, 375, 375, 375, 375, 375, 375, 375, 375, 375, 375, 375, 375, 375, 375, 375, 375, 375, 375, 375, 375, 375, 375, 375, 375, 375, 375, 375, 375, 375, 375, 375, 375, 375, 375, 375, 375, 375, 375, 375, 375, 375, 375, 375, 375, 375, 375, 375, 375, 375, 375, 375, 375, 375, 375, 375, 375, 375, 375, 375, 375, 375, 375, 375, 375, 375, 375, 375, 375, 375, 375, 375, 375, 375, 375, 375, 375, 375, 375, 375, 375, 375, 375, 375, 375, 375, 375, 375, 375, 375, 
375, 375, 375, 375, 375, 375, 375, 375, 375, 375, 375, 375, 375, 375, 375, 375, 375, 375, 375, 375, 375, 375, 375, 375, 375, 375, 375, 375, 375, 375, 375, 375, 375, 375, 375, 375, 375, 375, 375, 375, 375, 375, 375, 375, 375, 375, 375, 375, 375, 375, 375, 375, 375, 375, 375, 375, 375, 375, 375, 375, 375, 375, 375, 375, 375, 375, 375, 375, 375, 375, 375, 375, 375, 375, 375, 375, 375, 375, 375, 375, 375, 375, 375, 375, 375, 375, 375, 375, 375, 375, 375, 375, 375, 375, 375, 375, 375, 375, 375, 375, 375, 375, 375, 375, 375, 375, 375, 375, 375, 375, 375, 375, 375, 375, 375, 375, 375, 375, 375, 375, 375, 375, 375, 375, 375, 375, 375, 375, 375, 375, 375, 375, 375, 375, 375, 375, 375, 375, 375, 375, 375, 375, 375, 375, 375, 375, 375, 375, 375, 375, 375, 375, 375, 375, 375, 375, 375, 375: 375, 377: 375, 375, 375, 375, 375, 384: 375, 386: 375, 391: 375, 375, 375, 375, 375, 375, 375, 375, 375, 401: 375, 375, 375, 407: 375, 375, 375, 411: 375, 375, 375, 375, 375, 375, 375, 375, 421: 375, 375, 375, 375, 375, 375, 375, 375, 432: 375, 434: 375}, + {8: 1752, 1753, 392: 1720, 1719, 395: 1718, 1717, 1715, 547: 1716, 1714}, + {378, 378, 378, 378, 378, 378, 378, 378, 378, 378, 378, 378, 378, 378, 378, 378, 378, 378, 378, 378, 378, 378, 378, 378, 378, 378, 378, 378, 378, 378, 378, 378, 378, 378, 378, 378, 378, 378, 378, 378, 378, 378, 378, 378, 378, 378, 378, 378, 378, 378, 378, 378, 378, 378, 378, 378, 378, 378, 378, 378, 378, 378, 378, 378, 378, 378, 378, 378, 378, 378, 378, 378, 378, 378, 378, 378, 378, 378, 378, 378, 378, 378, 378, 378, 378, 378, 378, 378, 378, 378, 378, 378, 378, 378, 378, 378, 378, 378, 378, 378, 378, 378, 378, 378, 378, 378, 378, 378, 378, 378, 378, 378, 378, 378, 378, 378, 378, 378, 378, 378, 378, 378, 378, 378, 378, 378, 378, 378, 378, 378, 378, 378, 378, 378, 378, 378, 378, 378, 378, 378, 378, 378, 378, 378, 378, 378, 378, 378, 378, 378, 378, 378, 378, 378, 378, 378, 378, 378, 378, 378, 378, 378, 378, 378, 378, 378, 378, 378, 378, 378, 378, 378, 378, 378, 378, 378, 378, 378, 378, 378, 378, 378, 378, 378, 378, 378, 378, 378, 378, 378, 378, 378, 378, 378, 378, 378, 378, 378, 378, 378, 378, 378, 378, 378, 378, 378, 378, 378, 378, 378, 378, 378, 378, 378, 378, 378, 378, 378, 378, 378, 378, 378, 378, 378, 378, 378, 378, 378, 378, 378, 378, 378, 378, 378, 378, 378, 378, 378, 378, 378, 378, 378, 378, 378, 378, 378, 378, 378, 378, 378, 378, 378, 378, 378, 378, 378, 378, 378, 378, 378, 378, 378, 378, 378, 378, 378, 378, 378, 378, 378, 378, 378, 378, 378, 378, 378, 378, 378, 378, 378, 378, 378, 378, 378, 378, 378, 378, 378, 378, 378, 378, 378, 378, 378, 378, 378, 378, 378, 378, 378, 378, 378, 378, 378, 378, 378, 378, 378, 378, 378, 378, 378, 378, 378, 378, 378, 378, 378, 378, 378, 378, 378, 378, 378, 378, 378, 378, 378, 378, 378, 378, 378, 378, 378, 378, 378, 378, 378, 378, 378, 378, 378, 378, 378, 378, 378, 378, 378, 378, 378, 378, 378, 378, 378, 378, 378, 378, 378, 378, 378, 378, 378, 378, 378, 378, 378, 378, 378, 378, 378, 378, 378, 378, 375: 378, 377: 378, 378, 378, 378, 378, 384: 378, 386: 378, 391: 378, 378, 378, 378, 378, 378, 378, 378, 378, 401: 378, 378, 378, 407: 378, 378, 378, 411: 378, 378, 378, 378, 378, 378, 378, 378, 421: 378, 378, 378, 378, 378, 378, 378, 378, 432: 378, 434: 378}, + {1309, 1332, 1217, 1442, 1436, 1426, 10: 1280, 1625, 1477, 1511, 1504, 1497, 1507, 1500, 1499, 1501, 1517, 1509, 1503, 1515, 1516, 1513, 1514, 1502, 1498, 1505, 1506, 1508, 1512, 1510, 1547, 1453, 1451, 1452, 1314, 1623, 1226, 1441, 1244, 1288, 1246, 1225, 1628, 1263, 1434, 1299, 1335, 1522, 1521, 
1270, 1338, 1298, 1476, 1221, 1231, 1340, 1439, 1341, 1257, 1518, 1519, 1438, 1326, 1350, 1273, 1278, 1430, 1431, 1283, 1289, 1384, 1632, 1432, 1433, 1219, 1222, 1224, 1223, 1238, 1626, 1482, 1427, 1243, 1249, 1261, 1262, 1250, 1485, 1405, 1318, 1319, 1279, 1450, 1290, 1631, 1630, 1415, 1295, 1300, 1633, 1402, 1214, 1529, 1215, 1218, 1460, 1387, 1304, 1624, 1310, 1348, 1349, 1345, 1530, 1531, 1532, 1406, 1576, 1478, 1479, 1467, 1480, 1227, 1394, 1533, 1312, 1396, 1228, 1381, 1481, 1640, 1636, 1230, 1329, 1232, 1233, 1313, 1311, 1234, 1408, 1534, 1535, 1404, 1235, 1536, 1468, 1236, 1537, 1538, 1627, 1240, 1388, 1324, 1483, 1417, 1241, 1484, 1242, 1245, 1247, 1248, 1251, 1386, 1351, 1252, 1577, 1435, 1356, 1253, 1461, 1401, 1574, 1254, 1539, 1411, 1255, 1256, 1580, 1258, 1259, 1346, 1540, 1322, 1541, 1418, 1459, 1629, 1307, 1622, 1462, 1403, 1337, 1542, 1265, 1543, 1544, 1389, 1407, 1412, 1325, 1398, 1486, 1457, 1268, 1266, 1334, 1419, 1267, 1456, 1458, 1315, 1546, 1473, 1472, 1376, 1377, 1316, 1378, 1379, 1390, 1642, 1545, 1317, 1643, 1463, 1302, 1641, 1269, 1400, 1573, 1344, 1466, 1469, 1420, 1487, 1488, 1464, 1465, 1353, 1470, 1548, 1454, 1354, 1331, 1285, 1524, 1575, 1410, 1422, 1425, 1352, 1271, 1475, 1474, 1525, 1367, 1550, 1368, 1272, 1343, 1362, 1363, 1364, 1489, 1637, 1370, 1369, 1274, 1549, 1395, 1275, 1528, 1527, 1383, 1424, 1276, 1437, 1327, 1455, 1380, 1328, 1638, 1277, 1385, 1639, 1320, 1490, 1644, 1429, 1393, 1372, 1471, 1333, 1373, 1374, 1281, 1423, 1382, 1375, 1282, 1305, 1414, 1523, 1416, 1336, 1339, 1443, 1444, 1445, 1446, 1447, 1448, 1449, 1578, 1491, 1358, 1494, 1495, 1493, 1492, 1357, 1428, 1284, 1554, 1555, 1556, 1557, 1579, 1551, 1397, 1287, 1286, 1552, 1553, 1355, 1413, 1409, 1421, 1440, 1391, 1291, 1496, 1561, 1562, 1563, 1564, 1565, 1566, 1568, 1567, 1569, 1570, 1571, 1520, 1294, 1323, 1572, 1297, 1330, 1392, 1635, 1558, 1559, 1560, 1347, 1634, 1526, 1399, 1618, 1671, 1674, 376: 1646, 378: 1655, 1669, 1668, 1699, 400: 1658, 404: 1616, 1677, 1650, 410: 1682, 419: 1645, 1647, 429: 1675, 1649, 1648, 433: 1676, 435: 1654, 1680, 1689, 1711, 1653, 1690, 1691, 1652, 1666, 1667, 1705, 1697, 1700, 1706, 1707, 1702, 1703, 1708, 1701, 1704, 1695, 1673, 1685, 1686, 1688, 1684, 1678, 1679, 1670, 1681, 1683, 1672, 1687, 1692, 1693, 523: 1657, 1212, 1213, 1211, 1663, 1659, 1651, 1662, 1660, 1661, 1694, 1698, 1696, 1656, 1665, 1709, 1710, 1664, 1621, 1620, 1619, 1754}, + // 595 + {8: 1755, 392: 1720, 1719, 395: 1718, 1717, 1715, 547: 1716, 1714}, + {376, 376, 376, 376, 376, 376, 376, 376, 376, 376, 376, 376, 376, 376, 376, 376, 376, 376, 376, 376, 376, 376, 376, 376, 376, 376, 376, 376, 376, 376, 376, 376, 376, 376, 376, 376, 376, 376, 376, 376, 376, 376, 376, 376, 376, 376, 376, 376, 376, 376, 376, 376, 376, 376, 376, 376, 376, 376, 376, 376, 376, 376, 376, 376, 376, 376, 376, 376, 376, 376, 376, 376, 376, 376, 376, 376, 376, 376, 376, 376, 376, 376, 376, 376, 376, 376, 376, 376, 376, 376, 376, 376, 376, 376, 376, 376, 376, 376, 376, 376, 376, 376, 376, 376, 376, 376, 376, 376, 376, 376, 376, 376, 376, 376, 376, 376, 376, 376, 376, 376, 376, 376, 376, 376, 376, 376, 376, 376, 376, 376, 376, 376, 376, 376, 376, 376, 376, 376, 376, 376, 376, 376, 376, 376, 376, 376, 376, 376, 376, 376, 376, 376, 376, 376, 376, 376, 376, 376, 376, 376, 376, 376, 376, 376, 376, 376, 376, 376, 376, 376, 376, 376, 376, 376, 376, 376, 376, 376, 376, 376, 376, 376, 376, 376, 376, 376, 376, 376, 376, 376, 376, 376, 376, 376, 376, 376, 376, 376, 376, 376, 376, 376, 376, 376, 376, 376, 376, 376, 376, 376, 
376, 376, 376, 376, 376, 376, 376, 376, 376, 376, 376, 376, 376, 376, 376, 376, 376, 376, 376, 376, 376, 376, 376, 376, 376, 376, 376, 376, 376, 376, 376, 376, 376, 376, 376, 376, 376, 376, 376, 376, 376, 376, 376, 376, 376, 376, 376, 376, 376, 376, 376, 376, 376, 376, 376, 376, 376, 376, 376, 376, 376, 376, 376, 376, 376, 376, 376, 376, 376, 376, 376, 376, 376, 376, 376, 376, 376, 376, 376, 376, 376, 376, 376, 376, 376, 376, 376, 376, 376, 376, 376, 376, 376, 376, 376, 376, 376, 376, 376, 376, 376, 376, 376, 376, 376, 376, 376, 376, 376, 376, 376, 376, 376, 376, 376, 376, 376, 376, 376, 376, 376, 376, 376, 376, 376, 376, 376, 376, 376, 376, 376, 376, 376, 376, 376, 376, 376, 376, 376, 376, 376, 376, 376, 376, 376, 376, 376, 376, 376, 376, 376, 376, 376, 376, 376, 376, 376, 376, 376, 376, 376, 376, 376, 375: 376, 377: 376, 376, 376, 376, 376, 384: 376, 386: 376, 391: 376, 376, 376, 376, 376, 376, 376, 376, 376, 401: 376, 376, 376, 407: 376, 376, 376, 411: 376, 376, 376, 376, 376, 376, 376, 376, 421: 376, 376, 376, 376, 376, 376, 376, 376, 432: 376, 434: 376}, + {1309, 1332, 1217, 1442, 1436, 1426, 10: 1280, 1625, 1477, 1511, 1504, 1497, 1507, 1500, 1499, 1501, 1517, 1509, 1503, 1515, 1516, 1513, 1514, 1502, 1498, 1505, 1506, 1508, 1512, 1510, 1547, 1453, 1451, 1452, 1314, 1623, 1226, 1441, 1244, 1288, 1246, 1225, 1628, 1263, 1434, 1299, 1335, 1522, 1521, 1270, 1338, 1298, 1476, 1221, 1231, 1340, 1439, 1341, 1257, 1518, 1519, 1438, 1326, 1350, 1273, 1278, 1430, 1431, 1283, 1289, 1384, 1632, 1432, 1433, 1219, 1222, 1224, 1223, 1238, 1626, 1482, 1427, 1243, 1249, 1261, 1262, 1250, 1485, 1405, 1318, 1319, 1279, 1450, 1290, 1631, 1630, 1415, 1295, 1300, 1633, 1402, 1214, 1529, 1215, 1218, 1460, 1387, 1304, 1624, 1310, 1348, 1349, 1345, 1530, 1531, 1532, 1406, 1576, 1478, 1479, 1467, 1480, 1227, 1394, 1533, 1312, 1396, 1228, 1381, 1481, 1640, 1636, 1230, 1329, 1232, 1233, 1313, 1311, 1234, 1408, 1534, 1535, 1404, 1235, 1536, 1468, 1236, 1537, 1538, 1627, 1240, 1388, 1324, 1483, 1417, 1241, 1484, 1242, 1245, 1247, 1248, 1251, 1386, 1351, 1252, 1577, 1435, 1356, 1253, 1461, 1401, 1574, 1254, 1539, 1411, 1255, 1256, 1580, 1258, 1259, 1346, 1540, 1322, 1541, 1418, 1459, 1629, 1307, 1622, 1462, 1403, 1337, 1542, 1265, 1543, 1544, 1389, 1407, 1412, 1325, 1398, 1486, 1457, 1268, 1266, 1334, 1419, 1267, 1456, 1458, 1315, 1546, 1473, 1472, 1376, 1377, 1316, 1378, 1379, 1390, 1642, 1545, 1317, 1643, 1463, 1302, 1641, 1269, 1400, 1573, 1344, 1466, 1469, 1420, 1487, 1488, 1464, 1465, 1353, 1470, 1548, 1454, 1354, 1331, 1285, 1524, 1575, 1410, 1422, 1425, 1352, 1271, 1475, 1474, 1525, 1367, 1550, 1368, 1272, 1343, 1362, 1363, 1364, 1489, 1637, 1370, 1369, 1274, 1549, 1395, 1275, 1528, 1527, 1383, 1424, 1276, 1437, 1327, 1455, 1380, 1328, 1638, 1277, 1385, 1639, 1320, 1490, 1644, 1429, 1393, 1372, 1471, 1333, 1373, 1374, 1281, 1423, 1382, 1375, 1282, 1305, 1414, 1523, 1416, 1336, 1339, 1443, 1444, 1445, 1446, 1447, 1448, 1449, 1578, 1491, 1358, 1494, 1495, 1493, 1492, 1357, 1428, 1284, 1554, 1555, 1556, 1557, 1579, 1551, 1397, 1287, 1286, 1552, 1553, 1355, 1413, 1409, 1421, 1440, 1391, 1291, 1496, 1561, 1562, 1563, 1564, 1565, 1566, 1568, 1567, 1569, 1570, 1571, 1520, 1294, 1323, 1572, 1297, 1330, 1392, 1635, 1558, 1559, 1560, 1347, 1634, 1526, 1399, 373: 1671, 1674, 376: 1646, 378: 1655, 1669, 1668, 1699, 400: 1658, 404: 1758, 1677, 1650, 410: 1682, 419: 1645, 1647, 429: 1675, 1649, 1648, 433: 1676, 435: 1654, 1680, 1689, 1711, 1653, 1690, 1691, 1652, 1666, 1667, 1705, 1697, 1700, 1706, 1707, 1702, 1703, 
1708, 1701, 1704, 1695, 1673, 1685, 1686, 1688, 1684, 1678, 1679, 1670, 1681, 1683, 1672, 1687, 1692, 1693, 523: 1657, 1212, 1213, 1211, 1663, 1659, 1651, 1662, 1660, 1661, 1694, 1698, 1696, 1656, 1665, 1709, 1710, 1664, 1757}, + {379: 1763, 1764, 1769, 401: 1765, 421: 1767, 1760, 1766, 1770, 1759, 1768, 1761, 1762, 432: 1771}, + {184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 375: 184, 377: 184, 184, 184, 184, 184, 384: 184, 386: 184, 391: 184, 184, 184, 184, 184, 184, 184, 184, 184, 401: 184, 184, 184, 407: 184, 184, 184, 411: 184, 184, 184, 184, 184, 184, 184, 184, 421: 184, 184, 184, 184, 184, 184, 184, 184, 432: 184, 434: 184}, + // 600 + {1309, 1332, 1217, 1442, 1436, 1426, 10: 1280, 1625, 1477, 1511, 1504, 1497, 1507, 1500, 1499, 1501, 1517, 1509, 1503, 1515, 1516, 1513, 1514, 1502, 1498, 1505, 1506, 1508, 1512, 1510, 1547, 1453, 1451, 1452, 1314, 1623, 1226, 1441, 1244, 1288, 1246, 1225, 1628, 1263, 1434, 1299, 1335, 1522, 1521, 1270, 1338, 1298, 1476, 1221, 1231, 1340, 1439, 1341, 1257, 1518, 1519, 1438, 1326, 1350, 1273, 1278, 1430, 1431, 1283, 1289, 1384, 1632, 1432, 1433, 1219, 1222, 1224, 1223, 1238, 1626, 1482, 1427, 1243, 1249, 1261, 1262, 1250, 1485, 1405, 1318, 1319, 1279, 1450, 1290, 1631, 1630, 1415, 1295, 1300, 1633, 1402, 1214, 1529, 1215, 1218, 1460, 1387, 1304, 1624, 1310, 1348, 1349, 1345, 1530, 1531, 1532, 1406, 1576, 1478, 1479, 1467, 1480, 1227, 1394, 1533, 1312, 1396, 1228, 1381, 1481, 1640, 1636, 1230, 1329, 1232, 1233, 1313, 1311, 1234, 1408, 1534, 1535, 1404, 1235, 1536, 1468, 1236, 1537, 1538, 1627, 1240, 1388, 1324, 1483, 1417, 1241, 1484, 1242, 1245, 1247, 1248, 1251, 1386, 1351, 1252, 1577, 1435, 1356, 1253, 1461, 1401, 1574, 1254, 1539, 1411, 1255, 1256, 1580, 1258, 1259, 
1346, 1540, 1322, 1541, 1418, 1459, 1629, 1307, 1622, 1462, 1403, 1337, 1542, 1265, 1543, 1544, 1389, 1407, 1412, 1325, 1398, 1486, 1457, 1268, 1266, 1334, 1419, 1267, 1456, 1458, 1315, 1546, 1473, 1472, 1376, 1377, 1316, 1378, 1379, 1390, 1642, 1545, 1317, 1643, 1463, 1302, 1641, 1269, 1400, 1573, 1344, 1466, 1469, 1420, 1487, 1488, 1464, 1465, 1353, 1470, 1548, 1454, 1354, 1331, 1285, 1524, 1575, 1410, 1422, 1425, 1352, 1271, 1475, 1474, 1525, 1367, 1550, 1368, 1272, 1343, 1362, 1363, 1364, 1489, 1637, 1370, 1369, 1274, 1549, 1395, 1275, 1528, 1527, 1383, 1424, 1276, 1437, 1327, 1455, 1380, 1328, 1638, 1277, 1385, 1639, 1320, 1490, 1644, 1429, 1393, 1372, 1471, 1333, 1373, 1374, 1281, 1423, 1382, 1375, 1282, 1305, 1414, 1523, 1416, 1336, 1339, 1443, 1444, 1445, 1446, 1447, 1448, 1449, 1578, 1491, 1358, 1494, 1495, 1493, 1492, 1357, 1428, 1284, 1554, 1555, 1556, 1557, 1579, 1551, 1397, 1287, 1286, 1552, 1553, 1355, 1413, 1409, 1421, 1440, 1391, 1291, 1496, 1561, 1562, 1563, 1564, 1565, 1566, 1568, 1567, 1569, 1570, 1571, 1520, 1294, 1323, 1572, 1297, 1330, 1392, 1635, 1558, 1559, 1560, 1347, 1634, 1526, 1399, 373: 1671, 1674, 376: 1646, 378: 1655, 1669, 1668, 1699, 400: 1658, 404: 1758, 1677, 1650, 410: 1682, 419: 1645, 1647, 429: 1675, 1649, 1648, 433: 1676, 435: 1654, 1680, 1689, 1711, 1653, 1690, 1691, 1652, 1666, 1667, 1705, 1697, 1700, 1706, 1707, 1702, 1703, 1708, 1701, 1704, 1695, 1673, 1685, 1686, 1688, 1684, 1678, 1679, 1670, 1681, 1683, 1672, 1687, 1692, 1693, 523: 1657, 1212, 1213, 1211, 1663, 1659, 1651, 1662, 1660, 1661, 1694, 1698, 1696, 1656, 1665, 1709, 1710, 1664, 1785}, + {1309, 1332, 1217, 1442, 1436, 1426, 10: 1280, 1625, 1477, 1511, 1504, 1497, 1507, 1500, 1499, 1501, 1517, 1509, 1503, 1515, 1516, 1513, 1514, 1502, 1498, 1505, 1506, 1508, 1512, 1510, 1547, 1453, 1451, 1452, 1314, 1623, 1226, 1441, 1244, 1288, 1246, 1225, 1628, 1263, 1434, 1299, 1335, 1522, 1521, 1270, 1338, 1298, 1476, 1221, 1231, 1340, 1439, 1341, 1257, 1518, 1519, 1438, 1326, 1350, 1273, 1278, 1430, 1431, 1283, 1289, 1384, 1632, 1432, 1433, 1219, 1222, 1224, 1223, 1238, 1626, 1482, 1427, 1243, 1249, 1261, 1262, 1250, 1485, 1405, 1318, 1319, 1279, 1450, 1290, 1631, 1630, 1415, 1295, 1300, 1633, 1402, 1214, 1529, 1215, 1218, 1460, 1387, 1304, 1624, 1310, 1348, 1349, 1345, 1530, 1531, 1532, 1406, 1576, 1478, 1479, 1467, 1480, 1227, 1394, 1533, 1312, 1396, 1228, 1381, 1481, 1640, 1636, 1230, 1329, 1232, 1233, 1313, 1311, 1234, 1408, 1534, 1535, 1404, 1235, 1536, 1468, 1236, 1537, 1538, 1627, 1240, 1388, 1324, 1483, 1417, 1241, 1484, 1242, 1245, 1247, 1248, 1251, 1386, 1351, 1252, 1577, 1435, 1356, 1253, 1461, 1401, 1574, 1254, 1539, 1411, 1255, 1256, 1580, 1258, 1259, 1346, 1540, 1322, 1541, 1418, 1459, 1629, 1307, 1622, 1462, 1403, 1337, 1542, 1265, 1543, 1544, 1389, 1407, 1412, 1325, 1398, 1486, 1457, 1268, 1266, 1334, 1419, 1267, 1456, 1458, 1315, 1546, 1473, 1472, 1376, 1377, 1316, 1378, 1379, 1390, 1642, 1545, 1317, 1643, 1463, 1302, 1641, 1269, 1400, 1573, 1344, 1466, 1469, 1420, 1487, 1488, 1464, 1465, 1353, 1470, 1548, 1454, 1354, 1331, 1285, 1524, 1575, 1410, 1422, 1425, 1352, 1271, 1475, 1474, 1525, 1367, 1550, 1368, 1272, 1343, 1362, 1363, 1364, 1489, 1637, 1370, 1369, 1274, 1549, 1395, 1275, 1528, 1527, 1383, 1424, 1276, 1437, 1327, 1455, 1380, 1328, 1638, 1277, 1385, 1639, 1320, 1490, 1644, 1429, 1393, 1372, 1471, 1333, 1373, 1374, 1281, 1423, 1382, 1375, 1282, 1305, 1414, 1523, 1416, 1336, 1339, 1443, 1444, 1445, 1446, 1447, 1448, 1449, 1578, 1491, 1358, 1494, 1495, 1493, 1492, 1357, 1428, 
1284, 1554, 1555, 1556, 1557, 1579, 1551, 1397, 1287, 1286, 1552, 1553, 1355, 1413, 1409, 1421, 1440, 1391, 1291, 1496, 1561, 1562, 1563, 1564, 1565, 1566, 1568, 1567, 1569, 1570, 1571, 1520, 1294, 1323, 1572, 1297, 1330, 1392, 1635, 1558, 1559, 1560, 1347, 1634, 1526, 1399, 373: 1671, 1674, 376: 1646, 378: 1655, 1669, 1668, 1699, 400: 1658, 404: 1758, 1677, 1650, 410: 1682, 419: 1645, 1647, 429: 1675, 1649, 1648, 433: 1676, 435: 1654, 1680, 1689, 1711, 1653, 1690, 1691, 1652, 1666, 1667, 1705, 1697, 1700, 1706, 1707, 1702, 1703, 1708, 1701, 1704, 1695, 1673, 1685, 1686, 1688, 1684, 1678, 1679, 1670, 1681, 1683, 1672, 1687, 1692, 1693, 523: 1657, 1212, 1213, 1211, 1663, 1659, 1651, 1662, 1660, 1661, 1694, 1698, 1696, 1656, 1665, 1709, 1710, 1664, 1784}, + {1309, 1332, 1217, 1442, 1436, 1426, 10: 1280, 1625, 1477, 1511, 1504, 1497, 1507, 1500, 1499, 1501, 1517, 1509, 1503, 1515, 1516, 1513, 1514, 1502, 1498, 1505, 1506, 1508, 1512, 1510, 1547, 1453, 1451, 1452, 1314, 1623, 1226, 1441, 1244, 1288, 1246, 1225, 1628, 1263, 1434, 1299, 1335, 1522, 1521, 1270, 1338, 1298, 1476, 1221, 1231, 1340, 1439, 1341, 1257, 1518, 1519, 1438, 1326, 1350, 1273, 1278, 1430, 1431, 1283, 1289, 1384, 1632, 1432, 1433, 1219, 1222, 1224, 1223, 1238, 1626, 1482, 1427, 1243, 1249, 1261, 1262, 1250, 1485, 1405, 1318, 1319, 1279, 1450, 1290, 1631, 1630, 1415, 1295, 1300, 1633, 1402, 1214, 1529, 1215, 1218, 1460, 1387, 1304, 1624, 1310, 1348, 1349, 1345, 1530, 1531, 1532, 1406, 1576, 1478, 1479, 1467, 1480, 1227, 1394, 1533, 1312, 1396, 1228, 1381, 1481, 1640, 1636, 1230, 1329, 1232, 1233, 1313, 1311, 1234, 1408, 1534, 1535, 1404, 1235, 1536, 1468, 1236, 1537, 1538, 1627, 1240, 1388, 1324, 1483, 1417, 1241, 1484, 1242, 1245, 1247, 1248, 1251, 1386, 1351, 1252, 1577, 1435, 1356, 1253, 1461, 1401, 1574, 1254, 1539, 1411, 1255, 1256, 1580, 1258, 1259, 1346, 1540, 1322, 1541, 1418, 1459, 1629, 1307, 1622, 1462, 1403, 1337, 1542, 1265, 1543, 1544, 1389, 1407, 1412, 1325, 1398, 1486, 1457, 1268, 1266, 1334, 1419, 1267, 1456, 1458, 1315, 1546, 1473, 1472, 1376, 1377, 1316, 1378, 1379, 1390, 1642, 1545, 1317, 1643, 1463, 1302, 1641, 1269, 1400, 1573, 1344, 1466, 1469, 1420, 1487, 1488, 1464, 1465, 1353, 1470, 1548, 1454, 1354, 1331, 1285, 1524, 1575, 1410, 1422, 1425, 1352, 1271, 1475, 1474, 1525, 1367, 1550, 1368, 1272, 1343, 1362, 1363, 1364, 1489, 1637, 1370, 1369, 1274, 1549, 1395, 1275, 1528, 1527, 1383, 1424, 1276, 1437, 1327, 1455, 1380, 1328, 1638, 1277, 1385, 1639, 1320, 1490, 1644, 1429, 1393, 1372, 1471, 1333, 1373, 1374, 1281, 1423, 1382, 1375, 1282, 1305, 1414, 1523, 1416, 1336, 1339, 1443, 1444, 1445, 1446, 1447, 1448, 1449, 1578, 1491, 1358, 1494, 1495, 1493, 1492, 1357, 1428, 1284, 1554, 1555, 1556, 1557, 1579, 1551, 1397, 1287, 1286, 1552, 1553, 1355, 1413, 1409, 1421, 1440, 1391, 1291, 1496, 1561, 1562, 1563, 1564, 1565, 1566, 1568, 1567, 1569, 1570, 1571, 1520, 1294, 1323, 1572, 1297, 1330, 1392, 1635, 1558, 1559, 1560, 1347, 1634, 1526, 1399, 373: 1671, 1674, 376: 1646, 378: 1655, 1669, 1668, 1699, 400: 1658, 404: 1758, 1677, 1650, 410: 1682, 419: 1645, 1647, 429: 1675, 1649, 1648, 433: 1676, 435: 1654, 1680, 1689, 1711, 1653, 1690, 1691, 1652, 1666, 1667, 1705, 1697, 1700, 1706, 1707, 1702, 1703, 1708, 1701, 1704, 1695, 1673, 1685, 1686, 1688, 1684, 1678, 1679, 1670, 1681, 1683, 1672, 1687, 1692, 1693, 523: 1657, 1212, 1213, 1211, 1663, 1659, 1651, 1662, 1660, 1661, 1694, 1698, 1696, 1656, 1665, 1709, 1710, 1664, 1783}, + {1309, 1332, 1217, 1442, 1436, 1426, 10: 1280, 1625, 1477, 1511, 1504, 1497, 1507, 
1500, 1499, 1501, 1517, 1509, 1503, 1515, 1516, 1513, 1514, 1502, 1498, 1505, 1506, 1508, 1512, 1510, 1547, 1453, 1451, 1452, 1314, 1623, 1226, 1441, 1244, 1288, 1246, 1225, 1628, 1263, 1434, 1299, 1335, 1522, 1521, 1270, 1338, 1298, 1476, 1221, 1231, 1340, 1439, 1341, 1257, 1518, 1519, 1438, 1326, 1350, 1273, 1278, 1430, 1431, 1283, 1289, 1384, 1632, 1432, 1433, 1219, 1222, 1224, 1223, 1238, 1626, 1482, 1427, 1243, 1249, 1261, 1262, 1250, 1485, 1405, 1318, 1319, 1279, 1450, 1290, 1631, 1630, 1415, 1295, 1300, 1633, 1402, 1214, 1529, 1215, 1218, 1460, 1387, 1304, 1624, 1310, 1348, 1349, 1345, 1530, 1531, 1532, 1406, 1576, 1478, 1479, 1467, 1480, 1227, 1394, 1533, 1312, 1396, 1228, 1381, 1481, 1640, 1636, 1230, 1329, 1232, 1233, 1313, 1311, 1234, 1408, 1534, 1535, 1404, 1235, 1536, 1468, 1236, 1537, 1538, 1627, 1240, 1388, 1324, 1483, 1417, 1241, 1484, 1242, 1245, 1247, 1248, 1251, 1386, 1351, 1252, 1577, 1435, 1356, 1253, 1461, 1401, 1574, 1254, 1539, 1411, 1255, 1256, 1580, 1258, 1259, 1346, 1540, 1322, 1541, 1418, 1459, 1629, 1307, 1622, 1462, 1403, 1337, 1542, 1265, 1543, 1544, 1389, 1407, 1412, 1325, 1398, 1486, 1457, 1268, 1266, 1334, 1419, 1267, 1456, 1458, 1315, 1546, 1473, 1472, 1376, 1377, 1316, 1378, 1379, 1390, 1642, 1545, 1317, 1643, 1463, 1302, 1641, 1269, 1400, 1573, 1344, 1466, 1469, 1420, 1487, 1488, 1464, 1465, 1353, 1470, 1548, 1454, 1354, 1331, 1285, 1524, 1575, 1410, 1422, 1425, 1352, 1271, 1475, 1474, 1525, 1367, 1550, 1368, 1272, 1343, 1362, 1363, 1364, 1489, 1637, 1370, 1369, 1274, 1549, 1395, 1275, 1528, 1527, 1383, 1424, 1276, 1437, 1327, 1455, 1380, 1328, 1638, 1277, 1385, 1639, 1320, 1490, 1644, 1429, 1393, 1372, 1471, 1333, 1373, 1374, 1281, 1423, 1382, 1375, 1282, 1305, 1414, 1523, 1416, 1336, 1339, 1443, 1444, 1445, 1446, 1447, 1448, 1449, 1578, 1491, 1358, 1494, 1495, 1493, 1492, 1357, 1428, 1284, 1554, 1555, 1556, 1557, 1579, 1551, 1397, 1287, 1286, 1552, 1553, 1355, 1413, 1409, 1421, 1440, 1391, 1291, 1496, 1561, 1562, 1563, 1564, 1565, 1566, 1568, 1567, 1569, 1570, 1571, 1520, 1294, 1323, 1572, 1297, 1330, 1392, 1635, 1558, 1559, 1560, 1347, 1634, 1526, 1399, 373: 1671, 1674, 376: 1646, 378: 1655, 1669, 1668, 1699, 400: 1658, 404: 1758, 1677, 1650, 410: 1682, 419: 1645, 1647, 429: 1675, 1649, 1648, 433: 1676, 435: 1654, 1680, 1689, 1711, 1653, 1690, 1691, 1652, 1666, 1667, 1705, 1697, 1700, 1706, 1707, 1702, 1703, 1708, 1701, 1704, 1695, 1673, 1685, 1686, 1688, 1684, 1678, 1679, 1670, 1681, 1683, 1672, 1687, 1692, 1693, 523: 1657, 1212, 1213, 1211, 1663, 1659, 1651, 1662, 1660, 1661, 1694, 1698, 1696, 1656, 1665, 1709, 1710, 1664, 1782}, + {1309, 1332, 1217, 1442, 1436, 1426, 10: 1280, 1625, 1477, 1511, 1504, 1497, 1507, 1500, 1499, 1501, 1517, 1509, 1503, 1515, 1516, 1513, 1514, 1502, 1498, 1505, 1506, 1508, 1512, 1510, 1547, 1453, 1451, 1452, 1314, 1623, 1226, 1441, 1244, 1288, 1246, 1225, 1628, 1263, 1434, 1299, 1335, 1522, 1521, 1270, 1338, 1298, 1476, 1221, 1231, 1340, 1439, 1341, 1257, 1518, 1519, 1438, 1326, 1350, 1273, 1278, 1430, 1431, 1283, 1289, 1384, 1632, 1432, 1433, 1219, 1222, 1224, 1223, 1238, 1626, 1482, 1427, 1243, 1249, 1261, 1262, 1250, 1485, 1405, 1318, 1319, 1279, 1450, 1290, 1631, 1630, 1415, 1295, 1300, 1633, 1402, 1214, 1529, 1215, 1218, 1460, 1387, 1304, 1624, 1310, 1348, 1349, 1345, 1530, 1531, 1532, 1406, 1576, 1478, 1479, 1467, 1480, 1227, 1394, 1533, 1312, 1396, 1228, 1381, 1481, 1640, 1636, 1230, 1329, 1232, 1233, 1313, 1311, 1234, 1408, 1534, 1535, 1404, 1235, 1536, 1468, 1236, 1537, 1538, 1627, 1240, 1388, 1324, 1483, 1417, 
1241, 1484, 1242, 1245, 1247, 1248, 1251, 1386, 1351, 1252, 1577, 1435, 1356, 1253, 1461, 1401, 1574, 1254, 1539, 1411, 1255, 1256, 1580, 1258, 1259, 1346, 1540, 1322, 1541, 1418, 1459, 1629, 1307, 1622, 1462, 1403, 1337, 1542, 1265, 1543, 1544, 1389, 1407, 1412, 1325, 1398, 1486, 1457, 1268, 1266, 1334, 1419, 1267, 1456, 1458, 1315, 1546, 1473, 1472, 1376, 1377, 1316, 1378, 1379, 1390, 1642, 1545, 1317, 1643, 1463, 1302, 1641, 1269, 1400, 1573, 1344, 1466, 1469, 1420, 1487, 1488, 1464, 1465, 1353, 1470, 1548, 1454, 1354, 1331, 1285, 1524, 1575, 1410, 1422, 1425, 1352, 1271, 1475, 1474, 1525, 1367, 1550, 1368, 1272, 1343, 1362, 1363, 1364, 1489, 1637, 1370, 1369, 1274, 1549, 1395, 1275, 1528, 1527, 1383, 1424, 1276, 1437, 1327, 1455, 1380, 1328, 1638, 1277, 1385, 1639, 1320, 1490, 1644, 1429, 1393, 1372, 1471, 1333, 1373, 1374, 1281, 1423, 1382, 1375, 1282, 1305, 1414, 1523, 1416, 1336, 1339, 1443, 1444, 1445, 1446, 1447, 1448, 1449, 1578, 1491, 1358, 1494, 1495, 1493, 1492, 1357, 1428, 1284, 1554, 1555, 1556, 1557, 1579, 1551, 1397, 1287, 1286, 1552, 1553, 1355, 1413, 1409, 1421, 1440, 1391, 1291, 1496, 1561, 1562, 1563, 1564, 1565, 1566, 1568, 1567, 1569, 1570, 1571, 1520, 1294, 1323, 1572, 1297, 1330, 1392, 1635, 1558, 1559, 1560, 1347, 1634, 1526, 1399, 373: 1671, 1674, 376: 1646, 378: 1655, 1669, 1668, 1699, 400: 1658, 404: 1758, 1677, 1650, 410: 1682, 419: 1645, 1647, 429: 1675, 1649, 1648, 433: 1676, 435: 1654, 1680, 1689, 1711, 1653, 1690, 1691, 1652, 1666, 1667, 1705, 1697, 1700, 1706, 1707, 1702, 1703, 1708, 1701, 1704, 1695, 1673, 1685, 1686, 1688, 1684, 1678, 1679, 1670, 1681, 1683, 1672, 1687, 1692, 1693, 523: 1657, 1212, 1213, 1211, 1663, 1659, 1651, 1662, 1660, 1661, 1694, 1698, 1696, 1656, 1665, 1709, 1710, 1664, 1781}, + // 605 + {1309, 1332, 1217, 1442, 1436, 1426, 10: 1280, 1625, 1477, 1511, 1504, 1497, 1507, 1500, 1499, 1501, 1517, 1509, 1503, 1515, 1516, 1513, 1514, 1502, 1498, 1505, 1506, 1508, 1512, 1510, 1547, 1453, 1451, 1452, 1314, 1623, 1226, 1441, 1244, 1288, 1246, 1225, 1628, 1263, 1434, 1299, 1335, 1522, 1521, 1270, 1338, 1298, 1476, 1221, 1231, 1340, 1439, 1341, 1257, 1518, 1519, 1438, 1326, 1350, 1273, 1278, 1430, 1431, 1283, 1289, 1384, 1632, 1432, 1433, 1219, 1222, 1224, 1223, 1238, 1626, 1482, 1427, 1243, 1249, 1261, 1262, 1250, 1485, 1405, 1318, 1319, 1279, 1450, 1290, 1631, 1630, 1415, 1295, 1300, 1633, 1402, 1214, 1529, 1215, 1218, 1460, 1387, 1304, 1624, 1310, 1348, 1349, 1345, 1530, 1531, 1532, 1406, 1576, 1478, 1479, 1467, 1480, 1227, 1394, 1533, 1312, 1396, 1228, 1381, 1481, 1640, 1636, 1230, 1329, 1232, 1233, 1313, 1311, 1234, 1408, 1534, 1535, 1404, 1235, 1536, 1468, 1236, 1537, 1538, 1627, 1240, 1388, 1324, 1483, 1417, 1241, 1484, 1242, 1245, 1247, 1248, 1251, 1386, 1351, 1252, 1577, 1435, 1356, 1253, 1461, 1401, 1574, 1254, 1539, 1411, 1255, 1256, 1580, 1258, 1259, 1346, 1540, 1322, 1541, 1418, 1459, 1629, 1307, 1622, 1462, 1403, 1337, 1542, 1265, 1543, 1544, 1389, 1407, 1412, 1325, 1398, 1486, 1457, 1268, 1266, 1334, 1419, 1267, 1456, 1458, 1315, 1546, 1473, 1472, 1376, 1377, 1316, 1378, 1379, 1390, 1642, 1545, 1317, 1643, 1463, 1302, 1641, 1269, 1400, 1573, 1344, 1466, 1469, 1420, 1487, 1488, 1464, 1465, 1353, 1470, 1548, 1454, 1354, 1331, 1285, 1524, 1575, 1410, 1422, 1425, 1352, 1271, 1475, 1474, 1525, 1367, 1550, 1368, 1272, 1343, 1362, 1363, 1364, 1489, 1637, 1370, 1369, 1274, 1549, 1395, 1275, 1528, 1527, 1383, 1424, 1276, 1437, 1327, 1455, 1380, 1328, 1638, 1277, 1385, 1639, 1320, 1490, 1644, 1429, 1393, 1372, 1471, 1333, 1373, 1374, 
1281, 1423, 1382, 1375, 1282, 1305, 1414, 1523, 1416, 1336, 1339, 1443, 1444, 1445, 1446, 1447, 1448, 1449, 1578, 1491, 1358, 1494, 1495, 1493, 1492, 1357, 1428, 1284, 1554, 1555, 1556, 1557, 1579, 1551, 1397, 1287, 1286, 1552, 1553, 1355, 1413, 1409, 1421, 1440, 1391, 1291, 1496, 1561, 1562, 1563, 1564, 1565, 1566, 1568, 1567, 1569, 1570, 1571, 1520, 1294, 1323, 1572, 1297, 1330, 1392, 1635, 1558, 1559, 1560, 1347, 1634, 1526, 1399, 373: 1671, 1674, 376: 1646, 378: 1655, 1669, 1668, 1699, 400: 1658, 404: 1758, 1677, 1650, 410: 1682, 419: 1645, 1647, 429: 1675, 1649, 1648, 433: 1676, 435: 1654, 1680, 1689, 1711, 1653, 1690, 1691, 1652, 1666, 1667, 1705, 1697, 1700, 1706, 1707, 1702, 1703, 1708, 1701, 1704, 1695, 1673, 1685, 1686, 1688, 1684, 1678, 1679, 1670, 1681, 1683, 1672, 1687, 1692, 1693, 523: 1657, 1212, 1213, 1211, 1663, 1659, 1651, 1662, 1660, 1661, 1694, 1698, 1696, 1656, 1665, 1709, 1710, 1664, 1780}, + {1309, 1332, 1217, 1442, 1436, 1426, 10: 1280, 1625, 1477, 1511, 1504, 1497, 1507, 1500, 1499, 1501, 1517, 1509, 1503, 1515, 1516, 1513, 1514, 1502, 1498, 1505, 1506, 1508, 1512, 1510, 1547, 1453, 1451, 1452, 1314, 1623, 1226, 1441, 1244, 1288, 1246, 1225, 1628, 1263, 1434, 1299, 1335, 1522, 1521, 1270, 1338, 1298, 1476, 1221, 1231, 1340, 1439, 1341, 1257, 1518, 1519, 1438, 1326, 1350, 1273, 1278, 1430, 1431, 1283, 1289, 1384, 1632, 1432, 1433, 1219, 1222, 1224, 1223, 1238, 1626, 1482, 1427, 1243, 1249, 1261, 1262, 1250, 1485, 1405, 1318, 1319, 1279, 1450, 1290, 1631, 1630, 1415, 1295, 1300, 1633, 1402, 1214, 1529, 1215, 1218, 1460, 1387, 1304, 1624, 1310, 1348, 1349, 1345, 1530, 1531, 1532, 1406, 1576, 1478, 1479, 1467, 1480, 1227, 1394, 1533, 1312, 1396, 1228, 1381, 1481, 1640, 1636, 1230, 1329, 1232, 1233, 1313, 1311, 1234, 1408, 1534, 1535, 1404, 1235, 1536, 1468, 1236, 1537, 1538, 1627, 1240, 1388, 1324, 1483, 1417, 1241, 1484, 1242, 1245, 1247, 1248, 1251, 1386, 1351, 1252, 1577, 1435, 1356, 1253, 1461, 1401, 1574, 1254, 1539, 1411, 1255, 1256, 1580, 1258, 1259, 1346, 1540, 1322, 1541, 1418, 1459, 1629, 1307, 1622, 1462, 1403, 1337, 1542, 1265, 1543, 1544, 1389, 1407, 1412, 1325, 1398, 1486, 1457, 1268, 1266, 1334, 1419, 1267, 1456, 1458, 1315, 1546, 1473, 1472, 1376, 1377, 1316, 1378, 1379, 1390, 1642, 1545, 1317, 1643, 1463, 1302, 1641, 1269, 1400, 1573, 1344, 1466, 1469, 1420, 1487, 1488, 1464, 1465, 1353, 1470, 1548, 1454, 1354, 1331, 1285, 1524, 1575, 1410, 1422, 1425, 1352, 1271, 1475, 1474, 1525, 1367, 1550, 1368, 1272, 1343, 1362, 1363, 1364, 1489, 1637, 1370, 1369, 1274, 1549, 1395, 1275, 1528, 1527, 1383, 1424, 1276, 1437, 1327, 1455, 1380, 1328, 1638, 1277, 1385, 1639, 1320, 1490, 1644, 1429, 1393, 1372, 1471, 1333, 1373, 1374, 1281, 1423, 1382, 1375, 1282, 1305, 1414, 1523, 1416, 1336, 1339, 1443, 1444, 1445, 1446, 1447, 1448, 1449, 1578, 1491, 1358, 1494, 1495, 1493, 1492, 1357, 1428, 1284, 1554, 1555, 1556, 1557, 1579, 1551, 1397, 1287, 1286, 1552, 1553, 1355, 1413, 1409, 1421, 1440, 1391, 1291, 1496, 1561, 1562, 1563, 1564, 1565, 1566, 1568, 1567, 1569, 1570, 1571, 1520, 1294, 1323, 1572, 1297, 1330, 1392, 1635, 1558, 1559, 1560, 1347, 1634, 1526, 1399, 373: 1671, 1674, 376: 1646, 378: 1655, 1669, 1668, 1699, 400: 1658, 404: 1758, 1677, 1650, 410: 1682, 419: 1645, 1647, 429: 1675, 1649, 1648, 433: 1676, 435: 1654, 1680, 1689, 1711, 1653, 1690, 1691, 1652, 1666, 1667, 1705, 1697, 1700, 1706, 1707, 1702, 1703, 1708, 1701, 1704, 1695, 1673, 1685, 1686, 1688, 1684, 1678, 1679, 1670, 1681, 1683, 1672, 1687, 1692, 1693, 523: 1657, 1212, 1213, 1211, 1663, 1659, 
1651, 1662, 1660, 1661, 1694, 1698, 1696, 1656, 1665, 1709, 1710, 1664, 1779}, + {1309, 1332, 1217, 1442, 1436, 1426, 10: 1280, 1625, 1477, 1511, 1504, 1497, 1507, 1500, 1499, 1501, 1517, 1509, 1503, 1515, 1516, 1513, 1514, 1502, 1498, 1505, 1506, 1508, 1512, 1510, 1547, 1453, 1451, 1452, 1314, 1623, 1226, 1441, 1244, 1288, 1246, 1225, 1628, 1263, 1434, 1299, 1335, 1522, 1521, 1270, 1338, 1298, 1476, 1221, 1231, 1340, 1439, 1341, 1257, 1518, 1519, 1438, 1326, 1350, 1273, 1278, 1430, 1431, 1283, 1289, 1384, 1632, 1432, 1433, 1219, 1222, 1224, 1223, 1238, 1626, 1482, 1427, 1243, 1249, 1261, 1262, 1250, 1485, 1405, 1318, 1319, 1279, 1450, 1290, 1631, 1630, 1415, 1295, 1300, 1633, 1402, 1214, 1529, 1215, 1218, 1460, 1387, 1304, 1624, 1310, 1348, 1349, 1345, 1530, 1531, 1532, 1406, 1576, 1478, 1479, 1467, 1480, 1227, 1394, 1533, 1312, 1396, 1228, 1381, 1481, 1640, 1636, 1230, 1329, 1232, 1233, 1313, 1311, 1234, 1408, 1534, 1535, 1404, 1235, 1536, 1468, 1236, 1537, 1538, 1627, 1240, 1388, 1324, 1483, 1417, 1241, 1484, 1242, 1245, 1247, 1248, 1251, 1386, 1351, 1252, 1577, 1435, 1356, 1253, 1461, 1401, 1574, 1254, 1539, 1411, 1255, 1256, 1580, 1258, 1259, 1346, 1540, 1322, 1541, 1418, 1459, 1629, 1307, 1622, 1462, 1403, 1337, 1542, 1265, 1543, 1544, 1389, 1407, 1412, 1325, 1398, 1486, 1457, 1268, 1266, 1334, 1419, 1267, 1456, 1458, 1315, 1546, 1473, 1472, 1376, 1377, 1316, 1378, 1379, 1390, 1642, 1545, 1317, 1643, 1463, 1302, 1641, 1269, 1400, 1573, 1344, 1466, 1469, 1420, 1487, 1488, 1464, 1465, 1353, 1470, 1548, 1454, 1354, 1331, 1285, 1524, 1575, 1410, 1422, 1425, 1352, 1271, 1475, 1474, 1525, 1367, 1550, 1368, 1272, 1343, 1362, 1363, 1364, 1489, 1637, 1370, 1369, 1274, 1549, 1395, 1275, 1528, 1527, 1383, 1424, 1276, 1437, 1327, 1455, 1380, 1328, 1638, 1277, 1385, 1639, 1320, 1490, 1644, 1429, 1393, 1372, 1471, 1333, 1373, 1374, 1281, 1423, 1382, 1375, 1282, 1305, 1414, 1523, 1416, 1336, 1339, 1443, 1444, 1445, 1446, 1447, 1448, 1449, 1578, 1491, 1358, 1494, 1495, 1493, 1492, 1357, 1428, 1284, 1554, 1555, 1556, 1557, 1579, 1551, 1397, 1287, 1286, 1552, 1553, 1355, 1413, 1409, 1421, 1440, 1391, 1291, 1496, 1561, 1562, 1563, 1564, 1565, 1566, 1568, 1567, 1569, 1570, 1571, 1520, 1294, 1323, 1572, 1297, 1330, 1392, 1635, 1558, 1559, 1560, 1347, 1634, 1526, 1399, 373: 1671, 1674, 376: 1646, 378: 1655, 1669, 1668, 1699, 400: 1658, 404: 1758, 1677, 1650, 410: 1682, 419: 1645, 1647, 429: 1675, 1649, 1648, 433: 1676, 435: 1654, 1680, 1689, 1711, 1653, 1690, 1691, 1652, 1666, 1667, 1705, 1697, 1700, 1706, 1707, 1702, 1703, 1708, 1701, 1704, 1695, 1673, 1685, 1686, 1688, 1684, 1678, 1679, 1670, 1681, 1683, 1672, 1687, 1692, 1693, 523: 1657, 1212, 1213, 1211, 1663, 1659, 1651, 1662, 1660, 1661, 1694, 1698, 1696, 1656, 1665, 1709, 1710, 1664, 1778}, + {1309, 1332, 1217, 1442, 1436, 1426, 10: 1280, 1625, 1477, 1511, 1504, 1497, 1507, 1500, 1499, 1501, 1517, 1509, 1503, 1515, 1516, 1513, 1514, 1502, 1498, 1505, 1506, 1508, 1512, 1510, 1547, 1453, 1451, 1452, 1314, 1623, 1226, 1441, 1244, 1288, 1246, 1225, 1628, 1263, 1434, 1299, 1335, 1522, 1521, 1270, 1338, 1298, 1476, 1221, 1231, 1340, 1439, 1341, 1257, 1518, 1519, 1438, 1326, 1350, 1273, 1278, 1430, 1431, 1283, 1289, 1384, 1632, 1432, 1433, 1219, 1222, 1224, 1223, 1238, 1626, 1482, 1427, 1243, 1249, 1261, 1262, 1250, 1485, 1405, 1318, 1319, 1279, 1450, 1290, 1631, 1630, 1415, 1295, 1300, 1633, 1402, 1214, 1529, 1215, 1218, 1460, 1387, 1304, 1624, 1310, 1348, 1349, 1345, 1530, 1531, 1532, 1406, 1576, 1478, 1479, 1467, 1480, 1227, 1394, 1533, 1312, 1396, 
1228, 1381, 1481, 1640, 1636, 1230, 1329, 1232, 1233, 1313, 1311, 1234, 1408, 1534, 1535, 1404, 1235, 1536, 1468, 1236, 1537, 1538, 1627, 1240, 1388, 1324, 1483, 1417, 1241, 1484, 1242, 1245, 1247, 1248, 1251, 1386, 1351, 1252, 1577, 1435, 1356, 1253, 1461, 1401, 1574, 1254, 1539, 1411, 1255, 1256, 1580, 1258, 1259, 1346, 1540, 1322, 1541, 1418, 1459, 1629, 1307, 1622, 1462, 1403, 1337, 1542, 1265, 1543, 1544, 1389, 1407, 1412, 1325, 1398, 1486, 1457, 1268, 1266, 1334, 1419, 1267, 1456, 1458, 1315, 1546, 1473, 1472, 1376, 1377, 1316, 1378, 1379, 1390, 1642, 1545, 1317, 1643, 1463, 1302, 1641, 1269, 1400, 1573, 1344, 1466, 1469, 1420, 1487, 1488, 1464, 1465, 1353, 1470, 1548, 1454, 1354, 1331, 1285, 1524, 1575, 1410, 1422, 1425, 1352, 1271, 1475, 1474, 1525, 1367, 1550, 1368, 1272, 1343, 1362, 1363, 1364, 1489, 1637, 1370, 1369, 1274, 1549, 1395, 1275, 1528, 1527, 1383, 1424, 1276, 1437, 1327, 1455, 1380, 1328, 1638, 1277, 1385, 1639, 1320, 1490, 1644, 1429, 1393, 1372, 1471, 1333, 1373, 1374, 1281, 1423, 1382, 1375, 1282, 1305, 1414, 1523, 1416, 1336, 1339, 1443, 1444, 1445, 1446, 1447, 1448, 1449, 1578, 1491, 1358, 1494, 1495, 1493, 1492, 1357, 1428, 1284, 1554, 1555, 1556, 1557, 1579, 1551, 1397, 1287, 1286, 1552, 1553, 1355, 1413, 1409, 1421, 1440, 1391, 1291, 1496, 1561, 1562, 1563, 1564, 1565, 1566, 1568, 1567, 1569, 1570, 1571, 1520, 1294, 1323, 1572, 1297, 1330, 1392, 1635, 1558, 1559, 1560, 1347, 1634, 1526, 1399, 373: 1671, 1674, 376: 1646, 378: 1655, 1669, 1668, 1699, 400: 1658, 404: 1758, 1677, 1650, 410: 1682, 419: 1645, 1647, 429: 1675, 1649, 1648, 433: 1676, 435: 1654, 1680, 1689, 1711, 1653, 1690, 1691, 1652, 1666, 1667, 1705, 1697, 1700, 1706, 1707, 1702, 1703, 1708, 1701, 1704, 1695, 1673, 1685, 1686, 1688, 1684, 1678, 1679, 1670, 1681, 1683, 1672, 1687, 1692, 1693, 523: 1657, 1212, 1213, 1211, 1663, 1659, 1651, 1662, 1660, 1661, 1694, 1698, 1696, 1656, 1665, 1709, 1710, 1664, 1777}, + {1309, 1332, 1217, 1442, 1436, 1426, 10: 1280, 1625, 1477, 1511, 1504, 1497, 1507, 1500, 1499, 1501, 1517, 1509, 1503, 1515, 1516, 1513, 1514, 1502, 1498, 1505, 1506, 1508, 1512, 1510, 1547, 1453, 1451, 1452, 1314, 1623, 1226, 1441, 1244, 1288, 1246, 1225, 1628, 1263, 1434, 1299, 1335, 1522, 1521, 1270, 1338, 1298, 1476, 1221, 1231, 1340, 1439, 1341, 1257, 1518, 1519, 1438, 1326, 1350, 1273, 1278, 1430, 1431, 1283, 1289, 1384, 1632, 1432, 1433, 1219, 1222, 1224, 1223, 1238, 1626, 1482, 1427, 1243, 1249, 1261, 1262, 1250, 1485, 1405, 1318, 1319, 1279, 1450, 1290, 1631, 1630, 1415, 1295, 1300, 1633, 1402, 1214, 1529, 1215, 1218, 1460, 1387, 1304, 1624, 1310, 1348, 1349, 1345, 1530, 1531, 1532, 1406, 1576, 1478, 1479, 1467, 1480, 1227, 1394, 1533, 1312, 1396, 1228, 1381, 1481, 1640, 1636, 1230, 1329, 1232, 1233, 1313, 1311, 1234, 1408, 1534, 1535, 1404, 1235, 1536, 1468, 1236, 1537, 1538, 1627, 1240, 1388, 1324, 1483, 1417, 1241, 1484, 1242, 1245, 1247, 1248, 1251, 1386, 1351, 1252, 1577, 1435, 1356, 1253, 1461, 1401, 1574, 1254, 1539, 1411, 1255, 1256, 1580, 1258, 1259, 1346, 1540, 1322, 1541, 1418, 1459, 1629, 1307, 1622, 1462, 1403, 1337, 1542, 1265, 1543, 1544, 1389, 1407, 1412, 1325, 1398, 1486, 1457, 1268, 1266, 1334, 1419, 1267, 1456, 1458, 1315, 1546, 1473, 1472, 1376, 1377, 1316, 1378, 1379, 1390, 1642, 1545, 1317, 1643, 1463, 1302, 1641, 1269, 1400, 1573, 1344, 1466, 1469, 1420, 1487, 1488, 1464, 1465, 1353, 1470, 1548, 1454, 1354, 1331, 1285, 1524, 1575, 1410, 1422, 1425, 1352, 1271, 1475, 1474, 1525, 1367, 1550, 1368, 1272, 1343, 1362, 1363, 1364, 1489, 1637, 1370, 1369, 1274, 1549, 
1395, 1275, 1528, 1527, 1383, 1424, 1276, 1437, 1327, 1455, 1380, 1328, 1638, 1277, 1385, 1639, 1320, 1490, 1644, 1429, 1393, 1372, 1471, 1333, 1373, 1374, 1281, 1423, 1382, 1375, 1282, 1305, 1414, 1523, 1416, 1336, 1339, 1443, 1444, 1445, 1446, 1447, 1448, 1449, 1578, 1491, 1358, 1494, 1495, 1493, 1492, 1357, 1428, 1284, 1554, 1555, 1556, 1557, 1579, 1551, 1397, 1287, 1286, 1552, 1553, 1355, 1413, 1409, 1421, 1440, 1391, 1291, 1496, 1561, 1562, 1563, 1564, 1565, 1566, 1568, 1567, 1569, 1570, 1571, 1520, 1294, 1323, 1572, 1297, 1330, 1392, 1635, 1558, 1559, 1560, 1347, 1634, 1526, 1399, 373: 1671, 1674, 376: 1646, 378: 1655, 1669, 1668, 1699, 400: 1658, 404: 1758, 1677, 1650, 410: 1682, 419: 1645, 1647, 429: 1675, 1649, 1648, 433: 1676, 435: 1654, 1680, 1689, 1711, 1653, 1690, 1691, 1652, 1666, 1667, 1705, 1697, 1700, 1706, 1707, 1702, 1703, 1708, 1701, 1704, 1695, 1673, 1685, 1686, 1688, 1684, 1678, 1679, 1670, 1681, 1683, 1672, 1687, 1692, 1693, 523: 1657, 1212, 1213, 1211, 1663, 1659, 1651, 1662, 1660, 1661, 1694, 1698, 1696, 1656, 1665, 1709, 1710, 1664, 1776}, + // 610 + {1309, 1332, 1217, 1442, 1436, 1426, 10: 1280, 1625, 1477, 1511, 1504, 1497, 1507, 1500, 1499, 1501, 1517, 1509, 1503, 1515, 1516, 1513, 1514, 1502, 1498, 1505, 1506, 1508, 1512, 1510, 1547, 1453, 1451, 1452, 1314, 1623, 1226, 1441, 1244, 1288, 1246, 1225, 1628, 1263, 1434, 1299, 1335, 1522, 1521, 1270, 1338, 1298, 1476, 1221, 1231, 1340, 1439, 1341, 1257, 1518, 1519, 1438, 1326, 1350, 1273, 1278, 1430, 1431, 1283, 1289, 1384, 1632, 1432, 1433, 1219, 1222, 1224, 1223, 1238, 1626, 1482, 1427, 1243, 1249, 1261, 1262, 1250, 1485, 1405, 1318, 1319, 1279, 1450, 1290, 1631, 1630, 1415, 1295, 1300, 1633, 1402, 1214, 1529, 1215, 1218, 1460, 1387, 1304, 1624, 1310, 1348, 1349, 1345, 1530, 1531, 1532, 1406, 1576, 1478, 1479, 1467, 1480, 1227, 1394, 1533, 1312, 1396, 1228, 1381, 1481, 1640, 1636, 1230, 1329, 1232, 1233, 1313, 1311, 1234, 1408, 1534, 1535, 1404, 1235, 1536, 1468, 1236, 1537, 1538, 1627, 1240, 1388, 1324, 1483, 1417, 1241, 1484, 1242, 1245, 1247, 1248, 1251, 1386, 1351, 1252, 1577, 1435, 1356, 1253, 1461, 1401, 1574, 1254, 1539, 1411, 1255, 1256, 1580, 1258, 1259, 1346, 1540, 1322, 1541, 1418, 1459, 1629, 1307, 1622, 1462, 1403, 1337, 1542, 1265, 1543, 1544, 1389, 1407, 1412, 1325, 1398, 1486, 1457, 1268, 1266, 1334, 1419, 1267, 1456, 1458, 1315, 1546, 1473, 1472, 1376, 1377, 1316, 1378, 1379, 1390, 1642, 1545, 1317, 1643, 1463, 1302, 1641, 1269, 1400, 1573, 1344, 1466, 1469, 1420, 1487, 1488, 1464, 1465, 1353, 1470, 1548, 1454, 1354, 1331, 1285, 1524, 1575, 1410, 1422, 1425, 1352, 1271, 1475, 1474, 1525, 1367, 1550, 1368, 1272, 1343, 1362, 1363, 1364, 1489, 1637, 1370, 1369, 1274, 1549, 1395, 1275, 1528, 1527, 1383, 1424, 1276, 1437, 1327, 1455, 1380, 1328, 1638, 1277, 1385, 1639, 1320, 1490, 1644, 1429, 1393, 1372, 1471, 1333, 1373, 1374, 1281, 1423, 1382, 1375, 1282, 1305, 1414, 1523, 1416, 1336, 1339, 1443, 1444, 1445, 1446, 1447, 1448, 1449, 1578, 1491, 1358, 1494, 1495, 1493, 1492, 1357, 1428, 1284, 1554, 1555, 1556, 1557, 1579, 1551, 1397, 1287, 1286, 1552, 1553, 1355, 1413, 1409, 1421, 1440, 1391, 1291, 1496, 1561, 1562, 1563, 1564, 1565, 1566, 1568, 1567, 1569, 1570, 1571, 1520, 1294, 1323, 1572, 1297, 1330, 1392, 1635, 1558, 1559, 1560, 1347, 1634, 1526, 1399, 373: 1671, 1674, 376: 1646, 378: 1655, 1669, 1668, 1699, 400: 1658, 404: 1758, 1677, 1650, 410: 1682, 419: 1645, 1647, 429: 1675, 1649, 1648, 433: 1676, 435: 1654, 1680, 1689, 1711, 1653, 1690, 1691, 1652, 1666, 1667, 1705, 1697, 1700, 1706, 1707, 
1702, 1703, 1708, 1701, 1704, 1695, 1673, 1685, 1686, 1688, 1684, 1678, 1679, 1670, 1681, 1683, 1672, 1687, 1692, 1693, 523: 1657, 1212, 1213, 1211, 1663, 1659, 1651, 1662, 1660, 1661, 1694, 1698, 1696, 1656, 1665, 1709, 1710, 1664, 1775}, + {1309, 1332, 1217, 1442, 1436, 1426, 10: 1280, 1625, 1477, 1511, 1504, 1497, 1507, 1500, 1499, 1501, 1517, 1509, 1503, 1515, 1516, 1513, 1514, 1502, 1498, 1505, 1506, 1508, 1512, 1510, 1547, 1453, 1451, 1452, 1314, 1623, 1226, 1441, 1244, 1288, 1246, 1225, 1628, 1263, 1434, 1299, 1335, 1522, 1521, 1270, 1338, 1298, 1476, 1221, 1231, 1340, 1439, 1341, 1257, 1518, 1519, 1438, 1326, 1350, 1273, 1278, 1430, 1431, 1283, 1289, 1384, 1632, 1432, 1433, 1219, 1222, 1224, 1223, 1238, 1626, 1482, 1427, 1243, 1249, 1261, 1262, 1250, 1485, 1405, 1318, 1319, 1279, 1450, 1290, 1631, 1630, 1415, 1295, 1300, 1633, 1402, 1214, 1529, 1215, 1218, 1460, 1387, 1304, 1624, 1310, 1348, 1349, 1345, 1530, 1531, 1532, 1406, 1576, 1478, 1479, 1467, 1480, 1227, 1394, 1533, 1312, 1396, 1228, 1381, 1481, 1640, 1636, 1230, 1329, 1232, 1233, 1313, 1311, 1234, 1408, 1534, 1535, 1404, 1235, 1536, 1468, 1236, 1537, 1538, 1627, 1240, 1388, 1324, 1483, 1417, 1241, 1484, 1242, 1245, 1247, 1248, 1251, 1386, 1351, 1252, 1577, 1435, 1356, 1253, 1461, 1401, 1574, 1254, 1539, 1411, 1255, 1256, 1580, 1258, 1259, 1346, 1540, 1322, 1541, 1418, 1459, 1629, 1307, 1622, 1462, 1403, 1337, 1542, 1265, 1543, 1544, 1389, 1407, 1412, 1325, 1398, 1486, 1457, 1268, 1266, 1334, 1419, 1267, 1456, 1458, 1315, 1546, 1473, 1472, 1376, 1377, 1316, 1378, 1379, 1390, 1642, 1545, 1317, 1643, 1463, 1302, 1641, 1269, 1400, 1573, 1344, 1466, 1469, 1420, 1487, 1488, 1464, 1465, 1353, 1470, 1548, 1454, 1354, 1331, 1285, 1524, 1575, 1410, 1422, 1425, 1352, 1271, 1475, 1474, 1525, 1367, 1550, 1368, 1272, 1343, 1362, 1363, 1364, 1489, 1637, 1370, 1369, 1274, 1549, 1395, 1275, 1528, 1527, 1383, 1424, 1276, 1437, 1327, 1455, 1380, 1328, 1638, 1277, 1385, 1639, 1320, 1490, 1644, 1429, 1393, 1372, 1471, 1333, 1373, 1374, 1281, 1423, 1382, 1375, 1282, 1305, 1414, 1523, 1416, 1336, 1339, 1443, 1444, 1445, 1446, 1447, 1448, 1449, 1578, 1491, 1358, 1494, 1495, 1493, 1492, 1357, 1428, 1284, 1554, 1555, 1556, 1557, 1579, 1551, 1397, 1287, 1286, 1552, 1553, 1355, 1413, 1409, 1421, 1440, 1391, 1291, 1496, 1561, 1562, 1563, 1564, 1565, 1566, 1568, 1567, 1569, 1570, 1571, 1520, 1294, 1323, 1572, 1297, 1330, 1392, 1635, 1558, 1559, 1560, 1347, 1634, 1526, 1399, 373: 1671, 1674, 376: 1646, 378: 1655, 1669, 1668, 1699, 400: 1658, 404: 1758, 1677, 1650, 410: 1682, 419: 1645, 1647, 429: 1675, 1649, 1648, 433: 1676, 435: 1654, 1680, 1689, 1711, 1653, 1690, 1691, 1652, 1666, 1667, 1705, 1697, 1700, 1706, 1707, 1702, 1703, 1708, 1701, 1704, 1695, 1673, 1685, 1686, 1688, 1684, 1678, 1679, 1670, 1681, 1683, 1672, 1687, 1692, 1693, 523: 1657, 1212, 1213, 1211, 1663, 1659, 1651, 1662, 1660, 1661, 1694, 1698, 1696, 1656, 1665, 1709, 1710, 1664, 1774}, + {1309, 1332, 1217, 1442, 1436, 1426, 10: 1280, 1625, 1477, 1511, 1504, 1497, 1507, 1500, 1499, 1501, 1517, 1509, 1503, 1515, 1516, 1513, 1514, 1502, 1498, 1505, 1506, 1508, 1512, 1510, 1547, 1453, 1451, 1452, 1314, 1623, 1226, 1441, 1244, 1288, 1246, 1225, 1628, 1263, 1434, 1299, 1335, 1522, 1521, 1270, 1338, 1298, 1476, 1221, 1231, 1340, 1439, 1341, 1257, 1518, 1519, 1438, 1326, 1350, 1273, 1278, 1430, 1431, 1283, 1289, 1384, 1632, 1432, 1433, 1219, 1222, 1224, 1223, 1238, 1626, 1482, 1427, 1243, 1249, 1261, 1262, 1250, 1485, 1405, 1318, 1319, 1279, 1450, 1290, 1631, 1630, 1415, 1295, 1300, 1633, 
1402, 1214, 1529, 1215, 1218, 1460, 1387, 1304, 1624, 1310, 1348, 1349, 1345, 1530, 1531, 1532, 1406, 1576, 1478, 1479, 1467, 1480, 1227, 1394, 1533, 1312, 1396, 1228, 1381, 1481, 1640, 1636, 1230, 1329, 1232, 1233, 1313, 1311, 1234, 1408, 1534, 1535, 1404, 1235, 1536, 1468, 1236, 1537, 1538, 1627, 1240, 1388, 1324, 1483, 1417, 1241, 1484, 1242, 1245, 1247, 1248, 1251, 1386, 1351, 1252, 1577, 1435, 1356, 1253, 1461, 1401, 1574, 1254, 1539, 1411, 1255, 1256, 1580, 1258, 1259, 1346, 1540, 1322, 1541, 1418, 1459, 1629, 1307, 1622, 1462, 1403, 1337, 1542, 1265, 1543, 1544, 1389, 1407, 1412, 1325, 1398, 1486, 1457, 1268, 1266, 1334, 1419, 1267, 1456, 1458, 1315, 1546, 1473, 1472, 1376, 1377, 1316, 1378, 1379, 1390, 1642, 1545, 1317, 1643, 1463, 1302, 1641, 1269, 1400, 1573, 1344, 1466, 1469, 1420, 1487, 1488, 1464, 1465, 1353, 1470, 1548, 1454, 1354, 1331, 1285, 1524, 1575, 1410, 1422, 1425, 1352, 1271, 1475, 1474, 1525, 1367, 1550, 1368, 1272, 1343, 1362, 1363, 1364, 1489, 1637, 1370, 1369, 1274, 1549, 1395, 1275, 1528, 1527, 1383, 1424, 1276, 1437, 1327, 1455, 1380, 1328, 1638, 1277, 1385, 1639, 1320, 1490, 1644, 1429, 1393, 1372, 1471, 1333, 1373, 1374, 1281, 1423, 1382, 1375, 1282, 1305, 1414, 1523, 1416, 1336, 1339, 1443, 1444, 1445, 1446, 1447, 1448, 1449, 1578, 1491, 1358, 1494, 1495, 1493, 1492, 1357, 1428, 1284, 1554, 1555, 1556, 1557, 1579, 1551, 1397, 1287, 1286, 1552, 1553, 1355, 1413, 1409, 1421, 1440, 1391, 1291, 1496, 1561, 1562, 1563, 1564, 1565, 1566, 1568, 1567, 1569, 1570, 1571, 1520, 1294, 1323, 1572, 1297, 1330, 1392, 1635, 1558, 1559, 1560, 1347, 1634, 1526, 1399, 1618, 1671, 1674, 376: 1646, 378: 1655, 1669, 1668, 1699, 400: 1658, 404: 1616, 1677, 1650, 410: 1682, 419: 1645, 1647, 429: 1675, 1649, 1648, 433: 1676, 435: 1654, 1680, 1689, 1711, 1653, 1690, 1691, 1652, 1666, 1667, 1705, 1697, 1700, 1706, 1707, 1702, 1703, 1708, 1701, 1704, 1695, 1673, 1685, 1686, 1688, 1684, 1678, 1679, 1670, 1681, 1683, 1672, 1687, 1692, 1693, 523: 1657, 1212, 1213, 1211, 1663, 1659, 1651, 1662, 1660, 1661, 1694, 1698, 1696, 1656, 1665, 1709, 1710, 1664, 1621, 1620, 1619, 1772}, + {8: 1773, 392: 1720, 1719, 395: 1718, 1717, 1715, 547: 1716, 1714}, + {379, 379, 379, 379, 379, 379, 379, 379, 379, 379, 379, 379, 379, 379, 379, 379, 379, 379, 379, 379, 379, 379, 379, 379, 379, 379, 379, 379, 379, 379, 379, 379, 379, 379, 379, 379, 379, 379, 379, 379, 379, 379, 379, 379, 379, 379, 379, 379, 379, 379, 379, 379, 379, 379, 379, 379, 379, 379, 379, 379, 379, 379, 379, 379, 379, 379, 379, 379, 379, 379, 379, 379, 379, 379, 379, 379, 379, 379, 379, 379, 379, 379, 379, 379, 379, 379, 379, 379, 379, 379, 379, 379, 379, 379, 379, 379, 379, 379, 379, 379, 379, 379, 379, 379, 379, 379, 379, 379, 379, 379, 379, 379, 379, 379, 379, 379, 379, 379, 379, 379, 379, 379, 379, 379, 379, 379, 379, 379, 379, 379, 379, 379, 379, 379, 379, 379, 379, 379, 379, 379, 379, 379, 379, 379, 379, 379, 379, 379, 379, 379, 379, 379, 379, 379, 379, 379, 379, 379, 379, 379, 379, 379, 379, 379, 379, 379, 379, 379, 379, 379, 379, 379, 379, 379, 379, 379, 379, 379, 379, 379, 379, 379, 379, 379, 379, 379, 379, 379, 379, 379, 379, 379, 379, 379, 379, 379, 379, 379, 379, 379, 379, 379, 379, 379, 379, 379, 379, 379, 379, 379, 379, 379, 379, 379, 379, 379, 379, 379, 379, 379, 379, 379, 379, 379, 379, 379, 379, 379, 379, 379, 379, 379, 379, 379, 379, 379, 379, 379, 379, 379, 379, 379, 379, 379, 379, 379, 379, 379, 379, 379, 379, 379, 379, 379, 379, 379, 379, 379, 379, 379, 379, 379, 379, 379, 379, 379, 379, 379, 379, 379, 379, 379, 379, 
379, 379, 379, 379, 379, 379, 379, 379, 379, 379, 379, 379, 379, 379, 379, 379, 379, 379, 379, 379, 379, 379, 379, 379, 379, 379, 379, 379, 379, 379, 379, 379, 379, 379, 379, 379, 379, 379, 379, 379, 379, 379, 379, 379, 379, 379, 379, 379, 379, 379, 379, 379, 379, 379, 379, 379, 379, 379, 379, 379, 379, 379, 379, 379, 379, 379, 379, 379, 379, 379, 379, 379, 379, 379, 379, 379, 379, 379, 379, 379, 379, 379, 379, 379, 379, 379, 379, 379, 379, 379, 379, 379, 379, 379, 379, 379, 379, 379, 379, 379, 375: 379, 377: 379, 379, 379, 379, 379, 384: 379, 386: 379, 391: 379, 379, 379, 379, 379, 379, 379, 379, 379, 401: 379, 379, 379, 407: 379, 379, 379, 411: 379, 379, 379, 379, 379, 379, 379, 379, 421: 379, 379, 379, 379, 379, 379, 379, 379, 432: 379, 434: 379}, + // 615 + {461, 461, 461, 461, 461, 461, 461, 461, 461, 461, 461, 461, 461, 461, 461, 461, 461, 461, 461, 461, 461, 461, 461, 461, 461, 461, 461, 461, 461, 461, 461, 461, 461, 461, 461, 461, 461, 461, 461, 461, 461, 461, 461, 461, 461, 461, 461, 461, 461, 461, 461, 461, 461, 461, 461, 461, 461, 461, 461, 461, 461, 461, 461, 461, 461, 461, 461, 461, 461, 461, 461, 461, 461, 461, 461, 461, 461, 461, 461, 461, 461, 461, 461, 461, 461, 461, 461, 461, 461, 461, 461, 461, 461, 461, 461, 461, 461, 461, 461, 461, 461, 461, 461, 461, 461, 461, 461, 461, 461, 461, 461, 461, 461, 461, 461, 461, 461, 461, 461, 461, 461, 461, 461, 461, 461, 461, 461, 461, 461, 461, 461, 461, 461, 461, 461, 461, 461, 461, 461, 461, 461, 461, 461, 461, 461, 461, 461, 461, 461, 461, 461, 461, 461, 461, 461, 461, 461, 461, 461, 461, 461, 461, 461, 461, 461, 461, 461, 461, 461, 461, 461, 461, 461, 461, 461, 461, 461, 461, 461, 461, 461, 461, 461, 461, 461, 461, 461, 461, 461, 461, 461, 461, 461, 461, 461, 461, 461, 461, 461, 461, 461, 461, 461, 461, 461, 461, 461, 461, 461, 461, 461, 461, 461, 461, 461, 461, 461, 461, 461, 461, 461, 461, 461, 461, 461, 461, 461, 461, 461, 461, 461, 461, 461, 461, 461, 461, 461, 461, 461, 461, 461, 461, 461, 461, 461, 461, 461, 461, 461, 461, 461, 461, 461, 461, 461, 461, 461, 461, 461, 461, 461, 461, 461, 461, 461, 461, 461, 461, 461, 461, 461, 461, 461, 461, 461, 461, 461, 461, 461, 461, 461, 461, 461, 461, 461, 461, 461, 461, 461, 461, 461, 461, 461, 461, 461, 461, 461, 461, 461, 461, 461, 461, 461, 461, 461, 461, 461, 461, 461, 461, 461, 461, 461, 461, 461, 461, 461, 461, 461, 461, 461, 461, 461, 461, 461, 461, 461, 461, 461, 461, 461, 461, 461, 461, 461, 461, 461, 461, 461, 461, 461, 461, 461, 461, 461, 461, 461, 461, 461, 461, 461, 461, 461, 461, 461, 461, 461, 461, 461, 461, 461, 461, 461, 461, 461, 461, 461, 461, 461, 461, 461, 461, 461, 375: 461, 378: 461, 461, 461, 461, 384: 461, 386: 461, 391: 461, 461, 461, 461, 461, 461, 461, 461, 461, 401: 461, 461, 461, 407: 461, 461, 461, 411: 461, 461, 461, 461, 461, 461, 461, 461, 421: 461, 461, 461, 461, 461, 461, 461, 461, 432: 461, 434: 461}, + {462, 462, 462, 462, 462, 462, 462, 462, 462, 462, 462, 462, 462, 462, 462, 462, 462, 462, 462, 462, 462, 462, 462, 462, 462, 462, 462, 462, 462, 462, 462, 462, 462, 462, 462, 462, 462, 462, 462, 462, 462, 462, 462, 462, 462, 462, 462, 462, 462, 462, 462, 462, 462, 462, 462, 462, 462, 462, 462, 462, 462, 462, 462, 462, 462, 462, 462, 462, 462, 462, 462, 462, 462, 462, 462, 462, 462, 462, 462, 462, 462, 462, 462, 462, 462, 462, 462, 462, 462, 462, 462, 462, 462, 462, 462, 462, 462, 462, 462, 462, 462, 462, 462, 462, 462, 462, 462, 462, 462, 462, 462, 462, 462, 462, 462, 462, 462, 462, 462, 462, 462, 462, 462, 462, 462, 462, 462, 462, 462, 462, 462, 
462, 462, 462, 462, 462, 462, 462, 462, 462, 462, 462, 462, 462, 462, 462, 462, 462, 462, 462, 462, 462, 462, 462, 462, 462, 462, 462, 462, 462, 462, 462, 462, 462, 462, 462, 462, 462, 462, 462, 462, 462, 462, 462, 462, 462, 462, 462, 462, 462, 462, 462, 462, 462, 462, 462, 462, 462, 462, 462, 462, 462, 462, 462, 462, 462, 462, 462, 462, 462, 462, 462, 462, 462, 462, 462, 462, 462, 462, 462, 462, 462, 462, 462, 462, 462, 462, 462, 462, 462, 462, 462, 462, 462, 462, 462, 462, 462, 462, 462, 462, 462, 462, 462, 462, 462, 462, 462, 462, 462, 462, 462, 462, 462, 462, 462, 462, 462, 462, 462, 462, 462, 462, 462, 462, 462, 462, 462, 462, 462, 462, 462, 462, 462, 462, 462, 462, 462, 462, 462, 462, 462, 462, 462, 462, 462, 462, 462, 462, 462, 462, 462, 462, 462, 462, 462, 462, 462, 462, 462, 462, 462, 462, 462, 462, 462, 462, 462, 462, 462, 462, 462, 462, 462, 462, 462, 462, 462, 462, 462, 462, 462, 462, 462, 462, 462, 462, 462, 462, 462, 462, 462, 462, 462, 462, 462, 462, 462, 462, 462, 462, 462, 462, 462, 462, 462, 462, 462, 462, 462, 462, 462, 462, 462, 462, 462, 462, 462, 462, 462, 462, 462, 462, 462, 462, 462, 462, 462, 462, 462, 462, 462, 462, 462, 462, 462, 462, 462, 462, 462, 462, 462, 462, 375: 462, 378: 462, 462, 462, 462, 384: 462, 386: 462, 391: 462, 462, 462, 462, 462, 462, 462, 462, 462, 401: 462, 462, 462, 407: 462, 462, 462, 411: 462, 462, 462, 462, 462, 462, 462, 462, 421: 462, 462, 462, 1770, 462, 462, 462, 462, 432: 462, 434: 462}, + {463, 463, 463, 463, 463, 463, 463, 463, 463, 463, 463, 463, 463, 463, 463, 463, 463, 463, 463, 463, 463, 463, 463, 463, 463, 463, 463, 463, 463, 463, 463, 463, 463, 463, 463, 463, 463, 463, 463, 463, 463, 463, 463, 463, 463, 463, 463, 463, 463, 463, 463, 463, 463, 463, 463, 463, 463, 463, 463, 463, 463, 463, 463, 463, 463, 463, 463, 463, 463, 463, 463, 463, 463, 463, 463, 463, 463, 463, 463, 463, 463, 463, 463, 463, 463, 463, 463, 463, 463, 463, 463, 463, 463, 463, 463, 463, 463, 463, 463, 463, 463, 463, 463, 463, 463, 463, 463, 463, 463, 463, 463, 463, 463, 463, 463, 463, 463, 463, 463, 463, 463, 463, 463, 463, 463, 463, 463, 463, 463, 463, 463, 463, 463, 463, 463, 463, 463, 463, 463, 463, 463, 463, 463, 463, 463, 463, 463, 463, 463, 463, 463, 463, 463, 463, 463, 463, 463, 463, 463, 463, 463, 463, 463, 463, 463, 463, 463, 463, 463, 463, 463, 463, 463, 463, 463, 463, 463, 463, 463, 463, 463, 463, 463, 463, 463, 463, 463, 463, 463, 463, 463, 463, 463, 463, 463, 463, 463, 463, 463, 463, 463, 463, 463, 463, 463, 463, 463, 463, 463, 463, 463, 463, 463, 463, 463, 463, 463, 463, 463, 463, 463, 463, 463, 463, 463, 463, 463, 463, 463, 463, 463, 463, 463, 463, 463, 463, 463, 463, 463, 463, 463, 463, 463, 463, 463, 463, 463, 463, 463, 463, 463, 463, 463, 463, 463, 463, 463, 463, 463, 463, 463, 463, 463, 463, 463, 463, 463, 463, 463, 463, 463, 463, 463, 463, 463, 463, 463, 463, 463, 463, 463, 463, 463, 463, 463, 463, 463, 463, 463, 463, 463, 463, 463, 463, 463, 463, 463, 463, 463, 463, 463, 463, 463, 463, 463, 463, 463, 463, 463, 463, 463, 463, 463, 463, 463, 463, 463, 463, 463, 463, 463, 463, 463, 463, 463, 463, 463, 463, 463, 463, 463, 463, 463, 463, 463, 463, 463, 463, 463, 463, 463, 463, 463, 463, 463, 463, 463, 463, 463, 463, 463, 463, 463, 463, 463, 463, 463, 463, 463, 463, 463, 463, 463, 463, 463, 463, 463, 463, 463, 463, 463, 463, 463, 375: 463, 378: 463, 463, 463, 463, 384: 463, 386: 463, 391: 463, 463, 463, 463, 463, 463, 463, 463, 463, 401: 463, 463, 463, 407: 463, 463, 463, 411: 463, 463, 463, 463, 463, 463, 463, 463, 421: 463, 463, 463, 1770, 
463, 463, 463, 463, 432: 463, 434: 463}, + {464, 464, 464, 464, 464, 464, 464, 464, 464, 464, 464, 464, 464, 464, 464, 464, 464, 464, 464, 464, 464, 464, 464, 464, 464, 464, 464, 464, 464, 464, 464, 464, 464, 464, 464, 464, 464, 464, 464, 464, 464, 464, 464, 464, 464, 464, 464, 464, 464, 464, 464, 464, 464, 464, 464, 464, 464, 464, 464, 464, 464, 464, 464, 464, 464, 464, 464, 464, 464, 464, 464, 464, 464, 464, 464, 464, 464, 464, 464, 464, 464, 464, 464, 464, 464, 464, 464, 464, 464, 464, 464, 464, 464, 464, 464, 464, 464, 464, 464, 464, 464, 464, 464, 464, 464, 464, 464, 464, 464, 464, 464, 464, 464, 464, 464, 464, 464, 464, 464, 464, 464, 464, 464, 464, 464, 464, 464, 464, 464, 464, 464, 464, 464, 464, 464, 464, 464, 464, 464, 464, 464, 464, 464, 464, 464, 464, 464, 464, 464, 464, 464, 464, 464, 464, 464, 464, 464, 464, 464, 464, 464, 464, 464, 464, 464, 464, 464, 464, 464, 464, 464, 464, 464, 464, 464, 464, 464, 464, 464, 464, 464, 464, 464, 464, 464, 464, 464, 464, 464, 464, 464, 464, 464, 464, 464, 464, 464, 464, 464, 464, 464, 464, 464, 464, 464, 464, 464, 464, 464, 464, 464, 464, 464, 464, 464, 464, 464, 464, 464, 464, 464, 464, 464, 464, 464, 464, 464, 464, 464, 464, 464, 464, 464, 464, 464, 464, 464, 464, 464, 464, 464, 464, 464, 464, 464, 464, 464, 464, 464, 464, 464, 464, 464, 464, 464, 464, 464, 464, 464, 464, 464, 464, 464, 464, 464, 464, 464, 464, 464, 464, 464, 464, 464, 464, 464, 464, 464, 464, 464, 464, 464, 464, 464, 464, 464, 464, 464, 464, 464, 464, 464, 464, 464, 464, 464, 464, 464, 464, 464, 464, 464, 464, 464, 464, 464, 464, 464, 464, 464, 464, 464, 464, 464, 464, 464, 464, 464, 464, 464, 464, 464, 464, 464, 464, 464, 464, 464, 464, 464, 464, 464, 464, 464, 464, 464, 464, 464, 464, 464, 464, 464, 464, 464, 464, 464, 464, 464, 464, 464, 464, 464, 464, 464, 464, 464, 464, 464, 464, 464, 464, 464, 464, 464, 464, 464, 464, 464, 464, 464, 464, 464, 464, 464, 375: 464, 378: 464, 464, 464, 464, 384: 464, 386: 464, 391: 464, 464, 464, 464, 464, 464, 464, 464, 464, 401: 464, 464, 464, 407: 464, 464, 464, 411: 464, 464, 464, 464, 464, 464, 464, 464, 421: 464, 464, 464, 1770, 464, 464, 464, 464, 432: 464, 434: 464}, + {465, 465, 465, 465, 465, 465, 465, 465, 465, 465, 465, 465, 465, 465, 465, 465, 465, 465, 465, 465, 465, 465, 465, 465, 465, 465, 465, 465, 465, 465, 465, 465, 465, 465, 465, 465, 465, 465, 465, 465, 465, 465, 465, 465, 465, 465, 465, 465, 465, 465, 465, 465, 465, 465, 465, 465, 465, 465, 465, 465, 465, 465, 465, 465, 465, 465, 465, 465, 465, 465, 465, 465, 465, 465, 465, 465, 465, 465, 465, 465, 465, 465, 465, 465, 465, 465, 465, 465, 465, 465, 465, 465, 465, 465, 465, 465, 465, 465, 465, 465, 465, 465, 465, 465, 465, 465, 465, 465, 465, 465, 465, 465, 465, 465, 465, 465, 465, 465, 465, 465, 465, 465, 465, 465, 465, 465, 465, 465, 465, 465, 465, 465, 465, 465, 465, 465, 465, 465, 465, 465, 465, 465, 465, 465, 465, 465, 465, 465, 465, 465, 465, 465, 465, 465, 465, 465, 465, 465, 465, 465, 465, 465, 465, 465, 465, 465, 465, 465, 465, 465, 465, 465, 465, 465, 465, 465, 465, 465, 465, 465, 465, 465, 465, 465, 465, 465, 465, 465, 465, 465, 465, 465, 465, 465, 465, 465, 465, 465, 465, 465, 465, 465, 465, 465, 465, 465, 465, 465, 465, 465, 465, 465, 465, 465, 465, 465, 465, 465, 465, 465, 465, 465, 465, 465, 465, 465, 465, 465, 465, 465, 465, 465, 465, 465, 465, 465, 465, 465, 465, 465, 465, 465, 465, 465, 465, 465, 465, 465, 465, 465, 465, 465, 465, 465, 465, 465, 465, 465, 465, 465, 465, 465, 465, 465, 465, 465, 465, 465, 465, 465, 465, 465, 465, 465, 465, 465, 465, 
465, 465, 465, 465, 465, 465, 465, 465, 465, 465, 465, 465, 465, 465, 465, 465, 465, 465, 465, 465, 465, 465, 465, 465, 465, 465, 465, 465, 465, 465, 465, 465, 465, 465, 465, 465, 465, 465, 465, 465, 465, 465, 465, 465, 465, 465, 465, 465, 465, 465, 465, 465, 465, 465, 465, 465, 465, 465, 465, 465, 465, 465, 465, 465, 465, 465, 465, 465, 465, 465, 465, 465, 465, 465, 465, 465, 465, 465, 465, 465, 465, 465, 465, 465, 465, 465, 465, 465, 465, 465, 465, 465, 465, 465, 465, 465, 375: 465, 378: 465, 465, 465, 465, 384: 465, 386: 465, 391: 465, 465, 465, 465, 465, 465, 465, 465, 465, 401: 465, 465, 465, 407: 465, 465, 465, 411: 465, 465, 465, 465, 465, 465, 465, 465, 421: 465, 465, 465, 1770, 465, 465, 465, 465, 432: 465, 434: 465}, + // 620 + {466, 466, 466, 466, 466, 466, 466, 466, 466, 466, 466, 466, 466, 466, 466, 466, 466, 466, 466, 466, 466, 466, 466, 466, 466, 466, 466, 466, 466, 466, 466, 466, 466, 466, 466, 466, 466, 466, 466, 466, 466, 466, 466, 466, 466, 466, 466, 466, 466, 466, 466, 466, 466, 466, 466, 466, 466, 466, 466, 466, 466, 466, 466, 466, 466, 466, 466, 466, 466, 466, 466, 466, 466, 466, 466, 466, 466, 466, 466, 466, 466, 466, 466, 466, 466, 466, 466, 466, 466, 466, 466, 466, 466, 466, 466, 466, 466, 466, 466, 466, 466, 466, 466, 466, 466, 466, 466, 466, 466, 466, 466, 466, 466, 466, 466, 466, 466, 466, 466, 466, 466, 466, 466, 466, 466, 466, 466, 466, 466, 466, 466, 466, 466, 466, 466, 466, 466, 466, 466, 466, 466, 466, 466, 466, 466, 466, 466, 466, 466, 466, 466, 466, 466, 466, 466, 466, 466, 466, 466, 466, 466, 466, 466, 466, 466, 466, 466, 466, 466, 466, 466, 466, 466, 466, 466, 466, 466, 466, 466, 466, 466, 466, 466, 466, 466, 466, 466, 466, 466, 466, 466, 466, 466, 466, 466, 466, 466, 466, 466, 466, 466, 466, 466, 466, 466, 466, 466, 466, 466, 466, 466, 466, 466, 466, 466, 466, 466, 466, 466, 466, 466, 466, 466, 466, 466, 466, 466, 466, 466, 466, 466, 466, 466, 466, 466, 466, 466, 466, 466, 466, 466, 466, 466, 466, 466, 466, 466, 466, 466, 466, 466, 466, 466, 466, 466, 466, 466, 466, 466, 466, 466, 466, 466, 466, 466, 466, 466, 466, 466, 466, 466, 466, 466, 466, 466, 466, 466, 466, 466, 466, 466, 466, 466, 466, 466, 466, 466, 466, 466, 466, 466, 466, 466, 466, 466, 466, 466, 466, 466, 466, 466, 466, 466, 466, 466, 466, 466, 466, 466, 466, 466, 466, 466, 466, 466, 466, 466, 466, 466, 466, 466, 466, 466, 466, 466, 466, 466, 466, 466, 466, 466, 466, 466, 466, 466, 466, 466, 466, 466, 466, 466, 466, 466, 466, 466, 466, 466, 466, 466, 466, 466, 466, 466, 466, 466, 466, 466, 466, 466, 466, 466, 466, 466, 466, 466, 466, 466, 466, 466, 466, 466, 466, 466, 375: 466, 378: 466, 466, 466, 466, 384: 466, 386: 466, 391: 466, 466, 466, 466, 466, 466, 466, 466, 466, 401: 466, 466, 466, 407: 466, 466, 466, 411: 466, 466, 466, 466, 466, 466, 466, 466, 421: 466, 466, 466, 1770, 466, 466, 466, 466, 432: 466, 434: 466}, + {467, 467, 467, 467, 467, 467, 467, 467, 467, 467, 467, 467, 467, 467, 467, 467, 467, 467, 467, 467, 467, 467, 467, 467, 467, 467, 467, 467, 467, 467, 467, 467, 467, 467, 467, 467, 467, 467, 467, 467, 467, 467, 467, 467, 467, 467, 467, 467, 467, 467, 467, 467, 467, 467, 467, 467, 467, 467, 467, 467, 467, 467, 467, 467, 467, 467, 467, 467, 467, 467, 467, 467, 467, 467, 467, 467, 467, 467, 467, 467, 467, 467, 467, 467, 467, 467, 467, 467, 467, 467, 467, 467, 467, 467, 467, 467, 467, 467, 467, 467, 467, 467, 467, 467, 467, 467, 467, 467, 467, 467, 467, 467, 467, 467, 467, 467, 467, 467, 467, 467, 467, 467, 467, 467, 467, 467, 467, 467, 467, 467, 467, 467, 467, 467, 467, 467, 
467, 467, 467, 467, 467, 467, 467, 467, 467, 467, 467, 467, 467, 467, 467, 467, 467, 467, 467, 467, 467, 467, 467, 467, 467, 467, 467, 467, 467, 467, 467, 467, 467, 467, 467, 467, 467, 467, 467, 467, 467, 467, 467, 467, 467, 467, 467, 467, 467, 467, 467, 467, 467, 467, 467, 467, 467, 467, 467, 467, 467, 467, 467, 467, 467, 467, 467, 467, 467, 467, 467, 467, 467, 467, 467, 467, 467, 467, 467, 467, 467, 467, 467, 467, 467, 467, 467, 467, 467, 467, 467, 467, 467, 467, 467, 467, 467, 467, 467, 467, 467, 467, 467, 467, 467, 467, 467, 467, 467, 467, 467, 467, 467, 467, 467, 467, 467, 467, 467, 467, 467, 467, 467, 467, 467, 467, 467, 467, 467, 467, 467, 467, 467, 467, 467, 467, 467, 467, 467, 467, 467, 467, 467, 467, 467, 467, 467, 467, 467, 467, 467, 467, 467, 467, 467, 467, 467, 467, 467, 467, 467, 467, 467, 467, 467, 467, 467, 467, 467, 467, 467, 467, 467, 467, 467, 467, 467, 467, 467, 467, 467, 467, 467, 467, 467, 467, 467, 467, 467, 467, 467, 467, 467, 467, 467, 467, 467, 467, 467, 467, 467, 467, 467, 467, 467, 467, 467, 467, 467, 467, 467, 467, 467, 467, 467, 467, 467, 467, 467, 467, 467, 467, 467, 467, 467, 467, 467, 467, 467, 467, 467, 467, 467, 467, 467, 467, 467, 375: 467, 378: 467, 467, 467, 1769, 384: 467, 386: 467, 391: 467, 467, 467, 467, 467, 467, 467, 467, 467, 401: 1765, 467, 467, 407: 467, 467, 467, 411: 467, 467, 467, 467, 467, 467, 467, 467, 421: 1767, 467, 1766, 1770, 467, 1768, 467, 467, 432: 467, 434: 467}, + {468, 468, 468, 468, 468, 468, 468, 468, 468, 468, 468, 468, 468, 468, 468, 468, 468, 468, 468, 468, 468, 468, 468, 468, 468, 468, 468, 468, 468, 468, 468, 468, 468, 468, 468, 468, 468, 468, 468, 468, 468, 468, 468, 468, 468, 468, 468, 468, 468, 468, 468, 468, 468, 468, 468, 468, 468, 468, 468, 468, 468, 468, 468, 468, 468, 468, 468, 468, 468, 468, 468, 468, 468, 468, 468, 468, 468, 468, 468, 468, 468, 468, 468, 468, 468, 468, 468, 468, 468, 468, 468, 468, 468, 468, 468, 468, 468, 468, 468, 468, 468, 468, 468, 468, 468, 468, 468, 468, 468, 468, 468, 468, 468, 468, 468, 468, 468, 468, 468, 468, 468, 468, 468, 468, 468, 468, 468, 468, 468, 468, 468, 468, 468, 468, 468, 468, 468, 468, 468, 468, 468, 468, 468, 468, 468, 468, 468, 468, 468, 468, 468, 468, 468, 468, 468, 468, 468, 468, 468, 468, 468, 468, 468, 468, 468, 468, 468, 468, 468, 468, 468, 468, 468, 468, 468, 468, 468, 468, 468, 468, 468, 468, 468, 468, 468, 468, 468, 468, 468, 468, 468, 468, 468, 468, 468, 468, 468, 468, 468, 468, 468, 468, 468, 468, 468, 468, 468, 468, 468, 468, 468, 468, 468, 468, 468, 468, 468, 468, 468, 468, 468, 468, 468, 468, 468, 468, 468, 468, 468, 468, 468, 468, 468, 468, 468, 468, 468, 468, 468, 468, 468, 468, 468, 468, 468, 468, 468, 468, 468, 468, 468, 468, 468, 468, 468, 468, 468, 468, 468, 468, 468, 468, 468, 468, 468, 468, 468, 468, 468, 468, 468, 468, 468, 468, 468, 468, 468, 468, 468, 468, 468, 468, 468, 468, 468, 468, 468, 468, 468, 468, 468, 468, 468, 468, 468, 468, 468, 468, 468, 468, 468, 468, 468, 468, 468, 468, 468, 468, 468, 468, 468, 468, 468, 468, 468, 468, 468, 468, 468, 468, 468, 468, 468, 468, 468, 468, 468, 468, 468, 468, 468, 468, 468, 468, 468, 468, 468, 468, 468, 468, 468, 468, 468, 468, 468, 468, 468, 468, 468, 468, 468, 468, 468, 468, 468, 468, 468, 468, 468, 468, 468, 468, 468, 468, 468, 468, 468, 468, 468, 468, 468, 468, 468, 375: 468, 378: 468, 468, 468, 1769, 384: 468, 386: 468, 391: 468, 468, 468, 468, 468, 468, 468, 468, 468, 401: 1765, 468, 468, 407: 468, 468, 468, 411: 468, 468, 468, 468, 468, 468, 468, 468, 421: 1767, 468, 1766, 1770, 468, 1768, 468, 
468, 432: 468, 434: 468}, + {469, 469, 469, 469, 469, 469, 469, 469, 469, 469, 469, 469, 469, 469, 469, 469, 469, 469, 469, 469, 469, 469, 469, 469, 469, 469, 469, 469, 469, 469, 469, 469, 469, 469, 469, 469, 469, 469, 469, 469, 469, 469, 469, 469, 469, 469, 469, 469, 469, 469, 469, 469, 469, 469, 469, 469, 469, 469, 469, 469, 469, 469, 469, 469, 469, 469, 469, 469, 469, 469, 469, 469, 469, 469, 469, 469, 469, 469, 469, 469, 469, 469, 469, 469, 469, 469, 469, 469, 469, 469, 469, 469, 469, 469, 469, 469, 469, 469, 469, 469, 469, 469, 469, 469, 469, 469, 469, 469, 469, 469, 469, 469, 469, 469, 469, 469, 469, 469, 469, 469, 469, 469, 469, 469, 469, 469, 469, 469, 469, 469, 469, 469, 469, 469, 469, 469, 469, 469, 469, 469, 469, 469, 469, 469, 469, 469, 469, 469, 469, 469, 469, 469, 469, 469, 469, 469, 469, 469, 469, 469, 469, 469, 469, 469, 469, 469, 469, 469, 469, 469, 469, 469, 469, 469, 469, 469, 469, 469, 469, 469, 469, 469, 469, 469, 469, 469, 469, 469, 469, 469, 469, 469, 469, 469, 469, 469, 469, 469, 469, 469, 469, 469, 469, 469, 469, 469, 469, 469, 469, 469, 469, 469, 469, 469, 469, 469, 469, 469, 469, 469, 469, 469, 469, 469, 469, 469, 469, 469, 469, 469, 469, 469, 469, 469, 469, 469, 469, 469, 469, 469, 469, 469, 469, 469, 469, 469, 469, 469, 469, 469, 469, 469, 469, 469, 469, 469, 469, 469, 469, 469, 469, 469, 469, 469, 469, 469, 469, 469, 469, 469, 469, 469, 469, 469, 469, 469, 469, 469, 469, 469, 469, 469, 469, 469, 469, 469, 469, 469, 469, 469, 469, 469, 469, 469, 469, 469, 469, 469, 469, 469, 469, 469, 469, 469, 469, 469, 469, 469, 469, 469, 469, 469, 469, 469, 469, 469, 469, 469, 469, 469, 469, 469, 469, 469, 469, 469, 469, 469, 469, 469, 469, 469, 469, 469, 469, 469, 469, 469, 469, 469, 469, 469, 469, 469, 469, 469, 469, 469, 469, 469, 469, 469, 469, 469, 469, 469, 469, 469, 469, 469, 469, 469, 469, 469, 469, 469, 469, 469, 469, 469, 469, 469, 469, 375: 469, 378: 469, 1763, 1764, 1769, 384: 469, 386: 469, 391: 469, 469, 469, 469, 469, 469, 469, 469, 469, 401: 1765, 469, 469, 407: 469, 469, 469, 411: 469, 469, 469, 469, 469, 469, 469, 469, 421: 1767, 469, 1766, 1770, 469, 1768, 469, 469, 432: 469, 434: 469}, + {470, 470, 470, 470, 470, 470, 470, 470, 470, 470, 470, 470, 470, 470, 470, 470, 470, 470, 470, 470, 470, 470, 470, 470, 470, 470, 470, 470, 470, 470, 470, 470, 470, 470, 470, 470, 470, 470, 470, 470, 470, 470, 470, 470, 470, 470, 470, 470, 470, 470, 470, 470, 470, 470, 470, 470, 470, 470, 470, 470, 470, 470, 470, 470, 470, 470, 470, 470, 470, 470, 470, 470, 470, 470, 470, 470, 470, 470, 470, 470, 470, 470, 470, 470, 470, 470, 470, 470, 470, 470, 470, 470, 470, 470, 470, 470, 470, 470, 470, 470, 470, 470, 470, 470, 470, 470, 470, 470, 470, 470, 470, 470, 470, 470, 470, 470, 470, 470, 470, 470, 470, 470, 470, 470, 470, 470, 470, 470, 470, 470, 470, 470, 470, 470, 470, 470, 470, 470, 470, 470, 470, 470, 470, 470, 470, 470, 470, 470, 470, 470, 470, 470, 470, 470, 470, 470, 470, 470, 470, 470, 470, 470, 470, 470, 470, 470, 470, 470, 470, 470, 470, 470, 470, 470, 470, 470, 470, 470, 470, 470, 470, 470, 470, 470, 470, 470, 470, 470, 470, 470, 470, 470, 470, 470, 470, 470, 470, 470, 470, 470, 470, 470, 470, 470, 470, 470, 470, 470, 470, 470, 470, 470, 470, 470, 470, 470, 470, 470, 470, 470, 470, 470, 470, 470, 470, 470, 470, 470, 470, 470, 470, 470, 470, 470, 470, 470, 470, 470, 470, 470, 470, 470, 470, 470, 470, 470, 470, 470, 470, 470, 470, 470, 470, 470, 470, 470, 470, 470, 470, 470, 470, 470, 470, 470, 470, 470, 470, 470, 470, 470, 470, 470, 470, 470, 470, 470, 470, 470, 
470, 470, 470, 470, 470, 470, 470, 470, 470, 470, 470, 470, 470, 470, 470, 470, 470, 470, 470, 470, 470, 470, 470, 470, 470, 470, 470, 470, 470, 470, 470, 470, 470, 470, 470, 470, 470, 470, 470, 470, 470, 470, 470, 470, 470, 470, 470, 470, 470, 470, 470, 470, 470, 470, 470, 470, 470, 470, 470, 470, 470, 470, 470, 470, 470, 470, 470, 470, 470, 470, 470, 470, 470, 470, 470, 470, 470, 470, 470, 470, 470, 470, 470, 470, 470, 470, 470, 470, 470, 470, 470, 470, 470, 470, 470, 375: 470, 378: 470, 1763, 1764, 1769, 384: 470, 386: 470, 391: 470, 470, 470, 470, 470, 470, 470, 470, 470, 401: 1765, 470, 470, 407: 470, 470, 470, 411: 470, 470, 470, 470, 470, 470, 470, 470, 421: 1767, 470, 1766, 1770, 470, 1768, 470, 470, 432: 470, 434: 470}, + // 625 + {471, 471, 471, 471, 471, 471, 471, 471, 471, 471, 471, 471, 471, 471, 471, 471, 471, 471, 471, 471, 471, 471, 471, 471, 471, 471, 471, 471, 471, 471, 471, 471, 471, 471, 471, 471, 471, 471, 471, 471, 471, 471, 471, 471, 471, 471, 471, 471, 471, 471, 471, 471, 471, 471, 471, 471, 471, 471, 471, 471, 471, 471, 471, 471, 471, 471, 471, 471, 471, 471, 471, 471, 471, 471, 471, 471, 471, 471, 471, 471, 471, 471, 471, 471, 471, 471, 471, 471, 471, 471, 471, 471, 471, 471, 471, 471, 471, 471, 471, 471, 471, 471, 471, 471, 471, 471, 471, 471, 471, 471, 471, 471, 471, 471, 471, 471, 471, 471, 471, 471, 471, 471, 471, 471, 471, 471, 471, 471, 471, 471, 471, 471, 471, 471, 471, 471, 471, 471, 471, 471, 471, 471, 471, 471, 471, 471, 471, 471, 471, 471, 471, 471, 471, 471, 471, 471, 471, 471, 471, 471, 471, 471, 471, 471, 471, 471, 471, 471, 471, 471, 471, 471, 471, 471, 471, 471, 471, 471, 471, 471, 471, 471, 471, 471, 471, 471, 471, 471, 471, 471, 471, 471, 471, 471, 471, 471, 471, 471, 471, 471, 471, 471, 471, 471, 471, 471, 471, 471, 471, 471, 471, 471, 471, 471, 471, 471, 471, 471, 471, 471, 471, 471, 471, 471, 471, 471, 471, 471, 471, 471, 471, 471, 471, 471, 471, 471, 471, 471, 471, 471, 471, 471, 471, 471, 471, 471, 471, 471, 471, 471, 471, 471, 471, 471, 471, 471, 471, 471, 471, 471, 471, 471, 471, 471, 471, 471, 471, 471, 471, 471, 471, 471, 471, 471, 471, 471, 471, 471, 471, 471, 471, 471, 471, 471, 471, 471, 471, 471, 471, 471, 471, 471, 471, 471, 471, 471, 471, 471, 471, 471, 471, 471, 471, 471, 471, 471, 471, 471, 471, 471, 471, 471, 471, 471, 471, 471, 471, 471, 471, 471, 471, 471, 471, 471, 471, 471, 471, 471, 471, 471, 471, 471, 471, 471, 471, 471, 471, 471, 471, 471, 471, 471, 471, 471, 471, 471, 471, 471, 471, 471, 471, 471, 471, 471, 471, 471, 471, 471, 471, 471, 471, 471, 471, 471, 471, 471, 471, 471, 471, 471, 471, 471, 471, 375: 471, 378: 471, 1763, 1764, 1769, 384: 471, 386: 471, 391: 471, 471, 471, 471, 471, 471, 471, 471, 471, 401: 1765, 471, 471, 407: 471, 471, 471, 411: 471, 471, 471, 471, 471, 471, 471, 471, 421: 1767, 471, 1766, 1770, 471, 1768, 1761, 1762, 432: 471, 434: 471}, + {472, 472, 472, 472, 472, 472, 472, 472, 472, 472, 472, 472, 472, 472, 472, 472, 472, 472, 472, 472, 472, 472, 472, 472, 472, 472, 472, 472, 472, 472, 472, 472, 472, 472, 472, 472, 472, 472, 472, 472, 472, 472, 472, 472, 472, 472, 472, 472, 472, 472, 472, 472, 472, 472, 472, 472, 472, 472, 472, 472, 472, 472, 472, 472, 472, 472, 472, 472, 472, 472, 472, 472, 472, 472, 472, 472, 472, 472, 472, 472, 472, 472, 472, 472, 472, 472, 472, 472, 472, 472, 472, 472, 472, 472, 472, 472, 472, 472, 472, 472, 472, 472, 472, 472, 472, 472, 472, 472, 472, 472, 472, 472, 472, 472, 472, 472, 472, 472, 472, 472, 472, 472, 472, 472, 472, 472, 472, 472, 472, 472, 472, 472, 472, 472, 
472, 472, 472, 472, 472, 472, 472, 472, 472, 472, 472, 472, 472, 472, 472, 472, 472, 472, 472, 472, 472, 472, 472, 472, 472, 472, 472, 472, 472, 472, 472, 472, 472, 472, 472, 472, 472, 472, 472, 472, 472, 472, 472, 472, 472, 472, 472, 472, 472, 472, 472, 472, 472, 472, 472, 472, 472, 472, 472, 472, 472, 472, 472, 472, 472, 472, 472, 472, 472, 472, 472, 472, 472, 472, 472, 472, 472, 472, 472, 472, 472, 472, 472, 472, 472, 472, 472, 472, 472, 472, 472, 472, 472, 472, 472, 472, 472, 472, 472, 472, 472, 472, 472, 472, 472, 472, 472, 472, 472, 472, 472, 472, 472, 472, 472, 472, 472, 472, 472, 472, 472, 472, 472, 472, 472, 472, 472, 472, 472, 472, 472, 472, 472, 472, 472, 472, 472, 472, 472, 472, 472, 472, 472, 472, 472, 472, 472, 472, 472, 472, 472, 472, 472, 472, 472, 472, 472, 472, 472, 472, 472, 472, 472, 472, 472, 472, 472, 472, 472, 472, 472, 472, 472, 472, 472, 472, 472, 472, 472, 472, 472, 472, 472, 472, 472, 472, 472, 472, 472, 472, 472, 472, 472, 472, 472, 472, 472, 472, 472, 472, 472, 472, 472, 472, 472, 472, 472, 472, 472, 472, 472, 472, 472, 472, 472, 472, 472, 472, 472, 472, 472, 472, 472, 472, 472, 472, 472, 472, 472, 472, 472, 472, 472, 472, 472, 472, 472, 472, 472, 375: 472, 378: 472, 1763, 1764, 1769, 384: 472, 386: 472, 391: 472, 472, 472, 472, 472, 472, 472, 472, 472, 401: 1765, 472, 472, 407: 472, 472, 472, 411: 472, 472, 472, 472, 472, 472, 472, 472, 421: 1767, 1760, 1766, 1770, 472, 1768, 1761, 1762, 432: 472, 434: 472}, + {8: 960, 406: 1788, 679: 1787, 1789}, + {8: 959}, + {8: 958}, + // 630 + {8: 1790}, + {380, 380, 380, 380, 380, 380, 380, 380, 380, 380, 380, 380, 380, 380, 380, 380, 380, 380, 380, 380, 380, 380, 380, 380, 380, 380, 380, 380, 380, 380, 380, 380, 380, 380, 380, 380, 380, 380, 380, 380, 380, 380, 380, 380, 380, 380, 380, 380, 380, 380, 380, 380, 380, 380, 380, 380, 380, 380, 380, 380, 380, 380, 380, 380, 380, 380, 380, 380, 380, 380, 380, 380, 380, 380, 380, 380, 380, 380, 380, 380, 380, 380, 380, 380, 380, 380, 380, 380, 380, 380, 380, 380, 380, 380, 380, 380, 380, 380, 380, 380, 380, 380, 380, 380, 380, 380, 380, 380, 380, 380, 380, 380, 380, 380, 380, 380, 380, 380, 380, 380, 380, 380, 380, 380, 380, 380, 380, 380, 380, 380, 380, 380, 380, 380, 380, 380, 380, 380, 380, 380, 380, 380, 380, 380, 380, 380, 380, 380, 380, 380, 380, 380, 380, 380, 380, 380, 380, 380, 380, 380, 380, 380, 380, 380, 380, 380, 380, 380, 380, 380, 380, 380, 380, 380, 380, 380, 380, 380, 380, 380, 380, 380, 380, 380, 380, 380, 380, 380, 380, 380, 380, 380, 380, 380, 380, 380, 380, 380, 380, 380, 380, 380, 380, 380, 380, 380, 380, 380, 380, 380, 380, 380, 380, 380, 380, 380, 380, 380, 380, 380, 380, 380, 380, 380, 380, 380, 380, 380, 380, 380, 380, 380, 380, 380, 380, 380, 380, 380, 380, 380, 380, 380, 380, 380, 380, 380, 380, 380, 380, 380, 380, 380, 380, 380, 380, 380, 380, 380, 380, 380, 380, 380, 380, 380, 380, 380, 380, 380, 380, 380, 380, 380, 380, 380, 380, 380, 380, 380, 380, 380, 380, 380, 380, 380, 380, 380, 380, 380, 380, 380, 380, 380, 380, 380, 380, 380, 380, 380, 380, 380, 380, 380, 380, 380, 380, 380, 380, 380, 380, 380, 380, 380, 380, 380, 380, 380, 380, 380, 380, 380, 380, 380, 380, 380, 380, 380, 380, 380, 380, 380, 380, 380, 380, 380, 380, 380, 380, 380, 380, 380, 380, 380, 380, 380, 380, 380, 380, 380, 380, 380, 380, 380, 380, 380, 380, 380, 380, 380, 380, 380, 380, 380, 380, 380, 380, 380, 380, 380, 380, 380, 380, 380, 380, 375: 380, 377: 380, 380, 380, 380, 380, 384: 380, 386: 380, 391: 380, 380, 380, 380, 380, 380, 380, 380, 380, 401: 380, 380, 380, 407: 
380, 380, 380, 411: 380, 380, 380, 380, 380, 380, 380, 380, 421: 380, 380, 380, 380, 380, 380, 380, 380, 432: 380, 434: 380}, + {8: 960, 406: 1788, 679: 1787, 1792}, + {8: 1793}, + {381, 381, 381, 381, 381, 381, 381, 381, 381, 381, 381, 381, 381, 381, 381, 381, 381, 381, 381, 381, 381, 381, 381, 381, 381, 381, 381, 381, 381, 381, 381, 381, 381, 381, 381, 381, 381, 381, 381, 381, 381, 381, 381, 381, 381, 381, 381, 381, 381, 381, 381, 381, 381, 381, 381, 381, 381, 381, 381, 381, 381, 381, 381, 381, 381, 381, 381, 381, 381, 381, 381, 381, 381, 381, 381, 381, 381, 381, 381, 381, 381, 381, 381, 381, 381, 381, 381, 381, 381, 381, 381, 381, 381, 381, 381, 381, 381, 381, 381, 381, 381, 381, 381, 381, 381, 381, 381, 381, 381, 381, 381, 381, 381, 381, 381, 381, 381, 381, 381, 381, 381, 381, 381, 381, 381, 381, 381, 381, 381, 381, 381, 381, 381, 381, 381, 381, 381, 381, 381, 381, 381, 381, 381, 381, 381, 381, 381, 381, 381, 381, 381, 381, 381, 381, 381, 381, 381, 381, 381, 381, 381, 381, 381, 381, 381, 381, 381, 381, 381, 381, 381, 381, 381, 381, 381, 381, 381, 381, 381, 381, 381, 381, 381, 381, 381, 381, 381, 381, 381, 381, 381, 381, 381, 381, 381, 381, 381, 381, 381, 381, 381, 381, 381, 381, 381, 381, 381, 381, 381, 381, 381, 381, 381, 381, 381, 381, 381, 381, 381, 381, 381, 381, 381, 381, 381, 381, 381, 381, 381, 381, 381, 381, 381, 381, 381, 381, 381, 381, 381, 381, 381, 381, 381, 381, 381, 381, 381, 381, 381, 381, 381, 381, 381, 381, 381, 381, 381, 381, 381, 381, 381, 381, 381, 381, 381, 381, 381, 381, 381, 381, 381, 381, 381, 381, 381, 381, 381, 381, 381, 381, 381, 381, 381, 381, 381, 381, 381, 381, 381, 381, 381, 381, 381, 381, 381, 381, 381, 381, 381, 381, 381, 381, 381, 381, 381, 381, 381, 381, 381, 381, 381, 381, 381, 381, 381, 381, 381, 381, 381, 381, 381, 381, 381, 381, 381, 381, 381, 381, 381, 381, 381, 381, 381, 381, 381, 381, 381, 381, 381, 381, 381, 381, 381, 381, 381, 381, 381, 381, 381, 381, 381, 381, 381, 381, 381, 381, 381, 381, 381, 381, 381, 381, 381, 381, 381, 381, 381, 381, 381, 381, 381, 381, 381, 375: 381, 377: 381, 381, 381, 381, 381, 384: 381, 386: 381, 391: 381, 381, 381, 381, 381, 381, 381, 381, 381, 401: 381, 381, 381, 407: 381, 381, 381, 411: 381, 381, 381, 381, 381, 381, 381, 381, 421: 381, 381, 381, 381, 381, 381, 381, 381, 432: 381, 434: 381}, + // 635 + {1309, 1332, 1217, 1442, 1436, 1426, 10: 1280, 1625, 1477, 1511, 1504, 1497, 1507, 1500, 1499, 1501, 1517, 1509, 1503, 1515, 1516, 1513, 1514, 1502, 1498, 1505, 1506, 1508, 1512, 1510, 1547, 1453, 1451, 1452, 1314, 1623, 1226, 1441, 1244, 1288, 1246, 1225, 1628, 1263, 1434, 1299, 1335, 1522, 1521, 1270, 1338, 1298, 1476, 1221, 1231, 1340, 1439, 1341, 1257, 1518, 1519, 1438, 1326, 1350, 1273, 1278, 1430, 1431, 1283, 1289, 1384, 1632, 1432, 1433, 1219, 1222, 1224, 1223, 1238, 1626, 1482, 1427, 1243, 1249, 1261, 1262, 1250, 1485, 1405, 1318, 1319, 1279, 1450, 1290, 1631, 1630, 1415, 1295, 1300, 1633, 1402, 1214, 1529, 1215, 1218, 1460, 1387, 1304, 1624, 1310, 1348, 1349, 1345, 1530, 1531, 1532, 1406, 1576, 1478, 1479, 1467, 1480, 1227, 1394, 1533, 1312, 1396, 1228, 1381, 1481, 1640, 1636, 1230, 1329, 1232, 1233, 1313, 1311, 1234, 1408, 1534, 1535, 1404, 1235, 1536, 1468, 1236, 1537, 1538, 1627, 1240, 1388, 1324, 1483, 1417, 1241, 1484, 1242, 1245, 1247, 1248, 1251, 1386, 1351, 1252, 1577, 1435, 1356, 1253, 1461, 1401, 1574, 1254, 1539, 1411, 1255, 1256, 1580, 1258, 1259, 1346, 1540, 1322, 1541, 1418, 1459, 1629, 1307, 1622, 1462, 1403, 1337, 1542, 1265, 1543, 1544, 1389, 1407, 1412, 1325, 1398, 1486, 1457, 1268, 1266, 
1334, 1419, 1267, 1456, 1458, 1315, 1546, 1473, 1472, 1376, 1377, 1316, 1378, 1379, 1390, 1642, 1545, 1317, 1643, 1463, 1302, 1641, 1269, 1400, 1573, 1344, 1466, 1469, 1420, 1487, 1488, 1464, 1465, 1353, 1470, 1548, 1454, 1354, 1331, 1285, 1524, 1575, 1410, 1422, 1425, 1352, 1271, 1475, 1474, 1525, 1367, 1550, 1368, 1272, 1343, 1362, 1363, 1364, 1489, 1637, 1370, 1369, 1274, 1549, 1395, 1275, 1528, 1527, 1383, 1424, 1276, 1437, 1327, 1455, 1380, 1328, 1638, 1277, 1385, 1639, 1320, 1490, 1644, 1429, 1393, 1372, 1471, 1333, 1373, 1374, 1281, 1423, 1382, 1375, 1282, 1305, 1414, 1523, 1416, 1336, 1339, 1443, 1444, 1445, 1446, 1447, 1448, 1449, 1578, 1491, 1358, 1494, 1495, 1493, 1492, 1357, 1428, 1284, 1554, 1555, 1556, 1557, 1579, 1551, 1397, 1287, 1286, 1552, 1553, 1355, 1413, 1409, 1421, 1440, 1391, 1291, 1496, 1561, 1562, 1563, 1564, 1565, 1566, 1568, 1567, 1569, 1570, 1571, 1520, 1294, 1323, 1572, 1297, 1330, 1392, 1635, 1558, 1559, 1560, 1347, 1634, 1526, 1399, 373: 1671, 1674, 376: 1646, 378: 1655, 1669, 1668, 1699, 400: 1658, 404: 1758, 1677, 1650, 410: 1682, 419: 1645, 1647, 429: 1675, 1649, 1648, 433: 1676, 435: 1654, 1680, 1689, 1711, 1653, 1690, 1691, 1652, 1666, 1667, 1705, 1697, 1700, 1706, 1707, 1702, 1703, 1708, 1701, 1704, 1695, 1673, 1685, 1686, 1688, 1684, 1678, 1679, 1670, 1681, 1683, 1672, 1687, 1692, 1693, 523: 1657, 1212, 1213, 1211, 1663, 1659, 1651, 1662, 1660, 1661, 1694, 1698, 1696, 1656, 1665, 1709, 1710, 1664, 1795}, + {9: 1796, 379: 1763, 1764, 1769, 401: 1765, 421: 1767, 1760, 1766, 1770, 1759, 1768, 1761, 1762}, + {1309, 1332, 1217, 1442, 1436, 1426, 10: 1280, 1625, 1477, 1511, 1504, 1497, 1507, 1500, 1499, 1501, 1517, 1509, 1503, 1515, 1516, 1513, 1514, 1502, 1498, 1505, 1506, 1508, 1512, 1510, 1547, 1453, 1451, 1452, 1314, 1623, 1226, 1441, 1244, 1288, 1246, 1225, 1628, 1263, 1434, 1299, 1335, 1522, 1521, 1270, 1338, 1298, 1476, 1221, 1231, 1340, 1439, 1341, 1257, 1518, 1519, 1438, 1326, 1350, 1273, 1278, 1430, 1431, 1283, 1289, 1384, 1632, 1432, 1433, 1219, 1222, 1224, 1223, 1238, 1626, 1482, 1427, 1243, 1249, 1261, 1262, 1250, 1485, 1405, 1318, 1319, 1279, 1450, 1290, 1631, 1630, 1415, 1295, 1300, 1633, 1402, 1214, 1529, 1215, 1218, 1460, 1387, 1304, 1624, 1310, 1348, 1349, 1345, 1530, 1531, 1532, 1406, 1576, 1478, 1479, 1467, 1480, 1227, 1394, 1533, 1312, 1396, 1228, 1381, 1481, 1640, 1636, 1230, 1329, 1232, 1233, 1313, 1311, 1234, 1408, 1534, 1535, 1404, 1235, 1536, 1468, 1236, 1537, 1538, 1627, 1240, 1388, 1324, 1483, 1417, 1241, 1484, 1242, 1245, 1247, 1248, 1251, 1386, 1351, 1252, 1577, 1435, 1356, 1253, 1461, 1401, 1574, 1254, 1539, 1411, 1255, 1256, 1580, 1258, 1259, 1346, 1540, 1322, 1541, 1418, 1459, 1629, 1307, 1622, 1462, 1403, 1337, 1542, 1265, 1543, 1544, 1389, 1407, 1412, 1325, 1398, 1486, 1457, 1268, 1266, 1334, 1419, 1267, 1456, 1458, 1315, 1546, 1473, 1472, 1376, 1377, 1316, 1378, 1379, 1390, 1642, 1545, 1317, 1643, 1463, 1302, 1641, 1269, 1400, 1573, 1344, 1466, 1469, 1420, 1487, 1488, 1464, 1465, 1353, 1470, 1548, 1454, 1354, 1331, 1285, 1524, 1575, 1410, 1422, 1425, 1352, 1271, 1475, 1474, 1525, 1367, 1550, 1368, 1272, 1343, 1362, 1363, 1364, 1489, 1637, 1370, 1369, 1274, 1549, 1395, 1275, 1528, 1527, 1383, 1424, 1276, 1437, 1327, 1455, 1380, 1328, 1638, 1277, 1385, 1639, 1320, 1490, 1644, 1429, 1393, 1372, 1471, 1333, 1373, 1374, 1281, 1423, 1382, 1375, 1282, 1305, 1414, 1523, 1416, 1336, 1339, 1443, 1444, 1445, 1446, 1447, 1448, 1449, 1578, 1491, 1358, 1494, 1495, 1493, 1492, 1357, 1428, 1284, 1554, 1555, 1556, 1557, 1579, 1551, 1397, 
1287, 1286, 1552, 1553, 1355, 1413, 1409, 1421, 1440, 1391, 1291, 1496, 1561, 1562, 1563, 1564, 1565, 1566, 1568, 1567, 1569, 1570, 1571, 1520, 1294, 1323, 1572, 1297, 1330, 1392, 1635, 1558, 1559, 1560, 1347, 1634, 1526, 1399, 373: 1671, 1674, 376: 1646, 378: 1655, 1669, 1668, 1699, 400: 1658, 404: 1758, 1677, 1650, 410: 1682, 419: 1645, 1647, 429: 1675, 1649, 1648, 433: 1676, 435: 1654, 1680, 1689, 1711, 1653, 1690, 1691, 1652, 1666, 1667, 1705, 1697, 1700, 1706, 1707, 1702, 1703, 1708, 1701, 1704, 1695, 1673, 1685, 1686, 1688, 1684, 1678, 1679, 1670, 1681, 1683, 1672, 1687, 1692, 1693, 523: 1657, 1212, 1213, 1211, 1663, 1659, 1651, 1662, 1660, 1661, 1694, 1698, 1696, 1656, 1665, 1709, 1710, 1664, 1797}, + {8: 1798, 379: 1763, 1764, 1769, 401: 1765, 421: 1767, 1760, 1766, 1770, 1759, 1768, 1761, 1762}, + {382, 382, 382, 382, 382, 382, 382, 382, 382, 382, 382, 382, 382, 382, 382, 382, 382, 382, 382, 382, 382, 382, 382, 382, 382, 382, 382, 382, 382, 382, 382, 382, 382, 382, 382, 382, 382, 382, 382, 382, 382, 382, 382, 382, 382, 382, 382, 382, 382, 382, 382, 382, 382, 382, 382, 382, 382, 382, 382, 382, 382, 382, 382, 382, 382, 382, 382, 382, 382, 382, 382, 382, 382, 382, 382, 382, 382, 382, 382, 382, 382, 382, 382, 382, 382, 382, 382, 382, 382, 382, 382, 382, 382, 382, 382, 382, 382, 382, 382, 382, 382, 382, 382, 382, 382, 382, 382, 382, 382, 382, 382, 382, 382, 382, 382, 382, 382, 382, 382, 382, 382, 382, 382, 382, 382, 382, 382, 382, 382, 382, 382, 382, 382, 382, 382, 382, 382, 382, 382, 382, 382, 382, 382, 382, 382, 382, 382, 382, 382, 382, 382, 382, 382, 382, 382, 382, 382, 382, 382, 382, 382, 382, 382, 382, 382, 382, 382, 382, 382, 382, 382, 382, 382, 382, 382, 382, 382, 382, 382, 382, 382, 382, 382, 382, 382, 382, 382, 382, 382, 382, 382, 382, 382, 382, 382, 382, 382, 382, 382, 382, 382, 382, 382, 382, 382, 382, 382, 382, 382, 382, 382, 382, 382, 382, 382, 382, 382, 382, 382, 382, 382, 382, 382, 382, 382, 382, 382, 382, 382, 382, 382, 382, 382, 382, 382, 382, 382, 382, 382, 382, 382, 382, 382, 382, 382, 382, 382, 382, 382, 382, 382, 382, 382, 382, 382, 382, 382, 382, 382, 382, 382, 382, 382, 382, 382, 382, 382, 382, 382, 382, 382, 382, 382, 382, 382, 382, 382, 382, 382, 382, 382, 382, 382, 382, 382, 382, 382, 382, 382, 382, 382, 382, 382, 382, 382, 382, 382, 382, 382, 382, 382, 382, 382, 382, 382, 382, 382, 382, 382, 382, 382, 382, 382, 382, 382, 382, 382, 382, 382, 382, 382, 382, 382, 382, 382, 382, 382, 382, 382, 382, 382, 382, 382, 382, 382, 382, 382, 382, 382, 382, 382, 382, 382, 382, 382, 382, 382, 382, 382, 382, 382, 382, 382, 382, 382, 382, 382, 382, 382, 382, 382, 382, 382, 382, 382, 382, 382, 382, 382, 382, 382, 382, 382, 375: 382, 377: 382, 382, 382, 382, 382, 384: 382, 386: 382, 391: 382, 382, 382, 382, 382, 382, 382, 382, 382, 401: 382, 382, 382, 407: 382, 382, 382, 411: 382, 382, 382, 382, 382, 382, 382, 382, 421: 382, 382, 382, 382, 382, 382, 382, 382, 432: 382, 434: 382}, + // 640 + {383, 383, 383, 383, 383, 383, 383, 383, 383, 383, 383, 383, 383, 383, 383, 383, 383, 383, 383, 383, 383, 383, 383, 383, 383, 383, 383, 383, 383, 383, 383, 383, 383, 383, 383, 383, 383, 383, 383, 383, 383, 383, 383, 383, 383, 383, 383, 383, 383, 383, 383, 383, 383, 383, 383, 383, 383, 383, 383, 383, 383, 383, 383, 383, 383, 383, 383, 383, 383, 383, 383, 383, 383, 383, 383, 383, 383, 383, 383, 383, 383, 383, 383, 383, 383, 383, 383, 383, 383, 383, 383, 383, 383, 383, 383, 383, 383, 383, 383, 383, 383, 383, 383, 383, 383, 383, 383, 383, 383, 383, 383, 383, 383, 383, 383, 383, 383, 383, 383, 
383, 383, 383, 383, 383, 383, 383, 383, 383, 383, 383, 383, 383, 383, 383, 383, 383, 383, 383, 383, 383, 383, 383, 383, 383, 383, 383, 383, 383, 383, 383, 383, 383, 383, 383, 383, 383, 383, 383, 383, 383, 383, 383, 383, 383, 383, 383, 383, 383, 383, 383, 383, 383, 383, 383, 383, 383, 383, 383, 383, 383, 383, 383, 383, 383, 383, 383, 383, 383, 383, 383, 383, 383, 383, 383, 383, 383, 383, 383, 383, 383, 383, 383, 383, 383, 383, 383, 383, 383, 383, 383, 383, 383, 383, 383, 383, 383, 383, 383, 383, 383, 383, 383, 383, 383, 383, 383, 383, 383, 383, 383, 383, 383, 383, 383, 383, 383, 383, 383, 383, 383, 383, 383, 383, 383, 383, 383, 383, 383, 383, 383, 383, 383, 383, 383, 383, 383, 383, 383, 383, 383, 383, 383, 383, 383, 383, 383, 383, 383, 383, 383, 383, 383, 383, 383, 383, 383, 383, 383, 383, 383, 383, 383, 383, 383, 383, 383, 383, 383, 383, 383, 383, 383, 383, 383, 383, 383, 383, 383, 383, 383, 383, 383, 383, 383, 383, 383, 383, 383, 383, 383, 383, 383, 383, 383, 383, 383, 383, 383, 383, 383, 383, 383, 383, 383, 383, 383, 383, 383, 383, 383, 383, 383, 383, 383, 383, 383, 383, 383, 383, 383, 383, 383, 383, 383, 383, 383, 383, 383, 383, 383, 383, 383, 383, 383, 383, 383, 383, 383, 383, 383, 383, 383, 383, 383, 383, 383, 383, 383, 383, 383, 383, 383, 383, 375: 383, 377: 383, 383, 383, 383, 383, 384: 383, 386: 383, 391: 383, 383, 383, 383, 383, 383, 383, 383, 383, 401: 383, 383, 383, 407: 383, 383, 383, 411: 383, 383, 383, 383, 383, 383, 383, 383, 421: 383, 383, 383, 383, 383, 383, 383, 383, 432: 383, 434: 383}, + {8: 1801, 406: 1802}, + {358, 358, 358, 358, 358, 358, 358, 358, 358, 358, 358, 358, 358, 358, 358, 358, 358, 358, 358, 358, 358, 358, 358, 358, 358, 358, 358, 358, 358, 358, 358, 358, 358, 358, 358, 358, 358, 358, 358, 358, 358, 358, 358, 358, 358, 358, 358, 358, 358, 358, 358, 358, 358, 358, 358, 358, 358, 358, 358, 358, 358, 358, 358, 358, 358, 358, 358, 358, 358, 358, 358, 358, 358, 358, 358, 358, 358, 358, 358, 358, 358, 358, 358, 358, 358, 358, 358, 358, 358, 358, 358, 358, 358, 358, 358, 358, 358, 358, 358, 358, 358, 358, 358, 358, 358, 358, 358, 358, 358, 358, 358, 358, 358, 358, 358, 358, 358, 358, 358, 358, 358, 358, 358, 358, 358, 358, 358, 358, 358, 358, 358, 358, 358, 358, 358, 358, 358, 358, 358, 358, 358, 358, 358, 358, 358, 358, 358, 358, 358, 358, 358, 358, 358, 358, 358, 358, 358, 358, 358, 358, 358, 358, 358, 358, 358, 358, 358, 358, 358, 358, 358, 358, 358, 358, 358, 358, 358, 358, 358, 358, 358, 358, 358, 358, 358, 358, 358, 358, 358, 358, 358, 358, 358, 358, 358, 358, 358, 358, 358, 358, 358, 358, 358, 358, 358, 358, 358, 358, 358, 358, 358, 358, 358, 358, 358, 358, 358, 358, 358, 358, 358, 358, 358, 358, 358, 358, 358, 358, 358, 358, 358, 358, 358, 358, 358, 358, 358, 358, 358, 358, 358, 358, 358, 358, 358, 358, 358, 358, 358, 358, 358, 358, 358, 358, 358, 358, 358, 358, 358, 358, 358, 358, 358, 358, 358, 358, 358, 358, 358, 358, 358, 358, 358, 358, 358, 358, 358, 358, 358, 358, 358, 358, 358, 358, 358, 358, 358, 358, 358, 358, 358, 358, 358, 358, 358, 358, 358, 358, 358, 358, 358, 358, 358, 358, 358, 358, 358, 358, 358, 358, 358, 358, 358, 358, 358, 358, 358, 358, 358, 358, 358, 358, 358, 358, 358, 358, 358, 358, 358, 358, 358, 358, 358, 358, 358, 358, 358, 358, 358, 358, 358, 358, 358, 358, 358, 358, 358, 358, 358, 358, 358, 358, 358, 358, 358, 358, 358, 358, 358, 358, 358, 358, 358, 358, 358, 358, 358, 358, 358, 358, 358, 358, 358, 375: 358, 377: 358, 358, 358, 358, 358, 384: 358, 386: 358, 391: 358, 358, 358, 358, 358, 358, 358, 358, 358, 401: 358, 358, 358, 
407: 358, 358, 358, 411: 358, 358, 358, 358, 358, 358, 358, 358, 421: 358, 358, 358, 358, 358, 358, 358, 358, 432: 358, 434: 358}, + {8: 1803}, + {357, 357, 357, 357, 357, 357, 357, 357, 357, 357, 357, 357, 357, 357, 357, 357, 357, 357, 357, 357, 357, 357, 357, 357, 357, 357, 357, 357, 357, 357, 357, 357, 357, 357, 357, 357, 357, 357, 357, 357, 357, 357, 357, 357, 357, 357, 357, 357, 357, 357, 357, 357, 357, 357, 357, 357, 357, 357, 357, 357, 357, 357, 357, 357, 357, 357, 357, 357, 357, 357, 357, 357, 357, 357, 357, 357, 357, 357, 357, 357, 357, 357, 357, 357, 357, 357, 357, 357, 357, 357, 357, 357, 357, 357, 357, 357, 357, 357, 357, 357, 357, 357, 357, 357, 357, 357, 357, 357, 357, 357, 357, 357, 357, 357, 357, 357, 357, 357, 357, 357, 357, 357, 357, 357, 357, 357, 357, 357, 357, 357, 357, 357, 357, 357, 357, 357, 357, 357, 357, 357, 357, 357, 357, 357, 357, 357, 357, 357, 357, 357, 357, 357, 357, 357, 357, 357, 357, 357, 357, 357, 357, 357, 357, 357, 357, 357, 357, 357, 357, 357, 357, 357, 357, 357, 357, 357, 357, 357, 357, 357, 357, 357, 357, 357, 357, 357, 357, 357, 357, 357, 357, 357, 357, 357, 357, 357, 357, 357, 357, 357, 357, 357, 357, 357, 357, 357, 357, 357, 357, 357, 357, 357, 357, 357, 357, 357, 357, 357, 357, 357, 357, 357, 357, 357, 357, 357, 357, 357, 357, 357, 357, 357, 357, 357, 357, 357, 357, 357, 357, 357, 357, 357, 357, 357, 357, 357, 357, 357, 357, 357, 357, 357, 357, 357, 357, 357, 357, 357, 357, 357, 357, 357, 357, 357, 357, 357, 357, 357, 357, 357, 357, 357, 357, 357, 357, 357, 357, 357, 357, 357, 357, 357, 357, 357, 357, 357, 357, 357, 357, 357, 357, 357, 357, 357, 357, 357, 357, 357, 357, 357, 357, 357, 357, 357, 357, 357, 357, 357, 357, 357, 357, 357, 357, 357, 357, 357, 357, 357, 357, 357, 357, 357, 357, 357, 357, 357, 357, 357, 357, 357, 357, 357, 357, 357, 357, 357, 357, 357, 357, 357, 357, 357, 357, 357, 357, 357, 357, 357, 357, 357, 357, 357, 357, 357, 357, 357, 357, 357, 357, 357, 357, 357, 357, 357, 357, 357, 357, 357, 357, 357, 357, 357, 357, 375: 357, 377: 357, 357, 357, 357, 357, 384: 357, 386: 357, 391: 357, 357, 357, 357, 357, 357, 357, 357, 357, 401: 357, 357, 357, 407: 357, 357, 357, 411: 357, 357, 357, 357, 357, 357, 357, 357, 421: 357, 357, 357, 357, 357, 357, 357, 357, 432: 357, 434: 357}, + // 645 + {8: 1805}, + {384, 384, 384, 384, 384, 384, 384, 384, 384, 384, 384, 384, 384, 384, 384, 384, 384, 384, 384, 384, 384, 384, 384, 384, 384, 384, 384, 384, 384, 384, 384, 384, 384, 384, 384, 384, 384, 384, 384, 384, 384, 384, 384, 384, 384, 384, 384, 384, 384, 384, 384, 384, 384, 384, 384, 384, 384, 384, 384, 384, 384, 384, 384, 384, 384, 384, 384, 384, 384, 384, 384, 384, 384, 384, 384, 384, 384, 384, 384, 384, 384, 384, 384, 384, 384, 384, 384, 384, 384, 384, 384, 384, 384, 384, 384, 384, 384, 384, 384, 384, 384, 384, 384, 384, 384, 384, 384, 384, 384, 384, 384, 384, 384, 384, 384, 384, 384, 384, 384, 384, 384, 384, 384, 384, 384, 384, 384, 384, 384, 384, 384, 384, 384, 384, 384, 384, 384, 384, 384, 384, 384, 384, 384, 384, 384, 384, 384, 384, 384, 384, 384, 384, 384, 384, 384, 384, 384, 384, 384, 384, 384, 384, 384, 384, 384, 384, 384, 384, 384, 384, 384, 384, 384, 384, 384, 384, 384, 384, 384, 384, 384, 384, 384, 384, 384, 384, 384, 384, 384, 384, 384, 384, 384, 384, 384, 384, 384, 384, 384, 384, 384, 384, 384, 384, 384, 384, 384, 384, 384, 384, 384, 384, 384, 384, 384, 384, 384, 384, 384, 384, 384, 384, 384, 384, 384, 384, 384, 384, 384, 384, 384, 384, 384, 384, 384, 384, 384, 384, 384, 384, 384, 384, 384, 384, 384, 384, 384, 384, 384, 384, 384, 
384, 384, 384, 384, 384, 384, 384, 384, 384, 384, 384, 384, 384, 384, 384, 384, 384, 384, 384, 384, 384, 384, 384, 384, 384, 384, 384, 384, 384, 384, 384, 384, 384, 384, 384, 384, 384, 384, 384, 384, 384, 384, 384, 384, 384, 384, 384, 384, 384, 384, 384, 384, 384, 384, 384, 384, 384, 384, 384, 384, 384, 384, 384, 384, 384, 384, 384, 384, 384, 384, 384, 384, 384, 384, 384, 384, 384, 384, 384, 384, 384, 384, 384, 384, 384, 384, 384, 384, 384, 384, 384, 384, 384, 384, 384, 384, 384, 384, 384, 384, 384, 384, 384, 384, 384, 384, 384, 384, 384, 384, 384, 384, 384, 384, 384, 384, 384, 384, 384, 384, 384, 384, 375: 384, 377: 384, 384, 384, 384, 384, 384: 384, 386: 384, 391: 384, 384, 384, 384, 384, 384, 384, 384, 384, 401: 384, 384, 384, 407: 384, 384, 384, 411: 384, 384, 384, 384, 384, 384, 384, 384, 421: 384, 384, 384, 384, 384, 384, 384, 384, 432: 384, 434: 384}, + {8: 1808}, + {385, 385, 385, 385, 385, 385, 385, 385, 385, 385, 385, 385, 385, 385, 385, 385, 385, 385, 385, 385, 385, 385, 385, 385, 385, 385, 385, 385, 385, 385, 385, 385, 385, 385, 385, 385, 385, 385, 385, 385, 385, 385, 385, 385, 385, 385, 385, 385, 385, 385, 385, 385, 385, 385, 385, 385, 385, 385, 385, 385, 385, 385, 385, 385, 385, 385, 385, 385, 385, 385, 385, 385, 385, 385, 385, 385, 385, 385, 385, 385, 385, 385, 385, 385, 385, 385, 385, 385, 385, 385, 385, 385, 385, 385, 385, 385, 385, 385, 385, 385, 385, 385, 385, 385, 385, 385, 385, 385, 385, 385, 385, 385, 385, 385, 385, 385, 385, 385, 385, 385, 385, 385, 385, 385, 385, 385, 385, 385, 385, 385, 385, 385, 385, 385, 385, 385, 385, 385, 385, 385, 385, 385, 385, 385, 385, 385, 385, 385, 385, 385, 385, 385, 385, 385, 385, 385, 385, 385, 385, 385, 385, 385, 385, 385, 385, 385, 385, 385, 385, 385, 385, 385, 385, 385, 385, 385, 385, 385, 385, 385, 385, 385, 385, 385, 385, 385, 385, 385, 385, 385, 385, 385, 385, 385, 385, 385, 385, 385, 385, 385, 385, 385, 385, 385, 385, 385, 385, 385, 385, 385, 385, 385, 385, 385, 385, 385, 385, 385, 385, 385, 385, 385, 385, 385, 385, 385, 385, 385, 385, 385, 385, 385, 385, 385, 385, 385, 385, 385, 385, 385, 385, 385, 385, 385, 385, 385, 385, 385, 385, 385, 385, 385, 385, 385, 385, 385, 385, 385, 385, 385, 385, 385, 385, 385, 385, 385, 385, 385, 385, 385, 385, 385, 385, 385, 385, 385, 385, 385, 385, 385, 385, 385, 385, 385, 385, 385, 385, 385, 385, 385, 385, 385, 385, 385, 385, 385, 385, 385, 385, 385, 385, 385, 385, 385, 385, 385, 385, 385, 385, 385, 385, 385, 385, 385, 385, 385, 385, 385, 385, 385, 385, 385, 385, 385, 385, 385, 385, 385, 385, 385, 385, 385, 385, 385, 385, 385, 385, 385, 385, 385, 385, 385, 385, 385, 385, 385, 385, 385, 385, 385, 385, 385, 385, 385, 385, 385, 385, 385, 385, 385, 385, 385, 385, 385, 385, 385, 385, 385, 385, 385, 385, 385, 385, 375: 385, 377: 385, 385, 385, 385, 385, 384: 385, 386: 385, 391: 385, 385, 385, 385, 385, 385, 385, 385, 385, 401: 385, 385, 385, 407: 385, 385, 385, 411: 385, 385, 385, 385, 385, 385, 385, 385, 421: 385, 385, 385, 385, 385, 385, 385, 385, 432: 385, 434: 385}, + {398, 398, 398, 398, 398, 398, 398, 398, 398, 398, 398, 398, 398, 398, 398, 398, 398, 398, 398, 398, 398, 398, 398, 398, 398, 398, 398, 398, 398, 398, 398, 398, 398, 398, 398, 398, 398, 398, 398, 398, 398, 398, 398, 398, 398, 398, 398, 398, 398, 398, 398, 398, 398, 398, 398, 398, 398, 398, 398, 398, 398, 398, 398, 398, 398, 398, 398, 398, 398, 398, 398, 398, 398, 398, 398, 398, 398, 398, 398, 398, 398, 398, 398, 398, 398, 398, 398, 398, 398, 398, 398, 398, 398, 398, 398, 398, 398, 398, 398, 398, 398, 398, 398, 398, 398, 398, 398, 
398, 398, 398, 398, 398, 398, 398, 398, 398, 398, 398, 398, 398, 398, 398, 398, 398, 398, 398, 398, 398, 398, 398, 398, 398, 398, 398, 398, 398, 398, 398, 398, 398, 398, 398, 398, 398, 398, 398, 398, 398, 398, 398, 398, 398, 398, 398, 398, 398, 398, 398, 398, 398, 398, 398, 398, 398, 398, 398, 398, 398, 398, 398, 398, 398, 398, 398, 398, 398, 398, 398, 398, 398, 398, 398, 398, 398, 398, 398, 398, 398, 398, 398, 398, 398, 398, 398, 398, 398, 398, 398, 398, 398, 398, 398, 398, 398, 398, 398, 398, 398, 398, 398, 398, 398, 398, 398, 398, 398, 398, 398, 398, 398, 398, 398, 398, 398, 398, 398, 398, 398, 398, 398, 398, 398, 398, 398, 398, 398, 398, 398, 398, 398, 398, 398, 398, 398, 398, 398, 398, 398, 398, 398, 398, 398, 398, 398, 398, 398, 398, 398, 398, 398, 398, 398, 398, 398, 398, 398, 398, 398, 398, 398, 398, 398, 398, 398, 398, 398, 398, 398, 398, 398, 398, 398, 398, 398, 398, 398, 398, 398, 398, 398, 398, 398, 398, 398, 398, 398, 398, 398, 398, 398, 398, 398, 398, 398, 398, 398, 398, 398, 398, 398, 398, 398, 398, 398, 398, 398, 398, 398, 398, 398, 398, 398, 398, 398, 398, 398, 398, 398, 398, 398, 398, 398, 398, 398, 398, 398, 398, 398, 398, 398, 398, 398, 398, 398, 398, 398, 398, 398, 398, 398, 398, 398, 398, 398, 398, 398, 398, 398, 398, 398, 398, 398, 398, 398, 398, 398, 398, 398, 398, 398, 398, 398, 398, 375: 398, 377: 398, 398, 398, 398, 398, 384: 398, 386: 398, 391: 398, 398, 398, 398, 398, 398, 398, 398, 398, 401: 398, 398, 398, 407: 398, 398, 398, 411: 398, 398, 398, 398, 398, 398, 398, 398, 421: 398, 398, 398, 398, 398, 398, 398, 398, 432: 398, 434: 398}, + // 650 + {1309, 1332, 1217, 1442, 1436, 1426, 8: 962, 10: 1280, 1625, 1477, 1511, 1504, 1497, 1507, 1500, 1499, 1501, 1517, 1509, 1503, 1515, 1516, 1513, 1514, 1502, 1498, 1505, 1506, 1508, 1512, 1510, 1547, 1453, 1451, 1452, 1314, 1623, 1226, 1441, 1244, 1288, 1246, 1225, 1628, 1263, 1434, 1299, 1335, 1522, 1521, 1270, 1338, 1298, 1476, 1221, 1231, 1340, 1439, 1341, 1257, 1518, 1519, 1438, 1326, 1350, 1273, 1278, 1430, 1431, 1283, 1289, 1384, 1632, 1432, 1433, 1219, 1222, 1224, 1223, 1238, 1626, 1482, 1427, 1243, 1249, 1261, 1262, 1250, 1485, 1405, 1318, 1319, 1279, 1450, 1290, 1631, 1630, 1415, 1295, 1300, 1633, 1402, 1214, 1529, 1215, 1218, 1460, 1387, 1304, 1624, 1310, 1348, 1349, 1345, 1530, 1531, 1532, 1406, 1576, 1478, 1479, 1467, 1480, 1227, 1394, 1533, 1312, 1396, 1228, 1381, 1481, 1640, 1636, 1230, 1329, 1232, 1233, 1313, 1311, 1234, 1408, 1534, 1535, 1404, 1235, 1536, 1468, 1236, 1537, 1538, 1627, 1240, 1388, 1324, 1483, 1417, 1241, 1484, 1242, 1245, 1247, 1248, 1251, 1386, 1351, 1252, 1577, 1435, 1356, 1253, 1461, 1401, 1574, 1254, 1539, 1411, 1255, 1256, 1580, 1258, 1259, 1346, 1540, 1322, 1541, 1418, 1459, 1629, 1307, 1622, 1462, 1403, 1337, 1542, 1265, 1543, 1544, 1389, 1407, 1412, 1325, 1398, 1486, 1457, 1268, 1266, 1334, 1419, 1267, 1456, 1458, 1315, 1546, 1473, 1472, 1376, 1377, 1316, 1378, 1379, 1390, 1642, 1545, 1317, 1643, 1463, 1302, 1641, 1269, 1400, 1573, 1344, 1466, 1469, 1420, 1487, 1488, 1464, 1465, 1353, 1470, 1548, 1454, 1354, 1331, 1285, 1524, 1575, 1410, 1422, 1425, 1352, 1271, 1475, 1474, 1525, 1367, 1550, 1368, 1272, 1343, 1362, 1363, 1364, 1489, 1637, 1370, 1369, 1274, 1549, 1395, 1275, 1528, 1527, 1383, 1424, 1276, 1437, 1327, 1455, 1380, 1328, 1638, 1277, 1385, 1639, 1320, 1490, 1644, 1429, 1393, 1372, 1471, 1333, 1373, 1374, 1281, 1423, 1382, 1375, 1282, 1305, 1414, 1523, 1416, 1336, 1339, 1443, 1444, 1445, 1446, 1447, 1448, 1449, 1578, 1491, 1358, 1494, 1495, 1493, 1492, 1357, 1428, 1284, 
1554, 1555, 1556, 1557, 1579, 1551, 1397, 1287, 1286, 1552, 1553, 1355, 1413, 1409, 1421, 1440, 1391, 1291, 1496, 1561, 1562, 1563, 1564, 1565, 1566, 1568, 1567, 1569, 1570, 1571, 1520, 1294, 1323, 1572, 1297, 1330, 1392, 1635, 1558, 1559, 1560, 1347, 1634, 1526, 1399, 1618, 1671, 1674, 376: 1646, 378: 1655, 1669, 1668, 1699, 400: 1658, 404: 1616, 1677, 1650, 410: 1682, 419: 1645, 1647, 429: 1675, 1649, 1648, 433: 1676, 435: 1654, 1680, 1689, 1711, 1653, 1690, 1691, 1652, 1666, 1667, 1705, 1697, 1700, 1706, 1707, 1702, 1703, 1708, 1701, 1704, 1695, 1673, 1685, 1686, 1688, 1684, 1678, 1679, 1670, 1681, 1683, 1672, 1687, 1692, 1693, 523: 1657, 1212, 1213, 1211, 1663, 1659, 1651, 1662, 1660, 1661, 1694, 1698, 1696, 1656, 1665, 1709, 1710, 1664, 1621, 1620, 1619, 1810, 586: 1811, 630: 1812}, + {8: 964, 964, 392: 1720, 1719, 395: 1718, 1717, 1715, 547: 1716, 1714}, + {8: 961, 1814}, + {8: 1813}, + {386, 386, 386, 386, 386, 386, 386, 386, 386, 386, 386, 386, 386, 386, 386, 386, 386, 386, 386, 386, 386, 386, 386, 386, 386, 386, 386, 386, 386, 386, 386, 386, 386, 386, 386, 386, 386, 386, 386, 386, 386, 386, 386, 386, 386, 386, 386, 386, 386, 386, 386, 386, 386, 386, 386, 386, 386, 386, 386, 386, 386, 386, 386, 386, 386, 386, 386, 386, 386, 386, 386, 386, 386, 386, 386, 386, 386, 386, 386, 386, 386, 386, 386, 386, 386, 386, 386, 386, 386, 386, 386, 386, 386, 386, 386, 386, 386, 386, 386, 386, 386, 386, 386, 386, 386, 386, 386, 386, 386, 386, 386, 386, 386, 386, 386, 386, 386, 386, 386, 386, 386, 386, 386, 386, 386, 386, 386, 386, 386, 386, 386, 386, 386, 386, 386, 386, 386, 386, 386, 386, 386, 386, 386, 386, 386, 386, 386, 386, 386, 386, 386, 386, 386, 386, 386, 386, 386, 386, 386, 386, 386, 386, 386, 386, 386, 386, 386, 386, 386, 386, 386, 386, 386, 386, 386, 386, 386, 386, 386, 386, 386, 386, 386, 386, 386, 386, 386, 386, 386, 386, 386, 386, 386, 386, 386, 386, 386, 386, 386, 386, 386, 386, 386, 386, 386, 386, 386, 386, 386, 386, 386, 386, 386, 386, 386, 386, 386, 386, 386, 386, 386, 386, 386, 386, 386, 386, 386, 386, 386, 386, 386, 386, 386, 386, 386, 386, 386, 386, 386, 386, 386, 386, 386, 386, 386, 386, 386, 386, 386, 386, 386, 386, 386, 386, 386, 386, 386, 386, 386, 386, 386, 386, 386, 386, 386, 386, 386, 386, 386, 386, 386, 386, 386, 386, 386, 386, 386, 386, 386, 386, 386, 386, 386, 386, 386, 386, 386, 386, 386, 386, 386, 386, 386, 386, 386, 386, 386, 386, 386, 386, 386, 386, 386, 386, 386, 386, 386, 386, 386, 386, 386, 386, 386, 386, 386, 386, 386, 386, 386, 386, 386, 386, 386, 386, 386, 386, 386, 386, 386, 386, 386, 386, 386, 386, 386, 386, 386, 386, 386, 386, 386, 386, 386, 386, 386, 386, 386, 386, 386, 386, 386, 386, 386, 386, 386, 386, 386, 386, 386, 386, 386, 386, 386, 386, 386, 386, 386, 386, 386, 386, 386, 386, 386, 375: 386, 377: 386, 386, 386, 386, 386, 384: 386, 386: 386, 391: 386, 386, 386, 386, 386, 386, 386, 386, 386, 401: 386, 386, 386, 407: 386, 386, 386, 411: 386, 386, 386, 386, 386, 386, 386, 386, 421: 386, 386, 386, 386, 386, 386, 386, 386, 432: 386, 434: 386}, + // 655 + {1309, 1332, 1217, 1442, 1436, 1426, 10: 1280, 1625, 1477, 1511, 1504, 1497, 1507, 1500, 1499, 1501, 1517, 1509, 1503, 1515, 1516, 1513, 1514, 1502, 1498, 1505, 1506, 1508, 1512, 1510, 1547, 1453, 1451, 1452, 1314, 1623, 1226, 1441, 1244, 1288, 1246, 1225, 1628, 1263, 1434, 1299, 1335, 1522, 1521, 1270, 1338, 1298, 1476, 1221, 1231, 1340, 1439, 1341, 1257, 1518, 1519, 1438, 1326, 1350, 1273, 1278, 1430, 1431, 1283, 1289, 1384, 1632, 1432, 1433, 1219, 1222, 1224, 1223, 1238, 1626, 1482, 1427, 1243, 1249, 
1261, 1262, 1250, 1485, 1405, 1318, 1319, 1279, 1450, 1290, 1631, 1630, 1415, 1295, 1300, 1633, 1402, 1214, 1529, 1215, 1218, 1460, 1387, 1304, 1624, 1310, 1348, 1349, 1345, 1530, 1531, 1532, 1406, 1576, 1478, 1479, 1467, 1480, 1227, 1394, 1533, 1312, 1396, 1228, 1381, 1481, 1640, 1636, 1230, 1329, 1232, 1233, 1313, 1311, 1234, 1408, 1534, 1535, 1404, 1235, 1536, 1468, 1236, 1537, 1538, 1627, 1240, 1388, 1324, 1483, 1417, 1241, 1484, 1242, 1245, 1247, 1248, 1251, 1386, 1351, 1252, 1577, 1435, 1356, 1253, 1461, 1401, 1574, 1254, 1539, 1411, 1255, 1256, 1580, 1258, 1259, 1346, 1540, 1322, 1541, 1418, 1459, 1629, 1307, 1622, 1462, 1403, 1337, 1542, 1265, 1543, 1544, 1389, 1407, 1412, 1325, 1398, 1486, 1457, 1268, 1266, 1334, 1419, 1267, 1456, 1458, 1315, 1546, 1473, 1472, 1376, 1377, 1316, 1378, 1379, 1390, 1642, 1545, 1317, 1643, 1463, 1302, 1641, 1269, 1400, 1573, 1344, 1466, 1469, 1420, 1487, 1488, 1464, 1465, 1353, 1470, 1548, 1454, 1354, 1331, 1285, 1524, 1575, 1410, 1422, 1425, 1352, 1271, 1475, 1474, 1525, 1367, 1550, 1368, 1272, 1343, 1362, 1363, 1364, 1489, 1637, 1370, 1369, 1274, 1549, 1395, 1275, 1528, 1527, 1383, 1424, 1276, 1437, 1327, 1455, 1380, 1328, 1638, 1277, 1385, 1639, 1320, 1490, 1644, 1429, 1393, 1372, 1471, 1333, 1373, 1374, 1281, 1423, 1382, 1375, 1282, 1305, 1414, 1523, 1416, 1336, 1339, 1443, 1444, 1445, 1446, 1447, 1448, 1449, 1578, 1491, 1358, 1494, 1495, 1493, 1492, 1357, 1428, 1284, 1554, 1555, 1556, 1557, 1579, 1551, 1397, 1287, 1286, 1552, 1553, 1355, 1413, 1409, 1421, 1440, 1391, 1291, 1496, 1561, 1562, 1563, 1564, 1565, 1566, 1568, 1567, 1569, 1570, 1571, 1520, 1294, 1323, 1572, 1297, 1330, 1392, 1635, 1558, 1559, 1560, 1347, 1634, 1526, 1399, 1618, 1671, 1674, 376: 1646, 378: 1655, 1669, 1668, 1699, 400: 1658, 404: 1616, 1677, 1650, 410: 1682, 419: 1645, 1647, 429: 1675, 1649, 1648, 433: 1676, 435: 1654, 1680, 1689, 1711, 1653, 1690, 1691, 1652, 1666, 1667, 1705, 1697, 1700, 1706, 1707, 1702, 1703, 1708, 1701, 1704, 1695, 1673, 1685, 1686, 1688, 1684, 1678, 1679, 1670, 1681, 1683, 1672, 1687, 1692, 1693, 523: 1657, 1212, 1213, 1211, 1663, 1659, 1651, 1662, 1660, 1661, 1694, 1698, 1696, 1656, 1665, 1709, 1710, 1664, 1621, 1620, 1619, 1815}, + {8: 963, 963, 392: 1720, 1719, 395: 1718, 1717, 1715, 547: 1716, 1714}, + {1309, 1332, 1217, 1442, 1436, 1426, 8: 962, 10: 1280, 1625, 1477, 1511, 1504, 1497, 1507, 1500, 1499, 1501, 1517, 1509, 1503, 1515, 1516, 1513, 1514, 1502, 1498, 1505, 1506, 1508, 1512, 1510, 1547, 1453, 1451, 1452, 1314, 1623, 1226, 1441, 1244, 1288, 1246, 1225, 1628, 1263, 1434, 1299, 1335, 1522, 1521, 1270, 1338, 1298, 1476, 1221, 1231, 1340, 1439, 1341, 1257, 1518, 1519, 1438, 1326, 1350, 1273, 1278, 1430, 1431, 1283, 1289, 1384, 1632, 1432, 1433, 1219, 1222, 1224, 1223, 1238, 1626, 1482, 1427, 1243, 1249, 1261, 1262, 1250, 1485, 1405, 1318, 1319, 1279, 1450, 1290, 1631, 1630, 1415, 1295, 1300, 1633, 1402, 1214, 1529, 1215, 1218, 1460, 1387, 1304, 1624, 1310, 1348, 1349, 1345, 1530, 1531, 1532, 1406, 1576, 1478, 1479, 1467, 1480, 1227, 1394, 1533, 1312, 1396, 1228, 1381, 1481, 1640, 1636, 1230, 1329, 1232, 1233, 1313, 1311, 1234, 1408, 1534, 1535, 1404, 1235, 1536, 1468, 1236, 1537, 1538, 1627, 1240, 1388, 1324, 1483, 1417, 1241, 1484, 1242, 1245, 1247, 1248, 1251, 1386, 1351, 1252, 1577, 1435, 1356, 1253, 1461, 1401, 1574, 1254, 1539, 1411, 1255, 1256, 1580, 1258, 1259, 1346, 1540, 1322, 1541, 1418, 1459, 1629, 1307, 1622, 1462, 1403, 1337, 1542, 1265, 1543, 1544, 1389, 1407, 1412, 1325, 1398, 1486, 1457, 1268, 1266, 1334, 1419, 1267, 1456, 
1458, 1315, 1546, 1473, 1472, 1376, 1377, 1316, 1378, 1379, 1390, 1642, 1545, 1317, 1643, 1463, 1302, 1641, 1269, 1400, 1573, 1344, 1466, 1469, 1420, 1487, 1488, 1464, 1465, 1353, 1470, 1548, 1454, 1354, 1331, 1285, 1524, 1575, 1410, 1422, 1425, 1352, 1271, 1475, 1474, 1525, 1367, 1550, 1368, 1272, 1343, 1362, 1363, 1364, 1489, 1637, 1370, 1369, 1274, 1549, 1395, 1275, 1528, 1527, 1383, 1424, 1276, 1437, 1327, 1455, 1380, 1328, 1638, 1277, 1385, 1639, 1320, 1490, 1644, 1429, 1393, 1372, 1471, 1333, 1373, 1374, 1281, 1423, 1382, 1375, 1282, 1305, 1414, 1523, 1416, 1336, 1339, 1443, 1444, 1445, 1446, 1447, 1448, 1449, 1578, 1491, 1358, 1494, 1495, 1493, 1492, 1357, 1428, 1284, 1554, 1555, 1556, 1557, 1579, 1551, 1397, 1287, 1286, 1552, 1553, 1355, 1413, 1409, 1421, 1440, 1391, 1291, 1496, 1561, 1562, 1563, 1564, 1565, 1566, 1568, 1567, 1569, 1570, 1571, 1520, 1294, 1323, 1572, 1297, 1330, 1392, 1635, 1558, 1559, 1560, 1347, 1634, 1526, 1399, 1618, 1671, 1674, 376: 1646, 378: 1655, 1669, 1668, 1699, 400: 1658, 404: 1616, 1677, 1650, 410: 1682, 419: 1645, 1647, 429: 1675, 1649, 1648, 433: 1676, 435: 1654, 1680, 1689, 1711, 1653, 1690, 1691, 1652, 1666, 1667, 1705, 1697, 1700, 1706, 1707, 1702, 1703, 1708, 1701, 1704, 1695, 1673, 1685, 1686, 1688, 1684, 1678, 1679, 1670, 1681, 1683, 1672, 1687, 1692, 1693, 523: 1657, 1212, 1213, 1211, 1663, 1659, 1651, 1662, 1660, 1661, 1694, 1698, 1696, 1656, 1665, 1709, 1710, 1664, 1621, 1620, 1619, 1810, 586: 1811, 630: 1817}, + {8: 1818}, + {387, 387, 387, 387, 387, 387, 387, 387, 387, 387, 387, 387, 387, 387, 387, 387, 387, 387, 387, 387, 387, 387, 387, 387, 387, 387, 387, 387, 387, 387, 387, 387, 387, 387, 387, 387, 387, 387, 387, 387, 387, 387, 387, 387, 387, 387, 387, 387, 387, 387, 387, 387, 387, 387, 387, 387, 387, 387, 387, 387, 387, 387, 387, 387, 387, 387, 387, 387, 387, 387, 387, 387, 387, 387, 387, 387, 387, 387, 387, 387, 387, 387, 387, 387, 387, 387, 387, 387, 387, 387, 387, 387, 387, 387, 387, 387, 387, 387, 387, 387, 387, 387, 387, 387, 387, 387, 387, 387, 387, 387, 387, 387, 387, 387, 387, 387, 387, 387, 387, 387, 387, 387, 387, 387, 387, 387, 387, 387, 387, 387, 387, 387, 387, 387, 387, 387, 387, 387, 387, 387, 387, 387, 387, 387, 387, 387, 387, 387, 387, 387, 387, 387, 387, 387, 387, 387, 387, 387, 387, 387, 387, 387, 387, 387, 387, 387, 387, 387, 387, 387, 387, 387, 387, 387, 387, 387, 387, 387, 387, 387, 387, 387, 387, 387, 387, 387, 387, 387, 387, 387, 387, 387, 387, 387, 387, 387, 387, 387, 387, 387, 387, 387, 387, 387, 387, 387, 387, 387, 387, 387, 387, 387, 387, 387, 387, 387, 387, 387, 387, 387, 387, 387, 387, 387, 387, 387, 387, 387, 387, 387, 387, 387, 387, 387, 387, 387, 387, 387, 387, 387, 387, 387, 387, 387, 387, 387, 387, 387, 387, 387, 387, 387, 387, 387, 387, 387, 387, 387, 387, 387, 387, 387, 387, 387, 387, 387, 387, 387, 387, 387, 387, 387, 387, 387, 387, 387, 387, 387, 387, 387, 387, 387, 387, 387, 387, 387, 387, 387, 387, 387, 387, 387, 387, 387, 387, 387, 387, 387, 387, 387, 387, 387, 387, 387, 387, 387, 387, 387, 387, 387, 387, 387, 387, 387, 387, 387, 387, 387, 387, 387, 387, 387, 387, 387, 387, 387, 387, 387, 387, 387, 387, 387, 387, 387, 387, 387, 387, 387, 387, 387, 387, 387, 387, 387, 387, 387, 387, 387, 387, 387, 387, 387, 387, 387, 387, 387, 387, 387, 387, 387, 387, 387, 387, 387, 387, 387, 387, 387, 387, 387, 387, 387, 387, 375: 387, 377: 387, 387, 387, 387, 387, 384: 387, 386: 387, 391: 387, 387, 387, 387, 387, 387, 387, 387, 387, 401: 387, 387, 387, 407: 387, 387, 387, 411: 387, 387, 387, 387, 387, 387, 387, 
387, 421: 387, 387, 387, 387, 387, 387, 387, 387, 432: 387, 434: 387}, + // 660 + {1309, 1332, 1217, 1442, 1436, 1426, 10: 1280, 1229, 1477, 1511, 1504, 1497, 1507, 1500, 1499, 1501, 1517, 1509, 1503, 1515, 1516, 1513, 1514, 1502, 1498, 1505, 1506, 1508, 1512, 1510, 1547, 1453, 1451, 1452, 1314, 1216, 1226, 1441, 1244, 1288, 1246, 1225, 1260, 1263, 1434, 1299, 1335, 1522, 1521, 1270, 1338, 1298, 1476, 1221, 1231, 1340, 1439, 1341, 1257, 1518, 1519, 1438, 1326, 1350, 1273, 1278, 1430, 1431, 1283, 1289, 1384, 1296, 1432, 1433, 1219, 1222, 1224, 1223, 1238, 1237, 1482, 1427, 1243, 1249, 1261, 1262, 1250, 1485, 1405, 1318, 1319, 1279, 1450, 1290, 1293, 1292, 1415, 1295, 1300, 1301, 1402, 1214, 1529, 1215, 1218, 1460, 1387, 1304, 1220, 1310, 1348, 1349, 1345, 1530, 1531, 1532, 1406, 1576, 1478, 1479, 1467, 1480, 1227, 1394, 1533, 1312, 1396, 1228, 1381, 1481, 1360, 1308, 1230, 1329, 1232, 1233, 1313, 1311, 1234, 1408, 1534, 1535, 1404, 1235, 1536, 1468, 1236, 1537, 1538, 1239, 1240, 1388, 1324, 1483, 1417, 1241, 1484, 1242, 1245, 1247, 1248, 1251, 1386, 1351, 1252, 1577, 1435, 1356, 1253, 1461, 1401, 1574, 1254, 1539, 1411, 1255, 1256, 1580, 1258, 1259, 1346, 1540, 1322, 1541, 1418, 1459, 1264, 1307, 1210, 1462, 1403, 1337, 1542, 1265, 1543, 1544, 1389, 1407, 1412, 1325, 1398, 1486, 1457, 1268, 1266, 1334, 1419, 1267, 1456, 1458, 1315, 1546, 1473, 1472, 1376, 1377, 1316, 1378, 1379, 1390, 1365, 1545, 1317, 1366, 1463, 1302, 1361, 1269, 1400, 1573, 1344, 1466, 1469, 1420, 1487, 1488, 1464, 1465, 1353, 1470, 1548, 1454, 1354, 1331, 1285, 1524, 1575, 1410, 1422, 1425, 1352, 1271, 1475, 1474, 1525, 1367, 1550, 1368, 1272, 1343, 1362, 1363, 1364, 1489, 1321, 1370, 1369, 1274, 1549, 1395, 1275, 1528, 1527, 1383, 1424, 1276, 1437, 1327, 1455, 1380, 1328, 1342, 1277, 1385, 1359, 1320, 1490, 1371, 1429, 1393, 1372, 1471, 1333, 1373, 1374, 1281, 1423, 1382, 1375, 1282, 1305, 1414, 1523, 1416, 1336, 1339, 1443, 1444, 1445, 1446, 1447, 1448, 1449, 1578, 1491, 1358, 1494, 1495, 1493, 1492, 1357, 1428, 1284, 1554, 1555, 1556, 1557, 1579, 1551, 1397, 1287, 1286, 1552, 1553, 1355, 1413, 1409, 1421, 1440, 1391, 1291, 1496, 1561, 1562, 1563, 1564, 1565, 1566, 1568, 1567, 1569, 1570, 1571, 1520, 1294, 1323, 1572, 1297, 1330, 1392, 1306, 1558, 1559, 1560, 1347, 1303, 1526, 1399, 400: 1658, 523: 1657, 1212, 1213, 1211, 528: 1820}, + {8: 1821}, + {437, 437, 437, 437, 437, 437, 437, 437, 437, 437, 437, 437, 437, 437, 437, 437, 437, 437, 437, 437, 437, 437, 437, 437, 437, 437, 437, 437, 437, 437, 437, 437, 437, 437, 437, 437, 437, 437, 437, 437, 437, 437, 437, 437, 437, 437, 437, 437, 437, 437, 437, 437, 437, 437, 437, 437, 437, 437, 437, 437, 437, 437, 437, 437, 437, 437, 437, 437, 437, 437, 437, 437, 437, 437, 437, 437, 437, 437, 437, 437, 437, 437, 437, 437, 437, 437, 437, 437, 437, 437, 437, 437, 437, 437, 437, 437, 437, 437, 437, 437, 437, 437, 437, 437, 437, 437, 437, 437, 437, 437, 437, 437, 437, 437, 437, 437, 437, 437, 437, 437, 437, 437, 437, 437, 437, 437, 437, 437, 437, 437, 437, 437, 437, 437, 437, 437, 437, 437, 437, 437, 437, 437, 437, 437, 437, 437, 437, 437, 437, 437, 437, 437, 437, 437, 437, 437, 437, 437, 437, 437, 437, 437, 437, 437, 437, 437, 437, 437, 437, 437, 437, 437, 437, 437, 437, 437, 437, 437, 437, 437, 437, 437, 437, 437, 437, 437, 437, 437, 437, 437, 437, 437, 437, 437, 437, 437, 437, 437, 437, 437, 437, 437, 437, 437, 437, 437, 437, 437, 437, 437, 437, 437, 437, 437, 437, 437, 437, 437, 437, 437, 437, 437, 437, 437, 437, 437, 437, 437, 437, 437, 437, 437, 437, 437, 437, 437, 437, 437, 
437, 437, 437, 437, 437, 437, 437, 437, 437, 437, 437, 437, 437, 437, 437, 437, 437, 437, 437, 437, 437, 437, 437, 437, 437, 437, 437, 437, 437, 437, 437, 437, 437, 437, 437, 437, 437, 437, 437, 437, 437, 437, 437, 437, 437, 437, 437, 437, 437, 437, 437, 437, 437, 437, 437, 437, 437, 437, 437, 437, 437, 437, 437, 437, 437, 437, 437, 437, 437, 437, 437, 437, 437, 437, 437, 437, 437, 437, 437, 437, 437, 437, 437, 437, 437, 437, 437, 437, 437, 437, 437, 437, 437, 437, 437, 437, 437, 437, 437, 437, 437, 437, 437, 437, 437, 437, 437, 437, 437, 437, 437, 437, 437, 437, 437, 437, 437, 437, 437, 437, 437, 437, 437, 437, 437, 437, 437, 437, 437, 437, 437, 437, 437, 437, 437, 375: 437, 377: 437, 437, 437, 437, 437, 384: 437, 386: 437, 391: 437, 437, 437, 437, 437, 437, 437, 437, 437, 401: 437, 437, 437, 407: 437, 437, 437, 411: 437, 437, 437, 437, 437, 437, 437, 437, 421: 437, 437, 437, 437, 437, 437, 437, 437, 432: 437, 434: 437}, + {1309, 1332, 1217, 1442, 1436, 1426, 10: 1280, 1229, 1477, 1511, 1504, 1497, 1507, 1500, 1499, 1501, 1517, 1509, 1503, 1515, 1516, 1513, 1514, 1502, 1498, 1505, 1506, 1508, 1512, 1510, 1547, 1453, 1451, 1452, 1314, 1216, 1226, 1441, 1244, 1288, 1246, 1225, 1260, 1263, 1434, 1299, 1335, 1522, 1521, 1270, 1338, 1298, 1476, 1221, 1231, 1340, 1439, 1341, 1257, 1518, 1519, 1438, 1326, 1350, 1273, 1278, 1430, 1431, 1283, 1289, 1384, 1296, 1432, 1433, 1219, 1222, 1224, 1223, 1238, 1237, 1482, 1427, 1243, 1249, 1261, 1262, 1250, 1485, 1405, 1318, 1319, 1279, 1450, 1290, 1293, 1292, 1415, 1295, 1300, 1301, 1402, 1214, 1529, 1215, 1218, 1460, 1387, 1304, 1220, 1310, 1348, 1349, 1345, 1530, 1531, 1532, 1406, 1576, 1478, 1479, 1467, 1480, 1227, 1394, 1533, 1312, 1396, 1228, 1381, 1481, 1360, 1308, 1230, 1329, 1232, 1233, 1313, 1311, 1234, 1408, 1534, 1535, 1404, 1235, 1536, 1468, 1236, 1537, 1538, 1239, 1240, 1388, 1324, 1483, 1417, 1241, 1484, 1242, 1245, 1247, 1248, 1251, 1386, 1351, 1252, 1577, 1435, 1356, 1253, 1461, 1401, 1574, 1254, 1539, 1411, 1255, 1256, 1580, 1258, 1259, 1346, 1540, 1322, 1541, 1418, 1459, 1264, 1307, 1210, 1462, 1403, 1337, 1542, 1265, 1543, 1544, 1389, 1407, 1412, 1325, 1398, 1486, 1457, 1268, 1266, 1334, 1419, 1267, 1456, 1458, 1315, 1546, 1473, 1472, 1376, 1377, 1316, 1378, 1379, 1390, 1365, 1545, 1317, 1366, 1463, 1302, 1361, 1269, 1400, 1573, 1344, 1466, 1469, 1420, 1487, 1488, 1464, 1465, 1353, 1470, 1548, 1454, 1354, 1331, 1285, 1524, 1575, 1410, 1422, 1425, 1352, 1271, 1475, 1474, 1525, 1367, 1550, 1368, 1272, 1343, 1362, 1363, 1364, 1489, 1321, 1370, 1369, 1274, 1549, 1395, 1275, 1528, 1527, 1383, 1424, 1276, 1437, 1327, 1455, 1380, 1328, 1342, 1277, 1385, 1359, 1320, 1490, 1371, 1429, 1393, 1372, 1471, 1333, 1373, 1374, 1281, 1423, 1382, 1375, 1282, 1305, 1414, 1523, 1416, 1336, 1339, 1443, 1444, 1445, 1446, 1447, 1448, 1449, 1578, 1491, 1358, 1494, 1495, 1493, 1492, 1357, 1428, 1284, 1554, 1555, 1556, 1557, 1579, 1551, 1397, 1287, 1286, 1552, 1553, 1355, 1413, 1409, 1421, 1440, 1391, 1291, 1496, 1561, 1562, 1563, 1564, 1565, 1566, 1568, 1567, 1569, 1570, 1571, 1520, 1294, 1323, 1572, 1297, 1330, 1392, 1306, 1558, 1559, 1560, 1347, 1303, 1526, 1399, 400: 1658, 523: 1657, 1212, 1213, 1211, 528: 1823}, + {8: 1824}, + // 665 + {438, 438, 438, 438, 438, 438, 438, 438, 438, 438, 438, 438, 438, 438, 438, 438, 438, 438, 438, 438, 438, 438, 438, 438, 438, 438, 438, 438, 438, 438, 438, 438, 438, 438, 438, 438, 438, 438, 438, 438, 438, 438, 438, 438, 438, 438, 438, 438, 438, 438, 438, 438, 438, 438, 438, 438, 438, 438, 438, 438, 438, 438, 438, 438, 438, 
438, 438, 438, 438, 438, 438, 438, 438, 438, 438, 438, 438, 438, 438, 438, 438, 438, 438, 438, 438, 438, 438, 438, 438, 438, 438, 438, 438, 438, 438, 438, 438, 438, 438, 438, 438, 438, 438, 438, 438, 438, 438, 438, 438, 438, 438, 438, 438, 438, 438, 438, 438, 438, 438, 438, 438, 438, 438, 438, 438, 438, 438, 438, 438, 438, 438, 438, 438, 438, 438, 438, 438, 438, 438, 438, 438, 438, 438, 438, 438, 438, 438, 438, 438, 438, 438, 438, 438, 438, 438, 438, 438, 438, 438, 438, 438, 438, 438, 438, 438, 438, 438, 438, 438, 438, 438, 438, 438, 438, 438, 438, 438, 438, 438, 438, 438, 438, 438, 438, 438, 438, 438, 438, 438, 438, 438, 438, 438, 438, 438, 438, 438, 438, 438, 438, 438, 438, 438, 438, 438, 438, 438, 438, 438, 438, 438, 438, 438, 438, 438, 438, 438, 438, 438, 438, 438, 438, 438, 438, 438, 438, 438, 438, 438, 438, 438, 438, 438, 438, 438, 438, 438, 438, 438, 438, 438, 438, 438, 438, 438, 438, 438, 438, 438, 438, 438, 438, 438, 438, 438, 438, 438, 438, 438, 438, 438, 438, 438, 438, 438, 438, 438, 438, 438, 438, 438, 438, 438, 438, 438, 438, 438, 438, 438, 438, 438, 438, 438, 438, 438, 438, 438, 438, 438, 438, 438, 438, 438, 438, 438, 438, 438, 438, 438, 438, 438, 438, 438, 438, 438, 438, 438, 438, 438, 438, 438, 438, 438, 438, 438, 438, 438, 438, 438, 438, 438, 438, 438, 438, 438, 438, 438, 438, 438, 438, 438, 438, 438, 438, 438, 438, 438, 438, 438, 438, 438, 438, 438, 438, 438, 438, 438, 438, 438, 438, 438, 438, 438, 438, 438, 438, 438, 438, 438, 438, 438, 438, 438, 438, 438, 438, 438, 438, 438, 438, 438, 438, 438, 375: 438, 377: 438, 438, 438, 438, 438, 384: 438, 386: 438, 391: 438, 438, 438, 438, 438, 438, 438, 438, 438, 401: 438, 438, 438, 407: 438, 438, 438, 411: 438, 438, 438, 438, 438, 438, 438, 438, 421: 438, 438, 438, 438, 438, 438, 438, 438, 432: 438, 434: 438}, + {1309, 1332, 1217, 1442, 1436, 1426, 10: 1280, 1625, 1477, 1511, 1504, 1497, 1507, 1500, 1499, 1501, 1517, 1509, 1503, 1515, 1516, 1513, 1514, 1502, 1498, 1505, 1506, 1508, 1512, 1510, 1547, 1453, 1451, 1452, 1314, 1623, 1226, 1441, 1244, 1288, 1246, 1225, 1628, 1263, 1434, 1299, 1335, 1522, 1521, 1270, 1338, 1298, 1476, 1221, 1231, 1340, 1439, 1341, 1257, 1518, 1519, 1438, 1326, 1350, 1273, 1278, 1430, 1431, 1283, 1289, 1384, 1632, 1432, 1433, 1219, 1222, 1224, 1223, 1238, 1626, 1482, 1427, 1243, 1249, 1261, 1262, 1250, 1485, 1405, 1318, 1319, 1279, 1450, 1290, 1631, 1630, 1415, 1295, 1300, 1633, 1402, 1214, 1529, 1215, 1218, 1460, 1387, 1304, 1624, 1310, 1348, 1349, 1345, 1530, 1531, 1532, 1406, 1576, 1478, 1479, 1467, 1480, 1227, 1394, 1533, 1312, 1396, 1228, 1381, 1481, 1640, 1636, 1230, 1329, 1232, 1233, 1313, 1311, 1234, 1408, 1534, 1535, 1404, 1235, 1536, 1468, 1236, 1537, 1538, 1627, 1240, 1388, 1324, 1483, 1417, 1241, 1484, 1242, 1245, 1247, 1248, 1251, 1386, 1351, 1252, 1577, 1435, 1356, 1253, 1461, 1401, 1574, 1254, 1539, 1411, 1255, 1256, 1580, 1258, 1259, 1346, 1540, 1322, 1541, 1418, 1459, 1629, 1307, 1622, 1462, 1403, 1337, 1542, 1265, 1543, 1544, 1389, 1407, 1412, 1325, 1398, 1486, 1457, 1268, 1266, 1334, 1419, 1267, 1456, 1458, 1315, 1546, 1473, 1472, 1376, 1377, 1316, 1378, 1379, 1390, 1642, 1545, 1317, 1643, 1463, 1302, 1641, 1269, 1400, 1573, 1344, 1466, 1469, 1420, 1487, 1488, 1464, 1465, 1353, 1470, 1548, 1454, 1354, 1331, 1285, 1524, 1575, 1410, 1422, 1425, 1352, 1271, 1475, 1474, 1525, 1367, 1550, 1368, 1272, 1343, 1362, 1363, 1364, 1489, 1637, 1370, 1369, 1274, 1549, 1395, 1275, 1528, 1527, 1383, 1424, 1276, 1437, 1327, 1455, 1380, 1328, 1638, 1277, 1385, 1639, 1320, 1490, 1644, 1429, 1393, 1372, 
1471, 1333, 1373, 1374, 1281, 1423, 1382, 1375, 1282, 1305, 1414, 1523, 1416, 1336, 1339, 1443, 1444, 1445, 1446, 1447, 1448, 1449, 1578, 1491, 1358, 1494, 1495, 1493, 1492, 1357, 1428, 1284, 1554, 1555, 1556, 1557, 1579, 1551, 1397, 1287, 1286, 1552, 1553, 1355, 1413, 1409, 1421, 1440, 1391, 1291, 1496, 1561, 1562, 1563, 1564, 1565, 1566, 1568, 1567, 1569, 1570, 1571, 1520, 1294, 1323, 1572, 1297, 1330, 1392, 1635, 1558, 1559, 1560, 1347, 1634, 1526, 1399, 1618, 1671, 1674, 376: 1646, 378: 1655, 1669, 1668, 1699, 400: 1658, 404: 1616, 1677, 1650, 410: 1682, 419: 1645, 1647, 429: 1675, 1649, 1648, 433: 1676, 435: 1654, 1680, 1689, 1711, 1653, 1690, 1691, 1652, 1666, 1667, 1705, 1697, 1700, 1706, 1707, 1702, 1703, 1708, 1701, 1704, 1695, 1673, 1685, 1686, 1688, 1684, 1678, 1679, 1670, 1681, 1683, 1672, 1687, 1692, 1693, 523: 1657, 1212, 1213, 1211, 1663, 1659, 1651, 1662, 1660, 1661, 1694, 1698, 1696, 1656, 1665, 1709, 1710, 1664, 1621, 1620, 1619, 1826}, + {391: 1827, 1720, 1719, 395: 1718, 1717, 1715, 547: 1716, 1714}, + {1309, 1332, 1217, 1442, 1436, 1426, 10: 1280, 1229, 1477, 1511, 1504, 1497, 1507, 1500, 1499, 1501, 1517, 1509, 1503, 1515, 1516, 1513, 1514, 1502, 1498, 1505, 1506, 1508, 1512, 1510, 1547, 1453, 1451, 1452, 1314, 1216, 1226, 1441, 1244, 1288, 1246, 1225, 1260, 1263, 1434, 1299, 1335, 1522, 1521, 1270, 1338, 1298, 1476, 1221, 1231, 1340, 1439, 1341, 1257, 1518, 1519, 1438, 1326, 1350, 1273, 1278, 1430, 1431, 1283, 1289, 1384, 1296, 1432, 1433, 1219, 1222, 1224, 1223, 1238, 1237, 1482, 1427, 1243, 1249, 1261, 1262, 1250, 1485, 1405, 1318, 1319, 1279, 1450, 1290, 1293, 1292, 1415, 1295, 1300, 1301, 1402, 1214, 1529, 1215, 1218, 1460, 1387, 1304, 1220, 1310, 1348, 1349, 1345, 1530, 1531, 1532, 1406, 1576, 1478, 1479, 1467, 1480, 1227, 1394, 1533, 1312, 1396, 1228, 1381, 1481, 1360, 1308, 1230, 1329, 1232, 1233, 1313, 1311, 1234, 1408, 1534, 1535, 1404, 1235, 1536, 1468, 1236, 1537, 1538, 1239, 1240, 1388, 1324, 1483, 1417, 1241, 1484, 1242, 1245, 1247, 1248, 1251, 1386, 1351, 1252, 1577, 1435, 1356, 1253, 1461, 1401, 1574, 1254, 1539, 1411, 1255, 1256, 1580, 1258, 1259, 1346, 1540, 1322, 1541, 1418, 1459, 1264, 1307, 1210, 1462, 1403, 1337, 1542, 1265, 1543, 1544, 1389, 1407, 1412, 1325, 1398, 1486, 1457, 1268, 1266, 1334, 1419, 1267, 1456, 1458, 1315, 1546, 1473, 1472, 1376, 1377, 1316, 1378, 1379, 1390, 1365, 1545, 1317, 1366, 1463, 1302, 1361, 1269, 1400, 1573, 1344, 1466, 1469, 1420, 1487, 1488, 1464, 1465, 1353, 1470, 1548, 1454, 1354, 1331, 1285, 1524, 1575, 1410, 1422, 1425, 1352, 1271, 1475, 1474, 1525, 1367, 1550, 1368, 1272, 1343, 1362, 1363, 1364, 1489, 1321, 1370, 1369, 1274, 1549, 1395, 1275, 1528, 1527, 1383, 1424, 1276, 1437, 1327, 1455, 1380, 1328, 1342, 1277, 1385, 1359, 1320, 1490, 1371, 1429, 1393, 1372, 1471, 1333, 1373, 1374, 1281, 1423, 1382, 1375, 1282, 1305, 1414, 1523, 1416, 1336, 1339, 1443, 1444, 1445, 1446, 1447, 1448, 1449, 1578, 1491, 1358, 1494, 1495, 1493, 1492, 1357, 1428, 1284, 1554, 1555, 1556, 1557, 1579, 1551, 1397, 1287, 1286, 1552, 1553, 1355, 1413, 1409, 1421, 1440, 1391, 1291, 1496, 1561, 1562, 1563, 1564, 1565, 1566, 1568, 1567, 1569, 1570, 1571, 1520, 1294, 1323, 1572, 1297, 1330, 1392, 1306, 1558, 1559, 1560, 1347, 1303, 1526, 1399, 378: 1831, 472: 1830, 523: 1832, 1212, 1213, 1211, 581: 1829, 609: 1828}, + {8: 1833}, + // 670 + {193, 193, 193, 193, 193, 193, 193, 193, 193, 193, 11: 193, 42: 193, 372: 193, 374: 193, 193, 193, 193, 382: 193, 193, 385: 193, 387: 193, 193, 193, 193, 470: 193, 193, 193}, + {192, 192, 192, 192, 192, 
192, 192, 192, 192, 192, 11: 192, 42: 192, 372: 192, 374: 192, 192, 192, 192, 382: 192, 192, 385: 192, 387: 192, 192, 192, 192, 470: 192, 192, 192}, + {6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 374: 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 401: 6, 6, 6, 407: 6, 6, 6, 411: 6, 6, 6, 6, 6, 6, 6, 6, 421: 6, 6, 6, 6, 6, 6, 6, 6, 432: 6, 434: 6, 470: 6, 6, 6}, + {5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 374: 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 401: 5, 5, 5, 407: 5, 5, 5, 411: 5, 5, 5, 5, 5, 5, 5, 5, 421: 5, 5, 5, 5, 5, 5, 5, 5, 432: 5, 434: 5, 470: 5, 5, 5}, + {439, 439, 439, 439, 439, 439, 439, 439, 439, 439, 439, 439, 439, 439, 439, 439, 439, 439, 439, 439, 439, 439, 439, 439, 439, 439, 439, 439, 439, 439, 439, 439, 439, 439, 439, 439, 439, 439, 439, 439, 439, 439, 439, 439, 439, 439, 439, 439, 439, 439, 439, 439, 439, 439, 439, 439, 439, 439, 439, 439, 439, 439, 439, 439, 439, 439, 439, 439, 439, 439, 439, 439, 439, 439, 439, 439, 439, 439, 439, 439, 439, 439, 439, 439, 439, 439, 439, 439, 439, 439, 439, 439, 439, 439, 439, 439, 439, 439, 439, 439, 439, 439, 439, 439, 439, 439, 439, 439, 439, 439, 439, 439, 439, 439, 439, 439, 439, 439, 439, 439, 439, 439, 439, 439, 439, 439, 439, 439, 439, 439, 439, 439, 439, 439, 439, 439, 439, 439, 439, 439, 439, 439, 439, 439, 439, 439, 439, 439, 439, 439, 439, 
439, 439, 439, 439, 439, 439, 439, 439, 439, 439, 439, 439, 439, 439, 439, 439, 439, 439, 439, 439, 439, 439, 439, 439, 439, 439, 439, 439, 439, 439, 439, 439, 439, 439, 439, 439, 439, 439, 439, 439, 439, 439, 439, 439, 439, 439, 439, 439, 439, 439, 439, 439, 439, 439, 439, 439, 439, 439, 439, 439, 439, 439, 439, 439, 439, 439, 439, 439, 439, 439, 439, 439, 439, 439, 439, 439, 439, 439, 439, 439, 439, 439, 439, 439, 439, 439, 439, 439, 439, 439, 439, 439, 439, 439, 439, 439, 439, 439, 439, 439, 439, 439, 439, 439, 439, 439, 439, 439, 439, 439, 439, 439, 439, 439, 439, 439, 439, 439, 439, 439, 439, 439, 439, 439, 439, 439, 439, 439, 439, 439, 439, 439, 439, 439, 439, 439, 439, 439, 439, 439, 439, 439, 439, 439, 439, 439, 439, 439, 439, 439, 439, 439, 439, 439, 439, 439, 439, 439, 439, 439, 439, 439, 439, 439, 439, 439, 439, 439, 439, 439, 439, 439, 439, 439, 439, 439, 439, 439, 439, 439, 439, 439, 439, 439, 439, 439, 439, 439, 439, 439, 439, 439, 439, 439, 439, 439, 439, 439, 439, 439, 439, 439, 439, 439, 439, 439, 439, 439, 439, 439, 439, 439, 439, 439, 439, 439, 439, 439, 439, 439, 439, 439, 375: 439, 377: 439, 439, 439, 439, 439, 384: 439, 386: 439, 391: 439, 439, 439, 439, 439, 439, 439, 439, 439, 401: 439, 439, 439, 407: 439, 439, 439, 411: 439, 439, 439, 439, 439, 439, 439, 439, 421: 439, 439, 439, 439, 439, 439, 439, 439, 432: 439, 434: 439}, + // 675 + {1309, 1332, 1217, 1442, 1436, 1426, 10: 1280, 1625, 1477, 1511, 1504, 1497, 1507, 1500, 1499, 1501, 1517, 1509, 1503, 1515, 1516, 1513, 1514, 1502, 1498, 1505, 1506, 1508, 1512, 1510, 1547, 1453, 1451, 1452, 1314, 1623, 1226, 1441, 1244, 1288, 1246, 1225, 1628, 1263, 1434, 1299, 1335, 1522, 1521, 1270, 1338, 1298, 1476, 1221, 1231, 1340, 1439, 1341, 1257, 1518, 1519, 1438, 1326, 1350, 1273, 1278, 1430, 1431, 1283, 1289, 1384, 1632, 1432, 1433, 1219, 1222, 1224, 1223, 1238, 1626, 1482, 1427, 1243, 1249, 1261, 1262, 1250, 1485, 1405, 1318, 1319, 1279, 1450, 1290, 1631, 1630, 1415, 1295, 1300, 1633, 1402, 1214, 1529, 1215, 1218, 1460, 1387, 1304, 1624, 1310, 1348, 1349, 1345, 1530, 1531, 1532, 1406, 1576, 1478, 1479, 1467, 1480, 1227, 1394, 1533, 1312, 1396, 1228, 1381, 1481, 1640, 1636, 1230, 1329, 1232, 1233, 1313, 1311, 1234, 1408, 1534, 1535, 1404, 1235, 1536, 1468, 1236, 1537, 1538, 1627, 1240, 1388, 1324, 1483, 1417, 1241, 1484, 1242, 1245, 1247, 1248, 1251, 1386, 1351, 1252, 1577, 1435, 1356, 1253, 1461, 1401, 1574, 1254, 1539, 1411, 1255, 1256, 1580, 1258, 1259, 1346, 1540, 1322, 1541, 1418, 1459, 1629, 1307, 1622, 1462, 1403, 1337, 1542, 1265, 1543, 1544, 1389, 1407, 1412, 1325, 1398, 1486, 1457, 1268, 1266, 1334, 1419, 1267, 1456, 1458, 1315, 1546, 1473, 1472, 1376, 1377, 1316, 1378, 1379, 1390, 1642, 1545, 1317, 1643, 1463, 1302, 1641, 1269, 1400, 1573, 1344, 1466, 1469, 1420, 1487, 1488, 1464, 1465, 1353, 1470, 1548, 1454, 1354, 1331, 1285, 1524, 1575, 1410, 1422, 1425, 1352, 1271, 1475, 1474, 1525, 1367, 1550, 1368, 1272, 1343, 1362, 1363, 1364, 1489, 1637, 1370, 1369, 1274, 1549, 1395, 1275, 1528, 1527, 1383, 1424, 1276, 1437, 1327, 1455, 1380, 1328, 1638, 1277, 1385, 1639, 1320, 1490, 1644, 1429, 1393, 1372, 1471, 1333, 1373, 1374, 1281, 1423, 1382, 1375, 1282, 1305, 1414, 1523, 1416, 1336, 1339, 1443, 1444, 1445, 1446, 1447, 1448, 1449, 1578, 1491, 1358, 1494, 1495, 1493, 1492, 1357, 1428, 1284, 1554, 1555, 1556, 1557, 1579, 1551, 1397, 1287, 1286, 1552, 1553, 1355, 1413, 1409, 1421, 1440, 1391, 1291, 1496, 1561, 1562, 1563, 1564, 1565, 1566, 1568, 1567, 1569, 1570, 1571, 1520, 1294, 1323, 1572, 1297, 1330, 1392, 1635, 
1558, 1559, 1560, 1347, 1634, 1526, 1399, 1618, 1671, 1674, 376: 1646, 378: 1655, 1669, 1668, 1699, 400: 1658, 404: 1616, 1677, 1650, 410: 1682, 419: 1645, 1647, 429: 1675, 1649, 1648, 433: 1676, 435: 1654, 1680, 1689, 1711, 1653, 1690, 1691, 1652, 1666, 1667, 1705, 1697, 1700, 1706, 1707, 1702, 1703, 1708, 1701, 1704, 1695, 1673, 1685, 1686, 1688, 1684, 1678, 1679, 1670, 1681, 1683, 1672, 1687, 1692, 1693, 523: 1657, 1212, 1213, 1211, 1663, 1659, 1651, 1662, 1660, 1661, 1694, 1698, 1696, 1656, 1665, 1709, 1710, 1664, 1621, 1620, 1619, 1810, 586: 1835}, + {9: 1836}, + {1309, 1332, 1217, 1442, 1436, 1426, 10: 1280, 1625, 1477, 1511, 1504, 1497, 1507, 1500, 1499, 1501, 1517, 1509, 1503, 1515, 1516, 1513, 1514, 1502, 1498, 1505, 1506, 1508, 1512, 1510, 1547, 1453, 1451, 1452, 1314, 1623, 1226, 1441, 1244, 1288, 1246, 1225, 1628, 1263, 1434, 1299, 1335, 1522, 1521, 1270, 1338, 1298, 1476, 1221, 1231, 1340, 1439, 1341, 1257, 1518, 1519, 1438, 1326, 1350, 1273, 1278, 1430, 1431, 1283, 1289, 1384, 1632, 1432, 1433, 1219, 1222, 1224, 1223, 1238, 1626, 1482, 1427, 1243, 1249, 1261, 1262, 1250, 1485, 1405, 1318, 1319, 1279, 1450, 1290, 1631, 1630, 1415, 1295, 1300, 1633, 1402, 1214, 1529, 1215, 1218, 1460, 1387, 1304, 1624, 1310, 1348, 1349, 1345, 1530, 1531, 1532, 1406, 1576, 1478, 1479, 1467, 1480, 1227, 1394, 1533, 1312, 1396, 1228, 1381, 1481, 1640, 1636, 1230, 1329, 1232, 1233, 1313, 1311, 1234, 1408, 1534, 1535, 1404, 1235, 1536, 1468, 1236, 1537, 1538, 1627, 1240, 1388, 1324, 1483, 1417, 1241, 1484, 1242, 1245, 1247, 1248, 1251, 1386, 1351, 1252, 1577, 1435, 1356, 1253, 1461, 1401, 1574, 1254, 1539, 1411, 1255, 1256, 1580, 1258, 1259, 1346, 1540, 1322, 1541, 1418, 1459, 1629, 1307, 1622, 1462, 1403, 1337, 1542, 1265, 1543, 1544, 1389, 1407, 1412, 1325, 1398, 1486, 1457, 1268, 1266, 1334, 1419, 1267, 1456, 1458, 1315, 1546, 1473, 1472, 1376, 1377, 1316, 1378, 1379, 1390, 1642, 1545, 1317, 1643, 1463, 1302, 1641, 1269, 1400, 1573, 1344, 1466, 1469, 1420, 1487, 1488, 1464, 1465, 1353, 1470, 1548, 1454, 1354, 1331, 1285, 1524, 1575, 1410, 1422, 1425, 1352, 1271, 1475, 1474, 1525, 1367, 1550, 1368, 1272, 1343, 1362, 1363, 1364, 1489, 1637, 1370, 1369, 1274, 1549, 1395, 1275, 1528, 1527, 1383, 1424, 1276, 1437, 1327, 1455, 1380, 1328, 1638, 1277, 1385, 1639, 1320, 1490, 1644, 1429, 1393, 1372, 1471, 1333, 1373, 1374, 1281, 1423, 1382, 1375, 1282, 1305, 1414, 1523, 1416, 1336, 1339, 1443, 1444, 1445, 1446, 1447, 1448, 1449, 1578, 1491, 1358, 1494, 1495, 1493, 1492, 1357, 1428, 1284, 1554, 1555, 1556, 1557, 1579, 1551, 1397, 1287, 1286, 1552, 1553, 1355, 1413, 1409, 1421, 1440, 1391, 1291, 1496, 1561, 1562, 1563, 1564, 1565, 1566, 1568, 1567, 1569, 1570, 1571, 1520, 1294, 1323, 1572, 1297, 1330, 1392, 1635, 1558, 1559, 1560, 1347, 1634, 1526, 1399, 1618, 1671, 1674, 376: 1646, 378: 1655, 1669, 1668, 1699, 400: 1658, 404: 1616, 1677, 1650, 410: 1682, 419: 1645, 1647, 429: 1675, 1649, 1648, 433: 1676, 435: 1654, 1680, 1689, 1711, 1653, 1690, 1691, 1652, 1666, 1667, 1705, 1697, 1700, 1706, 1707, 1702, 1703, 1708, 1701, 1704, 1695, 1673, 1685, 1686, 1688, 1684, 1678, 1679, 1670, 1681, 1683, 1672, 1687, 1692, 1693, 523: 1657, 1212, 1213, 1211, 1663, 1659, 1651, 1662, 1660, 1661, 1694, 1698, 1696, 1656, 1665, 1709, 1710, 1664, 1621, 1620, 1619, 1837}, + {8: 1838, 963, 392: 1720, 1719, 395: 1718, 1717, 1715, 547: 1716, 1714}, + {440, 440, 440, 440, 440, 440, 440, 440, 440, 440, 440, 440, 440, 440, 440, 440, 440, 440, 440, 440, 440, 440, 440, 440, 440, 440, 440, 440, 440, 440, 440, 440, 440, 440, 440, 440, 
440, 440, 440, 440, 440, 440, 440, 440, 440, 440, 440, 440, 440, 440, 440, 440, 440, 440, 440, 440, 440, 440, 440, 440, 440, 440, 440, 440, 440, 440, 440, 440, 440, 440, 440, 440, 440, 440, 440, 440, 440, 440, 440, 440, 440, 440, 440, 440, 440, 440, 440, 440, 440, 440, 440, 440, 440, 440, 440, 440, 440, 440, 440, 440, 440, 440, 440, 440, 440, 440, 440, 440, 440, 440, 440, 440, 440, 440, 440, 440, 440, 440, 440, 440, 440, 440, 440, 440, 440, 440, 440, 440, 440, 440, 440, 440, 440, 440, 440, 440, 440, 440, 440, 440, 440, 440, 440, 440, 440, 440, 440, 440, 440, 440, 440, 440, 440, 440, 440, 440, 440, 440, 440, 440, 440, 440, 440, 440, 440, 440, 440, 440, 440, 440, 440, 440, 440, 440, 440, 440, 440, 440, 440, 440, 440, 440, 440, 440, 440, 440, 440, 440, 440, 440, 440, 440, 440, 440, 440, 440, 440, 440, 440, 440, 440, 440, 440, 440, 440, 440, 440, 440, 440, 440, 440, 440, 440, 440, 440, 440, 440, 440, 440, 440, 440, 440, 440, 440, 440, 440, 440, 440, 440, 440, 440, 440, 440, 440, 440, 440, 440, 440, 440, 440, 440, 440, 440, 440, 440, 440, 440, 440, 440, 440, 440, 440, 440, 440, 440, 440, 440, 440, 440, 440, 440, 440, 440, 440, 440, 440, 440, 440, 440, 440, 440, 440, 440, 440, 440, 440, 440, 440, 440, 440, 440, 440, 440, 440, 440, 440, 440, 440, 440, 440, 440, 440, 440, 440, 440, 440, 440, 440, 440, 440, 440, 440, 440, 440, 440, 440, 440, 440, 440, 440, 440, 440, 440, 440, 440, 440, 440, 440, 440, 440, 440, 440, 440, 440, 440, 440, 440, 440, 440, 440, 440, 440, 440, 440, 440, 440, 440, 440, 440, 440, 440, 440, 440, 440, 440, 440, 440, 440, 440, 440, 440, 440, 440, 440, 440, 440, 440, 440, 440, 440, 440, 440, 440, 440, 440, 440, 440, 440, 440, 440, 440, 440, 440, 375: 440, 377: 440, 440, 440, 440, 440, 384: 440, 386: 440, 391: 440, 440, 440, 440, 440, 440, 440, 440, 440, 401: 440, 440, 440, 407: 440, 440, 440, 411: 440, 440, 440, 440, 440, 440, 440, 440, 421: 440, 440, 440, 440, 440, 440, 440, 440, 432: 440, 434: 440}, + // 680 + {8: 1844, 964, 392: 1720, 1719, 395: 1718, 1717, 1715, 547: 1716, 1714}, + {9: 1841}, + {1309, 1332, 1217, 1442, 1436, 1426, 10: 1280, 1625, 1477, 1511, 1504, 1497, 1507, 1500, 1499, 1501, 1517, 1509, 1503, 1515, 1516, 1513, 1514, 1502, 1498, 1505, 1506, 1508, 1512, 1510, 1547, 1453, 1451, 1452, 1314, 1623, 1226, 1441, 1244, 1288, 1246, 1225, 1628, 1263, 1434, 1299, 1335, 1522, 1521, 1270, 1338, 1298, 1476, 1221, 1231, 1340, 1439, 1341, 1257, 1518, 1519, 1438, 1326, 1350, 1273, 1278, 1430, 1431, 1283, 1289, 1384, 1632, 1432, 1433, 1219, 1222, 1224, 1223, 1238, 1626, 1482, 1427, 1243, 1249, 1261, 1262, 1250, 1485, 1405, 1318, 1319, 1279, 1450, 1290, 1631, 1630, 1415, 1295, 1300, 1633, 1402, 1214, 1529, 1215, 1218, 1460, 1387, 1304, 1624, 1310, 1348, 1349, 1345, 1530, 1531, 1532, 1406, 1576, 1478, 1479, 1467, 1480, 1227, 1394, 1533, 1312, 1396, 1228, 1381, 1481, 1640, 1636, 1230, 1329, 1232, 1233, 1313, 1311, 1234, 1408, 1534, 1535, 1404, 1235, 1536, 1468, 1236, 1537, 1538, 1627, 1240, 1388, 1324, 1483, 1417, 1241, 1484, 1242, 1245, 1247, 1248, 1251, 1386, 1351, 1252, 1577, 1435, 1356, 1253, 1461, 1401, 1574, 1254, 1539, 1411, 1255, 1256, 1580, 1258, 1259, 1346, 1540, 1322, 1541, 1418, 1459, 1629, 1307, 1622, 1462, 1403, 1337, 1542, 1265, 1543, 1544, 1389, 1407, 1412, 1325, 1398, 1486, 1457, 1268, 1266, 1334, 1419, 1267, 1456, 1458, 1315, 1546, 1473, 1472, 1376, 1377, 1316, 1378, 1379, 1390, 1642, 1545, 1317, 1643, 1463, 1302, 1641, 1269, 1400, 1573, 1344, 1466, 1469, 1420, 1487, 1488, 1464, 1465, 1353, 1470, 1548, 1454, 1354, 1331, 1285, 1524, 1575, 1410, 1422, 1425, 
1352, 1271, 1475, 1474, 1525, 1367, 1550, 1368, 1272, 1343, 1362, 1363, 1364, 1489, 1637, 1370, 1369, 1274, 1549, 1395, 1275, 1528, 1527, 1383, 1424, 1276, 1437, 1327, 1455, 1380, 1328, 1638, 1277, 1385, 1639, 1320, 1490, 1644, 1429, 1393, 1372, 1471, 1333, 1373, 1374, 1281, 1423, 1382, 1375, 1282, 1305, 1414, 1523, 1416, 1336, 1339, 1443, 1444, 1445, 1446, 1447, 1448, 1449, 1578, 1491, 1358, 1494, 1495, 1493, 1492, 1357, 1428, 1284, 1554, 1555, 1556, 1557, 1579, 1551, 1397, 1287, 1286, 1552, 1553, 1355, 1413, 1409, 1421, 1440, 1391, 1291, 1496, 1561, 1562, 1563, 1564, 1565, 1566, 1568, 1567, 1569, 1570, 1571, 1520, 1294, 1323, 1572, 1297, 1330, 1392, 1635, 1558, 1559, 1560, 1347, 1634, 1526, 1399, 1618, 1671, 1674, 376: 1646, 378: 1655, 1669, 1668, 1699, 400: 1658, 404: 1616, 1677, 1650, 410: 1682, 419: 1645, 1647, 429: 1675, 1649, 1648, 433: 1676, 435: 1654, 1680, 1689, 1711, 1653, 1690, 1691, 1652, 1666, 1667, 1705, 1697, 1700, 1706, 1707, 1702, 1703, 1708, 1701, 1704, 1695, 1673, 1685, 1686, 1688, 1684, 1678, 1679, 1670, 1681, 1683, 1672, 1687, 1692, 1693, 523: 1657, 1212, 1213, 1211, 1663, 1659, 1651, 1662, 1660, 1661, 1694, 1698, 1696, 1656, 1665, 1709, 1710, 1664, 1621, 1620, 1619, 1842}, + {8: 1843, 963, 392: 1720, 1719, 395: 1718, 1717, 1715, 547: 1716, 1714}, + {441, 441, 441, 441, 441, 441, 441, 441, 441, 441, 441, 441, 441, 441, 441, 441, 441, 441, 441, 441, 441, 441, 441, 441, 441, 441, 441, 441, 441, 441, 441, 441, 441, 441, 441, 441, 441, 441, 441, 441, 441, 441, 441, 441, 441, 441, 441, 441, 441, 441, 441, 441, 441, 441, 441, 441, 441, 441, 441, 441, 441, 441, 441, 441, 441, 441, 441, 441, 441, 441, 441, 441, 441, 441, 441, 441, 441, 441, 441, 441, 441, 441, 441, 441, 441, 441, 441, 441, 441, 441, 441, 441, 441, 441, 441, 441, 441, 441, 441, 441, 441, 441, 441, 441, 441, 441, 441, 441, 441, 441, 441, 441, 441, 441, 441, 441, 441, 441, 441, 441, 441, 441, 441, 441, 441, 441, 441, 441, 441, 441, 441, 441, 441, 441, 441, 441, 441, 441, 441, 441, 441, 441, 441, 441, 441, 441, 441, 441, 441, 441, 441, 441, 441, 441, 441, 441, 441, 441, 441, 441, 441, 441, 441, 441, 441, 441, 441, 441, 441, 441, 441, 441, 441, 441, 441, 441, 441, 441, 441, 441, 441, 441, 441, 441, 441, 441, 441, 441, 441, 441, 441, 441, 441, 441, 441, 441, 441, 441, 441, 441, 441, 441, 441, 441, 441, 441, 441, 441, 441, 441, 441, 441, 441, 441, 441, 441, 441, 441, 441, 441, 441, 441, 441, 441, 441, 441, 441, 441, 441, 441, 441, 441, 441, 441, 441, 441, 441, 441, 441, 441, 441, 441, 441, 441, 441, 441, 441, 441, 441, 441, 441, 441, 441, 441, 441, 441, 441, 441, 441, 441, 441, 441, 441, 441, 441, 441, 441, 441, 441, 441, 441, 441, 441, 441, 441, 441, 441, 441, 441, 441, 441, 441, 441, 441, 441, 441, 441, 441, 441, 441, 441, 441, 441, 441, 441, 441, 441, 441, 441, 441, 441, 441, 441, 441, 441, 441, 441, 441, 441, 441, 441, 441, 441, 441, 441, 441, 441, 441, 441, 441, 441, 441, 441, 441, 441, 441, 441, 441, 441, 441, 441, 441, 441, 441, 441, 441, 441, 441, 441, 441, 441, 441, 441, 441, 441, 441, 441, 441, 441, 441, 441, 441, 441, 441, 441, 441, 441, 441, 441, 441, 441, 441, 441, 441, 441, 441, 441, 441, 441, 441, 441, 441, 441, 375: 441, 377: 441, 441, 441, 441, 441, 384: 441, 386: 441, 391: 441, 441, 441, 441, 441, 441, 441, 441, 441, 401: 441, 441, 441, 407: 441, 441, 441, 411: 441, 441, 441, 441, 441, 441, 441, 441, 421: 441, 441, 441, 441, 441, 441, 441, 441, 432: 441, 434: 441}, + // 685 + {442, 442, 442, 442, 442, 442, 442, 442, 442, 442, 442, 442, 442, 442, 442, 442, 442, 442, 442, 442, 442, 442, 442, 442, 
442, 442, 442, 442, 442, 442, 442, 442, 442, 442, 442, 442, 442, 442, 442, 442, 442, 442, 442, 442, 442, 442, 442, 442, 442, 442, 442, 442, 442, 442, 442, 442, 442, 442, 442, 442, 442, 442, 442, 442, 442, 442, 442, 442, 442, 442, 442, 442, 442, 442, 442, 442, 442, 442, 442, 442, 442, 442, 442, 442, 442, 442, 442, 442, 442, 442, 442, 442, 442, 442, 442, 442, 442, 442, 442, 442, 442, 442, 442, 442, 442, 442, 442, 442, 442, 442, 442, 442, 442, 442, 442, 442, 442, 442, 442, 442, 442, 442, 442, 442, 442, 442, 442, 442, 442, 442, 442, 442, 442, 442, 442, 442, 442, 442, 442, 442, 442, 442, 442, 442, 442, 442, 442, 442, 442, 442, 442, 442, 442, 442, 442, 442, 442, 442, 442, 442, 442, 442, 442, 442, 442, 442, 442, 442, 442, 442, 442, 442, 442, 442, 442, 442, 442, 442, 442, 442, 442, 442, 442, 442, 442, 442, 442, 442, 442, 442, 442, 442, 442, 442, 442, 442, 442, 442, 442, 442, 442, 442, 442, 442, 442, 442, 442, 442, 442, 442, 442, 442, 442, 442, 442, 442, 442, 442, 442, 442, 442, 442, 442, 442, 442, 442, 442, 442, 442, 442, 442, 442, 442, 442, 442, 442, 442, 442, 442, 442, 442, 442, 442, 442, 442, 442, 442, 442, 442, 442, 442, 442, 442, 442, 442, 442, 442, 442, 442, 442, 442, 442, 442, 442, 442, 442, 442, 442, 442, 442, 442, 442, 442, 442, 442, 442, 442, 442, 442, 442, 442, 442, 442, 442, 442, 442, 442, 442, 442, 442, 442, 442, 442, 442, 442, 442, 442, 442, 442, 442, 442, 442, 442, 442, 442, 442, 442, 442, 442, 442, 442, 442, 442, 442, 442, 442, 442, 442, 442, 442, 442, 442, 442, 442, 442, 442, 442, 442, 442, 442, 442, 442, 442, 442, 442, 442, 442, 442, 442, 442, 442, 442, 442, 442, 442, 442, 442, 442, 442, 442, 442, 442, 442, 442, 442, 442, 442, 442, 442, 442, 442, 442, 442, 442, 442, 442, 442, 442, 442, 442, 442, 442, 442, 375: 442, 377: 442, 442, 442, 442, 442, 384: 442, 386: 442, 391: 442, 442, 442, 442, 442, 442, 442, 442, 442, 401: 442, 442, 442, 407: 442, 442, 442, 411: 442, 442, 442, 442, 442, 442, 442, 442, 421: 442, 442, 442, 442, 442, 442, 442, 442, 432: 442, 434: 442}, + {443, 443, 443, 443, 443, 443, 443, 443, 443, 443, 443, 443, 443, 443, 443, 443, 443, 443, 443, 443, 443, 443, 443, 443, 443, 443, 443, 443, 443, 443, 443, 443, 443, 443, 443, 443, 443, 443, 443, 443, 443, 443, 443, 443, 443, 443, 443, 443, 443, 443, 443, 443, 443, 443, 443, 443, 443, 443, 443, 443, 443, 443, 443, 443, 443, 443, 443, 443, 443, 443, 443, 443, 443, 443, 443, 443, 443, 443, 443, 443, 443, 443, 443, 443, 443, 443, 443, 443, 443, 443, 443, 443, 443, 443, 443, 443, 443, 443, 443, 443, 443, 443, 443, 443, 443, 443, 443, 443, 443, 443, 443, 443, 443, 443, 443, 443, 443, 443, 443, 443, 443, 443, 443, 443, 443, 443, 443, 443, 443, 443, 443, 443, 443, 443, 443, 443, 443, 443, 443, 443, 443, 443, 443, 443, 443, 443, 443, 443, 443, 443, 443, 443, 443, 443, 443, 443, 443, 443, 443, 443, 443, 443, 443, 443, 443, 443, 443, 443, 443, 443, 443, 443, 443, 443, 443, 443, 443, 443, 443, 443, 443, 443, 443, 443, 443, 443, 443, 443, 443, 443, 443, 443, 443, 443, 443, 443, 443, 443, 443, 443, 443, 443, 443, 443, 443, 443, 443, 443, 443, 443, 443, 443, 443, 443, 443, 443, 443, 443, 443, 443, 443, 443, 443, 443, 443, 443, 443, 443, 443, 443, 443, 443, 443, 443, 443, 443, 443, 443, 443, 443, 443, 443, 443, 443, 443, 443, 443, 443, 443, 443, 443, 443, 443, 443, 443, 443, 443, 443, 443, 443, 443, 443, 443, 443, 443, 443, 443, 443, 443, 443, 443, 443, 443, 443, 443, 443, 443, 443, 443, 443, 443, 443, 443, 443, 443, 443, 443, 443, 443, 443, 443, 443, 443, 443, 443, 443, 443, 443, 443, 443, 443, 443, 443, 443, 443, 443, 443, 443, 443, 
443, 443, 443, 443, 443, 443, 443, 443, 443, 443, 443, 443, 443, 443, 443, 443, 443, 443, 443, 443, 443, 443, 443, 443, 443, 443, 443, 443, 443, 443, 443, 443, 443, 443, 443, 443, 443, 443, 443, 443, 443, 443, 443, 443, 443, 443, 443, 443, 443, 443, 443, 443, 443, 443, 443, 443, 443, 443, 443, 443, 443, 443, 443, 443, 375: 443, 377: 1846, 443, 443, 443, 443, 384: 443, 386: 443, 391: 443, 443, 443, 443, 443, 443, 443, 443, 443, 401: 443, 443, 443, 407: 443, 443, 443, 411: 443, 443, 443, 443, 443, 443, 443, 443, 421: 443, 443, 443, 443, 443, 443, 443, 443, 432: 443, 434: 443}, + {1309, 1332, 1217, 1442, 1436, 1426, 10: 1280, 1229, 1477, 1511, 1504, 1497, 1507, 1500, 1499, 1501, 1517, 1509, 1503, 1515, 1516, 1513, 1514, 1502, 1498, 1505, 1506, 1508, 1512, 1510, 1547, 1453, 1451, 1452, 1314, 1216, 1226, 1441, 1244, 1288, 1246, 1225, 1260, 1263, 1434, 1299, 1335, 1522, 1521, 1270, 1338, 1298, 1476, 1221, 1231, 1340, 1439, 1341, 1257, 1518, 1519, 1438, 1326, 1350, 1273, 1278, 1430, 1431, 1283, 1289, 1384, 1296, 1432, 1433, 1219, 1222, 1224, 1223, 1238, 1237, 1482, 1427, 1243, 1249, 1261, 1262, 1250, 1485, 1405, 1318, 1319, 1279, 1450, 1290, 1293, 1292, 1415, 1295, 1300, 1301, 1402, 1214, 1529, 1215, 1218, 1460, 1387, 1304, 1220, 1310, 1348, 1349, 1345, 1530, 1531, 1532, 1406, 1576, 1478, 1479, 1467, 1480, 1227, 1394, 1533, 1312, 1396, 1228, 1381, 1481, 1360, 1308, 1230, 1329, 1232, 1233, 1313, 1311, 1234, 1408, 1534, 1535, 1404, 1235, 1536, 1468, 1236, 1537, 1538, 1239, 1240, 1388, 1324, 1483, 1417, 1241, 1484, 1242, 1245, 1247, 1248, 1251, 1386, 1351, 1252, 1577, 1435, 1356, 1253, 1461, 1401, 1574, 1254, 1539, 1411, 1255, 1256, 1580, 1258, 1259, 1346, 1540, 1322, 1541, 1418, 1459, 1264, 1307, 1210, 1462, 1403, 1337, 1542, 1265, 1543, 1544, 1389, 1407, 1412, 1325, 1398, 1486, 1457, 1268, 1266, 1334, 1419, 1267, 1456, 1458, 1315, 1546, 1473, 1472, 1376, 1377, 1316, 1378, 1379, 1390, 1365, 1545, 1317, 1366, 1463, 1302, 1361, 1269, 1400, 1573, 1344, 1466, 1469, 1420, 1487, 1488, 1464, 1465, 1353, 1470, 1548, 1454, 1354, 1331, 1285, 1524, 1575, 1410, 1422, 1425, 1352, 1271, 1475, 1474, 1525, 1367, 1550, 1368, 1272, 1343, 1362, 1363, 1364, 1489, 1321, 1370, 1369, 1274, 1549, 1395, 1275, 1528, 1527, 1383, 1424, 1276, 1437, 1327, 1455, 1380, 1328, 1342, 1277, 1385, 1359, 1320, 1490, 1371, 1429, 1393, 1372, 1471, 1333, 1373, 1374, 1281, 1423, 1382, 1375, 1282, 1305, 1414, 1523, 1416, 1336, 1339, 1443, 1444, 1445, 1446, 1447, 1448, 1449, 1578, 1491, 1358, 1494, 1495, 1493, 1492, 1357, 1428, 1284, 1554, 1555, 1556, 1557, 1579, 1551, 1397, 1287, 1286, 1552, 1553, 1355, 1413, 1409, 1421, 1440, 1391, 1291, 1496, 1561, 1562, 1563, 1564, 1565, 1566, 1568, 1567, 1569, 1570, 1571, 1520, 1294, 1323, 1572, 1297, 1330, 1392, 1306, 1558, 1559, 1560, 1347, 1303, 1526, 1399, 378: 1831, 523: 1832, 1212, 1213, 1211, 581: 1847}, + {451, 451, 451, 451, 451, 451, 451, 451, 451, 451, 451, 451, 451, 451, 451, 451, 451, 451, 451, 451, 451, 451, 451, 451, 451, 451, 451, 451, 451, 451, 451, 451, 451, 451, 451, 451, 451, 451, 451, 451, 451, 451, 451, 451, 451, 451, 451, 451, 451, 451, 451, 451, 451, 451, 451, 451, 451, 451, 451, 451, 451, 451, 451, 451, 451, 451, 451, 451, 451, 451, 451, 451, 451, 451, 451, 451, 451, 451, 451, 451, 451, 451, 451, 451, 451, 451, 451, 451, 451, 451, 451, 451, 451, 451, 451, 451, 451, 451, 451, 451, 451, 451, 451, 451, 451, 451, 451, 451, 451, 451, 451, 451, 451, 451, 451, 451, 451, 451, 451, 451, 451, 451, 451, 451, 451, 451, 451, 451, 451, 451, 451, 451, 451, 451, 451, 451, 451, 451, 451, 451, 
451, 451, 451, 451, 451, 451, 451, 451, 451, 451, 451, 451, 451, 451, 451, 451, 451, 451, 451, 451, 451, 451, 451, 451, 451, 451, 451, 451, 451, 451, 451, 451, 451, 451, 451, 451, 451, 451, 451, 451, 451, 451, 451, 451, 451, 451, 451, 451, 451, 451, 451, 451, 451, 451, 451, 451, 451, 451, 451, 451, 451, 451, 451, 451, 451, 451, 451, 451, 451, 451, 451, 451, 451, 451, 451, 451, 451, 451, 451, 451, 451, 451, 451, 451, 451, 451, 451, 451, 451, 451, 451, 451, 451, 451, 451, 451, 451, 451, 451, 451, 451, 451, 451, 451, 451, 451, 451, 451, 451, 451, 451, 451, 451, 451, 451, 451, 451, 451, 451, 451, 451, 451, 451, 451, 451, 451, 451, 451, 451, 451, 451, 451, 451, 451, 451, 451, 451, 451, 451, 451, 451, 451, 451, 451, 451, 451, 451, 451, 451, 451, 451, 451, 451, 451, 451, 451, 451, 451, 451, 451, 451, 451, 451, 451, 451, 451, 451, 451, 451, 451, 451, 451, 451, 451, 451, 451, 451, 451, 451, 451, 451, 451, 451, 451, 451, 451, 451, 451, 451, 451, 451, 451, 451, 451, 451, 451, 451, 451, 451, 451, 451, 451, 451, 451, 451, 451, 451, 451, 451, 451, 451, 451, 451, 451, 451, 451, 451, 451, 451, 451, 451, 451, 451, 451, 451, 451, 451, 451, 451, 451, 451, 451, 451, 375: 451, 377: 451, 451, 451, 451, 451, 384: 451, 386: 451, 391: 451, 451, 451, 451, 451, 451, 451, 451, 451, 401: 451, 451, 451, 407: 451, 451, 451, 411: 451, 451, 451, 451, 451, 451, 451, 451, 421: 451, 451, 451, 451, 451, 451, 451, 451, 432: 451, 434: 451}, + {444, 444, 444, 444, 444, 444, 444, 444, 444, 444, 444, 444, 444, 444, 444, 444, 444, 444, 444, 444, 444, 444, 444, 444, 444, 444, 444, 444, 444, 444, 444, 444, 444, 444, 444, 444, 444, 444, 444, 444, 444, 444, 444, 444, 444, 444, 444, 444, 444, 444, 444, 444, 444, 444, 444, 444, 444, 444, 444, 444, 444, 444, 444, 444, 444, 444, 444, 444, 444, 444, 444, 444, 444, 444, 444, 444, 444, 444, 444, 444, 444, 444, 444, 444, 444, 444, 444, 444, 444, 444, 444, 444, 444, 444, 444, 444, 444, 444, 444, 444, 444, 444, 444, 444, 444, 444, 444, 444, 444, 444, 444, 444, 444, 444, 444, 444, 444, 444, 444, 444, 444, 444, 444, 444, 444, 444, 444, 444, 444, 444, 444, 444, 444, 444, 444, 444, 444, 444, 444, 444, 444, 444, 444, 444, 444, 444, 444, 444, 444, 444, 444, 444, 444, 444, 444, 444, 444, 444, 444, 444, 444, 444, 444, 444, 444, 444, 444, 444, 444, 444, 444, 444, 444, 444, 444, 444, 444, 444, 444, 444, 444, 444, 444, 444, 444, 444, 444, 444, 444, 444, 444, 444, 444, 444, 444, 444, 444, 444, 444, 444, 444, 444, 444, 444, 444, 444, 444, 444, 444, 444, 444, 444, 444, 444, 444, 444, 444, 444, 444, 444, 444, 444, 444, 444, 444, 444, 444, 444, 444, 444, 444, 444, 444, 444, 444, 444, 444, 444, 444, 444, 444, 444, 444, 444, 444, 444, 444, 444, 444, 444, 444, 444, 444, 444, 444, 444, 444, 444, 444, 444, 444, 444, 444, 444, 444, 444, 444, 444, 444, 444, 444, 444, 444, 444, 444, 444, 444, 444, 444, 444, 444, 444, 444, 444, 444, 444, 444, 444, 444, 444, 444, 444, 444, 444, 444, 444, 444, 444, 444, 444, 444, 444, 444, 444, 444, 444, 444, 444, 444, 444, 444, 444, 444, 444, 444, 444, 444, 444, 444, 444, 444, 444, 444, 444, 444, 444, 444, 444, 444, 444, 444, 444, 444, 444, 444, 444, 444, 444, 444, 444, 444, 444, 444, 444, 444, 444, 444, 444, 444, 444, 444, 444, 444, 444, 444, 444, 444, 444, 444, 444, 444, 444, 444, 444, 444, 444, 444, 444, 444, 444, 444, 444, 444, 375: 444, 377: 1846, 444, 444, 444, 444, 384: 444, 386: 444, 391: 444, 444, 444, 444, 444, 444, 444, 444, 444, 401: 444, 444, 444, 407: 444, 444, 444, 411: 444, 444, 444, 444, 444, 444, 444, 444, 421: 444, 444, 444, 444, 444, 444, 444, 444, 432: 444, 434: 
444}, + // 690 + {445, 445, 445, 445, 445, 445, 445, 445, 445, 445, 445, 445, 445, 445, 445, 445, 445, 445, 445, 445, 445, 445, 445, 445, 445, 445, 445, 445, 445, 445, 445, 445, 445, 445, 445, 445, 445, 445, 445, 445, 445, 445, 445, 445, 445, 445, 445, 445, 445, 445, 445, 445, 445, 445, 445, 445, 445, 445, 445, 445, 445, 445, 445, 445, 445, 445, 445, 445, 445, 445, 445, 445, 445, 445, 445, 445, 445, 445, 445, 445, 445, 445, 445, 445, 445, 445, 445, 445, 445, 445, 445, 445, 445, 445, 445, 445, 445, 445, 445, 445, 445, 445, 445, 445, 445, 445, 445, 445, 445, 445, 445, 445, 445, 445, 445, 445, 445, 445, 445, 445, 445, 445, 445, 445, 445, 445, 445, 445, 445, 445, 445, 445, 445, 445, 445, 445, 445, 445, 445, 445, 445, 445, 445, 445, 445, 445, 445, 445, 445, 445, 445, 445, 445, 445, 445, 445, 445, 445, 445, 445, 445, 445, 445, 445, 445, 445, 445, 445, 445, 445, 445, 445, 445, 445, 445, 445, 445, 445, 445, 445, 445, 445, 445, 445, 445, 445, 445, 445, 445, 445, 445, 445, 445, 445, 445, 445, 445, 445, 445, 445, 445, 445, 445, 445, 445, 445, 445, 445, 445, 445, 445, 445, 445, 445, 445, 445, 445, 445, 445, 445, 445, 445, 445, 445, 445, 445, 445, 445, 445, 445, 445, 445, 445, 445, 445, 445, 445, 445, 445, 445, 445, 445, 445, 445, 445, 445, 445, 445, 445, 445, 445, 445, 445, 445, 445, 445, 445, 445, 445, 445, 445, 445, 445, 445, 445, 445, 445, 445, 445, 445, 445, 445, 445, 445, 445, 445, 445, 445, 445, 445, 445, 445, 445, 445, 445, 445, 445, 445, 445, 445, 445, 445, 445, 445, 445, 445, 445, 445, 445, 445, 445, 445, 445, 445, 445, 445, 445, 445, 445, 445, 445, 445, 445, 445, 445, 445, 445, 445, 445, 445, 445, 445, 445, 445, 445, 445, 445, 445, 445, 445, 445, 445, 445, 445, 445, 445, 445, 445, 445, 445, 445, 445, 445, 445, 445, 445, 445, 445, 445, 445, 445, 445, 445, 445, 445, 445, 445, 445, 445, 445, 445, 445, 445, 445, 445, 445, 445, 445, 445, 445, 445, 445, 445, 375: 445, 377: 1846, 445, 445, 445, 445, 384: 445, 386: 445, 391: 445, 445, 445, 445, 445, 445, 445, 445, 445, 401: 445, 445, 445, 407: 445, 445, 445, 411: 445, 445, 445, 445, 445, 445, 445, 445, 421: 445, 445, 445, 445, 445, 445, 445, 445, 432: 445, 434: 445}, + {446, 446, 446, 446, 446, 446, 446, 446, 446, 446, 446, 446, 446, 446, 446, 446, 446, 446, 446, 446, 446, 446, 446, 446, 446, 446, 446, 446, 446, 446, 446, 446, 446, 446, 446, 446, 446, 446, 446, 446, 446, 446, 446, 446, 446, 446, 446, 446, 446, 446, 446, 446, 446, 446, 446, 446, 446, 446, 446, 446, 446, 446, 446, 446, 446, 446, 446, 446, 446, 446, 446, 446, 446, 446, 446, 446, 446, 446, 446, 446, 446, 446, 446, 446, 446, 446, 446, 446, 446, 446, 446, 446, 446, 446, 446, 446, 446, 446, 446, 446, 446, 446, 446, 446, 446, 446, 446, 446, 446, 446, 446, 446, 446, 446, 446, 446, 446, 446, 446, 446, 446, 446, 446, 446, 446, 446, 446, 446, 446, 446, 446, 446, 446, 446, 446, 446, 446, 446, 446, 446, 446, 446, 446, 446, 446, 446, 446, 446, 446, 446, 446, 446, 446, 446, 446, 446, 446, 446, 446, 446, 446, 446, 446, 446, 446, 446, 446, 446, 446, 446, 446, 446, 446, 446, 446, 446, 446, 446, 446, 446, 446, 446, 446, 446, 446, 446, 446, 446, 446, 446, 446, 446, 446, 446, 446, 446, 446, 446, 446, 446, 446, 446, 446, 446, 446, 446, 446, 446, 446, 446, 446, 446, 446, 446, 446, 446, 446, 446, 446, 446, 446, 446, 446, 446, 446, 446, 446, 446, 446, 446, 446, 446, 446, 446, 446, 446, 446, 446, 446, 446, 446, 446, 446, 446, 446, 446, 446, 446, 446, 446, 446, 446, 446, 446, 446, 446, 446, 446, 446, 446, 446, 446, 446, 446, 446, 446, 446, 446, 446, 446, 446, 446, 446, 446, 446, 446, 446, 446, 446, 446, 446, 
446, 446, 446, 446, 446, 446, 446, 446, 446, 446, 446, 446, 446, 446, 446, 446, 446, 446, 446, 446, 446, 446, 446, 446, 446, 446, 446, 446, 446, 446, 446, 446, 446, 446, 446, 446, 446, 446, 446, 446, 446, 446, 446, 446, 446, 446, 446, 446, 446, 446, 446, 446, 446, 446, 446, 446, 446, 446, 446, 446, 446, 446, 446, 446, 446, 446, 446, 446, 446, 446, 446, 446, 446, 446, 446, 446, 446, 446, 446, 446, 446, 446, 446, 446, 446, 446, 446, 446, 446, 446, 446, 446, 375: 446, 377: 1846, 446, 446, 446, 446, 384: 446, 386: 446, 391: 446, 446, 446, 446, 446, 446, 446, 446, 446, 401: 446, 446, 446, 407: 446, 446, 446, 411: 446, 446, 446, 446, 446, 446, 446, 446, 421: 446, 446, 446, 446, 446, 446, 446, 446, 432: 446, 434: 446}, + {447, 447, 447, 447, 447, 447, 447, 447, 447, 447, 447, 447, 447, 447, 447, 447, 447, 447, 447, 447, 447, 447, 447, 447, 447, 447, 447, 447, 447, 447, 447, 447, 447, 447, 447, 447, 447, 447, 447, 447, 447, 447, 447, 447, 447, 447, 447, 447, 447, 447, 447, 447, 447, 447, 447, 447, 447, 447, 447, 447, 447, 447, 447, 447, 447, 447, 447, 447, 447, 447, 447, 447, 447, 447, 447, 447, 447, 447, 447, 447, 447, 447, 447, 447, 447, 447, 447, 447, 447, 447, 447, 447, 447, 447, 447, 447, 447, 447, 447, 447, 447, 447, 447, 447, 447, 447, 447, 447, 447, 447, 447, 447, 447, 447, 447, 447, 447, 447, 447, 447, 447, 447, 447, 447, 447, 447, 447, 447, 447, 447, 447, 447, 447, 447, 447, 447, 447, 447, 447, 447, 447, 447, 447, 447, 447, 447, 447, 447, 447, 447, 447, 447, 447, 447, 447, 447, 447, 447, 447, 447, 447, 447, 447, 447, 447, 447, 447, 447, 447, 447, 447, 447, 447, 447, 447, 447, 447, 447, 447, 447, 447, 447, 447, 447, 447, 447, 447, 447, 447, 447, 447, 447, 447, 447, 447, 447, 447, 447, 447, 447, 447, 447, 447, 447, 447, 447, 447, 447, 447, 447, 447, 447, 447, 447, 447, 447, 447, 447, 447, 447, 447, 447, 447, 447, 447, 447, 447, 447, 447, 447, 447, 447, 447, 447, 447, 447, 447, 447, 447, 447, 447, 447, 447, 447, 447, 447, 447, 447, 447, 447, 447, 447, 447, 447, 447, 447, 447, 447, 447, 447, 447, 447, 447, 447, 447, 447, 447, 447, 447, 447, 447, 447, 447, 447, 447, 447, 447, 447, 447, 447, 447, 447, 447, 447, 447, 447, 447, 447, 447, 447, 447, 447, 447, 447, 447, 447, 447, 447, 447, 447, 447, 447, 447, 447, 447, 447, 447, 447, 447, 447, 447, 447, 447, 447, 447, 447, 447, 447, 447, 447, 447, 447, 447, 447, 447, 447, 447, 447, 447, 447, 447, 447, 447, 447, 447, 447, 447, 447, 447, 447, 447, 447, 447, 447, 447, 447, 447, 447, 447, 447, 447, 447, 447, 447, 447, 447, 447, 447, 447, 447, 447, 447, 447, 447, 447, 447, 447, 447, 447, 447, 447, 447, 447, 375: 447, 377: 1846, 447, 447, 447, 447, 384: 447, 386: 447, 391: 447, 447, 447, 447, 447, 447, 447, 447, 447, 401: 447, 447, 447, 407: 447, 447, 447, 411: 447, 447, 447, 447, 447, 447, 447, 447, 421: 447, 447, 447, 447, 447, 447, 447, 447, 432: 447, 434: 447}, + {400: 1853}, + {1309, 1332, 1217, 1442, 1436, 1426, 10: 1280, 1229, 1477, 1511, 1504, 1497, 1507, 1500, 1499, 1501, 1517, 1509, 1503, 1515, 1516, 1513, 1514, 1502, 1498, 1505, 1506, 1508, 1512, 1510, 1547, 1453, 1451, 1452, 1314, 1216, 1226, 1441, 1244, 1288, 1246, 1225, 1260, 1263, 1434, 1299, 1335, 1522, 1521, 1270, 1338, 1298, 1476, 1221, 1231, 1340, 1439, 1341, 1257, 1518, 1519, 1438, 1326, 1350, 1273, 1278, 1430, 1431, 1283, 1289, 1384, 1296, 1432, 1433, 1219, 1222, 1224, 1223, 1238, 1237, 1482, 1427, 1243, 1249, 1261, 1262, 1250, 1485, 1405, 1318, 1319, 1279, 1450, 1290, 1293, 1292, 1415, 1295, 1300, 1301, 1402, 1214, 1529, 1215, 1218, 1460, 1387, 1304, 1220, 1310, 1348, 1349, 1345, 
1530, 1531, 1532, 1406, 1576, 1478, 1479, 1467, 1480, 1227, 1394, 1533, 1312, 1396, 1228, 1381, 1481, 1360, 1308, 1230, 1329, 1232, 1233, 1313, 1311, 1234, 1408, 1534, 1535, 1404, 1235, 1536, 1468, 1236, 1537, 1538, 1239, 1240, 1388, 1324, 1483, 1417, 1241, 1484, 1242, 1245, 1247, 1248, 1251, 1386, 1351, 1252, 1577, 1435, 1356, 1253, 1461, 1401, 1574, 1254, 1539, 1411, 1255, 1256, 1580, 1258, 1259, 1346, 1540, 1322, 1541, 1418, 1459, 1264, 1307, 1210, 1462, 1403, 1337, 1542, 1265, 1543, 1544, 1389, 1407, 1412, 1325, 1398, 1486, 1457, 1268, 1266, 1334, 1419, 1267, 1456, 1458, 1315, 1546, 1473, 1472, 1376, 1377, 1316, 1378, 1379, 1390, 1365, 1545, 1317, 1366, 1463, 1302, 1361, 1269, 1400, 1573, 1344, 1466, 1469, 1420, 1487, 1488, 1464, 1465, 1353, 1470, 1548, 1454, 1354, 1331, 1285, 1524, 1575, 1410, 1422, 1425, 1352, 1271, 1475, 1474, 1525, 1367, 1550, 1368, 1272, 1343, 1362, 1363, 1364, 1489, 1321, 1370, 1369, 1274, 1549, 1395, 1275, 1528, 1527, 1383, 1424, 1276, 1437, 1327, 1455, 1380, 1328, 1342, 1277, 1385, 1359, 1320, 1490, 1371, 1429, 1393, 1372, 1471, 1333, 1373, 1374, 1281, 1423, 1382, 1375, 1282, 1305, 1414, 1523, 1416, 1336, 1339, 1443, 1444, 1445, 1446, 1447, 1448, 1449, 1578, 1491, 1358, 1494, 1495, 1493, 1492, 1357, 1428, 1284, 1554, 1555, 1556, 1557, 1579, 1551, 1397, 1287, 1286, 1552, 1553, 1355, 1413, 1409, 1421, 1440, 1391, 1291, 1496, 1561, 1562, 1563, 1564, 1565, 1566, 1568, 1567, 1569, 1570, 1571, 1520, 1294, 1323, 1572, 1297, 1330, 1392, 1306, 1558, 1559, 1560, 1347, 1303, 1526, 1399, 523: 1854, 1212, 1213, 1211}, + // 695 + {457, 457, 457, 457, 457, 457, 457, 457, 457, 457, 457, 457, 457, 457, 457, 457, 457, 457, 457, 457, 457, 457, 457, 457, 457, 457, 457, 457, 457, 457, 457, 457, 457, 457, 457, 457, 457, 457, 457, 457, 457, 457, 457, 457, 457, 457, 457, 457, 457, 457, 457, 457, 457, 457, 457, 457, 457, 457, 457, 457, 457, 457, 457, 457, 457, 457, 457, 457, 457, 457, 457, 457, 457, 457, 457, 457, 457, 457, 457, 457, 457, 457, 457, 457, 457, 457, 457, 457, 457, 457, 457, 457, 457, 457, 457, 457, 457, 457, 457, 457, 457, 457, 457, 457, 457, 457, 457, 457, 457, 457, 457, 457, 457, 457, 457, 457, 457, 457, 457, 457, 457, 457, 457, 457, 457, 457, 457, 457, 457, 457, 457, 457, 457, 457, 457, 457, 457, 457, 457, 457, 457, 457, 457, 457, 457, 457, 457, 457, 457, 457, 457, 457, 457, 457, 457, 457, 457, 457, 457, 457, 457, 457, 457, 457, 457, 457, 457, 457, 457, 457, 457, 457, 457, 457, 457, 457, 457, 457, 457, 457, 457, 457, 457, 457, 457, 457, 457, 457, 457, 457, 457, 457, 457, 457, 457, 457, 457, 457, 457, 457, 457, 457, 457, 457, 457, 457, 457, 457, 457, 457, 457, 457, 457, 457, 457, 457, 457, 457, 457, 457, 457, 457, 457, 457, 457, 457, 457, 457, 457, 457, 457, 457, 457, 457, 457, 457, 457, 457, 457, 457, 457, 457, 457, 457, 457, 457, 457, 457, 457, 457, 457, 457, 457, 457, 457, 457, 457, 457, 457, 457, 457, 457, 457, 457, 457, 457, 457, 457, 457, 457, 457, 457, 457, 457, 457, 457, 457, 457, 457, 457, 457, 457, 457, 457, 457, 457, 457, 457, 457, 457, 457, 457, 457, 457, 457, 457, 457, 457, 457, 457, 457, 457, 457, 457, 457, 457, 457, 457, 457, 457, 457, 457, 457, 457, 457, 457, 457, 457, 457, 457, 457, 457, 457, 457, 457, 457, 457, 457, 457, 457, 457, 457, 457, 457, 457, 457, 457, 457, 457, 457, 457, 457, 457, 457, 457, 457, 457, 457, 457, 457, 457, 457, 457, 457, 457, 457, 457, 457, 457, 457, 457, 457, 457, 457, 457, 457, 457, 457, 457, 457, 457, 457, 457, 375: 457, 377: 457, 457, 457, 457, 457, 384: 457, 386: 457, 391: 457, 457, 457, 457, 457, 457, 457, 457, 457, 401: 
457, 457, 457, 407: 457, 457, 457, 411: 457, 457, 457, 457, 457, 457, 457, 457, 421: 457, 457, 457, 457, 457, 457, 457, 457, 432: 457, 434: 457}, + {1309, 1332, 1217, 1442, 1436, 1426, 10: 1280, 1229, 1477, 1511, 1504, 1497, 1507, 1500, 1499, 1501, 1517, 1509, 1503, 1515, 1516, 1513, 1514, 1502, 1498, 1505, 1506, 1508, 1512, 1510, 1547, 1453, 1451, 1452, 1314, 1216, 1226, 1441, 1244, 1288, 1246, 1225, 1260, 1263, 1434, 1299, 1335, 1522, 1521, 1270, 1338, 1298, 1476, 1221, 1231, 1340, 1439, 1341, 1257, 1518, 1519, 1438, 1326, 1350, 1273, 1278, 1430, 1431, 1283, 1289, 1384, 1296, 1432, 1433, 1219, 1222, 1224, 1223, 1238, 1237, 1482, 1427, 1243, 1249, 1261, 1262, 1250, 1485, 1405, 1318, 1319, 1279, 1450, 1290, 1293, 1292, 1415, 1295, 1300, 1301, 1402, 1214, 1529, 1215, 1218, 1460, 1387, 1304, 1220, 1310, 1348, 1349, 1345, 1530, 1531, 1532, 1406, 1576, 1478, 1479, 1467, 1480, 1227, 1394, 1533, 1312, 1396, 1228, 1381, 1481, 1360, 1308, 1230, 1329, 1232, 1233, 1313, 1311, 1234, 1408, 1534, 1535, 1404, 1235, 1536, 1468, 1236, 1537, 1538, 1239, 1240, 1388, 1324, 1483, 1417, 1241, 1484, 1242, 1245, 1247, 1248, 1251, 1386, 1351, 1252, 1577, 1435, 1356, 1253, 1461, 1401, 1574, 1254, 1539, 1411, 1255, 1256, 1580, 1258, 1259, 1346, 1540, 1322, 1541, 1418, 1459, 1264, 1307, 1210, 1462, 1403, 1337, 1542, 1265, 1543, 1544, 1389, 1407, 1412, 1325, 1398, 1486, 1457, 1268, 1266, 1334, 1419, 1267, 1456, 1458, 1315, 1546, 1473, 1472, 1376, 1377, 1316, 1378, 1379, 1390, 1365, 1545, 1317, 1366, 1463, 1302, 1361, 1269, 1400, 1573, 1344, 1466, 1469, 1420, 1487, 1488, 1464, 1465, 1353, 1470, 1548, 1454, 1354, 1331, 1285, 1524, 1575, 1410, 1422, 1425, 1352, 1271, 1475, 1474, 1525, 1367, 1550, 1368, 1272, 1343, 1362, 1363, 1364, 1489, 1321, 1370, 1369, 1274, 1549, 1395, 1275, 1528, 1527, 1383, 1424, 1276, 1437, 1327, 1455, 1380, 1328, 1342, 1277, 1385, 1359, 1320, 1490, 1371, 1429, 1393, 1372, 1471, 1333, 1373, 1374, 1281, 1423, 1382, 1375, 1282, 1305, 1414, 1523, 1416, 1336, 1339, 1443, 1444, 1445, 1446, 1447, 1448, 1449, 1578, 1491, 1358, 1494, 1495, 1493, 1492, 1357, 1428, 1284, 1554, 1555, 1556, 1557, 1579, 1551, 1397, 1287, 1286, 1552, 1553, 1355, 1413, 1409, 1421, 1440, 1391, 1291, 1496, 1561, 1562, 1563, 1564, 1565, 1566, 1568, 1567, 1569, 1570, 1571, 1520, 1294, 1323, 1572, 1297, 1330, 1392, 1306, 1558, 1559, 1560, 1347, 1303, 1526, 1399, 523: 1856, 1212, 1213, 1211}, + {458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 
458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 375: 458, 377: 458, 458, 458, 458, 458, 384: 458, 386: 458, 391: 458, 458, 458, 458, 458, 458, 458, 458, 458, 1857, 458, 458, 458, 407: 458, 458, 458, 411: 458, 458, 458, 458, 458, 458, 458, 458, 421: 458, 458, 458, 458, 458, 458, 458, 458, 432: 458, 434: 458}, + {1309, 1332, 1217, 1442, 1436, 1426, 10: 1280, 1229, 1477, 1511, 1504, 1497, 1507, 1500, 1499, 1501, 1517, 1509, 1503, 1515, 1516, 1513, 1514, 1502, 1498, 1505, 1506, 1508, 1512, 1510, 1547, 1453, 1451, 1452, 1314, 1216, 1226, 1441, 1244, 1288, 1246, 1225, 1260, 1263, 1434, 1299, 1335, 1522, 1521, 1270, 1338, 1298, 1476, 1221, 1231, 1340, 1439, 1341, 1257, 1518, 1519, 1438, 1326, 1350, 1273, 1278, 1430, 1431, 1283, 1289, 1384, 1296, 1432, 1433, 1219, 1222, 1224, 1223, 1238, 1237, 1482, 1427, 1243, 1249, 1261, 1262, 1250, 1485, 1405, 1318, 1319, 1279, 1450, 1290, 1293, 1292, 1415, 1295, 1300, 1301, 1402, 1214, 1529, 1215, 1218, 1460, 1387, 1304, 1220, 1310, 1348, 1349, 1345, 1530, 1531, 1532, 1406, 1576, 1478, 1479, 1467, 1480, 1227, 1394, 1533, 1312, 1396, 1228, 1381, 1481, 1360, 1308, 1230, 1329, 1232, 1233, 1313, 1311, 1234, 1408, 1534, 1535, 1404, 1235, 1536, 1468, 1236, 1537, 1538, 1239, 1240, 1388, 1324, 1483, 1417, 1241, 1484, 1242, 1245, 1247, 1248, 1251, 1386, 1351, 1252, 1577, 1435, 1356, 1253, 1461, 1401, 1574, 1254, 1539, 1411, 1255, 1256, 1580, 1258, 1259, 1346, 1540, 1322, 1541, 1418, 1459, 1264, 1307, 1210, 1462, 1403, 1337, 1542, 1265, 1543, 1544, 1389, 1407, 1412, 1325, 1398, 1486, 1457, 1268, 1266, 1334, 1419, 1267, 1456, 1458, 1315, 1546, 1473, 1472, 1376, 1377, 1316, 1378, 1379, 1390, 1365, 1545, 1317, 1366, 1463, 1302, 1361, 1269, 1400, 1573, 1344, 1466, 1469, 1420, 1487, 1488, 1464, 1465, 1353, 1470, 1548, 1454, 1354, 1331, 1285, 1524, 1575, 1410, 1422, 1425, 1352, 1271, 1475, 1474, 1525, 1367, 1550, 1368, 1272, 1343, 1362, 1363, 1364, 1489, 1321, 1370, 1369, 1274, 1549, 1395, 1275, 1528, 1527, 1383, 1424, 1276, 1437, 1327, 1455, 1380, 1328, 1342, 1277, 1385, 1359, 1320, 1490, 1371, 1429, 1393, 1372, 1471, 1333, 1373, 1374, 1281, 1423, 1382, 1375, 1282, 1305, 1414, 1523, 1416, 1336, 1339, 1443, 1444, 1445, 1446, 1447, 1448, 1449, 1578, 1491, 1358, 1494, 1495, 1493, 1492, 1357, 1428, 1284, 1554, 1555, 1556, 1557, 1579, 1551, 1397, 1287, 1286, 1552, 1553, 1355, 1413, 1409, 1421, 1440, 1391, 1291, 1496, 1561, 1562, 1563, 1564, 1565, 1566, 1568, 1567, 1569, 1570, 1571, 1520, 1294, 1323, 1572, 1297, 1330, 1392, 1306, 1558, 1559, 1560, 1347, 1303, 1526, 1399, 523: 1858, 1212, 1213, 1211}, + {456, 456, 456, 456, 456, 456, 456, 456, 456, 456, 456, 456, 456, 456, 456, 456, 456, 456, 456, 456, 456, 456, 456, 456, 456, 456, 456, 456, 456, 456, 456, 456, 456, 456, 456, 456, 456, 456, 456, 456, 456, 456, 456, 456, 456, 456, 456, 456, 456, 456, 456, 456, 456, 456, 456, 456, 456, 456, 456, 456, 456, 456, 456, 456, 456, 456, 456, 456, 
456, 456, 456, 456, 456, 456, 456, 456, 456, 456, 456, 456, 456, 456, 456, 456, 456, 456, 456, 456, 456, 456, 456, 456, 456, 456, 456, 456, 456, 456, 456, 456, 456, 456, 456, 456, 456, 456, 456, 456, 456, 456, 456, 456, 456, 456, 456, 456, 456, 456, 456, 456, 456, 456, 456, 456, 456, 456, 456, 456, 456, 456, 456, 456, 456, 456, 456, 456, 456, 456, 456, 456, 456, 456, 456, 456, 456, 456, 456, 456, 456, 456, 456, 456, 456, 456, 456, 456, 456, 456, 456, 456, 456, 456, 456, 456, 456, 456, 456, 456, 456, 456, 456, 456, 456, 456, 456, 456, 456, 456, 456, 456, 456, 456, 456, 456, 456, 456, 456, 456, 456, 456, 456, 456, 456, 456, 456, 456, 456, 456, 456, 456, 456, 456, 456, 456, 456, 456, 456, 456, 456, 456, 456, 456, 456, 456, 456, 456, 456, 456, 456, 456, 456, 456, 456, 456, 456, 456, 456, 456, 456, 456, 456, 456, 456, 456, 456, 456, 456, 456, 456, 456, 456, 456, 456, 456, 456, 456, 456, 456, 456, 456, 456, 456, 456, 456, 456, 456, 456, 456, 456, 456, 456, 456, 456, 456, 456, 456, 456, 456, 456, 456, 456, 456, 456, 456, 456, 456, 456, 456, 456, 456, 456, 456, 456, 456, 456, 456, 456, 456, 456, 456, 456, 456, 456, 456, 456, 456, 456, 456, 456, 456, 456, 456, 456, 456, 456, 456, 456, 456, 456, 456, 456, 456, 456, 456, 456, 456, 456, 456, 456, 456, 456, 456, 456, 456, 456, 456, 456, 456, 456, 456, 456, 456, 456, 456, 456, 456, 456, 456, 456, 456, 456, 456, 456, 456, 456, 456, 456, 456, 456, 456, 456, 456, 456, 456, 456, 456, 456, 456, 456, 456, 456, 456, 456, 456, 456, 456, 456, 456, 456, 456, 456, 456, 456, 375: 456, 377: 456, 456, 456, 456, 456, 384: 456, 386: 456, 391: 456, 456, 456, 456, 456, 456, 456, 456, 456, 401: 456, 456, 456, 407: 456, 456, 456, 411: 456, 456, 456, 456, 456, 456, 456, 456, 421: 456, 456, 456, 456, 456, 456, 456, 456, 432: 456, 434: 456}, + // 700 + {486, 486, 486, 486, 486, 486, 486, 486, 486, 486, 486, 486, 486, 486, 486, 486, 486, 486, 486, 486, 486, 486, 486, 486, 486, 486, 486, 486, 486, 486, 486, 486, 486, 486, 486, 486, 486, 486, 486, 486, 486, 486, 486, 486, 486, 486, 486, 486, 486, 486, 486, 486, 486, 486, 486, 486, 486, 486, 486, 486, 486, 486, 486, 486, 486, 486, 486, 486, 486, 486, 486, 486, 486, 486, 486, 486, 486, 486, 486, 486, 486, 486, 486, 486, 486, 486, 486, 486, 486, 486, 486, 486, 486, 486, 486, 486, 486, 486, 486, 486, 486, 486, 486, 486, 486, 486, 486, 486, 486, 486, 486, 486, 486, 486, 486, 486, 486, 486, 486, 486, 486, 486, 486, 486, 486, 486, 486, 486, 486, 486, 486, 486, 486, 486, 486, 486, 486, 486, 486, 486, 486, 486, 486, 486, 486, 486, 486, 486, 486, 486, 486, 486, 486, 486, 486, 486, 486, 486, 486, 486, 486, 486, 486, 486, 486, 486, 486, 486, 486, 486, 486, 486, 486, 486, 486, 486, 486, 486, 486, 486, 486, 486, 486, 486, 486, 486, 486, 486, 486, 486, 486, 486, 486, 486, 486, 486, 486, 486, 486, 486, 486, 486, 486, 486, 486, 486, 486, 486, 486, 486, 486, 486, 486, 486, 486, 486, 486, 486, 486, 486, 486, 486, 486, 486, 486, 486, 486, 486, 486, 486, 486, 486, 486, 486, 486, 486, 486, 486, 486, 486, 486, 486, 486, 486, 486, 486, 486, 486, 486, 486, 486, 486, 486, 486, 486, 486, 486, 486, 486, 486, 486, 486, 486, 486, 486, 486, 486, 486, 486, 486, 486, 486, 486, 486, 486, 486, 486, 486, 486, 486, 486, 486, 486, 486, 486, 486, 486, 486, 486, 486, 486, 486, 486, 486, 486, 486, 486, 486, 486, 486, 486, 486, 486, 486, 486, 486, 486, 486, 486, 486, 486, 486, 486, 486, 486, 486, 486, 486, 486, 486, 486, 486, 486, 486, 486, 486, 486, 486, 486, 486, 486, 486, 486, 486, 486, 486, 486, 486, 486, 486, 486, 486, 486, 486, 486, 486, 486, 486, 486, 486, 486, 
486, 486, 486, 486, 486, 486, 486, 486, 486, 486, 486, 486, 486, 486, 486, 486, 486, 486, 486, 486, 486, 486, 374: 486, 486, 486, 486, 486, 486, 486, 486, 486, 486, 486, 486, 486, 486, 486, 486, 486, 486, 486, 486, 486, 486, 486, 486, 486, 486, 401: 486, 486, 486, 407: 486, 486, 486, 411: 486, 486, 486, 486, 486, 486, 486, 486, 421: 486, 486, 486, 486, 486, 486, 486, 486, 432: 486, 434: 486}, + {482, 482, 482, 482, 482, 482, 482, 482, 482, 482, 482, 482, 482, 482, 482, 482, 482, 482, 482, 482, 482, 482, 482, 482, 482, 482, 482, 482, 482, 482, 482, 482, 482, 482, 482, 482, 482, 482, 482, 482, 482, 482, 482, 482, 482, 482, 482, 482, 482, 482, 482, 482, 482, 482, 482, 482, 482, 482, 482, 482, 482, 482, 482, 482, 482, 482, 482, 482, 482, 482, 482, 482, 482, 482, 482, 482, 482, 482, 482, 482, 482, 482, 482, 482, 482, 482, 482, 482, 482, 482, 482, 482, 482, 482, 482, 482, 482, 482, 482, 482, 482, 482, 482, 482, 482, 482, 482, 482, 482, 482, 482, 482, 482, 482, 482, 482, 482, 482, 482, 482, 482, 482, 482, 482, 482, 482, 482, 482, 482, 482, 482, 482, 482, 482, 482, 482, 482, 482, 482, 482, 482, 482, 482, 482, 482, 482, 482, 482, 482, 482, 482, 482, 482, 482, 482, 482, 482, 482, 482, 482, 482, 482, 482, 482, 482, 482, 482, 482, 482, 482, 482, 482, 482, 482, 482, 482, 482, 482, 482, 482, 482, 482, 482, 482, 482, 482, 482, 482, 482, 482, 482, 482, 482, 482, 482, 482, 482, 482, 482, 482, 482, 482, 482, 482, 482, 482, 482, 482, 482, 482, 482, 482, 482, 482, 482, 482, 482, 482, 482, 482, 482, 482, 482, 482, 482, 482, 482, 482, 482, 482, 482, 482, 482, 482, 482, 482, 482, 482, 482, 482, 482, 482, 482, 482, 482, 482, 482, 482, 482, 482, 482, 482, 482, 482, 482, 482, 482, 482, 482, 482, 482, 482, 482, 482, 482, 482, 482, 482, 482, 482, 482, 482, 482, 482, 482, 482, 482, 482, 482, 482, 482, 482, 482, 482, 482, 482, 482, 482, 482, 482, 482, 482, 482, 482, 482, 482, 482, 482, 482, 482, 482, 482, 482, 482, 482, 482, 482, 482, 482, 482, 482, 482, 482, 482, 482, 482, 482, 482, 482, 482, 482, 482, 482, 482, 482, 482, 482, 482, 482, 482, 482, 482, 482, 482, 482, 482, 482, 482, 482, 482, 482, 482, 482, 482, 482, 482, 482, 482, 482, 482, 482, 482, 482, 482, 482, 482, 482, 482, 482, 482, 482, 482, 482, 482, 482, 482, 482, 482, 482, 482, 482, 482, 482, 374: 482, 482, 482, 482, 482, 482, 482, 482, 482, 482, 482, 482, 482, 482, 482, 482, 482, 482, 482, 482, 482, 482, 482, 482, 482, 482, 401: 482, 482, 482, 407: 482, 482, 482, 411: 482, 482, 482, 482, 482, 482, 482, 482, 421: 482, 482, 482, 482, 482, 482, 482, 482, 432: 482, 434: 482}, + {1309, 1332, 1217, 1442, 1436, 1426, 10: 1280, 1625, 1477, 1511, 1504, 1497, 1507, 1500, 1499, 1501, 1517, 1509, 1503, 1515, 1516, 1513, 1514, 1502, 1498, 1505, 1506, 1508, 1512, 1510, 1547, 1453, 1451, 1452, 1314, 1623, 1226, 1441, 1244, 1288, 1246, 1225, 1628, 1263, 1434, 1299, 1335, 1522, 1521, 1270, 1338, 1298, 1476, 1221, 1231, 1340, 1439, 1341, 1257, 1518, 1519, 1438, 1326, 1350, 1273, 1278, 1430, 1431, 1283, 1289, 1384, 1632, 1432, 1433, 1219, 1222, 1224, 1223, 1238, 1626, 1482, 1427, 1243, 1249, 1261, 1262, 1250, 1485, 1405, 1318, 1319, 1279, 1450, 1290, 1631, 1630, 1415, 1295, 1300, 1633, 1402, 1214, 1529, 1215, 1218, 1460, 1387, 1304, 1624, 1310, 1348, 1349, 1345, 1530, 1531, 1532, 1406, 1576, 1478, 1479, 1467, 1480, 1227, 1394, 1533, 1312, 1396, 1228, 1381, 1481, 1640, 1636, 1230, 1329, 1232, 1233, 1313, 1311, 1234, 1408, 1534, 1535, 1404, 1235, 1536, 1468, 1236, 1537, 1538, 1627, 1240, 1388, 1324, 1483, 1417, 1241, 1484, 1242, 1245, 1247, 1248, 1251, 1386, 1351, 1252, 1577, 
1435, 1356, 1253, 1461, 1401, 1574, 1254, 1539, 1411, 1255, 1256, 1580, 1258, 1259, 1346, 1540, 1322, 1541, 1418, 1459, 1629, 1307, 1622, 1462, 1403, 1337, 1542, 1265, 1543, 1544, 1389, 1407, 1412, 1325, 1398, 1486, 1457, 1268, 1266, 1334, 1419, 1267, 1456, 1458, 1315, 1546, 1473, 1472, 1376, 1377, 1316, 1378, 1379, 1390, 1642, 1545, 1317, 1643, 1463, 1302, 1641, 1269, 1400, 1573, 1344, 1466, 1469, 1420, 1487, 1488, 1464, 1465, 1353, 1470, 1548, 1454, 1354, 1331, 1285, 1524, 1575, 1410, 1422, 1425, 1352, 1271, 1475, 1474, 1525, 1367, 1550, 1368, 1272, 1343, 1362, 1363, 1364, 1489, 1637, 1370, 1369, 1274, 1549, 1395, 1275, 1528, 1527, 1383, 1424, 1276, 1437, 1327, 1455, 1380, 1328, 1638, 1277, 1385, 1639, 1320, 1490, 1644, 1429, 1393, 1372, 1471, 1333, 1373, 1374, 1281, 1423, 1382, 1375, 1282, 1305, 1414, 1523, 1416, 1336, 1339, 1443, 1444, 1445, 1446, 1447, 1448, 1449, 1578, 1491, 1358, 1494, 1495, 1493, 1492, 1357, 1428, 1284, 1554, 1555, 1556, 1557, 1579, 1551, 1397, 1287, 1286, 1552, 1553, 1355, 1413, 1409, 1421, 1440, 1391, 1291, 1496, 1561, 1562, 1563, 1564, 1565, 1566, 1568, 1567, 1569, 1570, 1571, 1520, 1294, 1323, 1572, 1297, 1330, 1392, 1635, 1558, 1559, 1560, 1347, 1634, 1526, 1399, 1618, 1671, 1674, 376: 1646, 378: 1655, 1669, 1668, 1699, 400: 1658, 404: 1616, 1677, 1650, 410: 1682, 419: 1645, 1647, 429: 1675, 1649, 1648, 433: 1676, 435: 1654, 1680, 1689, 1711, 1653, 1690, 1691, 1652, 1666, 1667, 1705, 1697, 1700, 1706, 1707, 1702, 1703, 1708, 1701, 1704, 1695, 1673, 1685, 1686, 1688, 1684, 1678, 1679, 1670, 1681, 1683, 1672, 1687, 1692, 1693, 523: 1657, 1212, 1213, 1211, 1663, 1659, 1651, 1662, 1660, 1661, 1694, 1698, 1696, 1656, 1665, 1709, 1710, 1664, 1621, 1620, 1619, 1862}, + {8: 1863, 392: 1720, 1719, 395: 1718, 1717, 1715, 547: 1716, 1714}, + {368, 368, 368, 368, 368, 368, 368, 368, 368, 368, 368, 368, 368, 368, 368, 368, 368, 368, 368, 368, 368, 368, 368, 368, 368, 368, 368, 368, 368, 368, 368, 368, 368, 368, 368, 368, 368, 368, 368, 368, 368, 368, 368, 368, 368, 368, 368, 368, 368, 368, 368, 368, 368, 368, 368, 368, 368, 368, 368, 368, 368, 368, 368, 368, 368, 368, 368, 368, 368, 368, 368, 368, 368, 368, 368, 368, 368, 368, 368, 368, 368, 368, 368, 368, 368, 368, 368, 368, 368, 368, 368, 368, 368, 368, 368, 368, 368, 368, 368, 368, 368, 368, 368, 368, 368, 368, 368, 368, 368, 368, 368, 368, 368, 368, 368, 368, 368, 368, 368, 368, 368, 368, 368, 368, 368, 368, 368, 368, 368, 368, 368, 368, 368, 368, 368, 368, 368, 368, 368, 368, 368, 368, 368, 368, 368, 368, 368, 368, 368, 368, 368, 368, 368, 368, 368, 368, 368, 368, 368, 368, 368, 368, 368, 368, 368, 368, 368, 368, 368, 368, 368, 368, 368, 368, 368, 368, 368, 368, 368, 368, 368, 368, 368, 368, 368, 368, 368, 368, 368, 368, 368, 368, 368, 368, 368, 368, 368, 368, 368, 368, 368, 368, 368, 368, 368, 368, 368, 368, 368, 368, 368, 368, 368, 368, 368, 368, 368, 368, 368, 368, 368, 368, 368, 368, 368, 368, 368, 368, 368, 368, 368, 368, 368, 368, 368, 368, 368, 368, 368, 368, 368, 368, 368, 368, 368, 368, 368, 368, 368, 368, 368, 368, 368, 368, 368, 368, 368, 368, 368, 368, 368, 368, 368, 368, 368, 368, 368, 368, 368, 368, 368, 368, 368, 368, 368, 368, 368, 368, 368, 368, 368, 368, 368, 368, 368, 368, 368, 368, 368, 368, 368, 368, 368, 368, 368, 368, 368, 368, 368, 368, 368, 368, 368, 368, 368, 368, 368, 368, 368, 368, 368, 368, 368, 368, 368, 368, 368, 368, 368, 368, 368, 368, 368, 368, 368, 368, 368, 368, 368, 368, 368, 368, 368, 368, 368, 368, 368, 368, 368, 368, 368, 368, 368, 368, 368, 368, 368, 368, 368, 368, 368, 368, 
368, 368, 368, 368, 368, 368, 368, 368, 368, 368, 368, 368, 368, 368, 368, 368, 368, 368, 368, 368, 368, 375: 368, 377: 368, 368, 368, 368, 368, 384: 368, 386: 368, 391: 368, 368, 368, 368, 368, 368, 368, 368, 368, 401: 368, 368, 368, 407: 368, 368, 368, 411: 368, 368, 368, 368, 368, 368, 368, 368, 421: 368, 368, 368, 368, 368, 368, 368, 368, 432: 368, 434: 368}, + // 705 + {1309, 1332, 1217, 1442, 1436, 1426, 8: 962, 10: 1280, 1625, 1477, 1511, 1504, 1497, 1507, 1500, 1499, 1501, 1517, 1509, 1503, 1515, 1516, 1513, 1514, 1502, 1498, 1505, 1506, 1508, 1512, 1510, 1547, 1453, 1451, 1452, 1314, 1623, 1226, 1441, 1244, 1288, 1246, 1225, 1628, 1263, 1434, 1299, 1335, 1522, 1521, 1270, 1338, 1298, 1476, 1221, 1231, 1340, 1439, 1341, 1257, 1518, 1519, 1438, 1326, 1350, 1273, 1278, 1430, 1431, 1283, 1289, 1384, 1632, 1432, 1433, 1219, 1222, 1224, 1223, 1238, 1626, 1482, 1427, 1243, 1249, 1261, 1262, 1250, 1485, 1405, 1318, 1319, 1279, 1450, 1290, 1631, 1630, 1415, 1295, 1300, 1633, 1402, 1214, 1529, 1215, 1218, 1460, 1387, 1304, 1624, 1310, 1348, 1349, 1345, 1530, 1531, 1532, 1406, 1576, 1478, 1479, 1467, 1480, 1227, 1394, 1533, 1312, 1396, 1228, 1381, 1481, 1640, 1636, 1230, 1329, 1232, 1233, 1313, 1311, 1234, 1408, 1534, 1535, 1404, 1235, 1536, 1468, 1236, 1537, 1538, 1627, 1240, 1388, 1324, 1483, 1417, 1241, 1484, 1242, 1245, 1247, 1248, 1251, 1386, 1351, 1252, 1577, 1435, 1356, 1253, 1461, 1401, 1574, 1254, 1539, 1411, 1255, 1256, 1580, 1258, 1259, 1346, 1540, 1322, 1541, 1418, 1459, 1629, 1307, 1622, 1462, 1403, 1337, 1542, 1265, 1543, 1544, 1389, 1407, 1412, 1325, 1398, 1486, 1457, 1268, 1266, 1334, 1419, 1267, 1456, 1458, 1315, 1546, 1473, 1472, 1376, 1377, 1316, 1378, 1379, 1390, 1642, 1545, 1317, 1643, 1463, 1302, 1641, 1269, 1400, 1573, 1344, 1466, 1469, 1420, 1487, 1488, 1464, 1465, 1353, 1470, 1548, 1454, 1354, 1331, 1285, 1524, 1575, 1410, 1422, 1425, 1352, 1271, 1475, 1474, 1525, 1367, 1550, 1368, 1272, 1343, 1362, 1363, 1364, 1489, 1637, 1370, 1369, 1274, 1549, 1395, 1275, 1528, 1527, 1383, 1424, 1276, 1437, 1327, 1455, 1380, 1328, 1638, 1277, 1385, 1639, 1320, 1490, 1644, 1429, 1393, 1372, 1471, 1333, 1373, 1374, 1281, 1423, 1382, 1375, 1282, 1305, 1414, 1523, 1416, 1336, 1339, 1443, 1444, 1445, 1446, 1447, 1448, 1449, 1578, 1491, 1358, 1494, 1495, 1493, 1492, 1357, 1428, 1284, 1554, 1555, 1556, 1557, 1579, 1551, 1397, 1287, 1286, 1552, 1553, 1355, 1413, 1409, 1421, 1440, 1391, 1291, 1496, 1561, 1562, 1563, 1564, 1565, 1566, 1568, 1567, 1569, 1570, 1571, 1520, 1294, 1323, 1572, 1297, 1330, 1392, 1635, 1558, 1559, 1560, 1347, 1634, 1526, 1399, 1618, 1671, 1674, 376: 1646, 378: 1655, 1669, 1668, 1699, 400: 1658, 404: 1616, 1677, 1650, 410: 1682, 419: 1645, 1647, 429: 1675, 1649, 1648, 433: 1676, 435: 1654, 1680, 1689, 1711, 1653, 1690, 1691, 1652, 1666, 1667, 1705, 1697, 1700, 1706, 1707, 1702, 1703, 1708, 1701, 1704, 1695, 1673, 1685, 1686, 1688, 1684, 1678, 1679, 1670, 1681, 1683, 1672, 1687, 1692, 1693, 523: 1657, 1212, 1213, 1211, 1663, 1659, 1651, 1662, 1660, 1661, 1694, 1698, 1696, 1656, 1665, 1709, 1710, 1664, 1621, 1620, 1619, 1810, 586: 1811, 630: 1865}, + {8: 1866}, + {360, 360, 360, 360, 360, 360, 360, 360, 360, 360, 360, 360, 360, 360, 360, 360, 360, 360, 360, 360, 360, 360, 360, 360, 360, 360, 360, 360, 360, 360, 360, 360, 360, 360, 360, 360, 360, 360, 360, 360, 360, 360, 360, 360, 360, 360, 360, 360, 360, 360, 360, 360, 360, 360, 360, 360, 360, 360, 360, 360, 360, 360, 360, 360, 360, 360, 360, 360, 360, 360, 360, 360, 360, 360, 360, 360, 360, 360, 360, 360, 360, 360, 
360, 360, 360, 360, 360, 360, 360, 360, 360, 360, 360, 360, 360, 360, 360, 360, 360, 360, 360, 360, 360, 360, 360, 360, 360, 360, 360, 360, 360, 360, 360, 360, 360, 360, 360, 360, 360, 360, 360, 360, 360, 360, 360, 360, 360, 360, 360, 360, 360, 360, 360, 360, 360, 360, 360, 360, 360, 360, 360, 360, 360, 360, 360, 360, 360, 360, 360, 360, 360, 360, 360, 360, 360, 360, 360, 360, 360, 360, 360, 360, 360, 360, 360, 360, 360, 360, 360, 360, 360, 360, 360, 360, 360, 360, 360, 360, 360, 360, 360, 360, 360, 360, 360, 360, 360, 360, 360, 360, 360, 360, 360, 360, 360, 360, 360, 360, 360, 360, 360, 360, 360, 360, 360, 360, 360, 360, 360, 360, 360, 360, 360, 360, 360, 360, 360, 360, 360, 360, 360, 360, 360, 360, 360, 360, 360, 360, 360, 360, 360, 360, 360, 360, 360, 360, 360, 360, 360, 360, 360, 360, 360, 360, 360, 360, 360, 360, 360, 360, 360, 360, 360, 360, 360, 360, 360, 360, 360, 360, 360, 360, 360, 360, 360, 360, 360, 360, 360, 360, 360, 360, 360, 360, 360, 360, 360, 360, 360, 360, 360, 360, 360, 360, 360, 360, 360, 360, 360, 360, 360, 360, 360, 360, 360, 360, 360, 360, 360, 360, 360, 360, 360, 360, 360, 360, 360, 360, 360, 360, 360, 360, 360, 360, 360, 360, 360, 360, 360, 360, 360, 360, 360, 360, 360, 360, 360, 360, 360, 360, 360, 360, 360, 360, 360, 360, 360, 360, 360, 360, 360, 360, 360, 360, 360, 360, 360, 360, 360, 360, 360, 360, 360, 360, 360, 360, 360, 360, 360, 360, 360, 360, 360, 360, 360, 360, 360, 360, 360, 360, 360, 360, 360, 375: 360, 377: 360, 360, 360, 360, 360, 384: 360, 386: 360, 391: 360, 360, 360, 360, 360, 360, 360, 360, 360, 401: 360, 360, 360, 407: 360, 360, 360, 411: 360, 360, 360, 360, 360, 360, 360, 360, 421: 360, 360, 360, 360, 360, 360, 360, 360, 432: 360, 434: 360}, + {945, 945, 945, 945, 945, 945, 10: 945, 945, 945, 945, 945, 945, 945, 945, 945, 945, 945, 945, 945, 945, 945, 945, 945, 945, 945, 945, 945, 945, 945, 945, 945, 945, 945, 945, 945, 945, 945, 945, 945, 945, 945, 945, 945, 945, 945, 945, 945, 945, 945, 945, 945, 945, 945, 945, 945, 945, 945, 945, 945, 945, 945, 945, 945, 945, 945, 945, 945, 945, 945, 945, 945, 945, 945, 945, 945, 945, 945, 945, 945, 945, 945, 945, 945, 945, 945, 945, 945, 945, 945, 945, 945, 945, 945, 945, 945, 945, 945, 945, 945, 945, 945, 945, 945, 945, 945, 945, 945, 945, 945, 945, 945, 945, 945, 945, 945, 945, 945, 945, 945, 945, 945, 945, 945, 945, 945, 945, 945, 945, 945, 945, 945, 945, 945, 945, 945, 945, 945, 945, 945, 945, 945, 945, 945, 945, 945, 945, 945, 945, 945, 945, 945, 945, 945, 945, 945, 945, 945, 945, 945, 945, 945, 945, 945, 945, 945, 945, 945, 945, 945, 945, 945, 945, 945, 945, 945, 945, 945, 945, 945, 945, 945, 945, 945, 945, 945, 945, 945, 945, 945, 945, 945, 945, 945, 945, 945, 945, 945, 945, 945, 945, 945, 945, 945, 945, 945, 945, 945, 945, 945, 945, 945, 945, 945, 945, 945, 945, 945, 945, 945, 945, 945, 945, 945, 945, 945, 945, 945, 945, 945, 945, 945, 945, 945, 945, 945, 945, 945, 945, 945, 945, 945, 945, 945, 945, 945, 945, 945, 945, 945, 945, 945, 945, 945, 945, 945, 945, 945, 945, 945, 945, 945, 945, 945, 945, 945, 945, 945, 945, 945, 945, 945, 945, 945, 945, 945, 945, 945, 945, 945, 945, 945, 945, 945, 945, 945, 945, 945, 945, 945, 945, 945, 945, 945, 945, 945, 945, 945, 945, 945, 945, 945, 945, 945, 945, 945, 945, 945, 945, 945, 945, 945, 945, 945, 945, 945, 945, 945, 945, 945, 945, 945, 945, 945, 945, 945, 945, 945, 945, 945, 945, 945, 945, 945, 945, 945, 945, 945, 945, 945, 945, 945, 945, 945, 945, 945, 945, 945, 945, 945, 945, 945, 945, 945, 945, 945, 945, 945, 945, 945, 945, 945, 945, 945, 945, 945, 945, 
945, 945, 373: 945, 945, 376: 945, 378: 945, 945, 945, 945, 400: 945, 404: 945, 945, 945, 410: 945, 419: 945, 945, 429: 945, 945, 945, 433: 945, 435: 945, 945, 945, 945, 945, 945, 945, 945, 945, 945, 945, 945, 945, 945, 945, 945, 945, 945, 945, 945, 945, 945, 945, 945, 945, 945, 945, 945, 945, 945, 945, 945, 945, 945, 945}, + {432: 1879, 434: 1878}, + // 710 + {373: 941}, + {373: 1875}, + {1309, 1332, 1217, 1442, 1436, 1426, 10: 1280, 1625, 1477, 1511, 1504, 1497, 1507, 1500, 1499, 1501, 1517, 1509, 1503, 1515, 1516, 1513, 1514, 1502, 1498, 1505, 1506, 1508, 1512, 1510, 1547, 1453, 1451, 1452, 1314, 1623, 1226, 1441, 1244, 1288, 1246, 1225, 1628, 1263, 1434, 1299, 1335, 1522, 1521, 1270, 1338, 1298, 1476, 1221, 1231, 1340, 1439, 1341, 1257, 1518, 1519, 1438, 1326, 1350, 1273, 1278, 1430, 1431, 1283, 1289, 1384, 1632, 1432, 1433, 1219, 1222, 1224, 1223, 1238, 1626, 1482, 1427, 1243, 1249, 1261, 1262, 1250, 1485, 1405, 1318, 1319, 1279, 1450, 1290, 1631, 1630, 1415, 1295, 1300, 1633, 1402, 1214, 1529, 1215, 1218, 1460, 1387, 1304, 1624, 1310, 1348, 1349, 1345, 1530, 1531, 1532, 1406, 1576, 1478, 1479, 1467, 1480, 1227, 1394, 1533, 1312, 1396, 1228, 1381, 1481, 1640, 1636, 1230, 1329, 1232, 1233, 1313, 1311, 1234, 1408, 1534, 1535, 1404, 1235, 1536, 1468, 1236, 1537, 1538, 1627, 1240, 1388, 1324, 1483, 1417, 1241, 1484, 1242, 1245, 1247, 1248, 1251, 1386, 1351, 1252, 1577, 1435, 1356, 1253, 1461, 1401, 1574, 1254, 1539, 1411, 1255, 1256, 1580, 1258, 1259, 1346, 1540, 1322, 1541, 1418, 1459, 1629, 1307, 1622, 1462, 1403, 1337, 1542, 1265, 1543, 1544, 1389, 1407, 1412, 1325, 1398, 1486, 1457, 1268, 1266, 1334, 1419, 1267, 1456, 1458, 1315, 1546, 1473, 1472, 1376, 1377, 1316, 1378, 1379, 1390, 1642, 1545, 1317, 1643, 1463, 1302, 1641, 1269, 1400, 1573, 1344, 1466, 1469, 1420, 1487, 1488, 1464, 1465, 1353, 1470, 1548, 1454, 1354, 1331, 1285, 1524, 1575, 1410, 1422, 1425, 1352, 1271, 1475, 1474, 1525, 1367, 1550, 1368, 1272, 1343, 1362, 1363, 1364, 1489, 1637, 1370, 1369, 1274, 1549, 1395, 1275, 1528, 1527, 1383, 1424, 1276, 1437, 1327, 1455, 1380, 1328, 1638, 1277, 1385, 1639, 1320, 1490, 1644, 1429, 1393, 1372, 1471, 1333, 1373, 1374, 1281, 1423, 1382, 1375, 1282, 1305, 1414, 1523, 1416, 1336, 1339, 1443, 1444, 1445, 1446, 1447, 1448, 1449, 1578, 1491, 1358, 1494, 1495, 1493, 1492, 1357, 1428, 1284, 1554, 1555, 1556, 1557, 1579, 1551, 1397, 1287, 1286, 1552, 1553, 1355, 1413, 1409, 1421, 1440, 1391, 1291, 1496, 1561, 1562, 1563, 1564, 1565, 1566, 1568, 1567, 1569, 1570, 1571, 1520, 1294, 1323, 1572, 1297, 1330, 1392, 1635, 1558, 1559, 1560, 1347, 1634, 1526, 1399, 373: 1671, 1674, 376: 1646, 378: 1655, 1669, 1668, 1699, 400: 1658, 404: 1758, 1677, 1650, 410: 1682, 419: 1645, 1647, 429: 1675, 1649, 1648, 433: 1676, 435: 1654, 1680, 1689, 1711, 1653, 1690, 1691, 1652, 1666, 1667, 1705, 1697, 1700, 1706, 1707, 1702, 1703, 1708, 1701, 1704, 1695, 1673, 1685, 1686, 1688, 1684, 1678, 1679, 1670, 1681, 1683, 1672, 1687, 1692, 1693, 523: 1657, 1212, 1213, 1211, 1663, 1659, 1651, 1662, 1660, 1661, 1694, 1698, 1696, 1656, 1665, 1709, 1710, 1664, 1872}, + {379: 1763, 1764, 1769, 392: 1873, 401: 1765, 421: 1767, 1760, 1766, 1770, 1759, 1768, 1761, 1762}, + {1309, 1332, 1217, 1442, 1436, 1426, 10: 1280, 1625, 1477, 1511, 1504, 1497, 1507, 1500, 1499, 1501, 1517, 1509, 1503, 1515, 1516, 1513, 1514, 1502, 1498, 1505, 1506, 1508, 1512, 1510, 1547, 1453, 1451, 1452, 1314, 1623, 1226, 1441, 1244, 1288, 1246, 1225, 1628, 1263, 1434, 1299, 1335, 1522, 1521, 1270, 1338, 1298, 1476, 1221, 1231, 1340, 1439, 1341, 
1257, 1518, 1519, 1438, 1326, 1350, 1273, 1278, 1430, 1431, 1283, 1289, 1384, 1632, 1432, 1433, 1219, 1222, 1224, 1223, 1238, 1626, 1482, 1427, 1243, 1249, 1261, 1262, 1250, 1485, 1405, 1318, 1319, 1279, 1450, 1290, 1631, 1630, 1415, 1295, 1300, 1633, 1402, 1214, 1529, 1215, 1218, 1460, 1387, 1304, 1624, 1310, 1348, 1349, 1345, 1530, 1531, 1532, 1406, 1576, 1478, 1479, 1467, 1480, 1227, 1394, 1533, 1312, 1396, 1228, 1381, 1481, 1640, 1636, 1230, 1329, 1232, 1233, 1313, 1311, 1234, 1408, 1534, 1535, 1404, 1235, 1536, 1468, 1236, 1537, 1538, 1627, 1240, 1388, 1324, 1483, 1417, 1241, 1484, 1242, 1245, 1247, 1248, 1251, 1386, 1351, 1252, 1577, 1435, 1356, 1253, 1461, 1401, 1574, 1254, 1539, 1411, 1255, 1256, 1580, 1258, 1259, 1346, 1540, 1322, 1541, 1418, 1459, 1629, 1307, 1622, 1462, 1403, 1337, 1542, 1265, 1543, 1544, 1389, 1407, 1412, 1325, 1398, 1486, 1457, 1268, 1266, 1334, 1419, 1267, 1456, 1458, 1315, 1546, 1473, 1472, 1376, 1377, 1316, 1378, 1379, 1390, 1642, 1545, 1317, 1643, 1463, 1302, 1641, 1269, 1400, 1573, 1344, 1466, 1469, 1420, 1487, 1488, 1464, 1465, 1353, 1470, 1548, 1454, 1354, 1331, 1285, 1524, 1575, 1410, 1422, 1425, 1352, 1271, 1475, 1474, 1525, 1367, 1550, 1368, 1272, 1343, 1362, 1363, 1364, 1489, 1637, 1370, 1369, 1274, 1549, 1395, 1275, 1528, 1527, 1383, 1424, 1276, 1437, 1327, 1455, 1380, 1328, 1638, 1277, 1385, 1639, 1320, 1490, 1644, 1429, 1393, 1372, 1471, 1333, 1373, 1374, 1281, 1423, 1382, 1375, 1282, 1305, 1414, 1523, 1416, 1336, 1339, 1443, 1444, 1445, 1446, 1447, 1448, 1449, 1578, 1491, 1358, 1494, 1495, 1493, 1492, 1357, 1428, 1284, 1554, 1555, 1556, 1557, 1579, 1551, 1397, 1287, 1286, 1552, 1553, 1355, 1413, 1409, 1421, 1440, 1391, 1291, 1496, 1561, 1562, 1563, 1564, 1565, 1566, 1568, 1567, 1569, 1570, 1571, 1520, 1294, 1323, 1572, 1297, 1330, 1392, 1635, 1558, 1559, 1560, 1347, 1634, 1526, 1399, 373: 1671, 1674, 376: 1646, 378: 1655, 1669, 1668, 1699, 400: 1658, 404: 1758, 1677, 1650, 410: 1682, 419: 1645, 1647, 429: 1675, 1649, 1648, 433: 1676, 435: 1654, 1680, 1689, 1711, 1653, 1690, 1691, 1652, 1666, 1667, 1705, 1697, 1700, 1706, 1707, 1702, 1703, 1708, 1701, 1704, 1695, 1673, 1685, 1686, 1688, 1684, 1678, 1679, 1670, 1681, 1683, 1672, 1687, 1692, 1693, 523: 1657, 1212, 1213, 1211, 1663, 1659, 1651, 1662, 1660, 1661, 1694, 1698, 1696, 1656, 1665, 1709, 1710, 1664, 1621, 1874}, + // 715 + {935, 935, 935, 935, 935, 935, 935, 935, 935, 935, 935, 935, 935, 935, 935, 935, 935, 935, 935, 935, 935, 935, 935, 935, 935, 935, 935, 935, 935, 935, 935, 935, 935, 935, 935, 935, 935, 935, 935, 935, 935, 935, 935, 935, 935, 935, 935, 935, 935, 935, 935, 935, 935, 935, 935, 935, 935, 935, 935, 935, 935, 935, 935, 935, 935, 935, 935, 935, 935, 935, 935, 935, 935, 935, 935, 935, 935, 935, 935, 935, 935, 935, 935, 935, 935, 935, 935, 935, 935, 935, 935, 935, 935, 935, 935, 935, 935, 935, 935, 935, 935, 935, 935, 935, 935, 935, 935, 935, 935, 935, 935, 935, 935, 935, 935, 935, 935, 935, 935, 935, 935, 935, 935, 935, 935, 935, 935, 935, 935, 935, 935, 935, 935, 935, 935, 935, 935, 935, 935, 935, 935, 935, 935, 935, 935, 935, 935, 935, 935, 935, 935, 935, 935, 935, 935, 935, 935, 935, 935, 935, 935, 935, 935, 935, 935, 935, 935, 935, 935, 935, 935, 935, 935, 935, 935, 935, 935, 935, 935, 935, 935, 935, 935, 935, 935, 935, 935, 935, 935, 935, 935, 935, 935, 935, 935, 935, 935, 935, 935, 935, 935, 935, 935, 935, 935, 935, 935, 935, 935, 935, 935, 935, 935, 935, 935, 935, 935, 935, 935, 935, 935, 935, 935, 935, 935, 935, 935, 935, 935, 935, 935, 935, 935, 935, 935, 935, 935, 
935, 935, 935, 935, 935, 935, 935, 935, 935, 935, 935, 935, 935, 935, 935, 935, 935, 935, 935, 935, 935, 935, 935, 935, 935, 935, 935, 935, 935, 935, 935, 935, 935, 935, 935, 935, 935, 935, 935, 935, 935, 935, 935, 935, 935, 935, 935, 935, 935, 935, 935, 935, 935, 935, 935, 935, 935, 935, 935, 935, 935, 935, 935, 935, 935, 935, 935, 935, 935, 935, 935, 935, 935, 935, 935, 935, 935, 935, 935, 935, 935, 935, 935, 935, 935, 935, 935, 935, 935, 935, 935, 935, 935, 935, 935, 935, 935, 935, 935, 935, 935, 935, 935, 935, 935, 935, 935, 935, 935, 935, 935, 935, 935, 935, 935, 935, 935, 935, 935, 935, 935, 935, 935, 935, 935, 935, 935, 935, 935, 935, 935, 935, 935, 935, 935, 375: 935, 378: 935, 384: 935, 386: 935, 391: 935, 935, 935, 935, 935, 935, 935, 935, 935, 402: 935, 935, 407: 935, 935, 935, 411: 935, 935, 935, 935, 935, 935, 935, 935}, + {1309, 1332, 1217, 1442, 1436, 1426, 10: 1280, 1625, 1477, 1511, 1504, 1497, 1507, 1500, 1499, 1501, 1517, 1509, 1503, 1515, 1516, 1513, 1514, 1502, 1498, 1505, 1506, 1508, 1512, 1510, 1547, 1453, 1451, 1452, 1314, 1623, 1226, 1441, 1244, 1288, 1246, 1225, 1628, 1263, 1434, 1299, 1335, 1522, 1521, 1270, 1338, 1298, 1476, 1221, 1231, 1340, 1439, 1341, 1257, 1518, 1519, 1438, 1326, 1350, 1273, 1278, 1430, 1431, 1283, 1289, 1384, 1632, 1432, 1433, 1219, 1222, 1224, 1223, 1238, 1626, 1482, 1427, 1243, 1249, 1261, 1262, 1250, 1485, 1405, 1318, 1319, 1279, 1450, 1290, 1631, 1630, 1415, 1295, 1300, 1633, 1402, 1214, 1529, 1215, 1218, 1460, 1387, 1304, 1624, 1310, 1348, 1349, 1345, 1530, 1531, 1532, 1406, 1576, 1478, 1479, 1467, 1480, 1227, 1394, 1533, 1312, 1396, 1228, 1381, 1481, 1640, 1636, 1230, 1329, 1232, 1233, 1313, 1311, 1234, 1408, 1534, 1535, 1404, 1235, 1536, 1468, 1236, 1537, 1538, 1627, 1240, 1388, 1324, 1483, 1417, 1241, 1484, 1242, 1245, 1247, 1248, 1251, 1386, 1351, 1252, 1577, 1435, 1356, 1253, 1461, 1401, 1574, 1254, 1539, 1411, 1255, 1256, 1580, 1258, 1259, 1346, 1540, 1322, 1541, 1418, 1459, 1629, 1307, 1622, 1462, 1403, 1337, 1542, 1265, 1543, 1544, 1389, 1407, 1412, 1325, 1398, 1486, 1457, 1268, 1266, 1334, 1419, 1267, 1456, 1458, 1315, 1546, 1473, 1472, 1376, 1377, 1316, 1378, 1379, 1390, 1642, 1545, 1317, 1643, 1463, 1302, 1641, 1269, 1400, 1573, 1344, 1466, 1469, 1420, 1487, 1488, 1464, 1465, 1353, 1470, 1548, 1454, 1354, 1331, 1285, 1524, 1575, 1410, 1422, 1425, 1352, 1271, 1475, 1474, 1525, 1367, 1550, 1368, 1272, 1343, 1362, 1363, 1364, 1489, 1637, 1370, 1369, 1274, 1549, 1395, 1275, 1528, 1527, 1383, 1424, 1276, 1437, 1327, 1455, 1380, 1328, 1638, 1277, 1385, 1639, 1320, 1490, 1644, 1429, 1393, 1372, 1471, 1333, 1373, 1374, 1281, 1423, 1382, 1375, 1282, 1305, 1414, 1523, 1416, 1336, 1339, 1443, 1444, 1445, 1446, 1447, 1448, 1449, 1578, 1491, 1358, 1494, 1495, 1493, 1492, 1357, 1428, 1284, 1554, 1555, 1556, 1557, 1579, 1551, 1397, 1287, 1286, 1552, 1553, 1355, 1413, 1409, 1421, 1440, 1391, 1291, 1496, 1561, 1562, 1563, 1564, 1565, 1566, 1568, 1567, 1569, 1570, 1571, 1520, 1294, 1323, 1572, 1297, 1330, 1392, 1635, 1558, 1559, 1560, 1347, 1634, 1526, 1399, 1618, 1671, 1674, 376: 1646, 378: 1655, 1669, 1668, 1699, 400: 1658, 404: 1616, 1677, 1650, 410: 1682, 419: 1645, 1647, 429: 1675, 1649, 1648, 433: 1676, 435: 1654, 1680, 1689, 1711, 1653, 1690, 1691, 1652, 1666, 1667, 1705, 1697, 1700, 1706, 1707, 1702, 1703, 1708, 1701, 1704, 1695, 1673, 1685, 1686, 1688, 1684, 1678, 1679, 1670, 1681, 1683, 1672, 1687, 1692, 1693, 523: 1657, 1212, 1213, 1211, 1663, 1659, 1651, 1662, 1660, 1661, 1694, 1698, 1696, 1656, 1665, 1709, 1710, 1664, 1621, 1620, 
1619, 1810, 586: 1876}, + {8: 1877, 1814}, + {936, 936, 936, 936, 936, 936, 936, 936, 936, 936, 936, 936, 936, 936, 936, 936, 936, 936, 936, 936, 936, 936, 936, 936, 936, 936, 936, 936, 936, 936, 936, 936, 936, 936, 936, 936, 936, 936, 936, 936, 936, 936, 936, 936, 936, 936, 936, 936, 936, 936, 936, 936, 936, 936, 936, 936, 936, 936, 936, 936, 936, 936, 936, 936, 936, 936, 936, 936, 936, 936, 936, 936, 936, 936, 936, 936, 936, 936, 936, 936, 936, 936, 936, 936, 936, 936, 936, 936, 936, 936, 936, 936, 936, 936, 936, 936, 936, 936, 936, 936, 936, 936, 936, 936, 936, 936, 936, 936, 936, 936, 936, 936, 936, 936, 936, 936, 936, 936, 936, 936, 936, 936, 936, 936, 936, 936, 936, 936, 936, 936, 936, 936, 936, 936, 936, 936, 936, 936, 936, 936, 936, 936, 936, 936, 936, 936, 936, 936, 936, 936, 936, 936, 936, 936, 936, 936, 936, 936, 936, 936, 936, 936, 936, 936, 936, 936, 936, 936, 936, 936, 936, 936, 936, 936, 936, 936, 936, 936, 936, 936, 936, 936, 936, 936, 936, 936, 936, 936, 936, 936, 936, 936, 936, 936, 936, 936, 936, 936, 936, 936, 936, 936, 936, 936, 936, 936, 936, 936, 936, 936, 936, 936, 936, 936, 936, 936, 936, 936, 936, 936, 936, 936, 936, 936, 936, 936, 936, 936, 936, 936, 936, 936, 936, 936, 936, 936, 936, 936, 936, 936, 936, 936, 936, 936, 936, 936, 936, 936, 936, 936, 936, 936, 936, 936, 936, 936, 936, 936, 936, 936, 936, 936, 936, 936, 936, 936, 936, 936, 936, 936, 936, 936, 936, 936, 936, 936, 936, 936, 936, 936, 936, 936, 936, 936, 936, 936, 936, 936, 936, 936, 936, 936, 936, 936, 936, 936, 936, 936, 936, 936, 936, 936, 936, 936, 936, 936, 936, 936, 936, 936, 936, 936, 936, 936, 936, 936, 936, 936, 936, 936, 936, 936, 936, 936, 936, 936, 936, 936, 936, 936, 936, 936, 936, 936, 936, 936, 936, 936, 936, 936, 936, 936, 936, 936, 936, 936, 936, 936, 936, 936, 936, 936, 936, 936, 936, 936, 936, 936, 936, 936, 936, 936, 936, 936, 936, 936, 936, 936, 936, 936, 936, 936, 375: 936, 378: 936, 384: 936, 386: 936, 391: 936, 936, 936, 936, 936, 936, 936, 936, 936, 402: 936, 936, 407: 936, 936, 936, 411: 936, 936, 936, 936, 936, 936, 936, 936}, + {944, 944, 944, 944, 944, 944, 10: 944, 944, 944, 944, 944, 944, 944, 944, 944, 944, 944, 944, 944, 944, 944, 944, 944, 944, 944, 944, 944, 944, 944, 944, 944, 944, 944, 944, 944, 944, 944, 944, 944, 944, 944, 944, 944, 944, 944, 944, 944, 944, 944, 944, 944, 944, 944, 944, 944, 944, 944, 944, 944, 944, 944, 944, 944, 944, 944, 944, 944, 944, 944, 944, 944, 944, 944, 944, 944, 944, 944, 944, 944, 944, 944, 944, 944, 944, 944, 944, 944, 944, 944, 944, 944, 944, 944, 944, 944, 944, 944, 944, 944, 944, 944, 944, 944, 944, 944, 944, 944, 944, 944, 944, 944, 944, 944, 944, 944, 944, 944, 944, 944, 944, 944, 944, 944, 944, 944, 944, 944, 944, 944, 944, 944, 944, 944, 944, 944, 944, 944, 944, 944, 944, 944, 944, 944, 944, 944, 944, 944, 944, 944, 944, 944, 944, 944, 944, 944, 944, 944, 944, 944, 944, 944, 944, 944, 944, 944, 944, 944, 944, 944, 944, 944, 944, 944, 944, 944, 944, 944, 944, 944, 944, 944, 944, 944, 944, 944, 944, 944, 944, 944, 944, 944, 944, 944, 944, 944, 944, 944, 944, 944, 944, 944, 944, 944, 944, 944, 944, 944, 944, 944, 944, 944, 944, 944, 944, 944, 944, 944, 944, 944, 944, 944, 944, 944, 944, 944, 944, 944, 944, 944, 944, 944, 944, 944, 944, 944, 944, 944, 944, 944, 944, 944, 944, 944, 944, 944, 944, 944, 944, 944, 944, 944, 944, 944, 944, 944, 944, 944, 944, 944, 944, 944, 944, 944, 944, 944, 944, 944, 944, 944, 944, 944, 944, 944, 944, 944, 944, 944, 944, 944, 944, 944, 944, 944, 944, 944, 944, 944, 944, 944, 944, 944, 944, 944, 944, 
944, 944, 944, 944, 944, 944, 944, 944, 944, 944, 944, 944, 944, 944, 944, 944, 944, 944, 944, 944, 944, 944, 944, 944, 944, 944, 944, 944, 944, 944, 944, 944, 944, 944, 944, 944, 944, 944, 944, 944, 944, 944, 944, 944, 944, 944, 944, 944, 944, 944, 944, 944, 944, 944, 944, 944, 944, 944, 944, 944, 944, 944, 944, 944, 944, 944, 944, 944, 944, 944, 944, 944, 944, 944, 373: 944, 944, 376: 944, 378: 944, 944, 944, 944, 400: 944, 404: 944, 944, 944, 410: 944, 419: 944, 944, 429: 944, 944, 944, 433: 944, 435: 944, 944, 944, 944, 944, 944, 944, 944, 944, 944, 944, 944, 944, 944, 944, 944, 944, 944, 944, 944, 944, 944, 944, 944, 944, 944, 944, 944, 944, 944, 944, 944, 944, 944, 944}, + // 720 + {373: 940}, + {55: 1896, 376: 1897}, + {1309, 1332, 1217, 1442, 1436, 1426, 10: 1280, 1625, 1477, 1511, 1504, 1497, 1507, 1500, 1499, 1501, 1517, 1509, 1503, 1515, 1516, 1513, 1514, 1502, 1498, 1505, 1506, 1508, 1512, 1510, 1547, 1453, 1451, 1452, 1314, 1623, 1226, 1441, 1244, 1288, 1246, 1225, 1628, 1263, 1434, 1299, 1335, 1522, 1521, 1270, 1338, 1298, 1476, 1221, 1231, 1340, 1439, 1341, 1257, 1518, 1519, 1438, 1326, 1350, 1273, 1278, 1430, 1431, 1283, 1289, 1384, 1632, 1432, 1433, 1219, 1222, 1224, 1223, 1238, 1626, 1482, 1427, 1243, 1249, 1261, 1262, 1250, 1485, 1405, 1318, 1319, 1279, 1450, 1290, 1631, 1630, 1415, 1295, 1300, 1633, 1402, 1214, 1529, 1215, 1218, 1460, 1387, 1304, 1624, 1310, 1348, 1349, 1345, 1530, 1531, 1532, 1406, 1576, 1478, 1479, 1467, 1480, 1227, 1394, 1533, 1312, 1396, 1228, 1381, 1481, 1640, 1636, 1230, 1329, 1232, 1233, 1313, 1311, 1234, 1408, 1534, 1535, 1404, 1235, 1536, 1468, 1236, 1537, 1538, 1627, 1240, 1388, 1324, 1483, 1417, 1241, 1484, 1242, 1245, 1247, 1248, 1251, 1386, 1351, 1252, 1577, 1435, 1356, 1253, 1461, 1401, 1574, 1254, 1539, 1411, 1255, 1256, 1580, 1258, 1259, 1346, 1540, 1322, 1541, 1418, 1459, 1629, 1307, 1622, 1462, 1403, 1337, 1542, 1265, 1543, 1544, 1389, 1407, 1412, 1325, 1398, 1486, 1457, 1268, 1266, 1334, 1419, 1267, 1456, 1458, 1315, 1546, 1473, 1472, 1376, 1377, 1316, 1378, 1379, 1390, 1642, 1545, 1317, 1643, 1463, 1302, 1641, 1269, 1400, 1573, 1344, 1466, 1469, 1420, 1487, 1488, 1464, 1465, 1353, 1470, 1548, 1454, 1354, 1331, 1285, 1524, 1575, 1410, 1422, 1425, 1352, 1271, 1475, 1474, 1525, 1367, 1550, 1368, 1272, 1343, 1362, 1363, 1364, 1489, 1637, 1370, 1369, 1274, 1549, 1395, 1275, 1528, 1527, 1383, 1424, 1276, 1437, 1327, 1455, 1380, 1328, 1638, 1277, 1385, 1639, 1320, 1490, 1644, 1429, 1393, 1372, 1471, 1333, 1373, 1374, 1281, 1423, 1382, 1375, 1282, 1305, 1414, 1523, 1416, 1336, 1339, 1443, 1444, 1445, 1446, 1447, 1448, 1449, 1578, 1491, 1358, 1494, 1495, 1493, 1492, 1357, 1428, 1284, 1554, 1555, 1556, 1557, 1579, 1551, 1397, 1287, 1286, 1552, 1553, 1355, 1413, 1409, 1421, 1440, 1391, 1291, 1496, 1561, 1562, 1563, 1564, 1565, 1566, 1568, 1567, 1569, 1570, 1571, 1520, 1294, 1323, 1572, 1297, 1330, 1392, 1635, 1558, 1559, 1560, 1347, 1634, 1526, 1399, 373: 1671, 1674, 376: 1646, 378: 1655, 1669, 1668, 1699, 400: 1658, 404: 1893, 1677, 1650, 410: 1682, 419: 1645, 1647, 429: 1675, 1649, 1648, 433: 1676, 435: 1654, 1680, 1689, 1711, 1653, 1690, 1691, 1652, 1666, 1667, 1705, 1697, 1700, 1706, 1707, 1702, 1703, 1708, 1701, 1704, 1695, 1673, 1685, 1686, 1688, 1684, 1678, 1679, 1670, 1681, 1683, 1672, 1687, 1692, 1693, 523: 1657, 1212, 1213, 1211, 1663, 1659, 1651, 1662, 1660, 1661, 1694, 1698, 1696, 1656, 1665, 1709, 1710, 1664, 1621, 1892}, + {953, 953, 953, 953, 953, 953, 10: 953, 953, 953, 953, 953, 953, 953, 953, 953, 953, 953, 953, 953, 953, 953, 
953, 953, 953, 953, 953, 953, 953, 953, 953, 953, 953, 953, 953, 953, 953, 953, 953, 953, 953, 953, 953, 953, 953, 953, 953, 953, 953, 953, 953, 953, 953, 953, 953, 953, 953, 953, 953, 953, 953, 953, 953, 953, 953, 953, 953, 953, 953, 953, 953, 953, 953, 953, 953, 953, 953, 953, 953, 953, 953, 953, 953, 953, 953, 953, 953, 953, 953, 953, 953, 953, 953, 953, 953, 953, 953, 953, 953, 953, 953, 953, 953, 953, 953, 953, 953, 953, 953, 953, 953, 953, 953, 953, 953, 953, 953, 953, 953, 953, 953, 953, 953, 953, 953, 953, 953, 953, 953, 953, 953, 953, 953, 953, 953, 953, 953, 953, 953, 953, 953, 953, 953, 953, 953, 953, 953, 953, 953, 953, 953, 953, 953, 953, 953, 953, 953, 953, 953, 953, 953, 953, 953, 953, 953, 953, 953, 953, 953, 953, 953, 953, 953, 953, 953, 953, 953, 953, 953, 953, 953, 953, 953, 953, 953, 953, 953, 953, 953, 953, 953, 953, 953, 953, 953, 953, 953, 953, 953, 953, 953, 953, 953, 953, 953, 953, 953, 953, 953, 953, 953, 953, 953, 953, 953, 953, 953, 953, 953, 953, 953, 953, 953, 953, 953, 953, 953, 953, 953, 953, 953, 953, 953, 953, 953, 953, 953, 953, 953, 953, 953, 953, 953, 953, 953, 953, 953, 953, 953, 953, 953, 953, 953, 953, 953, 953, 953, 953, 953, 953, 953, 953, 953, 953, 953, 953, 953, 953, 953, 953, 953, 953, 953, 953, 953, 953, 953, 953, 953, 953, 953, 953, 953, 953, 953, 953, 953, 953, 953, 953, 953, 953, 953, 953, 953, 953, 953, 953, 953, 953, 953, 953, 953, 953, 953, 953, 953, 953, 953, 953, 953, 953, 953, 953, 953, 953, 953, 953, 953, 953, 953, 953, 953, 953, 953, 953, 953, 953, 953, 953, 953, 953, 953, 953, 953, 953, 953, 953, 953, 953, 953, 953, 953, 953, 953, 953, 953, 953, 953, 953, 953, 953, 953, 953, 953, 953, 953, 953, 953, 953, 953, 953, 953, 953, 953, 953, 953, 953, 953, 373: 953, 953, 376: 953, 378: 953, 953, 953, 953, 400: 953, 404: 953, 953, 953, 410: 953, 419: 953, 953, 429: 953, 953, 953, 433: 953, 435: 953, 953, 953, 953, 953, 953, 953, 953, 953, 953, 953, 953, 953, 953, 953, 953, 953, 953, 953, 953, 953, 953, 953, 953, 953, 953, 953, 953, 953, 953, 953, 953, 953, 953, 953}, + {952, 952, 952, 952, 952, 952, 10: 952, 952, 952, 952, 952, 952, 952, 952, 952, 952, 952, 952, 952, 952, 952, 952, 952, 952, 952, 952, 952, 952, 952, 952, 952, 952, 952, 952, 952, 952, 952, 952, 952, 952, 952, 952, 952, 952, 952, 952, 952, 952, 952, 952, 952, 952, 952, 952, 952, 952, 952, 952, 952, 952, 952, 952, 952, 952, 952, 952, 952, 952, 952, 952, 952, 952, 952, 952, 952, 952, 952, 952, 952, 952, 952, 952, 952, 952, 952, 952, 952, 952, 952, 952, 952, 952, 952, 952, 952, 952, 952, 952, 952, 952, 952, 952, 952, 952, 952, 952, 952, 952, 952, 952, 952, 952, 952, 952, 952, 952, 952, 952, 952, 952, 952, 952, 952, 952, 952, 952, 952, 952, 952, 952, 952, 952, 952, 952, 952, 952, 952, 952, 952, 952, 952, 952, 952, 952, 952, 952, 952, 952, 952, 952, 952, 952, 952, 952, 952, 952, 952, 952, 952, 952, 952, 952, 952, 952, 952, 952, 952, 952, 952, 952, 952, 952, 952, 952, 952, 952, 952, 952, 952, 952, 952, 952, 952, 952, 952, 952, 952, 952, 952, 952, 952, 952, 952, 952, 952, 952, 952, 952, 952, 952, 952, 952, 952, 952, 952, 952, 952, 952, 952, 952, 952, 952, 952, 952, 952, 952, 952, 952, 952, 952, 952, 952, 952, 952, 952, 952, 952, 952, 952, 952, 952, 952, 952, 952, 952, 952, 952, 952, 952, 952, 952, 952, 952, 952, 952, 952, 952, 952, 952, 952, 952, 952, 952, 952, 952, 952, 952, 952, 952, 952, 952, 952, 952, 952, 952, 952, 952, 952, 952, 952, 952, 952, 952, 952, 952, 952, 952, 952, 952, 952, 952, 952, 952, 952, 952, 952, 952, 952, 952, 952, 952, 952, 952, 952, 952, 952, 952, 952, 952, 
952, 952, 952, 952, 952, 952, 952, 952, 952, 952, 952, 952, 952, 952, 952, 952, 952, 952, 952, 952, 952, 952, 952, 952, 952, 952, 952, 952, 952, 952, 952, 952, 952, 952, 952, 952, 952, 952, 952, 952, 952, 952, 952, 952, 952, 952, 952, 952, 952, 952, 952, 952, 952, 952, 952, 952, 952, 952, 952, 952, 952, 952, 952, 952, 952, 952, 952, 952, 952, 373: 952, 952, 376: 952, 378: 952, 952, 952, 952, 400: 952, 404: 952, 952, 952, 410: 952, 419: 952, 952, 429: 952, 952, 952, 433: 952, 435: 952, 952, 952, 952, 952, 952, 952, 952, 952, 952, 952, 952, 952, 952, 952, 952, 952, 952, 952, 952, 952, 952, 952, 952, 952, 952, 952, 952, 952, 952, 952, 952, 952, 952, 952}, + // 725 + {951, 951, 951, 951, 951, 951, 10: 951, 951, 951, 951, 951, 951, 951, 951, 951, 951, 951, 951, 951, 951, 951, 951, 951, 951, 951, 951, 951, 951, 951, 951, 951, 951, 951, 951, 951, 951, 951, 951, 951, 951, 951, 951, 951, 951, 951, 951, 951, 951, 951, 951, 951, 951, 951, 951, 951, 951, 951, 951, 951, 951, 951, 951, 951, 951, 951, 951, 951, 951, 951, 951, 951, 951, 951, 951, 951, 951, 951, 951, 951, 951, 951, 951, 951, 951, 951, 951, 951, 951, 951, 951, 951, 951, 951, 951, 951, 951, 951, 951, 951, 951, 951, 951, 951, 951, 951, 951, 951, 951, 951, 951, 951, 951, 951, 951, 951, 951, 951, 951, 951, 951, 951, 951, 951, 951, 951, 951, 951, 951, 951, 951, 951, 951, 951, 951, 951, 951, 951, 951, 951, 951, 951, 951, 951, 951, 951, 951, 951, 951, 951, 951, 951, 951, 951, 951, 951, 951, 951, 951, 951, 951, 951, 951, 951, 951, 951, 951, 951, 951, 951, 951, 951, 951, 951, 951, 951, 951, 951, 951, 951, 951, 951, 951, 951, 951, 951, 951, 951, 951, 951, 951, 951, 951, 951, 951, 951, 951, 951, 951, 951, 951, 951, 951, 951, 951, 951, 951, 951, 951, 951, 951, 951, 951, 951, 951, 951, 951, 951, 951, 951, 951, 951, 951, 951, 951, 951, 951, 951, 951, 951, 951, 951, 951, 951, 951, 951, 951, 951, 951, 951, 951, 951, 951, 951, 951, 951, 951, 951, 951, 951, 951, 951, 951, 951, 951, 951, 951, 951, 951, 951, 951, 951, 951, 951, 951, 951, 951, 951, 951, 951, 951, 951, 951, 951, 951, 951, 951, 951, 951, 951, 951, 951, 951, 951, 951, 951, 951, 951, 951, 951, 951, 951, 951, 951, 951, 951, 951, 951, 951, 951, 951, 951, 951, 951, 951, 951, 951, 951, 951, 951, 951, 951, 951, 951, 951, 951, 951, 951, 951, 951, 951, 951, 951, 951, 951, 951, 951, 951, 951, 951, 951, 951, 951, 951, 951, 951, 951, 951, 951, 951, 951, 951, 951, 951, 951, 951, 951, 951, 951, 951, 951, 951, 951, 951, 951, 951, 951, 951, 951, 951, 951, 951, 951, 951, 951, 951, 951, 951, 951, 373: 951, 951, 376: 951, 378: 951, 951, 951, 951, 400: 951, 404: 951, 951, 951, 410: 951, 419: 951, 951, 429: 951, 951, 951, 433: 951, 435: 951, 951, 951, 951, 951, 951, 951, 951, 951, 951, 951, 951, 951, 951, 951, 951, 951, 951, 951, 951, 951, 951, 951, 951, 951, 951, 951, 951, 951, 951, 951, 951, 951, 951, 951}, + {950, 950, 950, 950, 950, 950, 10: 950, 950, 950, 950, 950, 950, 950, 950, 950, 950, 950, 950, 950, 950, 950, 950, 950, 950, 950, 950, 950, 950, 950, 950, 950, 950, 950, 950, 950, 950, 950, 950, 950, 950, 950, 950, 950, 950, 950, 950, 950, 950, 950, 950, 950, 950, 950, 950, 950, 950, 950, 950, 950, 950, 950, 950, 950, 950, 950, 950, 950, 950, 950, 950, 950, 950, 950, 950, 950, 950, 950, 950, 950, 950, 950, 950, 950, 950, 950, 950, 950, 950, 950, 950, 950, 950, 950, 950, 950, 950, 950, 950, 950, 950, 950, 950, 950, 950, 950, 950, 950, 950, 950, 950, 950, 950, 950, 950, 950, 950, 950, 950, 950, 950, 950, 950, 950, 950, 950, 950, 950, 950, 950, 950, 950, 950, 950, 950, 950, 950, 950, 950, 950, 950, 950, 950, 950, 
950, 950, 950, 950, 950, 950, 950, 950, 950, 950, 950, 950, 950, 950, 950, 950, 950, 950, 950, 950, 950, 950, 950, 950, 950, 950, 950, 950, 950, 950, 950, 950, 950, 950, 950, 950, 950, 950, 950, 950, 950, 950, 950, 950, 950, 950, 950, 950, 950, 950, 950, 950, 950, 950, 950, 950, 950, 950, 950, 950, 950, 950, 950, 950, 950, 950, 950, 950, 950, 950, 950, 950, 950, 950, 950, 950, 950, 950, 950, 950, 950, 950, 950, 950, 950, 950, 950, 950, 950, 950, 950, 950, 950, 950, 950, 950, 950, 950, 950, 950, 950, 950, 950, 950, 950, 950, 950, 950, 950, 950, 950, 950, 950, 950, 950, 950, 950, 950, 950, 950, 950, 950, 950, 950, 950, 950, 950, 950, 950, 950, 950, 950, 950, 950, 950, 950, 950, 950, 950, 950, 950, 950, 950, 950, 950, 950, 950, 950, 950, 950, 950, 950, 950, 950, 950, 950, 950, 950, 950, 950, 950, 950, 950, 950, 950, 950, 950, 950, 950, 950, 950, 950, 950, 950, 950, 950, 950, 950, 950, 950, 950, 950, 950, 950, 950, 950, 950, 950, 950, 950, 950, 950, 950, 950, 950, 950, 950, 950, 950, 950, 950, 950, 950, 950, 950, 950, 950, 950, 950, 950, 950, 950, 950, 950, 950, 950, 950, 950, 950, 950, 950, 950, 950, 950, 950, 373: 950, 950, 376: 950, 378: 950, 950, 950, 950, 400: 950, 404: 950, 950, 950, 410: 950, 419: 950, 950, 429: 950, 950, 950, 433: 950, 435: 950, 950, 950, 950, 950, 950, 950, 950, 950, 950, 950, 950, 950, 950, 950, 950, 950, 950, 950, 950, 950, 950, 950, 950, 950, 950, 950, 950, 950, 950, 950, 950, 950, 950, 950}, + {949, 949, 949, 949, 949, 949, 10: 949, 949, 949, 949, 949, 949, 949, 949, 949, 949, 949, 949, 949, 949, 949, 949, 949, 949, 949, 949, 949, 949, 949, 949, 949, 949, 949, 949, 949, 949, 949, 949, 949, 949, 949, 949, 949, 949, 949, 949, 949, 949, 949, 949, 949, 949, 949, 949, 949, 949, 949, 949, 949, 949, 949, 949, 949, 949, 949, 949, 949, 949, 949, 949, 949, 949, 949, 949, 949, 949, 949, 949, 949, 949, 949, 949, 949, 949, 949, 949, 949, 949, 949, 949, 949, 949, 949, 949, 949, 949, 949, 949, 949, 949, 949, 949, 949, 949, 949, 949, 949, 949, 949, 949, 949, 949, 949, 949, 949, 949, 949, 949, 949, 949, 949, 949, 949, 949, 949, 949, 949, 949, 949, 949, 949, 949, 949, 949, 949, 949, 949, 949, 949, 949, 949, 949, 949, 949, 949, 949, 949, 949, 949, 949, 949, 949, 949, 949, 949, 949, 949, 949, 949, 949, 949, 949, 949, 949, 949, 949, 949, 949, 949, 949, 949, 949, 949, 949, 949, 949, 949, 949, 949, 949, 949, 949, 949, 949, 949, 949, 949, 949, 949, 949, 949, 949, 949, 949, 949, 949, 949, 949, 949, 949, 949, 949, 949, 949, 949, 949, 949, 949, 949, 949, 949, 949, 949, 949, 949, 949, 949, 949, 949, 949, 949, 949, 949, 949, 949, 949, 949, 949, 949, 949, 949, 949, 949, 949, 949, 949, 949, 949, 949, 949, 949, 949, 949, 949, 949, 949, 949, 949, 949, 949, 949, 949, 949, 949, 949, 949, 949, 949, 949, 949, 949, 949, 949, 949, 949, 949, 949, 949, 949, 949, 949, 949, 949, 949, 949, 949, 949, 949, 949, 949, 949, 949, 949, 949, 949, 949, 949, 949, 949, 949, 949, 949, 949, 949, 949, 949, 949, 949, 949, 949, 949, 949, 949, 949, 949, 949, 949, 949, 949, 949, 949, 949, 949, 949, 949, 949, 949, 949, 949, 949, 949, 949, 949, 949, 949, 949, 949, 949, 949, 949, 949, 949, 949, 949, 949, 949, 949, 949, 949, 949, 949, 949, 949, 949, 949, 949, 949, 949, 949, 949, 949, 949, 949, 949, 949, 949, 949, 949, 949, 949, 949, 949, 949, 949, 949, 949, 949, 949, 373: 949, 949, 376: 949, 378: 949, 949, 949, 949, 400: 949, 404: 949, 949, 949, 410: 949, 419: 949, 949, 429: 949, 949, 949, 433: 949, 435: 949, 949, 949, 949, 949, 949, 949, 949, 949, 949, 949, 949, 949, 949, 949, 949, 949, 949, 949, 949, 949, 949, 949, 949, 949, 
949, 949, 949, 949, 949, 949, 949, 949, 949, 949}, + {948, 948, 948, 948, 948, 948, 10: 948, 948, 948, 948, 948, 948, 948, 948, 948, 948, 948, 948, 948, 948, 948, 948, 948, 948, 948, 948, 948, 948, 948, 948, 948, 948, 948, 948, 948, 948, 948, 948, 948, 948, 948, 948, 948, 948, 948, 948, 948, 948, 948, 948, 948, 948, 948, 948, 948, 948, 948, 948, 948, 948, 948, 948, 948, 948, 948, 948, 948, 948, 948, 948, 948, 948, 948, 948, 948, 948, 948, 948, 948, 948, 948, 948, 948, 948, 948, 948, 948, 948, 948, 948, 948, 948, 948, 948, 948, 948, 948, 948, 948, 948, 948, 948, 948, 948, 948, 948, 948, 948, 948, 948, 948, 948, 948, 948, 948, 948, 948, 948, 948, 948, 948, 948, 948, 948, 948, 948, 948, 948, 948, 948, 948, 948, 948, 948, 948, 948, 948, 948, 948, 948, 948, 948, 948, 948, 948, 948, 948, 948, 948, 948, 948, 948, 948, 948, 948, 948, 948, 948, 948, 948, 948, 948, 948, 948, 948, 948, 948, 948, 948, 948, 948, 948, 948, 948, 948, 948, 948, 948, 948, 948, 948, 948, 948, 948, 948, 948, 948, 948, 948, 948, 948, 948, 948, 948, 948, 948, 948, 948, 948, 948, 948, 948, 948, 948, 948, 948, 948, 948, 948, 948, 948, 948, 948, 948, 948, 948, 948, 948, 948, 948, 948, 948, 948, 948, 948, 948, 948, 948, 948, 948, 948, 948, 948, 948, 948, 948, 948, 948, 948, 948, 948, 948, 948, 948, 948, 948, 948, 948, 948, 948, 948, 948, 948, 948, 948, 948, 948, 948, 948, 948, 948, 948, 948, 948, 948, 948, 948, 948, 948, 948, 948, 948, 948, 948, 948, 948, 948, 948, 948, 948, 948, 948, 948, 948, 948, 948, 948, 948, 948, 948, 948, 948, 948, 948, 948, 948, 948, 948, 948, 948, 948, 948, 948, 948, 948, 948, 948, 948, 948, 948, 948, 948, 948, 948, 948, 948, 948, 948, 948, 948, 948, 948, 948, 948, 948, 948, 948, 948, 948, 948, 948, 948, 948, 948, 948, 948, 948, 948, 948, 948, 948, 948, 948, 948, 948, 948, 948, 948, 948, 948, 948, 948, 948, 948, 948, 948, 948, 948, 948, 948, 948, 948, 948, 948, 948, 948, 948, 948, 373: 948, 948, 376: 948, 378: 948, 948, 948, 948, 400: 948, 404: 948, 948, 948, 410: 948, 419: 948, 948, 429: 948, 948, 948, 433: 948, 435: 948, 948, 948, 948, 948, 948, 948, 948, 948, 948, 948, 948, 948, 948, 948, 948, 948, 948, 948, 948, 948, 948, 948, 948, 948, 948, 948, 948, 948, 948, 948, 948, 948, 948, 948}, + {947, 947, 947, 947, 947, 947, 10: 947, 947, 947, 947, 947, 947, 947, 947, 947, 947, 947, 947, 947, 947, 947, 947, 947, 947, 947, 947, 947, 947, 947, 947, 947, 947, 947, 947, 947, 947, 947, 947, 947, 947, 947, 947, 947, 947, 947, 947, 947, 947, 947, 947, 947, 947, 947, 947, 947, 947, 947, 947, 947, 947, 947, 947, 947, 947, 947, 947, 947, 947, 947, 947, 947, 947, 947, 947, 947, 947, 947, 947, 947, 947, 947, 947, 947, 947, 947, 947, 947, 947, 947, 947, 947, 947, 947, 947, 947, 947, 947, 947, 947, 947, 947, 947, 947, 947, 947, 947, 947, 947, 947, 947, 947, 947, 947, 947, 947, 947, 947, 947, 947, 947, 947, 947, 947, 947, 947, 947, 947, 947, 947, 947, 947, 947, 947, 947, 947, 947, 947, 947, 947, 947, 947, 947, 947, 947, 947, 947, 947, 947, 947, 947, 947, 947, 947, 947, 947, 947, 947, 947, 947, 947, 947, 947, 947, 947, 947, 947, 947, 947, 947, 947, 947, 947, 947, 947, 947, 947, 947, 947, 947, 947, 947, 947, 947, 947, 947, 947, 947, 947, 947, 947, 947, 947, 947, 947, 947, 947, 947, 947, 947, 947, 947, 947, 947, 947, 947, 947, 947, 947, 947, 947, 947, 947, 947, 947, 947, 947, 947, 947, 947, 947, 947, 947, 947, 947, 947, 947, 947, 947, 947, 947, 947, 947, 947, 947, 947, 947, 947, 947, 947, 947, 947, 947, 947, 947, 947, 947, 947, 947, 947, 947, 947, 947, 947, 947, 947, 947, 947, 947, 947, 947, 947, 947, 947, 947, 947, 947, 
947, 947, 947, 947, 947, 947, 947, 947, 947, 947, 947, 947, 947, 947, 947, 947, 947, 947, 947, 947, 947, 947, 947, 947, 947, 947, 947, 947, 947, 947, 947, 947, 947, 947, 947, 947, 947, 947, 947, 947, 947, 947, 947, 947, 947, 947, 947, 947, 947, 947, 947, 947, 947, 947, 947, 947, 947, 947, 947, 947, 947, 947, 947, 947, 947, 947, 947, 947, 947, 947, 947, 947, 947, 947, 947, 947, 947, 947, 947, 947, 947, 947, 947, 947, 947, 947, 947, 947, 947, 947, 947, 947, 947, 947, 947, 947, 947, 947, 947, 947, 947, 947, 373: 947, 947, 376: 947, 378: 947, 947, 947, 947, 400: 947, 404: 947, 947, 947, 410: 947, 419: 947, 947, 429: 947, 947, 947, 433: 947, 435: 947, 947, 947, 947, 947, 947, 947, 947, 947, 947, 947, 947, 947, 947, 947, 947, 947, 947, 947, 947, 947, 947, 947, 947, 947, 947, 947, 947, 947, 947, 947, 947, 947, 947, 947}, + // 730 + {946, 946, 946, 946, 946, 946, 10: 946, 946, 946, 946, 946, 946, 946, 946, 946, 946, 946, 946, 946, 946, 946, 946, 946, 946, 946, 946, 946, 946, 946, 946, 946, 946, 946, 946, 946, 946, 946, 946, 946, 946, 946, 946, 946, 946, 946, 946, 946, 946, 946, 946, 946, 946, 946, 946, 946, 946, 946, 946, 946, 946, 946, 946, 946, 946, 946, 946, 946, 946, 946, 946, 946, 946, 946, 946, 946, 946, 946, 946, 946, 946, 946, 946, 946, 946, 946, 946, 946, 946, 946, 946, 946, 946, 946, 946, 946, 946, 946, 946, 946, 946, 946, 946, 946, 946, 946, 946, 946, 946, 946, 946, 946, 946, 946, 946, 946, 946, 946, 946, 946, 946, 946, 946, 946, 946, 946, 946, 946, 946, 946, 946, 946, 946, 946, 946, 946, 946, 946, 946, 946, 946, 946, 946, 946, 946, 946, 946, 946, 946, 946, 946, 946, 946, 946, 946, 946, 946, 946, 946, 946, 946, 946, 946, 946, 946, 946, 946, 946, 946, 946, 946, 946, 946, 946, 946, 946, 946, 946, 946, 946, 946, 946, 946, 946, 946, 946, 946, 946, 946, 946, 946, 946, 946, 946, 946, 946, 946, 946, 946, 946, 946, 946, 946, 946, 946, 946, 946, 946, 946, 946, 946, 946, 946, 946, 946, 946, 946, 946, 946, 946, 946, 946, 946, 946, 946, 946, 946, 946, 946, 946, 946, 946, 946, 946, 946, 946, 946, 946, 946, 946, 946, 946, 946, 946, 946, 946, 946, 946, 946, 946, 946, 946, 946, 946, 946, 946, 946, 946, 946, 946, 946, 946, 946, 946, 946, 946, 946, 946, 946, 946, 946, 946, 946, 946, 946, 946, 946, 946, 946, 946, 946, 946, 946, 946, 946, 946, 946, 946, 946, 946, 946, 946, 946, 946, 946, 946, 946, 946, 946, 946, 946, 946, 946, 946, 946, 946, 946, 946, 946, 946, 946, 946, 946, 946, 946, 946, 946, 946, 946, 946, 946, 946, 946, 946, 946, 946, 946, 946, 946, 946, 946, 946, 946, 946, 946, 946, 946, 946, 946, 946, 946, 946, 946, 946, 946, 946, 946, 946, 946, 946, 946, 946, 946, 946, 946, 946, 946, 946, 946, 946, 946, 946, 946, 946, 946, 946, 946, 946, 946, 373: 946, 946, 376: 946, 378: 946, 946, 946, 946, 400: 946, 404: 946, 946, 946, 410: 946, 419: 946, 946, 429: 946, 946, 946, 433: 946, 435: 946, 946, 946, 946, 946, 946, 946, 946, 946, 946, 946, 946, 946, 946, 946, 946, 946, 946, 946, 946, 946, 946, 946, 946, 946, 946, 946, 946, 946, 946, 946, 946, 946, 946, 946}, + {55: 943, 372: 1891, 376: 943}, + {55: 942, 376: 942}, + {956, 956, 956, 956, 956, 956, 956, 956, 956, 956, 956, 956, 956, 956, 956, 956, 956, 956, 956, 956, 956, 956, 956, 956, 956, 956, 956, 956, 956, 956, 956, 956, 956, 956, 956, 956, 956, 956, 956, 956, 956, 956, 956, 956, 956, 956, 956, 956, 956, 956, 956, 956, 956, 956, 956, 956, 956, 956, 956, 956, 956, 956, 956, 956, 956, 956, 956, 956, 956, 956, 956, 956, 956, 956, 956, 956, 956, 956, 956, 956, 956, 956, 956, 956, 956, 956, 956, 956, 956, 956, 956, 956, 956, 956, 956, 956, 956, 956, 956, 
956, 956, 956, 956, 956, 956, 956, 956, 956, 956, 956, 956, 956, 956, 956, 956, 956, 956, 956, 956, 956, 956, 956, 956, 956, 956, 956, 956, 956, 956, 956, 956, 956, 956, 956, 956, 956, 956, 956, 956, 956, 956, 956, 956, 956, 956, 956, 956, 956, 956, 956, 956, 956, 956, 956, 956, 956, 956, 956, 956, 956, 956, 956, 956, 956, 956, 956, 956, 956, 956, 956, 956, 956, 956, 956, 956, 956, 956, 956, 956, 956, 956, 956, 956, 956, 956, 956, 956, 956, 956, 956, 956, 956, 956, 956, 956, 956, 956, 956, 956, 956, 956, 956, 956, 956, 956, 956, 956, 956, 956, 956, 956, 956, 956, 956, 956, 956, 956, 956, 956, 956, 956, 956, 956, 956, 956, 956, 956, 956, 956, 956, 956, 956, 956, 956, 956, 956, 956, 956, 956, 956, 956, 956, 956, 956, 956, 956, 956, 956, 956, 956, 956, 956, 956, 956, 956, 956, 956, 956, 956, 956, 956, 956, 956, 956, 956, 956, 956, 956, 956, 956, 956, 956, 956, 956, 956, 956, 956, 956, 956, 956, 956, 956, 956, 956, 956, 956, 956, 956, 956, 956, 956, 956, 956, 956, 956, 956, 956, 956, 956, 956, 956, 956, 956, 956, 956, 956, 956, 956, 956, 956, 956, 956, 956, 956, 956, 956, 956, 956, 956, 956, 956, 956, 956, 956, 956, 956, 956, 956, 956, 956, 956, 956, 956, 956, 956, 956, 956, 956, 956, 956, 956, 956, 956, 956, 956, 956, 956, 956, 956, 956, 956, 956, 956, 956, 956, 956, 956, 956, 956, 956, 956, 956, 956, 956, 956, 956, 956, 956, 956, 956, 956, 956, 375: 956, 378: 956, 384: 956, 386: 956, 391: 956, 956, 956, 956, 956, 956, 956, 956, 956, 402: 956, 956, 407: 956, 956, 956, 411: 956, 956, 956, 956, 956, 956, 956, 956}, + {184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 375: 184, 377: 184, 184, 184, 184, 184, 384: 184, 386: 184, 391: 184, 184, 184, 184, 184, 184, 184, 184, 184, 401: 184, 184, 184, 407: 184, 184, 184, 
411: 184, 184, 184, 184, 184, 184, 184, 184, 421: 184, 184, 184, 184, 184, 184, 184, 184, 432: 184, 434: 184, 482: 1894}, + // 735 + {1309, 1332, 1217, 1442, 1436, 1426, 10: 1280, 1625, 1477, 1511, 1504, 1497, 1507, 1500, 1499, 1501, 1517, 1509, 1503, 1515, 1516, 1513, 1514, 1502, 1498, 1505, 1506, 1508, 1512, 1510, 1547, 1453, 1451, 1452, 1314, 1623, 1226, 1441, 1244, 1288, 1246, 1225, 1628, 1263, 1434, 1299, 1335, 1522, 1521, 1270, 1338, 1298, 1476, 1221, 1231, 1340, 1439, 1341, 1257, 1518, 1519, 1438, 1326, 1350, 1273, 1278, 1430, 1431, 1283, 1289, 1384, 1632, 1432, 1433, 1219, 1222, 1224, 1223, 1238, 1626, 1482, 1427, 1243, 1249, 1261, 1262, 1250, 1485, 1405, 1318, 1319, 1279, 1450, 1290, 1631, 1630, 1415, 1295, 1300, 1633, 1402, 1214, 1529, 1215, 1218, 1460, 1387, 1304, 1624, 1310, 1348, 1349, 1345, 1530, 1531, 1532, 1406, 1576, 1478, 1479, 1467, 1480, 1227, 1394, 1533, 1312, 1396, 1228, 1381, 1481, 1640, 1636, 1230, 1329, 1232, 1233, 1313, 1311, 1234, 1408, 1534, 1535, 1404, 1235, 1536, 1468, 1236, 1537, 1538, 1627, 1240, 1388, 1324, 1483, 1417, 1241, 1484, 1242, 1245, 1247, 1248, 1251, 1386, 1351, 1252, 1577, 1435, 1356, 1253, 1461, 1401, 1574, 1254, 1539, 1411, 1255, 1256, 1580, 1258, 1259, 1346, 1540, 1322, 1541, 1418, 1459, 1629, 1307, 1622, 1462, 1403, 1337, 1542, 1265, 1543, 1544, 1389, 1407, 1412, 1325, 1398, 1486, 1457, 1268, 1266, 1334, 1419, 1267, 1456, 1458, 1315, 1546, 1473, 1472, 1376, 1377, 1316, 1378, 1379, 1390, 1642, 1545, 1317, 1643, 1463, 1302, 1641, 1269, 1400, 1573, 1344, 1466, 1469, 1420, 1487, 1488, 1464, 1465, 1353, 1470, 1548, 1454, 1354, 1331, 1285, 1524, 1575, 1410, 1422, 1425, 1352, 1271, 1475, 1474, 1525, 1367, 1550, 1368, 1272, 1343, 1362, 1363, 1364, 1489, 1637, 1370, 1369, 1274, 1549, 1395, 1275, 1528, 1527, 1383, 1424, 1276, 1437, 1327, 1455, 1380, 1328, 1638, 1277, 1385, 1639, 1320, 1490, 1644, 1429, 1393, 1372, 1471, 1333, 1373, 1374, 1281, 1423, 1382, 1375, 1282, 1305, 1414, 1523, 1416, 1336, 1339, 1443, 1444, 1445, 1446, 1447, 1448, 1449, 1578, 1491, 1358, 1494, 1495, 1493, 1492, 1357, 1428, 1284, 1554, 1555, 1556, 1557, 1579, 1551, 1397, 1287, 1286, 1552, 1553, 1355, 1413, 1409, 1421, 1440, 1391, 1291, 1496, 1561, 1562, 1563, 1564, 1565, 1566, 1568, 1567, 1569, 1570, 1571, 1520, 1294, 1323, 1572, 1297, 1330, 1392, 1635, 1558, 1559, 1560, 1347, 1634, 1526, 1399, 373: 1671, 1674, 376: 1646, 378: 1655, 1669, 1668, 1699, 400: 1658, 404: 1758, 1677, 1650, 410: 1682, 419: 1645, 1647, 429: 1675, 1649, 1648, 433: 1676, 435: 1654, 1680, 1689, 1711, 1653, 1690, 1691, 1652, 1666, 1667, 1705, 1697, 1700, 1706, 1707, 1702, 1703, 1708, 1701, 1704, 1695, 1673, 1685, 1686, 1688, 1684, 1678, 1679, 1670, 1681, 1683, 1672, 1687, 1692, 1693, 523: 1657, 1212, 1213, 1211, 1663, 1659, 1651, 1662, 1660, 1661, 1694, 1698, 1696, 1656, 1665, 1709, 1710, 1664, 1621, 1895}, + {955, 955, 955, 955, 955, 955, 955, 955, 955, 955, 955, 955, 955, 955, 955, 955, 955, 955, 955, 955, 955, 955, 955, 955, 955, 955, 955, 955, 955, 955, 955, 955, 955, 955, 955, 955, 955, 955, 955, 955, 955, 955, 955, 955, 955, 955, 955, 955, 955, 955, 955, 955, 955, 955, 955, 955, 955, 955, 955, 955, 955, 955, 955, 955, 955, 955, 955, 955, 955, 955, 955, 955, 955, 955, 955, 955, 955, 955, 955, 955, 955, 955, 955, 955, 955, 955, 955, 955, 955, 955, 955, 955, 955, 955, 955, 955, 955, 955, 955, 955, 955, 955, 955, 955, 955, 955, 955, 955, 955, 955, 955, 955, 955, 955, 955, 955, 955, 955, 955, 955, 955, 955, 955, 955, 955, 955, 955, 955, 955, 955, 955, 955, 955, 955, 955, 955, 955, 955, 955, 955, 955, 955, 
955, 955, 955, 955, 955, 955, 955, 955, 955, 955, 955, 955, 955, 955, 955, 955, 955, 955, 955, 955, 955, 955, 955, 955, 955, 955, 955, 955, 955, 955, 955, 955, 955, 955, 955, 955, 955, 955, 955, 955, 955, 955, 955, 955, 955, 955, 955, 955, 955, 955, 955, 955, 955, 955, 955, 955, 955, 955, 955, 955, 955, 955, 955, 955, 955, 955, 955, 955, 955, 955, 955, 955, 955, 955, 955, 955, 955, 955, 955, 955, 955, 955, 955, 955, 955, 955, 955, 955, 955, 955, 955, 955, 955, 955, 955, 955, 955, 955, 955, 955, 955, 955, 955, 955, 955, 955, 955, 955, 955, 955, 955, 955, 955, 955, 955, 955, 955, 955, 955, 955, 955, 955, 955, 955, 955, 955, 955, 955, 955, 955, 955, 955, 955, 955, 955, 955, 955, 955, 955, 955, 955, 955, 955, 955, 955, 955, 955, 955, 955, 955, 955, 955, 955, 955, 955, 955, 955, 955, 955, 955, 955, 955, 955, 955, 955, 955, 955, 955, 955, 955, 955, 955, 955, 955, 955, 955, 955, 955, 955, 955, 955, 955, 955, 955, 955, 955, 955, 955, 955, 955, 955, 955, 955, 955, 955, 955, 955, 955, 955, 955, 955, 955, 955, 955, 955, 955, 955, 955, 955, 955, 955, 955, 955, 955, 955, 955, 955, 955, 955, 955, 955, 955, 955, 955, 955, 955, 955, 955, 955, 955, 375: 955, 378: 955, 384: 955, 386: 955, 391: 955, 955, 955, 955, 955, 955, 955, 955, 955, 402: 955, 955, 407: 955, 955, 955, 411: 955, 955, 955, 955, 955, 955, 955, 955}, + {970, 970, 970, 970, 970, 970, 970, 970, 970, 970, 970, 970, 970, 970, 970, 970, 970, 970, 970, 970, 970, 970, 970, 970, 970, 970, 970, 970, 970, 970, 970, 970, 970, 970, 970, 970, 970, 970, 970, 970, 970, 970, 970, 970, 970, 970, 970, 970, 970, 970, 970, 970, 970, 970, 970, 970, 970, 970, 970, 970, 970, 970, 970, 970, 970, 970, 970, 970, 970, 970, 970, 970, 970, 970, 970, 970, 970, 970, 970, 970, 970, 970, 970, 970, 970, 970, 970, 970, 970, 970, 970, 970, 970, 970, 970, 970, 970, 970, 970, 970, 970, 970, 970, 970, 970, 970, 970, 970, 970, 970, 970, 970, 970, 970, 970, 970, 970, 970, 970, 970, 970, 970, 970, 970, 970, 970, 970, 970, 970, 970, 970, 970, 970, 970, 970, 970, 970, 970, 970, 970, 970, 970, 970, 970, 970, 970, 970, 970, 970, 970, 970, 970, 970, 970, 970, 970, 970, 970, 970, 970, 970, 970, 970, 970, 970, 970, 970, 970, 970, 970, 970, 970, 970, 970, 970, 970, 970, 970, 970, 970, 970, 970, 970, 970, 970, 970, 970, 970, 970, 970, 970, 970, 970, 970, 970, 970, 970, 970, 970, 970, 970, 970, 970, 970, 970, 970, 970, 970, 970, 970, 970, 970, 970, 970, 970, 970, 970, 970, 970, 970, 970, 970, 970, 970, 970, 970, 970, 970, 970, 970, 970, 970, 970, 970, 970, 970, 970, 970, 970, 970, 970, 970, 970, 970, 970, 970, 970, 970, 970, 970, 970, 970, 970, 970, 970, 970, 970, 970, 970, 970, 970, 970, 970, 970, 970, 970, 970, 970, 970, 970, 970, 970, 970, 970, 970, 970, 970, 970, 970, 970, 970, 970, 970, 970, 970, 970, 970, 970, 970, 970, 970, 970, 970, 970, 970, 970, 970, 970, 970, 970, 970, 970, 970, 970, 970, 970, 970, 970, 970, 970, 970, 970, 970, 970, 970, 970, 970, 970, 970, 970, 970, 970, 970, 970, 970, 970, 970, 970, 970, 970, 970, 970, 970, 970, 970, 970, 970, 970, 970, 970, 970, 970, 970, 970, 970, 970, 970, 970, 970, 970, 970, 970, 970, 970, 970, 970, 970, 970, 970, 970, 970, 970, 970, 970, 970, 970, 970, 970, 970, 970, 970, 970, 375: 970, 378: 970, 384: 970, 386: 970, 391: 970, 970, 970, 970, 970, 970, 970, 970, 970, 402: 970, 407: 970, 970, 970}, + {957, 957, 957, 957, 957, 957, 957, 957, 957, 957, 957, 957, 957, 957, 957, 957, 957, 957, 957, 957, 957, 957, 957, 957, 957, 957, 957, 957, 957, 957, 957, 957, 957, 957, 957, 957, 957, 957, 957, 957, 957, 957, 957, 957, 957, 957, 957, 957, 957, 
957, 957, 957, 957, 957, 957, 957, 957, 957, 957, 957, 957, 957, 957, 957, 957, 957, 957, 957, 957, 957, 957, 957, 957, 957, 957, 957, 957, 957, 957, 957, 957, 957, 957, 957, 957, 957, 957, 957, 957, 957, 957, 957, 957, 957, 957, 957, 957, 957, 957, 957, 957, 957, 957, 957, 957, 957, 957, 957, 957, 957, 957, 957, 957, 957, 957, 957, 957, 957, 957, 957, 957, 957, 957, 957, 957, 957, 957, 957, 957, 957, 957, 957, 957, 957, 957, 957, 957, 957, 957, 957, 957, 957, 957, 957, 957, 957, 957, 957, 957, 957, 957, 957, 957, 957, 957, 957, 957, 957, 957, 957, 957, 957, 957, 957, 957, 957, 957, 957, 957, 957, 957, 957, 957, 957, 957, 957, 957, 957, 957, 957, 957, 957, 957, 957, 957, 957, 957, 957, 957, 957, 957, 957, 957, 957, 957, 957, 957, 957, 957, 957, 957, 957, 957, 957, 957, 957, 957, 957, 957, 957, 957, 957, 957, 957, 957, 957, 957, 957, 957, 957, 957, 957, 957, 957, 957, 957, 957, 957, 957, 957, 957, 957, 957, 957, 957, 957, 957, 957, 957, 957, 957, 957, 957, 957, 957, 957, 957, 957, 957, 957, 957, 957, 957, 957, 957, 957, 957, 957, 957, 957, 957, 957, 957, 957, 957, 957, 957, 957, 957, 957, 957, 957, 957, 957, 957, 957, 957, 957, 957, 957, 957, 957, 957, 957, 957, 957, 957, 957, 957, 957, 957, 957, 957, 957, 957, 957, 957, 957, 957, 957, 957, 957, 957, 957, 957, 957, 957, 957, 957, 957, 957, 957, 957, 957, 957, 957, 957, 957, 957, 957, 957, 957, 957, 957, 957, 957, 957, 957, 957, 957, 957, 957, 957, 957, 957, 957, 957, 957, 957, 957, 957, 957, 957, 957, 957, 957, 957, 957, 957, 957, 957, 957, 957, 957, 957, 957, 957, 957, 957, 957, 957, 957, 957, 957, 957, 957, 957, 957, 957, 957, 957, 957, 375: 957, 378: 957, 384: 957, 386: 957, 391: 957, 957, 957, 957, 957, 957, 957, 957, 957, 402: 957, 957, 407: 957, 957, 957, 411: 957, 957, 957, 957, 957, 957, 957, 957}, + {971, 971, 971, 971, 971, 971, 971, 971, 971, 971, 971, 971, 971, 971, 971, 971, 971, 971, 971, 971, 971, 971, 971, 971, 971, 971, 971, 971, 971, 971, 971, 971, 971, 971, 971, 971, 971, 971, 971, 971, 971, 971, 971, 971, 971, 971, 971, 971, 971, 971, 971, 971, 971, 971, 971, 971, 971, 971, 971, 971, 971, 971, 971, 971, 971, 971, 971, 971, 971, 971, 971, 971, 971, 971, 971, 971, 971, 971, 971, 971, 971, 971, 971, 971, 971, 971, 971, 971, 971, 971, 971, 971, 971, 971, 971, 971, 971, 971, 971, 971, 971, 971, 971, 971, 971, 971, 971, 971, 971, 971, 971, 971, 971, 971, 971, 971, 971, 971, 971, 971, 971, 971, 971, 971, 971, 971, 971, 971, 971, 971, 971, 971, 971, 971, 971, 971, 971, 971, 971, 971, 971, 971, 971, 971, 971, 971, 971, 971, 971, 971, 971, 971, 971, 971, 971, 971, 971, 971, 971, 971, 971, 971, 971, 971, 971, 971, 971, 971, 971, 971, 971, 971, 971, 971, 971, 971, 971, 971, 971, 971, 971, 971, 971, 971, 971, 971, 971, 971, 971, 971, 971, 971, 971, 971, 971, 971, 971, 971, 971, 971, 971, 971, 971, 971, 971, 971, 971, 971, 971, 971, 971, 971, 971, 971, 971, 971, 971, 971, 971, 971, 971, 971, 971, 971, 971, 971, 971, 971, 971, 971, 971, 971, 971, 971, 971, 971, 971, 971, 971, 971, 971, 971, 971, 971, 971, 971, 971, 971, 971, 971, 971, 971, 971, 971, 971, 971, 971, 971, 971, 971, 971, 971, 971, 971, 971, 971, 971, 971, 971, 971, 971, 971, 971, 971, 971, 971, 971, 971, 971, 971, 971, 971, 971, 971, 971, 971, 971, 971, 971, 971, 971, 971, 971, 971, 971, 971, 971, 971, 971, 971, 971, 971, 971, 971, 971, 971, 971, 971, 971, 971, 971, 971, 971, 971, 971, 971, 971, 971, 971, 971, 971, 971, 971, 971, 971, 971, 971, 971, 971, 971, 971, 971, 971, 971, 971, 971, 971, 971, 971, 971, 971, 971, 971, 971, 971, 971, 971, 971, 971, 971, 971, 971, 971, 
971, 971, 971, 971, 971, 971, 971, 971, 971, 971, 971, 971, 971, 971, 971, 971, 971, 971, 971, 375: 971, 378: 971, 384: 971, 386: 971, 391: 971, 971, 971, 971, 971, 971, 971, 971, 971, 402: 971, 407: 971, 971, 971, 547: 1716, 1714}, + // 740 + {1309, 1332, 1217, 1442, 1436, 1426, 10: 1280, 1625, 1477, 1511, 1504, 1497, 1507, 1500, 1499, 1501, 1517, 1509, 1503, 1515, 1516, 1513, 1514, 1502, 1498, 1505, 1506, 1508, 1512, 1510, 1547, 1453, 1451, 1452, 1314, 1623, 1226, 1441, 1244, 1288, 1246, 1225, 1628, 1263, 1434, 1299, 1335, 1522, 1521, 1270, 1338, 1298, 1476, 1221, 1231, 1340, 1439, 1341, 1257, 1518, 1519, 1438, 1326, 1350, 1273, 1278, 1430, 1431, 1283, 1289, 1384, 1632, 1432, 1433, 1219, 1222, 1224, 1223, 1238, 1626, 1482, 1427, 1243, 1249, 1261, 1262, 1250, 1485, 1405, 1318, 1319, 1279, 1450, 1290, 1631, 1630, 1415, 1295, 1300, 1633, 1402, 1214, 1529, 1215, 1218, 1460, 1387, 1304, 1624, 1310, 1348, 1349, 1345, 1530, 1531, 1532, 1406, 1576, 1478, 1479, 1467, 1480, 1227, 1394, 1533, 1312, 1396, 1228, 1381, 1481, 1640, 1636, 1230, 1329, 1232, 1233, 1313, 1311, 1234, 1408, 1534, 1535, 1404, 1235, 1536, 1468, 1236, 1537, 1538, 1627, 1240, 1388, 1324, 1483, 1417, 1241, 1484, 1242, 1245, 1247, 1248, 1251, 1386, 1351, 1252, 1577, 1435, 1356, 1253, 1461, 1401, 1574, 1254, 1539, 1411, 1255, 1256, 1580, 1258, 1259, 1346, 1540, 1322, 1541, 1418, 1459, 1629, 1307, 1622, 1462, 1403, 1337, 1542, 1265, 1543, 1544, 1389, 1407, 1412, 1325, 1398, 1486, 1457, 1268, 1266, 1334, 1419, 1267, 1456, 1458, 1315, 1546, 1473, 1472, 1376, 1377, 1316, 1378, 1379, 1390, 1642, 1545, 1317, 1643, 1463, 1302, 1641, 1269, 1400, 1573, 1344, 1466, 1469, 1420, 1487, 1488, 1464, 1465, 1353, 1470, 1548, 1454, 1354, 1331, 1285, 1524, 1575, 1410, 1422, 1425, 1352, 1271, 1475, 1474, 1525, 1367, 1550, 1368, 1272, 1343, 1362, 1363, 1364, 1489, 1637, 1370, 1369, 1274, 1549, 1395, 1275, 1528, 1527, 1383, 1424, 1276, 1437, 1327, 1455, 1380, 1328, 1638, 1277, 1385, 1639, 1320, 1490, 1644, 1429, 1393, 1372, 1471, 1333, 1373, 1374, 1281, 1423, 1382, 1375, 1282, 1305, 1414, 1523, 1416, 1336, 1339, 1443, 1444, 1445, 1446, 1447, 1448, 1449, 1578, 1491, 1358, 1494, 1495, 1493, 1492, 1357, 1428, 1284, 1554, 1555, 1556, 1557, 1579, 1551, 1397, 1287, 1286, 1552, 1553, 1355, 1413, 1409, 1421, 1440, 1391, 1291, 1496, 1561, 1562, 1563, 1564, 1565, 1566, 1568, 1567, 1569, 1570, 1571, 1520, 1294, 1323, 1572, 1297, 1330, 1392, 1635, 1558, 1559, 1560, 1347, 1634, 1526, 1399, 1618, 1671, 1674, 376: 1646, 378: 1655, 1669, 1668, 1699, 400: 1658, 404: 1616, 1677, 1650, 410: 1682, 419: 1645, 1647, 429: 1675, 1649, 1648, 433: 1676, 435: 1654, 1680, 1689, 1711, 1653, 1690, 1691, 1652, 1666, 1667, 1705, 1697, 1700, 1706, 1707, 1702, 1703, 1708, 1701, 1704, 1695, 1673, 1685, 1686, 1688, 1684, 1678, 1679, 1670, 1681, 1683, 1672, 1687, 1692, 1693, 523: 1657, 1212, 1213, 1211, 1663, 1659, 1651, 1662, 1660, 1661, 1694, 1698, 1696, 1656, 1665, 1709, 1710, 1664, 1621, 1620, 1619, 1900}, + {975, 975, 975, 975, 975, 975, 975, 975, 975, 975, 975, 975, 975, 975, 975, 975, 975, 975, 975, 975, 975, 975, 975, 975, 975, 975, 975, 975, 975, 975, 975, 975, 975, 975, 975, 975, 975, 975, 975, 975, 975, 975, 975, 975, 975, 975, 975, 975, 975, 975, 975, 975, 975, 975, 975, 975, 975, 975, 975, 975, 975, 975, 975, 975, 975, 975, 975, 975, 975, 975, 975, 975, 975, 975, 975, 975, 975, 975, 975, 975, 975, 975, 975, 975, 975, 975, 975, 975, 975, 975, 975, 975, 975, 975, 975, 975, 975, 975, 975, 975, 975, 975, 975, 975, 975, 975, 975, 975, 975, 975, 975, 975, 975, 975, 975, 975, 975, 
975, 975, 975, 975, 975, 975, 975, 975, 975, 975, 975, 975, 975, 975, 975, 975, 975, 975, 975, 975, 975, 975, 975, 975, 975, 975, 975, 975, 975, 975, 975, 975, 975, 975, 975, 975, 975, 975, 975, 975, 975, 975, 975, 975, 975, 975, 975, 975, 975, 975, 975, 975, 975, 975, 975, 975, 975, 975, 975, 975, 975, 975, 975, 975, 975, 975, 975, 975, 975, 975, 975, 975, 975, 975, 975, 975, 975, 975, 975, 975, 975, 975, 975, 975, 975, 975, 975, 975, 975, 975, 975, 975, 975, 975, 975, 975, 975, 975, 975, 975, 975, 975, 975, 975, 975, 975, 975, 975, 975, 975, 975, 975, 975, 975, 975, 975, 975, 975, 975, 975, 975, 975, 975, 975, 975, 975, 975, 975, 975, 975, 975, 975, 975, 975, 975, 975, 975, 975, 975, 975, 975, 975, 975, 975, 975, 975, 975, 975, 975, 975, 975, 975, 975, 975, 975, 975, 975, 975, 975, 975, 975, 975, 975, 975, 975, 975, 975, 975, 975, 975, 975, 975, 975, 975, 975, 975, 975, 975, 975, 975, 975, 975, 975, 975, 975, 975, 975, 975, 975, 975, 975, 975, 975, 975, 975, 975, 975, 975, 975, 975, 975, 975, 975, 975, 975, 975, 975, 975, 975, 975, 975, 975, 975, 975, 975, 975, 975, 975, 975, 975, 975, 975, 975, 975, 975, 975, 975, 975, 975, 975, 975, 975, 975, 975, 975, 975, 975, 975, 975, 975, 975, 975, 975, 975, 975, 975, 975, 975, 975, 975, 975, 975, 975, 975, 975, 375: 975, 378: 975, 384: 975, 386: 975, 391: 975, 1720, 1719, 975, 1718, 1717, 1715, 975, 975, 402: 975, 407: 975, 975, 975, 547: 1716, 1714}, + {84: 1902}, + {6: 183, 183, 91: 1903}, + {6: 2, 2, 406: 1904, 473: 1907, 564: 1906, 605: 1908, 1905}, + // 745 + {976, 976, 976, 976, 976, 976, 976, 976, 976, 976, 976, 976, 976, 976, 976, 976, 976, 976, 976, 976, 976, 976, 976, 976, 976, 976, 976, 976, 976, 976, 976, 976, 976, 976, 976, 976, 976, 976, 976, 976, 976, 976, 976, 976, 976, 976, 976, 976, 976, 976, 976, 976, 976, 976, 976, 976, 976, 976, 976, 976, 976, 976, 976, 976, 976, 976, 976, 976, 976, 976, 976, 976, 976, 976, 976, 976, 976, 976, 976, 976, 976, 976, 976, 976, 976, 976, 976, 976, 976, 976, 976, 976, 976, 976, 976, 976, 976, 976, 976, 976, 976, 976, 976, 976, 976, 976, 976, 976, 976, 976, 976, 976, 976, 976, 976, 976, 976, 976, 976, 976, 976, 976, 976, 976, 976, 976, 976, 976, 976, 976, 976, 976, 976, 976, 976, 976, 976, 976, 976, 976, 976, 976, 976, 976, 976, 976, 976, 976, 976, 976, 976, 976, 976, 976, 976, 976, 976, 976, 976, 976, 976, 976, 976, 976, 976, 976, 976, 976, 976, 976, 976, 976, 976, 976, 976, 976, 976, 976, 976, 976, 976, 976, 976, 976, 976, 976, 976, 976, 976, 976, 976, 976, 976, 976, 976, 976, 976, 976, 976, 976, 976, 976, 976, 976, 976, 976, 976, 976, 976, 976, 976, 976, 976, 976, 976, 976, 976, 976, 976, 976, 976, 976, 976, 976, 976, 976, 976, 976, 976, 976, 976, 976, 976, 976, 976, 976, 976, 976, 976, 976, 976, 976, 976, 976, 976, 976, 976, 976, 976, 976, 976, 976, 976, 976, 976, 976, 976, 976, 976, 976, 976, 976, 976, 976, 976, 976, 976, 976, 976, 976, 976, 976, 976, 976, 976, 976, 976, 976, 976, 976, 976, 976, 976, 976, 976, 976, 976, 976, 976, 976, 976, 976, 976, 976, 976, 976, 976, 976, 976, 976, 976, 976, 976, 976, 976, 976, 976, 976, 976, 976, 976, 976, 976, 976, 976, 976, 976, 976, 976, 976, 976, 976, 976, 976, 976, 976, 976, 976, 976, 976, 976, 976, 976, 976, 976, 976, 976, 976, 976, 976, 976, 976, 976, 976, 976, 976, 976, 976, 976, 976, 976, 976, 976, 976, 976, 976, 976, 976, 976, 976, 976, 976, 976, 976, 976, 976, 976, 976, 976, 976, 976, 976, 391: 976, 473: 976, 976}, + {6: 182, 182}, + {6: 2, 2, 473: 1907, 605: 1908, 1910}, + {1309, 1332, 1217, 1442, 1436, 1426, 10: 1280, 1625, 1477, 1511, 1504, 
1497, 1507, 1500, 1499, 1501, 1517, 1509, 1503, 1515, 1516, 1513, 1514, 1502, 1498, 1505, 1506, 1508, 1512, 1510, 1547, 1453, 1451, 1452, 1314, 1623, 1226, 1441, 1244, 1288, 1246, 1225, 1628, 1263, 1434, 1299, 1335, 1522, 1521, 1270, 1338, 1298, 1476, 1221, 1231, 1340, 1439, 1341, 1257, 1518, 1519, 1438, 1326, 1350, 1273, 1278, 1430, 1431, 1283, 1289, 1384, 1632, 1432, 1433, 1219, 1222, 1224, 1223, 1238, 1626, 1482, 1427, 1243, 1249, 1261, 1262, 1250, 1485, 1405, 1318, 1319, 1279, 1450, 1290, 1631, 1630, 1415, 1295, 1300, 1633, 1402, 1214, 1529, 1215, 1218, 1460, 1387, 1304, 1624, 1310, 1348, 1349, 1345, 1530, 1531, 1532, 1406, 1576, 1478, 1479, 1467, 1480, 1227, 1394, 1533, 1312, 1396, 1228, 1381, 1481, 1640, 1636, 1230, 1329, 1232, 1233, 1313, 1311, 1234, 1408, 1534, 1535, 1404, 1235, 1536, 1468, 1236, 1537, 1538, 1627, 1240, 1388, 1324, 1483, 1417, 1241, 1484, 1242, 1245, 1247, 1248, 1251, 1386, 1351, 1252, 1577, 1435, 1356, 1253, 1461, 1401, 1574, 1254, 1539, 1411, 1255, 1256, 1580, 1258, 1259, 1346, 1540, 1322, 1541, 1418, 1459, 1629, 1307, 1622, 1462, 1403, 1337, 1542, 1265, 1543, 1544, 1389, 1407, 1412, 1325, 1398, 1486, 1457, 1268, 1266, 1334, 1419, 1267, 1456, 1458, 1315, 1546, 1473, 1472, 1376, 1377, 1316, 1378, 1379, 1390, 1642, 1545, 1317, 1643, 1463, 1302, 1641, 1269, 1400, 1573, 1344, 1466, 1469, 1420, 1487, 1488, 1464, 1465, 1353, 1470, 1548, 1454, 1354, 1331, 1285, 1524, 1575, 1410, 1422, 1425, 1352, 1271, 1475, 1474, 1525, 1367, 1550, 1368, 1272, 1343, 1362, 1363, 1364, 1489, 1637, 1370, 1369, 1274, 1549, 1395, 1275, 1528, 1527, 1383, 1424, 1276, 1437, 1327, 1455, 1380, 1328, 1638, 1277, 1385, 1639, 1320, 1490, 1644, 1429, 1393, 1372, 1471, 1333, 1373, 1374, 1281, 1423, 1382, 1375, 1282, 1305, 1414, 1523, 1416, 1336, 1339, 1443, 1444, 1445, 1446, 1447, 1448, 1449, 1578, 1491, 1358, 1494, 1495, 1493, 1492, 1357, 1428, 1284, 1554, 1555, 1556, 1557, 1579, 1551, 1397, 1287, 1286, 1552, 1553, 1355, 1413, 1409, 1421, 1440, 1391, 1291, 1496, 1561, 1562, 1563, 1564, 1565, 1566, 1568, 1567, 1569, 1570, 1571, 1520, 1294, 1323, 1572, 1297, 1330, 1392, 1635, 1558, 1559, 1560, 1347, 1634, 1526, 1399, 1618, 1671, 1674, 376: 1646, 378: 1655, 1669, 1668, 1699, 400: 1658, 404: 1616, 1677, 1650, 410: 1682, 419: 1645, 1647, 429: 1675, 1649, 1648, 433: 1676, 435: 1654, 1680, 1689, 1711, 1653, 1690, 1691, 1652, 1666, 1667, 1705, 1697, 1700, 1706, 1707, 1702, 1703, 1708, 1701, 1704, 1695, 1673, 1685, 1686, 1688, 1684, 1678, 1679, 1670, 1681, 1683, 1672, 1687, 1692, 1693, 523: 1657, 1212, 1213, 1211, 1663, 1659, 1651, 1662, 1660, 1661, 1694, 1698, 1696, 1656, 1665, 1709, 1710, 1664, 1621, 1620, 1619, 1909}, + {6: 1, 1, 1, 384: 1, 386: 1, 394: 1, 399: 1}, + // 750 + {6: 3, 3, 3, 384: 3, 386: 3, 392: 1720, 1719, 3, 1718, 1717, 1715, 399: 3, 547: 1716, 1714}, + {6: 181, 181}, + {1309, 1332, 1217, 1442, 1436, 1426, 10: 1280, 1229, 1477, 1511, 1504, 1497, 1507, 1500, 1499, 1501, 1517, 1509, 1503, 1515, 1516, 1513, 1514, 1502, 1498, 1505, 1506, 1508, 1512, 1510, 1547, 1453, 1451, 1452, 1314, 1216, 1226, 1441, 1244, 1288, 1246, 1225, 1260, 1263, 1434, 1299, 1335, 1522, 1521, 1270, 1338, 1298, 1476, 1221, 1231, 1340, 1439, 1341, 1257, 1518, 1519, 1438, 1326, 1350, 1273, 1278, 1430, 1431, 1283, 1289, 1384, 1296, 1432, 1433, 1219, 1222, 1224, 1223, 1238, 1237, 1482, 1427, 1243, 1249, 1261, 1262, 1250, 1485, 1405, 1318, 1319, 1279, 1450, 1290, 1293, 1292, 1415, 1295, 1300, 1301, 1402, 1214, 1529, 1215, 1218, 1460, 1387, 1304, 1220, 1310, 1348, 1349, 1345, 1530, 1531, 1532, 1406, 1576, 1478, 1479, 1467, 
1480, 1227, 1394, 1533, 1312, 1396, 1228, 1381, 1481, 1360, 1308, 1230, 1329, 1232, 1233, 1313, 1311, 1234, 1408, 1534, 1535, 1404, 1235, 1536, 1468, 1236, 1537, 1538, 1239, 1240, 1388, 1324, 1483, 1417, 1241, 1484, 1242, 1245, 1247, 1248, 1251, 1386, 1351, 1252, 1577, 1435, 1356, 1253, 1461, 1401, 1574, 1254, 1539, 1411, 1255, 1256, 1580, 1258, 1259, 1346, 1540, 1322, 1541, 1418, 1459, 1264, 1307, 1210, 1462, 1403, 1337, 1542, 1265, 1543, 1544, 1389, 1407, 1412, 1325, 1398, 1486, 1457, 1268, 1266, 1334, 1419, 1267, 1456, 1458, 1315, 1546, 1473, 1472, 1376, 1377, 1316, 1378, 1379, 1390, 1365, 1545, 1317, 1366, 1463, 1302, 1361, 1269, 1400, 1573, 1344, 1466, 1469, 1420, 1487, 1488, 1464, 1465, 1353, 1470, 1548, 1454, 1354, 1331, 1285, 1524, 1575, 1410, 1422, 1425, 1352, 1271, 1475, 1474, 1525, 1367, 1550, 1368, 1272, 1343, 1362, 1363, 1364, 1489, 1321, 1370, 1369, 1274, 1549, 1395, 1275, 1528, 1527, 1383, 1424, 1276, 1437, 1327, 1455, 1380, 1328, 1342, 1277, 1385, 1359, 1320, 1490, 1371, 1429, 1393, 1372, 1471, 1333, 1373, 1374, 1281, 1423, 1382, 1375, 1282, 1305, 1414, 1523, 1416, 1336, 1339, 1443, 1444, 1445, 1446, 1447, 1448, 1449, 1578, 1491, 1358, 1494, 1495, 1493, 1492, 1357, 1428, 1284, 1554, 1555, 1556, 1557, 1579, 1551, 1397, 1287, 1286, 1552, 1553, 1355, 1413, 1409, 1421, 1440, 1391, 1291, 1496, 1561, 1562, 1563, 1564, 1565, 1566, 1568, 1567, 1569, 1570, 1571, 1520, 1294, 1323, 1572, 1297, 1330, 1392, 1306, 1558, 1559, 1560, 1347, 1303, 1526, 1399, 400: 838, 403: 838, 482: 838, 523: 1915, 1212, 1213, 1211, 604: 1942}, + {1309, 1332, 1217, 1442, 1436, 1426, 10: 1280, 1229, 1477, 1511, 1504, 1497, 1507, 1500, 1499, 1501, 1517, 1509, 1503, 1515, 1516, 1513, 1514, 1502, 1498, 1505, 1506, 1508, 1512, 1510, 1547, 1453, 1451, 1452, 1314, 1216, 1226, 1441, 1244, 1288, 1246, 1225, 1260, 1263, 1434, 1299, 1335, 1522, 1521, 1270, 1338, 1298, 1476, 1221, 1231, 1340, 1439, 1341, 1257, 1518, 1519, 1438, 1326, 1350, 1273, 1278, 1430, 1431, 1283, 1289, 1384, 1296, 1432, 1433, 1219, 1222, 1224, 1223, 1238, 1237, 1482, 1427, 1243, 1249, 1261, 1262, 1250, 1485, 1405, 1318, 1319, 1279, 1450, 1290, 1293, 1292, 1415, 1295, 1300, 1301, 1402, 1214, 1529, 1215, 1218, 1460, 1387, 1304, 1220, 1310, 1348, 1349, 1345, 1530, 1531, 1532, 1406, 1576, 1478, 1479, 1467, 1480, 1227, 1394, 1533, 1312, 1396, 1228, 1381, 1481, 1360, 1308, 1230, 1329, 1232, 1233, 1313, 1311, 1234, 1408, 1534, 1535, 1404, 1235, 1536, 1468, 1236, 1537, 1538, 1239, 1240, 1388, 1324, 1483, 1417, 1241, 1484, 1242, 1245, 1247, 1248, 1251, 1386, 1351, 1252, 1577, 1435, 1356, 1253, 1461, 1401, 1574, 1254, 1539, 1411, 1255, 1256, 1580, 1258, 1259, 1346, 1540, 1322, 1541, 1418, 1459, 1264, 1307, 1210, 1462, 1403, 1337, 1542, 1265, 1543, 1544, 1389, 1407, 1412, 1325, 1398, 1486, 1457, 1268, 1266, 1334, 1419, 1267, 1456, 1458, 1315, 1546, 1473, 1472, 1376, 1377, 1316, 1378, 1379, 1390, 1365, 1545, 1317, 1366, 1463, 1302, 1361, 1269, 1400, 1573, 1344, 1466, 1469, 1420, 1487, 1488, 1464, 1465, 1353, 1470, 1548, 1454, 1354, 1331, 1285, 1524, 1575, 1410, 1422, 1425, 1352, 1271, 1475, 1474, 1525, 1367, 1550, 1368, 1272, 1343, 1362, 1363, 1364, 1489, 1321, 1370, 1369, 1274, 1549, 1395, 1275, 1528, 1527, 1383, 1424, 1276, 1437, 1327, 1455, 1380, 1328, 1342, 1277, 1385, 1359, 1320, 1490, 1371, 1429, 1393, 1372, 1471, 1333, 1373, 1374, 1281, 1423, 1382, 1375, 1282, 1305, 1414, 1523, 1416, 1336, 1339, 1443, 1444, 1445, 1446, 1447, 1448, 1449, 1578, 1491, 1358, 1494, 1495, 1493, 1492, 1357, 1428, 1284, 1554, 1555, 1556, 1557, 1579, 1551, 1397, 1287, 1286, 
1552, 1553, 1355, 1413, 1409, 1421, 1440, 1391, 1291, 1496, 1561, 1562, 1563, 1564, 1565, 1566, 1568, 1567, 1569, 1570, 1571, 1520, 1294, 1323, 1572, 1297, 1330, 1392, 1306, 1558, 1559, 1560, 1347, 1303, 1526, 1399, 400: 833, 403: 833, 482: 833, 523: 1915, 1212, 1213, 1211, 604: 1939}, + {1309, 1332, 1217, 1442, 1436, 1426, 10: 1280, 1229, 1477, 1511, 1504, 1497, 1507, 1500, 1499, 1501, 1517, 1509, 1503, 1515, 1516, 1513, 1514, 1502, 1498, 1505, 1506, 1508, 1512, 1510, 1547, 1453, 1451, 1452, 1314, 1216, 1226, 1441, 1244, 1288, 1246, 1225, 1260, 1263, 1434, 1299, 1335, 1522, 1521, 1270, 1338, 1298, 1476, 1221, 1231, 1340, 1439, 1341, 1257, 1518, 1519, 1438, 1326, 1350, 1273, 1278, 1430, 1431, 1283, 1289, 1384, 1296, 1432, 1433, 1219, 1222, 1224, 1223, 1238, 1237, 1482, 1427, 1243, 1249, 1261, 1262, 1250, 1485, 1405, 1318, 1319, 1279, 1450, 1290, 1293, 1292, 1415, 1295, 1300, 1301, 1402, 1214, 1529, 1215, 1218, 1460, 1387, 1304, 1220, 1310, 1348, 1349, 1345, 1530, 1531, 1532, 1406, 1576, 1478, 1479, 1467, 1480, 1227, 1394, 1533, 1312, 1396, 1228, 1381, 1481, 1360, 1308, 1230, 1329, 1232, 1233, 1313, 1311, 1234, 1408, 1534, 1535, 1404, 1235, 1536, 1468, 1236, 1537, 1538, 1239, 1240, 1388, 1324, 1483, 1417, 1241, 1484, 1242, 1245, 1247, 1248, 1251, 1386, 1351, 1252, 1577, 1435, 1356, 1253, 1461, 1401, 1574, 1254, 1539, 1411, 1255, 1256, 1580, 1258, 1259, 1346, 1540, 1322, 1541, 1418, 1459, 1264, 1307, 1210, 1462, 1403, 1337, 1542, 1265, 1543, 1544, 1389, 1407, 1412, 1325, 1398, 1486, 1457, 1268, 1266, 1334, 1419, 1267, 1456, 1458, 1315, 1546, 1473, 1472, 1376, 1377, 1316, 1378, 1379, 1390, 1365, 1545, 1317, 1366, 1463, 1302, 1361, 1269, 1400, 1573, 1344, 1466, 1469, 1420, 1487, 1488, 1464, 1465, 1353, 1470, 1548, 1454, 1354, 1331, 1285, 1524, 1575, 1410, 1422, 1425, 1352, 1271, 1475, 1474, 1525, 1367, 1550, 1368, 1272, 1343, 1362, 1363, 1364, 1489, 1321, 1370, 1369, 1274, 1549, 1395, 1275, 1528, 1527, 1383, 1424, 1276, 1437, 1327, 1455, 1380, 1328, 1342, 1277, 1385, 1359, 1320, 1490, 1371, 1429, 1393, 1372, 1471, 1333, 1373, 1374, 1281, 1423, 1382, 1375, 1282, 1305, 1414, 1523, 1416, 1336, 1339, 1443, 1444, 1445, 1446, 1447, 1448, 1449, 1578, 1491, 1358, 1494, 1495, 1493, 1492, 1357, 1428, 1284, 1554, 1555, 1556, 1557, 1579, 1551, 1397, 1287, 1286, 1552, 1553, 1355, 1413, 1409, 1421, 1440, 1391, 1291, 1496, 1561, 1562, 1563, 1564, 1565, 1566, 1568, 1567, 1569, 1570, 1571, 1520, 1294, 1323, 1572, 1297, 1330, 1392, 1306, 1558, 1559, 1560, 1347, 1303, 1526, 1399, 400: 821, 403: 821, 482: 821, 523: 1915, 1212, 1213, 1211, 604: 1936}, + // 755 + {6: 208, 208, 9: 1934}, + {400: 1932, 403: 203, 482: 203}, + {403: 1920, 482: 1921, 585: 1930}, + {403: 1920, 482: 1921, 585: 1924}, + {403: 1920, 482: 1921, 585: 1922}, + // 760 + {6: 189, 189, 9: 189}, + {205, 205, 205, 205, 205, 205, 10: 205, 205, 205, 205, 205, 205, 205, 205, 205, 205, 205, 205, 205, 205, 205, 205, 205, 205, 205, 205, 205, 205, 205, 205, 205, 205, 205, 205, 205, 205, 205, 205, 205, 205, 205, 205, 205, 205, 205, 205, 205, 205, 205, 205, 205, 205, 205, 205, 205, 205, 205, 205, 205, 205, 205, 205, 205, 205, 205, 205, 205, 205, 205, 205, 205, 205, 205, 205, 205, 205, 205, 205, 205, 205, 205, 205, 205, 205, 205, 205, 205, 205, 205, 205, 205, 205, 205, 205, 205, 205, 205, 205, 205, 205, 205, 205, 205, 205, 205, 205, 205, 205, 205, 205, 205, 205, 205, 205, 205, 205, 205, 205, 205, 205, 205, 205, 205, 205, 205, 205, 205, 205, 205, 205, 205, 205, 205, 205, 205, 205, 205, 205, 205, 205, 205, 205, 205, 205, 205, 205, 205, 205, 205, 205, 205, 205, 
205, 205, 205, 205, 205, 205, 205, 205, 205, 205, 205, 205, 205, 205, 205, 205, 205, 205, 205, 205, 205, 205, 205, 205, 205, 205, 205, 205, 205, 205, 205, 205, 205, 205, 205, 205, 205, 205, 205, 205, 205, 205, 205, 205, 205, 205, 205, 205, 205, 205, 205, 205, 205, 205, 205, 205, 205, 205, 205, 205, 205, 205, 205, 205, 205, 205, 205, 205, 205, 205, 205, 205, 205, 205, 205, 205, 205, 205, 205, 205, 205, 205, 205, 205, 205, 205, 205, 205, 205, 205, 205, 205, 205, 205, 205, 205, 205, 205, 205, 205, 205, 205, 205, 205, 205, 205, 205, 205, 205, 205, 205, 205, 205, 205, 205, 205, 205, 205, 205, 205, 205, 205, 205, 205, 205, 205, 205, 205, 205, 205, 205, 205, 205, 205, 205, 205, 205, 205, 205, 205, 205, 205, 205, 205, 205, 205, 205, 205, 205, 205, 205, 205, 205, 205, 205, 205, 205, 205, 205, 205, 205, 205, 205, 205, 205, 205, 205, 205, 205, 205, 205, 205, 205, 205, 205, 205, 205, 205, 205, 205, 205, 205, 205, 205, 205, 205, 205, 205, 205, 205, 205, 205, 205, 205, 205, 205, 205, 205, 205, 205, 205, 205, 205, 205, 205, 205, 205, 205, 205, 205, 205, 205, 205, 205, 205, 205, 205, 205, 205, 376: 205, 378: 205, 205, 205, 205, 385: 205, 400: 205, 404: 205, 205, 205, 410: 205, 419: 205, 205, 429: 205, 205, 205, 433: 205, 435: 205, 205, 205, 205, 205, 205, 205, 205, 205, 205, 205, 205, 205, 205, 205, 205, 205, 205, 205, 205, 205, 205, 205, 205, 205, 205, 205, 205, 205, 205, 205, 205, 205, 205, 205}, + {204, 204, 204, 204, 204, 204, 10: 204, 204, 204, 204, 204, 204, 204, 204, 204, 204, 204, 204, 204, 204, 204, 204, 204, 204, 204, 204, 204, 204, 204, 204, 204, 204, 204, 204, 204, 204, 204, 204, 204, 204, 204, 204, 204, 204, 204, 204, 204, 204, 204, 204, 204, 204, 204, 204, 204, 204, 204, 204, 204, 204, 204, 204, 204, 204, 204, 204, 204, 204, 204, 204, 204, 204, 204, 204, 204, 204, 204, 204, 204, 204, 204, 204, 204, 204, 204, 204, 204, 204, 204, 204, 204, 204, 204, 204, 204, 204, 204, 204, 204, 204, 204, 204, 204, 204, 204, 204, 204, 204, 204, 204, 204, 204, 204, 204, 204, 204, 204, 204, 204, 204, 204, 204, 204, 204, 204, 204, 204, 204, 204, 204, 204, 204, 204, 204, 204, 204, 204, 204, 204, 204, 204, 204, 204, 204, 204, 204, 204, 204, 204, 204, 204, 204, 204, 204, 204, 204, 204, 204, 204, 204, 204, 204, 204, 204, 204, 204, 204, 204, 204, 204, 204, 204, 204, 204, 204, 204, 204, 204, 204, 204, 204, 204, 204, 204, 204, 204, 204, 204, 204, 204, 204, 204, 204, 204, 204, 204, 204, 204, 204, 204, 204, 204, 204, 204, 204, 204, 204, 204, 204, 204, 204, 204, 204, 204, 204, 204, 204, 204, 204, 204, 204, 204, 204, 204, 204, 204, 204, 204, 204, 204, 204, 204, 204, 204, 204, 204, 204, 204, 204, 204, 204, 204, 204, 204, 204, 204, 204, 204, 204, 204, 204, 204, 204, 204, 204, 204, 204, 204, 204, 204, 204, 204, 204, 204, 204, 204, 204, 204, 204, 204, 204, 204, 204, 204, 204, 204, 204, 204, 204, 204, 204, 204, 204, 204, 204, 204, 204, 204, 204, 204, 204, 204, 204, 204, 204, 204, 204, 204, 204, 204, 204, 204, 204, 204, 204, 204, 204, 204, 204, 204, 204, 204, 204, 204, 204, 204, 204, 204, 204, 204, 204, 204, 204, 204, 204, 204, 204, 204, 204, 204, 204, 204, 204, 204, 204, 204, 204, 204, 204, 204, 204, 204, 204, 204, 204, 204, 204, 204, 204, 204, 204, 204, 204, 204, 204, 204, 204, 204, 204, 204, 204, 204, 204, 204, 204, 204, 204, 204, 204, 204, 204, 376: 204, 378: 204, 204, 204, 204, 385: 204, 400: 204, 404: 204, 204, 204, 410: 204, 419: 204, 204, 429: 204, 204, 204, 433: 204, 435: 204, 204, 204, 204, 204, 204, 204, 204, 204, 204, 204, 204, 204, 204, 204, 204, 204, 204, 204, 204, 204, 204, 204, 204, 204, 204, 204, 204, 204, 204, 
204, 204, 204, 204, 204}, + {1309, 1332, 1217, 1442, 1436, 1426, 10: 1280, 1625, 1477, 1511, 1504, 1497, 1507, 1500, 1499, 1501, 1517, 1509, 1503, 1515, 1516, 1513, 1514, 1502, 1498, 1505, 1506, 1508, 1512, 1510, 1547, 1453, 1451, 1452, 1314, 1623, 1226, 1441, 1244, 1288, 1246, 1225, 1628, 1263, 1434, 1299, 1335, 1522, 1521, 1270, 1338, 1298, 1476, 1221, 1231, 1340, 1439, 1341, 1257, 1518, 1519, 1438, 1326, 1350, 1273, 1278, 1430, 1431, 1283, 1289, 1384, 1632, 1432, 1433, 1219, 1222, 1224, 1223, 1238, 1626, 1482, 1427, 1243, 1249, 1261, 1262, 1250, 1485, 1405, 1318, 1319, 1279, 1450, 1290, 1631, 1630, 1415, 1295, 1300, 1633, 1402, 1214, 1529, 1215, 1218, 1460, 1387, 1304, 1624, 1310, 1348, 1349, 1345, 1530, 1531, 1532, 1406, 1576, 1478, 1479, 1467, 1480, 1227, 1394, 1533, 1312, 1396, 1228, 1381, 1481, 1640, 1636, 1230, 1329, 1232, 1233, 1313, 1311, 1234, 1408, 1534, 1535, 1404, 1235, 1536, 1468, 1236, 1537, 1538, 1627, 1240, 1388, 1324, 1483, 1417, 1241, 1484, 1242, 1245, 1247, 1248, 1251, 1386, 1351, 1252, 1577, 1435, 1356, 1253, 1461, 1401, 1574, 1254, 1539, 1411, 1255, 1256, 1580, 1258, 1259, 1346, 1540, 1322, 1541, 1418, 1459, 1629, 1307, 1622, 1462, 1403, 1337, 1542, 1265, 1543, 1544, 1389, 1407, 1412, 1325, 1398, 1486, 1457, 1268, 1266, 1334, 1419, 1267, 1456, 1458, 1315, 1546, 1473, 1472, 1376, 1377, 1316, 1378, 1379, 1390, 1642, 1545, 1317, 1643, 1463, 1302, 1641, 1269, 1400, 1573, 1344, 1466, 1469, 1420, 1487, 1488, 1464, 1465, 1353, 1470, 1548, 1454, 1354, 1331, 1285, 1524, 1575, 1410, 1422, 1425, 1352, 1271, 1475, 1474, 1525, 1367, 1550, 1368, 1272, 1343, 1362, 1363, 1364, 1489, 1637, 1370, 1369, 1274, 1549, 1395, 1275, 1528, 1527, 1383, 1424, 1276, 1437, 1327, 1455, 1380, 1328, 1638, 1277, 1385, 1639, 1320, 1490, 1644, 1429, 1393, 1372, 1471, 1333, 1373, 1374, 1281, 1423, 1382, 1375, 1282, 1305, 1414, 1523, 1416, 1336, 1339, 1443, 1444, 1445, 1446, 1447, 1448, 1449, 1578, 1491, 1358, 1494, 1495, 1493, 1492, 1357, 1428, 1284, 1554, 1555, 1556, 1557, 1579, 1551, 1397, 1287, 1286, 1552, 1553, 1355, 1413, 1409, 1421, 1440, 1391, 1291, 1496, 1561, 1562, 1563, 1564, 1565, 1566, 1568, 1567, 1569, 1570, 1571, 1520, 1294, 1323, 1572, 1297, 1330, 1392, 1635, 1558, 1559, 1560, 1347, 1634, 1526, 1399, 1618, 1671, 1674, 376: 1646, 378: 1655, 1669, 1668, 1699, 400: 1658, 404: 1616, 1677, 1650, 410: 1682, 419: 1645, 1647, 429: 1675, 1649, 1648, 433: 1676, 435: 1654, 1680, 1689, 1711, 1653, 1690, 1691, 1652, 1666, 1667, 1705, 1697, 1700, 1706, 1707, 1702, 1703, 1708, 1701, 1704, 1695, 1673, 1685, 1686, 1688, 1684, 1678, 1679, 1670, 1681, 1683, 1672, 1687, 1692, 1693, 523: 1657, 1212, 1213, 1211, 1663, 1659, 1651, 1662, 1660, 1661, 1694, 1698, 1696, 1656, 1665, 1709, 1710, 1664, 1621, 1620, 1619, 1923}, + {6: 196, 196, 9: 196, 392: 1720, 1719, 395: 1718, 1717, 1715, 547: 1716, 1714}, + // 765 + {1309, 1332, 1217, 1442, 1436, 1426, 10: 1280, 1625, 1477, 1511, 1504, 1497, 1507, 1500, 1499, 1501, 1517, 1509, 1503, 1515, 1516, 1513, 1514, 1502, 1498, 1505, 1506, 1508, 1512, 1510, 1547, 1453, 1451, 1452, 1314, 1623, 1226, 1441, 1244, 1288, 1246, 1225, 1628, 1263, 1434, 1299, 1335, 1522, 1521, 1270, 1338, 1298, 1476, 1221, 1231, 1340, 1439, 1341, 1257, 1518, 1519, 1438, 1326, 1350, 1273, 1278, 1430, 1431, 1283, 1289, 1384, 1632, 1432, 1433, 1219, 1222, 1224, 1223, 1238, 1626, 1482, 1427, 1243, 1249, 1261, 1262, 1250, 1485, 1405, 1318, 1319, 1279, 1450, 1290, 1631, 1630, 1415, 1295, 1300, 1633, 1402, 1214, 1529, 1215, 1218, 1460, 1387, 1304, 1624, 1310, 1348, 1349, 1345, 1530, 1531, 1532, 1406, 1576, 
1478, 1479, 1467, 1480, 1227, 1394, 1533, 1312, 1396, 1228, 1381, 1481, 1640, 1636, 1230, 1329, 1232, 1233, 1313, 1311, 1234, 1408, 1534, 1535, 1404, 1235, 1536, 1468, 1236, 1537, 1538, 1627, 1240, 1388, 1324, 1483, 1417, 1241, 1484, 1242, 1245, 1247, 1248, 1251, 1386, 1351, 1252, 1577, 1435, 1356, 1253, 1461, 1401, 1574, 1254, 1539, 1411, 1255, 1256, 1580, 1258, 1259, 1346, 1540, 1322, 1541, 1418, 1459, 1629, 1307, 1622, 1462, 1403, 1337, 1542, 1265, 1543, 1544, 1389, 1407, 1412, 1325, 1398, 1486, 1457, 1268, 1266, 1334, 1419, 1267, 1456, 1458, 1315, 1546, 1473, 1472, 1376, 1377, 1316, 1378, 1379, 1390, 1642, 1545, 1317, 1643, 1463, 1302, 1641, 1269, 1400, 1573, 1344, 1466, 1469, 1420, 1487, 1488, 1464, 1465, 1353, 1470, 1548, 1454, 1354, 1331, 1285, 1524, 1575, 1410, 1422, 1425, 1352, 1271, 1475, 1474, 1525, 1367, 1550, 1368, 1272, 1343, 1362, 1363, 1364, 1489, 1637, 1370, 1369, 1274, 1549, 1395, 1275, 1528, 1527, 1383, 1424, 1276, 1437, 1327, 1455, 1380, 1328, 1638, 1277, 1385, 1639, 1320, 1490, 1644, 1429, 1393, 1372, 1471, 1333, 1373, 1374, 1281, 1423, 1382, 1375, 1282, 1305, 1414, 1523, 1416, 1336, 1339, 1443, 1444, 1445, 1446, 1447, 1448, 1449, 1578, 1491, 1358, 1494, 1495, 1493, 1492, 1357, 1428, 1284, 1554, 1555, 1556, 1557, 1579, 1551, 1397, 1287, 1286, 1552, 1553, 1355, 1413, 1409, 1421, 1440, 1391, 1291, 1496, 1561, 1562, 1563, 1564, 1565, 1566, 1568, 1567, 1569, 1570, 1571, 1520, 1294, 1323, 1572, 1297, 1330, 1392, 1635, 1558, 1559, 1560, 1347, 1634, 1526, 1399, 1618, 1671, 1926, 376: 1646, 378: 1655, 1669, 1668, 1699, 385: 1927, 400: 1658, 404: 1616, 1677, 1650, 410: 1682, 419: 1645, 1647, 429: 1675, 1649, 1648, 433: 1676, 435: 1654, 1680, 1689, 1711, 1653, 1690, 1691, 1652, 1666, 1667, 1705, 1697, 1700, 1706, 1707, 1702, 1703, 1708, 1701, 1704, 1695, 1673, 1685, 1686, 1688, 1684, 1678, 1679, 1670, 1681, 1683, 1672, 1687, 1692, 1693, 523: 1657, 1212, 1213, 1211, 1663, 1659, 1651, 1662, 1660, 1661, 1694, 1698, 1696, 1656, 1665, 1709, 1710, 1664, 1621, 1620, 1619, 1925, 579: 1928, 621: 1929}, + {6: 500, 500, 500, 500, 392: 1720, 1719, 395: 1718, 1717, 1715, 547: 1716, 1714}, + {6: 499, 499, 499, 499, 373: 1822}, + {6: 207, 207, 9: 207}, + {6: 206, 206, 9: 206}, + // 770 + {6: 197, 197, 9: 197}, + {1309, 1332, 1217, 1442, 1436, 1426, 10: 1280, 1625, 1477, 1511, 1504, 1497, 1507, 1500, 1499, 1501, 1517, 1509, 1503, 1515, 1516, 1513, 1514, 1502, 1498, 1505, 1506, 1508, 1512, 1510, 1547, 1453, 1451, 1452, 1314, 1623, 1226, 1441, 1244, 1288, 1246, 1225, 1628, 1263, 1434, 1299, 1335, 1522, 1521, 1270, 1338, 1298, 1476, 1221, 1231, 1340, 1439, 1341, 1257, 1518, 1519, 1438, 1326, 1350, 1273, 1278, 1430, 1431, 1283, 1289, 1384, 1632, 1432, 1433, 1219, 1222, 1224, 1223, 1238, 1626, 1482, 1427, 1243, 1249, 1261, 1262, 1250, 1485, 1405, 1318, 1319, 1279, 1450, 1290, 1631, 1630, 1415, 1295, 1300, 1633, 1402, 1214, 1529, 1215, 1218, 1460, 1387, 1304, 1624, 1310, 1348, 1349, 1345, 1530, 1531, 1532, 1406, 1576, 1478, 1479, 1467, 1480, 1227, 1394, 1533, 1312, 1396, 1228, 1381, 1481, 1640, 1636, 1230, 1329, 1232, 1233, 1313, 1311, 1234, 1408, 1534, 1535, 1404, 1235, 1536, 1468, 1236, 1537, 1538, 1627, 1240, 1388, 1324, 1483, 1417, 1241, 1484, 1242, 1245, 1247, 1248, 1251, 1386, 1351, 1252, 1577, 1435, 1356, 1253, 1461, 1401, 1574, 1254, 1539, 1411, 1255, 1256, 1580, 1258, 1259, 1346, 1540, 1322, 1541, 1418, 1459, 1629, 1307, 1622, 1462, 1403, 1337, 1542, 1265, 1543, 1544, 1389, 1407, 1412, 1325, 1398, 1486, 1457, 1268, 1266, 1334, 1419, 1267, 1456, 1458, 1315, 1546, 1473, 1472, 1376, 1377, 1316, 
1378, 1379, 1390, 1642, 1545, 1317, 1643, 1463, 1302, 1641, 1269, 1400, 1573, 1344, 1466, 1469, 1420, 1487, 1488, 1464, 1465, 1353, 1470, 1548, 1454, 1354, 1331, 1285, 1524, 1575, 1410, 1422, 1425, 1352, 1271, 1475, 1474, 1525, 1367, 1550, 1368, 1272, 1343, 1362, 1363, 1364, 1489, 1637, 1370, 1369, 1274, 1549, 1395, 1275, 1528, 1527, 1383, 1424, 1276, 1437, 1327, 1455, 1380, 1328, 1638, 1277, 1385, 1639, 1320, 1490, 1644, 1429, 1393, 1372, 1471, 1333, 1373, 1374, 1281, 1423, 1382, 1375, 1282, 1305, 1414, 1523, 1416, 1336, 1339, 1443, 1444, 1445, 1446, 1447, 1448, 1449, 1578, 1491, 1358, 1494, 1495, 1493, 1492, 1357, 1428, 1284, 1554, 1555, 1556, 1557, 1579, 1551, 1397, 1287, 1286, 1552, 1553, 1355, 1413, 1409, 1421, 1440, 1391, 1291, 1496, 1561, 1562, 1563, 1564, 1565, 1566, 1568, 1567, 1569, 1570, 1571, 1520, 1294, 1323, 1572, 1297, 1330, 1392, 1635, 1558, 1559, 1560, 1347, 1634, 1526, 1399, 1618, 1671, 1926, 376: 1646, 378: 1655, 1669, 1668, 1699, 385: 1927, 400: 1658, 404: 1616, 1677, 1650, 410: 1682, 419: 1645, 1647, 429: 1675, 1649, 1648, 433: 1676, 435: 1654, 1680, 1689, 1711, 1653, 1690, 1691, 1652, 1666, 1667, 1705, 1697, 1700, 1706, 1707, 1702, 1703, 1708, 1701, 1704, 1695, 1673, 1685, 1686, 1688, 1684, 1678, 1679, 1670, 1681, 1683, 1672, 1687, 1692, 1693, 523: 1657, 1212, 1213, 1211, 1663, 1659, 1651, 1662, 1660, 1661, 1694, 1698, 1696, 1656, 1665, 1709, 1710, 1664, 1621, 1620, 1619, 1925, 579: 1928, 621: 1931}, + {6: 201, 201, 9: 201}, + {1309, 1332, 1217, 1442, 1436, 1426, 10: 1280, 1229, 1477, 1511, 1504, 1497, 1507, 1500, 1499, 1501, 1517, 1509, 1503, 1515, 1516, 1513, 1514, 1502, 1498, 1505, 1506, 1508, 1512, 1510, 1547, 1453, 1451, 1452, 1314, 1216, 1226, 1441, 1244, 1288, 1246, 1225, 1260, 1263, 1434, 1299, 1335, 1522, 1521, 1270, 1338, 1298, 1476, 1221, 1231, 1340, 1439, 1341, 1257, 1518, 1519, 1438, 1326, 1350, 1273, 1278, 1430, 1431, 1283, 1289, 1384, 1296, 1432, 1433, 1219, 1222, 1224, 1223, 1238, 1237, 1482, 1427, 1243, 1249, 1261, 1262, 1250, 1485, 1405, 1318, 1319, 1279, 1450, 1290, 1293, 1292, 1415, 1295, 1300, 1301, 1402, 1214, 1529, 1215, 1218, 1460, 1387, 1304, 1220, 1310, 1348, 1349, 1345, 1530, 1531, 1532, 1406, 1576, 1478, 1479, 1467, 1480, 1227, 1394, 1533, 1312, 1396, 1228, 1381, 1481, 1360, 1308, 1230, 1329, 1232, 1233, 1313, 1311, 1234, 1408, 1534, 1535, 1404, 1235, 1536, 1468, 1236, 1537, 1538, 1239, 1240, 1388, 1324, 1483, 1417, 1241, 1484, 1242, 1245, 1247, 1248, 1251, 1386, 1351, 1252, 1577, 1435, 1356, 1253, 1461, 1401, 1574, 1254, 1539, 1411, 1255, 1256, 1580, 1258, 1259, 1346, 1540, 1322, 1541, 1418, 1459, 1264, 1307, 1210, 1462, 1403, 1337, 1542, 1265, 1543, 1544, 1389, 1407, 1412, 1325, 1398, 1486, 1457, 1268, 1266, 1334, 1419, 1267, 1456, 1458, 1315, 1546, 1473, 1472, 1376, 1377, 1316, 1378, 1379, 1390, 1365, 1545, 1317, 1366, 1463, 1302, 1361, 1269, 1400, 1573, 1344, 1466, 1469, 1420, 1487, 1488, 1464, 1465, 1353, 1470, 1548, 1454, 1354, 1331, 1285, 1524, 1575, 1410, 1422, 1425, 1352, 1271, 1475, 1474, 1525, 1367, 1550, 1368, 1272, 1343, 1362, 1363, 1364, 1489, 1321, 1370, 1369, 1274, 1549, 1395, 1275, 1528, 1527, 1383, 1424, 1276, 1437, 1327, 1455, 1380, 1328, 1342, 1277, 1385, 1359, 1320, 1490, 1371, 1429, 1393, 1372, 1471, 1333, 1373, 1374, 1281, 1423, 1382, 1375, 1282, 1305, 1414, 1523, 1416, 1336, 1339, 1443, 1444, 1445, 1446, 1447, 1448, 1449, 1578, 1491, 1358, 1494, 1495, 1493, 1492, 1357, 1428, 1284, 1554, 1555, 1556, 1557, 1579, 1551, 1397, 1287, 1286, 1552, 1553, 1355, 1413, 1409, 1421, 1440, 1391, 1291, 1496, 1561, 1562, 1563, 1564, 
1565, 1566, 1568, 1567, 1569, 1570, 1571, 1520, 1294, 1323, 1572, 1297, 1330, 1392, 1306, 1558, 1559, 1560, 1347, 1303, 1526, 1399, 523: 1933, 1212, 1213, 1211}, + {403: 202, 482: 202}, + // 775 + {1309, 1332, 1217, 1442, 1436, 1426, 10: 1280, 1229, 1477, 1511, 1504, 1497, 1507, 1500, 1499, 1501, 1517, 1509, 1503, 1515, 1516, 1513, 1514, 1502, 1498, 1505, 1506, 1508, 1512, 1510, 1547, 1453, 1451, 1452, 1314, 1216, 1226, 1441, 1244, 1288, 1246, 1225, 1260, 1263, 1434, 1299, 1335, 1522, 1521, 1270, 1338, 1298, 1476, 1221, 1231, 1340, 1439, 1341, 1257, 1518, 1519, 1438, 1326, 1350, 1273, 1278, 1430, 1431, 1283, 1289, 1384, 1296, 1432, 1433, 1219, 1222, 1224, 1223, 1238, 1237, 1482, 1427, 1243, 1249, 1261, 1911, 1250, 1485, 1405, 1318, 1319, 1913, 1450, 1290, 1293, 1292, 1415, 1295, 1300, 1301, 1402, 1214, 1529, 1215, 1218, 1460, 1387, 1304, 1220, 1310, 1348, 1349, 1345, 1530, 1531, 1532, 1406, 1576, 1478, 1479, 1467, 1480, 1227, 1394, 1533, 1312, 1396, 1228, 1381, 1481, 1360, 1308, 1230, 1329, 1232, 1233, 1313, 1311, 1234, 1408, 1534, 1535, 1404, 1235, 1536, 1468, 1236, 1537, 1538, 1239, 1240, 1388, 1324, 1483, 1417, 1241, 1484, 1242, 1245, 1247, 1248, 1251, 1386, 1351, 1252, 1577, 1435, 1356, 1253, 1461, 1401, 1574, 1254, 1539, 1411, 1255, 1256, 1580, 1258, 1259, 1346, 1540, 1322, 1541, 1418, 1459, 1264, 1307, 1210, 1462, 1403, 1337, 1542, 1265, 1543, 1544, 1389, 1407, 1412, 1325, 1398, 1486, 1457, 1268, 1266, 1334, 1419, 1912, 1456, 1458, 1315, 1546, 1473, 1472, 1376, 1377, 1316, 1378, 1379, 1390, 1365, 1545, 1317, 1366, 1463, 1302, 1361, 1269, 1400, 1573, 1344, 1466, 1469, 1420, 1487, 1488, 1464, 1465, 1353, 1470, 1548, 1454, 1354, 1331, 1285, 1524, 1575, 1410, 1422, 1425, 1352, 1271, 1475, 1474, 1525, 1367, 1550, 1368, 1272, 1343, 1362, 1363, 1364, 1489, 1321, 1370, 1369, 1274, 1549, 1395, 1275, 1528, 1527, 1383, 1424, 1276, 1437, 1327, 1455, 1380, 1328, 1342, 1277, 1385, 1359, 1320, 1490, 1371, 1429, 1393, 1372, 1471, 1333, 1373, 1374, 1281, 1423, 1382, 1375, 1282, 1305, 1414, 1523, 1416, 1336, 1339, 1443, 1444, 1445, 1446, 1447, 1448, 1449, 1578, 1491, 1358, 1494, 1495, 1493, 1492, 1357, 1428, 1284, 1554, 1555, 1556, 1557, 1579, 1551, 1397, 1287, 1286, 1552, 1553, 1355, 1413, 1409, 1421, 1440, 1391, 1291, 1496, 1561, 1562, 1563, 1564, 1565, 1566, 1568, 1567, 1569, 1570, 1571, 1520, 1294, 1323, 1572, 1297, 1330, 1392, 1306, 1558, 1559, 1560, 1347, 1303, 1526, 1399, 404: 1918, 438: 1917, 523: 1915, 1212, 1213, 1211, 604: 1916, 712: 1935}, + {6: 188, 188, 9: 188}, + {403: 1920, 482: 1921, 585: 1937}, + {1309, 1332, 1217, 1442, 1436, 1426, 10: 1280, 1625, 1477, 1511, 1504, 1497, 1507, 1500, 1499, 1501, 1517, 1509, 1503, 1515, 1516, 1513, 1514, 1502, 1498, 1505, 1506, 1508, 1512, 1510, 1547, 1453, 1451, 1452, 1314, 1623, 1226, 1441, 1244, 1288, 1246, 1225, 1628, 1263, 1434, 1299, 1335, 1522, 1521, 1270, 1338, 1298, 1476, 1221, 1231, 1340, 1439, 1341, 1257, 1518, 1519, 1438, 1326, 1350, 1273, 1278, 1430, 1431, 1283, 1289, 1384, 1632, 1432, 1433, 1219, 1222, 1224, 1223, 1238, 1626, 1482, 1427, 1243, 1249, 1261, 1262, 1250, 1485, 1405, 1318, 1319, 1279, 1450, 1290, 1631, 1630, 1415, 1295, 1300, 1633, 1402, 1214, 1529, 1215, 1218, 1460, 1387, 1304, 1624, 1310, 1348, 1349, 1345, 1530, 1531, 1532, 1406, 1576, 1478, 1479, 1467, 1480, 1227, 1394, 1533, 1312, 1396, 1228, 1381, 1481, 1640, 1636, 1230, 1329, 1232, 1233, 1313, 1311, 1234, 1408, 1534, 1535, 1404, 1235, 1536, 1468, 1236, 1537, 1538, 1627, 1240, 1388, 1324, 1483, 1417, 1241, 1484, 1242, 1245, 1247, 1248, 1251, 1386, 1351, 1252, 1577, 1435, 
1356, 1253, 1461, 1401, 1574, 1254, 1539, 1411, 1255, 1256, 1580, 1258, 1259, 1346, 1540, 1322, 1541, 1418, 1459, 1629, 1307, 1622, 1462, 1403, 1337, 1542, 1265, 1543, 1544, 1389, 1407, 1412, 1325, 1398, 1486, 1457, 1268, 1266, 1334, 1419, 1267, 1456, 1458, 1315, 1546, 1473, 1472, 1376, 1377, 1316, 1378, 1379, 1390, 1642, 1545, 1317, 1643, 1463, 1302, 1641, 1269, 1400, 1573, 1344, 1466, 1469, 1420, 1487, 1488, 1464, 1465, 1353, 1470, 1548, 1454, 1354, 1331, 1285, 1524, 1575, 1410, 1422, 1425, 1352, 1271, 1475, 1474, 1525, 1367, 1550, 1368, 1272, 1343, 1362, 1363, 1364, 1489, 1637, 1370, 1369, 1274, 1549, 1395, 1275, 1528, 1527, 1383, 1424, 1276, 1437, 1327, 1455, 1380, 1328, 1638, 1277, 1385, 1639, 1320, 1490, 1644, 1429, 1393, 1372, 1471, 1333, 1373, 1374, 1281, 1423, 1382, 1375, 1282, 1305, 1414, 1523, 1416, 1336, 1339, 1443, 1444, 1445, 1446, 1447, 1448, 1449, 1578, 1491, 1358, 1494, 1495, 1493, 1492, 1357, 1428, 1284, 1554, 1555, 1556, 1557, 1579, 1551, 1397, 1287, 1286, 1552, 1553, 1355, 1413, 1409, 1421, 1440, 1391, 1291, 1496, 1561, 1562, 1563, 1564, 1565, 1566, 1568, 1567, 1569, 1570, 1571, 1520, 1294, 1323, 1572, 1297, 1330, 1392, 1635, 1558, 1559, 1560, 1347, 1634, 1526, 1399, 1618, 1671, 1926, 376: 1646, 378: 1655, 1669, 1668, 1699, 385: 1927, 400: 1658, 404: 1616, 1677, 1650, 410: 1682, 419: 1645, 1647, 429: 1675, 1649, 1648, 433: 1676, 435: 1654, 1680, 1689, 1711, 1653, 1690, 1691, 1652, 1666, 1667, 1705, 1697, 1700, 1706, 1707, 1702, 1703, 1708, 1701, 1704, 1695, 1673, 1685, 1686, 1688, 1684, 1678, 1679, 1670, 1681, 1683, 1672, 1687, 1692, 1693, 523: 1657, 1212, 1213, 1211, 1663, 1659, 1651, 1662, 1660, 1661, 1694, 1698, 1696, 1656, 1665, 1709, 1710, 1664, 1621, 1620, 1619, 1925, 579: 1928, 621: 1938}, + {6: 199, 199, 9: 199}, + // 780 + {403: 1920, 482: 1921, 585: 1940}, + {1309, 1332, 1217, 1442, 1436, 1426, 10: 1280, 1625, 1477, 1511, 1504, 1497, 1507, 1500, 1499, 1501, 1517, 1509, 1503, 1515, 1516, 1513, 1514, 1502, 1498, 1505, 1506, 1508, 1512, 1510, 1547, 1453, 1451, 1452, 1314, 1623, 1226, 1441, 1244, 1288, 1246, 1225, 1628, 1263, 1434, 1299, 1335, 1522, 1521, 1270, 1338, 1298, 1476, 1221, 1231, 1340, 1439, 1341, 1257, 1518, 1519, 1438, 1326, 1350, 1273, 1278, 1430, 1431, 1283, 1289, 1384, 1632, 1432, 1433, 1219, 1222, 1224, 1223, 1238, 1626, 1482, 1427, 1243, 1249, 1261, 1262, 1250, 1485, 1405, 1318, 1319, 1279, 1450, 1290, 1631, 1630, 1415, 1295, 1300, 1633, 1402, 1214, 1529, 1215, 1218, 1460, 1387, 1304, 1624, 1310, 1348, 1349, 1345, 1530, 1531, 1532, 1406, 1576, 1478, 1479, 1467, 1480, 1227, 1394, 1533, 1312, 1396, 1228, 1381, 1481, 1640, 1636, 1230, 1329, 1232, 1233, 1313, 1311, 1234, 1408, 1534, 1535, 1404, 1235, 1536, 1468, 1236, 1537, 1538, 1627, 1240, 1388, 1324, 1483, 1417, 1241, 1484, 1242, 1245, 1247, 1248, 1251, 1386, 1351, 1252, 1577, 1435, 1356, 1253, 1461, 1401, 1574, 1254, 1539, 1411, 1255, 1256, 1580, 1258, 1259, 1346, 1540, 1322, 1541, 1418, 1459, 1629, 1307, 1622, 1462, 1403, 1337, 1542, 1265, 1543, 1544, 1389, 1407, 1412, 1325, 1398, 1486, 1457, 1268, 1266, 1334, 1419, 1267, 1456, 1458, 1315, 1546, 1473, 1472, 1376, 1377, 1316, 1378, 1379, 1390, 1642, 1545, 1317, 1643, 1463, 1302, 1641, 1269, 1400, 1573, 1344, 1466, 1469, 1420, 1487, 1488, 1464, 1465, 1353, 1470, 1548, 1454, 1354, 1331, 1285, 1524, 1575, 1410, 1422, 1425, 1352, 1271, 1475, 1474, 1525, 1367, 1550, 1368, 1272, 1343, 1362, 1363, 1364, 1489, 1637, 1370, 1369, 1274, 1549, 1395, 1275, 1528, 1527, 1383, 1424, 1276, 1437, 1327, 1455, 1380, 1328, 1638, 1277, 1385, 1639, 1320, 1490, 1644, 
1429, 1393, 1372, 1471, 1333, 1373, 1374, 1281, 1423, 1382, 1375, 1282, 1305, 1414, 1523, 1416, 1336, 1339, 1443, 1444, 1445, 1446, 1447, 1448, 1449, 1578, 1491, 1358, 1494, 1495, 1493, 1492, 1357, 1428, 1284, 1554, 1555, 1556, 1557, 1579, 1551, 1397, 1287, 1286, 1552, 1553, 1355, 1413, 1409, 1421, 1440, 1391, 1291, 1496, 1561, 1562, 1563, 1564, 1565, 1566, 1568, 1567, 1569, 1570, 1571, 1520, 1294, 1323, 1572, 1297, 1330, 1392, 1635, 1558, 1559, 1560, 1347, 1634, 1526, 1399, 1618, 1671, 1674, 376: 1646, 378: 1655, 1669, 1668, 1699, 400: 1658, 404: 1616, 1677, 1650, 410: 1682, 419: 1645, 1647, 429: 1675, 1649, 1648, 433: 1676, 435: 1654, 1680, 1689, 1711, 1653, 1690, 1691, 1652, 1666, 1667, 1705, 1697, 1700, 1706, 1707, 1702, 1703, 1708, 1701, 1704, 1695, 1673, 1685, 1686, 1688, 1684, 1678, 1679, 1670, 1681, 1683, 1672, 1687, 1692, 1693, 523: 1657, 1212, 1213, 1211, 1663, 1659, 1651, 1662, 1660, 1661, 1694, 1698, 1696, 1656, 1665, 1709, 1710, 1664, 1621, 1620, 1619, 1941}, + {6: 198, 198, 9: 198, 392: 1720, 1719, 395: 1718, 1717, 1715, 547: 1716, 1714}, + {403: 1920, 482: 1921, 585: 1943}, + {1309, 1332, 1217, 1442, 1436, 1426, 10: 1280, 1625, 1477, 1511, 1504, 1497, 1507, 1500, 1499, 1501, 1517, 1509, 1503, 1515, 1516, 1513, 1514, 1502, 1498, 1505, 1506, 1508, 1512, 1510, 1547, 1453, 1451, 1452, 1314, 1623, 1226, 1441, 1244, 1288, 1246, 1225, 1628, 1263, 1434, 1299, 1335, 1522, 1521, 1270, 1338, 1298, 1476, 1221, 1231, 1340, 1439, 1341, 1257, 1518, 1519, 1438, 1326, 1350, 1273, 1278, 1430, 1431, 1283, 1289, 1384, 1632, 1432, 1433, 1219, 1222, 1224, 1223, 1238, 1626, 1482, 1427, 1243, 1249, 1261, 1262, 1250, 1485, 1405, 1318, 1319, 1279, 1450, 1290, 1631, 1630, 1415, 1295, 1300, 1633, 1402, 1214, 1529, 1215, 1218, 1460, 1387, 1304, 1624, 1310, 1348, 1349, 1345, 1530, 1531, 1532, 1406, 1576, 1478, 1479, 1467, 1480, 1227, 1394, 1533, 1312, 1396, 1228, 1381, 1481, 1640, 1636, 1230, 1329, 1232, 1233, 1313, 1311, 1234, 1408, 1534, 1535, 1404, 1235, 1536, 1468, 1236, 1537, 1538, 1627, 1240, 1388, 1324, 1483, 1417, 1241, 1484, 1242, 1245, 1247, 1248, 1251, 1386, 1351, 1252, 1577, 1435, 1356, 1253, 1461, 1401, 1574, 1254, 1539, 1411, 1255, 1256, 1580, 1258, 1259, 1346, 1540, 1322, 1541, 1418, 1459, 1629, 1307, 1622, 1462, 1403, 1337, 1542, 1265, 1543, 1544, 1389, 1407, 1412, 1325, 1398, 1486, 1457, 1268, 1266, 1334, 1419, 1267, 1456, 1458, 1315, 1546, 1473, 1472, 1376, 1377, 1316, 1378, 1379, 1390, 1642, 1545, 1317, 1643, 1463, 1302, 1641, 1269, 1400, 1573, 1344, 1466, 1469, 1420, 1487, 1488, 1464, 1465, 1353, 1470, 1548, 1454, 1354, 1331, 1285, 1524, 1575, 1410, 1422, 1425, 1352, 1271, 1475, 1474, 1525, 1367, 1550, 1368, 1272, 1343, 1362, 1363, 1364, 1489, 1637, 1370, 1369, 1274, 1549, 1395, 1275, 1528, 1527, 1383, 1424, 1276, 1437, 1327, 1455, 1380, 1328, 1638, 1277, 1385, 1639, 1320, 1490, 1644, 1429, 1393, 1372, 1471, 1333, 1373, 1374, 1281, 1423, 1382, 1375, 1282, 1305, 1414, 1523, 1416, 1336, 1339, 1443, 1444, 1445, 1446, 1447, 1448, 1449, 1578, 1491, 1358, 1494, 1495, 1493, 1492, 1357, 1428, 1284, 1554, 1555, 1556, 1557, 1579, 1551, 1397, 1287, 1286, 1552, 1553, 1355, 1413, 1409, 1421, 1440, 1391, 1291, 1496, 1561, 1562, 1563, 1564, 1565, 1566, 1568, 1567, 1569, 1570, 1571, 1520, 1294, 1323, 1572, 1297, 1330, 1392, 1635, 1558, 1559, 1560, 1347, 1634, 1526, 1399, 1618, 1671, 1926, 376: 1646, 378: 1655, 1669, 1668, 1699, 385: 1927, 400: 1658, 404: 1616, 1677, 1650, 410: 1682, 419: 1645, 1647, 429: 1675, 1649, 1648, 433: 1676, 435: 1654, 1680, 1689, 1711, 1653, 1690, 1691, 1652, 1666, 1667, 
1705, 1697, 1700, 1706, 1707, 1702, 1703, 1708, 1701, 1704, 1695, 1673, 1685, 1686, 1688, 1684, 1678, 1679, 1670, 1681, 1683, 1672, 1687, 1692, 1693, 523: 1657, 1212, 1213, 1211, 1663, 1659, 1651, 1662, 1660, 1661, 1694, 1698, 1696, 1656, 1665, 1709, 1710, 1664, 1621, 1620, 1619, 1925, 579: 1928, 621: 1944}, + // 785 + {6: 200, 200, 9: 200}, + {608: 1957}, + {6: 473, 473, 473, 384: 473}, + {6: 277, 277, 277, 384: 1949, 640: 1948}, + {6: 320, 320, 320}, + // 790 + {406: 1904, 564: 1950, 577: 1951, 617: 1952}, + {977, 6: 977, 977, 977, 977, 35: 977, 977, 977, 977, 53: 977, 391: 977, 474: 977}, + {6: 278, 278, 278, 278, 53: 278}, + {6: 276, 276, 276, 1953, 53: 1954}, + {406: 1904, 564: 1950, 577: 1951, 617: 1956}, + // 795 + {406: 1904, 564: 1950, 577: 1951, 617: 1955}, + {6: 274, 274, 274}, + {6: 275, 275, 275}, + {1309, 1332, 1217, 1442, 1436, 1426, 10: 1280, 1625, 1477, 1511, 1504, 1497, 1507, 1500, 1499, 1501, 1517, 1509, 1503, 1515, 1516, 1513, 1514, 1502, 1498, 1505, 1506, 1508, 1512, 1510, 1547, 1453, 1451, 1452, 1314, 1623, 1226, 1441, 1244, 1288, 1246, 1225, 1628, 1263, 1434, 1299, 1335, 1522, 1521, 1270, 1338, 1298, 1476, 1221, 1231, 1340, 1439, 1341, 1257, 1518, 1519, 1438, 1326, 1350, 1273, 1278, 1430, 1431, 1283, 1289, 1384, 1632, 1432, 1433, 1219, 1222, 1224, 1223, 1238, 1626, 1482, 1427, 1243, 1249, 1261, 1262, 1250, 1485, 1405, 1318, 1319, 1279, 1450, 1290, 1631, 1630, 1415, 1295, 1300, 1633, 1402, 1214, 1529, 1215, 1218, 1460, 1387, 1304, 1624, 1310, 1348, 1349, 1345, 1530, 1531, 1532, 1406, 1576, 1478, 1479, 1467, 1480, 1227, 1394, 1533, 1312, 1396, 1228, 1381, 1481, 1640, 1636, 1230, 1329, 1232, 1233, 1313, 1311, 1234, 1408, 1534, 1535, 1404, 1235, 1536, 1468, 1236, 1537, 1538, 1627, 1240, 1388, 1324, 1483, 1417, 1241, 1484, 1242, 1245, 1247, 1248, 1251, 1386, 1351, 1252, 1577, 1435, 1356, 1253, 1461, 1401, 1574, 1254, 1539, 1411, 1255, 1256, 1580, 1258, 1259, 1346, 1540, 1322, 1541, 1418, 1459, 1629, 1307, 1622, 1462, 1403, 1337, 1542, 1265, 1543, 1544, 1389, 1407, 1412, 1325, 1398, 1486, 1457, 1268, 1266, 1334, 1419, 1267, 1456, 1458, 1315, 1546, 1473, 1472, 1376, 1377, 1316, 1378, 1379, 1390, 1642, 1545, 1317, 1643, 1463, 1302, 1641, 1269, 1400, 1573, 1344, 1466, 1469, 1420, 1487, 1488, 1464, 1465, 1353, 1470, 1548, 1454, 1354, 1331, 1285, 1524, 1575, 1410, 1422, 1425, 1352, 1271, 1475, 1474, 1525, 1367, 1550, 1368, 1272, 1343, 1362, 1363, 1364, 1489, 1637, 1370, 1369, 1274, 1549, 1395, 1275, 1528, 1527, 1383, 1424, 1276, 1437, 1327, 1455, 1380, 1328, 1638, 1277, 1385, 1639, 1320, 1490, 1644, 1429, 1393, 1372, 1471, 1333, 1373, 1374, 1281, 1423, 1382, 1375, 1282, 1305, 1414, 1523, 1416, 1336, 1339, 1443, 1444, 1445, 1446, 1447, 1448, 1449, 1578, 1491, 1358, 1494, 1495, 1493, 1492, 1357, 1428, 1284, 1554, 1555, 1556, 1557, 1579, 1551, 1397, 1287, 1286, 1552, 1553, 1355, 1413, 1409, 1421, 1440, 1391, 1291, 1496, 1561, 1562, 1563, 1564, 1565, 1566, 1568, 1567, 1569, 1570, 1571, 1520, 1294, 1323, 1572, 1297, 1330, 1392, 1635, 1558, 1559, 1560, 1347, 1634, 1526, 1399, 1618, 1671, 1674, 376: 1646, 378: 1655, 1669, 1668, 1699, 400: 1658, 404: 1616, 1677, 1650, 410: 1682, 419: 1645, 1647, 429: 1675, 1649, 1648, 433: 1676, 435: 1654, 1680, 1689, 1711, 1653, 1690, 1691, 1652, 1666, 1667, 1705, 1697, 1700, 1706, 1707, 1702, 1703, 1708, 1701, 1704, 1695, 1673, 1685, 1686, 1688, 1684, 1678, 1679, 1670, 1681, 1683, 1672, 1687, 1692, 1693, 523: 1657, 1212, 1213, 1211, 1663, 1659, 1651, 1662, 1660, 1661, 1694, 1698, 1696, 1656, 1665, 1709, 1710, 1664, 1621, 1620, 1619, 1958, 623: 1960, 
653: 1959}, + {6: 477, 477, 477, 477, 384: 477, 386: 477, 392: 1720, 1719, 477, 1718, 1717, 1715, 407: 1965, 1964, 547: 1716, 1714, 637: 1963}, + // 800 + {6: 481, 481, 481, 1961, 384: 481}, + {6: 480, 480, 480, 480, 384: 480, 386: 480, 394: 480}, + {1309, 1332, 1217, 1442, 1436, 1426, 10: 1280, 1625, 1477, 1511, 1504, 1497, 1507, 1500, 1499, 1501, 1517, 1509, 1503, 1515, 1516, 1513, 1514, 1502, 1498, 1505, 1506, 1508, 1512, 1510, 1547, 1453, 1451, 1452, 1314, 1623, 1226, 1441, 1244, 1288, 1246, 1225, 1628, 1263, 1434, 1299, 1335, 1522, 1521, 1270, 1338, 1298, 1476, 1221, 1231, 1340, 1439, 1341, 1257, 1518, 1519, 1438, 1326, 1350, 1273, 1278, 1430, 1431, 1283, 1289, 1384, 1632, 1432, 1433, 1219, 1222, 1224, 1223, 1238, 1626, 1482, 1427, 1243, 1249, 1261, 1262, 1250, 1485, 1405, 1318, 1319, 1279, 1450, 1290, 1631, 1630, 1415, 1295, 1300, 1633, 1402, 1214, 1529, 1215, 1218, 1460, 1387, 1304, 1624, 1310, 1348, 1349, 1345, 1530, 1531, 1532, 1406, 1576, 1478, 1479, 1467, 1480, 1227, 1394, 1533, 1312, 1396, 1228, 1381, 1481, 1640, 1636, 1230, 1329, 1232, 1233, 1313, 1311, 1234, 1408, 1534, 1535, 1404, 1235, 1536, 1468, 1236, 1537, 1538, 1627, 1240, 1388, 1324, 1483, 1417, 1241, 1484, 1242, 1245, 1247, 1248, 1251, 1386, 1351, 1252, 1577, 1435, 1356, 1253, 1461, 1401, 1574, 1254, 1539, 1411, 1255, 1256, 1580, 1258, 1259, 1346, 1540, 1322, 1541, 1418, 1459, 1629, 1307, 1622, 1462, 1403, 1337, 1542, 1265, 1543, 1544, 1389, 1407, 1412, 1325, 1398, 1486, 1457, 1268, 1266, 1334, 1419, 1267, 1456, 1458, 1315, 1546, 1473, 1472, 1376, 1377, 1316, 1378, 1379, 1390, 1642, 1545, 1317, 1643, 1463, 1302, 1641, 1269, 1400, 1573, 1344, 1466, 1469, 1420, 1487, 1488, 1464, 1465, 1353, 1470, 1548, 1454, 1354, 1331, 1285, 1524, 1575, 1410, 1422, 1425, 1352, 1271, 1475, 1474, 1525, 1367, 1550, 1368, 1272, 1343, 1362, 1363, 1364, 1489, 1637, 1370, 1369, 1274, 1549, 1395, 1275, 1528, 1527, 1383, 1424, 1276, 1437, 1327, 1455, 1380, 1328, 1638, 1277, 1385, 1639, 1320, 1490, 1644, 1429, 1393, 1372, 1471, 1333, 1373, 1374, 1281, 1423, 1382, 1375, 1282, 1305, 1414, 1523, 1416, 1336, 1339, 1443, 1444, 1445, 1446, 1447, 1448, 1449, 1578, 1491, 1358, 1494, 1495, 1493, 1492, 1357, 1428, 1284, 1554, 1555, 1556, 1557, 1579, 1551, 1397, 1287, 1286, 1552, 1553, 1355, 1413, 1409, 1421, 1440, 1391, 1291, 1496, 1561, 1562, 1563, 1564, 1565, 1566, 1568, 1567, 1569, 1570, 1571, 1520, 1294, 1323, 1572, 1297, 1330, 1392, 1635, 1558, 1559, 1560, 1347, 1634, 1526, 1399, 1618, 1671, 1674, 376: 1646, 378: 1655, 1669, 1668, 1699, 400: 1658, 404: 1616, 1677, 1650, 410: 1682, 419: 1645, 1647, 429: 1675, 1649, 1648, 433: 1676, 435: 1654, 1680, 1689, 1711, 1653, 1690, 1691, 1652, 1666, 1667, 1705, 1697, 1700, 1706, 1707, 1702, 1703, 1708, 1701, 1704, 1695, 1673, 1685, 1686, 1688, 1684, 1678, 1679, 1670, 1681, 1683, 1672, 1687, 1692, 1693, 523: 1657, 1212, 1213, 1211, 1663, 1659, 1651, 1662, 1660, 1661, 1694, 1698, 1696, 1656, 1665, 1709, 1710, 1664, 1621, 1620, 1619, 1958, 623: 1962}, + {6: 479, 479, 479, 479, 384: 479, 386: 479, 394: 479}, + {6: 478, 478, 478, 478, 384: 478, 386: 478, 394: 478}, + // 805 + {6: 476, 476, 476, 476, 384: 476, 386: 476, 394: 476}, + {6: 475, 475, 475, 475, 384: 475, 386: 475, 394: 475}, + {6: 277, 277, 277, 384: 1949, 640: 1967}, + {6: 321, 321, 321}, + {6: 2, 2, 2, 384: 2, 386: 2, 473: 1907, 605: 1908, 2039}, + // 810 + {1309, 1332, 1217, 1442, 1436, 1426, 10: 1280, 1229, 1477, 1511, 1504, 1497, 1507, 1500, 1499, 1501, 1517, 1509, 1503, 1515, 1516, 1513, 1514, 1502, 1498, 1505, 1506, 1508, 1512, 1510, 1547, 1453, 
1451, 1452, 1314, 1216, 1226, 1441, 1244, 1288, 1246, 1225, 1260, 1263, 1434, 1299, 1335, 1522, 1521, 1270, 1338, 1298, 1476, 1221, 1231, 1340, 1439, 1341, 1257, 1518, 1519, 1438, 1326, 1350, 1273, 1278, 1430, 1431, 1283, 1289, 1384, 1296, 1432, 1433, 1219, 1222, 1224, 1223, 1238, 1237, 1482, 1427, 1243, 1249, 1261, 1262, 1250, 1485, 1405, 1318, 1319, 1279, 1450, 1290, 1293, 1292, 1415, 1295, 1300, 1301, 1402, 1214, 1529, 1215, 1218, 1460, 1387, 1304, 1220, 1310, 1348, 1349, 1345, 1530, 1531, 1532, 1406, 1576, 1478, 1479, 1467, 1480, 1227, 1394, 1533, 1312, 1396, 1228, 1381, 1481, 1360, 1308, 1230, 1329, 1232, 1233, 1313, 1311, 1234, 1408, 1534, 1535, 1404, 1235, 1536, 1468, 1236, 1537, 1538, 1239, 1240, 1388, 1324, 1483, 1417, 1241, 1484, 1242, 1245, 1247, 1248, 1251, 1386, 1351, 1252, 1577, 1435, 1356, 1253, 1461, 1401, 1574, 1254, 1539, 1411, 1255, 1256, 1580, 1258, 1259, 1346, 1540, 1322, 1541, 1418, 1459, 1264, 1307, 1210, 1462, 1403, 1337, 1542, 1265, 1543, 1544, 1389, 1407, 1412, 1325, 1398, 1486, 1457, 1268, 1266, 1334, 1419, 1267, 1456, 1458, 1315, 1546, 1473, 1472, 1376, 1377, 1316, 1378, 1379, 1390, 1365, 1545, 1317, 1366, 1463, 1302, 1361, 1269, 1400, 1573, 1344, 1466, 1469, 1420, 1487, 1488, 1464, 1465, 1353, 1470, 1548, 1454, 1354, 1331, 1285, 1524, 1575, 1410, 1422, 1425, 1352, 1271, 1475, 1474, 1525, 1367, 1550, 1368, 1272, 1343, 1362, 1363, 1364, 1489, 1321, 1370, 1369, 1274, 1549, 1395, 1275, 1528, 1527, 1383, 1424, 1276, 1437, 1327, 1455, 1380, 1328, 1342, 1277, 1385, 1359, 1320, 1490, 1371, 1429, 1393, 1372, 1471, 1333, 1373, 1374, 1281, 1423, 1382, 1375, 1282, 1305, 1414, 1523, 1416, 1336, 1339, 1443, 1444, 1445, 1446, 1447, 1448, 1449, 1578, 1491, 1358, 1494, 1495, 1493, 1492, 1357, 1428, 1284, 1554, 1555, 1556, 1557, 1579, 1551, 1397, 1287, 1286, 1552, 1553, 1355, 1413, 1409, 1421, 1440, 1391, 1291, 1496, 1561, 1562, 1563, 1564, 1565, 1566, 1568, 1567, 1569, 1570, 1571, 1520, 1294, 1323, 1572, 1297, 1330, 1392, 1306, 1558, 1559, 1560, 1347, 1303, 1526, 1399, 373: 1981, 523: 1584, 1212, 1213, 1211, 549: 1977, 555: 1980, 600: 1979, 602: 1978, 1976, 628: 1975, 707: 1974, 735: 1973, 793: 1972}, + {6: 277, 277, 277, 384: 1949, 640: 1971}, + {6: 322, 322, 322}, + {6: 2, 2, 2, 384: 2, 386: 2, 394: 2, 399: 2, 473: 1907, 605: 1908, 2030}, + {6: 319, 319, 319, 384: 319, 386: 319, 473: 319}, + // 815 + {6: 318, 318, 318, 1989, 384: 318, 386: 318, 394: 318, 399: 318, 473: 318}, + {6: 317, 317, 317, 317, 384: 317, 386: 317, 394: 317, 399: 317, 473: 317}, + {6: 315, 315, 315, 315, 384: 315, 386: 315, 394: 315, 399: 315, 473: 315, 476: 2026, 2027, 626: 2025}, + {1309, 1332, 1217, 1442, 1436, 1426, 10: 1280, 1229, 1477, 1511, 1504, 1497, 1507, 1500, 1499, 1501, 1517, 1509, 1503, 1515, 1516, 1513, 1514, 1502, 1498, 1505, 1506, 1508, 1512, 1510, 1547, 1453, 1451, 1452, 1314, 1216, 1226, 1441, 1244, 1288, 1246, 1225, 1260, 1263, 1434, 1299, 1335, 1522, 1521, 1270, 1338, 1298, 1476, 1221, 1231, 1340, 1439, 1341, 1257, 1518, 1519, 1438, 1326, 1350, 1273, 1278, 1430, 1431, 1283, 1289, 1384, 1296, 1432, 1433, 1219, 1222, 1224, 1223, 1238, 1237, 1482, 1427, 1243, 1249, 1261, 1262, 1250, 1485, 1405, 1318, 1319, 1279, 1450, 1290, 1293, 1292, 1415, 1295, 1300, 1301, 1402, 1214, 1529, 1215, 1218, 1460, 1387, 1304, 1220, 1310, 1348, 1349, 1345, 1530, 1531, 1532, 1406, 1576, 1478, 1479, 1467, 1480, 1227, 1394, 1533, 1312, 1396, 1228, 1381, 1481, 1360, 1308, 1230, 1329, 1232, 1233, 1313, 1311, 1234, 1408, 1534, 1535, 1404, 1235, 1536, 1468, 1236, 1537, 1538, 1239, 1240, 1388, 1324, 1483, 1417, 
1241, 1484, 1242, 1245, 1247, 1248, 1251, 1386, 1351, 1252, 1577, 1435, 1356, 1253, 1461, 1401, 1574, 1254, 1539, 1411, 1255, 1256, 1580, 1258, 1259, 1346, 1540, 1322, 1541, 1418, 1459, 1264, 1307, 1210, 1462, 1403, 1337, 1542, 1265, 1543, 1544, 1389, 1407, 1412, 1325, 1398, 1486, 1457, 1268, 1266, 1334, 1419, 1267, 1456, 1458, 1315, 1546, 1473, 1472, 1376, 1377, 1316, 1378, 1379, 1390, 1365, 1545, 1317, 1366, 1463, 1302, 1361, 1269, 1400, 1573, 1344, 1466, 1469, 1420, 1487, 1488, 1464, 1465, 1353, 1470, 1548, 1454, 1354, 1331, 1285, 1524, 1575, 1410, 1422, 1425, 1352, 1271, 1475, 1474, 1525, 1367, 1550, 1368, 1272, 1343, 1362, 1363, 1364, 1489, 1321, 1370, 1369, 1274, 1549, 1395, 1275, 1528, 1527, 1383, 1424, 1276, 1437, 1327, 1455, 1380, 1328, 1342, 1277, 1385, 1359, 1320, 1490, 1371, 1429, 1393, 1372, 1471, 1333, 1373, 1374, 1281, 1423, 1382, 1375, 1282, 1305, 1414, 1523, 1416, 1336, 1339, 1443, 1444, 1445, 1446, 1447, 1448, 1449, 1578, 1491, 1358, 1494, 1495, 1493, 1492, 1357, 1428, 1284, 1554, 1555, 1556, 1557, 1579, 1551, 1397, 1287, 1286, 1552, 1553, 1355, 1413, 1409, 1421, 1440, 1391, 1291, 1496, 1561, 1562, 1563, 1564, 1565, 1566, 1568, 1567, 1569, 1570, 1571, 1520, 1294, 1323, 1572, 1297, 1330, 1392, 1306, 1558, 1559, 1560, 1347, 1303, 1526, 1399, 523: 2022, 1212, 1213, 1211}, + {6: 313, 313, 313, 313, 384: 313, 386: 313, 394: 313, 399: 313, 402: 313, 473: 313, 476: 313, 313}, + // 820 + {6: 312, 312, 312, 312, 384: 312, 386: 312, 394: 312, 399: 312, 402: 312, 473: 312, 476: 312, 312}, + {1309, 1332, 1217, 1442, 1436, 1426, 308, 308, 308, 308, 1280, 1229, 1477, 1511, 1504, 1497, 1507, 1500, 1499, 1501, 1517, 1509, 1503, 1515, 1516, 1513, 1514, 1502, 1498, 1505, 1506, 1508, 1512, 1510, 1547, 1453, 1451, 1452, 1314, 1216, 1226, 1441, 1244, 1288, 1246, 1225, 1260, 1263, 1434, 1299, 1335, 1522, 1521, 1270, 1338, 1298, 1476, 1221, 1231, 1340, 1439, 1341, 1257, 1518, 1519, 1438, 1326, 1350, 1273, 1278, 1430, 1431, 1283, 1289, 1384, 1296, 1432, 1433, 1219, 1222, 1224, 1223, 1238, 1237, 1482, 1427, 1243, 1249, 1261, 1262, 1250, 1485, 1405, 1318, 1319, 1279, 1450, 1290, 1293, 1292, 1415, 1295, 1300, 1301, 1402, 1214, 1529, 1215, 1218, 1460, 1387, 1304, 1220, 1310, 1348, 1349, 1345, 1530, 1531, 1532, 1406, 1576, 1478, 1479, 1467, 1480, 1227, 1394, 1533, 1312, 1396, 1228, 1381, 1481, 1360, 1308, 1230, 1329, 1232, 1233, 1313, 1311, 1234, 1408, 1534, 1535, 1404, 1235, 1536, 1468, 1236, 1537, 1538, 1239, 1240, 1388, 1324, 1483, 1417, 1241, 1484, 1242, 1245, 1247, 1248, 1251, 1386, 1351, 1252, 1577, 1435, 1356, 1253, 1461, 1401, 1574, 1254, 1539, 1411, 1255, 1256, 1580, 1258, 1259, 1346, 1540, 1322, 1541, 1418, 1459, 1264, 1307, 1210, 1462, 1403, 1337, 1542, 1265, 1543, 1544, 1389, 1407, 1412, 1325, 1398, 1486, 1457, 1268, 1266, 1334, 1419, 1267, 1456, 1458, 1315, 1546, 1473, 1472, 1376, 1377, 1316, 1378, 1379, 1390, 1365, 1545, 1317, 1366, 1463, 1302, 1361, 1269, 1400, 1573, 1344, 1466, 1469, 1420, 1487, 1488, 1464, 1465, 1353, 1470, 1548, 1454, 1354, 1331, 1285, 1524, 1575, 1410, 1422, 1425, 1352, 1271, 1475, 1474, 1525, 1367, 1550, 1368, 1272, 1343, 1362, 1363, 1364, 1489, 1321, 1370, 1369, 1274, 1549, 1395, 1275, 1528, 1527, 1383, 1424, 1276, 1437, 1327, 1455, 1380, 1328, 1342, 1277, 1385, 1359, 1320, 1490, 1371, 1429, 1393, 1372, 1471, 1333, 1373, 1374, 1281, 1423, 1382, 1375, 1282, 1305, 1414, 1523, 1416, 1336, 1339, 1443, 1444, 1445, 1446, 1447, 1448, 1449, 1578, 1491, 1358, 1494, 1495, 1493, 1492, 1357, 1428, 1284, 1554, 1555, 1556, 1557, 1579, 1551, 1397, 1287, 1286, 1552, 1553, 1355, 
1413, 1409, 1421, 1440, 1391, 1291, 1496, 1561, 1562, 1563, 1564, 1565, 1566, 1568, 1567, 1569, 1570, 1571, 1520, 1294, 1323, 1572, 1297, 1330, 1392, 1306, 1558, 1559, 1560, 1347, 1303, 1526, 1399, 375: 1987, 384: 308, 386: 308, 394: 308, 399: 308, 402: 308, 473: 308, 476: 308, 308, 479: 308, 481: 308, 483: 308, 523: 1986, 1212, 1213, 1211, 643: 1993, 704: 1992}, + {1309, 1332, 1217, 1442, 1436, 1426, 10: 1280, 1229, 1477, 1511, 1504, 1497, 1507, 1500, 1499, 1501, 1517, 1509, 1503, 1515, 1516, 1513, 1514, 1502, 1498, 1505, 1506, 1508, 1512, 1510, 1547, 1453, 1451, 1452, 1314, 1216, 1226, 1441, 1244, 1288, 1246, 1225, 1260, 1263, 1434, 1299, 1335, 1522, 1521, 1270, 1338, 1298, 1476, 1221, 1231, 1340, 1439, 1341, 1257, 1518, 1519, 1438, 1326, 1350, 1273, 1278, 1430, 1431, 1283, 1289, 1384, 1296, 1432, 1433, 1219, 1222, 1224, 1223, 1238, 1237, 1482, 1427, 1243, 1249, 1261, 1262, 1250, 1485, 1405, 1318, 1319, 1279, 1450, 1290, 1293, 1292, 1415, 1295, 1300, 1301, 1402, 1214, 1529, 1215, 1218, 1460, 1387, 1304, 1220, 1310, 1348, 1349, 1345, 1530, 1531, 1532, 1406, 1576, 1478, 1479, 1467, 1480, 1227, 1394, 1533, 1312, 1396, 1228, 1381, 1481, 1360, 1308, 1230, 1329, 1232, 1233, 1313, 1311, 1234, 1408, 1534, 1535, 1404, 1235, 1536, 1468, 1236, 1537, 1538, 1239, 1240, 1388, 1324, 1483, 1417, 1241, 1484, 1242, 1245, 1247, 1248, 1251, 1386, 1351, 1252, 1577, 1435, 1356, 1253, 1461, 1401, 1574, 1254, 1539, 1411, 1255, 1256, 1580, 1258, 1259, 1346, 1540, 1322, 1541, 1418, 1459, 1264, 1307, 1210, 1462, 1403, 1337, 1542, 1265, 1543, 1544, 1389, 1407, 1412, 1325, 1398, 1486, 1457, 1268, 1266, 1334, 1419, 1267, 1456, 1458, 1315, 1546, 1473, 1472, 1376, 1377, 1316, 1378, 1379, 1390, 1365, 1545, 1317, 1366, 1463, 1302, 1361, 1269, 1400, 1573, 1344, 1466, 1469, 1420, 1487, 1488, 1464, 1465, 1353, 1470, 1548, 1454, 1354, 1331, 1285, 1524, 1575, 1410, 1422, 1425, 1352, 1271, 1475, 1474, 1525, 1367, 1550, 1368, 1272, 1343, 1362, 1363, 1364, 1489, 1321, 1370, 1369, 1274, 1549, 1395, 1275, 1528, 1527, 1383, 1424, 1276, 1437, 1327, 1455, 1380, 1328, 1342, 1277, 1385, 1359, 1320, 1490, 1371, 1429, 1393, 1372, 1471, 1333, 1373, 1374, 1281, 1423, 1382, 1375, 1282, 1305, 1414, 1523, 1416, 1336, 1339, 1443, 1444, 1445, 1446, 1447, 1448, 1449, 1578, 1491, 1358, 1494, 1495, 1493, 1492, 1357, 1428, 1284, 1554, 1555, 1556, 1557, 1579, 1551, 1397, 1287, 1286, 1552, 1553, 1355, 1413, 1409, 1421, 1440, 1391, 1291, 1496, 1561, 1562, 1563, 1564, 1565, 1566, 1568, 1567, 1569, 1570, 1571, 1520, 1294, 1323, 1572, 1297, 1330, 1392, 1306, 1558, 1559, 1560, 1347, 1303, 1526, 1399, 373: 1981, 478: 1177, 523: 1584, 1212, 1213, 1211, 549: 1977, 555: 1980, 566: 1983, 1178, 1179, 1180, 600: 1979, 602: 1978, 1976, 628: 1975, 707: 1982}, + {8: 1990, 1989}, + {8: 1984}, + // 825 + {1309, 1332, 1217, 1442, 1436, 1426, 10: 1280, 1229, 1477, 1511, 1504, 1497, 1507, 1500, 1499, 1501, 1517, 1509, 1503, 1515, 1516, 1513, 1514, 1502, 1498, 1505, 1506, 1508, 1512, 1510, 1547, 1453, 1451, 1452, 1314, 1216, 1226, 1441, 1244, 1288, 1246, 1225, 1260, 1263, 1434, 1299, 1335, 1522, 1521, 1270, 1338, 1298, 1476, 1221, 1231, 1340, 1439, 1341, 1257, 1518, 1519, 1438, 1326, 1350, 1273, 1278, 1430, 1431, 1283, 1289, 1384, 1296, 1432, 1433, 1219, 1222, 1224, 1223, 1238, 1237, 1482, 1427, 1243, 1249, 1261, 1262, 1250, 1485, 1405, 1318, 1319, 1279, 1450, 1290, 1293, 1292, 1415, 1295, 1300, 1301, 1402, 1214, 1529, 1215, 1218, 1460, 1387, 1304, 1220, 1310, 1348, 1349, 1345, 1530, 1531, 1532, 1406, 1576, 1478, 1479, 1467, 1480, 1227, 1394, 1533, 1312, 1396, 1228, 
1381, 1481, 1360, 1308, 1230, 1329, 1232, 1233, 1313, 1311, 1234, 1408, 1534, 1535, 1404, 1235, 1536, 1468, 1236, 1537, 1538, 1239, 1240, 1388, 1324, 1483, 1417, 1241, 1484, 1242, 1245, 1247, 1248, 1251, 1386, 1351, 1252, 1577, 1435, 1356, 1253, 1461, 1401, 1574, 1254, 1539, 1411, 1255, 1256, 1580, 1258, 1259, 1346, 1540, 1322, 1541, 1418, 1459, 1264, 1307, 1210, 1462, 1403, 1337, 1542, 1265, 1543, 1544, 1389, 1407, 1412, 1325, 1398, 1486, 1457, 1268, 1266, 1334, 1419, 1267, 1456, 1458, 1315, 1546, 1473, 1472, 1376, 1377, 1316, 1378, 1379, 1390, 1365, 1545, 1317, 1366, 1463, 1302, 1361, 1269, 1400, 1573, 1344, 1466, 1469, 1420, 1487, 1488, 1464, 1465, 1353, 1470, 1548, 1454, 1354, 1331, 1285, 1524, 1575, 1410, 1422, 1425, 1352, 1271, 1475, 1474, 1525, 1367, 1550, 1368, 1272, 1343, 1362, 1363, 1364, 1489, 1321, 1370, 1369, 1274, 1549, 1395, 1275, 1528, 1527, 1383, 1424, 1276, 1437, 1327, 1455, 1380, 1328, 1342, 1277, 1385, 1359, 1320, 1490, 1371, 1429, 1393, 1372, 1471, 1333, 1373, 1374, 1281, 1423, 1382, 1375, 1282, 1305, 1414, 1523, 1416, 1336, 1339, 1443, 1444, 1445, 1446, 1447, 1448, 1449, 1578, 1491, 1358, 1494, 1495, 1493, 1492, 1357, 1428, 1284, 1554, 1555, 1556, 1557, 1579, 1551, 1397, 1287, 1286, 1552, 1553, 1355, 1413, 1409, 1421, 1440, 1391, 1291, 1496, 1561, 1562, 1563, 1564, 1565, 1566, 1568, 1567, 1569, 1570, 1571, 1520, 1294, 1323, 1572, 1297, 1330, 1392, 1306, 1558, 1559, 1560, 1347, 1303, 1526, 1399, 375: 1987, 523: 1986, 1212, 1213, 1211, 643: 1985}, + {6: 310, 310, 310, 310, 384: 310, 386: 310, 394: 310, 399: 310, 402: 310, 473: 310, 476: 310, 310}, + {6: 306, 306, 306, 306, 384: 306, 386: 306, 394: 306, 399: 306, 402: 306, 473: 306, 476: 306, 306, 479: 306, 481: 306, 483: 306}, + {1309, 1332, 1217, 1442, 1436, 1426, 10: 1280, 1229, 1477, 1511, 1504, 1497, 1507, 1500, 1499, 1501, 1517, 1509, 1503, 1515, 1516, 1513, 1514, 1502, 1498, 1505, 1506, 1508, 1512, 1510, 1547, 1453, 1451, 1452, 1314, 1216, 1226, 1441, 1244, 1288, 1246, 1225, 1260, 1263, 1434, 1299, 1335, 1522, 1521, 1270, 1338, 1298, 1476, 1221, 1231, 1340, 1439, 1341, 1257, 1518, 1519, 1438, 1326, 1350, 1273, 1278, 1430, 1431, 1283, 1289, 1384, 1296, 1432, 1433, 1219, 1222, 1224, 1223, 1238, 1237, 1482, 1427, 1243, 1249, 1261, 1262, 1250, 1485, 1405, 1318, 1319, 1279, 1450, 1290, 1293, 1292, 1415, 1295, 1300, 1301, 1402, 1214, 1529, 1215, 1218, 1460, 1387, 1304, 1220, 1310, 1348, 1349, 1345, 1530, 1531, 1532, 1406, 1576, 1478, 1479, 1467, 1480, 1227, 1394, 1533, 1312, 1396, 1228, 1381, 1481, 1360, 1308, 1230, 1329, 1232, 1233, 1313, 1311, 1234, 1408, 1534, 1535, 1404, 1235, 1536, 1468, 1236, 1537, 1538, 1239, 1240, 1388, 1324, 1483, 1417, 1241, 1484, 1242, 1245, 1247, 1248, 1251, 1386, 1351, 1252, 1577, 1435, 1356, 1253, 1461, 1401, 1574, 1254, 1539, 1411, 1255, 1256, 1580, 1258, 1259, 1346, 1540, 1322, 1541, 1418, 1459, 1264, 1307, 1210, 1462, 1403, 1337, 1542, 1265, 1543, 1544, 1389, 1407, 1412, 1325, 1398, 1486, 1457, 1268, 1266, 1334, 1419, 1267, 1456, 1458, 1315, 1546, 1473, 1472, 1376, 1377, 1316, 1378, 1379, 1390, 1365, 1545, 1317, 1366, 1463, 1302, 1361, 1269, 1400, 1573, 1344, 1466, 1469, 1420, 1487, 1488, 1464, 1465, 1353, 1470, 1548, 1454, 1354, 1331, 1285, 1524, 1575, 1410, 1422, 1425, 1352, 1271, 1475, 1474, 1525, 1367, 1550, 1368, 1272, 1343, 1362, 1363, 1364, 1489, 1321, 1370, 1369, 1274, 1549, 1395, 1275, 1528, 1527, 1383, 1424, 1276, 1437, 1327, 1455, 1380, 1328, 1342, 1277, 1385, 1359, 1320, 1490, 1371, 1429, 1393, 1372, 1471, 1333, 1373, 1374, 1281, 1423, 1382, 1375, 1282, 1305, 1414, 1523, 
1416, 1336, 1339, 1443, 1444, 1445, 1446, 1447, 1448, 1449, 1578, 1491, 1358, 1494, 1495, 1493, 1492, 1357, 1428, 1284, 1554, 1555, 1556, 1557, 1579, 1551, 1397, 1287, 1286, 1552, 1553, 1355, 1413, 1409, 1421, 1440, 1391, 1291, 1496, 1561, 1562, 1563, 1564, 1565, 1566, 1568, 1567, 1569, 1570, 1571, 1520, 1294, 1323, 1572, 1297, 1330, 1392, 1306, 1558, 1559, 1560, 1347, 1303, 1526, 1399, 523: 1988, 1212, 1213, 1211}, + {6: 305, 305, 305, 305, 384: 305, 386: 305, 394: 305, 399: 305, 402: 305, 473: 305, 476: 305, 305, 479: 305, 481: 305, 483: 305}, + // 830 + {1309, 1332, 1217, 1442, 1436, 1426, 10: 1280, 1229, 1477, 1511, 1504, 1497, 1507, 1500, 1499, 1501, 1517, 1509, 1503, 1515, 1516, 1513, 1514, 1502, 1498, 1505, 1506, 1508, 1512, 1510, 1547, 1453, 1451, 1452, 1314, 1216, 1226, 1441, 1244, 1288, 1246, 1225, 1260, 1263, 1434, 1299, 1335, 1522, 1521, 1270, 1338, 1298, 1476, 1221, 1231, 1340, 1439, 1341, 1257, 1518, 1519, 1438, 1326, 1350, 1273, 1278, 1430, 1431, 1283, 1289, 1384, 1296, 1432, 1433, 1219, 1222, 1224, 1223, 1238, 1237, 1482, 1427, 1243, 1249, 1261, 1262, 1250, 1485, 1405, 1318, 1319, 1279, 1450, 1290, 1293, 1292, 1415, 1295, 1300, 1301, 1402, 1214, 1529, 1215, 1218, 1460, 1387, 1304, 1220, 1310, 1348, 1349, 1345, 1530, 1531, 1532, 1406, 1576, 1478, 1479, 1467, 1480, 1227, 1394, 1533, 1312, 1396, 1228, 1381, 1481, 1360, 1308, 1230, 1329, 1232, 1233, 1313, 1311, 1234, 1408, 1534, 1535, 1404, 1235, 1536, 1468, 1236, 1537, 1538, 1239, 1240, 1388, 1324, 1483, 1417, 1241, 1484, 1242, 1245, 1247, 1248, 1251, 1386, 1351, 1252, 1577, 1435, 1356, 1253, 1461, 1401, 1574, 1254, 1539, 1411, 1255, 1256, 1580, 1258, 1259, 1346, 1540, 1322, 1541, 1418, 1459, 1264, 1307, 1210, 1462, 1403, 1337, 1542, 1265, 1543, 1544, 1389, 1407, 1412, 1325, 1398, 1486, 1457, 1268, 1266, 1334, 1419, 1267, 1456, 1458, 1315, 1546, 1473, 1472, 1376, 1377, 1316, 1378, 1379, 1390, 1365, 1545, 1317, 1366, 1463, 1302, 1361, 1269, 1400, 1573, 1344, 1466, 1469, 1420, 1487, 1488, 1464, 1465, 1353, 1470, 1548, 1454, 1354, 1331, 1285, 1524, 1575, 1410, 1422, 1425, 1352, 1271, 1475, 1474, 1525, 1367, 1550, 1368, 1272, 1343, 1362, 1363, 1364, 1489, 1321, 1370, 1369, 1274, 1549, 1395, 1275, 1528, 1527, 1383, 1424, 1276, 1437, 1327, 1455, 1380, 1328, 1342, 1277, 1385, 1359, 1320, 1490, 1371, 1429, 1393, 1372, 1471, 1333, 1373, 1374, 1281, 1423, 1382, 1375, 1282, 1305, 1414, 1523, 1416, 1336, 1339, 1443, 1444, 1445, 1446, 1447, 1448, 1449, 1578, 1491, 1358, 1494, 1495, 1493, 1492, 1357, 1428, 1284, 1554, 1555, 1556, 1557, 1579, 1551, 1397, 1287, 1286, 1552, 1553, 1355, 1413, 1409, 1421, 1440, 1391, 1291, 1496, 1561, 1562, 1563, 1564, 1565, 1566, 1568, 1567, 1569, 1570, 1571, 1520, 1294, 1323, 1572, 1297, 1330, 1392, 1306, 1558, 1559, 1560, 1347, 1303, 1526, 1399, 373: 1981, 523: 1584, 1212, 1213, 1211, 549: 1977, 555: 1980, 600: 1979, 602: 1978, 1976, 628: 1991}, + {6: 309, 309, 309, 309, 384: 309, 386: 309, 394: 309, 399: 309, 402: 309, 473: 309, 476: 309, 309}, + {6: 316, 316, 316, 316, 384: 316, 386: 316, 394: 316, 399: 316, 473: 316}, + {6: 289, 289, 289, 289, 384: 289, 386: 289, 394: 289, 399: 289, 402: 289, 473: 289, 476: 289, 289, 479: 1997, 481: 1995, 483: 1996, 632: 1999, 1998, 684: 2000, 1994}, + {6: 307, 307, 307, 307, 384: 307, 386: 307, 394: 307, 399: 307, 402: 307, 473: 307, 476: 307, 307, 479: 307, 481: 307, 483: 307}, + // 835 + {6: 311, 311, 311, 311, 384: 311, 386: 311, 394: 311, 399: 311, 402: 311, 473: 311, 476: 311, 311}, + {382: 2017, 475: 2018, 576: 2021}, + {382: 2017, 475: 2018, 576: 2020}, + {382: 
2017, 475: 2018, 576: 2019}, + {373: 301, 409: 2002, 753: 2003}, + // 840 + {6: 291, 291, 291, 291, 384: 291, 386: 291, 394: 291, 399: 291, 402: 291, 473: 291, 476: 291, 291, 479: 291, 481: 291, 483: 291}, + {6: 288, 288, 288, 288, 384: 288, 386: 288, 394: 288, 399: 288, 402: 288, 473: 288, 476: 288, 288, 479: 1997, 481: 1995, 483: 1996, 632: 2001, 1998}, + {6: 290, 290, 290, 290, 384: 290, 386: 290, 394: 290, 399: 290, 402: 290, 473: 290, 476: 290, 290, 479: 290, 481: 290, 483: 290}, + {386: 2013, 399: 2014, 476: 2012}, + {373: 2004}, + // 845 + {1309, 1332, 1217, 1442, 1436, 1426, 8: 296, 296, 1280, 1229, 1477, 1511, 1504, 1497, 1507, 1500, 1499, 1501, 1517, 1509, 1503, 1515, 1516, 1513, 1514, 1502, 1498, 1505, 1506, 1508, 1512, 1510, 1547, 1453, 1451, 1452, 1314, 1216, 1226, 1441, 1244, 1288, 1246, 1225, 1260, 1263, 1434, 1299, 1335, 1522, 1521, 1270, 1338, 1298, 1476, 1221, 1231, 1340, 1439, 1341, 1257, 1518, 1519, 1438, 1326, 1350, 1273, 1278, 1430, 1431, 1283, 1289, 1384, 1296, 1432, 1433, 1219, 1222, 1224, 1223, 1238, 1237, 1482, 1427, 1243, 1249, 1261, 1262, 1250, 1485, 1405, 1318, 1319, 1279, 1450, 1290, 1293, 1292, 1415, 1295, 1300, 1301, 1402, 1214, 1529, 1215, 1218, 1460, 1387, 1304, 1220, 1310, 1348, 1349, 1345, 1530, 1531, 1532, 1406, 1576, 1478, 1479, 1467, 1480, 1227, 1394, 1533, 1312, 1396, 1228, 1381, 1481, 1360, 1308, 1230, 1329, 1232, 1233, 1313, 1311, 1234, 1408, 1534, 1535, 1404, 1235, 1536, 1468, 1236, 1537, 1538, 1239, 1240, 1388, 1324, 1483, 1417, 1241, 1484, 1242, 1245, 1247, 1248, 1251, 1386, 1351, 1252, 1577, 1435, 1356, 1253, 1461, 1401, 1574, 1254, 1539, 1411, 1255, 1256, 1580, 1258, 1259, 1346, 1540, 1322, 1541, 1418, 1459, 1264, 1307, 1210, 1462, 1403, 1337, 1542, 1265, 1543, 1544, 1389, 1407, 1412, 1325, 1398, 1486, 1457, 1268, 1266, 1334, 1419, 1267, 1456, 1458, 1315, 1546, 1473, 1472, 1376, 1377, 1316, 1378, 1379, 1390, 1365, 1545, 1317, 1366, 1463, 1302, 1361, 1269, 1400, 1573, 1344, 1466, 1469, 1420, 1487, 1488, 1464, 1465, 1353, 1470, 1548, 1454, 1354, 1331, 1285, 1524, 1575, 1410, 1422, 1425, 1352, 1271, 1475, 1474, 1525, 1367, 1550, 1368, 1272, 1343, 1362, 1363, 1364, 1489, 1321, 1370, 1369, 1274, 1549, 1395, 1275, 1528, 1527, 1383, 1424, 1276, 1437, 1327, 1455, 1380, 1328, 1342, 1277, 1385, 1359, 1320, 1490, 1371, 1429, 1393, 1372, 1471, 1333, 1373, 1374, 1281, 1423, 1382, 1375, 1282, 1305, 1414, 1523, 1416, 1336, 1339, 1443, 1444, 1445, 1446, 1447, 1448, 1449, 1578, 1491, 1358, 1494, 1495, 1493, 1492, 1357, 1428, 1284, 1554, 1555, 1556, 1557, 1579, 1551, 1397, 1287, 1286, 1552, 1553, 1355, 1413, 1409, 1421, 1440, 1391, 1291, 1496, 1561, 1562, 1563, 1564, 1565, 1566, 1568, 1567, 1569, 1570, 1571, 1520, 1294, 1323, 1572, 1297, 1330, 1392, 1306, 1558, 1559, 1560, 1347, 1303, 1526, 1399, 383: 2007, 523: 2006, 1212, 1213, 1211, 615: 2005}, + {8: 2008, 2009}, + {8: 295, 295}, + {8: 293, 293}, + {6: 297, 297, 297, 297, 384: 297, 386: 297, 394: 297, 399: 297, 402: 297, 473: 297, 476: 297, 297, 479: 297, 481: 297, 483: 297}, + // 850 + {1309, 1332, 1217, 1442, 1436, 1426, 10: 1280, 1229, 1477, 1511, 1504, 1497, 1507, 1500, 1499, 1501, 1517, 1509, 1503, 1515, 1516, 1513, 1514, 1502, 1498, 1505, 1506, 1508, 1512, 1510, 1547, 1453, 1451, 1452, 1314, 1216, 1226, 1441, 1244, 1288, 1246, 1225, 1260, 1263, 1434, 1299, 1335, 1522, 1521, 1270, 1338, 1298, 1476, 1221, 1231, 1340, 1439, 1341, 1257, 1518, 1519, 1438, 1326, 1350, 1273, 1278, 1430, 1431, 1283, 1289, 1384, 1296, 1432, 1433, 1219, 1222, 1224, 1223, 1238, 1237, 1482, 1427, 1243, 1249, 1261, 1262, 1250, 
1485, 1405, 1318, 1319, 1279, 1450, 1290, 1293, 1292, 1415, 1295, 1300, 1301, 1402, 1214, 1529, 1215, 1218, 1460, 1387, 1304, 1220, 1310, 1348, 1349, 1345, 1530, 1531, 1532, 1406, 1576, 1478, 1479, 1467, 1480, 1227, 1394, 1533, 1312, 1396, 1228, 1381, 1481, 1360, 1308, 1230, 1329, 1232, 1233, 1313, 1311, 1234, 1408, 1534, 1535, 1404, 1235, 1536, 1468, 1236, 1537, 1538, 1239, 1240, 1388, 1324, 1483, 1417, 1241, 1484, 1242, 1245, 1247, 1248, 1251, 1386, 1351, 1252, 1577, 1435, 1356, 1253, 1461, 1401, 1574, 1254, 1539, 1411, 1255, 1256, 1580, 1258, 1259, 1346, 1540, 1322, 1541, 1418, 1459, 1264, 1307, 1210, 1462, 1403, 1337, 1542, 1265, 1543, 1544, 1389, 1407, 1412, 1325, 1398, 1486, 1457, 1268, 1266, 1334, 1419, 1267, 1456, 1458, 1315, 1546, 1473, 1472, 1376, 1377, 1316, 1378, 1379, 1390, 1365, 1545, 1317, 1366, 1463, 1302, 1361, 1269, 1400, 1573, 1344, 1466, 1469, 1420, 1487, 1488, 1464, 1465, 1353, 1470, 1548, 1454, 1354, 1331, 1285, 1524, 1575, 1410, 1422, 1425, 1352, 1271, 1475, 1474, 1525, 1367, 1550, 1368, 1272, 1343, 1362, 1363, 1364, 1489, 1321, 1370, 1369, 1274, 1549, 1395, 1275, 1528, 1527, 1383, 1424, 1276, 1437, 1327, 1455, 1380, 1328, 1342, 1277, 1385, 1359, 1320, 1490, 1371, 1429, 1393, 1372, 1471, 1333, 1373, 1374, 1281, 1423, 1382, 1375, 1282, 1305, 1414, 1523, 1416, 1336, 1339, 1443, 1444, 1445, 1446, 1447, 1448, 1449, 1578, 1491, 1358, 1494, 1495, 1493, 1492, 1357, 1428, 1284, 1554, 1555, 1556, 1557, 1579, 1551, 1397, 1287, 1286, 1552, 1553, 1355, 1413, 1409, 1421, 1440, 1391, 1291, 1496, 1561, 1562, 1563, 1564, 1565, 1566, 1568, 1567, 1569, 1570, 1571, 1520, 1294, 1323, 1572, 1297, 1330, 1392, 1306, 1558, 1559, 1560, 1347, 1303, 1526, 1399, 383: 2011, 523: 2010, 1212, 1213, 1211}, + {8: 294, 294}, + {8: 292, 292}, + {373: 300}, + {608: 2016}, + // 855 + {608: 2015}, + {373: 298}, + {373: 299}, + {1124, 1124, 1124, 1124, 1124, 1124, 10: 1124, 1124, 1124, 1124, 1124, 1124, 1124, 1124, 1124, 1124, 1124, 1124, 1124, 1124, 1124, 1124, 1124, 1124, 1124, 1124, 1124, 1124, 1124, 1124, 1124, 1124, 1124, 1124, 1124, 1124, 1124, 1124, 1124, 1124, 1124, 1124, 1124, 1124, 1124, 1124, 1124, 1124, 1124, 1124, 1124, 1124, 1124, 1124, 1124, 1124, 1124, 1124, 1124, 1124, 1124, 1124, 1124, 1124, 1124, 1124, 1124, 1124, 1124, 1124, 1124, 1124, 1124, 1124, 1124, 1124, 1124, 1124, 1124, 1124, 1124, 1124, 1124, 1124, 1124, 1124, 1124, 1124, 1124, 1124, 1124, 1124, 1124, 1124, 1124, 1124, 1124, 1124, 1124, 1124, 1124, 1124, 1124, 1124, 1124, 1124, 1124, 1124, 1124, 1124, 1124, 1124, 1124, 1124, 1124, 1124, 1124, 1124, 1124, 1124, 1124, 1124, 1124, 1124, 1124, 1124, 1124, 1124, 1124, 1124, 1124, 1124, 1124, 1124, 1124, 1124, 1124, 1124, 1124, 1124, 1124, 1124, 1124, 1124, 1124, 1124, 1124, 1124, 1124, 1124, 1124, 1124, 1124, 1124, 1124, 1124, 1124, 1124, 1124, 1124, 1124, 1124, 1124, 1124, 1124, 1124, 1124, 1124, 1124, 1124, 1124, 1124, 1124, 1124, 1124, 1124, 1124, 1124, 1124, 1124, 1124, 1124, 1124, 1124, 1124, 1124, 1124, 1124, 1124, 1124, 1124, 1124, 1124, 1124, 1124, 1124, 1124, 1124, 1124, 1124, 1124, 1124, 1124, 1124, 1124, 1124, 1124, 1124, 1124, 1124, 1124, 1124, 1124, 1124, 1124, 1124, 1124, 1124, 1124, 1124, 1124, 1124, 1124, 1124, 1124, 1124, 1124, 1124, 1124, 1124, 1124, 1124, 1124, 1124, 1124, 1124, 1124, 1124, 1124, 1124, 1124, 1124, 1124, 1124, 1124, 1124, 1124, 1124, 1124, 1124, 1124, 1124, 1124, 1124, 1124, 1124, 1124, 1124, 1124, 1124, 1124, 1124, 1124, 1124, 1124, 1124, 1124, 1124, 1124, 1124, 1124, 1124, 1124, 1124, 1124, 1124, 1124, 1124, 1124, 1124, 1124, 1124, 1124, 1124, 
1124, 1124, 1124, 1124, 1124, 1124, 1124, 1124, 1124, 1124, 1124, 1124, 1124, 1124, 1124, 1124, 1124, 1124, 1124, 1124, 1124, 1124, 1124, 1124, 1124, 1124, 1124, 1124, 1124, 1124, 1124, 1124, 1124, 1124, 1124, 1124, 1124, 1124, 1124, 1124, 1124, 1124, 1124, 1124, 1124, 1124, 1124, 1124, 1124, 1124, 1124, 1124, 1124, 1124, 1124, 1124, 1124, 1124, 1124, 1124, 1124, 1124, 1124, 1124, 1124, 1124, 1124, 1124, 1124, 1124, 1124, 1124, 1124, 1124, 1124, 1124, 1124, 1124, 1124, 1124, 1124, 1124, 1124, 1124, 373: 1124, 391: 1124, 405: 1124, 409: 1124}, + {1123, 1123, 1123, 1123, 1123, 1123, 10: 1123, 1123, 1123, 1123, 1123, 1123, 1123, 1123, 1123, 1123, 1123, 1123, 1123, 1123, 1123, 1123, 1123, 1123, 1123, 1123, 1123, 1123, 1123, 1123, 1123, 1123, 1123, 1123, 1123, 1123, 1123, 1123, 1123, 1123, 1123, 1123, 1123, 1123, 1123, 1123, 1123, 1123, 1123, 1123, 1123, 1123, 1123, 1123, 1123, 1123, 1123, 1123, 1123, 1123, 1123, 1123, 1123, 1123, 1123, 1123, 1123, 1123, 1123, 1123, 1123, 1123, 1123, 1123, 1123, 1123, 1123, 1123, 1123, 1123, 1123, 1123, 1123, 1123, 1123, 1123, 1123, 1123, 1123, 1123, 1123, 1123, 1123, 1123, 1123, 1123, 1123, 1123, 1123, 1123, 1123, 1123, 1123, 1123, 1123, 1123, 1123, 1123, 1123, 1123, 1123, 1123, 1123, 1123, 1123, 1123, 1123, 1123, 1123, 1123, 1123, 1123, 1123, 1123, 1123, 1123, 1123, 1123, 1123, 1123, 1123, 1123, 1123, 1123, 1123, 1123, 1123, 1123, 1123, 1123, 1123, 1123, 1123, 1123, 1123, 1123, 1123, 1123, 1123, 1123, 1123, 1123, 1123, 1123, 1123, 1123, 1123, 1123, 1123, 1123, 1123, 1123, 1123, 1123, 1123, 1123, 1123, 1123, 1123, 1123, 1123, 1123, 1123, 1123, 1123, 1123, 1123, 1123, 1123, 1123, 1123, 1123, 1123, 1123, 1123, 1123, 1123, 1123, 1123, 1123, 1123, 1123, 1123, 1123, 1123, 1123, 1123, 1123, 1123, 1123, 1123, 1123, 1123, 1123, 1123, 1123, 1123, 1123, 1123, 1123, 1123, 1123, 1123, 1123, 1123, 1123, 1123, 1123, 1123, 1123, 1123, 1123, 1123, 1123, 1123, 1123, 1123, 1123, 1123, 1123, 1123, 1123, 1123, 1123, 1123, 1123, 1123, 1123, 1123, 1123, 1123, 1123, 1123, 1123, 1123, 1123, 1123, 1123, 1123, 1123, 1123, 1123, 1123, 1123, 1123, 1123, 1123, 1123, 1123, 1123, 1123, 1123, 1123, 1123, 1123, 1123, 1123, 1123, 1123, 1123, 1123, 1123, 1123, 1123, 1123, 1123, 1123, 1123, 1123, 1123, 1123, 1123, 1123, 1123, 1123, 1123, 1123, 1123, 1123, 1123, 1123, 1123, 1123, 1123, 1123, 1123, 1123, 1123, 1123, 1123, 1123, 1123, 1123, 1123, 1123, 1123, 1123, 1123, 1123, 1123, 1123, 1123, 1123, 1123, 1123, 1123, 1123, 1123, 1123, 1123, 1123, 1123, 1123, 1123, 1123, 1123, 1123, 1123, 1123, 1123, 1123, 1123, 1123, 1123, 1123, 1123, 1123, 1123, 1123, 1123, 1123, 1123, 1123, 1123, 1123, 1123, 1123, 1123, 1123, 1123, 1123, 1123, 1123, 1123, 1123, 1123, 1123, 1123, 1123, 1123, 1123, 1123, 1123, 1123, 1123, 1123, 1123, 1123, 373: 1123, 391: 1123, 405: 1123, 409: 1123}, + // 860 + {373: 302, 409: 302}, + {373: 303, 409: 303}, + {373: 304, 409: 304}, + {1309, 1332, 1217, 1442, 1436, 1426, 10: 1280, 1229, 1477, 1511, 1504, 1497, 1507, 1500, 1499, 1501, 1517, 1509, 1503, 1515, 1516, 1513, 1514, 1502, 1498, 1505, 1506, 1508, 1512, 1510, 1547, 1453, 1451, 1452, 1314, 1216, 1226, 1441, 1244, 1288, 1246, 1225, 1260, 1263, 1434, 1299, 1335, 1522, 1521, 1270, 1338, 1298, 1476, 1221, 1231, 1340, 1439, 1341, 1257, 1518, 1519, 1438, 1326, 1350, 1273, 1278, 1430, 1431, 1283, 1289, 1384, 1296, 1432, 1433, 1219, 1222, 1224, 1223, 1238, 1237, 1482, 1427, 1243, 1249, 1261, 1262, 1250, 1485, 1405, 1318, 1319, 1279, 1450, 1290, 1293, 1292, 1415, 1295, 1300, 1301, 1402, 1214, 1529, 1215, 1218, 1460, 1387, 1304, 1220, 
1310, 1348, 1349, 1345, 1530, 1531, 1532, 1406, 1576, 1478, 1479, 1467, 1480, 1227, 1394, 1533, 1312, 1396, 1228, 1381, 1481, 1360, 1308, 1230, 1329, 1232, 1233, 1313, 1311, 1234, 1408, 1534, 1535, 1404, 1235, 1536, 1468, 1236, 1537, 1538, 1239, 1240, 1388, 1324, 1483, 1417, 1241, 1484, 1242, 1245, 1247, 1248, 1251, 1386, 1351, 1252, 1577, 1435, 1356, 1253, 1461, 1401, 1574, 1254, 1539, 1411, 1255, 1256, 1580, 1258, 1259, 1346, 1540, 1322, 1541, 1418, 1459, 1264, 1307, 1210, 1462, 1403, 1337, 1542, 1265, 1543, 1544, 1389, 1407, 1412, 1325, 1398, 1486, 1457, 1268, 1266, 1334, 1419, 1267, 1456, 1458, 1315, 1546, 1473, 1472, 1376, 1377, 1316, 1378, 1379, 1390, 1365, 1545, 1317, 1366, 1463, 1302, 1361, 1269, 1400, 1573, 1344, 1466, 1469, 1420, 1487, 1488, 1464, 1465, 1353, 1470, 1548, 1454, 1354, 1331, 1285, 1524, 1575, 1410, 1422, 1425, 1352, 1271, 1475, 1474, 1525, 1367, 1550, 1368, 1272, 1343, 1362, 1363, 1364, 1489, 1321, 1370, 1369, 1274, 1549, 1395, 1275, 1528, 1527, 1383, 1424, 1276, 1437, 1327, 1455, 1380, 1328, 1342, 1277, 1385, 1359, 1320, 1490, 1371, 1429, 1393, 1372, 1471, 1333, 1373, 1374, 1281, 1423, 1382, 1375, 1282, 1305, 1414, 1523, 1416, 1336, 1339, 1443, 1444, 1445, 1446, 1447, 1448, 1449, 1578, 1491, 1358, 1494, 1495, 1493, 1492, 1357, 1428, 1284, 1554, 1555, 1556, 1557, 1579, 1551, 1397, 1287, 1286, 1552, 1553, 1355, 1413, 1409, 1421, 1440, 1391, 1291, 1496, 1561, 1562, 1563, 1564, 1565, 1566, 1568, 1567, 1569, 1570, 1571, 1520, 1294, 1323, 1572, 1297, 1330, 1392, 1306, 1558, 1559, 1560, 1347, 1303, 1526, 1399, 373: 1981, 523: 1584, 1212, 1213, 1211, 555: 1980, 600: 1979, 602: 1978, 2023}, + {402: 2024, 476: 2026, 2027, 626: 2025}, + // 865 + {6: 314, 314, 314, 314, 384: 314, 386: 314, 394: 314, 399: 314, 473: 314}, + {1309, 1332, 1217, 1442, 1436, 1426, 10: 1280, 1229, 1477, 1511, 1504, 1497, 1507, 1500, 1499, 1501, 1517, 1509, 1503, 1515, 1516, 1513, 1514, 1502, 1498, 1505, 1506, 1508, 1512, 1510, 1547, 1453, 1451, 1452, 1314, 1216, 1226, 1441, 1244, 1288, 1246, 1225, 1260, 1263, 1434, 1299, 1335, 1522, 1521, 1270, 1338, 1298, 1476, 1221, 1231, 1340, 1439, 1341, 1257, 1518, 1519, 1438, 1326, 1350, 1273, 1278, 1430, 1431, 1283, 1289, 1384, 1296, 1432, 1433, 1219, 1222, 1224, 1223, 1238, 1237, 1482, 1427, 1243, 1249, 1261, 1262, 1250, 1485, 1405, 1318, 1319, 1279, 1450, 1290, 1293, 1292, 1415, 1295, 1300, 1301, 1402, 1214, 1529, 1215, 1218, 1460, 1387, 1304, 1220, 1310, 1348, 1349, 1345, 1530, 1531, 1532, 1406, 1576, 1478, 1479, 1467, 1480, 1227, 1394, 1533, 1312, 1396, 1228, 1381, 1481, 1360, 1308, 1230, 1329, 1232, 1233, 1313, 1311, 1234, 1408, 1534, 1535, 1404, 1235, 1536, 1468, 1236, 1537, 1538, 1239, 1240, 1388, 1324, 1483, 1417, 1241, 1484, 1242, 1245, 1247, 1248, 1251, 1386, 1351, 1252, 1577, 1435, 1356, 1253, 1461, 1401, 1574, 1254, 1539, 1411, 1255, 1256, 1580, 1258, 1259, 1346, 1540, 1322, 1541, 1418, 1459, 1264, 1307, 1210, 1462, 1403, 1337, 1542, 1265, 1543, 1544, 1389, 1407, 1412, 1325, 1398, 1486, 1457, 1268, 1266, 1334, 1419, 1267, 1456, 1458, 1315, 1546, 1473, 1472, 1376, 1377, 1316, 1378, 1379, 1390, 1365, 1545, 1317, 1366, 1463, 1302, 1361, 1269, 1400, 1573, 1344, 1466, 1469, 1420, 1487, 1488, 1464, 1465, 1353, 1470, 1548, 1454, 1354, 1331, 1285, 1524, 1575, 1410, 1422, 1425, 1352, 1271, 1475, 1474, 1525, 1367, 1550, 1368, 1272, 1343, 1362, 1363, 1364, 1489, 1321, 1370, 1369, 1274, 1549, 1395, 1275, 1528, 1527, 1383, 1424, 1276, 1437, 1327, 1455, 1380, 1328, 1342, 1277, 1385, 1359, 1320, 1490, 1371, 1429, 1393, 1372, 1471, 1333, 1373, 1374, 1281, 1423, 
1382, 1375, 1282, 1305, 1414, 1523, 1416, 1336, 1339, 1443, 1444, 1445, 1446, 1447, 1448, 1449, 1578, 1491, 1358, 1494, 1495, 1493, 1492, 1357, 1428, 1284, 1554, 1555, 1556, 1557, 1579, 1551, 1397, 1287, 1286, 1552, 1553, 1355, 1413, 1409, 1421, 1440, 1391, 1291, 1496, 1561, 1562, 1563, 1564, 1565, 1566, 1568, 1567, 1569, 1570, 1571, 1520, 1294, 1323, 1572, 1297, 1330, 1392, 1306, 1558, 1559, 1560, 1347, 1303, 1526, 1399, 373: 1981, 523: 1584, 1212, 1213, 1211, 555: 1980, 600: 1979, 602: 1978, 2029}, + {282, 282, 282, 282, 282, 282, 10: 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 373: 282}, + {476: 2028}, + {281, 281, 281, 281, 281, 281, 10: 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 
281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 373: 281}, + // 870 + {6: 287, 287, 287, 287, 384: 287, 386: 287, 394: 287, 399: 287, 402: 287, 473: 287, 476: 287, 287, 626: 2025}, + {6: 210, 210, 210, 384: 210, 386: 210, 394: 210, 399: 2031, 747: 2033, 774: 2032}, + {608: 2037}, + {6: 917, 917, 917, 384: 917, 386: 917, 394: 2034, 748: 2035}, + {6: 209, 209, 209, 384: 209, 386: 209, 394: 209}, + // 875 + {1309, 1332, 1217, 1442, 1436, 1426, 10: 1280, 1625, 1477, 1511, 1504, 1497, 1507, 1500, 1499, 1501, 1517, 1509, 1503, 1515, 1516, 1513, 1514, 1502, 1498, 1505, 1506, 1508, 1512, 1510, 1547, 1453, 1451, 1452, 1314, 1623, 1226, 1441, 1244, 1288, 1246, 1225, 1628, 1263, 1434, 1299, 1335, 1522, 1521, 1270, 1338, 1298, 1476, 1221, 1231, 1340, 1439, 1341, 1257, 1518, 1519, 1438, 1326, 1350, 1273, 1278, 1430, 1431, 1283, 1289, 1384, 1632, 1432, 1433, 1219, 1222, 1224, 1223, 1238, 1626, 1482, 1427, 1243, 1249, 1261, 1262, 1250, 1485, 1405, 1318, 1319, 1279, 1450, 1290, 1631, 1630, 1415, 1295, 1300, 1633, 1402, 1214, 1529, 1215, 1218, 1460, 1387, 1304, 1624, 1310, 1348, 1349, 1345, 1530, 1531, 1532, 1406, 1576, 1478, 1479, 1467, 1480, 1227, 1394, 1533, 1312, 1396, 1228, 1381, 1481, 1640, 1636, 1230, 1329, 1232, 1233, 1313, 1311, 1234, 1408, 1534, 1535, 1404, 1235, 1536, 1468, 1236, 1537, 1538, 1627, 1240, 1388, 1324, 1483, 1417, 1241, 1484, 1242, 1245, 1247, 1248, 1251, 1386, 1351, 1252, 1577, 1435, 1356, 1253, 1461, 1401, 1574, 1254, 1539, 1411, 1255, 1256, 1580, 1258, 1259, 1346, 1540, 1322, 1541, 1418, 1459, 1629, 1307, 1622, 1462, 1403, 1337, 1542, 1265, 1543, 1544, 1389, 1407, 1412, 1325, 1398, 1486, 1457, 1268, 1266, 1334, 1419, 1267, 1456, 1458, 1315, 1546, 1473, 1472, 1376, 1377, 1316, 1378, 1379, 1390, 1642, 1545, 1317, 1643, 1463, 1302, 1641, 1269, 1400, 1573, 1344, 1466, 1469, 1420, 1487, 1488, 1464, 1465, 1353, 1470, 1548, 1454, 1354, 1331, 1285, 1524, 1575, 1410, 1422, 1425, 1352, 1271, 1475, 1474, 1525, 1367, 1550, 1368, 1272, 1343, 1362, 1363, 1364, 1489, 1637, 1370, 1369, 1274, 1549, 1395, 1275, 1528, 1527, 1383, 1424, 1276, 1437, 1327, 1455, 1380, 1328, 1638, 1277, 1385, 1639, 1320, 1490, 1644, 1429, 1393, 1372, 1471, 1333, 1373, 1374, 1281, 1423, 1382, 1375, 1282, 1305, 1414, 1523, 1416, 1336, 1339, 1443, 1444, 1445, 1446, 1447, 1448, 1449, 1578, 1491, 1358, 1494, 1495, 1493, 1492, 1357, 1428, 1284, 1554, 1555, 1556, 1557, 1579, 1551, 1397, 1287, 1286, 1552, 1553, 1355, 1413, 1409, 1421, 1440, 1391, 1291, 1496, 1561, 1562, 1563, 1564, 1565, 1566, 1568, 1567, 1569, 1570, 1571, 1520, 1294, 1323, 1572, 1297, 1330, 1392, 1635, 1558, 1559, 1560, 1347, 1634, 1526, 1399, 1618, 1671, 1674, 376: 1646, 378: 1655, 1669, 1668, 1699, 400: 1658, 404: 1616, 1677, 1650, 410: 1682, 419: 1645, 1647, 429: 1675, 1649, 1648, 433: 1676, 435: 1654, 1680, 1689, 1711, 1653, 1690, 1691, 1652, 1666, 1667, 1705, 1697, 1700, 1706, 1707, 1702, 1703, 1708, 1701, 1704, 1695, 1673, 1685, 1686, 
1688, 1684, 1678, 1679, 1670, 1681, 1683, 1672, 1687, 1692, 1693, 523: 1657, 1212, 1213, 1211, 1663, 1659, 1651, 1662, 1660, 1661, 1694, 1698, 1696, 1656, 1665, 1709, 1710, 1664, 1621, 1620, 1619, 2036}, + {6: 323, 323, 323, 384: 323, 386: 323}, + {6: 916, 916, 916, 384: 916, 386: 916, 392: 1720, 1719, 395: 1718, 1717, 1715, 547: 1716, 1714}, + {1309, 1332, 1217, 1442, 1436, 1426, 10: 1280, 1625, 1477, 1511, 1504, 1497, 1507, 1500, 1499, 1501, 1517, 1509, 1503, 1515, 1516, 1513, 1514, 1502, 1498, 1505, 1506, 1508, 1512, 1510, 1547, 1453, 1451, 1452, 1314, 1623, 1226, 1441, 1244, 1288, 1246, 1225, 1628, 1263, 1434, 1299, 1335, 1522, 1521, 1270, 1338, 1298, 1476, 1221, 1231, 1340, 1439, 1341, 1257, 1518, 1519, 1438, 1326, 1350, 1273, 1278, 1430, 1431, 1283, 1289, 1384, 1632, 1432, 1433, 1219, 1222, 1224, 1223, 1238, 1626, 1482, 1427, 1243, 1249, 1261, 1262, 1250, 1485, 1405, 1318, 1319, 1279, 1450, 1290, 1631, 1630, 1415, 1295, 1300, 1633, 1402, 1214, 1529, 1215, 1218, 1460, 1387, 1304, 1624, 1310, 1348, 1349, 1345, 1530, 1531, 1532, 1406, 1576, 1478, 1479, 1467, 1480, 1227, 1394, 1533, 1312, 1396, 1228, 1381, 1481, 1640, 1636, 1230, 1329, 1232, 1233, 1313, 1311, 1234, 1408, 1534, 1535, 1404, 1235, 1536, 1468, 1236, 1537, 1538, 1627, 1240, 1388, 1324, 1483, 1417, 1241, 1484, 1242, 1245, 1247, 1248, 1251, 1386, 1351, 1252, 1577, 1435, 1356, 1253, 1461, 1401, 1574, 1254, 1539, 1411, 1255, 1256, 1580, 1258, 1259, 1346, 1540, 1322, 1541, 1418, 1459, 1629, 1307, 1622, 1462, 1403, 1337, 1542, 1265, 1543, 1544, 1389, 1407, 1412, 1325, 1398, 1486, 1457, 1268, 1266, 1334, 1419, 1267, 1456, 1458, 1315, 1546, 1473, 1472, 1376, 1377, 1316, 1378, 1379, 1390, 1642, 1545, 1317, 1643, 1463, 1302, 1641, 1269, 1400, 1573, 1344, 1466, 1469, 1420, 1487, 1488, 1464, 1465, 1353, 1470, 1548, 1454, 1354, 1331, 1285, 1524, 1575, 1410, 1422, 1425, 1352, 1271, 1475, 1474, 1525, 1367, 1550, 1368, 1272, 1343, 1362, 1363, 1364, 1489, 1637, 1370, 1369, 1274, 1549, 1395, 1275, 1528, 1527, 1383, 1424, 1276, 1437, 1327, 1455, 1380, 1328, 1638, 1277, 1385, 1639, 1320, 1490, 1644, 1429, 1393, 1372, 1471, 1333, 1373, 1374, 1281, 1423, 1382, 1375, 1282, 1305, 1414, 1523, 1416, 1336, 1339, 1443, 1444, 1445, 1446, 1447, 1448, 1449, 1578, 1491, 1358, 1494, 1495, 1493, 1492, 1357, 1428, 1284, 1554, 1555, 1556, 1557, 1579, 1551, 1397, 1287, 1286, 1552, 1553, 1355, 1413, 1409, 1421, 1440, 1391, 1291, 1496, 1561, 1562, 1563, 1564, 1565, 1566, 1568, 1567, 1569, 1570, 1571, 1520, 1294, 1323, 1572, 1297, 1330, 1392, 1635, 1558, 1559, 1560, 1347, 1634, 1526, 1399, 1618, 1671, 1674, 376: 1646, 378: 1655, 1669, 1668, 1699, 400: 1658, 404: 1616, 1677, 1650, 410: 1682, 419: 1645, 1647, 429: 1675, 1649, 1648, 433: 1676, 435: 1654, 1680, 1689, 1711, 1653, 1690, 1691, 1652, 1666, 1667, 1705, 1697, 1700, 1706, 1707, 1702, 1703, 1708, 1701, 1704, 1695, 1673, 1685, 1686, 1688, 1684, 1678, 1679, 1670, 1681, 1683, 1672, 1687, 1692, 1693, 523: 1657, 1212, 1213, 1211, 1663, 1659, 1651, 1662, 1660, 1661, 1694, 1698, 1696, 1656, 1665, 1709, 1710, 1664, 1621, 1620, 1619, 1958, 623: 1960, 653: 2038}, + {6: 918, 918, 918, 1961, 384: 918, 386: 918, 394: 918}, + // 880 + {6: 324, 324, 324, 384: 324, 386: 324}, + {1309, 1332, 1217, 1442, 1436, 1426, 10: 1280, 1625, 1477, 1511, 1504, 1497, 1507, 1500, 1499, 1501, 1517, 1509, 1503, 1515, 1516, 1513, 1514, 1502, 1498, 1505, 1506, 1508, 1512, 1510, 1547, 1453, 1451, 1452, 1314, 1623, 1226, 1441, 1244, 1288, 1246, 1225, 1628, 1263, 1434, 1299, 1335, 1522, 1521, 1270, 1338, 1298, 1476, 1221, 1231, 1340, 1439, 1341, 
1257, 1518, 1519, 1438, 1326, 1350, 1273, 1278, 1430, 1431, 1283, 1289, 1384, 1632, 1432, 1433, 1219, 1222, 1224, 1223, 1238, 1626, 1482, 1427, 1243, 1249, 1261, 1262, 1250, 1485, 1405, 1318, 1319, 1279, 1450, 1290, 1631, 1630, 1415, 1295, 1300, 1633, 1402, 1214, 1529, 1215, 1218, 1460, 1387, 1304, 1624, 1310, 1348, 1349, 1345, 1530, 1531, 1532, 1406, 1576, 1478, 1479, 1467, 1480, 1227, 1394, 1533, 1312, 1396, 1228, 1381, 1481, 1640, 1636, 1230, 1329, 1232, 1233, 1313, 1311, 1234, 1408, 1534, 1535, 1404, 1235, 1536, 1468, 1236, 1537, 1538, 1627, 1240, 1388, 1324, 1483, 1417, 1241, 1484, 1242, 1245, 1247, 1248, 1251, 1386, 1351, 1252, 1577, 1435, 1356, 1253, 1461, 1401, 1574, 1254, 1539, 1411, 1255, 1256, 1580, 1258, 1259, 1346, 1540, 1322, 1541, 1418, 1459, 1629, 1307, 1622, 1462, 1403, 1337, 1542, 1265, 1543, 1544, 1389, 1407, 1412, 1325, 1398, 1486, 1457, 1268, 1266, 1334, 1419, 1267, 1456, 1458, 1315, 1546, 1473, 1472, 1376, 1377, 1316, 1378, 1379, 1390, 1642, 1545, 1317, 1643, 1463, 1302, 1641, 1269, 1400, 1573, 1344, 1466, 1469, 1420, 1487, 1488, 1464, 1465, 1353, 1470, 1548, 1454, 1354, 1331, 1285, 1524, 1575, 1410, 1422, 1425, 1352, 1271, 1475, 1474, 1525, 1367, 1550, 1368, 1272, 1343, 1362, 1363, 1364, 1489, 1637, 1370, 1369, 1274, 1549, 1395, 1275, 1528, 1527, 1383, 1424, 1276, 1437, 1327, 1455, 1380, 1328, 1638, 1277, 1385, 1639, 1320, 1490, 1644, 1429, 1393, 1372, 1471, 1333, 1373, 1374, 1281, 1423, 1382, 1375, 1282, 1305, 1414, 1523, 1416, 1336, 1339, 1443, 1444, 1445, 1446, 1447, 1448, 1449, 1578, 1491, 1358, 1494, 1495, 1493, 1492, 1357, 1428, 1284, 1554, 1555, 1556, 1557, 1579, 1551, 1397, 1287, 1286, 1552, 1553, 1355, 1413, 1409, 1421, 1440, 1391, 1291, 1496, 1561, 1562, 1563, 1564, 1565, 1566, 1568, 1567, 1569, 1570, 1571, 1520, 1294, 1323, 1572, 1297, 1330, 1392, 1635, 1558, 1559, 1560, 1347, 1634, 1526, 1399, 1618, 1671, 1674, 376: 1646, 378: 1655, 1669, 1668, 1699, 400: 1658, 2213, 404: 1616, 1677, 1650, 410: 1682, 419: 1645, 1647, 429: 1675, 1649, 1648, 433: 1676, 435: 1654, 1680, 1689, 1711, 1653, 1690, 1691, 1652, 1666, 1667, 1705, 1697, 1700, 1706, 1707, 1702, 1703, 1708, 1701, 1704, 1695, 1673, 1685, 1686, 1688, 1684, 1678, 1679, 1670, 1681, 1683, 1672, 1687, 1692, 1693, 523: 2214, 1212, 1213, 1211, 1663, 1659, 1651, 1662, 1660, 1661, 1694, 1698, 1696, 1656, 1665, 1709, 1710, 1664, 1621, 1620, 1619, 2212, 549: 2215, 675: 2216, 739: 2217, 773: 2218}, + {432, 432, 432, 432, 432, 432, 10: 432, 432, 432, 432, 432, 432, 432, 432, 432, 432, 432, 432, 432, 432, 432, 432, 432, 432, 432, 432, 432, 432, 432, 432, 432, 432, 432, 432, 432, 432, 432, 432, 432, 432, 432, 432, 432, 432, 432, 432, 432, 432, 432, 432, 432, 432, 432, 432, 432, 432, 432, 432, 432, 432, 432, 432, 432, 432, 432, 432, 432, 432, 432, 432, 432, 432, 432, 432, 432, 432, 432, 432, 432, 432, 432, 432, 432, 432, 432, 432, 432, 432, 432, 432, 432, 432, 432, 432, 432, 432, 432, 432, 432, 432, 432, 432, 432, 432, 432, 432, 432, 432, 432, 432, 432, 432, 432, 432, 432, 432, 432, 432, 432, 432, 432, 432, 432, 432, 432, 432, 432, 432, 432, 432, 432, 432, 432, 432, 432, 432, 432, 432, 432, 432, 432, 432, 432, 432, 432, 432, 432, 432, 432, 432, 432, 432, 432, 432, 432, 432, 432, 432, 432, 432, 432, 432, 432, 432, 432, 432, 432, 432, 432, 432, 432, 432, 432, 432, 432, 432, 432, 432, 432, 432, 432, 432, 432, 432, 432, 432, 432, 432, 432, 432, 432, 432, 432, 432, 432, 432, 432, 432, 432, 432, 432, 432, 432, 432, 432, 432, 432, 432, 432, 432, 432, 432, 432, 432, 432, 432, 432, 432, 432, 432, 432, 432, 432, 432, 432, 432, 
432, 432, 432, 432, 432, 432, 432, 432, 432, 432, 432, 432, 432, 432, 432, 432, 432, 432, 432, 432, 432, 432, 432, 432, 432, 432, 432, 432, 432, 432, 432, 432, 432, 432, 432, 432, 432, 432, 432, 432, 432, 432, 432, 432, 432, 432, 432, 432, 432, 432, 432, 432, 432, 432, 432, 432, 432, 432, 432, 432, 432, 432, 432, 432, 432, 432, 432, 432, 432, 432, 432, 432, 432, 432, 432, 432, 432, 432, 432, 432, 432, 432, 432, 432, 432, 432, 432, 432, 432, 432, 432, 432, 432, 432, 432, 432, 432, 432, 432, 432, 432, 432, 432, 432, 432, 432, 432, 432, 432, 432, 432, 432, 432, 432, 432, 432, 432, 432, 432, 432, 432, 432, 432, 432, 432, 432, 432, 432, 432, 432, 432, 432, 432, 432, 432, 432, 432, 432, 432, 432, 432, 432, 432, 432, 432, 376: 432, 378: 432, 432, 432, 432, 400: 432, 432, 404: 432, 432, 432, 410: 432, 419: 432, 432, 429: 432, 432, 432, 433: 432, 435: 432, 432, 432, 432, 432, 432, 432, 432, 432, 432, 432, 432, 432, 432, 432, 432, 432, 432, 432, 432, 432, 432, 432, 432, 432, 432, 432, 432, 432, 432, 432, 432, 432, 432, 432, 549: 432, 551: 432, 553: 432, 557: 432, 432, 560: 432, 432, 432, 607: 2191, 611: 2189, 2190, 731: 2194, 733: 2192, 2193}, + {12: 2065, 2058, 2063, 2055, 2048, 2051, 2050, 2054, 2062, 2066, 2053, 2068, 2061, 2067, 2069, 2052, 2049, 2064, 2047, 2056, 2060, 2057, 2059, 642: 2046, 645: 2045, 737: 2044, 766: 2043}, + {9: 2185, 12: 2065, 2058, 2063, 2055, 2048, 2051, 2050, 2054, 2062, 2066, 2053, 2068, 2061, 2067, 2069, 2052, 2049, 2064, 2047, 2056, 2060, 2057, 2059, 550: 2183, 642: 2186, 645: 2184}, + // 885 + {550: 2182}, + {9: 269, 12: 269, 269, 269, 269, 269, 269, 269, 269, 269, 269, 269, 269, 269, 269, 269, 269, 269, 269, 269, 269, 269, 269, 269, 550: 269}, + {9: 268, 12: 268, 268, 268, 268, 268, 268, 268, 268, 268, 268, 268, 268, 268, 268, 268, 268, 268, 268, 268, 268, 268, 268, 268, 550: 268}, + {373: 2177}, + {373: 2172}, + // 890 + {373: 2168}, + {373: 2164}, + {373: 2160}, + {373: 2156}, + {373: 2152}, + // 895 + {373: 2148}, + {373: 2144}, + {373: 2139}, + {373: 2135}, + {373: 2129}, + // 900 + {373: 2125}, + {373: 2122}, + {373: 2116}, + {373: 2110}, + {373: 2107}, + // 905 + {373: 2104}, + {373: 2101}, + {373: 2098}, + {373: 2095}, + {373: 2092}, + // 910 + {373: 2070}, + {51: 237, 237, 404: 2072, 552: 2071}, + {51: 2077, 2076, 681: 2075, 2074, 752: 2073}, + {236, 236, 236, 236, 236, 236, 8: 236, 236, 236, 236, 236, 236, 236, 236, 236, 236, 236, 236, 236, 236, 236, 236, 236, 236, 236, 236, 236, 236, 236, 236, 236, 236, 236, 236, 236, 236, 236, 236, 236, 236, 236, 236, 236, 236, 236, 236, 236, 236, 236, 236, 236, 236, 236, 236, 236, 236, 236, 236, 236, 236, 236, 236, 236, 236, 236, 236, 236, 236, 236, 236, 236, 236, 236, 236, 236, 236, 236, 236, 236, 236, 236, 236, 236, 236, 236, 236, 236, 236, 236, 236, 236, 236, 236, 236, 236, 236, 236, 236, 236, 236, 236, 236, 236, 236, 236, 236, 236, 236, 236, 236, 236, 236, 236, 236, 236, 236, 236, 236, 236, 236, 236, 236, 236, 236, 236, 236, 236, 236, 236, 236, 236, 236, 236, 236, 236, 236, 236, 236, 236, 236, 236, 236, 236, 236, 236, 236, 236, 236, 236, 236, 236, 236, 236, 236, 236, 236, 236, 236, 236, 236, 236, 236, 236, 236, 236, 236, 236, 236, 236, 236, 236, 236, 236, 236, 236, 236, 236, 236, 236, 236, 236, 236, 236, 236, 236, 236, 236, 236, 236, 236, 236, 236, 236, 236, 236, 236, 236, 236, 236, 236, 236, 236, 236, 236, 236, 236, 236, 236, 236, 236, 236, 236, 236, 236, 236, 236, 236, 236, 236, 236, 236, 236, 236, 236, 236, 236, 236, 236, 236, 236, 236, 236, 236, 236, 236, 236, 236, 236, 236, 236, 236, 236, 236, 236, 236, 236, 236, 
236, 236, 236, 236, 236, 236, 236, 236, 236, 236, 236, 236, 236, 236, 236, 236, 236, 236, 236, 236, 236, 236, 236, 236, 236, 236, 236, 236, 236, 236, 236, 236, 236, 236, 236, 236, 236, 236, 236, 236, 236, 236, 236, 236, 236, 236, 236, 236, 236, 236, 236, 236, 236, 236, 236, 236, 236, 236, 236, 236, 236, 236, 236, 236, 236, 236, 236, 236, 236, 236, 236, 236, 236, 236, 236, 236, 236, 236, 236, 236, 236, 236, 236, 236, 236, 236, 236, 236, 236, 236, 236, 236, 236, 236, 236, 236, 236, 236, 236, 236, 236, 236, 236, 236, 236, 236, 236, 236, 236, 236, 236, 236, 236, 236, 236, 236, 236, 236, 236, 236, 236, 236, 236, 383: 236, 406: 236, 419: 236, 236, 488: 236}, + {8: 2089, 2090}, + // 915 + {8: 240, 240}, + {622: 2078}, + {622: 229}, + {622: 228}, + {1309, 1332, 1217, 1442, 1436, 1426, 10: 1280, 1229, 1477, 1511, 1504, 1497, 1507, 1500, 1499, 1501, 1517, 1509, 1503, 1515, 1516, 1513, 1514, 1502, 1498, 1505, 1506, 1508, 1512, 1510, 1547, 1453, 1451, 1452, 1314, 1216, 1226, 1441, 1244, 1288, 1246, 1225, 1260, 1263, 1434, 1299, 1335, 1522, 1521, 1270, 1338, 1298, 1476, 1221, 1231, 1340, 1439, 1341, 1257, 1518, 1519, 1438, 1326, 1350, 1273, 1278, 1430, 1431, 1283, 1289, 1384, 1296, 1432, 1433, 1219, 1222, 1224, 1223, 1238, 1237, 1482, 1427, 1243, 1249, 1261, 1262, 1250, 1485, 1405, 1318, 1319, 1279, 1450, 1290, 1293, 1292, 1415, 1295, 1300, 1301, 1402, 1214, 1529, 1215, 1218, 1460, 1387, 1304, 1220, 1310, 1348, 1349, 1345, 1530, 1531, 1532, 1406, 1576, 1478, 1479, 1467, 1480, 1227, 1394, 1533, 1312, 1396, 1228, 1381, 1481, 1360, 1308, 1230, 1329, 1232, 1233, 1313, 1311, 1234, 1408, 1534, 1535, 1404, 1235, 1536, 1468, 1236, 1537, 1538, 1239, 1240, 1388, 1324, 1483, 1417, 1241, 1484, 1242, 1245, 1247, 1248, 1251, 1386, 1351, 1252, 1577, 1435, 1356, 1253, 1461, 1401, 1574, 1254, 1539, 1411, 1255, 1256, 1580, 1258, 1259, 1346, 1540, 1322, 1541, 1418, 1459, 1264, 1307, 1210, 1462, 1403, 1337, 1542, 1265, 1543, 1544, 1389, 1407, 1412, 1325, 1398, 1486, 1457, 1268, 1266, 1334, 1419, 1267, 1456, 1458, 1315, 1546, 1473, 1472, 1376, 1377, 1316, 1378, 1379, 1390, 1365, 1545, 1317, 1366, 1463, 1302, 1361, 1269, 1400, 1573, 1344, 1466, 1469, 1420, 1487, 1488, 1464, 1465, 1353, 1470, 1548, 1454, 1354, 1331, 1285, 1524, 1575, 1410, 1422, 1425, 1352, 1271, 1475, 1474, 1525, 1367, 1550, 1368, 1272, 1343, 1362, 1363, 1364, 1489, 1321, 1370, 1369, 1274, 1549, 1395, 1275, 1528, 1527, 1383, 1424, 1276, 1437, 1327, 1455, 1380, 1328, 1342, 1277, 1385, 1359, 1320, 1490, 1371, 1429, 1393, 1372, 1471, 1333, 1373, 1374, 1281, 1423, 1382, 1375, 1282, 1305, 1414, 1523, 1416, 1336, 1339, 1443, 1444, 1445, 1446, 1447, 1448, 1449, 1578, 1491, 1358, 1494, 1495, 1493, 1492, 1357, 1428, 1284, 1554, 1555, 1556, 1557, 1579, 1551, 1397, 1287, 1286, 1552, 1553, 1355, 1413, 1409, 1421, 1440, 1391, 1291, 1496, 1561, 1562, 1563, 1564, 1565, 1566, 1568, 1567, 1569, 1570, 1571, 1520, 1294, 1323, 1572, 1297, 1330, 1392, 1306, 1558, 1559, 1560, 1347, 1303, 1526, 1399, 523: 2080, 1212, 1213, 1211, 563: 2081, 574: 2079}, + // 920 + {9: 2087, 488: 2086}, + {237, 237, 237, 237, 237, 237, 8: 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 
237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 383: 237, 400: 2083, 404: 2072, 488: 237, 552: 2082}, + {8: 233, 233, 488: 233}, + {235, 235, 235, 235, 235, 235, 8: 235, 235, 235, 235, 235, 235, 235, 235, 235, 235, 235, 235, 235, 235, 235, 235, 235, 235, 235, 235, 235, 235, 235, 235, 235, 235, 235, 235, 235, 235, 235, 235, 235, 235, 235, 235, 235, 235, 235, 235, 235, 235, 235, 235, 235, 235, 235, 235, 235, 235, 235, 235, 235, 235, 235, 235, 235, 235, 235, 235, 235, 235, 235, 235, 235, 235, 235, 235, 235, 235, 235, 235, 235, 235, 235, 235, 235, 235, 235, 235, 235, 235, 235, 235, 235, 235, 235, 235, 235, 235, 235, 235, 235, 235, 235, 235, 235, 235, 235, 235, 235, 235, 235, 235, 235, 235, 235, 235, 235, 235, 235, 235, 235, 235, 235, 235, 235, 235, 235, 235, 235, 235, 235, 235, 235, 235, 235, 235, 235, 235, 235, 235, 235, 235, 235, 235, 235, 235, 235, 235, 235, 235, 235, 235, 235, 235, 235, 235, 235, 235, 235, 235, 235, 235, 235, 235, 235, 235, 235, 235, 235, 235, 235, 235, 235, 235, 235, 235, 235, 235, 235, 235, 235, 235, 235, 235, 235, 235, 235, 235, 235, 235, 235, 235, 235, 235, 235, 235, 235, 235, 235, 235, 235, 235, 235, 235, 235, 235, 235, 235, 235, 235, 235, 235, 235, 235, 235, 235, 235, 235, 235, 235, 235, 235, 235, 235, 235, 235, 235, 235, 235, 235, 235, 235, 235, 235, 235, 235, 235, 235, 235, 235, 235, 235, 235, 235, 235, 235, 235, 235, 235, 235, 235, 235, 235, 235, 235, 235, 235, 235, 235, 235, 235, 235, 235, 235, 235, 235, 235, 235, 235, 235, 235, 235, 235, 235, 235, 235, 235, 235, 235, 235, 235, 235, 235, 235, 235, 235, 235, 235, 235, 235, 235, 235, 235, 235, 235, 235, 235, 235, 235, 235, 235, 235, 235, 235, 235, 235, 235, 235, 235, 235, 235, 235, 235, 235, 235, 235, 235, 235, 235, 235, 235, 235, 235, 235, 235, 235, 235, 235, 235, 235, 235, 235, 235, 235, 235, 235, 235, 235, 235, 235, 235, 235, 235, 235, 235, 235, 235, 235, 235, 235, 235, 235, 235, 235, 235, 235, 235, 235, 235, 235, 235, 235, 235, 235, 235, 235, 235, 235, 235, 235, 235, 235, 383: 235, 488: 235}, + {1309, 1332, 1217, 1442, 1436, 1426, 10: 1280, 1229, 1477, 1511, 1504, 1497, 1507, 1500, 1499, 1501, 1517, 1509, 1503, 1515, 1516, 1513, 1514, 1502, 1498, 1505, 1506, 1508, 1512, 1510, 1547, 1453, 1451, 1452, 1314, 1216, 1226, 1441, 1244, 1288, 
1246, 1225, 1260, 1263, 1434, 1299, 1335, 1522, 1521, 1270, 1338, 1298, 1476, 1221, 1231, 1340, 1439, 1341, 1257, 1518, 1519, 1438, 1326, 1350, 1273, 1278, 1430, 1431, 1283, 1289, 1384, 1296, 1432, 1433, 1219, 1222, 1224, 1223, 1238, 1237, 1482, 1427, 1243, 1249, 1261, 1262, 1250, 1485, 1405, 1318, 1319, 1279, 1450, 1290, 1293, 1292, 1415, 1295, 1300, 1301, 1402, 1214, 1529, 1215, 1218, 1460, 1387, 1304, 1220, 1310, 1348, 1349, 1345, 1530, 1531, 1532, 1406, 1576, 1478, 1479, 1467, 1480, 1227, 1394, 1533, 1312, 1396, 1228, 1381, 1481, 1360, 1308, 1230, 1329, 1232, 1233, 1313, 1311, 1234, 1408, 1534, 1535, 1404, 1235, 1536, 1468, 1236, 1537, 1538, 1239, 1240, 1388, 1324, 1483, 1417, 1241, 1484, 1242, 1245, 1247, 1248, 1251, 1386, 1351, 1252, 1577, 1435, 1356, 1253, 1461, 1401, 1574, 1254, 1539, 1411, 1255, 1256, 1580, 1258, 1259, 1346, 1540, 1322, 1541, 1418, 1459, 1264, 1307, 1210, 1462, 1403, 1337, 1542, 1265, 1543, 1544, 1389, 1407, 1412, 1325, 1398, 1486, 1457, 1268, 1266, 1334, 1419, 1267, 1456, 1458, 1315, 1546, 1473, 1472, 1376, 1377, 1316, 1378, 1379, 1390, 1365, 1545, 1317, 1366, 1463, 1302, 1361, 1269, 1400, 1573, 1344, 1466, 1469, 1420, 1487, 1488, 1464, 1465, 1353, 1470, 1548, 1454, 1354, 1331, 1285, 1524, 1575, 1410, 1422, 1425, 1352, 1271, 1475, 1474, 1525, 1367, 1550, 1368, 1272, 1343, 1362, 1363, 1364, 1489, 1321, 1370, 1369, 1274, 1549, 1395, 1275, 1528, 1527, 1383, 1424, 1276, 1437, 1327, 1455, 1380, 1328, 1342, 1277, 1385, 1359, 1320, 1490, 1371, 1429, 1393, 1372, 1471, 1333, 1373, 1374, 1281, 1423, 1382, 1375, 1282, 1305, 1414, 1523, 1416, 1336, 1339, 1443, 1444, 1445, 1446, 1447, 1448, 1449, 1578, 1491, 1358, 1494, 1495, 1493, 1492, 1357, 1428, 1284, 1554, 1555, 1556, 1557, 1579, 1551, 1397, 1287, 1286, 1552, 1553, 1355, 1413, 1409, 1421, 1440, 1391, 1291, 1496, 1561, 1562, 1563, 1564, 1565, 1566, 1568, 1567, 1569, 1570, 1571, 1520, 1294, 1323, 1572, 1297, 1330, 1392, 1306, 1558, 1559, 1560, 1347, 1303, 1526, 1399, 523: 2084, 1212, 1213, 1211}, + // 925 + {237, 237, 237, 237, 237, 237, 8: 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 
237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 383: 237, 404: 2072, 488: 237, 552: 2085}, + {234, 234, 234, 234, 234, 234, 8: 234, 234, 234, 234, 234, 234, 234, 234, 234, 234, 234, 234, 234, 234, 234, 234, 234, 234, 234, 234, 234, 234, 234, 234, 234, 234, 234, 234, 234, 234, 234, 234, 234, 234, 234, 234, 234, 234, 234, 234, 234, 234, 234, 234, 234, 234, 234, 234, 234, 234, 234, 234, 234, 234, 234, 234, 234, 234, 234, 234, 234, 234, 234, 234, 234, 234, 234, 234, 234, 234, 234, 234, 234, 234, 234, 234, 234, 234, 234, 234, 234, 234, 234, 234, 234, 234, 234, 234, 234, 234, 234, 234, 234, 234, 234, 234, 234, 234, 234, 234, 234, 234, 234, 234, 234, 234, 234, 234, 234, 234, 234, 234, 234, 234, 234, 234, 234, 234, 234, 234, 234, 234, 234, 234, 234, 234, 234, 234, 234, 234, 234, 234, 234, 234, 234, 234, 234, 234, 234, 234, 234, 234, 234, 234, 234, 234, 234, 234, 234, 234, 234, 234, 234, 234, 234, 234, 234, 234, 234, 234, 234, 234, 234, 234, 234, 234, 234, 234, 234, 234, 234, 234, 234, 234, 234, 234, 234, 234, 234, 234, 234, 234, 234, 234, 234, 234, 234, 234, 234, 234, 234, 234, 234, 234, 234, 234, 234, 234, 234, 234, 234, 234, 234, 234, 234, 234, 234, 234, 234, 234, 234, 234, 234, 234, 234, 234, 234, 234, 234, 234, 234, 234, 234, 234, 234, 234, 234, 234, 234, 234, 234, 234, 234, 234, 234, 234, 234, 234, 234, 234, 234, 234, 234, 234, 234, 234, 234, 234, 234, 234, 234, 234, 234, 234, 234, 234, 234, 234, 234, 234, 234, 234, 234, 234, 234, 234, 234, 234, 234, 234, 234, 234, 234, 234, 234, 234, 234, 234, 234, 234, 234, 234, 234, 234, 234, 234, 234, 234, 234, 234, 234, 234, 234, 234, 234, 234, 234, 234, 234, 234, 234, 234, 234, 234, 234, 234, 234, 234, 234, 234, 234, 234, 234, 234, 234, 234, 234, 234, 234, 234, 234, 234, 234, 234, 234, 234, 234, 234, 234, 234, 234, 234, 234, 234, 234, 234, 234, 234, 234, 234, 234, 234, 234, 234, 234, 234, 234, 234, 234, 234, 234, 234, 234, 234, 234, 234, 234, 234, 234, 234, 234, 234, 234, 234, 383: 234, 488: 234}, + {8: 238, 238}, + {1309, 1332, 1217, 1442, 1436, 1426, 10: 1280, 1229, 1477, 1511, 1504, 1497, 1507, 1500, 1499, 1501, 1517, 1509, 1503, 1515, 1516, 1513, 1514, 1502, 1498, 1505, 1506, 1508, 1512, 1510, 1547, 1453, 1451, 1452, 1314, 1216, 1226, 1441, 1244, 1288, 1246, 1225, 1260, 1263, 1434, 1299, 1335, 1522, 1521, 1270, 1338, 1298, 1476, 1221, 1231, 1340, 1439, 1341, 1257, 1518, 1519, 1438, 1326, 1350, 1273, 1278, 1430, 1431, 1283, 1289, 1384, 1296, 1432, 1433, 1219, 1222, 1224, 1223, 1238, 1237, 1482, 1427, 1243, 1249, 1261, 1262, 1250, 1485, 1405, 1318, 1319, 1279, 1450, 1290, 1293, 1292, 1415, 1295, 1300, 1301, 1402, 1214, 1529, 1215, 1218, 1460, 1387, 1304, 1220, 1310, 1348, 1349, 1345, 1530, 1531, 1532, 1406, 1576, 1478, 1479, 1467, 1480, 1227, 1394, 1533, 1312, 1396, 1228, 1381, 1481, 1360, 1308, 1230, 1329, 1232, 1233, 1313, 1311, 1234, 1408, 1534, 1535, 1404, 1235, 1536, 1468, 1236, 1537, 1538, 1239, 1240, 1388, 1324, 1483, 1417, 1241, 1484, 1242, 1245, 1247, 1248, 1251, 1386, 1351, 1252, 1577, 1435, 1356, 1253, 1461, 1401, 1574, 1254, 1539, 1411, 1255, 1256, 1580, 1258, 1259, 1346, 1540, 1322, 1541, 1418, 1459, 1264, 1307, 1210, 1462, 1403, 1337, 1542, 1265, 1543, 1544, 1389, 1407, 1412, 1325, 1398, 1486, 1457, 1268, 1266, 1334, 1419, 1267, 1456, 1458, 1315, 1546, 1473, 1472, 1376, 1377, 
1316, 1378, 1379, 1390, 1365, 1545, 1317, 1366, 1463, 1302, 1361, 1269, 1400, 1573, 1344, 1466, 1469, 1420, 1487, 1488, 1464, 1465, 1353, 1470, 1548, 1454, 1354, 1331, 1285, 1524, 1575, 1410, 1422, 1425, 1352, 1271, 1475, 1474, 1525, 1367, 1550, 1368, 1272, 1343, 1362, 1363, 1364, 1489, 1321, 1370, 1369, 1274, 1549, 1395, 1275, 1528, 1527, 1383, 1424, 1276, 1437, 1327, 1455, 1380, 1328, 1342, 1277, 1385, 1359, 1320, 1490, 1371, 1429, 1393, 1372, 1471, 1333, 1373, 1374, 1281, 1423, 1382, 1375, 1282, 1305, 1414, 1523, 1416, 1336, 1339, 1443, 1444, 1445, 1446, 1447, 1448, 1449, 1578, 1491, 1358, 1494, 1495, 1493, 1492, 1357, 1428, 1284, 1554, 1555, 1556, 1557, 1579, 1551, 1397, 1287, 1286, 1552, 1553, 1355, 1413, 1409, 1421, 1440, 1391, 1291, 1496, 1561, 1562, 1563, 1564, 1565, 1566, 1568, 1567, 1569, 1570, 1571, 1520, 1294, 1323, 1572, 1297, 1330, 1392, 1306, 1558, 1559, 1560, 1347, 1303, 1526, 1399, 523: 2080, 1212, 1213, 1211, 563: 2088}, + {8: 232, 232, 488: 232}, + // 930 + {9: 241, 12: 241, 241, 241, 241, 241, 241, 241, 241, 241, 241, 241, 241, 241, 241, 241, 241, 241, 241, 241, 241, 241, 241, 241, 550: 241}, + {51: 2077, 2076, 681: 2075, 2091}, + {8: 239, 239}, + {1309, 1332, 1217, 1442, 1436, 1426, 10: 1280, 1229, 1477, 1511, 1504, 1497, 1507, 1500, 1499, 1501, 1517, 1509, 1503, 1515, 1516, 1513, 1514, 1502, 1498, 1505, 1506, 1508, 1512, 1510, 1547, 1453, 1451, 1452, 1314, 1216, 1226, 1441, 1244, 1288, 1246, 1225, 1260, 1263, 1434, 1299, 1335, 1522, 1521, 1270, 1338, 1298, 1476, 1221, 1231, 1340, 1439, 1341, 1257, 1518, 1519, 1438, 1326, 1350, 1273, 1278, 1430, 1431, 1283, 1289, 1384, 1296, 1432, 1433, 1219, 1222, 1224, 1223, 1238, 1237, 1482, 1427, 1243, 1249, 1261, 1262, 1250, 1485, 1405, 1318, 1319, 1279, 1450, 1290, 1293, 1292, 1415, 1295, 1300, 1301, 1402, 1214, 1529, 1215, 1218, 1460, 1387, 1304, 1220, 1310, 1348, 1349, 1345, 1530, 1531, 1532, 1406, 1576, 1478, 1479, 1467, 1480, 1227, 1394, 1533, 1312, 1396, 1228, 1381, 1481, 1360, 1308, 1230, 1329, 1232, 1233, 1313, 1311, 1234, 1408, 1534, 1535, 1404, 1235, 1536, 1468, 1236, 1537, 1538, 1239, 1240, 1388, 1324, 1483, 1417, 1241, 1484, 1242, 1245, 1247, 1248, 1251, 1386, 1351, 1252, 1577, 1435, 1356, 1253, 1461, 1401, 1574, 1254, 1539, 1411, 1255, 1256, 1580, 1258, 1259, 1346, 1540, 1322, 1541, 1418, 1459, 1264, 1307, 1210, 1462, 1403, 1337, 1542, 1265, 1543, 1544, 1389, 1407, 1412, 1325, 1398, 1486, 1457, 1268, 1266, 1334, 1419, 1267, 1456, 1458, 1315, 1546, 1473, 1472, 1376, 1377, 1316, 1378, 1379, 1390, 1365, 1545, 1317, 1366, 1463, 1302, 1361, 1269, 1400, 1573, 1344, 1466, 1469, 1420, 1487, 1488, 1464, 1465, 1353, 1470, 1548, 1454, 1354, 1331, 1285, 1524, 1575, 1410, 1422, 1425, 1352, 1271, 1475, 1474, 1525, 1367, 1550, 1368, 1272, 1343, 1362, 1363, 1364, 1489, 1321, 1370, 1369, 1274, 1549, 1395, 1275, 1528, 1527, 1383, 1424, 1276, 1437, 1327, 1455, 1380, 1328, 1342, 1277, 1385, 1359, 1320, 1490, 1371, 1429, 1393, 1372, 1471, 1333, 1373, 1374, 1281, 1423, 1382, 1375, 1282, 1305, 1414, 1523, 1416, 1336, 1339, 1443, 1444, 1445, 1446, 1447, 1448, 1449, 1578, 1491, 1358, 1494, 1495, 1493, 1492, 1357, 1428, 1284, 1554, 1555, 1556, 1557, 1579, 1551, 1397, 1287, 1286, 1552, 1553, 1355, 1413, 1409, 1421, 1440, 1391, 1291, 1496, 1561, 1562, 1563, 1564, 1565, 1566, 1568, 1567, 1569, 1570, 1571, 1520, 1294, 1323, 1572, 1297, 1330, 1392, 1306, 1558, 1559, 1560, 1347, 1303, 1526, 1399, 523: 2093, 1212, 1213, 1211}, + {8: 2094}, + // 935 + {9: 242, 12: 242, 242, 242, 242, 242, 242, 242, 242, 242, 242, 242, 242, 242, 242, 242, 242, 242, 
242, 242, 242, 242, 242, 242, 550: 242}, + {8: 237, 404: 2072, 552: 2096}, + {8: 2097}, + {9: 243, 12: 243, 243, 243, 243, 243, 243, 243, 243, 243, 243, 243, 243, 243, 243, 243, 243, 243, 243, 243, 243, 243, 243, 243, 550: 243}, + {8: 237, 404: 2072, 552: 2099}, + // 940 + {8: 2100}, + {9: 244, 12: 244, 244, 244, 244, 244, 244, 244, 244, 244, 244, 244, 244, 244, 244, 244, 244, 244, 244, 244, 244, 244, 244, 244, 550: 244}, + {8: 237, 404: 2072, 552: 2102}, + {8: 2103}, + {9: 245, 12: 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 550: 245}, + // 945 + {8: 237, 404: 2072, 552: 2105}, + {8: 2106}, + {9: 246, 12: 246, 246, 246, 246, 246, 246, 246, 246, 246, 246, 246, 246, 246, 246, 246, 246, 246, 246, 246, 246, 246, 246, 246, 550: 246}, + {8: 237, 404: 2072, 552: 2108}, + {8: 2109}, + // 950 + {9: 247, 12: 247, 247, 247, 247, 247, 247, 247, 247, 247, 247, 247, 247, 247, 247, 247, 247, 247, 247, 247, 247, 247, 247, 247, 550: 247}, + {404: 2072, 406: 237, 552: 2111}, + {406: 1904, 564: 2113, 750: 2112}, + {8: 2115}, + {1309, 1332, 1217, 1442, 1436, 1426, 10: 1280, 1229, 1477, 1511, 1504, 1497, 1507, 1500, 1499, 1501, 1517, 1509, 1503, 1515, 1516, 1513, 1514, 1502, 1498, 1505, 1506, 1508, 1512, 1510, 1547, 1453, 1451, 1452, 1314, 1216, 1226, 1441, 1244, 1288, 1246, 1225, 1260, 1263, 1434, 1299, 1335, 1522, 1521, 1270, 1338, 1298, 1476, 1221, 1231, 1340, 1439, 1341, 1257, 1518, 1519, 1438, 1326, 1350, 1273, 1278, 1430, 1431, 1283, 1289, 1384, 1296, 1432, 1433, 1219, 1222, 1224, 1223, 1238, 1237, 1482, 1427, 1243, 1249, 1261, 1262, 1250, 1485, 1405, 1318, 1319, 1279, 1450, 1290, 1293, 1292, 1415, 1295, 1300, 1301, 1402, 1214, 1529, 1215, 1218, 1460, 1387, 1304, 1220, 1310, 1348, 1349, 1345, 1530, 1531, 1532, 1406, 1576, 1478, 1479, 1467, 1480, 1227, 1394, 1533, 1312, 1396, 1228, 1381, 1481, 1360, 1308, 1230, 1329, 1232, 1233, 1313, 1311, 1234, 1408, 1534, 1535, 1404, 1235, 1536, 1468, 1236, 1537, 1538, 1239, 1240, 1388, 1324, 1483, 1417, 1241, 1484, 1242, 1245, 1247, 1248, 1251, 1386, 1351, 1252, 1577, 1435, 1356, 1253, 1461, 1401, 1574, 1254, 1539, 1411, 1255, 1256, 1580, 1258, 1259, 1346, 1540, 1322, 1541, 1418, 1459, 1264, 1307, 1210, 1462, 1403, 1337, 1542, 1265, 1543, 1544, 1389, 1407, 1412, 1325, 1398, 1486, 1457, 1268, 1266, 1334, 1419, 1267, 1456, 1458, 1315, 1546, 1473, 1472, 1376, 1377, 1316, 1378, 1379, 1390, 1365, 1545, 1317, 1366, 1463, 1302, 1361, 1269, 1400, 1573, 1344, 1466, 1469, 1420, 1487, 1488, 1464, 1465, 1353, 1470, 1548, 1454, 1354, 1331, 1285, 1524, 1575, 1410, 1422, 1425, 1352, 1271, 1475, 1474, 1525, 1367, 1550, 1368, 1272, 1343, 1362, 1363, 1364, 1489, 1321, 1370, 1369, 1274, 1549, 1395, 1275, 1528, 1527, 1383, 1424, 1276, 1437, 1327, 1455, 1380, 1328, 1342, 1277, 1385, 1359, 1320, 1490, 1371, 1429, 1393, 1372, 1471, 1333, 1373, 1374, 1281, 1423, 1382, 1375, 1282, 1305, 1414, 1523, 1416, 1336, 1339, 1443, 1444, 1445, 1446, 1447, 1448, 1449, 1578, 1491, 1358, 1494, 1495, 1493, 1492, 1357, 1428, 1284, 1554, 1555, 1556, 1557, 1579, 1551, 1397, 1287, 1286, 1552, 1553, 1355, 1413, 1409, 1421, 1440, 1391, 1291, 1496, 1561, 1562, 1563, 1564, 1565, 1566, 1568, 1567, 1569, 1570, 1571, 1520, 1294, 1323, 1572, 1297, 1330, 1392, 1306, 1558, 1559, 1560, 1347, 1303, 1526, 1399, 523: 2114, 1212, 1213, 1211}, + // 955 + {8: 225}, + {9: 248, 12: 248, 248, 248, 248, 248, 248, 248, 248, 248, 248, 248, 248, 248, 248, 248, 248, 248, 248, 248, 248, 248, 248, 248, 550: 248}, + {63: 237, 237, 404: 2072, 552: 2117}, + {63: 2119, 
2120, 751: 2118}, + {8: 2121}, + // 960 + {8: 227}, + {8: 226}, + {9: 249, 12: 249, 249, 249, 249, 249, 249, 249, 249, 249, 249, 249, 249, 249, 249, 249, 249, 249, 249, 249, 249, 249, 249, 249, 550: 249}, + {8: 237, 404: 2072, 552: 2123}, + {8: 2124}, + // 965 + {9: 250, 12: 250, 250, 250, 250, 250, 250, 250, 250, 250, 250, 250, 250, 250, 250, 250, 250, 250, 250, 250, 250, 250, 250, 250, 550: 250}, + {404: 2072, 406: 237, 552: 2126}, + {406: 1904, 564: 2127}, + {8: 2128}, + {9: 251, 12: 251, 251, 251, 251, 251, 251, 251, 251, 251, 251, 251, 251, 251, 251, 251, 251, 251, 251, 251, 251, 251, 251, 251, 550: 251}, + // 970 + {404: 2072, 419: 237, 237, 552: 2130}, + {419: 2133, 2132, 683: 2131}, + {8: 2134}, + {8: 231}, + {8: 230}, + // 975 + {9: 252, 12: 252, 252, 252, 252, 252, 252, 252, 252, 252, 252, 252, 252, 252, 252, 252, 252, 252, 252, 252, 252, 252, 252, 252, 550: 252}, + {404: 2072, 419: 237, 237, 552: 2136}, + {419: 2133, 2132, 683: 2137}, + {8: 2138}, + {9: 253, 12: 253, 253, 253, 253, 253, 253, 253, 253, 253, 253, 253, 253, 253, 253, 253, 253, 253, 253, 253, 253, 253, 253, 253, 550: 253}, + // 980 + {237, 237, 237, 237, 237, 237, 10: 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 404: 2072, 552: 2140}, + {1309, 1332, 1217, 1442, 1436, 1426, 10: 1280, 1229, 1477, 1511, 1504, 1497, 1507, 1500, 1499, 1501, 1517, 1509, 1503, 1515, 1516, 1513, 1514, 1502, 1498, 1505, 1506, 1508, 1512, 1510, 1547, 1453, 1451, 1452, 1314, 1216, 1226, 1441, 1244, 1288, 1246, 1225, 1260, 1263, 1434, 1299, 1335, 1522, 1521, 1270, 1338, 1298, 1476, 1221, 1231, 1340, 1439, 1341, 1257, 1518, 1519, 1438, 1326, 1350, 1273, 1278, 1430, 1431, 1283, 1289, 1384, 1296, 1432, 1433, 1219, 1222, 1224, 1223, 1238, 1237, 1482, 1427, 1243, 1249, 1261, 1262, 1250, 1485, 1405, 1318, 1319, 1279, 
1450, 1290, 1293, 1292, 1415, 1295, 1300, 1301, 1402, 1214, 1529, 1215, 1218, 1460, 1387, 1304, 1220, 1310, 1348, 1349, 1345, 1530, 1531, 1532, 1406, 1576, 1478, 1479, 1467, 1480, 1227, 1394, 1533, 1312, 1396, 1228, 1381, 1481, 1360, 1308, 1230, 1329, 1232, 1233, 1313, 1311, 1234, 1408, 1534, 1535, 1404, 1235, 1536, 1468, 1236, 1537, 1538, 1239, 1240, 1388, 1324, 1483, 1417, 1241, 1484, 1242, 1245, 1247, 1248, 1251, 1386, 1351, 1252, 1577, 1435, 1356, 1253, 1461, 1401, 1574, 1254, 1539, 1411, 1255, 1256, 1580, 1258, 1259, 1346, 1540, 1322, 1541, 1418, 1459, 1264, 1307, 1210, 1462, 1403, 1337, 1542, 1265, 1543, 1544, 1389, 1407, 1412, 1325, 1398, 1486, 1457, 1268, 1266, 1334, 1419, 1267, 1456, 1458, 1315, 1546, 1473, 1472, 1376, 1377, 1316, 1378, 1379, 1390, 1365, 1545, 1317, 1366, 1463, 1302, 1361, 1269, 1400, 1573, 1344, 1466, 1469, 1420, 1487, 1488, 1464, 1465, 1353, 1470, 1548, 1454, 1354, 1331, 1285, 1524, 1575, 1410, 1422, 1425, 1352, 1271, 1475, 1474, 1525, 1367, 1550, 1368, 1272, 1343, 1362, 1363, 1364, 1489, 1321, 1370, 1369, 1274, 1549, 1395, 1275, 1528, 1527, 1383, 1424, 1276, 1437, 1327, 1455, 1380, 1328, 1342, 1277, 1385, 1359, 1320, 1490, 1371, 1429, 1393, 1372, 1471, 1333, 1373, 1374, 1281, 1423, 1382, 1375, 1282, 1305, 1414, 1523, 1416, 1336, 1339, 1443, 1444, 1445, 1446, 1447, 1448, 1449, 1578, 1491, 1358, 1494, 1495, 1493, 1492, 1357, 1428, 1284, 1554, 1555, 1556, 1557, 1579, 1551, 1397, 1287, 1286, 1552, 1553, 1355, 1413, 1409, 1421, 1440, 1391, 1291, 1496, 1561, 1562, 1563, 1564, 1565, 1566, 1568, 1567, 1569, 1570, 1571, 1520, 1294, 1323, 1572, 1297, 1330, 1392, 1306, 1558, 1559, 1560, 1347, 1303, 1526, 1399, 523: 2080, 1212, 1213, 1211, 563: 2141}, + {1309, 1332, 1217, 1442, 1436, 1426, 8: 296, 296, 1280, 1229, 1477, 1511, 1504, 1497, 1507, 1500, 1499, 1501, 1517, 1509, 1503, 1515, 1516, 1513, 1514, 1502, 1498, 1505, 1506, 1508, 1512, 1510, 1547, 1453, 1451, 1452, 1314, 1216, 1226, 1441, 1244, 1288, 1246, 1225, 1260, 1263, 1434, 1299, 1335, 1522, 1521, 1270, 1338, 1298, 1476, 1221, 1231, 1340, 1439, 1341, 1257, 1518, 1519, 1438, 1326, 1350, 1273, 1278, 1430, 1431, 1283, 1289, 1384, 1296, 1432, 1433, 1219, 1222, 1224, 1223, 1238, 1237, 1482, 1427, 1243, 1249, 1261, 1262, 1250, 1485, 1405, 1318, 1319, 1279, 1450, 1290, 1293, 1292, 1415, 1295, 1300, 1301, 1402, 1214, 1529, 1215, 1218, 1460, 1387, 1304, 1220, 1310, 1348, 1349, 1345, 1530, 1531, 1532, 1406, 1576, 1478, 1479, 1467, 1480, 1227, 1394, 1533, 1312, 1396, 1228, 1381, 1481, 1360, 1308, 1230, 1329, 1232, 1233, 1313, 1311, 1234, 1408, 1534, 1535, 1404, 1235, 1536, 1468, 1236, 1537, 1538, 1239, 1240, 1388, 1324, 1483, 1417, 1241, 1484, 1242, 1245, 1247, 1248, 1251, 1386, 1351, 1252, 1577, 1435, 1356, 1253, 1461, 1401, 1574, 1254, 1539, 1411, 1255, 1256, 1580, 1258, 1259, 1346, 1540, 1322, 1541, 1418, 1459, 1264, 1307, 1210, 1462, 1403, 1337, 1542, 1265, 1543, 1544, 1389, 1407, 1412, 1325, 1398, 1486, 1457, 1268, 1266, 1334, 1419, 1267, 1456, 1458, 1315, 1546, 1473, 1472, 1376, 1377, 1316, 1378, 1379, 1390, 1365, 1545, 1317, 1366, 1463, 1302, 1361, 1269, 1400, 1573, 1344, 1466, 1469, 1420, 1487, 1488, 1464, 1465, 1353, 1470, 1548, 1454, 1354, 1331, 1285, 1524, 1575, 1410, 1422, 1425, 1352, 1271, 1475, 1474, 1525, 1367, 1550, 1368, 1272, 1343, 1362, 1363, 1364, 1489, 1321, 1370, 1369, 1274, 1549, 1395, 1275, 1528, 1527, 1383, 1424, 1276, 1437, 1327, 1455, 1380, 1328, 1342, 1277, 1385, 1359, 1320, 1490, 1371, 1429, 1393, 1372, 1471, 1333, 1373, 1374, 1281, 1423, 1382, 1375, 1282, 1305, 1414, 1523, 1416, 1336, 1339, 1443, 
1444, 1445, 1446, 1447, 1448, 1449, 1578, 1491, 1358, 1494, 1495, 1493, 1492, 1357, 1428, 1284, 1554, 1555, 1556, 1557, 1579, 1551, 1397, 1287, 1286, 1552, 1553, 1355, 1413, 1409, 1421, 1440, 1391, 1291, 1496, 1561, 1562, 1563, 1564, 1565, 1566, 1568, 1567, 1569, 1570, 1571, 1520, 1294, 1323, 1572, 1297, 1330, 1392, 1306, 1558, 1559, 1560, 1347, 1303, 1526, 1399, 383: 2007, 523: 2006, 1212, 1213, 1211, 615: 2142}, + {8: 2143, 2009}, + {9: 254, 12: 254, 254, 254, 254, 254, 254, 254, 254, 254, 254, 254, 254, 254, 254, 254, 254, 254, 254, 254, 254, 254, 254, 254, 550: 254}, + // 985 + {237, 237, 237, 237, 237, 237, 10: 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 404: 2072, 552: 2145}, + {1309, 1332, 1217, 1442, 1436, 1426, 10: 1280, 1229, 1477, 1511, 1504, 1497, 1507, 1500, 1499, 1501, 1517, 1509, 1503, 1515, 1516, 1513, 1514, 1502, 1498, 1505, 1506, 1508, 1512, 1510, 1547, 1453, 1451, 1452, 1314, 1216, 1226, 1441, 1244, 1288, 1246, 1225, 1260, 1263, 1434, 1299, 1335, 1522, 1521, 1270, 1338, 1298, 1476, 1221, 1231, 1340, 1439, 1341, 1257, 1518, 1519, 1438, 1326, 1350, 1273, 1278, 1430, 1431, 1283, 1289, 1384, 1296, 1432, 1433, 1219, 1222, 1224, 1223, 1238, 1237, 1482, 1427, 1243, 1249, 1261, 1262, 1250, 1485, 1405, 1318, 1319, 1279, 1450, 1290, 1293, 1292, 1415, 1295, 1300, 1301, 1402, 1214, 1529, 1215, 1218, 1460, 1387, 1304, 1220, 1310, 1348, 1349, 1345, 1530, 1531, 1532, 1406, 1576, 1478, 1479, 1467, 1480, 1227, 1394, 1533, 1312, 1396, 1228, 1381, 1481, 1360, 1308, 1230, 1329, 1232, 1233, 1313, 1311, 1234, 1408, 1534, 1535, 1404, 1235, 1536, 1468, 1236, 1537, 1538, 1239, 1240, 1388, 1324, 1483, 1417, 1241, 1484, 1242, 1245, 1247, 1248, 1251, 1386, 1351, 1252, 1577, 1435, 1356, 1253, 1461, 1401, 1574, 1254, 1539, 1411, 1255, 1256, 1580, 1258, 1259, 1346, 
1540, 1322, 1541, 1418, 1459, 1264, 1307, 1210, 1462, 1403, 1337, 1542, 1265, 1543, 1544, 1389, 1407, 1412, 1325, 1398, 1486, 1457, 1268, 1266, 1334, 1419, 1267, 1456, 1458, 1315, 1546, 1473, 1472, 1376, 1377, 1316, 1378, 1379, 1390, 1365, 1545, 1317, 1366, 1463, 1302, 1361, 1269, 1400, 1573, 1344, 1466, 1469, 1420, 1487, 1488, 1464, 1465, 1353, 1470, 1548, 1454, 1354, 1331, 1285, 1524, 1575, 1410, 1422, 1425, 1352, 1271, 1475, 1474, 1525, 1367, 1550, 1368, 1272, 1343, 1362, 1363, 1364, 1489, 1321, 1370, 1369, 1274, 1549, 1395, 1275, 1528, 1527, 1383, 1424, 1276, 1437, 1327, 1455, 1380, 1328, 1342, 1277, 1385, 1359, 1320, 1490, 1371, 1429, 1393, 1372, 1471, 1333, 1373, 1374, 1281, 1423, 1382, 1375, 1282, 1305, 1414, 1523, 1416, 1336, 1339, 1443, 1444, 1445, 1446, 1447, 1448, 1449, 1578, 1491, 1358, 1494, 1495, 1493, 1492, 1357, 1428, 1284, 1554, 1555, 1556, 1557, 1579, 1551, 1397, 1287, 1286, 1552, 1553, 1355, 1413, 1409, 1421, 1440, 1391, 1291, 1496, 1561, 1562, 1563, 1564, 1565, 1566, 1568, 1567, 1569, 1570, 1571, 1520, 1294, 1323, 1572, 1297, 1330, 1392, 1306, 1558, 1559, 1560, 1347, 1303, 1526, 1399, 523: 2080, 1212, 1213, 1211, 563: 2081, 574: 2146}, + {8: 2147, 2087}, + {9: 255, 12: 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 550: 255}, + {237, 237, 237, 237, 237, 237, 10: 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 404: 2072, 552: 2149}, + // 990 + {1309, 1332, 1217, 1442, 1436, 1426, 10: 1280, 1229, 1477, 1511, 1504, 1497, 1507, 1500, 1499, 1501, 1517, 1509, 1503, 1515, 1516, 1513, 1514, 1502, 1498, 1505, 1506, 1508, 1512, 1510, 1547, 1453, 1451, 1452, 1314, 1216, 1226, 1441, 1244, 1288, 1246, 1225, 1260, 1263, 1434, 1299, 1335, 1522, 1521, 1270, 1338, 1298, 1476, 1221, 1231, 
1340, 1439, 1341, 1257, 1518, 1519, 1438, 1326, 1350, 1273, 1278, 1430, 1431, 1283, 1289, 1384, 1296, 1432, 1433, 1219, 1222, 1224, 1223, 1238, 1237, 1482, 1427, 1243, 1249, 1261, 1262, 1250, 1485, 1405, 1318, 1319, 1279, 1450, 1290, 1293, 1292, 1415, 1295, 1300, 1301, 1402, 1214, 1529, 1215, 1218, 1460, 1387, 1304, 1220, 1310, 1348, 1349, 1345, 1530, 1531, 1532, 1406, 1576, 1478, 1479, 1467, 1480, 1227, 1394, 1533, 1312, 1396, 1228, 1381, 1481, 1360, 1308, 1230, 1329, 1232, 1233, 1313, 1311, 1234, 1408, 1534, 1535, 1404, 1235, 1536, 1468, 1236, 1537, 1538, 1239, 1240, 1388, 1324, 1483, 1417, 1241, 1484, 1242, 1245, 1247, 1248, 1251, 1386, 1351, 1252, 1577, 1435, 1356, 1253, 1461, 1401, 1574, 1254, 1539, 1411, 1255, 1256, 1580, 1258, 1259, 1346, 1540, 1322, 1541, 1418, 1459, 1264, 1307, 1210, 1462, 1403, 1337, 1542, 1265, 1543, 1544, 1389, 1407, 1412, 1325, 1398, 1486, 1457, 1268, 1266, 1334, 1419, 1267, 1456, 1458, 1315, 1546, 1473, 1472, 1376, 1377, 1316, 1378, 1379, 1390, 1365, 1545, 1317, 1366, 1463, 1302, 1361, 1269, 1400, 1573, 1344, 1466, 1469, 1420, 1487, 1488, 1464, 1465, 1353, 1470, 1548, 1454, 1354, 1331, 1285, 1524, 1575, 1410, 1422, 1425, 1352, 1271, 1475, 1474, 1525, 1367, 1550, 1368, 1272, 1343, 1362, 1363, 1364, 1489, 1321, 1370, 1369, 1274, 1549, 1395, 1275, 1528, 1527, 1383, 1424, 1276, 1437, 1327, 1455, 1380, 1328, 1342, 1277, 1385, 1359, 1320, 1490, 1371, 1429, 1393, 1372, 1471, 1333, 1373, 1374, 1281, 1423, 1382, 1375, 1282, 1305, 1414, 1523, 1416, 1336, 1339, 1443, 1444, 1445, 1446, 1447, 1448, 1449, 1578, 1491, 1358, 1494, 1495, 1493, 1492, 1357, 1428, 1284, 1554, 1555, 1556, 1557, 1579, 1551, 1397, 1287, 1286, 1552, 1553, 1355, 1413, 1409, 1421, 1440, 1391, 1291, 1496, 1561, 1562, 1563, 1564, 1565, 1566, 1568, 1567, 1569, 1570, 1571, 1520, 1294, 1323, 1572, 1297, 1330, 1392, 1306, 1558, 1559, 1560, 1347, 1303, 1526, 1399, 523: 2080, 1212, 1213, 1211, 563: 2081, 574: 2150}, + {8: 2151, 2087}, + {9: 256, 12: 256, 256, 256, 256, 256, 256, 256, 256, 256, 256, 256, 256, 256, 256, 256, 256, 256, 256, 256, 256, 256, 256, 256, 550: 256}, + {237, 237, 237, 237, 237, 237, 10: 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 
237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 404: 2072, 552: 2153}, + {1309, 1332, 1217, 1442, 1436, 1426, 10: 1280, 1229, 1477, 1511, 1504, 1497, 1507, 1500, 1499, 1501, 1517, 1509, 1503, 1515, 1516, 1513, 1514, 1502, 1498, 1505, 1506, 1508, 1512, 1510, 1547, 1453, 1451, 1452, 1314, 1216, 1226, 1441, 1244, 1288, 1246, 1225, 1260, 1263, 1434, 1299, 1335, 1522, 1521, 1270, 1338, 1298, 1476, 1221, 1231, 1340, 1439, 1341, 1257, 1518, 1519, 1438, 1326, 1350, 1273, 1278, 1430, 1431, 1283, 1289, 1384, 1296, 1432, 1433, 1219, 1222, 1224, 1223, 1238, 1237, 1482, 1427, 1243, 1249, 1261, 1262, 1250, 1485, 1405, 1318, 1319, 1279, 1450, 1290, 1293, 1292, 1415, 1295, 1300, 1301, 1402, 1214, 1529, 1215, 1218, 1460, 1387, 1304, 1220, 1310, 1348, 1349, 1345, 1530, 1531, 1532, 1406, 1576, 1478, 1479, 1467, 1480, 1227, 1394, 1533, 1312, 1396, 1228, 1381, 1481, 1360, 1308, 1230, 1329, 1232, 1233, 1313, 1311, 1234, 1408, 1534, 1535, 1404, 1235, 1536, 1468, 1236, 1537, 1538, 1239, 1240, 1388, 1324, 1483, 1417, 1241, 1484, 1242, 1245, 1247, 1248, 1251, 1386, 1351, 1252, 1577, 1435, 1356, 1253, 1461, 1401, 1574, 1254, 1539, 1411, 1255, 1256, 1580, 1258, 1259, 1346, 1540, 1322, 1541, 1418, 1459, 1264, 1307, 1210, 1462, 1403, 1337, 1542, 1265, 1543, 1544, 1389, 1407, 1412, 1325, 1398, 1486, 1457, 1268, 1266, 1334, 1419, 1267, 1456, 1458, 1315, 1546, 1473, 1472, 1376, 1377, 1316, 1378, 1379, 1390, 1365, 1545, 1317, 1366, 1463, 1302, 1361, 1269, 1400, 1573, 1344, 1466, 1469, 1420, 1487, 1488, 1464, 1465, 1353, 1470, 1548, 1454, 1354, 1331, 1285, 1524, 1575, 1410, 1422, 1425, 1352, 1271, 1475, 1474, 1525, 1367, 1550, 1368, 1272, 1343, 1362, 1363, 1364, 1489, 1321, 1370, 1369, 1274, 1549, 1395, 1275, 1528, 1527, 1383, 1424, 1276, 1437, 1327, 1455, 1380, 1328, 1342, 1277, 1385, 1359, 1320, 1490, 1371, 1429, 1393, 1372, 1471, 1333, 1373, 1374, 1281, 1423, 1382, 1375, 1282, 1305, 1414, 1523, 1416, 1336, 1339, 1443, 1444, 1445, 1446, 1447, 1448, 1449, 1578, 1491, 1358, 1494, 1495, 1493, 1492, 1357, 1428, 1284, 1554, 1555, 1556, 1557, 1579, 1551, 1397, 1287, 1286, 1552, 1553, 1355, 1413, 1409, 1421, 1440, 1391, 1291, 1496, 1561, 1562, 1563, 1564, 1565, 1566, 1568, 1567, 1569, 1570, 1571, 1520, 1294, 1323, 1572, 1297, 1330, 1392, 1306, 1558, 1559, 1560, 1347, 1303, 1526, 1399, 523: 2080, 1212, 1213, 1211, 563: 2081, 574: 2154}, + // 995 + {8: 2155, 2087}, + {9: 257, 12: 257, 257, 257, 257, 257, 257, 257, 257, 257, 257, 257, 257, 257, 257, 257, 257, 257, 257, 257, 257, 257, 257, 257, 550: 257}, + {237, 237, 237, 237, 237, 237, 10: 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 
237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 404: 2072, 552: 2157}, + {1309, 1332, 1217, 1442, 1436, 1426, 10: 1280, 1229, 1477, 1511, 1504, 1497, 1507, 1500, 1499, 1501, 1517, 1509, 1503, 1515, 1516, 1513, 1514, 1502, 1498, 1505, 1506, 1508, 1512, 1510, 1547, 1453, 1451, 1452, 1314, 1216, 1226, 1441, 1244, 1288, 1246, 1225, 1260, 1263, 1434, 1299, 1335, 1522, 1521, 1270, 1338, 1298, 1476, 1221, 1231, 1340, 1439, 1341, 1257, 1518, 1519, 1438, 1326, 1350, 1273, 1278, 1430, 1431, 1283, 1289, 1384, 1296, 1432, 1433, 1219, 1222, 1224, 1223, 1238, 1237, 1482, 1427, 1243, 1249, 1261, 1262, 1250, 1485, 1405, 1318, 1319, 1279, 1450, 1290, 1293, 1292, 1415, 1295, 1300, 1301, 1402, 1214, 1529, 1215, 1218, 1460, 1387, 1304, 1220, 1310, 1348, 1349, 1345, 1530, 1531, 1532, 1406, 1576, 1478, 1479, 1467, 1480, 1227, 1394, 1533, 1312, 1396, 1228, 1381, 1481, 1360, 1308, 1230, 1329, 1232, 1233, 1313, 1311, 1234, 1408, 1534, 1535, 1404, 1235, 1536, 1468, 1236, 1537, 1538, 1239, 1240, 1388, 1324, 1483, 1417, 1241, 1484, 1242, 1245, 1247, 1248, 1251, 1386, 1351, 1252, 1577, 1435, 1356, 1253, 1461, 1401, 1574, 1254, 1539, 1411, 1255, 1256, 1580, 1258, 1259, 1346, 1540, 1322, 1541, 1418, 1459, 1264, 1307, 1210, 1462, 1403, 1337, 1542, 1265, 1543, 1544, 1389, 1407, 1412, 1325, 1398, 1486, 1457, 1268, 1266, 1334, 1419, 1267, 1456, 1458, 1315, 1546, 1473, 1472, 1376, 1377, 1316, 1378, 1379, 1390, 1365, 1545, 1317, 1366, 1463, 1302, 1361, 1269, 1400, 1573, 1344, 1466, 1469, 1420, 1487, 1488, 1464, 1465, 1353, 1470, 1548, 1454, 1354, 1331, 1285, 1524, 1575, 1410, 1422, 1425, 1352, 1271, 1475, 1474, 1525, 1367, 1550, 1368, 1272, 1343, 1362, 1363, 1364, 1489, 1321, 1370, 1369, 1274, 1549, 1395, 1275, 1528, 1527, 1383, 1424, 1276, 1437, 1327, 1455, 1380, 1328, 1342, 1277, 1385, 1359, 1320, 1490, 1371, 1429, 1393, 1372, 1471, 1333, 1373, 1374, 1281, 1423, 1382, 1375, 1282, 1305, 1414, 1523, 1416, 1336, 1339, 1443, 1444, 1445, 1446, 1447, 1448, 1449, 1578, 1491, 1358, 1494, 1495, 1493, 1492, 1357, 1428, 1284, 1554, 1555, 1556, 1557, 1579, 1551, 1397, 1287, 1286, 1552, 1553, 1355, 1413, 1409, 1421, 1440, 1391, 1291, 1496, 1561, 1562, 1563, 1564, 1565, 1566, 1568, 1567, 1569, 1570, 1571, 1520, 1294, 1323, 1572, 1297, 1330, 1392, 1306, 1558, 1559, 1560, 1347, 1303, 1526, 1399, 523: 2080, 1212, 1213, 1211, 563: 2081, 574: 2158}, + {8: 2159, 2087}, + // 1000 + {9: 258, 12: 258, 258, 258, 258, 258, 258, 258, 258, 258, 258, 258, 258, 258, 258, 258, 258, 
258, 258, 258, 258, 258, 258, 258, 550: 258}, + {237, 237, 237, 237, 237, 237, 10: 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 404: 2072, 552: 2161}, + {1309, 1332, 1217, 1442, 1436, 1426, 10: 1280, 1229, 1477, 1511, 1504, 1497, 1507, 1500, 1499, 1501, 1517, 1509, 1503, 1515, 1516, 1513, 1514, 1502, 1498, 1505, 1506, 1508, 1512, 1510, 1547, 1453, 1451, 1452, 1314, 1216, 1226, 1441, 1244, 1288, 1246, 1225, 1260, 1263, 1434, 1299, 1335, 1522, 1521, 1270, 1338, 1298, 1476, 1221, 1231, 1340, 1439, 1341, 1257, 1518, 1519, 1438, 1326, 1350, 1273, 1278, 1430, 1431, 1283, 1289, 1384, 1296, 1432, 1433, 1219, 1222, 1224, 1223, 1238, 1237, 1482, 1427, 1243, 1249, 1261, 1262, 1250, 1485, 1405, 1318, 1319, 1279, 1450, 1290, 1293, 1292, 1415, 1295, 1300, 1301, 1402, 1214, 1529, 1215, 1218, 1460, 1387, 1304, 1220, 1310, 1348, 1349, 1345, 1530, 1531, 1532, 1406, 1576, 1478, 1479, 1467, 1480, 1227, 1394, 1533, 1312, 1396, 1228, 1381, 1481, 1360, 1308, 1230, 1329, 1232, 1233, 1313, 1311, 1234, 1408, 1534, 1535, 1404, 1235, 1536, 1468, 1236, 1537, 1538, 1239, 1240, 1388, 1324, 1483, 1417, 1241, 1484, 1242, 1245, 1247, 1248, 1251, 1386, 1351, 1252, 1577, 1435, 1356, 1253, 1461, 1401, 1574, 1254, 1539, 1411, 1255, 1256, 1580, 1258, 1259, 1346, 1540, 1322, 1541, 1418, 1459, 1264, 1307, 1210, 1462, 1403, 1337, 1542, 1265, 1543, 1544, 1389, 1407, 1412, 1325, 1398, 1486, 1457, 1268, 1266, 1334, 1419, 1267, 1456, 1458, 1315, 1546, 1473, 1472, 1376, 1377, 1316, 1378, 1379, 1390, 1365, 1545, 1317, 1366, 1463, 1302, 1361, 1269, 1400, 1573, 1344, 1466, 1469, 1420, 1487, 1488, 1464, 1465, 1353, 1470, 1548, 1454, 1354, 1331, 1285, 1524, 1575, 1410, 1422, 1425, 1352, 1271, 1475, 1474, 1525, 1367, 1550, 1368, 1272, 1343, 1362, 1363, 1364, 1489, 1321, 1370, 1369, 1274, 1549, 1395, 1275, 1528, 
1527, 1383, 1424, 1276, 1437, 1327, 1455, 1380, 1328, 1342, 1277, 1385, 1359, 1320, 1490, 1371, 1429, 1393, 1372, 1471, 1333, 1373, 1374, 1281, 1423, 1382, 1375, 1282, 1305, 1414, 1523, 1416, 1336, 1339, 1443, 1444, 1445, 1446, 1447, 1448, 1449, 1578, 1491, 1358, 1494, 1495, 1493, 1492, 1357, 1428, 1284, 1554, 1555, 1556, 1557, 1579, 1551, 1397, 1287, 1286, 1552, 1553, 1355, 1413, 1409, 1421, 1440, 1391, 1291, 1496, 1561, 1562, 1563, 1564, 1565, 1566, 1568, 1567, 1569, 1570, 1571, 1520, 1294, 1323, 1572, 1297, 1330, 1392, 1306, 1558, 1559, 1560, 1347, 1303, 1526, 1399, 523: 2080, 1212, 1213, 1211, 563: 2081, 574: 2162}, + {8: 2163, 2087}, + {9: 259, 12: 259, 259, 259, 259, 259, 259, 259, 259, 259, 259, 259, 259, 259, 259, 259, 259, 259, 259, 259, 259, 259, 259, 259, 550: 259}, + // 1005 + {237, 237, 237, 237, 237, 237, 10: 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 404: 2072, 552: 2165}, + {1309, 1332, 1217, 1442, 1436, 1426, 10: 1280, 1229, 1477, 1511, 1504, 1497, 1507, 1500, 1499, 1501, 1517, 1509, 1503, 1515, 1516, 1513, 1514, 1502, 1498, 1505, 1506, 1508, 1512, 1510, 1547, 1453, 1451, 1452, 1314, 1216, 1226, 1441, 1244, 1288, 1246, 1225, 1260, 1263, 1434, 1299, 1335, 1522, 1521, 1270, 1338, 1298, 1476, 1221, 1231, 1340, 1439, 1341, 1257, 1518, 1519, 1438, 1326, 1350, 1273, 1278, 1430, 1431, 1283, 1289, 1384, 1296, 1432, 1433, 1219, 1222, 1224, 1223, 1238, 1237, 1482, 1427, 1243, 1249, 1261, 1262, 1250, 1485, 1405, 1318, 1319, 1279, 1450, 1290, 1293, 1292, 1415, 1295, 1300, 1301, 1402, 1214, 1529, 1215, 1218, 1460, 1387, 1304, 1220, 1310, 1348, 1349, 1345, 1530, 1531, 1532, 1406, 1576, 1478, 1479, 1467, 1480, 1227, 1394, 1533, 1312, 1396, 1228, 1381, 1481, 1360, 1308, 1230, 1329, 1232, 1233, 1313, 1311, 1234, 1408, 1534, 1535, 1404, 1235, 1536, 1468, 
1236, 1537, 1538, 1239, 1240, 1388, 1324, 1483, 1417, 1241, 1484, 1242, 1245, 1247, 1248, 1251, 1386, 1351, 1252, 1577, 1435, 1356, 1253, 1461, 1401, 1574, 1254, 1539, 1411, 1255, 1256, 1580, 1258, 1259, 1346, 1540, 1322, 1541, 1418, 1459, 1264, 1307, 1210, 1462, 1403, 1337, 1542, 1265, 1543, 1544, 1389, 1407, 1412, 1325, 1398, 1486, 1457, 1268, 1266, 1334, 1419, 1267, 1456, 1458, 1315, 1546, 1473, 1472, 1376, 1377, 1316, 1378, 1379, 1390, 1365, 1545, 1317, 1366, 1463, 1302, 1361, 1269, 1400, 1573, 1344, 1466, 1469, 1420, 1487, 1488, 1464, 1465, 1353, 1470, 1548, 1454, 1354, 1331, 1285, 1524, 1575, 1410, 1422, 1425, 1352, 1271, 1475, 1474, 1525, 1367, 1550, 1368, 1272, 1343, 1362, 1363, 1364, 1489, 1321, 1370, 1369, 1274, 1549, 1395, 1275, 1528, 1527, 1383, 1424, 1276, 1437, 1327, 1455, 1380, 1328, 1342, 1277, 1385, 1359, 1320, 1490, 1371, 1429, 1393, 1372, 1471, 1333, 1373, 1374, 1281, 1423, 1382, 1375, 1282, 1305, 1414, 1523, 1416, 1336, 1339, 1443, 1444, 1445, 1446, 1447, 1448, 1449, 1578, 1491, 1358, 1494, 1495, 1493, 1492, 1357, 1428, 1284, 1554, 1555, 1556, 1557, 1579, 1551, 1397, 1287, 1286, 1552, 1553, 1355, 1413, 1409, 1421, 1440, 1391, 1291, 1496, 1561, 1562, 1563, 1564, 1565, 1566, 1568, 1567, 1569, 1570, 1571, 1520, 1294, 1323, 1572, 1297, 1330, 1392, 1306, 1558, 1559, 1560, 1347, 1303, 1526, 1399, 523: 2080, 1212, 1213, 1211, 563: 2081, 574: 2166}, + {8: 2167, 2087}, + {9: 260, 12: 260, 260, 260, 260, 260, 260, 260, 260, 260, 260, 260, 260, 260, 260, 260, 260, 260, 260, 260, 260, 260, 260, 260, 550: 260}, + {237, 237, 237, 237, 237, 237, 10: 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 404: 2072, 552: 2169}, + // 1010 + {1309, 1332, 1217, 1442, 1436, 1426, 10: 1280, 1229, 1477, 1511, 1504, 1497, 1507, 1500, 1499, 1501, 1517, 1509, 1503, 1515, 
1516, 1513, 1514, 1502, 1498, 1505, 1506, 1508, 1512, 1510, 1547, 1453, 1451, 1452, 1314, 1216, 1226, 1441, 1244, 1288, 1246, 1225, 1260, 1263, 1434, 1299, 1335, 1522, 1521, 1270, 1338, 1298, 1476, 1221, 1231, 1340, 1439, 1341, 1257, 1518, 1519, 1438, 1326, 1350, 1273, 1278, 1430, 1431, 1283, 1289, 1384, 1296, 1432, 1433, 1219, 1222, 1224, 1223, 1238, 1237, 1482, 1427, 1243, 1249, 1261, 1262, 1250, 1485, 1405, 1318, 1319, 1279, 1450, 1290, 1293, 1292, 1415, 1295, 1300, 1301, 1402, 1214, 1529, 1215, 1218, 1460, 1387, 1304, 1220, 1310, 1348, 1349, 1345, 1530, 1531, 1532, 1406, 1576, 1478, 1479, 1467, 1480, 1227, 1394, 1533, 1312, 1396, 1228, 1381, 1481, 1360, 1308, 1230, 1329, 1232, 1233, 1313, 1311, 1234, 1408, 1534, 1535, 1404, 1235, 1536, 1468, 1236, 1537, 1538, 1239, 1240, 1388, 1324, 1483, 1417, 1241, 1484, 1242, 1245, 1247, 1248, 1251, 1386, 1351, 1252, 1577, 1435, 1356, 1253, 1461, 1401, 1574, 1254, 1539, 1411, 1255, 1256, 1580, 1258, 1259, 1346, 1540, 1322, 1541, 1418, 1459, 1264, 1307, 1210, 1462, 1403, 1337, 1542, 1265, 1543, 1544, 1389, 1407, 1412, 1325, 1398, 1486, 1457, 1268, 1266, 1334, 1419, 1267, 1456, 1458, 1315, 1546, 1473, 1472, 1376, 1377, 1316, 1378, 1379, 1390, 1365, 1545, 1317, 1366, 1463, 1302, 1361, 1269, 1400, 1573, 1344, 1466, 1469, 1420, 1487, 1488, 1464, 1465, 1353, 1470, 1548, 1454, 1354, 1331, 1285, 1524, 1575, 1410, 1422, 1425, 1352, 1271, 1475, 1474, 1525, 1367, 1550, 1368, 1272, 1343, 1362, 1363, 1364, 1489, 1321, 1370, 1369, 1274, 1549, 1395, 1275, 1528, 1527, 1383, 1424, 1276, 1437, 1327, 1455, 1380, 1328, 1342, 1277, 1385, 1359, 1320, 1490, 1371, 1429, 1393, 1372, 1471, 1333, 1373, 1374, 1281, 1423, 1382, 1375, 1282, 1305, 1414, 1523, 1416, 1336, 1339, 1443, 1444, 1445, 1446, 1447, 1448, 1449, 1578, 1491, 1358, 1494, 1495, 1493, 1492, 1357, 1428, 1284, 1554, 1555, 1556, 1557, 1579, 1551, 1397, 1287, 1286, 1552, 1553, 1355, 1413, 1409, 1421, 1440, 1391, 1291, 1496, 1561, 1562, 1563, 1564, 1565, 1566, 1568, 1567, 1569, 1570, 1571, 1520, 1294, 1323, 1572, 1297, 1330, 1392, 1306, 1558, 1559, 1560, 1347, 1303, 1526, 1399, 523: 2080, 1212, 1213, 1211, 563: 2081, 574: 2170}, + {8: 2171, 2087}, + {9: 261, 12: 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 550: 261}, + {237, 237, 237, 237, 237, 237, 10: 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 
237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 404: 2072, 552: 2173}, + {1309, 1332, 1217, 1442, 1436, 1426, 10: 1280, 1229, 1477, 1511, 1504, 1497, 1507, 1500, 1499, 1501, 1517, 1509, 1503, 1515, 1516, 1513, 1514, 1502, 1498, 1505, 1506, 1508, 1512, 1510, 1547, 1453, 1451, 1452, 1314, 1216, 1226, 1441, 1244, 1288, 1246, 1225, 1260, 1263, 1434, 1299, 1335, 1522, 1521, 1270, 1338, 1298, 1476, 1221, 1231, 1340, 1439, 1341, 1257, 1518, 1519, 1438, 1326, 1350, 1273, 1278, 1430, 1431, 1283, 1289, 1384, 1296, 1432, 1433, 1219, 1222, 1224, 1223, 1238, 1237, 1482, 1427, 1243, 1249, 1261, 1262, 1250, 1485, 1405, 1318, 1319, 1279, 1450, 1290, 1293, 1292, 1415, 1295, 1300, 1301, 1402, 1214, 1529, 1215, 1218, 1460, 1387, 1304, 1220, 1310, 1348, 1349, 1345, 1530, 1531, 1532, 1406, 1576, 1478, 1479, 1467, 1480, 1227, 1394, 1533, 1312, 1396, 1228, 1381, 1481, 1360, 1308, 1230, 1329, 1232, 1233, 1313, 1311, 1234, 1408, 1534, 1535, 1404, 1235, 1536, 1468, 1236, 1537, 1538, 1239, 1240, 1388, 1324, 1483, 1417, 1241, 1484, 1242, 1245, 1247, 1248, 1251, 1386, 1351, 1252, 1577, 1435, 1356, 1253, 1461, 1401, 1574, 1254, 1539, 1411, 1255, 1256, 1580, 1258, 1259, 1346, 1540, 1322, 1541, 1418, 1459, 1264, 1307, 1210, 1462, 1403, 1337, 1542, 1265, 1543, 1544, 1389, 1407, 1412, 1325, 1398, 1486, 1457, 1268, 1266, 1334, 1419, 1267, 1456, 1458, 1315, 1546, 1473, 1472, 1376, 1377, 1316, 1378, 1379, 1390, 1365, 1545, 1317, 1366, 1463, 1302, 1361, 1269, 1400, 1573, 1344, 1466, 1469, 1420, 1487, 1488, 1464, 1465, 1353, 1470, 1548, 1454, 1354, 1331, 1285, 1524, 1575, 1410, 1422, 1425, 1352, 1271, 1475, 1474, 1525, 1367, 1550, 1368, 1272, 1343, 1362, 1363, 1364, 1489, 1321, 1370, 1369, 1274, 1549, 1395, 1275, 1528, 1527, 1383, 1424, 1276, 1437, 1327, 1455, 1380, 1328, 1342, 1277, 1385, 1359, 1320, 1490, 1371, 1429, 1393, 1372, 1471, 1333, 1373, 1374, 1281, 1423, 1382, 1375, 1282, 1305, 1414, 1523, 1416, 1336, 1339, 1443, 1444, 1445, 1446, 1447, 1448, 1449, 1578, 1491, 1358, 1494, 1495, 1493, 1492, 1357, 1428, 1284, 1554, 1555, 1556, 1557, 1579, 1551, 1397, 1287, 1286, 1552, 1553, 1355, 1413, 1409, 1421, 1440, 1391, 1291, 1496, 1561, 1562, 1563, 1564, 1565, 1566, 1568, 1567, 1569, 1570, 1571, 1520, 1294, 1323, 1572, 1297, 1330, 1392, 1306, 1558, 1559, 1560, 1347, 1303, 1526, 1399, 523: 2080, 1212, 1213, 1211, 563: 2174}, + // 1015 + {1309, 1332, 1217, 1442, 1436, 1426, 8: 296, 296, 1280, 1229, 1477, 1511, 1504, 1497, 1507, 1500, 1499, 1501, 1517, 1509, 1503, 1515, 1516, 1513, 1514, 1502, 1498, 1505, 1506, 1508, 1512, 1510, 1547, 1453, 1451, 1452, 1314, 1216, 1226, 1441, 1244, 1288, 1246, 1225, 1260, 1263, 1434, 1299, 1335, 1522, 1521, 1270, 1338, 1298, 1476, 1221, 1231, 1340, 1439, 1341, 1257, 1518, 1519, 1438, 1326, 1350, 1273, 1278, 1430, 1431, 1283, 1289, 1384, 1296, 1432, 1433, 1219, 1222, 1224, 1223, 1238, 1237, 1482, 1427, 1243, 1249, 1261, 1262, 1250, 1485, 1405, 1318, 1319, 1279, 1450, 1290, 1293, 1292, 1415, 1295, 1300, 1301, 1402, 1214, 1529, 1215, 1218, 1460, 1387, 1304, 1220, 
1310, 1348, 1349, 1345, 1530, 1531, 1532, 1406, 1576, 1478, 1479, 1467, 1480, 1227, 1394, 1533, 1312, 1396, 1228, 1381, 1481, 1360, 1308, 1230, 1329, 1232, 1233, 1313, 1311, 1234, 1408, 1534, 1535, 1404, 1235, 1536, 1468, 1236, 1537, 1538, 1239, 1240, 1388, 1324, 1483, 1417, 1241, 1484, 1242, 1245, 1247, 1248, 1251, 1386, 1351, 1252, 1577, 1435, 1356, 1253, 1461, 1401, 1574, 1254, 1539, 1411, 1255, 1256, 1580, 1258, 1259, 1346, 1540, 1322, 1541, 1418, 1459, 1264, 1307, 1210, 1462, 1403, 1337, 1542, 1265, 1543, 1544, 1389, 1407, 1412, 1325, 1398, 1486, 1457, 1268, 1266, 1334, 1419, 1267, 1456, 1458, 1315, 1546, 1473, 1472, 1376, 1377, 1316, 1378, 1379, 1390, 1365, 1545, 1317, 1366, 1463, 1302, 1361, 1269, 1400, 1573, 1344, 1466, 1469, 1420, 1487, 1488, 1464, 1465, 1353, 1470, 1548, 1454, 1354, 1331, 1285, 1524, 1575, 1410, 1422, 1425, 1352, 1271, 1475, 1474, 1525, 1367, 1550, 1368, 1272, 1343, 1362, 1363, 1364, 1489, 1321, 1370, 1369, 1274, 1549, 1395, 1275, 1528, 1527, 1383, 1424, 1276, 1437, 1327, 1455, 1380, 1328, 1342, 1277, 1385, 1359, 1320, 1490, 1371, 1429, 1393, 1372, 1471, 1333, 1373, 1374, 1281, 1423, 1382, 1375, 1282, 1305, 1414, 1523, 1416, 1336, 1339, 1443, 1444, 1445, 1446, 1447, 1448, 1449, 1578, 1491, 1358, 1494, 1495, 1493, 1492, 1357, 1428, 1284, 1554, 1555, 1556, 1557, 1579, 1551, 1397, 1287, 1286, 1552, 1553, 1355, 1413, 1409, 1421, 1440, 1391, 1291, 1496, 1561, 1562, 1563, 1564, 1565, 1566, 1568, 1567, 1569, 1570, 1571, 1520, 1294, 1323, 1572, 1297, 1330, 1392, 1306, 1558, 1559, 1560, 1347, 1303, 1526, 1399, 383: 2007, 523: 2006, 1212, 1213, 1211, 615: 2175}, + {8: 2176, 2009}, + {9: 262, 12: 262, 262, 262, 262, 262, 262, 262, 262, 262, 262, 262, 262, 262, 262, 262, 262, 262, 262, 262, 262, 262, 262, 262, 550: 262}, + {237, 237, 237, 237, 237, 237, 10: 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 
237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 404: 2072, 552: 2178}, + {1309, 1332, 1217, 1442, 1436, 1426, 10: 1280, 1229, 1477, 1511, 1504, 1497, 1507, 1500, 1499, 1501, 1517, 1509, 1503, 1515, 1516, 1513, 1514, 1502, 1498, 1505, 1506, 1508, 1512, 1510, 1547, 1453, 1451, 1452, 1314, 1216, 1226, 1441, 1244, 1288, 1246, 1225, 1260, 1263, 1434, 1299, 1335, 1522, 1521, 1270, 1338, 1298, 1476, 1221, 1231, 1340, 1439, 1341, 1257, 1518, 1519, 1438, 1326, 1350, 1273, 1278, 1430, 1431, 1283, 1289, 1384, 1296, 1432, 1433, 1219, 1222, 1224, 1223, 1238, 1237, 1482, 1427, 1243, 1249, 1261, 1262, 1250, 1485, 1405, 1318, 1319, 1279, 1450, 1290, 1293, 1292, 1415, 1295, 1300, 1301, 1402, 1214, 1529, 1215, 1218, 1460, 1387, 1304, 1220, 1310, 1348, 1349, 1345, 1530, 1531, 1532, 1406, 1576, 1478, 1479, 1467, 1480, 1227, 1394, 1533, 1312, 1396, 1228, 1381, 1481, 1360, 1308, 1230, 1329, 1232, 1233, 1313, 1311, 1234, 1408, 1534, 1535, 1404, 1235, 1536, 1468, 1236, 1537, 1538, 1239, 1240, 1388, 1324, 1483, 1417, 1241, 1484, 1242, 1245, 1247, 1248, 1251, 1386, 1351, 1252, 1577, 1435, 1356, 1253, 1461, 1401, 1574, 1254, 1539, 1411, 1255, 1256, 1580, 1258, 1259, 1346, 1540, 1322, 1541, 1418, 1459, 1264, 1307, 1210, 1462, 1403, 1337, 1542, 1265, 1543, 1544, 1389, 1407, 1412, 1325, 1398, 1486, 1457, 1268, 1266, 1334, 1419, 1267, 1456, 1458, 1315, 1546, 1473, 1472, 1376, 1377, 1316, 1378, 1379, 1390, 1365, 1545, 1317, 1366, 1463, 1302, 1361, 1269, 1400, 1573, 1344, 1466, 1469, 1420, 1487, 1488, 1464, 1465, 1353, 1470, 1548, 1454, 1354, 1331, 1285, 1524, 1575, 1410, 1422, 1425, 1352, 1271, 1475, 1474, 1525, 1367, 1550, 1368, 1272, 1343, 1362, 1363, 1364, 1489, 1321, 1370, 1369, 1274, 1549, 1395, 1275, 1528, 1527, 1383, 1424, 1276, 1437, 1327, 1455, 1380, 1328, 1342, 1277, 1385, 1359, 1320, 1490, 1371, 1429, 1393, 1372, 1471, 1333, 1373, 1374, 1281, 1423, 1382, 1375, 1282, 1305, 1414, 1523, 1416, 1336, 1339, 1443, 1444, 1445, 1446, 1447, 1448, 1449, 1578, 1491, 1358, 1494, 1495, 1493, 1492, 1357, 1428, 1284, 1554, 1555, 1556, 1557, 1579, 1551, 1397, 1287, 1286, 1552, 1553, 1355, 1413, 1409, 1421, 1440, 1391, 1291, 1496, 1561, 1562, 1563, 1564, 1565, 1566, 1568, 1567, 1569, 1570, 1571, 1520, 1294, 1323, 1572, 1297, 1330, 1392, 1306, 1558, 1559, 1560, 1347, 1303, 1526, 1399, 523: 2080, 1212, 1213, 1211, 563: 2179}, + // 1020 + {1309, 1332, 1217, 1442, 1436, 1426, 8: 296, 296, 1280, 1229, 1477, 1511, 1504, 1497, 1507, 1500, 1499, 1501, 1517, 1509, 1503, 1515, 1516, 1513, 1514, 1502, 1498, 1505, 1506, 1508, 1512, 1510, 1547, 1453, 1451, 1452, 1314, 1216, 1226, 1441, 1244, 1288, 1246, 1225, 1260, 1263, 1434, 1299, 1335, 1522, 1521, 1270, 1338, 1298, 1476, 1221, 1231, 1340, 1439, 1341, 1257, 1518, 1519, 1438, 1326, 1350, 1273, 1278, 1430, 1431, 1283, 1289, 1384, 1296, 1432, 1433, 1219, 1222, 1224, 1223, 1238, 1237, 1482, 1427, 1243, 1249, 1261, 1262, 1250, 1485, 1405, 1318, 1319, 1279, 1450, 1290, 1293, 1292, 1415, 1295, 1300, 1301, 1402, 1214, 1529, 1215, 1218, 1460, 1387, 1304, 1220, 1310, 1348, 1349, 1345, 1530, 1531, 1532, 1406, 1576, 1478, 1479, 1467, 1480, 1227, 1394, 1533, 1312, 1396, 1228, 1381, 1481, 1360, 1308, 1230, 1329, 1232, 1233, 1313, 1311, 1234, 1408, 1534, 1535, 1404, 1235, 1536, 1468, 1236, 1537, 1538, 1239, 1240, 1388, 1324, 1483, 1417, 1241, 1484, 1242, 1245, 1247, 1248, 1251, 1386, 1351, 1252, 1577, 1435, 1356, 1253, 1461, 1401, 1574, 1254, 1539, 1411, 1255, 1256, 1580, 1258, 1259, 1346, 1540, 1322, 1541, 1418, 1459, 1264, 1307, 1210, 1462, 1403, 1337, 1542, 1265, 1543, 1544, 1389, 1407, 
1412, 1325, 1398, 1486, 1457, 1268, 1266, 1334, 1419, 1267, 1456, 1458, 1315, 1546, 1473, 1472, 1376, 1377, 1316, 1378, 1379, 1390, 1365, 1545, 1317, 1366, 1463, 1302, 1361, 1269, 1400, 1573, 1344, 1466, 1469, 1420, 1487, 1488, 1464, 1465, 1353, 1470, 1548, 1454, 1354, 1331, 1285, 1524, 1575, 1410, 1422, 1425, 1352, 1271, 1475, 1474, 1525, 1367, 1550, 1368, 1272, 1343, 1362, 1363, 1364, 1489, 1321, 1370, 1369, 1274, 1549, 1395, 1275, 1528, 1527, 1383, 1424, 1276, 1437, 1327, 1455, 1380, 1328, 1342, 1277, 1385, 1359, 1320, 1490, 1371, 1429, 1393, 1372, 1471, 1333, 1373, 1374, 1281, 1423, 1382, 1375, 1282, 1305, 1414, 1523, 1416, 1336, 1339, 1443, 1444, 1445, 1446, 1447, 1448, 1449, 1578, 1491, 1358, 1494, 1495, 1493, 1492, 1357, 1428, 1284, 1554, 1555, 1556, 1557, 1579, 1551, 1397, 1287, 1286, 1552, 1553, 1355, 1413, 1409, 1421, 1440, 1391, 1291, 1496, 1561, 1562, 1563, 1564, 1565, 1566, 1568, 1567, 1569, 1570, 1571, 1520, 1294, 1323, 1572, 1297, 1330, 1392, 1306, 1558, 1559, 1560, 1347, 1303, 1526, 1399, 383: 2007, 523: 2006, 1212, 1213, 1211, 615: 2180}, + {8: 2181, 2009}, + {9: 263, 12: 263, 263, 263, 263, 263, 263, 263, 263, 263, 263, 263, 263, 263, 263, 263, 263, 263, 263, 263, 263, 263, 263, 263, 550: 263}, + {270, 270, 270, 270, 270, 270, 10: 270, 270, 270, 270, 270, 270, 270, 270, 270, 270, 270, 270, 270, 270, 270, 270, 270, 270, 270, 270, 270, 270, 270, 270, 270, 270, 270, 270, 270, 270, 270, 270, 270, 270, 270, 270, 270, 270, 270, 270, 270, 270, 270, 270, 270, 270, 270, 270, 270, 270, 270, 270, 270, 270, 270, 270, 270, 270, 270, 270, 270, 270, 270, 270, 270, 270, 270, 270, 270, 270, 270, 270, 270, 270, 270, 270, 270, 270, 270, 270, 270, 270, 270, 270, 270, 270, 270, 270, 270, 270, 270, 270, 270, 270, 270, 270, 270, 270, 270, 270, 270, 270, 270, 270, 270, 270, 270, 270, 270, 270, 270, 270, 270, 270, 270, 270, 270, 270, 270, 270, 270, 270, 270, 270, 270, 270, 270, 270, 270, 270, 270, 270, 270, 270, 270, 270, 270, 270, 270, 270, 270, 270, 270, 270, 270, 270, 270, 270, 270, 270, 270, 270, 270, 270, 270, 270, 270, 270, 270, 270, 270, 270, 270, 270, 270, 270, 270, 270, 270, 270, 270, 270, 270, 270, 270, 270, 270, 270, 270, 270, 270, 270, 270, 270, 270, 270, 270, 270, 270, 270, 270, 270, 270, 270, 270, 270, 270, 270, 270, 270, 270, 270, 270, 270, 270, 270, 270, 270, 270, 270, 270, 270, 270, 270, 270, 270, 270, 270, 270, 270, 270, 270, 270, 270, 270, 270, 270, 270, 270, 270, 270, 270, 270, 270, 270, 270, 270, 270, 270, 270, 270, 270, 270, 270, 270, 270, 270, 270, 270, 270, 270, 270, 270, 270, 270, 270, 270, 270, 270, 270, 270, 270, 270, 270, 270, 270, 270, 270, 270, 270, 270, 270, 270, 270, 270, 270, 270, 270, 270, 270, 270, 270, 270, 270, 270, 270, 270, 270, 270, 270, 270, 270, 270, 270, 270, 270, 270, 270, 270, 270, 270, 270, 270, 270, 270, 270, 270, 270, 270, 270, 270, 270, 270, 270, 270, 270, 270, 270, 270, 270, 270, 270, 270, 270, 270, 270, 270, 270, 270, 270, 270, 270, 270, 270, 270, 270, 270, 270, 270, 270, 270, 270, 270, 270, 270, 270, 270, 270, 270, 270, 270, 270, 270, 270, 270, 270, 270, 270, 270, 270, 270, 270, 270, 270, 270, 376: 270, 378: 270, 270, 270, 270, 400: 270, 270, 404: 270, 270, 270, 410: 270, 419: 270, 270, 429: 270, 270, 270, 433: 270, 435: 270, 270, 270, 270, 270, 270, 270, 270, 270, 270, 270, 270, 270, 270, 270, 270, 270, 270, 270, 270, 270, 270, 270, 270, 270, 270, 270, 270, 270, 270, 270, 270, 270, 270, 270, 549: 270, 551: 270, 553: 270, 557: 270, 270, 560: 270, 270, 270, 607: 270, 611: 270, 270}, + {271, 271, 271, 271, 271, 271, 10: 271, 271, 271, 271, 271, 
271, 271, 271, 271, 271, 271, 271, 271, 271, 271, 271, 271, 271, 271, 271, 271, 271, 271, 271, 271, 271, 271, 271, 271, 271, 271, 271, 271, 271, 271, 271, 271, 271, 271, 271, 271, 271, 271, 271, 271, 271, 271, 271, 271, 271, 271, 271, 271, 271, 271, 271, 271, 271, 271, 271, 271, 271, 271, 271, 271, 271, 271, 271, 271, 271, 271, 271, 271, 271, 271, 271, 271, 271, 271, 271, 271, 271, 271, 271, 271, 271, 271, 271, 271, 271, 271, 271, 271, 271, 271, 271, 271, 271, 271, 271, 271, 271, 271, 271, 271, 271, 271, 271, 271, 271, 271, 271, 271, 271, 271, 271, 271, 271, 271, 271, 271, 271, 271, 271, 271, 271, 271, 271, 271, 271, 271, 271, 271, 271, 271, 271, 271, 271, 271, 271, 271, 271, 271, 271, 271, 271, 271, 271, 271, 271, 271, 271, 271, 271, 271, 271, 271, 271, 271, 271, 271, 271, 271, 271, 271, 271, 271, 271, 271, 271, 271, 271, 271, 271, 271, 271, 271, 271, 271, 271, 271, 271, 271, 271, 271, 271, 271, 271, 271, 271, 271, 271, 271, 271, 271, 271, 271, 271, 271, 271, 271, 271, 271, 271, 271, 271, 271, 271, 271, 271, 271, 271, 271, 271, 271, 271, 271, 271, 271, 271, 271, 271, 271, 271, 271, 271, 271, 271, 271, 271, 271, 271, 271, 271, 271, 271, 271, 271, 271, 271, 271, 271, 271, 271, 271, 271, 271, 271, 271, 271, 271, 271, 271, 271, 271, 271, 271, 271, 271, 271, 271, 271, 271, 271, 271, 271, 271, 271, 271, 271, 271, 271, 271, 271, 271, 271, 271, 271, 271, 271, 271, 271, 271, 271, 271, 271, 271, 271, 271, 271, 271, 271, 271, 271, 271, 271, 271, 271, 271, 271, 271, 271, 271, 271, 271, 271, 271, 271, 271, 271, 271, 271, 271, 271, 271, 271, 271, 271, 271, 271, 271, 271, 271, 271, 271, 271, 271, 271, 271, 271, 271, 271, 271, 271, 271, 271, 271, 271, 271, 271, 271, 271, 271, 271, 271, 271, 271, 271, 271, 271, 271, 271, 271, 271, 271, 271, 271, 271, 271, 271, 271, 271, 271, 271, 271, 376: 271, 378: 271, 271, 271, 271, 400: 271, 271, 404: 271, 271, 271, 410: 271, 419: 271, 271, 429: 271, 271, 271, 433: 271, 435: 271, 271, 271, 271, 271, 271, 271, 271, 271, 271, 271, 271, 271, 271, 271, 271, 271, 271, 271, 271, 271, 271, 271, 271, 271, 271, 271, 271, 271, 271, 271, 271, 271, 271, 271, 549: 271, 551: 271, 553: 271, 557: 271, 271, 560: 271, 271, 271, 607: 271, 611: 271, 271}, + // 1025 + {9: 267, 12: 267, 267, 267, 267, 267, 267, 267, 267, 267, 267, 267, 267, 267, 267, 267, 267, 267, 267, 267, 267, 267, 267, 267, 550: 267}, + {12: 2065, 2058, 2063, 2055, 2048, 2051, 2050, 2054, 2062, 2066, 2053, 2068, 2061, 2067, 2069, 2052, 2049, 2064, 2047, 2056, 2060, 2057, 2059, 642: 2188, 645: 2187}, + {9: 265, 12: 265, 265, 265, 265, 265, 265, 265, 265, 265, 265, 265, 265, 265, 265, 265, 265, 265, 265, 265, 265, 265, 265, 265, 550: 265}, + {9: 266, 12: 266, 266, 266, 266, 266, 266, 266, 266, 266, 266, 266, 266, 266, 266, 266, 266, 266, 266, 266, 266, 266, 266, 266, 550: 266}, + {9: 264, 12: 264, 264, 264, 264, 264, 264, 264, 264, 264, 264, 264, 264, 264, 264, 264, 264, 264, 264, 264, 264, 264, 264, 264, 550: 264}, + // 1030 + {436, 436, 436, 436, 436, 436, 10: 436, 436, 436, 436, 436, 436, 436, 436, 436, 436, 436, 436, 436, 436, 436, 436, 436, 436, 436, 436, 436, 436, 436, 436, 436, 436, 436, 436, 436, 436, 436, 436, 436, 436, 436, 436, 436, 436, 436, 436, 436, 436, 436, 436, 436, 436, 436, 436, 436, 436, 436, 436, 436, 436, 436, 436, 436, 436, 436, 436, 436, 436, 436, 436, 436, 436, 436, 436, 436, 436, 436, 436, 436, 436, 436, 436, 436, 436, 436, 436, 436, 436, 436, 436, 436, 436, 436, 436, 436, 436, 436, 436, 436, 436, 436, 436, 436, 436, 436, 436, 436, 436, 436, 436, 436, 436, 436, 436, 436, 436, 436, 436, 436, 436, 
436, 436, 436, 436, 436, 436, 436, 436, 436, 436, 436, 436, 436, 436, 436, 436, 436, 436, 436, 436, 436, 436, 436, 436, 436, 436, 436, 436, 436, 436, 436, 436, 436, 436, 436, 436, 436, 436, 436, 436, 436, 436, 436, 436, 436, 436, 436, 436, 436, 436, 436, 436, 436, 436, 436, 436, 436, 436, 436, 436, 436, 436, 436, 436, 436, 436, 436, 436, 436, 436, 436, 436, 436, 436, 436, 436, 436, 436, 436, 436, 436, 436, 436, 436, 436, 436, 436, 436, 436, 436, 436, 436, 436, 436, 436, 436, 436, 436, 436, 436, 436, 436, 436, 436, 436, 436, 436, 436, 436, 436, 436, 436, 436, 436, 436, 436, 436, 436, 436, 436, 436, 436, 436, 436, 436, 436, 436, 436, 436, 436, 436, 436, 436, 436, 436, 436, 436, 436, 436, 436, 436, 436, 436, 436, 436, 436, 436, 436, 436, 436, 436, 436, 436, 436, 436, 436, 436, 436, 436, 436, 436, 436, 436, 436, 436, 436, 436, 436, 436, 436, 436, 436, 436, 436, 436, 436, 436, 436, 436, 436, 436, 436, 436, 436, 436, 436, 436, 436, 436, 436, 436, 436, 436, 436, 436, 436, 436, 436, 436, 436, 436, 436, 436, 436, 436, 436, 436, 436, 436, 436, 436, 436, 436, 436, 436, 436, 436, 436, 436, 436, 436, 436, 436, 436, 436, 436, 436, 436, 436, 436, 436, 436, 436, 436, 436, 436, 436, 436, 436, 436, 436, 436, 436, 436, 436, 436, 436, 436, 436, 436, 436, 376: 436, 378: 436, 436, 436, 436, 400: 436, 436, 404: 436, 436, 436, 410: 436, 419: 436, 436, 429: 436, 436, 436, 433: 436, 435: 436, 436, 436, 436, 436, 436, 436, 436, 436, 436, 436, 436, 436, 436, 436, 436, 436, 436, 436, 436, 436, 436, 436, 436, 436, 436, 436, 436, 436, 436, 436, 436, 436, 436, 436, 549: 436, 551: 436, 553: 436, 557: 436, 436, 560: 436, 436, 436}, + {435, 435, 435, 435, 435, 435, 10: 435, 435, 435, 435, 435, 435, 435, 435, 435, 435, 435, 435, 435, 435, 435, 435, 435, 435, 435, 435, 435, 435, 435, 435, 435, 435, 435, 435, 435, 435, 435, 435, 435, 435, 435, 435, 435, 435, 435, 435, 435, 435, 435, 435, 435, 435, 435, 435, 435, 435, 435, 435, 435, 435, 435, 435, 435, 435, 435, 435, 435, 435, 435, 435, 435, 435, 435, 435, 435, 435, 435, 435, 435, 435, 435, 435, 435, 435, 435, 435, 435, 435, 435, 435, 435, 435, 435, 435, 435, 435, 435, 435, 435, 435, 435, 435, 435, 435, 435, 435, 435, 435, 435, 435, 435, 435, 435, 435, 435, 435, 435, 435, 435, 435, 435, 435, 435, 435, 435, 435, 435, 435, 435, 435, 435, 435, 435, 435, 435, 435, 435, 435, 435, 435, 435, 435, 435, 435, 435, 435, 435, 435, 435, 435, 435, 435, 435, 435, 435, 435, 435, 435, 435, 435, 435, 435, 435, 435, 435, 435, 435, 435, 435, 435, 435, 435, 435, 435, 435, 435, 435, 435, 435, 435, 435, 435, 435, 435, 435, 435, 435, 435, 435, 435, 435, 435, 435, 435, 435, 435, 435, 435, 435, 435, 435, 435, 435, 435, 435, 435, 435, 435, 435, 435, 435, 435, 435, 435, 435, 435, 435, 435, 435, 435, 435, 435, 435, 435, 435, 435, 435, 435, 435, 435, 435, 435, 435, 435, 435, 435, 435, 435, 435, 435, 435, 435, 435, 435, 435, 435, 435, 435, 435, 435, 435, 435, 435, 435, 435, 435, 435, 435, 435, 435, 435, 435, 435, 435, 435, 435, 435, 435, 435, 435, 435, 435, 435, 435, 435, 435, 435, 435, 435, 435, 435, 435, 435, 435, 435, 435, 435, 435, 435, 435, 435, 435, 435, 435, 435, 435, 435, 435, 435, 435, 435, 435, 435, 435, 435, 435, 435, 435, 435, 435, 435, 435, 435, 435, 435, 435, 435, 435, 435, 435, 435, 435, 435, 435, 435, 435, 435, 435, 435, 435, 435, 435, 435, 435, 435, 435, 435, 435, 435, 435, 435, 435, 435, 435, 435, 435, 435, 435, 435, 435, 435, 435, 435, 435, 435, 435, 435, 435, 435, 435, 435, 435, 435, 435, 435, 435, 435, 435, 435, 435, 435, 376: 435, 378: 435, 435, 435, 435, 400: 435, 435, 404: 435, 435, 
435, 410: 435, 419: 435, 435, 429: 435, 435, 435, 433: 435, 435: 435, 435, 435, 435, 435, 435, 435, 435, 435, 435, 435, 435, 435, 435, 435, 435, 435, 435, 435, 435, 435, 435, 435, 435, 435, 435, 435, 435, 435, 435, 435, 435, 435, 435, 435, 549: 435, 551: 435, 553: 435, 557: 435, 435, 560: 435, 435, 435}, + {434, 434, 434, 434, 434, 434, 10: 434, 434, 434, 434, 434, 434, 434, 434, 434, 434, 434, 434, 434, 434, 434, 434, 434, 434, 434, 434, 434, 434, 434, 434, 434, 434, 434, 434, 434, 434, 434, 434, 434, 434, 434, 434, 434, 434, 434, 434, 434, 434, 434, 434, 434, 434, 434, 434, 434, 434, 434, 434, 434, 434, 434, 434, 434, 434, 434, 434, 434, 434, 434, 434, 434, 434, 434, 434, 434, 434, 434, 434, 434, 434, 434, 434, 434, 434, 434, 434, 434, 434, 434, 434, 434, 434, 434, 434, 434, 434, 434, 434, 434, 434, 434, 434, 434, 434, 434, 434, 434, 434, 434, 434, 434, 434, 434, 434, 434, 434, 434, 434, 434, 434, 434, 434, 434, 434, 434, 434, 434, 434, 434, 434, 434, 434, 434, 434, 434, 434, 434, 434, 434, 434, 434, 434, 434, 434, 434, 434, 434, 434, 434, 434, 434, 434, 434, 434, 434, 434, 434, 434, 434, 434, 434, 434, 434, 434, 434, 434, 434, 434, 434, 434, 434, 434, 434, 434, 434, 434, 434, 434, 434, 434, 434, 434, 434, 434, 434, 434, 434, 434, 434, 434, 434, 434, 434, 434, 434, 434, 434, 434, 434, 434, 434, 434, 434, 434, 434, 434, 434, 434, 434, 434, 434, 434, 434, 434, 434, 434, 434, 434, 434, 434, 434, 434, 434, 434, 434, 434, 434, 434, 434, 434, 434, 434, 434, 434, 434, 434, 434, 434, 434, 434, 434, 434, 434, 434, 434, 434, 434, 434, 434, 434, 434, 434, 434, 434, 434, 434, 434, 434, 434, 434, 434, 434, 434, 434, 434, 434, 434, 434, 434, 434, 434, 434, 434, 434, 434, 434, 434, 434, 434, 434, 434, 434, 434, 434, 434, 434, 434, 434, 434, 434, 434, 434, 434, 434, 434, 434, 434, 434, 434, 434, 434, 434, 434, 434, 434, 434, 434, 434, 434, 434, 434, 434, 434, 434, 434, 434, 434, 434, 434, 434, 434, 434, 434, 434, 434, 434, 434, 434, 434, 434, 434, 434, 434, 434, 434, 434, 434, 434, 434, 434, 434, 434, 434, 434, 434, 434, 434, 434, 434, 434, 434, 434, 434, 434, 434, 434, 434, 434, 434, 434, 434, 434, 434, 434, 434, 434, 434, 434, 434, 434, 434, 376: 434, 378: 434, 434, 434, 434, 400: 434, 434, 404: 434, 434, 434, 410: 434, 419: 434, 434, 429: 434, 434, 434, 433: 434, 435: 434, 434, 434, 434, 434, 434, 434, 434, 434, 434, 434, 434, 434, 434, 434, 434, 434, 434, 434, 434, 434, 434, 434, 434, 434, 434, 434, 434, 434, 434, 434, 434, 434, 434, 434, 549: 434, 551: 434, 553: 434, 557: 434, 434, 560: 434, 434, 434}, + {433, 433, 433, 433, 433, 433, 10: 433, 433, 433, 433, 433, 433, 433, 433, 433, 433, 433, 433, 433, 433, 433, 433, 433, 433, 433, 433, 433, 433, 433, 433, 433, 433, 433, 433, 433, 433, 433, 433, 433, 433, 433, 433, 433, 433, 433, 433, 433, 433, 433, 433, 433, 433, 433, 433, 433, 433, 433, 433, 433, 433, 433, 433, 433, 433, 433, 433, 433, 433, 433, 433, 433, 433, 433, 433, 433, 433, 433, 433, 433, 433, 433, 433, 433, 433, 433, 433, 433, 433, 433, 433, 433, 433, 433, 433, 433, 433, 433, 433, 433, 433, 433, 433, 433, 433, 433, 433, 433, 433, 433, 433, 433, 433, 433, 433, 433, 433, 433, 433, 433, 433, 433, 433, 433, 433, 433, 433, 433, 433, 433, 433, 433, 433, 433, 433, 433, 433, 433, 433, 433, 433, 433, 433, 433, 433, 433, 433, 433, 433, 433, 433, 433, 433, 433, 433, 433, 433, 433, 433, 433, 433, 433, 433, 433, 433, 433, 433, 433, 433, 433, 433, 433, 433, 433, 433, 433, 433, 433, 433, 433, 433, 433, 433, 433, 433, 433, 433, 433, 433, 433, 433, 433, 433, 433, 433, 433, 433, 433, 433, 433, 433, 433, 
433, 433, 433, 433, 433, 433, 433, 433, 433, 433, 433, 433, 433, 433, 433, 433, 433, 433, 433, 433, 433, 433, 433, 433, 433, 433, 433, 433, 433, 433, 433, 433, 433, 433, 433, 433, 433, 433, 433, 433, 433, 433, 433, 433, 433, 433, 433, 433, 433, 433, 433, 433, 433, 433, 433, 433, 433, 433, 433, 433, 433, 433, 433, 433, 433, 433, 433, 433, 433, 433, 433, 433, 433, 433, 433, 433, 433, 433, 433, 433, 433, 433, 433, 433, 433, 433, 433, 433, 433, 433, 433, 433, 433, 433, 433, 433, 433, 433, 433, 433, 433, 433, 433, 433, 433, 433, 433, 433, 433, 433, 433, 433, 433, 433, 433, 433, 433, 433, 433, 433, 433, 433, 433, 433, 433, 433, 433, 433, 433, 433, 433, 433, 433, 433, 433, 433, 433, 433, 433, 433, 433, 433, 433, 433, 433, 433, 433, 433, 433, 433, 433, 433, 433, 433, 433, 433, 433, 433, 433, 433, 433, 433, 433, 433, 433, 433, 433, 433, 433, 433, 376: 433, 378: 433, 433, 433, 433, 400: 433, 433, 404: 433, 433, 433, 410: 433, 419: 433, 433, 429: 433, 433, 433, 433: 433, 435: 433, 433, 433, 433, 433, 433, 433, 433, 433, 433, 433, 433, 433, 433, 433, 433, 433, 433, 433, 433, 433, 433, 433, 433, 433, 433, 433, 433, 433, 433, 433, 433, 433, 433, 433, 549: 433, 551: 433, 553: 433, 557: 433, 433, 560: 433, 433, 433}, + {431, 431, 431, 431, 431, 431, 10: 431, 431, 431, 431, 431, 431, 431, 431, 431, 431, 431, 431, 431, 431, 431, 431, 431, 431, 431, 431, 431, 431, 431, 431, 431, 431, 431, 431, 431, 431, 431, 431, 431, 431, 431, 431, 431, 431, 431, 431, 431, 431, 431, 431, 431, 431, 431, 431, 431, 431, 431, 431, 431, 431, 431, 431, 431, 431, 431, 431, 431, 431, 431, 431, 431, 431, 431, 431, 431, 431, 431, 431, 431, 431, 431, 431, 431, 431, 431, 431, 431, 431, 431, 431, 431, 431, 431, 431, 431, 431, 431, 431, 431, 431, 431, 431, 431, 431, 431, 431, 431, 431, 431, 431, 431, 431, 431, 431, 431, 431, 431, 431, 431, 431, 431, 431, 431, 431, 431, 431, 431, 431, 431, 431, 431, 431, 431, 431, 431, 431, 431, 431, 431, 431, 431, 431, 431, 431, 431, 431, 431, 431, 431, 431, 431, 431, 431, 431, 431, 431, 431, 431, 431, 431, 431, 431, 431, 431, 431, 431, 431, 431, 431, 431, 431, 431, 431, 431, 431, 431, 431, 431, 431, 431, 431, 431, 431, 431, 431, 431, 431, 431, 431, 431, 431, 431, 431, 431, 431, 431, 431, 431, 431, 431, 431, 431, 431, 431, 431, 431, 431, 431, 431, 431, 431, 431, 431, 431, 431, 431, 431, 431, 431, 431, 431, 431, 431, 431, 431, 431, 431, 431, 431, 431, 431, 431, 431, 431, 431, 431, 431, 431, 431, 431, 431, 431, 431, 431, 431, 431, 431, 431, 431, 431, 431, 431, 431, 431, 431, 431, 431, 431, 431, 431, 431, 431, 431, 431, 431, 431, 431, 431, 431, 431, 431, 431, 431, 431, 431, 431, 431, 431, 431, 431, 431, 431, 431, 431, 431, 431, 431, 431, 431, 431, 431, 431, 431, 431, 431, 431, 431, 431, 431, 431, 431, 431, 431, 431, 431, 431, 431, 431, 431, 431, 431, 431, 431, 431, 431, 431, 431, 431, 431, 431, 431, 431, 431, 431, 431, 431, 431, 431, 431, 431, 431, 431, 431, 431, 431, 431, 431, 431, 431, 431, 431, 431, 431, 431, 431, 431, 431, 431, 431, 431, 431, 431, 431, 431, 431, 431, 431, 431, 431, 431, 431, 431, 431, 431, 431, 431, 431, 431, 431, 431, 431, 376: 431, 378: 431, 431, 431, 431, 400: 431, 431, 404: 431, 431, 431, 410: 431, 419: 431, 431, 429: 431, 431, 431, 433: 431, 435: 431, 431, 431, 431, 431, 431, 431, 431, 431, 431, 431, 431, 431, 431, 431, 431, 431, 431, 431, 431, 431, 431, 431, 431, 431, 431, 431, 431, 431, 431, 431, 431, 431, 431, 431, 549: 431, 551: 431, 553: 431, 557: 431, 431, 560: 431, 431, 431}, + // 1035 + {342, 342, 342, 342, 342, 342, 10: 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 
342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 376: 342, 378: 342, 342, 342, 342, 400: 342, 342, 404: 342, 342, 342, 410: 342, 419: 342, 342, 429: 342, 342, 342, 433: 342, 435: 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 342, 549: 342, 551: 342, 553: 342, 557: 342, 342, 560: 2197, 2196, 2195, 620: 2198}, + {341, 341, 341, 341, 341, 341, 10: 341, 341, 341, 341, 341, 341, 341, 341, 341, 341, 341, 341, 341, 341, 341, 341, 341, 341, 341, 341, 341, 341, 341, 341, 341, 341, 341, 341, 341, 341, 341, 341, 341, 341, 341, 341, 341, 341, 341, 341, 341, 341, 341, 341, 341, 341, 341, 341, 341, 341, 341, 341, 341, 341, 341, 341, 341, 341, 341, 341, 341, 341, 341, 341, 341, 341, 341, 341, 341, 341, 341, 341, 341, 341, 341, 341, 341, 341, 341, 341, 341, 341, 341, 341, 341, 341, 341, 341, 341, 341, 341, 341, 341, 341, 341, 341, 341, 341, 341, 341, 341, 341, 341, 341, 341, 341, 341, 341, 341, 341, 341, 341, 341, 341, 341, 341, 341, 341, 341, 341, 341, 341, 341, 341, 341, 341, 341, 341, 341, 341, 341, 341, 341, 341, 341, 341, 341, 341, 341, 341, 341, 341, 341, 341, 341, 341, 341, 341, 341, 341, 341, 341, 341, 341, 341, 341, 341, 341, 341, 341, 341, 341, 341, 341, 341, 341, 341, 341, 341, 341, 341, 341, 341, 341, 341, 341, 341, 341, 341, 341, 341, 341, 341, 341, 341, 341, 341, 341, 341, 341, 341, 341, 341, 341, 341, 341, 341, 341, 341, 341, 341, 341, 341, 341, 341, 341, 341, 341, 341, 341, 341, 341, 341, 341, 341, 341, 341, 341, 341, 341, 341, 341, 341, 341, 341, 341, 341, 341, 341, 341, 341, 341, 341, 341, 341, 341, 341, 341, 341, 341, 341, 341, 341, 341, 341, 341, 341, 341, 341, 341, 341, 341, 341, 341, 341, 341, 341, 341, 341, 341, 341, 341, 341, 341, 341, 341, 341, 341, 341, 341, 341, 
341, 341, 341, 341, 341, 341, 341, 341, 341, 341, 341, 341, 341, 341, 341, 341, 341, 341, 341, 341, 341, 341, 341, 341, 341, 341, 341, 341, 341, 341, 341, 341, 341, 341, 341, 341, 341, 341, 341, 341, 341, 341, 341, 341, 341, 341, 341, 341, 341, 341, 341, 341, 341, 341, 341, 341, 341, 341, 341, 341, 341, 341, 341, 341, 341, 341, 341, 341, 341, 341, 341, 341, 341, 341, 341, 341, 341, 341, 341, 341, 341, 341, 341, 341, 341, 341, 341, 341, 341, 341, 341, 341, 341, 341, 376: 341, 378: 341, 341, 341, 341, 398: 341, 400: 341, 341, 404: 341, 341, 341, 410: 341, 419: 341, 341, 429: 341, 341, 341, 433: 341, 435: 341, 341, 341, 341, 341, 341, 341, 341, 341, 341, 341, 341, 341, 341, 341, 341, 341, 341, 341, 341, 341, 341, 341, 341, 341, 341, 341, 341, 341, 341, 341, 341, 341, 341, 341, 549: 341, 551: 341, 553: 341, 557: 341, 341, 580: 341}, + {340, 340, 340, 340, 340, 340, 10: 340, 340, 340, 340, 340, 340, 340, 340, 340, 340, 340, 340, 340, 340, 340, 340, 340, 340, 340, 340, 340, 340, 340, 340, 340, 340, 340, 340, 340, 340, 340, 340, 340, 340, 340, 340, 340, 340, 340, 340, 340, 340, 340, 340, 340, 340, 340, 340, 340, 340, 340, 340, 340, 340, 340, 340, 340, 340, 340, 340, 340, 340, 340, 340, 340, 340, 340, 340, 340, 340, 340, 340, 340, 340, 340, 340, 340, 340, 340, 340, 340, 340, 340, 340, 340, 340, 340, 340, 340, 340, 340, 340, 340, 340, 340, 340, 340, 340, 340, 340, 340, 340, 340, 340, 340, 340, 340, 340, 340, 340, 340, 340, 340, 340, 340, 340, 340, 340, 340, 340, 340, 340, 340, 340, 340, 340, 340, 340, 340, 340, 340, 340, 340, 340, 340, 340, 340, 340, 340, 340, 340, 340, 340, 340, 340, 340, 340, 340, 340, 340, 340, 340, 340, 340, 340, 340, 340, 340, 340, 340, 340, 340, 340, 340, 340, 340, 340, 340, 340, 340, 340, 340, 340, 340, 340, 340, 340, 340, 340, 340, 340, 340, 340, 340, 340, 340, 340, 340, 340, 340, 340, 340, 340, 340, 340, 340, 340, 340, 340, 340, 340, 340, 340, 340, 340, 340, 340, 340, 340, 340, 340, 340, 340, 340, 340, 340, 340, 340, 340, 340, 340, 340, 340, 340, 340, 340, 340, 340, 340, 340, 340, 340, 340, 340, 340, 340, 340, 340, 340, 340, 340, 340, 340, 340, 340, 340, 340, 340, 340, 340, 340, 340, 340, 340, 340, 340, 340, 340, 340, 340, 340, 340, 340, 340, 340, 340, 340, 340, 340, 340, 340, 340, 340, 340, 340, 340, 340, 340, 340, 340, 340, 340, 340, 340, 340, 340, 340, 340, 340, 340, 340, 340, 340, 340, 340, 340, 340, 340, 340, 340, 340, 340, 340, 340, 340, 340, 340, 340, 340, 340, 340, 340, 340, 340, 340, 340, 340, 340, 340, 340, 340, 340, 340, 340, 340, 340, 340, 340, 340, 340, 340, 340, 340, 340, 340, 340, 340, 340, 340, 340, 340, 340, 340, 340, 340, 340, 340, 340, 340, 340, 340, 340, 340, 340, 340, 340, 340, 340, 340, 340, 340, 340, 340, 340, 340, 376: 340, 378: 340, 340, 340, 340, 398: 340, 400: 340, 340, 404: 340, 340, 340, 410: 340, 419: 340, 340, 429: 340, 340, 340, 433: 340, 435: 340, 340, 340, 340, 340, 340, 340, 340, 340, 340, 340, 340, 340, 340, 340, 340, 340, 340, 340, 340, 340, 340, 340, 340, 340, 340, 340, 340, 340, 340, 340, 340, 340, 340, 340, 549: 340, 551: 340, 553: 340, 557: 340, 340, 580: 340}, + {339, 339, 339, 339, 339, 339, 10: 339, 339, 339, 339, 339, 339, 339, 339, 339, 339, 339, 339, 339, 339, 339, 339, 339, 339, 339, 339, 339, 339, 339, 339, 339, 339, 339, 339, 339, 339, 339, 339, 339, 339, 339, 339, 339, 339, 339, 339, 339, 339, 339, 339, 339, 339, 339, 339, 339, 339, 339, 339, 339, 339, 339, 339, 339, 339, 339, 339, 339, 339, 339, 339, 339, 339, 339, 339, 339, 339, 339, 339, 339, 339, 339, 339, 339, 339, 339, 339, 339, 339, 339, 339, 339, 339, 339, 339, 
339, 339, 339, 339, 339, 339, 339, 339, 339, 339, 339, 339, 339, 339, 339, 339, 339, 339, 339, 339, 339, 339, 339, 339, 339, 339, 339, 339, 339, 339, 339, 339, 339, 339, 339, 339, 339, 339, 339, 339, 339, 339, 339, 339, 339, 339, 339, 339, 339, 339, 339, 339, 339, 339, 339, 339, 339, 339, 339, 339, 339, 339, 339, 339, 339, 339, 339, 339, 339, 339, 339, 339, 339, 339, 339, 339, 339, 339, 339, 339, 339, 339, 339, 339, 339, 339, 339, 339, 339, 339, 339, 339, 339, 339, 339, 339, 339, 339, 339, 339, 339, 339, 339, 339, 339, 339, 339, 339, 339, 339, 339, 339, 339, 339, 339, 339, 339, 339, 339, 339, 339, 339, 339, 339, 339, 339, 339, 339, 339, 339, 339, 339, 339, 339, 339, 339, 339, 339, 339, 339, 339, 339, 339, 339, 339, 339, 339, 339, 339, 339, 339, 339, 339, 339, 339, 339, 339, 339, 339, 339, 339, 339, 339, 339, 339, 339, 339, 339, 339, 339, 339, 339, 339, 339, 339, 339, 339, 339, 339, 339, 339, 339, 339, 339, 339, 339, 339, 339, 339, 339, 339, 339, 339, 339, 339, 339, 339, 339, 339, 339, 339, 339, 339, 339, 339, 339, 339, 339, 339, 339, 339, 339, 339, 339, 339, 339, 339, 339, 339, 339, 339, 339, 339, 339, 339, 339, 339, 339, 339, 339, 339, 339, 339, 339, 339, 339, 339, 339, 339, 339, 339, 339, 339, 339, 339, 339, 339, 339, 339, 339, 339, 339, 339, 339, 339, 339, 339, 339, 339, 339, 339, 339, 339, 339, 339, 339, 339, 339, 339, 339, 339, 339, 339, 339, 339, 339, 339, 376: 339, 378: 339, 339, 339, 339, 398: 339, 400: 339, 339, 404: 339, 339, 339, 410: 339, 419: 339, 339, 429: 339, 339, 339, 433: 339, 435: 339, 339, 339, 339, 339, 339, 339, 339, 339, 339, 339, 339, 339, 339, 339, 339, 339, 339, 339, 339, 339, 339, 339, 339, 339, 339, 339, 339, 339, 339, 339, 339, 339, 339, 339, 549: 339, 551: 339, 553: 339, 557: 339, 339, 580: 339}, + {215, 215, 215, 215, 215, 215, 10: 215, 215, 215, 215, 215, 215, 215, 215, 215, 215, 215, 215, 215, 215, 215, 215, 215, 215, 215, 215, 215, 215, 215, 215, 215, 215, 215, 215, 215, 215, 215, 215, 215, 215, 215, 215, 215, 215, 215, 215, 215, 215, 215, 215, 215, 215, 215, 215, 215, 215, 215, 215, 215, 215, 215, 215, 215, 215, 215, 215, 215, 215, 215, 215, 215, 215, 215, 215, 215, 215, 215, 215, 215, 215, 215, 215, 215, 215, 215, 215, 215, 215, 215, 215, 215, 215, 215, 215, 215, 215, 215, 215, 215, 215, 215, 215, 215, 215, 215, 215, 215, 215, 215, 215, 215, 215, 215, 215, 215, 215, 215, 215, 215, 215, 215, 215, 215, 215, 215, 215, 215, 215, 215, 215, 215, 215, 215, 215, 215, 215, 215, 215, 215, 215, 215, 215, 215, 215, 215, 215, 215, 215, 215, 215, 215, 215, 215, 215, 215, 215, 215, 215, 215, 215, 215, 215, 215, 215, 215, 215, 215, 215, 215, 215, 215, 215, 215, 215, 215, 215, 215, 215, 215, 215, 215, 215, 215, 215, 215, 215, 215, 215, 215, 215, 215, 215, 215, 215, 215, 215, 215, 215, 215, 215, 215, 215, 215, 215, 215, 215, 215, 215, 215, 215, 215, 215, 215, 215, 215, 215, 215, 215, 215, 215, 215, 215, 215, 215, 215, 215, 215, 215, 215, 215, 215, 215, 215, 215, 215, 215, 215, 215, 215, 215, 215, 215, 215, 215, 215, 215, 215, 215, 215, 215, 215, 215, 215, 215, 215, 215, 215, 215, 215, 215, 215, 215, 215, 215, 215, 215, 215, 215, 215, 215, 215, 215, 215, 215, 215, 215, 215, 215, 215, 215, 215, 215, 215, 215, 215, 215, 215, 215, 215, 215, 215, 215, 215, 215, 215, 215, 215, 215, 215, 215, 215, 215, 215, 215, 215, 215, 215, 215, 215, 215, 215, 215, 215, 215, 215, 215, 215, 215, 215, 215, 215, 215, 215, 215, 215, 215, 215, 215, 215, 215, 215, 215, 215, 215, 215, 215, 215, 215, 215, 215, 215, 215, 215, 215, 215, 215, 215, 215, 215, 215, 215, 215, 215, 215, 215, 215, 215, 215, 
215, 215, 215, 215, 215, 215, 215, 215, 215, 215, 215, 215, 215, 376: 215, 378: 215, 215, 215, 215, 400: 215, 215, 404: 215, 215, 215, 410: 215, 419: 215, 215, 429: 215, 215, 215, 433: 215, 435: 215, 215, 215, 215, 215, 215, 215, 215, 215, 215, 215, 215, 215, 215, 215, 215, 215, 215, 215, 215, 215, 215, 215, 215, 215, 215, 215, 215, 215, 215, 215, 215, 215, 215, 215, 549: 215, 551: 215, 553: 215, 557: 215, 2200, 779: 2199}, + // 1040 + {222, 222, 222, 222, 222, 222, 10: 222, 222, 222, 222, 222, 222, 222, 222, 222, 222, 222, 222, 222, 222, 222, 222, 222, 222, 222, 222, 222, 222, 222, 222, 222, 222, 222, 222, 222, 222, 222, 222, 222, 222, 222, 222, 222, 222, 222, 222, 222, 222, 222, 222, 222, 222, 222, 222, 222, 222, 222, 222, 222, 222, 222, 222, 222, 222, 222, 222, 222, 222, 222, 222, 222, 222, 222, 222, 222, 222, 222, 222, 222, 222, 222, 222, 222, 222, 222, 222, 222, 222, 222, 222, 222, 222, 222, 222, 222, 222, 222, 222, 222, 222, 222, 222, 222, 222, 222, 222, 222, 222, 222, 222, 222, 222, 222, 222, 222, 222, 222, 222, 222, 222, 222, 222, 222, 222, 222, 222, 222, 222, 222, 222, 222, 222, 222, 222, 222, 222, 222, 222, 222, 222, 222, 222, 222, 222, 222, 222, 222, 222, 222, 222, 222, 222, 222, 222, 222, 222, 222, 222, 222, 222, 222, 222, 222, 222, 222, 222, 222, 222, 222, 222, 222, 222, 222, 222, 222, 222, 222, 222, 222, 222, 222, 222, 222, 222, 222, 222, 222, 222, 222, 222, 222, 222, 222, 222, 222, 222, 222, 222, 222, 222, 222, 222, 222, 222, 222, 222, 222, 222, 222, 222, 222, 222, 222, 222, 222, 222, 222, 222, 222, 222, 222, 222, 222, 222, 222, 222, 222, 222, 222, 222, 222, 222, 222, 222, 222, 222, 222, 222, 222, 222, 222, 222, 222, 222, 222, 222, 222, 222, 222, 222, 222, 222, 222, 222, 222, 222, 222, 222, 222, 222, 222, 222, 222, 222, 222, 222, 222, 222, 222, 222, 222, 222, 222, 222, 222, 222, 222, 222, 222, 222, 222, 222, 222, 222, 222, 222, 222, 222, 222, 222, 222, 222, 222, 222, 222, 222, 222, 222, 222, 222, 222, 222, 222, 222, 222, 222, 222, 222, 222, 222, 222, 222, 222, 222, 222, 222, 222, 222, 222, 222, 222, 222, 222, 222, 222, 222, 222, 222, 222, 222, 222, 222, 222, 222, 222, 222, 222, 222, 222, 222, 222, 222, 222, 222, 222, 222, 222, 222, 222, 222, 222, 222, 222, 222, 222, 222, 222, 222, 222, 222, 222, 222, 222, 222, 222, 222, 222, 222, 222, 222, 222, 376: 222, 378: 222, 222, 222, 222, 400: 222, 222, 404: 222, 222, 222, 410: 222, 419: 222, 222, 429: 222, 222, 222, 433: 222, 435: 222, 222, 222, 222, 222, 222, 222, 222, 222, 222, 222, 222, 222, 222, 222, 222, 222, 222, 222, 222, 222, 222, 222, 222, 222, 222, 222, 222, 222, 222, 222, 222, 222, 222, 222, 549: 222, 551: 222, 553: 222, 557: 2202, 776: 2201}, + {214, 214, 214, 214, 214, 214, 10: 214, 214, 214, 214, 214, 214, 214, 214, 214, 214, 214, 214, 214, 214, 214, 214, 214, 214, 214, 214, 214, 214, 214, 214, 214, 214, 214, 214, 214, 214, 214, 214, 214, 214, 214, 214, 214, 214, 214, 214, 214, 214, 214, 214, 214, 214, 214, 214, 214, 214, 214, 214, 214, 214, 214, 214, 214, 214, 214, 214, 214, 214, 214, 214, 214, 214, 214, 214, 214, 214, 214, 214, 214, 214, 214, 214, 214, 214, 214, 214, 214, 214, 214, 214, 214, 214, 214, 214, 214, 214, 214, 214, 214, 214, 214, 214, 214, 214, 214, 214, 214, 214, 214, 214, 214, 214, 214, 214, 214, 214, 214, 214, 214, 214, 214, 214, 214, 214, 214, 214, 214, 214, 214, 214, 214, 214, 214, 214, 214, 214, 214, 214, 214, 214, 214, 214, 214, 214, 214, 214, 214, 214, 214, 214, 214, 214, 214, 214, 214, 214, 214, 214, 214, 214, 214, 214, 214, 214, 214, 214, 214, 214, 214, 214, 214, 214, 214, 214, 214, 214, 214, 214, 
214, 214, 214, 214, 214, 214, 214, 214, 214, 214, 214, 214, 214, 214, 214, 214, 214, 214, 214, 214, 214, 214, 214, 214, 214, 214, 214, 214, 214, 214, 214, 214, 214, 214, 214, 214, 214, 214, 214, 214, 214, 214, 214, 214, 214, 214, 214, 214, 214, 214, 214, 214, 214, 214, 214, 214, 214, 214, 214, 214, 214, 214, 214, 214, 214, 214, 214, 214, 214, 214, 214, 214, 214, 214, 214, 214, 214, 214, 214, 214, 214, 214, 214, 214, 214, 214, 214, 214, 214, 214, 214, 214, 214, 214, 214, 214, 214, 214, 214, 214, 214, 214, 214, 214, 214, 214, 214, 214, 214, 214, 214, 214, 214, 214, 214, 214, 214, 214, 214, 214, 214, 214, 214, 214, 214, 214, 214, 214, 214, 214, 214, 214, 214, 214, 214, 214, 214, 214, 214, 214, 214, 214, 214, 214, 214, 214, 214, 214, 214, 214, 214, 214, 214, 214, 214, 214, 214, 214, 214, 214, 214, 214, 214, 214, 214, 214, 214, 214, 214, 214, 214, 214, 214, 214, 214, 214, 214, 214, 214, 214, 214, 214, 214, 214, 214, 214, 214, 214, 214, 214, 214, 214, 214, 376: 214, 378: 214, 214, 214, 214, 400: 214, 214, 404: 214, 214, 214, 410: 214, 419: 214, 214, 429: 214, 214, 214, 433: 214, 435: 214, 214, 214, 214, 214, 214, 214, 214, 214, 214, 214, 214, 214, 214, 214, 214, 214, 214, 214, 214, 214, 214, 214, 214, 214, 214, 214, 214, 214, 214, 214, 214, 214, 214, 214, 549: 214, 551: 214, 553: 214, 557: 214}, + {220, 220, 220, 220, 220, 220, 10: 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 2204, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 376: 220, 378: 220, 220, 220, 220, 400: 220, 220, 404: 220, 220, 220, 410: 220, 419: 220, 220, 429: 220, 220, 220, 433: 220, 435: 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 549: 220, 551: 220, 553: 220, 777: 2203}, + {221, 221, 221, 221, 221, 221, 
10: 221, 221, 221, 221, 221, 221, 221, 221, 221, 221, 221, 221, 221, 221, 221, 221, 221, 221, 221, 221, 221, 221, 221, 221, 221, 221, 221, 221, 221, 221, 221, 221, 221, 221, 221, 221, 221, 221, 221, 221, 221, 221, 221, 221, 221, 221, 221, 221, 221, 221, 221, 221, 221, 221, 221, 221, 221, 221, 221, 221, 221, 221, 221, 221, 221, 221, 221, 221, 221, 221, 221, 221, 221, 221, 221, 221, 221, 221, 221, 221, 221, 221, 221, 221, 221, 221, 221, 221, 221, 221, 221, 221, 221, 221, 221, 221, 221, 221, 221, 221, 221, 221, 221, 221, 221, 221, 221, 221, 221, 221, 221, 221, 221, 221, 221, 221, 221, 221, 221, 221, 221, 221, 221, 221, 221, 221, 221, 221, 221, 221, 221, 221, 221, 221, 221, 221, 221, 221, 221, 221, 221, 221, 221, 221, 221, 221, 221, 221, 221, 221, 221, 221, 221, 221, 221, 221, 221, 221, 221, 221, 221, 221, 221, 221, 221, 221, 221, 221, 221, 221, 221, 221, 221, 221, 221, 221, 221, 221, 221, 221, 221, 221, 221, 221, 221, 221, 221, 221, 221, 221, 221, 221, 221, 221, 221, 221, 221, 221, 221, 221, 221, 221, 221, 221, 221, 221, 221, 221, 221, 221, 221, 221, 221, 221, 221, 221, 221, 221, 221, 221, 221, 221, 221, 221, 221, 221, 221, 221, 221, 221, 221, 221, 221, 221, 221, 221, 221, 221, 221, 221, 221, 221, 221, 221, 221, 221, 221, 221, 221, 221, 221, 221, 221, 221, 221, 221, 221, 221, 221, 221, 221, 221, 221, 221, 221, 221, 221, 221, 221, 221, 221, 221, 221, 221, 221, 221, 221, 221, 221, 221, 221, 221, 221, 221, 221, 221, 221, 221, 221, 221, 221, 221, 221, 221, 221, 221, 221, 221, 221, 221, 221, 221, 221, 221, 221, 221, 221, 221, 221, 221, 221, 221, 221, 221, 221, 221, 221, 221, 221, 221, 221, 221, 221, 221, 221, 221, 221, 221, 221, 221, 221, 221, 221, 221, 221, 221, 221, 221, 221, 221, 221, 221, 221, 221, 221, 221, 221, 221, 221, 221, 221, 221, 221, 221, 221, 221, 221, 221, 221, 221, 221, 221, 221, 221, 221, 376: 221, 378: 221, 221, 221, 221, 400: 221, 221, 404: 221, 221, 221, 410: 221, 419: 221, 221, 429: 221, 221, 221, 433: 221, 435: 221, 221, 221, 221, 221, 221, 221, 221, 221, 221, 221, 221, 221, 221, 221, 221, 221, 221, 221, 221, 221, 221, 221, 221, 221, 221, 221, 221, 221, 221, 221, 221, 221, 221, 221, 549: 221, 551: 221, 553: 221}, + {218, 218, 218, 218, 218, 218, 10: 218, 218, 218, 218, 218, 218, 218, 218, 218, 218, 218, 218, 218, 218, 218, 218, 218, 218, 218, 218, 218, 218, 218, 218, 218, 218, 218, 218, 218, 218, 218, 218, 218, 218, 218, 218, 218, 218, 218, 218, 218, 218, 218, 218, 218, 218, 218, 218, 218, 218, 218, 218, 218, 218, 218, 218, 218, 218, 218, 218, 218, 218, 218, 218, 218, 218, 218, 218, 218, 218, 218, 218, 218, 218, 218, 218, 218, 218, 218, 218, 218, 218, 218, 218, 218, 218, 218, 218, 218, 218, 218, 218, 218, 218, 218, 218, 218, 218, 218, 218, 218, 218, 218, 218, 218, 218, 218, 218, 218, 218, 218, 218, 218, 218, 218, 218, 218, 218, 218, 218, 218, 218, 218, 218, 218, 218, 218, 218, 218, 218, 218, 218, 218, 218, 218, 218, 218, 218, 218, 218, 218, 218, 218, 218, 218, 218, 218, 218, 218, 218, 218, 218, 218, 218, 218, 218, 218, 218, 218, 218, 218, 218, 218, 218, 218, 218, 218, 218, 218, 218, 218, 218, 218, 218, 218, 218, 218, 218, 218, 218, 218, 218, 218, 218, 218, 218, 218, 218, 218, 218, 218, 218, 218, 218, 218, 218, 218, 218, 218, 218, 218, 218, 218, 218, 218, 218, 218, 218, 218, 218, 218, 218, 218, 218, 218, 218, 218, 218, 218, 218, 218, 218, 218, 218, 218, 218, 218, 218, 218, 218, 218, 218, 218, 218, 218, 218, 218, 218, 218, 218, 218, 218, 218, 218, 218, 218, 218, 218, 218, 218, 218, 218, 218, 218, 218, 218, 218, 218, 218, 218, 218, 218, 218, 218, 218, 218, 218, 218, 218, 218, 
218, 218, 218, 218, 218, 218, 218, 218, 218, 218, 218, 218, 218, 218, 218, 218, 218, 218, 218, 218, 218, 218, 218, 218, 218, 218, 218, 218, 2206, 2207, 218, 218, 218, 218, 218, 218, 218, 218, 218, 218, 218, 218, 218, 218, 218, 218, 218, 218, 218, 218, 218, 218, 218, 218, 218, 218, 218, 218, 218, 218, 218, 218, 218, 218, 218, 218, 218, 218, 218, 218, 218, 218, 218, 218, 218, 218, 218, 218, 218, 218, 218, 218, 218, 218, 218, 218, 218, 218, 218, 218, 218, 218, 218, 218, 218, 376: 218, 378: 218, 218, 218, 218, 400: 218, 218, 404: 218, 218, 218, 410: 218, 419: 218, 218, 429: 218, 218, 218, 433: 218, 435: 218, 218, 218, 218, 218, 218, 218, 218, 218, 218, 218, 218, 218, 218, 218, 218, 218, 218, 218, 218, 218, 218, 218, 218, 218, 218, 218, 218, 218, 218, 218, 218, 218, 218, 218, 549: 218, 551: 218, 553: 218, 778: 2205}, + // 1045 + {219, 219, 219, 219, 219, 219, 10: 219, 219, 219, 219, 219, 219, 219, 219, 219, 219, 219, 219, 219, 219, 219, 219, 219, 219, 219, 219, 219, 219, 219, 219, 219, 219, 219, 219, 219, 219, 219, 219, 219, 219, 219, 219, 219, 219, 219, 219, 219, 219, 219, 219, 219, 219, 219, 219, 219, 219, 219, 219, 219, 219, 219, 219, 219, 219, 219, 219, 219, 219, 219, 219, 219, 219, 219, 219, 219, 219, 219, 219, 219, 219, 219, 219, 219, 219, 219, 219, 219, 219, 219, 219, 219, 219, 219, 219, 219, 219, 219, 219, 219, 219, 219, 219, 219, 219, 219, 219, 219, 219, 219, 219, 219, 219, 219, 219, 219, 219, 219, 219, 219, 219, 219, 219, 219, 219, 219, 219, 219, 219, 219, 219, 219, 219, 219, 219, 219, 219, 219, 219, 219, 219, 219, 219, 219, 219, 219, 219, 219, 219, 219, 219, 219, 219, 219, 219, 219, 219, 219, 219, 219, 219, 219, 219, 219, 219, 219, 219, 219, 219, 219, 219, 219, 219, 219, 219, 219, 219, 219, 219, 219, 219, 219, 219, 219, 219, 219, 219, 219, 219, 219, 219, 219, 219, 219, 219, 219, 219, 219, 219, 219, 219, 219, 219, 219, 219, 219, 219, 219, 219, 219, 219, 219, 219, 219, 219, 219, 219, 219, 219, 219, 219, 219, 219, 219, 219, 219, 219, 219, 219, 219, 219, 219, 219, 219, 219, 219, 219, 219, 219, 219, 219, 219, 219, 219, 219, 219, 219, 219, 219, 219, 219, 219, 219, 219, 219, 219, 219, 219, 219, 219, 219, 219, 219, 219, 219, 219, 219, 219, 219, 219, 219, 219, 219, 219, 219, 219, 219, 219, 219, 219, 219, 219, 219, 219, 219, 219, 219, 219, 219, 219, 219, 219, 219, 219, 219, 219, 219, 219, 219, 219, 219, 219, 219, 219, 219, 219, 219, 219, 219, 219, 219, 219, 219, 219, 219, 219, 219, 219, 219, 219, 219, 219, 219, 219, 219, 219, 219, 219, 219, 219, 219, 219, 219, 219, 219, 219, 219, 219, 219, 219, 219, 219, 219, 219, 219, 219, 219, 219, 219, 219, 219, 219, 219, 219, 219, 219, 219, 219, 219, 219, 219, 219, 219, 219, 219, 219, 219, 219, 219, 219, 219, 219, 376: 219, 378: 219, 219, 219, 219, 400: 219, 219, 404: 219, 219, 219, 410: 219, 419: 219, 219, 429: 219, 219, 219, 433: 219, 435: 219, 219, 219, 219, 219, 219, 219, 219, 219, 219, 219, 219, 219, 219, 219, 219, 219, 219, 219, 219, 219, 219, 219, 219, 219, 219, 219, 219, 219, 219, 219, 219, 219, 219, 219, 549: 219, 551: 219, 553: 219}, + {224, 224, 224, 224, 224, 224, 10: 224, 224, 224, 224, 224, 224, 224, 224, 224, 224, 224, 224, 224, 224, 224, 224, 224, 224, 224, 224, 224, 224, 224, 224, 224, 224, 224, 224, 224, 224, 224, 224, 224, 224, 224, 224, 224, 224, 224, 224, 224, 224, 224, 224, 224, 224, 224, 224, 224, 224, 224, 224, 224, 224, 224, 224, 224, 224, 224, 224, 224, 224, 224, 224, 224, 224, 224, 224, 224, 224, 224, 224, 224, 224, 224, 224, 224, 224, 224, 224, 224, 224, 224, 224, 224, 224, 224, 224, 224, 224, 224, 224, 224, 224, 224, 224, 224, 
224, 224, 224, 224, 224, 224, 224, 224, 224, 224, 224, 224, 224, 224, 224, 224, 224, 224, 224, 224, 224, 224, 224, 224, 224, 224, 224, 224, 224, 224, 224, 224, 224, 224, 224, 224, 224, 224, 224, 224, 224, 224, 224, 224, 224, 224, 224, 224, 224, 224, 224, 224, 224, 224, 224, 224, 224, 224, 224, 224, 224, 224, 224, 224, 224, 224, 224, 224, 224, 224, 224, 224, 224, 224, 224, 224, 224, 224, 224, 224, 224, 224, 224, 224, 224, 224, 224, 224, 224, 224, 224, 224, 224, 224, 224, 224, 224, 224, 224, 224, 224, 224, 224, 224, 224, 224, 224, 224, 224, 224, 224, 224, 224, 224, 224, 224, 224, 224, 224, 224, 224, 224, 224, 224, 224, 224, 224, 224, 224, 224, 224, 224, 224, 224, 224, 224, 224, 224, 224, 224, 224, 224, 224, 224, 224, 224, 224, 224, 224, 224, 224, 224, 224, 224, 224, 224, 224, 224, 224, 224, 224, 224, 224, 224, 224, 224, 224, 224, 224, 224, 224, 224, 224, 224, 224, 224, 224, 224, 224, 224, 224, 224, 224, 224, 224, 224, 224, 224, 224, 224, 224, 224, 224, 224, 224, 224, 224, 224, 224, 224, 224, 224, 224, 224, 224, 224, 224, 224, 224, 224, 224, 224, 224, 224, 224, 224, 224, 224, 224, 224, 224, 224, 224, 224, 224, 224, 224, 224, 224, 224, 224, 224, 224, 224, 224, 224, 224, 224, 224, 224, 224, 224, 224, 224, 224, 224, 224, 224, 224, 224, 224, 224, 224, 224, 224, 224, 224, 224, 224, 224, 224, 224, 224, 224, 224, 224, 224, 224, 376: 224, 378: 224, 224, 224, 224, 400: 224, 224, 404: 224, 224, 224, 410: 224, 419: 224, 224, 429: 224, 224, 224, 433: 224, 435: 224, 224, 224, 224, 224, 224, 224, 224, 224, 224, 224, 224, 224, 224, 224, 224, 224, 224, 224, 224, 224, 224, 224, 224, 224, 224, 224, 224, 224, 224, 224, 224, 224, 224, 224, 549: 224, 551: 224, 553: 2209, 772: 2208}, + {217, 217, 217, 217, 217, 217, 10: 217, 217, 217, 217, 217, 217, 217, 217, 217, 217, 217, 217, 217, 217, 217, 217, 217, 217, 217, 217, 217, 217, 217, 217, 217, 217, 217, 217, 217, 217, 217, 217, 217, 217, 217, 217, 217, 217, 217, 217, 217, 217, 217, 217, 217, 217, 217, 217, 217, 217, 217, 217, 217, 217, 217, 217, 217, 217, 217, 217, 217, 217, 217, 217, 217, 217, 217, 217, 217, 217, 217, 217, 217, 217, 217, 217, 217, 217, 217, 217, 217, 217, 217, 217, 217, 217, 217, 217, 217, 217, 217, 217, 217, 217, 217, 217, 217, 217, 217, 217, 217, 217, 217, 217, 217, 217, 217, 217, 217, 217, 217, 217, 217, 217, 217, 217, 217, 217, 217, 217, 217, 217, 217, 217, 217, 217, 217, 217, 217, 217, 217, 217, 217, 217, 217, 217, 217, 217, 217, 217, 217, 217, 217, 217, 217, 217, 217, 217, 217, 217, 217, 217, 217, 217, 217, 217, 217, 217, 217, 217, 217, 217, 217, 217, 217, 217, 217, 217, 217, 217, 217, 217, 217, 217, 217, 217, 217, 217, 217, 217, 217, 217, 217, 217, 217, 217, 217, 217, 217, 217, 217, 217, 217, 217, 217, 217, 217, 217, 217, 217, 217, 217, 217, 217, 217, 217, 217, 217, 217, 217, 217, 217, 217, 217, 217, 217, 217, 217, 217, 217, 217, 217, 217, 217, 217, 217, 217, 217, 217, 217, 217, 217, 217, 217, 217, 217, 217, 217, 217, 217, 217, 217, 217, 217, 217, 217, 217, 217, 217, 217, 217, 217, 217, 217, 217, 217, 217, 217, 217, 217, 217, 217, 217, 217, 217, 217, 217, 217, 217, 217, 217, 217, 217, 217, 217, 217, 217, 217, 217, 217, 217, 217, 217, 217, 217, 217, 217, 217, 217, 217, 217, 217, 217, 217, 217, 217, 217, 217, 217, 217, 217, 217, 217, 217, 217, 217, 217, 217, 217, 217, 217, 217, 217, 217, 217, 217, 217, 217, 217, 217, 217, 217, 217, 217, 217, 217, 217, 217, 217, 217, 217, 217, 217, 217, 217, 217, 217, 217, 217, 217, 217, 217, 217, 217, 217, 217, 217, 217, 217, 217, 217, 217, 217, 217, 217, 217, 217, 217, 217, 217, 217, 217, 217, 217, 217, 376: 
217, 378: 217, 217, 217, 217, 400: 217, 217, 404: 217, 217, 217, 410: 217, 419: 217, 217, 429: 217, 217, 217, 433: 217, 435: 217, 217, 217, 217, 217, 217, 217, 217, 217, 217, 217, 217, 217, 217, 217, 217, 217, 217, 217, 217, 217, 217, 217, 217, 217, 217, 217, 217, 217, 217, 217, 217, 217, 217, 217, 549: 217, 551: 217, 553: 217}, + {216, 216, 216, 216, 216, 216, 10: 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 376: 216, 378: 216, 216, 216, 216, 400: 216, 216, 404: 216, 216, 216, 410: 216, 419: 216, 216, 429: 216, 216, 216, 433: 216, 435: 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 549: 216, 551: 216, 553: 216}, + {213, 213, 213, 213, 213, 213, 10: 213, 213, 213, 213, 213, 213, 213, 213, 213, 213, 213, 213, 213, 213, 213, 213, 213, 213, 213, 213, 213, 213, 213, 213, 213, 213, 213, 213, 213, 213, 213, 213, 213, 213, 213, 213, 213, 213, 213, 213, 213, 213, 213, 213, 213, 213, 213, 213, 213, 213, 213, 213, 213, 213, 213, 213, 213, 213, 213, 213, 213, 213, 213, 213, 213, 213, 213, 213, 213, 213, 213, 213, 213, 213, 213, 213, 213, 213, 213, 213, 213, 213, 213, 213, 213, 213, 213, 213, 213, 213, 213, 213, 213, 213, 213, 213, 213, 213, 213, 213, 213, 213, 213, 213, 213, 213, 213, 213, 213, 213, 213, 213, 213, 213, 213, 213, 213, 213, 213, 213, 213, 213, 213, 213, 213, 213, 213, 213, 213, 213, 213, 213, 213, 213, 213, 213, 213, 213, 213, 213, 213, 213, 213, 213, 213, 213, 213, 213, 213, 213, 213, 213, 213, 213, 213, 213, 213, 213, 213, 213, 213, 213, 213, 213, 213, 213, 213, 213, 213, 213, 213, 213, 213, 213, 213, 213, 213, 213, 213, 213, 213, 213, 213, 213, 213, 213, 213, 213, 213, 213, 213, 213, 213, 213, 213, 213, 213, 
213, 213, 213, 213, 213, 213, 213, 213, 213, 213, 213, 213, 213, 213, 213, 213, 213, 213, 213, 213, 213, 213, 213, 213, 213, 213, 213, 213, 213, 213, 213, 213, 213, 213, 213, 213, 213, 213, 213, 213, 213, 213, 213, 213, 213, 213, 213, 213, 213, 213, 213, 213, 213, 213, 213, 213, 213, 213, 213, 213, 213, 213, 213, 213, 213, 213, 213, 213, 213, 213, 213, 213, 213, 213, 213, 213, 213, 213, 213, 213, 213, 213, 213, 213, 213, 213, 213, 213, 213, 213, 213, 213, 213, 213, 213, 213, 213, 213, 213, 213, 213, 213, 213, 213, 213, 213, 213, 213, 213, 213, 213, 213, 213, 213, 213, 213, 213, 213, 213, 213, 213, 213, 213, 213, 213, 213, 213, 213, 213, 213, 213, 213, 213, 213, 213, 213, 213, 213, 213, 213, 213, 213, 213, 213, 213, 213, 213, 213, 213, 213, 213, 213, 213, 213, 213, 213, 213, 213, 213, 213, 213, 213, 213, 213, 213, 213, 213, 213, 376: 213, 378: 213, 213, 213, 213, 400: 213, 213, 404: 213, 213, 213, 410: 213, 419: 213, 213, 429: 213, 213, 213, 433: 213, 435: 213, 213, 213, 213, 213, 213, 213, 213, 213, 213, 213, 213, 213, 213, 213, 213, 213, 213, 213, 213, 213, 213, 213, 213, 213, 213, 213, 213, 213, 213, 213, 213, 213, 213, 213, 549: 213, 551: 2211, 780: 2210}, + // 1050 + {223, 223, 223, 223, 223, 223, 10: 223, 223, 223, 223, 223, 223, 223, 223, 223, 223, 223, 223, 223, 223, 223, 223, 223, 223, 223, 223, 223, 223, 223, 223, 223, 223, 223, 223, 223, 223, 223, 223, 223, 223, 223, 223, 223, 223, 223, 223, 223, 223, 223, 223, 223, 223, 223, 223, 223, 223, 223, 223, 223, 223, 223, 223, 223, 223, 223, 223, 223, 223, 223, 223, 223, 223, 223, 223, 223, 223, 223, 223, 223, 223, 223, 223, 223, 223, 223, 223, 223, 223, 223, 223, 223, 223, 223, 223, 223, 223, 223, 223, 223, 223, 223, 223, 223, 223, 223, 223, 223, 223, 223, 223, 223, 223, 223, 223, 223, 223, 223, 223, 223, 223, 223, 223, 223, 223, 223, 223, 223, 223, 223, 223, 223, 223, 223, 223, 223, 223, 223, 223, 223, 223, 223, 223, 223, 223, 223, 223, 223, 223, 223, 223, 223, 223, 223, 223, 223, 223, 223, 223, 223, 223, 223, 223, 223, 223, 223, 223, 223, 223, 223, 223, 223, 223, 223, 223, 223, 223, 223, 223, 223, 223, 223, 223, 223, 223, 223, 223, 223, 223, 223, 223, 223, 223, 223, 223, 223, 223, 223, 223, 223, 223, 223, 223, 223, 223, 223, 223, 223, 223, 223, 223, 223, 223, 223, 223, 223, 223, 223, 223, 223, 223, 223, 223, 223, 223, 223, 223, 223, 223, 223, 223, 223, 223, 223, 223, 223, 223, 223, 223, 223, 223, 223, 223, 223, 223, 223, 223, 223, 223, 223, 223, 223, 223, 223, 223, 223, 223, 223, 223, 223, 223, 223, 223, 223, 223, 223, 223, 223, 223, 223, 223, 223, 223, 223, 223, 223, 223, 223, 223, 223, 223, 223, 223, 223, 223, 223, 223, 223, 223, 223, 223, 223, 223, 223, 223, 223, 223, 223, 223, 223, 223, 223, 223, 223, 223, 223, 223, 223, 223, 223, 223, 223, 223, 223, 223, 223, 223, 223, 223, 223, 223, 223, 223, 223, 223, 223, 223, 223, 223, 223, 223, 223, 223, 223, 223, 223, 223, 223, 223, 223, 223, 223, 223, 223, 223, 223, 223, 223, 223, 223, 223, 223, 223, 223, 223, 223, 223, 223, 223, 223, 223, 223, 223, 223, 223, 223, 223, 223, 223, 223, 223, 223, 376: 223, 378: 223, 223, 223, 223, 400: 223, 223, 404: 223, 223, 223, 410: 223, 419: 223, 223, 429: 223, 223, 223, 433: 223, 435: 223, 223, 223, 223, 223, 223, 223, 223, 223, 223, 223, 223, 223, 223, 223, 223, 223, 223, 223, 223, 223, 223, 223, 223, 223, 223, 223, 223, 223, 223, 223, 223, 223, 223, 223, 549: 223, 551: 223}, + {273, 273, 273, 273, 273, 273, 10: 273, 273, 273, 273, 273, 273, 273, 273, 273, 273, 273, 273, 273, 273, 273, 273, 273, 273, 273, 273, 273, 273, 273, 273, 273, 273, 273, 273, 
273, 273, 273, 273, 273, 273, 273, 273, 273, 273, 273, 273, 273, 273, 273, 273, 273, 273, 273, 273, 273, 273, 273, 273, 273, 273, 273, 273, 273, 273, 273, 273, 273, 273, 273, 273, 273, 273, 273, 273, 273, 273, 273, 273, 273, 273, 273, 273, 273, 273, 273, 273, 273, 273, 273, 273, 273, 273, 273, 273, 273, 273, 273, 273, 273, 273, 273, 273, 273, 273, 273, 273, 273, 273, 273, 273, 273, 273, 273, 273, 273, 273, 273, 273, 273, 273, 273, 273, 273, 273, 273, 273, 273, 273, 273, 273, 273, 273, 273, 273, 273, 273, 273, 273, 273, 273, 273, 273, 273, 273, 273, 273, 273, 273, 273, 273, 273, 273, 273, 273, 273, 273, 273, 273, 273, 273, 273, 273, 273, 273, 273, 273, 273, 273, 273, 273, 273, 273, 273, 273, 273, 273, 273, 273, 273, 273, 273, 273, 273, 273, 273, 273, 273, 273, 273, 273, 273, 273, 273, 273, 273, 273, 273, 273, 273, 273, 273, 273, 273, 273, 273, 273, 273, 273, 273, 273, 273, 273, 273, 273, 273, 273, 273, 273, 273, 273, 273, 273, 273, 273, 273, 273, 273, 273, 273, 273, 273, 273, 273, 273, 273, 273, 273, 273, 273, 273, 273, 273, 273, 273, 273, 273, 273, 273, 273, 273, 273, 273, 273, 273, 273, 273, 273, 273, 273, 273, 273, 273, 273, 273, 273, 273, 273, 273, 273, 273, 273, 273, 273, 273, 273, 273, 273, 273, 273, 273, 273, 273, 273, 273, 273, 273, 273, 273, 273, 273, 273, 273, 273, 273, 273, 273, 273, 273, 273, 273, 273, 273, 273, 273, 273, 273, 273, 273, 273, 273, 273, 273, 273, 273, 273, 273, 273, 273, 273, 273, 273, 273, 273, 273, 273, 273, 273, 273, 273, 273, 273, 273, 273, 273, 273, 273, 273, 273, 273, 273, 273, 273, 273, 273, 273, 273, 273, 273, 273, 273, 273, 273, 273, 273, 273, 273, 273, 273, 273, 273, 273, 273, 273, 273, 273, 273, 273, 273, 273, 273, 273, 376: 273, 378: 273, 273, 273, 273, 400: 273, 273, 404: 273, 273, 273, 410: 273, 419: 273, 273, 429: 273, 273, 273, 433: 273, 435: 273, 273, 273, 273, 273, 273, 273, 273, 273, 273, 273, 273, 273, 273, 273, 273, 273, 273, 273, 273, 273, 273, 273, 273, 273, 273, 273, 273, 273, 273, 273, 273, 273, 273, 273, 549: 273}, + {212, 212, 212, 212, 212, 212, 10: 212, 212, 212, 212, 212, 212, 212, 212, 212, 212, 212, 212, 212, 212, 212, 212, 212, 212, 212, 212, 212, 212, 212, 212, 212, 212, 212, 212, 212, 212, 212, 212, 212, 212, 212, 212, 212, 212, 212, 212, 212, 212, 212, 212, 212, 212, 212, 212, 212, 212, 212, 212, 212, 212, 212, 212, 212, 212, 212, 212, 212, 212, 212, 212, 212, 212, 212, 212, 212, 212, 212, 212, 212, 212, 212, 212, 212, 212, 212, 212, 212, 212, 212, 212, 212, 212, 212, 212, 212, 212, 212, 212, 212, 212, 212, 212, 212, 212, 212, 212, 212, 212, 212, 212, 212, 212, 212, 212, 212, 212, 212, 212, 212, 212, 212, 212, 212, 212, 212, 212, 212, 212, 212, 212, 212, 212, 212, 212, 212, 212, 212, 212, 212, 212, 212, 212, 212, 212, 212, 212, 212, 212, 212, 212, 212, 212, 212, 212, 212, 212, 212, 212, 212, 212, 212, 212, 212, 212, 212, 212, 212, 212, 212, 212, 212, 212, 212, 212, 212, 212, 212, 212, 212, 212, 212, 212, 212, 212, 212, 212, 212, 212, 212, 212, 212, 212, 212, 212, 212, 212, 212, 212, 212, 212, 212, 212, 212, 212, 212, 212, 212, 212, 212, 212, 212, 212, 212, 212, 212, 212, 212, 212, 212, 212, 212, 212, 212, 212, 212, 212, 212, 212, 212, 212, 212, 212, 212, 212, 212, 212, 212, 212, 212, 212, 212, 212, 212, 212, 212, 212, 212, 212, 212, 212, 212, 212, 212, 212, 212, 212, 212, 212, 212, 212, 212, 212, 212, 212, 212, 212, 212, 212, 212, 212, 212, 212, 212, 212, 212, 212, 212, 212, 212, 212, 212, 212, 212, 212, 212, 212, 212, 212, 212, 212, 212, 212, 212, 212, 212, 212, 212, 212, 212, 212, 212, 212, 212, 212, 212, 212, 212, 212, 212, 
212, 212, 212, 212, 212, 212, 212, 212, 212, 212, 212, 212, 212, 212, 212, 212, 212, 212, 212, 212, 212, 212, 212, 212, 212, 212, 212, 212, 212, 212, 212, 212, 212, 212, 212, 212, 212, 212, 212, 212, 212, 212, 212, 212, 212, 212, 212, 212, 212, 212, 212, 212, 212, 212, 212, 212, 212, 212, 212, 212, 212, 212, 376: 212, 378: 212, 212, 212, 212, 400: 212, 212, 404: 212, 212, 212, 410: 212, 419: 212, 212, 429: 212, 212, 212, 433: 212, 435: 212, 212, 212, 212, 212, 212, 212, 212, 212, 212, 212, 212, 212, 212, 212, 212, 212, 212, 212, 212, 212, 212, 212, 212, 212, 212, 212, 212, 212, 212, 212, 212, 212, 212, 212, 549: 212}, + {1309, 1332, 1217, 1442, 1436, 1426, 926, 926, 926, 926, 1280, 1229, 1477, 1511, 1504, 1497, 1507, 1500, 1499, 1501, 1517, 1509, 1503, 1515, 1516, 1513, 1514, 1502, 1498, 1505, 1506, 1508, 1512, 1510, 1547, 1453, 1451, 1452, 1314, 1216, 1226, 1441, 1244, 1288, 1246, 1225, 1260, 1263, 1434, 1299, 1335, 1522, 1521, 1270, 1338, 1298, 1476, 1221, 1231, 1340, 1439, 1341, 1257, 1518, 1519, 1438, 1326, 1350, 1273, 1278, 1430, 1431, 1283, 1289, 1384, 1296, 1432, 1433, 1219, 1222, 1224, 1223, 1238, 1237, 1482, 1427, 1243, 1249, 1261, 1262, 1250, 1485, 1405, 1318, 1319, 1279, 1450, 1290, 1293, 1292, 1415, 1295, 1300, 1301, 1402, 1214, 1529, 1215, 1218, 1460, 1387, 1304, 1220, 1310, 1348, 1349, 1345, 1530, 1531, 1532, 1406, 1576, 1478, 1479, 1467, 1480, 1227, 1394, 1533, 1312, 1396, 1228, 1381, 1481, 1360, 1308, 1230, 1329, 1232, 1233, 1313, 1311, 1234, 1408, 1534, 1535, 1404, 1235, 1536, 1468, 1236, 1537, 1538, 1239, 1240, 1388, 1324, 1483, 1417, 1241, 1484, 1242, 1245, 1247, 1248, 1251, 1386, 1351, 1252, 1577, 1435, 1356, 1253, 1461, 1401, 1574, 1254, 1539, 1411, 1255, 1256, 1580, 1258, 1259, 1346, 1540, 1322, 1541, 1418, 1459, 1264, 1307, 1210, 1462, 1403, 1337, 1542, 1265, 1543, 1544, 1389, 1407, 1412, 1325, 1398, 1486, 1457, 1268, 1266, 1334, 1419, 1267, 1456, 1458, 1315, 1546, 1473, 1472, 1376, 1377, 1316, 1378, 1379, 1390, 1365, 1545, 1317, 1366, 1463, 1302, 1361, 1269, 1400, 1573, 1344, 1466, 1469, 1420, 1487, 1488, 1464, 1465, 1353, 1470, 1548, 1454, 1354, 1331, 1285, 1524, 1575, 1410, 1422, 1425, 1352, 1271, 1475, 1474, 1525, 1367, 1550, 1368, 1272, 1343, 1362, 1363, 1364, 1489, 1321, 1370, 1369, 1274, 1549, 1395, 1275, 1528, 1527, 1383, 1424, 1276, 1437, 1327, 1455, 1380, 1328, 1342, 1277, 1385, 1359, 1320, 1490, 1371, 1429, 1393, 1372, 1471, 1333, 1373, 1374, 1281, 1423, 1382, 1375, 1282, 1305, 1414, 1523, 1416, 1336, 1339, 1443, 1444, 1445, 1446, 1447, 1448, 1449, 1578, 1491, 1358, 1494, 1495, 1493, 1492, 1357, 1428, 1284, 1554, 1555, 1556, 1557, 1579, 1551, 1397, 1287, 1286, 1552, 1553, 1355, 1413, 1409, 1421, 1440, 1391, 1291, 1496, 1561, 1562, 1563, 1564, 1565, 1566, 1568, 1567, 1569, 1570, 1571, 1520, 1294, 1323, 1572, 1297, 1330, 1392, 1306, 1558, 1559, 1560, 1347, 1303, 1526, 1399, 375: 2227, 378: 2228, 384: 926, 386: 926, 392: 1720, 1719, 395: 1718, 1717, 1715, 926, 523: 2226, 1212, 1213, 1211, 547: 1716, 1714, 676: 2225, 2236}, + {6: 931, 931, 931, 931, 384: 931, 386: 931, 398: 931}, + // 1055 + {459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 
459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, 375: 459, 377: 459, 459, 459, 459, 459, 384: 459, 386: 459, 392: 459, 459, 395: 459, 459, 459, 459, 400: 2231, 459, 403: 459, 411: 459, 459, 459, 459, 459, 459, 459, 459, 421: 459, 459, 459, 459, 459, 459, 459, 459, 432: 459, 434: 459}, + {1309, 1332, 1217, 1442, 1436, 1426, 10: 1280, 1229, 1477, 1511, 1504, 1497, 1507, 1500, 1499, 1501, 1517, 1509, 1503, 1515, 1516, 1513, 1514, 1502, 1498, 1505, 1506, 1508, 1512, 1510, 1547, 1453, 1451, 1452, 1314, 1216, 1226, 1441, 1244, 1288, 1246, 1225, 1260, 1263, 1434, 1299, 1335, 1522, 1521, 1270, 1338, 1298, 1476, 1221, 1231, 1340, 1439, 1341, 1257, 1518, 1519, 1438, 1326, 1350, 1273, 1278, 1430, 1431, 1283, 1289, 1384, 1296, 1432, 1433, 1219, 1222, 1224, 1223, 1238, 1237, 1482, 1427, 1243, 1249, 1261, 1262, 1250, 1485, 1405, 1318, 1319, 1279, 1450, 1290, 1293, 1292, 1415, 1295, 1300, 1301, 1402, 1214, 1529, 1215, 1218, 1460, 1387, 1304, 1220, 1310, 1348, 1349, 1345, 1530, 1531, 1532, 1406, 1576, 1478, 1479, 1467, 1480, 1227, 1394, 1533, 1312, 1396, 1228, 1381, 1481, 1360, 1308, 1230, 1329, 1232, 1233, 1313, 1311, 1234, 1408, 1534, 1535, 1404, 1235, 1536, 1468, 1236, 1537, 1538, 1239, 1240, 1388, 1324, 1483, 1417, 1241, 1484, 1242, 1245, 1247, 1248, 1251, 1386, 1351, 1252, 1577, 1435, 1356, 1253, 1461, 1401, 1574, 1254, 1539, 1411, 1255, 1256, 1580, 1258, 1259, 1346, 1540, 1322, 1541, 1418, 1459, 1264, 1307, 1210, 1462, 1403, 1337, 1542, 1265, 1543, 1544, 1389, 1407, 1412, 1325, 1398, 1486, 1457, 1268, 1266, 1334, 1419, 1267, 1456, 1458, 1315, 1546, 1473, 1472, 1376, 1377, 1316, 1378, 1379, 1390, 1365, 1545, 1317, 1366, 1463, 1302, 1361, 1269, 1400, 1573, 1344, 1466, 1469, 1420, 1487, 1488, 1464, 1465, 1353, 1470, 1548, 1454, 1354, 1331, 1285, 1524, 1575, 1410, 1422, 1425, 1352, 1271, 1475, 1474, 1525, 1367, 1550, 1368, 1272, 1343, 1362, 1363, 1364, 1489, 1321, 1370, 1369, 1274, 1549, 1395, 1275, 1528, 1527, 1383, 1424, 1276, 1437, 1327, 1455, 1380, 1328, 1342, 1277, 1385, 1359, 1320, 1490, 1371, 1429, 1393, 1372, 1471, 1333, 1373, 1374, 1281, 1423, 1382, 1375, 1282, 1305, 1414, 1523, 1416, 1336, 1339, 1443, 1444, 1445, 1446, 1447, 1448, 1449, 1578, 1491, 1358, 1494, 1495, 1493, 1492, 
1357, 1428, 1284, 1554, 1555, 1556, 1557, 1579, 1551, 1397, 1287, 1286, 1552, 1553, 1355, 1413, 1409, 1421, 1440, 1391, 1291, 1496, 1561, 1562, 1563, 1564, 1565, 1566, 1568, 1567, 1569, 1570, 1571, 1520, 1294, 1323, 1572, 1297, 1330, 1392, 1306, 1558, 1559, 1560, 1347, 1303, 1526, 1399, 523: 2221, 1212, 1213, 1211}, + {6: 920, 920, 920, 920, 384: 920, 386: 920, 398: 920}, + {6: 211, 211, 211, 2219, 384: 211, 386: 211, 398: 211}, + {6: 325, 325, 325, 384: 325, 386: 325, 398: 325}, + // 1060 + {1309, 1332, 1217, 1442, 1436, 1426, 10: 1280, 1625, 1477, 1511, 1504, 1497, 1507, 1500, 1499, 1501, 1517, 1509, 1503, 1515, 1516, 1513, 1514, 1502, 1498, 1505, 1506, 1508, 1512, 1510, 1547, 1453, 1451, 1452, 1314, 1623, 1226, 1441, 1244, 1288, 1246, 1225, 1628, 1263, 1434, 1299, 1335, 1522, 1521, 1270, 1338, 1298, 1476, 1221, 1231, 1340, 1439, 1341, 1257, 1518, 1519, 1438, 1326, 1350, 1273, 1278, 1430, 1431, 1283, 1289, 1384, 1632, 1432, 1433, 1219, 1222, 1224, 1223, 1238, 1626, 1482, 1427, 1243, 1249, 1261, 1262, 1250, 1485, 1405, 1318, 1319, 1279, 1450, 1290, 1631, 1630, 1415, 1295, 1300, 1633, 1402, 1214, 1529, 1215, 1218, 1460, 1387, 1304, 1624, 1310, 1348, 1349, 1345, 1530, 1531, 1532, 1406, 1576, 1478, 1479, 1467, 1480, 1227, 1394, 1533, 1312, 1396, 1228, 1381, 1481, 1640, 1636, 1230, 1329, 1232, 1233, 1313, 1311, 1234, 1408, 1534, 1535, 1404, 1235, 1536, 1468, 1236, 1537, 1538, 1627, 1240, 1388, 1324, 1483, 1417, 1241, 1484, 1242, 1245, 1247, 1248, 1251, 1386, 1351, 1252, 1577, 1435, 1356, 1253, 1461, 1401, 1574, 1254, 1539, 1411, 1255, 1256, 1580, 1258, 1259, 1346, 1540, 1322, 1541, 1418, 1459, 1629, 1307, 1622, 1462, 1403, 1337, 1542, 1265, 1543, 1544, 1389, 1407, 1412, 1325, 1398, 1486, 1457, 1268, 1266, 1334, 1419, 1267, 1456, 1458, 1315, 1546, 1473, 1472, 1376, 1377, 1316, 1378, 1379, 1390, 1642, 1545, 1317, 1643, 1463, 1302, 1641, 1269, 1400, 1573, 1344, 1466, 1469, 1420, 1487, 1488, 1464, 1465, 1353, 1470, 1548, 1454, 1354, 1331, 1285, 1524, 1575, 1410, 1422, 1425, 1352, 1271, 1475, 1474, 1525, 1367, 1550, 1368, 1272, 1343, 1362, 1363, 1364, 1489, 1637, 1370, 1369, 1274, 1549, 1395, 1275, 1528, 1527, 1383, 1424, 1276, 1437, 1327, 1455, 1380, 1328, 1638, 1277, 1385, 1639, 1320, 1490, 1644, 1429, 1393, 1372, 1471, 1333, 1373, 1374, 1281, 1423, 1382, 1375, 1282, 1305, 1414, 1523, 1416, 1336, 1339, 1443, 1444, 1445, 1446, 1447, 1448, 1449, 1578, 1491, 1358, 1494, 1495, 1493, 1492, 1357, 1428, 1284, 1554, 1555, 1556, 1557, 1579, 1551, 1397, 1287, 1286, 1552, 1553, 1355, 1413, 1409, 1421, 1440, 1391, 1291, 1496, 1561, 1562, 1563, 1564, 1565, 1566, 1568, 1567, 1569, 1570, 1571, 1520, 1294, 1323, 1572, 1297, 1330, 1392, 1635, 1558, 1559, 1560, 1347, 1634, 1526, 1399, 1618, 1671, 1674, 376: 1646, 378: 1655, 1669, 1668, 1699, 400: 1658, 2213, 404: 1616, 1677, 1650, 410: 1682, 419: 1645, 1647, 429: 1675, 1649, 1648, 433: 1676, 435: 1654, 1680, 1689, 1711, 1653, 1690, 1691, 1652, 1666, 1667, 1705, 1697, 1700, 1706, 1707, 1702, 1703, 1708, 1701, 1704, 1695, 1673, 1685, 1686, 1688, 1684, 1678, 1679, 1670, 1681, 1683, 1672, 1687, 1692, 1693, 523: 2214, 1212, 1213, 1211, 1663, 1659, 1651, 1662, 1660, 1661, 1694, 1698, 1696, 1656, 1665, 1709, 1710, 1664, 1621, 1620, 1619, 2212, 549: 2215, 675: 2220}, + {6: 919, 919, 919, 919, 384: 919, 386: 919, 398: 919}, + {1309, 1332, 1217, 1442, 1436, 1426, 10: 1280, 1625, 1477, 1511, 1504, 1497, 1507, 1500, 1499, 1501, 1517, 1509, 1503, 1515, 1516, 1513, 1514, 1502, 1498, 1505, 1506, 1508, 1512, 1510, 1547, 1453, 1451, 1452, 1314, 1623, 1226, 1441, 1244, 1288, 1246, 
1225, 1628, 1263, 1434, 1299, 1335, 1522, 1521, 1270, 1338, 1298, 1476, 1221, 1231, 1340, 1439, 1341, 1257, 1518, 1519, 1438, 1326, 1350, 1273, 1278, 1430, 1431, 1283, 1289, 1384, 1632, 1432, 1433, 1219, 1222, 1224, 1223, 1238, 1626, 1482, 1427, 1243, 1249, 1261, 1262, 1250, 1485, 1405, 1318, 1319, 1279, 1450, 1290, 1631, 1630, 1415, 1295, 1300, 1633, 1402, 1214, 1529, 1215, 1218, 1460, 1387, 1304, 1624, 1310, 1348, 1349, 1345, 1530, 1531, 1532, 1406, 1576, 1478, 1479, 1467, 1480, 1227, 1394, 1533, 1312, 1396, 1228, 1381, 1481, 1640, 1636, 1230, 1329, 1232, 1233, 1313, 1311, 1234, 1408, 1534, 1535, 1404, 1235, 1536, 1468, 1236, 1537, 1538, 1627, 1240, 1388, 1324, 1483, 1417, 1241, 1484, 1242, 1245, 1247, 1248, 1251, 1386, 1351, 1252, 1577, 1435, 1356, 1253, 1461, 1401, 1574, 1254, 1539, 1411, 1255, 1256, 1580, 1258, 1259, 1346, 1540, 1322, 1541, 1418, 1459, 1629, 1307, 1622, 1462, 1403, 1337, 1542, 1265, 1543, 1544, 1389, 1407, 1412, 1325, 1398, 1486, 1457, 1268, 1266, 1334, 1419, 1267, 1456, 1458, 1315, 1546, 1473, 1472, 1376, 1377, 1316, 1378, 1379, 1390, 1642, 1545, 1317, 1643, 1463, 1302, 1641, 1269, 1400, 1573, 1344, 1466, 1469, 1420, 1487, 1488, 1464, 1465, 1353, 1470, 1548, 1454, 1354, 1331, 1285, 1524, 1575, 1410, 1422, 1425, 1352, 1271, 1475, 1474, 1525, 1367, 1550, 1368, 1272, 1343, 1362, 1363, 1364, 1489, 1637, 1370, 1369, 1274, 1549, 1395, 1275, 1528, 1527, 1383, 1424, 1276, 1437, 1327, 1455, 1380, 1328, 1638, 1277, 1385, 1639, 1320, 1490, 1644, 1429, 1393, 1372, 1471, 1333, 1373, 1374, 1281, 1423, 1382, 1375, 1282, 1305, 1414, 1523, 1416, 1336, 1339, 1443, 1444, 1445, 1446, 1447, 1448, 1449, 1578, 1491, 1358, 1494, 1495, 1493, 1492, 1357, 1428, 1284, 1554, 1555, 1556, 1557, 1579, 1551, 1397, 1287, 1286, 1552, 1553, 1355, 1413, 1409, 1421, 1440, 1391, 1291, 1496, 1561, 1562, 1563, 1564, 1565, 1566, 1568, 1567, 1569, 1570, 1571, 1520, 1294, 1323, 1572, 1297, 1330, 1392, 1635, 1558, 1559, 1560, 1347, 1634, 1526, 1399, 1618, 1671, 1674, 376: 1646, 378: 1655, 1669, 1668, 1699, 400: 1658, 404: 1616, 1677, 1650, 410: 1682, 419: 1645, 1647, 429: 1675, 1649, 1648, 433: 1676, 435: 1654, 1680, 1689, 1711, 1653, 1690, 1691, 1652, 1666, 1667, 1705, 1697, 1700, 1706, 1707, 1702, 1703, 1708, 1701, 1704, 1695, 1673, 1685, 1686, 1688, 1684, 1678, 1679, 1670, 1681, 1683, 1672, 1687, 1692, 1693, 523: 1657, 1212, 1213, 1211, 1663, 1659, 1651, 1662, 1660, 1661, 1694, 1698, 1696, 1656, 1665, 1709, 1710, 1664, 1621, 1620, 1619, 2222}, + {392: 1720, 1719, 395: 1718, 1717, 1715, 402: 2223, 547: 1716, 1714}, + {1309, 1332, 1217, 1442, 1436, 1426, 926, 926, 926, 926, 1280, 1229, 1477, 1511, 1504, 1497, 1507, 1500, 1499, 1501, 1517, 1509, 1503, 1515, 1516, 1513, 1514, 1502, 1498, 1505, 1506, 1508, 1512, 1510, 1547, 1453, 1451, 1452, 1314, 1216, 1226, 1441, 1244, 1288, 1246, 1225, 1260, 1263, 1434, 1299, 1335, 1522, 1521, 1270, 1338, 1298, 1476, 1221, 1231, 1340, 1439, 1341, 1257, 1518, 1519, 1438, 1326, 1350, 1273, 1278, 1430, 1431, 1283, 1289, 1384, 1296, 1432, 1433, 1219, 1222, 1224, 1223, 1238, 1237, 1482, 1427, 1243, 1249, 1261, 1262, 1250, 1485, 1405, 1318, 1319, 1279, 1450, 1290, 1293, 1292, 1415, 1295, 1300, 1301, 1402, 1214, 1529, 1215, 1218, 1460, 1387, 1304, 1220, 1310, 1348, 1349, 1345, 1530, 1531, 1532, 1406, 1576, 1478, 1479, 1467, 1480, 1227, 1394, 1533, 1312, 1396, 1228, 1381, 1481, 1360, 1308, 1230, 1329, 1232, 1233, 1313, 1311, 1234, 1408, 1534, 1535, 1404, 1235, 1536, 1468, 1236, 1537, 1538, 1239, 1240, 1388, 1324, 1483, 1417, 1241, 1484, 1242, 1245, 1247, 1248, 1251, 1386, 1351, 1252, 
1577, 1435, 1356, 1253, 1461, 1401, 1574, 1254, 1539, 1411, 1255, 1256, 1580, 1258, 1259, 1346, 1540, 1322, 1541, 1418, 1459, 1264, 1307, 1210, 1462, 1403, 1337, 1542, 1265, 1543, 1544, 1389, 1407, 1412, 1325, 1398, 1486, 1457, 1268, 1266, 1334, 1419, 1267, 1456, 1458, 1315, 1546, 1473, 1472, 1376, 1377, 1316, 1378, 1379, 1390, 1365, 1545, 1317, 1366, 1463, 1302, 1361, 1269, 1400, 1573, 1344, 1466, 1469, 1420, 1487, 1488, 1464, 1465, 1353, 1470, 1548, 1454, 1354, 1331, 1285, 1524, 1575, 1410, 1422, 1425, 1352, 1271, 1475, 1474, 1525, 1367, 1550, 1368, 1272, 1343, 1362, 1363, 1364, 1489, 1321, 1370, 1369, 1274, 1549, 1395, 1275, 1528, 1527, 1383, 1424, 1276, 1437, 1327, 1455, 1380, 1328, 1342, 1277, 1385, 1359, 1320, 1490, 1371, 1429, 1393, 1372, 1471, 1333, 1373, 1374, 1281, 1423, 1382, 1375, 1282, 1305, 1414, 1523, 1416, 1336, 1339, 1443, 1444, 1445, 1446, 1447, 1448, 1449, 1578, 1491, 1358, 1494, 1495, 1493, 1492, 1357, 1428, 1284, 1554, 1555, 1556, 1557, 1579, 1551, 1397, 1287, 1286, 1552, 1553, 1355, 1413, 1409, 1421, 1440, 1391, 1291, 1496, 1561, 1562, 1563, 1564, 1565, 1566, 1568, 1567, 1569, 1570, 1571, 1520, 1294, 1323, 1572, 1297, 1330, 1392, 1306, 1558, 1559, 1560, 1347, 1303, 1526, 1399, 375: 2227, 378: 2228, 384: 926, 386: 926, 398: 926, 523: 2226, 1212, 1213, 1211, 676: 2225, 2224}, + // 1065 + {6: 927, 927, 927, 927, 384: 927, 386: 927, 398: 927}, + {6: 925, 925, 925, 925, 384: 925, 386: 925, 398: 925}, + {6: 924, 924, 924, 924, 384: 924, 386: 924, 398: 924}, + {1309, 1332, 1217, 1442, 1436, 1426, 10: 1280, 1229, 1477, 1511, 1504, 1497, 1507, 1500, 1499, 1501, 1517, 1509, 1503, 1515, 1516, 1513, 1514, 1502, 1498, 1505, 1506, 1508, 1512, 1510, 1547, 1453, 1451, 1452, 1314, 1216, 1226, 1441, 1244, 1288, 1246, 1225, 1260, 1263, 1434, 1299, 1335, 1522, 1521, 1270, 1338, 1298, 1476, 1221, 1231, 1340, 1439, 1341, 1257, 1518, 1519, 1438, 1326, 1350, 1273, 1278, 1430, 1431, 1283, 1289, 1384, 1296, 1432, 1433, 1219, 1222, 1224, 1223, 1238, 1237, 1482, 1427, 1243, 1249, 1261, 1262, 1250, 1485, 1405, 1318, 1319, 1279, 1450, 1290, 1293, 1292, 1415, 1295, 1300, 1301, 1402, 1214, 1529, 1215, 1218, 1460, 1387, 1304, 1220, 1310, 1348, 1349, 1345, 1530, 1531, 1532, 1406, 1576, 1478, 1479, 1467, 1480, 1227, 1394, 1533, 1312, 1396, 1228, 1381, 1481, 1360, 1308, 1230, 1329, 1232, 1233, 1313, 1311, 1234, 1408, 1534, 1535, 1404, 1235, 1536, 1468, 1236, 1537, 1538, 1239, 1240, 1388, 1324, 1483, 1417, 1241, 1484, 1242, 1245, 1247, 1248, 1251, 1386, 1351, 1252, 1577, 1435, 1356, 1253, 1461, 1401, 1574, 1254, 1539, 1411, 1255, 1256, 1580, 1258, 1259, 1346, 1540, 1322, 1541, 1418, 1459, 1264, 1307, 1210, 1462, 1403, 1337, 1542, 1265, 1543, 1544, 1389, 1407, 1412, 1325, 1398, 1486, 1457, 1268, 1266, 1334, 1419, 1267, 1456, 1458, 1315, 1546, 1473, 1472, 1376, 1377, 1316, 1378, 1379, 1390, 1365, 1545, 1317, 1366, 1463, 1302, 1361, 1269, 1400, 1573, 1344, 1466, 1469, 1420, 1487, 1488, 1464, 1465, 1353, 1470, 1548, 1454, 1354, 1331, 1285, 1524, 1575, 1410, 1422, 1425, 1352, 1271, 1475, 1474, 1525, 1367, 1550, 1368, 1272, 1343, 1362, 1363, 1364, 1489, 1321, 1370, 1369, 1274, 1549, 1395, 1275, 1528, 1527, 1383, 1424, 1276, 1437, 1327, 1455, 1380, 1328, 1342, 1277, 1385, 1359, 1320, 1490, 1371, 1429, 1393, 1372, 1471, 1333, 1373, 1374, 1281, 1423, 1382, 1375, 1282, 1305, 1414, 1523, 1416, 1336, 1339, 1443, 1444, 1445, 1446, 1447, 1448, 1449, 1578, 1491, 1358, 1494, 1495, 1493, 1492, 1357, 1428, 1284, 1554, 1555, 1556, 1557, 1579, 1551, 1397, 1287, 1286, 1552, 1553, 1355, 1413, 1409, 1421, 1440, 1391, 1291, 
1496, 1561, 1562, 1563, 1564, 1565, 1566, 1568, 1567, 1569, 1570, 1571, 1520, 1294, 1323, 1572, 1297, 1330, 1392, 1306, 1558, 1559, 1560, 1347, 1303, 1526, 1399, 378: 2230, 523: 2229, 1212, 1213, 1211}, + {6: 922, 922, 922, 922, 384: 922, 386: 922, 398: 922}, + // 1070 + {6: 923, 923, 923, 923, 384: 923, 386: 923, 398: 923}, + {6: 921, 921, 921, 921, 384: 921, 386: 921, 398: 921}, + {1309, 1332, 1217, 1442, 1436, 1426, 10: 1280, 1229, 1477, 1511, 1504, 1497, 1507, 1500, 1499, 1501, 1517, 1509, 1503, 1515, 1516, 1513, 1514, 1502, 1498, 1505, 1506, 1508, 1512, 1510, 1547, 1453, 1451, 1452, 1314, 1216, 1226, 1441, 1244, 1288, 1246, 1225, 1260, 1263, 1434, 1299, 1335, 1522, 1521, 1270, 1338, 1298, 1476, 1221, 1231, 1340, 1439, 1341, 1257, 1518, 1519, 1438, 1326, 1350, 1273, 1278, 1430, 1431, 1283, 1289, 1384, 1296, 1432, 1433, 1219, 1222, 1224, 1223, 1238, 1237, 1482, 1427, 1243, 1249, 1261, 1262, 1250, 1485, 1405, 1318, 1319, 1279, 1450, 1290, 1293, 1292, 1415, 1295, 1300, 1301, 1402, 1214, 1529, 1215, 1218, 1460, 1387, 1304, 1220, 1310, 1348, 1349, 1345, 1530, 1531, 1532, 1406, 1576, 1478, 1479, 1467, 1480, 1227, 1394, 1533, 1312, 1396, 1228, 1381, 1481, 1360, 1308, 1230, 1329, 1232, 1233, 1313, 1311, 1234, 1408, 1534, 1535, 1404, 1235, 1536, 1468, 1236, 1537, 1538, 1239, 1240, 1388, 1324, 1483, 1417, 1241, 1484, 1242, 1245, 1247, 1248, 1251, 1386, 1351, 1252, 1577, 1435, 1356, 1253, 1461, 1401, 1574, 1254, 1539, 1411, 1255, 1256, 1580, 1258, 1259, 1346, 1540, 1322, 1541, 1418, 1459, 1264, 1307, 1210, 1462, 1403, 1337, 1542, 1265, 1543, 1544, 1389, 1407, 1412, 1325, 1398, 1486, 1457, 1268, 1266, 1334, 1419, 1267, 1456, 1458, 1315, 1546, 1473, 1472, 1376, 1377, 1316, 1378, 1379, 1390, 1365, 1545, 1317, 1366, 1463, 1302, 1361, 1269, 1400, 1573, 1344, 1466, 1469, 1420, 1487, 1488, 1464, 1465, 1353, 1470, 1548, 1454, 1354, 1331, 1285, 1524, 1575, 1410, 1422, 1425, 1352, 1271, 1475, 1474, 1525, 1367, 1550, 1368, 1272, 1343, 1362, 1363, 1364, 1489, 1321, 1370, 1369, 1274, 1549, 1395, 1275, 1528, 1527, 1383, 1424, 1276, 1437, 1327, 1455, 1380, 1328, 1342, 1277, 1385, 1359, 1320, 1490, 1371, 1429, 1393, 1372, 1471, 1333, 1373, 1374, 1281, 1423, 1382, 1375, 1282, 1305, 1414, 1523, 1416, 1336, 1339, 1443, 1444, 1445, 1446, 1447, 1448, 1449, 1578, 1491, 1358, 1494, 1495, 1493, 1492, 1357, 1428, 1284, 1554, 1555, 1556, 1557, 1579, 1551, 1397, 1287, 1286, 1552, 1553, 1355, 1413, 1409, 1421, 1440, 1391, 1291, 1496, 1561, 1562, 1563, 1564, 1565, 1566, 1568, 1567, 1569, 1570, 1571, 1520, 1294, 1323, 1572, 1297, 1330, 1392, 1306, 1558, 1559, 1560, 1347, 1303, 1526, 1399, 401: 2232, 523: 2233, 1212, 1213, 1211}, + {6: 930, 930, 930, 930, 384: 930, 386: 930, 398: 930}, + {458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 
458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 458, 375: 458, 377: 458, 458, 458, 458, 458, 384: 458, 386: 458, 392: 458, 458, 395: 458, 458, 458, 458, 400: 2234, 458, 403: 458, 411: 458, 458, 458, 458, 458, 458, 458, 458, 421: 458, 458, 458, 458, 458, 458, 458, 458, 432: 458, 434: 458}, + // 1075 + {1309, 1332, 1217, 1442, 1436, 1426, 10: 1280, 1229, 1477, 1511, 1504, 1497, 1507, 1500, 1499, 1501, 1517, 1509, 1503, 1515, 1516, 1513, 1514, 1502, 1498, 1505, 1506, 1508, 1512, 1510, 1547, 1453, 1451, 1452, 1314, 1216, 1226, 1441, 1244, 1288, 1246, 1225, 1260, 1263, 1434, 1299, 1335, 1522, 1521, 1270, 1338, 1298, 1476, 1221, 1231, 1340, 1439, 1341, 1257, 1518, 1519, 1438, 1326, 1350, 1273, 1278, 1430, 1431, 1283, 1289, 1384, 1296, 1432, 1433, 1219, 1222, 1224, 1223, 1238, 1237, 1482, 1427, 1243, 1249, 1261, 1262, 1250, 1485, 1405, 1318, 1319, 1279, 1450, 1290, 1293, 1292, 1415, 1295, 1300, 1301, 1402, 1214, 1529, 1215, 1218, 1460, 1387, 1304, 1220, 1310, 1348, 1349, 1345, 1530, 1531, 1532, 1406, 1576, 1478, 1479, 1467, 1480, 1227, 1394, 1533, 1312, 1396, 1228, 1381, 1481, 1360, 1308, 1230, 1329, 1232, 1233, 1313, 1311, 1234, 1408, 1534, 1535, 1404, 1235, 1536, 1468, 1236, 1537, 1538, 1239, 1240, 1388, 1324, 1483, 1417, 1241, 1484, 1242, 1245, 1247, 1248, 1251, 1386, 1351, 1252, 1577, 1435, 1356, 1253, 1461, 1401, 1574, 1254, 1539, 1411, 1255, 1256, 1580, 1258, 1259, 1346, 1540, 1322, 1541, 1418, 1459, 1264, 1307, 1210, 1462, 1403, 1337, 1542, 1265, 1543, 1544, 1389, 1407, 1412, 1325, 1398, 1486, 1457, 1268, 1266, 1334, 1419, 1267, 1456, 1458, 1315, 1546, 1473, 1472, 1376, 1377, 1316, 1378, 1379, 1390, 1365, 1545, 1317, 1366, 1463, 1302, 1361, 1269, 1400, 1573, 1344, 1466, 1469, 1420, 1487, 1488, 1464, 1465, 1353, 1470, 1548, 1454, 1354, 1331, 1285, 1524, 1575, 1410, 1422, 1425, 1352, 1271, 1475, 1474, 1525, 1367, 1550, 1368, 1272, 1343, 1362, 1363, 1364, 1489, 1321, 1370, 1369, 1274, 1549, 1395, 1275, 1528, 1527, 1383, 1424, 1276, 1437, 1327, 1455, 1380, 1328, 1342, 1277, 1385, 1359, 1320, 1490, 1371, 1429, 1393, 1372, 1471, 1333, 1373, 1374, 1281, 1423, 1382, 1375, 1282, 1305, 1414, 1523, 1416, 1336, 1339, 1443, 1444, 1445, 1446, 1447, 1448, 1449, 1578, 1491, 1358, 1494, 1495, 1493, 1492, 1357, 1428, 1284, 1554, 1555, 1556, 1557, 1579, 1551, 1397, 1287, 1286, 1552, 1553, 1355, 1413, 1409, 1421, 1440, 1391, 1291, 1496, 1561, 1562, 1563, 1564, 1565, 1566, 1568, 1567, 1569, 1570, 1571, 1520, 1294, 1323, 1572, 1297, 1330, 1392, 1306, 1558, 1559, 1560, 1347, 1303, 1526, 1399, 401: 2235, 523: 1858, 1212, 1213, 1211}, + {6: 929, 929, 929, 929, 384: 929, 386: 
929, 398: 929}, + {6: 928, 928, 928, 928, 384: 928, 386: 928, 398: 928}, + {518, 518, 518, 518, 518, 518, 10: 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 580: 2238, 687: 2239}, + {517, 517, 517, 517, 517, 517, 10: 517, 517, 517, 517, 517, 517, 517, 517, 517, 517, 517, 517, 517, 517, 517, 517, 517, 517, 517, 517, 517, 517, 517, 517, 517, 517, 517, 517, 517, 517, 517, 517, 517, 517, 517, 517, 517, 517, 517, 517, 517, 517, 517, 517, 517, 517, 517, 517, 517, 517, 517, 517, 517, 517, 517, 517, 517, 517, 517, 517, 517, 517, 517, 517, 517, 517, 517, 517, 517, 517, 517, 517, 517, 517, 517, 517, 517, 517, 517, 517, 517, 517, 517, 517, 517, 517, 517, 517, 517, 517, 517, 517, 517, 517, 517, 517, 517, 517, 517, 517, 517, 517, 517, 517, 517, 517, 517, 517, 517, 517, 517, 517, 517, 517, 517, 517, 517, 517, 517, 517, 517, 517, 517, 517, 517, 517, 517, 517, 517, 517, 517, 517, 517, 517, 517, 517, 517, 517, 517, 517, 517, 517, 517, 517, 517, 517, 517, 517, 517, 517, 517, 517, 517, 517, 517, 517, 517, 517, 517, 517, 517, 517, 517, 517, 517, 517, 517, 517, 517, 517, 517, 517, 517, 517, 517, 517, 517, 517, 517, 517, 517, 517, 517, 517, 517, 517, 517, 517, 517, 517, 517, 517, 517, 517, 517, 517, 517, 517, 517, 517, 517, 517, 517, 517, 517, 517, 517, 517, 517, 517, 517, 517, 517, 517, 517, 517, 517, 517, 517, 517, 517, 517, 517, 517, 517, 517, 517, 517, 517, 517, 517, 517, 517, 517, 517, 517, 517, 517, 517, 517, 517, 517, 517, 517, 517, 517, 517, 517, 517, 517, 517, 517, 517, 517, 517, 517, 517, 517, 517, 517, 517, 517, 517, 517, 517, 517, 517, 517, 517, 517, 517, 517, 517, 517, 517, 517, 517, 517, 517, 517, 517, 517, 517, 517, 517, 517, 517, 517, 517, 517, 517, 517, 517, 517, 517, 517, 517, 517, 517, 517, 517, 517, 517, 517, 517, 517, 517, 517, 517, 517, 517, 517, 517, 517, 517, 
517, 517, 517, 517, 517, 517, 517, 517, 517, 517, 517, 517, 517, 517, 517, 517, 517, 517, 517, 517, 517, 517, 517, 517, 517, 517, 517, 517, 517, 517, 517, 517, 517, 517, 517, 517, 517, 517, 517, 517, 517, 517, 517, 517, 517, 517, 517}, + // 1080 + {1309, 1332, 1217, 1442, 1436, 1426, 10: 1280, 1229, 1477, 1511, 1504, 1497, 1507, 1500, 1499, 1501, 1517, 1509, 1503, 1515, 1516, 1513, 1514, 1502, 1498, 1505, 1506, 1508, 1512, 1510, 1547, 1453, 1451, 1452, 1314, 1216, 1226, 1441, 1244, 1288, 1246, 1225, 1260, 1263, 1434, 1299, 1335, 1522, 1521, 1270, 1338, 1298, 1476, 1221, 1231, 1340, 1439, 1341, 1257, 1518, 1519, 1438, 1326, 1350, 1273, 1278, 1430, 1431, 1283, 1289, 1384, 1296, 1432, 1433, 1219, 1222, 1224, 1223, 1238, 1237, 1482, 1427, 1243, 1249, 1261, 1262, 1250, 1485, 1405, 1318, 1319, 1279, 1450, 1290, 1293, 1292, 1415, 1295, 1300, 1301, 1402, 1214, 1529, 1215, 1218, 1460, 1387, 1304, 1220, 1310, 1348, 1349, 1345, 1530, 1531, 1532, 1406, 1576, 1478, 1479, 1467, 1480, 1227, 1394, 1533, 1312, 1396, 1228, 1381, 1481, 1360, 1308, 1230, 1329, 1232, 1233, 1313, 1311, 1234, 1408, 1534, 1535, 1404, 1235, 1536, 1468, 1236, 1537, 1538, 1239, 1240, 1388, 1324, 1483, 1417, 1241, 1484, 1242, 1245, 1247, 1248, 1251, 1386, 1351, 1252, 1577, 1435, 1356, 1253, 1461, 1401, 1574, 1254, 1539, 1411, 1255, 1256, 1580, 1258, 1259, 1346, 1540, 1322, 1541, 1418, 1459, 1264, 1307, 1210, 1462, 1403, 1337, 1542, 1265, 1543, 1544, 1389, 1407, 1412, 1325, 1398, 1486, 1457, 1268, 1266, 1334, 1419, 1267, 1456, 1458, 1315, 1546, 1473, 1472, 1376, 1377, 1316, 1378, 1379, 1390, 1365, 1545, 1317, 1366, 1463, 1302, 1361, 1269, 1400, 1573, 1344, 1466, 1469, 1420, 1487, 1488, 1464, 1465, 1353, 1470, 1548, 1454, 1354, 1331, 1285, 1524, 1575, 1410, 1422, 1425, 1352, 1271, 1475, 1474, 1525, 1367, 1550, 1368, 1272, 1343, 1362, 1363, 1364, 1489, 1321, 1370, 1369, 1274, 1549, 1395, 1275, 1528, 1527, 1383, 1424, 1276, 1437, 1327, 1455, 1380, 1328, 1342, 1277, 1385, 1359, 1320, 1490, 1371, 1429, 1393, 1372, 1471, 1333, 1373, 1374, 1281, 1423, 1382, 1375, 1282, 1305, 1414, 1523, 1416, 1336, 1339, 1443, 1444, 1445, 1446, 1447, 1448, 1449, 1578, 1491, 1358, 1494, 1495, 1493, 1492, 1357, 1428, 1284, 1554, 1555, 1556, 1557, 1579, 1551, 1397, 1287, 1286, 1552, 1553, 1355, 1413, 1409, 1421, 1440, 1391, 1291, 1496, 1561, 1562, 1563, 1564, 1565, 1566, 1568, 1567, 1569, 1570, 1571, 1520, 1294, 1323, 1572, 1297, 1330, 1392, 1306, 1558, 1559, 1560, 1347, 1303, 1526, 1399, 523: 1584, 1212, 1213, 1211, 555: 2240}, + {49: 2245, 373: 2241, 429: 2246, 478: 1177, 480: 2244, 566: 2243, 1178, 1179, 1180, 646: 2242, 686: 2247}, + {1309, 1332, 1217, 1442, 1436, 1426, 8: 1093, 10: 1280, 1229, 1477, 1511, 1504, 1497, 1507, 1500, 1499, 1501, 1517, 1509, 1503, 1515, 1516, 1513, 1514, 1502, 1498, 1505, 1506, 1508, 1512, 1510, 1547, 1453, 1451, 1452, 1314, 1216, 1226, 1441, 1244, 1288, 1246, 1225, 1260, 1263, 1434, 1299, 1335, 1522, 1521, 1270, 1338, 1298, 1476, 1221, 1231, 1340, 1439, 1341, 1257, 1518, 1519, 1438, 1326, 1350, 1273, 1278, 1430, 1431, 1283, 1289, 1384, 1296, 1432, 1433, 1219, 1222, 1224, 1223, 1238, 1237, 1482, 1427, 1243, 1249, 1261, 1262, 1250, 1485, 1405, 1318, 1319, 1279, 1450, 1290, 1293, 1292, 1415, 1295, 1300, 1301, 1402, 1214, 1529, 1215, 1218, 1460, 1387, 1304, 1220, 1310, 1348, 1349, 1345, 1530, 1531, 1532, 1406, 1576, 1478, 1479, 1467, 1480, 1227, 1394, 1533, 1312, 1396, 1228, 1381, 1481, 1360, 1308, 1230, 1329, 1232, 1233, 1313, 1311, 1234, 1408, 1534, 1535, 1404, 1235, 1536, 1468, 1236, 1537, 1538, 1239, 1240, 1388, 1324, 1483, 
1417, 1241, 1484, 1242, 1245, 1247, 1248, 1251, 1386, 1351, 1252, 1577, 1435, 1356, 1253, 1461, 1401, 1574, 1254, 1539, 1411, 1255, 1256, 1580, 1258, 1259, 1346, 1540, 1322, 1541, 1418, 1459, 1264, 1307, 1210, 1462, 1403, 1337, 1542, 1265, 1543, 1544, 1389, 1407, 1412, 1325, 1398, 1486, 1457, 1268, 1266, 1334, 1419, 1267, 1456, 1458, 1315, 1546, 1473, 1472, 1376, 1377, 1316, 1378, 1379, 1390, 1365, 1545, 1317, 1366, 1463, 1302, 1361, 1269, 1400, 1573, 1344, 1466, 1469, 1420, 1487, 1488, 1464, 1465, 1353, 1470, 1548, 1454, 1354, 1331, 1285, 1524, 1575, 1410, 1422, 1425, 1352, 1271, 1475, 1474, 1525, 1367, 1550, 1368, 1272, 1343, 1362, 1363, 1364, 1489, 1321, 1370, 1369, 1274, 1549, 1395, 1275, 1528, 1527, 1383, 1424, 1276, 1437, 1327, 1455, 1380, 1328, 1342, 1277, 1385, 1359, 1320, 1490, 1371, 1429, 1393, 1372, 1471, 1333, 1373, 1374, 1281, 1423, 1382, 1375, 1282, 1305, 1414, 1523, 1416, 1336, 1339, 1443, 1444, 1445, 1446, 1447, 1448, 1449, 1578, 1491, 1358, 1494, 1495, 1493, 1492, 1357, 1428, 1284, 1554, 1555, 1556, 1557, 1579, 1551, 1397, 1287, 1286, 1552, 1553, 1355, 1413, 1409, 1421, 1440, 1391, 1291, 1496, 1561, 1562, 1563, 1564, 1565, 1566, 1568, 1567, 1569, 1570, 1571, 1520, 1294, 1323, 1572, 1297, 1330, 1392, 1306, 1558, 1559, 1560, 1347, 1303, 1526, 1399, 478: 1177, 523: 2248, 1212, 1213, 1211, 554: 2271, 566: 2274, 1178, 1179, 1180, 722: 2272, 2273}, + {373: 2262, 639: 2261, 710: 2260}, + {6: 511, 511}, + // 1085 + {1309, 1332, 1217, 1442, 1436, 1426, 497, 497, 9: 497, 1280, 1229, 1477, 1511, 1504, 1497, 1507, 1500, 1499, 1501, 1517, 1509, 1503, 1515, 1516, 1513, 1514, 1502, 1498, 1505, 1506, 1508, 1512, 1510, 1547, 1453, 1451, 1452, 1314, 1216, 1226, 1441, 1244, 1288, 1246, 1225, 1260, 1263, 1434, 1299, 1335, 1522, 1521, 1270, 1338, 1298, 1476, 1221, 1231, 1340, 1439, 1341, 1257, 1518, 1519, 1438, 1326, 1350, 1273, 1278, 1430, 1431, 1283, 1289, 1384, 1296, 1432, 1433, 1219, 1222, 1224, 1223, 1238, 1237, 1482, 1427, 1243, 1249, 1261, 1262, 1250, 1485, 1405, 1318, 1319, 1279, 1450, 1290, 1293, 1292, 1415, 1295, 1300, 1301, 1402, 1214, 1529, 1215, 1218, 1460, 1387, 1304, 1220, 1310, 1348, 1349, 1345, 1530, 1531, 1532, 1406, 1576, 1478, 1479, 1467, 1480, 1227, 1394, 1533, 1312, 1396, 1228, 1381, 1481, 1360, 1308, 1230, 1329, 1232, 1233, 1313, 1311, 1234, 1408, 1534, 1535, 1404, 1235, 1536, 1468, 1236, 1537, 1538, 1239, 1240, 1388, 1324, 1483, 1417, 1241, 1484, 1242, 1245, 1247, 1248, 1251, 1386, 1351, 1252, 1577, 1435, 1356, 1253, 1461, 1401, 1574, 1254, 1539, 1411, 1255, 1256, 1580, 1258, 1259, 1346, 1540, 1322, 1541, 1418, 1459, 1264, 1307, 1210, 1462, 1403, 1337, 1542, 1265, 1543, 1544, 1389, 1407, 1412, 1325, 1398, 1486, 1457, 1268, 1266, 1334, 1419, 1267, 1456, 1458, 1315, 1546, 1473, 1472, 1376, 1377, 1316, 1378, 1379, 1390, 1365, 1545, 1317, 1366, 1463, 1302, 1361, 1269, 1400, 1573, 1344, 1466, 1469, 1420, 1487, 1488, 1464, 1465, 1353, 1470, 1548, 1454, 1354, 1331, 1285, 1524, 1575, 1410, 1422, 1425, 1352, 1271, 1475, 1474, 1525, 1367, 1550, 1368, 1272, 1343, 1362, 1363, 1364, 1489, 1321, 1370, 1369, 1274, 1549, 1395, 1275, 1528, 1527, 1383, 1424, 1276, 1437, 1327, 1455, 1380, 1328, 1342, 1277, 1385, 1359, 1320, 1490, 1371, 1429, 1393, 1372, 1471, 1333, 1373, 1374, 1281, 1423, 1382, 1375, 1282, 1305, 1414, 1523, 1416, 1336, 1339, 1443, 1444, 1445, 1446, 1447, 1448, 1449, 1578, 1491, 1358, 1494, 1495, 1493, 1492, 1357, 1428, 1284, 1554, 1555, 1556, 1557, 1579, 1551, 1397, 1287, 1286, 1552, 1553, 1355, 1413, 1409, 1421, 1440, 1391, 1291, 1496, 1561, 1562, 1563, 1564, 1565, 1566, 
1568, 1567, 1569, 1570, 1571, 1520, 1294, 1323, 1572, 1297, 1330, 1392, 1306, 1558, 1559, 1560, 1347, 1303, 1526, 1399, 523: 2248, 1212, 1213, 1211, 554: 2250, 657: 2251, 724: 2249}, + {373: 509}, + {373: 508}, + {6: 494, 494}, + {1098, 1098, 1098, 1098, 1098, 1098, 1098, 1098, 1098, 1098, 1098, 1098, 1098, 1098, 1098, 1098, 1098, 1098, 1098, 1098, 1098, 1098, 1098, 1098, 1098, 1098, 1098, 1098, 1098, 1098, 1098, 1098, 1098, 1098, 1098, 1098, 1098, 1098, 1098, 1098, 1098, 1098, 1098, 1098, 1098, 1098, 1098, 1098, 1098, 1098, 1098, 1098, 1098, 1098, 1098, 1098, 1098, 1098, 1098, 1098, 1098, 1098, 1098, 1098, 1098, 1098, 1098, 1098, 1098, 1098, 1098, 1098, 1098, 1098, 1098, 1098, 1098, 1098, 1098, 1098, 1098, 1098, 1098, 1098, 1098, 1098, 1098, 1098, 1098, 1098, 1098, 1098, 1098, 1098, 1098, 1098, 1098, 1098, 1098, 1098, 1098, 1098, 1098, 1098, 1098, 1098, 1098, 1098, 1098, 1098, 1098, 1098, 1098, 1098, 1098, 1098, 1098, 1098, 1098, 1098, 1098, 1098, 1098, 1098, 1098, 1098, 1098, 1098, 1098, 1098, 1098, 1098, 1098, 1098, 1098, 1098, 1098, 1098, 1098, 1098, 1098, 1098, 1098, 1098, 1098, 1098, 1098, 1098, 1098, 1098, 1098, 1098, 1098, 1098, 1098, 1098, 1098, 1098, 1098, 1098, 1098, 1098, 1098, 1098, 1098, 1098, 1098, 1098, 1098, 1098, 1098, 1098, 1098, 1098, 1098, 1098, 1098, 1098, 1098, 1098, 1098, 1098, 1098, 1098, 1098, 1098, 1098, 1098, 1098, 1098, 1098, 1098, 1098, 1098, 1098, 1098, 1098, 1098, 1098, 1098, 1098, 1098, 1098, 1098, 1098, 1098, 1098, 1098, 1098, 1098, 1098, 1098, 1098, 1098, 1098, 1098, 1098, 1098, 1098, 1098, 1098, 1098, 1098, 1098, 1098, 1098, 1098, 1098, 1098, 1098, 1098, 1098, 1098, 1098, 1098, 1098, 1098, 1098, 1098, 1098, 1098, 1098, 1098, 1098, 1098, 1098, 1098, 1098, 1098, 1098, 1098, 1098, 1098, 1098, 1098, 1098, 1098, 1098, 1098, 1098, 1098, 1098, 1098, 1098, 1098, 1098, 1098, 1098, 1098, 1098, 1098, 1098, 1098, 1098, 1098, 1098, 1098, 1098, 1098, 1098, 1098, 1098, 1098, 1098, 1098, 1098, 1098, 1098, 1098, 1098, 1098, 1098, 1098, 1098, 1098, 1098, 1098, 1098, 1098, 1098, 1098, 1098, 1098, 1098, 1098, 1098, 1098, 1098, 1098, 1098, 1098, 1098, 1098, 1098, 1098, 1098, 1098, 1098, 1098, 1098, 1098, 1098, 1098, 1098, 1098, 1098, 1098, 1098, 1098, 1098, 1098, 1098, 1098, 1098, 1098, 1098, 1098, 1098, 1098, 1098, 1098, 1098, 1098, 1098, 1098, 1098, 1098, 1098, 1098, 1098, 1098, 1098, 1098, 1098, 1098, 1098, 1098, 1098, 1098, 1098, 1098, 1098, 1098, 1098, 1098, 1098, 1098, 1098, 1098, 1098, 1098, 1098, 373: 1098, 400: 2256, 403: 1098, 407: 1098, 1098, 470: 1098, 1098, 1098, 480: 1098, 484: 1098, 1098, 487: 1098, 489: 1098, 1098, 492: 1098, 1098, 495: 1098, 1098, 498: 1098, 1098, 1098, 1098, 1098, 1098, 1098, 1098, 1098, 1098, 509: 1098, 1098, 1098, 1098, 1098, 1098, 1098, 1098, 1098, 519: 1098, 1098, 1098, 1098}, + // 1090 + {6: 510, 510, 9: 2254}, + {403: 2252}, + {6: 496, 496, 9: 496}, + {1309, 1332, 1217, 1442, 1436, 1426, 10: 1280, 1625, 1477, 1511, 1504, 1497, 1507, 1500, 1499, 1501, 1517, 1509, 1503, 1515, 1516, 1513, 1514, 1502, 1498, 1505, 1506, 1508, 1512, 1510, 1547, 1453, 1451, 1452, 1314, 1623, 1226, 1441, 1244, 1288, 1246, 1225, 1628, 1263, 1434, 1299, 1335, 1522, 1521, 1270, 1338, 1298, 1476, 1221, 1231, 1340, 1439, 1341, 1257, 1518, 1519, 1438, 1326, 1350, 1273, 1278, 1430, 1431, 1283, 1289, 1384, 1632, 1432, 1433, 1219, 1222, 1224, 1223, 1238, 1626, 1482, 1427, 1243, 1249, 1261, 1262, 1250, 1485, 1405, 1318, 1319, 1279, 1450, 1290, 1631, 1630, 1415, 1295, 1300, 1633, 1402, 1214, 1529, 1215, 1218, 1460, 1387, 1304, 1624, 1310, 1348, 1349, 1345, 1530, 1531, 
1532, 1406, 1576, 1478, 1479, 1467, 1480, 1227, 1394, 1533, 1312, 1396, 1228, 1381, 1481, 1640, 1636, 1230, 1329, 1232, 1233, 1313, 1311, 1234, 1408, 1534, 1535, 1404, 1235, 1536, 1468, 1236, 1537, 1538, 1627, 1240, 1388, 1324, 1483, 1417, 1241, 1484, 1242, 1245, 1247, 1248, 1251, 1386, 1351, 1252, 1577, 1435, 1356, 1253, 1461, 1401, 1574, 1254, 1539, 1411, 1255, 1256, 1580, 1258, 1259, 1346, 1540, 1322, 1541, 1418, 1459, 1629, 1307, 1622, 1462, 1403, 1337, 1542, 1265, 1543, 1544, 1389, 1407, 1412, 1325, 1398, 1486, 1457, 1268, 1266, 1334, 1419, 1267, 1456, 1458, 1315, 1546, 1473, 1472, 1376, 1377, 1316, 1378, 1379, 1390, 1642, 1545, 1317, 1643, 1463, 1302, 1641, 1269, 1400, 1573, 1344, 1466, 1469, 1420, 1487, 1488, 1464, 1465, 1353, 1470, 1548, 1454, 1354, 1331, 1285, 1524, 1575, 1410, 1422, 1425, 1352, 1271, 1475, 1474, 1525, 1367, 1550, 1368, 1272, 1343, 1362, 1363, 1364, 1489, 1637, 1370, 1369, 1274, 1549, 1395, 1275, 1528, 1527, 1383, 1424, 1276, 1437, 1327, 1455, 1380, 1328, 1638, 1277, 1385, 1639, 1320, 1490, 1644, 1429, 1393, 1372, 1471, 1333, 1373, 1374, 1281, 1423, 1382, 1375, 1282, 1305, 1414, 1523, 1416, 1336, 1339, 1443, 1444, 1445, 1446, 1447, 1448, 1449, 1578, 1491, 1358, 1494, 1495, 1493, 1492, 1357, 1428, 1284, 1554, 1555, 1556, 1557, 1579, 1551, 1397, 1287, 1286, 1552, 1553, 1355, 1413, 1409, 1421, 1440, 1391, 1291, 1496, 1561, 1562, 1563, 1564, 1565, 1566, 1568, 1567, 1569, 1570, 1571, 1520, 1294, 1323, 1572, 1297, 1330, 1392, 1635, 1558, 1559, 1560, 1347, 1634, 1526, 1399, 1618, 1671, 1926, 376: 1646, 378: 1655, 1669, 1668, 1699, 400: 1658, 404: 1616, 1677, 1650, 410: 1682, 419: 1645, 1647, 429: 1675, 1649, 1648, 433: 1676, 435: 1654, 1680, 1689, 1711, 1653, 1690, 1691, 1652, 1666, 1667, 1705, 1697, 1700, 1706, 1707, 1702, 1703, 1708, 1701, 1704, 1695, 1673, 1685, 1686, 1688, 1684, 1678, 1679, 1670, 1681, 1683, 1672, 1687, 1692, 1693, 523: 1657, 1212, 1213, 1211, 1663, 1659, 1651, 1662, 1660, 1661, 1694, 1698, 1696, 1656, 1665, 1709, 1710, 1664, 1621, 1620, 1619, 1925, 579: 2253}, + {6: 498, 498, 9: 498}, + // 1095 + {1309, 1332, 1217, 1442, 1436, 1426, 10: 1280, 1229, 1477, 1511, 1504, 1497, 1507, 1500, 1499, 1501, 1517, 1509, 1503, 1515, 1516, 1513, 1514, 1502, 1498, 1505, 1506, 1508, 1512, 1510, 1547, 1453, 1451, 1452, 1314, 1216, 1226, 1441, 1244, 1288, 1246, 1225, 1260, 1263, 1434, 1299, 1335, 1522, 1521, 1270, 1338, 1298, 1476, 1221, 1231, 1340, 1439, 1341, 1257, 1518, 1519, 1438, 1326, 1350, 1273, 1278, 1430, 1431, 1283, 1289, 1384, 1296, 1432, 1433, 1219, 1222, 1224, 1223, 1238, 1237, 1482, 1427, 1243, 1249, 1261, 1262, 1250, 1485, 1405, 1318, 1319, 1279, 1450, 1290, 1293, 1292, 1415, 1295, 1300, 1301, 1402, 1214, 1529, 1215, 1218, 1460, 1387, 1304, 1220, 1310, 1348, 1349, 1345, 1530, 1531, 1532, 1406, 1576, 1478, 1479, 1467, 1480, 1227, 1394, 1533, 1312, 1396, 1228, 1381, 1481, 1360, 1308, 1230, 1329, 1232, 1233, 1313, 1311, 1234, 1408, 1534, 1535, 1404, 1235, 1536, 1468, 1236, 1537, 1538, 1239, 1240, 1388, 1324, 1483, 1417, 1241, 1484, 1242, 1245, 1247, 1248, 1251, 1386, 1351, 1252, 1577, 1435, 1356, 1253, 1461, 1401, 1574, 1254, 1539, 1411, 1255, 1256, 1580, 1258, 1259, 1346, 1540, 1322, 1541, 1418, 1459, 1264, 1307, 1210, 1462, 1403, 1337, 1542, 1265, 1543, 1544, 1389, 1407, 1412, 1325, 1398, 1486, 1457, 1268, 1266, 1334, 1419, 1267, 1456, 1458, 1315, 1546, 1473, 1472, 1376, 1377, 1316, 1378, 1379, 1390, 1365, 1545, 1317, 1366, 1463, 1302, 1361, 1269, 1400, 1573, 1344, 1466, 1469, 1420, 1487, 1488, 1464, 1465, 1353, 1470, 1548, 1454, 1354, 1331, 1285, 1524, 
1575, 1410, 1422, 1425, 1352, 1271, 1475, 1474, 1525, 1367, 1550, 1368, 1272, 1343, 1362, 1363, 1364, 1489, 1321, 1370, 1369, 1274, 1549, 1395, 1275, 1528, 1527, 1383, 1424, 1276, 1437, 1327, 1455, 1380, 1328, 1342, 1277, 1385, 1359, 1320, 1490, 1371, 1429, 1393, 1372, 1471, 1333, 1373, 1374, 1281, 1423, 1382, 1375, 1282, 1305, 1414, 1523, 1416, 1336, 1339, 1443, 1444, 1445, 1446, 1447, 1448, 1449, 1578, 1491, 1358, 1494, 1495, 1493, 1492, 1357, 1428, 1284, 1554, 1555, 1556, 1557, 1579, 1551, 1397, 1287, 1286, 1552, 1553, 1355, 1413, 1409, 1421, 1440, 1391, 1291, 1496, 1561, 1562, 1563, 1564, 1565, 1566, 1568, 1567, 1569, 1570, 1571, 1520, 1294, 1323, 1572, 1297, 1330, 1392, 1306, 1558, 1559, 1560, 1347, 1303, 1526, 1399, 523: 2248, 1212, 1213, 1211, 554: 2250, 657: 2255}, + {6: 495, 495, 9: 495}, + {1309, 1332, 1217, 1442, 1436, 1426, 10: 1280, 1229, 1477, 1511, 1504, 1497, 1507, 1500, 1499, 1501, 1517, 1509, 1503, 1515, 1516, 1513, 1514, 1502, 1498, 1505, 1506, 1508, 1512, 1510, 1547, 1453, 1451, 1452, 1314, 1216, 1226, 1441, 1244, 1288, 1246, 1225, 1260, 1263, 1434, 1299, 1335, 1522, 1521, 1270, 1338, 1298, 1476, 1221, 1231, 1340, 1439, 1341, 1257, 1518, 1519, 1438, 1326, 1350, 1273, 1278, 1430, 1431, 1283, 1289, 1384, 1296, 1432, 1433, 1219, 1222, 1224, 1223, 1238, 1237, 1482, 1427, 1243, 1249, 1261, 1262, 1250, 1485, 1405, 1318, 1319, 1279, 1450, 1290, 1293, 1292, 1415, 1295, 1300, 1301, 1402, 1214, 1529, 1215, 1218, 1460, 1387, 1304, 1220, 1310, 1348, 1349, 1345, 1530, 1531, 1532, 1406, 1576, 1478, 1479, 1467, 1480, 1227, 1394, 1533, 1312, 1396, 1228, 1381, 1481, 1360, 1308, 1230, 1329, 1232, 1233, 1313, 1311, 1234, 1408, 1534, 1535, 1404, 1235, 1536, 1468, 1236, 1537, 1538, 1239, 1240, 1388, 1324, 1483, 1417, 1241, 1484, 1242, 1245, 1247, 1248, 1251, 1386, 1351, 1252, 1577, 1435, 1356, 1253, 1461, 1401, 1574, 1254, 1539, 1411, 1255, 1256, 1580, 1258, 1259, 1346, 1540, 1322, 1541, 1418, 1459, 1264, 1307, 1210, 1462, 1403, 1337, 1542, 1265, 1543, 1544, 1389, 1407, 1412, 1325, 1398, 1486, 1457, 1268, 1266, 1334, 1419, 1267, 1456, 1458, 1315, 1546, 1473, 1472, 1376, 1377, 1316, 1378, 1379, 1390, 1365, 1545, 1317, 1366, 1463, 1302, 1361, 1269, 1400, 1573, 1344, 1466, 1469, 1420, 1487, 1488, 1464, 1465, 1353, 1470, 1548, 1454, 1354, 1331, 1285, 1524, 1575, 1410, 1422, 1425, 1352, 1271, 1475, 1474, 1525, 1367, 1550, 1368, 1272, 1343, 1362, 1363, 1364, 1489, 1321, 1370, 1369, 1274, 1549, 1395, 1275, 1528, 1527, 1383, 1424, 1276, 1437, 1327, 1455, 1380, 1328, 1342, 1277, 1385, 1359, 1320, 1490, 1371, 1429, 1393, 1372, 1471, 1333, 1373, 1374, 1281, 1423, 1382, 1375, 1282, 1305, 1414, 1523, 1416, 1336, 1339, 1443, 1444, 1445, 1446, 1447, 1448, 1449, 1578, 1491, 1358, 1494, 1495, 1493, 1492, 1357, 1428, 1284, 1554, 1555, 1556, 1557, 1579, 1551, 1397, 1287, 1286, 1552, 1553, 1355, 1413, 1409, 1421, 1440, 1391, 1291, 1496, 1561, 1562, 1563, 1564, 1565, 1566, 1568, 1567, 1569, 1570, 1571, 1520, 1294, 1323, 1572, 1297, 1330, 1392, 1306, 1558, 1559, 1560, 1347, 1303, 1526, 1399, 523: 2257, 1212, 1213, 1211}, + {1097, 1097, 1097, 1097, 1097, 1097, 1097, 1097, 1097, 1097, 1097, 1097, 1097, 1097, 1097, 1097, 1097, 1097, 1097, 1097, 1097, 1097, 1097, 1097, 1097, 1097, 1097, 1097, 1097, 1097, 1097, 1097, 1097, 1097, 1097, 1097, 1097, 1097, 1097, 1097, 1097, 1097, 1097, 1097, 1097, 1097, 1097, 1097, 1097, 1097, 1097, 1097, 1097, 1097, 1097, 1097, 1097, 1097, 1097, 1097, 1097, 1097, 1097, 1097, 1097, 1097, 1097, 1097, 1097, 1097, 1097, 1097, 1097, 1097, 1097, 1097, 1097, 1097, 1097, 1097, 1097, 1097, 1097, 
1097, 1097, 1097, 1097, 1097, 1097, 1097, 1097, 1097, 1097, 1097, 1097, 1097, 1097, 1097, 1097, 1097, 1097, 1097, 1097, 1097, 1097, 1097, 1097, 1097, 1097, 1097, 1097, 1097, 1097, 1097, 1097, 1097, 1097, 1097, 1097, 1097, 1097, 1097, 1097, 1097, 1097, 1097, 1097, 1097, 1097, 1097, 1097, 1097, 1097, 1097, 1097, 1097, 1097, 1097, 1097, 1097, 1097, 1097, 1097, 1097, 1097, 1097, 1097, 1097, 1097, 1097, 1097, 1097, 1097, 1097, 1097, 1097, 1097, 1097, 1097, 1097, 1097, 1097, 1097, 1097, 1097, 1097, 1097, 1097, 1097, 1097, 1097, 1097, 1097, 1097, 1097, 1097, 1097, 1097, 1097, 1097, 1097, 1097, 1097, 1097, 1097, 1097, 1097, 1097, 1097, 1097, 1097, 1097, 1097, 1097, 1097, 1097, 1097, 1097, 1097, 1097, 1097, 1097, 1097, 1097, 1097, 1097, 1097, 1097, 1097, 1097, 1097, 1097, 1097, 1097, 1097, 1097, 1097, 1097, 1097, 1097, 1097, 1097, 1097, 1097, 1097, 1097, 1097, 1097, 1097, 1097, 1097, 1097, 1097, 1097, 1097, 1097, 1097, 1097, 1097, 1097, 1097, 1097, 1097, 1097, 1097, 1097, 1097, 1097, 1097, 1097, 1097, 1097, 1097, 1097, 1097, 1097, 1097, 1097, 1097, 1097, 1097, 1097, 1097, 1097, 1097, 1097, 1097, 1097, 1097, 1097, 1097, 1097, 1097, 1097, 1097, 1097, 1097, 1097, 1097, 1097, 1097, 1097, 1097, 1097, 1097, 1097, 1097, 1097, 1097, 1097, 1097, 1097, 1097, 1097, 1097, 1097, 1097, 1097, 1097, 1097, 1097, 1097, 1097, 1097, 1097, 1097, 1097, 1097, 1097, 1097, 1097, 1097, 1097, 1097, 1097, 1097, 1097, 1097, 1097, 1097, 1097, 1097, 1097, 1097, 1097, 1097, 1097, 1097, 1097, 1097, 1097, 1097, 1097, 1097, 1097, 1097, 1097, 1097, 1097, 1097, 1097, 1097, 1097, 1097, 1097, 1097, 1097, 1097, 1097, 1097, 1097, 1097, 1097, 1097, 1097, 1097, 1097, 1097, 1097, 1097, 1097, 1097, 1097, 1097, 1097, 1097, 1097, 1097, 1097, 1097, 1097, 1097, 373: 1097, 400: 2258, 403: 1097, 407: 1097, 1097, 470: 1097, 1097, 1097, 480: 1097, 484: 1097, 1097, 487: 1097, 489: 1097, 1097, 492: 1097, 1097, 495: 1097, 1097, 498: 1097, 1097, 1097, 1097, 1097, 1097, 1097, 1097, 1097, 1097, 509: 1097, 1097, 1097, 1097, 1097, 1097, 1097, 1097, 1097, 519: 1097, 1097, 1097, 1097}, + {1309, 1332, 1217, 1442, 1436, 1426, 10: 1280, 1229, 1477, 1511, 1504, 1497, 1507, 1500, 1499, 1501, 1517, 1509, 1503, 1515, 1516, 1513, 1514, 1502, 1498, 1505, 1506, 1508, 1512, 1510, 1547, 1453, 1451, 1452, 1314, 1216, 1226, 1441, 1244, 1288, 1246, 1225, 1260, 1263, 1434, 1299, 1335, 1522, 1521, 1270, 1338, 1298, 1476, 1221, 1231, 1340, 1439, 1341, 1257, 1518, 1519, 1438, 1326, 1350, 1273, 1278, 1430, 1431, 1283, 1289, 1384, 1296, 1432, 1433, 1219, 1222, 1224, 1223, 1238, 1237, 1482, 1427, 1243, 1249, 1261, 1262, 1250, 1485, 1405, 1318, 1319, 1279, 1450, 1290, 1293, 1292, 1415, 1295, 1300, 1301, 1402, 1214, 1529, 1215, 1218, 1460, 1387, 1304, 1220, 1310, 1348, 1349, 1345, 1530, 1531, 1532, 1406, 1576, 1478, 1479, 1467, 1480, 1227, 1394, 1533, 1312, 1396, 1228, 1381, 1481, 1360, 1308, 1230, 1329, 1232, 1233, 1313, 1311, 1234, 1408, 1534, 1535, 1404, 1235, 1536, 1468, 1236, 1537, 1538, 1239, 1240, 1388, 1324, 1483, 1417, 1241, 1484, 1242, 1245, 1247, 1248, 1251, 1386, 1351, 1252, 1577, 1435, 1356, 1253, 1461, 1401, 1574, 1254, 1539, 1411, 1255, 1256, 1580, 1258, 1259, 1346, 1540, 1322, 1541, 1418, 1459, 1264, 1307, 1210, 1462, 1403, 1337, 1542, 1265, 1543, 1544, 1389, 1407, 1412, 1325, 1398, 1486, 1457, 1268, 1266, 1334, 1419, 1267, 1456, 1458, 1315, 1546, 1473, 1472, 1376, 1377, 1316, 1378, 1379, 1390, 1365, 1545, 1317, 1366, 1463, 1302, 1361, 1269, 1400, 1573, 1344, 1466, 1469, 1420, 1487, 1488, 1464, 1465, 1353, 1470, 1548, 1454, 1354, 1331, 1285, 1524, 1575, 1410, 1422, 
1425, 1352, 1271, 1475, 1474, 1525, 1367, 1550, 1368, 1272, 1343, 1362, 1363, 1364, 1489, 1321, 1370, 1369, 1274, 1549, 1395, 1275, 1528, 1527, 1383, 1424, 1276, 1437, 1327, 1455, 1380, 1328, 1342, 1277, 1385, 1359, 1320, 1490, 1371, 1429, 1393, 1372, 1471, 1333, 1373, 1374, 1281, 1423, 1382, 1375, 1282, 1305, 1414, 1523, 1416, 1336, 1339, 1443, 1444, 1445, 1446, 1447, 1448, 1449, 1578, 1491, 1358, 1494, 1495, 1493, 1492, 1357, 1428, 1284, 1554, 1555, 1556, 1557, 1579, 1551, 1397, 1287, 1286, 1552, 1553, 1355, 1413, 1409, 1421, 1440, 1391, 1291, 1496, 1561, 1562, 1563, 1564, 1565, 1566, 1568, 1567, 1569, 1570, 1571, 1520, 1294, 1323, 1572, 1297, 1330, 1392, 1306, 1558, 1559, 1560, 1347, 1303, 1526, 1399, 523: 2259, 1212, 1213, 1211}, + // 1100 + {1096, 1096, 1096, 1096, 1096, 1096, 1096, 1096, 1096, 1096, 1096, 1096, 1096, 1096, 1096, 1096, 1096, 1096, 1096, 1096, 1096, 1096, 1096, 1096, 1096, 1096, 1096, 1096, 1096, 1096, 1096, 1096, 1096, 1096, 1096, 1096, 1096, 1096, 1096, 1096, 1096, 1096, 1096, 1096, 1096, 1096, 1096, 1096, 1096, 1096, 1096, 1096, 1096, 1096, 1096, 1096, 1096, 1096, 1096, 1096, 1096, 1096, 1096, 1096, 1096, 1096, 1096, 1096, 1096, 1096, 1096, 1096, 1096, 1096, 1096, 1096, 1096, 1096, 1096, 1096, 1096, 1096, 1096, 1096, 1096, 1096, 1096, 1096, 1096, 1096, 1096, 1096, 1096, 1096, 1096, 1096, 1096, 1096, 1096, 1096, 1096, 1096, 1096, 1096, 1096, 1096, 1096, 1096, 1096, 1096, 1096, 1096, 1096, 1096, 1096, 1096, 1096, 1096, 1096, 1096, 1096, 1096, 1096, 1096, 1096, 1096, 1096, 1096, 1096, 1096, 1096, 1096, 1096, 1096, 1096, 1096, 1096, 1096, 1096, 1096, 1096, 1096, 1096, 1096, 1096, 1096, 1096, 1096, 1096, 1096, 1096, 1096, 1096, 1096, 1096, 1096, 1096, 1096, 1096, 1096, 1096, 1096, 1096, 1096, 1096, 1096, 1096, 1096, 1096, 1096, 1096, 1096, 1096, 1096, 1096, 1096, 1096, 1096, 1096, 1096, 1096, 1096, 1096, 1096, 1096, 1096, 1096, 1096, 1096, 1096, 1096, 1096, 1096, 1096, 1096, 1096, 1096, 1096, 1096, 1096, 1096, 1096, 1096, 1096, 1096, 1096, 1096, 1096, 1096, 1096, 1096, 1096, 1096, 1096, 1096, 1096, 1096, 1096, 1096, 1096, 1096, 1096, 1096, 1096, 1096, 1096, 1096, 1096, 1096, 1096, 1096, 1096, 1096, 1096, 1096, 1096, 1096, 1096, 1096, 1096, 1096, 1096, 1096, 1096, 1096, 1096, 1096, 1096, 1096, 1096, 1096, 1096, 1096, 1096, 1096, 1096, 1096, 1096, 1096, 1096, 1096, 1096, 1096, 1096, 1096, 1096, 1096, 1096, 1096, 1096, 1096, 1096, 1096, 1096, 1096, 1096, 1096, 1096, 1096, 1096, 1096, 1096, 1096, 1096, 1096, 1096, 1096, 1096, 1096, 1096, 1096, 1096, 1096, 1096, 1096, 1096, 1096, 1096, 1096, 1096, 1096, 1096, 1096, 1096, 1096, 1096, 1096, 1096, 1096, 1096, 1096, 1096, 1096, 1096, 1096, 1096, 1096, 1096, 1096, 1096, 1096, 1096, 1096, 1096, 1096, 1096, 1096, 1096, 1096, 1096, 1096, 1096, 1096, 1096, 1096, 1096, 1096, 1096, 1096, 1096, 1096, 1096, 1096, 1096, 1096, 1096, 1096, 1096, 1096, 1096, 1096, 1096, 1096, 1096, 1096, 1096, 1096, 1096, 1096, 1096, 1096, 1096, 1096, 1096, 1096, 1096, 1096, 1096, 1096, 1096, 1096, 1096, 373: 1096, 403: 1096, 407: 1096, 1096, 470: 1096, 1096, 1096, 480: 1096, 484: 1096, 1096, 487: 1096, 489: 1096, 1096, 492: 1096, 1096, 495: 1096, 1096, 498: 1096, 1096, 1096, 1096, 1096, 1096, 1096, 1096, 1096, 1096, 509: 1096, 1096, 1096, 1096, 1096, 1096, 1096, 1096, 1096, 519: 1096, 1096, 1096, 1096}, + {6: 513, 513, 9: 2269}, + {6: 507, 507, 9: 507}, + {1309, 1332, 1217, 1442, 1436, 1426, 8: 504, 10: 1280, 1625, 1477, 1511, 1504, 1497, 1507, 1500, 1499, 1501, 1517, 1509, 1503, 1515, 1516, 1513, 1514, 1502, 1498, 1505, 1506, 1508, 1512, 1510, 1547, 1453, 
1451, 1452, 1314, 1623, 1226, 1441, 1244, 1288, 1246, 1225, 1628, 1263, 1434, 1299, 1335, 1522, 1521, 1270, 1338, 1298, 1476, 1221, 1231, 1340, 1439, 1341, 1257, 1518, 1519, 1438, 1326, 1350, 1273, 1278, 1430, 1431, 1283, 1289, 1384, 1632, 1432, 1433, 1219, 1222, 1224, 1223, 1238, 1626, 1482, 1427, 1243, 1249, 1261, 1262, 1250, 1485, 1405, 1318, 1319, 1279, 1450, 1290, 1631, 1630, 1415, 1295, 1300, 1633, 1402, 1214, 1529, 1215, 1218, 1460, 1387, 1304, 1624, 1310, 1348, 1349, 1345, 1530, 1531, 1532, 1406, 1576, 1478, 1479, 1467, 1480, 1227, 1394, 1533, 1312, 1396, 1228, 1381, 1481, 1640, 1636, 1230, 1329, 1232, 1233, 1313, 1311, 1234, 1408, 1534, 1535, 1404, 1235, 1536, 1468, 1236, 1537, 1538, 1627, 1240, 1388, 1324, 1483, 1417, 1241, 1484, 1242, 1245, 1247, 1248, 1251, 1386, 1351, 1252, 1577, 1435, 1356, 1253, 1461, 1401, 1574, 1254, 1539, 1411, 1255, 1256, 1580, 1258, 1259, 1346, 1540, 1322, 1541, 1418, 1459, 1629, 1307, 1622, 1462, 1403, 1337, 1542, 1265, 1543, 1544, 1389, 1407, 1412, 1325, 1398, 1486, 1457, 1268, 1266, 1334, 1419, 1267, 1456, 1458, 1315, 1546, 1473, 1472, 1376, 1377, 1316, 1378, 1379, 1390, 1642, 1545, 1317, 1643, 1463, 1302, 1641, 1269, 1400, 1573, 1344, 1466, 1469, 1420, 1487, 1488, 1464, 1465, 1353, 1470, 1548, 1454, 1354, 1331, 1285, 1524, 1575, 1410, 1422, 1425, 1352, 1271, 1475, 1474, 1525, 1367, 1550, 1368, 1272, 1343, 1362, 1363, 1364, 1489, 1637, 1370, 1369, 1274, 1549, 1395, 1275, 1528, 1527, 1383, 1424, 1276, 1437, 1327, 1455, 1380, 1328, 1638, 1277, 1385, 1639, 1320, 1490, 1644, 1429, 1393, 1372, 1471, 1333, 1373, 1374, 1281, 1423, 1382, 1375, 1282, 1305, 1414, 1523, 1416, 1336, 1339, 1443, 1444, 1445, 1446, 1447, 1448, 1449, 1578, 1491, 1358, 1494, 1495, 1493, 1492, 1357, 1428, 1284, 1554, 1555, 1556, 1557, 1579, 1551, 1397, 1287, 1286, 1552, 1553, 1355, 1413, 1409, 1421, 1440, 1391, 1291, 1496, 1561, 1562, 1563, 1564, 1565, 1566, 1568, 1567, 1569, 1570, 1571, 1520, 1294, 1323, 1572, 1297, 1330, 1392, 1635, 1558, 1559, 1560, 1347, 1634, 1526, 1399, 1618, 1671, 1926, 376: 1646, 378: 1655, 1669, 1668, 1699, 400: 1658, 404: 1616, 1677, 1650, 410: 1682, 419: 1645, 1647, 429: 1675, 1649, 1648, 433: 1676, 435: 1654, 1680, 1689, 1711, 1653, 1690, 1691, 1652, 1666, 1667, 1705, 1697, 1700, 1706, 1707, 1702, 1703, 1708, 1701, 1704, 1695, 1673, 1685, 1686, 1688, 1684, 1678, 1679, 1670, 1681, 1683, 1672, 1687, 1692, 1693, 523: 1657, 1212, 1213, 1211, 1663, 1659, 1651, 1662, 1660, 1661, 1694, 1698, 1696, 1656, 1665, 1709, 1710, 1664, 1621, 1620, 1619, 1925, 579: 2265, 797: 2264, 2263}, + {8: 2268}, + // 1105 + {8: 503, 2266}, + {8: 501, 501}, + {1309, 1332, 1217, 1442, 1436, 1426, 10: 1280, 1625, 1477, 1511, 1504, 1497, 1507, 1500, 1499, 1501, 1517, 1509, 1503, 1515, 1516, 1513, 1514, 1502, 1498, 1505, 1506, 1508, 1512, 1510, 1547, 1453, 1451, 1452, 1314, 1623, 1226, 1441, 1244, 1288, 1246, 1225, 1628, 1263, 1434, 1299, 1335, 1522, 1521, 1270, 1338, 1298, 1476, 1221, 1231, 1340, 1439, 1341, 1257, 1518, 1519, 1438, 1326, 1350, 1273, 1278, 1430, 1431, 1283, 1289, 1384, 1632, 1432, 1433, 1219, 1222, 1224, 1223, 1238, 1626, 1482, 1427, 1243, 1249, 1261, 1262, 1250, 1485, 1405, 1318, 1319, 1279, 1450, 1290, 1631, 1630, 1415, 1295, 1300, 1633, 1402, 1214, 1529, 1215, 1218, 1460, 1387, 1304, 1624, 1310, 1348, 1349, 1345, 1530, 1531, 1532, 1406, 1576, 1478, 1479, 1467, 1480, 1227, 1394, 1533, 1312, 1396, 1228, 1381, 1481, 1640, 1636, 1230, 1329, 1232, 1233, 1313, 1311, 1234, 1408, 1534, 1535, 1404, 1235, 1536, 1468, 1236, 1537, 1538, 1627, 1240, 1388, 1324, 1483, 1417, 1241, 
1484, 1242, 1245, 1247, 1248, 1251, 1386, 1351, 1252, 1577, 1435, 1356, 1253, 1461, 1401, 1574, 1254, 1539, 1411, 1255, 1256, 1580, 1258, 1259, 1346, 1540, 1322, 1541, 1418, 1459, 1629, 1307, 1622, 1462, 1403, 1337, 1542, 1265, 1543, 1544, 1389, 1407, 1412, 1325, 1398, 1486, 1457, 1268, 1266, 1334, 1419, 1267, 1456, 1458, 1315, 1546, 1473, 1472, 1376, 1377, 1316, 1378, 1379, 1390, 1642, 1545, 1317, 1643, 1463, 1302, 1641, 1269, 1400, 1573, 1344, 1466, 1469, 1420, 1487, 1488, 1464, 1465, 1353, 1470, 1548, 1454, 1354, 1331, 1285, 1524, 1575, 1410, 1422, 1425, 1352, 1271, 1475, 1474, 1525, 1367, 1550, 1368, 1272, 1343, 1362, 1363, 1364, 1489, 1637, 1370, 1369, 1274, 1549, 1395, 1275, 1528, 1527, 1383, 1424, 1276, 1437, 1327, 1455, 1380, 1328, 1638, 1277, 1385, 1639, 1320, 1490, 1644, 1429, 1393, 1372, 1471, 1333, 1373, 1374, 1281, 1423, 1382, 1375, 1282, 1305, 1414, 1523, 1416, 1336, 1339, 1443, 1444, 1445, 1446, 1447, 1448, 1449, 1578, 1491, 1358, 1494, 1495, 1493, 1492, 1357, 1428, 1284, 1554, 1555, 1556, 1557, 1579, 1551, 1397, 1287, 1286, 1552, 1553, 1355, 1413, 1409, 1421, 1440, 1391, 1291, 1496, 1561, 1562, 1563, 1564, 1565, 1566, 1568, 1567, 1569, 1570, 1571, 1520, 1294, 1323, 1572, 1297, 1330, 1392, 1635, 1558, 1559, 1560, 1347, 1634, 1526, 1399, 1618, 1671, 1926, 376: 1646, 378: 1655, 1669, 1668, 1699, 400: 1658, 404: 1616, 1677, 1650, 410: 1682, 419: 1645, 1647, 429: 1675, 1649, 1648, 433: 1676, 435: 1654, 1680, 1689, 1711, 1653, 1690, 1691, 1652, 1666, 1667, 1705, 1697, 1700, 1706, 1707, 1702, 1703, 1708, 1701, 1704, 1695, 1673, 1685, 1686, 1688, 1684, 1678, 1679, 1670, 1681, 1683, 1672, 1687, 1692, 1693, 523: 1657, 1212, 1213, 1211, 1663, 1659, 1651, 1662, 1660, 1661, 1694, 1698, 1696, 1656, 1665, 1709, 1710, 1664, 1621, 1620, 1619, 1925, 579: 2267}, + {8: 502, 502}, + {6: 505, 505, 9: 505}, + // 1110 + {373: 2262, 639: 2270}, + {6: 506, 506, 9: 506}, + {8: 1095, 1095}, + {8: 1092, 2283}, + {8: 2276}, + // 1115 + {8: 2275}, + {6: 512, 512}, + {49: 2245, 373: 2279, 429: 2246, 478: 1177, 566: 2278, 1178, 1179, 1180, 646: 2277}, + {373: 2262, 639: 2261, 710: 2282}, + {6: 515, 515}, + // 1120 + {478: 1177, 566: 2280, 1178, 1179, 1180}, + {8: 2281}, + {6: 514, 514}, + {6: 516, 516, 9: 2269}, + {1309, 1332, 1217, 1442, 1436, 1426, 10: 1280, 1229, 1477, 1511, 1504, 1497, 1507, 1500, 1499, 1501, 1517, 1509, 1503, 1515, 1516, 1513, 1514, 1502, 1498, 1505, 1506, 1508, 1512, 1510, 1547, 1453, 1451, 1452, 1314, 1216, 1226, 1441, 1244, 1288, 1246, 1225, 1260, 1263, 1434, 1299, 1335, 1522, 1521, 1270, 1338, 1298, 1476, 1221, 1231, 1340, 1439, 1341, 1257, 1518, 1519, 1438, 1326, 1350, 1273, 1278, 1430, 1431, 1283, 1289, 1384, 1296, 1432, 1433, 1219, 1222, 1224, 1223, 1238, 1237, 1482, 1427, 1243, 1249, 1261, 1262, 1250, 1485, 1405, 1318, 1319, 1279, 1450, 1290, 1293, 1292, 1415, 1295, 1300, 1301, 1402, 1214, 1529, 1215, 1218, 1460, 1387, 1304, 1220, 1310, 1348, 1349, 1345, 1530, 1531, 1532, 1406, 1576, 1478, 1479, 1467, 1480, 1227, 1394, 1533, 1312, 1396, 1228, 1381, 1481, 1360, 1308, 1230, 1329, 1232, 1233, 1313, 1311, 1234, 1408, 1534, 1535, 1404, 1235, 1536, 1468, 1236, 1537, 1538, 1239, 1240, 1388, 1324, 1483, 1417, 1241, 1484, 1242, 1245, 1247, 1248, 1251, 1386, 1351, 1252, 1577, 1435, 1356, 1253, 1461, 1401, 1574, 1254, 1539, 1411, 1255, 1256, 1580, 1258, 1259, 1346, 1540, 1322, 1541, 1418, 1459, 1264, 1307, 1210, 1462, 1403, 1337, 1542, 1265, 1543, 1544, 1389, 1407, 1412, 1325, 1398, 1486, 1457, 1268, 1266, 1334, 1419, 1267, 1456, 1458, 1315, 1546, 1473, 1472, 1376, 1377, 1316, 1378, 
1379, 1390, 1365, 1545, 1317, 1366, 1463, 1302, 1361, 1269, 1400, 1573, 1344, 1466, 1469, 1420, 1487, 1488, 1464, 1465, 1353, 1470, 1548, 1454, 1354, 1331, 1285, 1524, 1575, 1410, 1422, 1425, 1352, 1271, 1475, 1474, 1525, 1367, 1550, 1368, 1272, 1343, 1362, 1363, 1364, 1489, 1321, 1370, 1369, 1274, 1549, 1395, 1275, 1528, 1527, 1383, 1424, 1276, 1437, 1327, 1455, 1380, 1328, 1342, 1277, 1385, 1359, 1320, 1490, 1371, 1429, 1393, 1372, 1471, 1333, 1373, 1374, 1281, 1423, 1382, 1375, 1282, 1305, 1414, 1523, 1416, 1336, 1339, 1443, 1444, 1445, 1446, 1447, 1448, 1449, 1578, 1491, 1358, 1494, 1495, 1493, 1492, 1357, 1428, 1284, 1554, 1555, 1556, 1557, 1579, 1551, 1397, 1287, 1286, 1552, 1553, 1355, 1413, 1409, 1421, 1440, 1391, 1291, 1496, 1561, 1562, 1563, 1564, 1565, 1566, 1568, 1567, 1569, 1570, 1571, 1520, 1294, 1323, 1572, 1297, 1330, 1392, 1306, 1558, 1559, 1560, 1347, 1303, 1526, 1399, 523: 2248, 1212, 1213, 1211, 554: 2284}, + // 1125 + {8: 1094, 1094}, + {518, 518, 518, 518, 518, 518, 10: 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 518, 580: 2238, 687: 2286}, + {1309, 1332, 1217, 1442, 1436, 1426, 10: 1280, 1229, 1477, 1511, 1504, 1497, 1507, 1500, 1499, 1501, 1517, 1509, 1503, 1515, 1516, 1513, 1514, 1502, 1498, 1505, 1506, 1508, 1512, 1510, 1547, 1453, 1451, 1452, 1314, 1216, 1226, 1441, 1244, 1288, 1246, 1225, 1260, 1263, 1434, 1299, 1335, 1522, 1521, 1270, 1338, 1298, 1476, 1221, 1231, 1340, 1439, 1341, 1257, 1518, 1519, 1438, 1326, 1350, 1273, 1278, 1430, 1431, 1283, 1289, 1384, 1296, 1432, 1433, 1219, 1222, 1224, 1223, 1238, 1237, 1482, 1427, 1243, 1249, 1261, 1262, 1250, 1485, 1405, 1318, 1319, 1279, 1450, 1290, 1293, 1292, 1415, 1295, 1300, 1301, 1402, 1214, 1529, 1215, 1218, 1460, 1387, 1304, 1220, 1310, 1348, 1349, 1345, 1530, 1531, 1532, 1406, 1576, 
1478, 1479, 1467, 1480, 1227, 1394, 1533, 1312, 1396, 1228, 1381, 1481, 1360, 1308, 1230, 1329, 1232, 1233, 1313, 1311, 1234, 1408, 1534, 1535, 1404, 1235, 1536, 1468, 1236, 1537, 1538, 1239, 1240, 1388, 1324, 1483, 1417, 1241, 1484, 1242, 1245, 1247, 1248, 1251, 1386, 1351, 1252, 1577, 1435, 1356, 1253, 1461, 1401, 1574, 1254, 1539, 1411, 1255, 1256, 1580, 1258, 1259, 1346, 1540, 1322, 1541, 1418, 1459, 1264, 1307, 1210, 1462, 1403, 1337, 1542, 1265, 1543, 1544, 1389, 1407, 1412, 1325, 1398, 1486, 1457, 1268, 1266, 1334, 1419, 1267, 1456, 1458, 1315, 1546, 1473, 1472, 1376, 1377, 1316, 1378, 1379, 1390, 1365, 1545, 1317, 1366, 1463, 1302, 1361, 1269, 1400, 1573, 1344, 1466, 1469, 1420, 1487, 1488, 1464, 1465, 1353, 1470, 1548, 1454, 1354, 1331, 1285, 1524, 1575, 1410, 1422, 1425, 1352, 1271, 1475, 1474, 1525, 1367, 1550, 1368, 1272, 1343, 1362, 1363, 1364, 1489, 1321, 1370, 1369, 1274, 1549, 1395, 1275, 1528, 1527, 1383, 1424, 1276, 1437, 1327, 1455, 1380, 1328, 1342, 1277, 1385, 1359, 1320, 1490, 1371, 1429, 1393, 1372, 1471, 1333, 1373, 1374, 1281, 1423, 1382, 1375, 1282, 1305, 1414, 1523, 1416, 1336, 1339, 1443, 1444, 1445, 1446, 1447, 1448, 1449, 1578, 1491, 1358, 1494, 1495, 1493, 1492, 1357, 1428, 1284, 1554, 1555, 1556, 1557, 1579, 1551, 1397, 1287, 1286, 1552, 1553, 1355, 1413, 1409, 1421, 1440, 1391, 1291, 1496, 1561, 1562, 1563, 1564, 1565, 1566, 1568, 1567, 1569, 1570, 1571, 1520, 1294, 1323, 1572, 1297, 1330, 1392, 1306, 1558, 1559, 1560, 1347, 1303, 1526, 1399, 523: 1584, 1212, 1213, 1211, 555: 2287}, + {49: 2245, 373: 2241, 429: 2246, 478: 1177, 480: 2244, 566: 2243, 1178, 1179, 1180, 646: 2242, 686: 2288}, + {6: 519, 519}, + // 1130 + {6: 982, 982}, + {403: 2295}, + {6: 132, 132}, + {6: 131, 131}, + {6: 130, 130}, + // 1135 + {6: 129, 129}, + {66: 2299, 100: 2298, 378: 2296, 738: 2297}, + {410: 1175, 478: 1177, 566: 2291, 1178, 1179, 1180, 1168, 1174, 593: 2292, 599: 2293, 601: 2294, 629: 2301}, + {410: 1175, 478: 1177, 566: 2291, 1178, 1179, 1180, 1168, 1174, 593: 2292, 599: 2293, 601: 2294, 629: 2300}, + {410: 979, 478: 979, 570: 979, 979}, + // 1140 + {410: 978, 478: 978, 570: 978, 978}, + {6: 980, 980}, + {6: 981, 981}, + {999, 999, 999, 999, 999, 999, 10: 999, 999, 999, 999, 999, 999, 999, 999, 999, 999, 999, 999, 999, 999, 999, 999, 999, 999, 999, 999, 999, 999, 999, 999, 999, 999, 999, 999, 999, 999, 999, 999, 999, 999, 999, 999, 999, 999, 999, 999, 999, 999, 999, 999, 999, 999, 999, 999, 999, 999, 999, 999, 999, 999, 999, 999, 999, 999, 999, 999, 999, 999, 999, 999, 999, 999, 999, 999, 999, 999, 999, 999, 999, 999, 999, 999, 999, 999, 999, 999, 999, 999, 999, 999, 999, 999, 999, 999, 999, 999, 999, 999, 999, 999, 999, 999, 999, 999, 999, 999, 999, 999, 999, 999, 999, 999, 999, 999, 999, 999, 999, 999, 999, 999, 999, 999, 999, 999, 999, 999, 999, 999, 999, 999, 999, 999, 999, 999, 999, 999, 999, 999, 999, 999, 999, 999, 999, 999, 999, 999, 999, 999, 999, 999, 999, 999, 999, 999, 999, 999, 999, 999, 999, 999, 999, 999, 999, 999, 999, 999, 999, 999, 999, 999, 999, 999, 999, 999, 999, 999, 999, 999, 999, 999, 999, 999, 999, 999, 999, 999, 999, 999, 999, 999, 999, 999, 999, 999, 999, 999, 999, 999, 999, 999, 999, 999, 999, 999, 999, 999, 999, 999, 999, 999, 999, 999, 999, 999, 999, 999, 999, 999, 999, 999, 999, 999, 999, 999, 999, 999, 999, 999, 999, 999, 999, 999, 999, 999, 999, 999, 999, 999, 999, 999, 999, 999, 999, 999, 999, 999, 999, 999, 999, 999, 999, 999, 999, 999, 999, 999, 999, 999, 999, 999, 999, 999, 999, 999, 999, 999, 999, 999, 999, 999, 999, 999, 999, 999, 
999, 999, 999, 999, 999, 999, 999, 999, 999, 999, 999, 999, 999, 999, 999, 999, 999, 999, 999, 999, 999, 999, 999, 999, 999, 999, 999, 999, 999, 999, 999, 999, 999, 999, 999, 999, 999, 999, 999, 999, 999, 999, 999, 999, 999, 999, 999, 999, 999, 999, 999, 999, 999, 999, 999, 999, 999, 999, 999, 999, 999, 999, 999, 999, 999, 999, 999, 999, 999, 999, 999, 999, 999, 999, 999, 999, 999, 999, 999, 999, 999, 999, 999, 999, 999, 999, 999, 999, 999, 999, 999, 999, 999, 999, 405: 999}, + {915, 915, 915, 915, 915, 915, 10: 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 405: 2311, 575: 2324}, + // 1145 + {915, 915, 915, 915, 915, 915, 10: 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 
915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 405: 2311, 575: 2320}, + {43: 2309, 573: 2308, 792: 2307}, + {43: 994, 573: 994}, + {915, 915, 915, 915, 915, 915, 10: 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 405: 2311, 575: 2310}, + {990, 990, 990, 990, 990, 990, 10: 990, 990, 990, 990, 990, 990, 990, 990, 990, 990, 990, 990, 990, 990, 990, 990, 990, 990, 990, 990, 990, 990, 990, 990, 990, 990, 990, 990, 990, 990, 990, 990, 990, 990, 990, 990, 990, 990, 990, 990, 990, 990, 990, 990, 990, 990, 990, 990, 990, 990, 990, 990, 990, 990, 990, 990, 990, 990, 990, 990, 990, 990, 990, 990, 990, 990, 990, 990, 990, 990, 990, 990, 990, 990, 990, 990, 990, 990, 990, 990, 990, 990, 990, 990, 990, 990, 990, 990, 990, 990, 990, 990, 990, 990, 990, 990, 990, 990, 990, 990, 990, 990, 990, 990, 990, 990, 990, 990, 990, 990, 990, 990, 990, 990, 990, 990, 990, 990, 990, 990, 990, 990, 990, 990, 990, 990, 990, 990, 990, 990, 990, 990, 990, 990, 990, 990, 990, 990, 990, 990, 990, 990, 990, 990, 990, 990, 990, 990, 990, 990, 990, 990, 990, 990, 990, 990, 990, 990, 990, 990, 990, 990, 990, 990, 990, 990, 990, 990, 990, 990, 990, 990, 990, 990, 990, 990, 990, 990, 990, 990, 990, 990, 
990, 990, 990, 990, 990, 990, 990, 990, 990, 990, 990, 990, 990, 990, 990, 990, 990, 990, 990, 990, 990, 990, 990, 990, 990, 990, 990, 990, 990, 990, 990, 990, 990, 990, 990, 990, 990, 990, 990, 990, 990, 990, 990, 990, 990, 990, 990, 990, 990, 990, 990, 990, 990, 990, 990, 990, 990, 990, 990, 990, 990, 990, 990, 990, 990, 990, 990, 990, 990, 990, 990, 990, 990, 990, 990, 990, 990, 990, 990, 990, 990, 990, 990, 990, 990, 990, 990, 990, 990, 990, 990, 990, 990, 990, 990, 990, 990, 990, 990, 990, 990, 990, 990, 990, 990, 990, 990, 990, 990, 990, 990, 990, 990, 990, 990, 990, 990, 990, 990, 990, 990, 990, 990, 990, 990, 990, 990, 990, 990, 990, 990, 990, 990, 990, 990, 990, 990, 990, 990, 990, 990, 990, 990, 990, 990, 990, 990, 990, 990, 990, 990, 990, 990, 990, 990, 990, 990, 990, 990, 990, 990, 990, 990, 990, 990, 990, 990, 990, 990, 990, 990, 990, 990, 990, 990, 990, 990, 990, 990, 990, 405: 990}, + // 1150 + {989, 989, 989, 989, 989, 989, 10: 989, 989, 989, 989, 989, 989, 989, 989, 989, 989, 989, 989, 989, 989, 989, 989, 989, 989, 989, 989, 989, 989, 989, 989, 989, 989, 989, 989, 989, 989, 989, 989, 989, 989, 989, 989, 989, 989, 989, 989, 989, 989, 989, 989, 989, 989, 989, 989, 989, 989, 989, 989, 989, 989, 989, 989, 989, 989, 989, 989, 989, 989, 989, 989, 989, 989, 989, 989, 989, 989, 989, 989, 989, 989, 989, 989, 989, 989, 989, 989, 989, 989, 989, 989, 989, 989, 989, 989, 989, 989, 989, 989, 989, 989, 989, 989, 989, 989, 989, 989, 989, 989, 989, 989, 989, 989, 989, 989, 989, 989, 989, 989, 989, 989, 989, 989, 989, 989, 989, 989, 989, 989, 989, 989, 989, 989, 989, 989, 989, 989, 989, 989, 989, 989, 989, 989, 989, 989, 989, 989, 989, 989, 989, 989, 989, 989, 989, 989, 989, 989, 989, 989, 989, 989, 989, 989, 989, 989, 989, 989, 989, 989, 989, 989, 989, 989, 989, 989, 989, 989, 989, 989, 989, 989, 989, 989, 989, 989, 989, 989, 989, 989, 989, 989, 989, 989, 989, 989, 989, 989, 989, 989, 989, 989, 989, 989, 989, 989, 989, 989, 989, 989, 989, 989, 989, 989, 989, 989, 989, 989, 989, 989, 989, 989, 989, 989, 989, 989, 989, 989, 989, 989, 989, 989, 989, 989, 989, 989, 989, 989, 989, 989, 989, 989, 989, 989, 989, 989, 989, 989, 989, 989, 989, 989, 989, 989, 989, 989, 989, 989, 989, 989, 989, 989, 989, 989, 989, 989, 989, 989, 989, 989, 989, 989, 989, 989, 989, 989, 989, 989, 989, 989, 989, 989, 989, 989, 989, 989, 989, 989, 989, 989, 989, 989, 989, 989, 989, 989, 989, 989, 989, 989, 989, 989, 989, 989, 989, 989, 989, 989, 989, 989, 989, 989, 989, 989, 989, 989, 989, 989, 989, 989, 989, 989, 989, 989, 989, 989, 989, 989, 989, 989, 989, 989, 989, 989, 989, 989, 989, 989, 989, 989, 989, 989, 989, 989, 989, 989, 989, 989, 989, 989, 989, 989, 989, 989, 989, 989, 989, 989, 989, 989, 989, 989, 989, 989, 989, 989, 989, 989, 989, 989, 405: 989}, + {1309, 1332, 1217, 1442, 1436, 1426, 10: 1280, 1229, 1477, 1511, 1504, 1497, 1507, 1500, 1499, 1501, 1517, 1509, 1503, 1515, 1516, 1513, 1514, 1502, 1498, 1505, 1506, 1508, 1512, 1510, 1547, 1453, 1451, 1452, 1314, 1216, 1226, 1441, 1244, 1288, 1246, 1225, 1260, 1263, 1434, 1299, 1335, 1522, 1521, 1270, 1338, 1298, 1476, 1221, 1231, 1340, 1439, 1341, 1257, 1518, 1519, 1438, 1326, 1350, 1273, 1278, 1430, 1431, 1283, 1289, 1384, 1296, 1432, 1433, 1219, 1222, 1224, 1223, 1238, 1237, 1482, 1427, 1243, 1249, 1261, 1262, 1250, 1485, 1405, 1318, 1319, 1279, 1450, 1290, 1293, 1292, 1415, 1295, 1300, 1301, 1402, 1214, 1529, 1215, 1218, 1460, 1387, 1304, 1220, 1310, 1348, 1349, 1345, 1530, 1531, 1532, 1406, 1576, 1478, 1479, 1467, 1480, 1227, 1394, 1533, 1312, 1396, 1228, 
1381, 1481, 1360, 1308, 1230, 1329, 1232, 1233, 1313, 1311, 1234, 1408, 1534, 1535, 1404, 1235, 1536, 1468, 1236, 1537, 1538, 1239, 1240, 1388, 1324, 1483, 1417, 1241, 1484, 1242, 1245, 1247, 1248, 1251, 1386, 1351, 1252, 1577, 1435, 1356, 1253, 1461, 1401, 1574, 1254, 1539, 1411, 1255, 1256, 1580, 1258, 1259, 1346, 1540, 1322, 1541, 1418, 1459, 1264, 1307, 1210, 1462, 1403, 1337, 1542, 1265, 1543, 1544, 1389, 1407, 1412, 1325, 1398, 1486, 1457, 1268, 1266, 1334, 1419, 1267, 1456, 1458, 1315, 1546, 1473, 1472, 1376, 1377, 1316, 1378, 1379, 1390, 1365, 1545, 1317, 1366, 1463, 1302, 1361, 1269, 1400, 1573, 1344, 1466, 1469, 1420, 1487, 1488, 1464, 1465, 1353, 1470, 1548, 1454, 1354, 1331, 1285, 1524, 1575, 1410, 1422, 1425, 1352, 1271, 1475, 1474, 1525, 1367, 1550, 1368, 1272, 1343, 1362, 1363, 1364, 1489, 1321, 1370, 1369, 1274, 1549, 1395, 1275, 1528, 1527, 1383, 1424, 1276, 1437, 1327, 1455, 1380, 1328, 1342, 1277, 1385, 1359, 1320, 1490, 1371, 1429, 1393, 1372, 1471, 1333, 1373, 1374, 1281, 1423, 1382, 1375, 1282, 1305, 1414, 1523, 1416, 1336, 1339, 1443, 1444, 1445, 1446, 1447, 1448, 1449, 1578, 1491, 1358, 1494, 1495, 1493, 1492, 1357, 1428, 1284, 1554, 1555, 1556, 1557, 1579, 1551, 1397, 1287, 1286, 1552, 1553, 1355, 1413, 1409, 1421, 1440, 1391, 1291, 1496, 1561, 1562, 1563, 1564, 1565, 1566, 1568, 1567, 1569, 1570, 1571, 1520, 1294, 1323, 1572, 1297, 1330, 1392, 1306, 1558, 1559, 1560, 1347, 1303, 1526, 1399, 523: 1584, 1212, 1213, 1211, 555: 2314, 706: 2313}, + {671: 2312}, + {914, 914, 914, 914, 914, 914, 10: 914, 914, 914, 914, 914, 914, 914, 914, 914, 914, 914, 914, 914, 914, 914, 914, 914, 914, 914, 914, 914, 914, 914, 914, 914, 914, 914, 914, 914, 914, 914, 914, 914, 914, 914, 914, 914, 914, 914, 914, 914, 914, 914, 914, 914, 914, 914, 914, 914, 914, 914, 914, 914, 914, 914, 914, 914, 914, 914, 914, 914, 914, 914, 914, 914, 914, 914, 914, 914, 914, 914, 914, 914, 914, 914, 914, 914, 914, 914, 914, 914, 914, 914, 914, 914, 914, 914, 914, 914, 914, 914, 914, 914, 914, 914, 914, 914, 914, 914, 914, 914, 914, 914, 914, 914, 914, 914, 914, 914, 914, 914, 914, 914, 914, 914, 914, 914, 914, 914, 914, 914, 914, 914, 914, 914, 914, 914, 914, 914, 914, 914, 914, 914, 914, 914, 914, 914, 914, 914, 914, 914, 914, 914, 914, 914, 914, 914, 914, 914, 914, 914, 914, 914, 914, 914, 914, 914, 914, 914, 914, 914, 914, 914, 914, 914, 914, 914, 914, 914, 914, 914, 914, 914, 914, 914, 914, 914, 914, 914, 914, 914, 914, 914, 914, 914, 914, 914, 914, 914, 914, 914, 914, 914, 914, 914, 914, 914, 914, 914, 914, 914, 914, 914, 914, 914, 914, 914, 914, 914, 914, 914, 914, 914, 914, 914, 914, 914, 914, 914, 914, 914, 914, 914, 914, 914, 914, 914, 914, 914, 914, 914, 914, 914, 914, 914, 914, 914, 914, 914, 914, 914, 914, 914, 914, 914, 914, 914, 914, 914, 914, 914, 914, 914, 914, 914, 914, 914, 914, 914, 914, 914, 914, 914, 914, 914, 914, 914, 914, 914, 914, 914, 914, 914, 914, 914, 914, 914, 914, 914, 914, 914, 914, 914, 914, 914, 914, 914, 914, 914, 914, 914, 914, 914, 914, 914, 914, 914, 914, 914, 914, 914, 914, 914, 914, 914, 914, 914, 914, 914, 914, 914, 914, 914, 914, 914, 914, 914, 914, 914, 914, 914, 914, 914, 914, 914, 914, 914, 914, 914, 914, 914, 914, 914, 914, 914, 914, 914, 914, 914, 914, 914, 914, 914, 914, 914, 914, 914, 914, 914, 914, 914, 914, 914, 914, 914, 914, 914, 914, 914, 914, 914, 914}, + {6: 993, 993, 9: 2318, 485: 2317, 487: 2316, 696: 2315}, + // 1155 + {6: 336, 336, 9: 336, 485: 336, 487: 336}, + {6: 996, 996}, + {6: 992, 992, 9: 992}, + {6: 991, 991, 9: 991}, + {1309, 1332, 
1217, 1442, 1436, 1426, 10: 1280, 1229, 1477, 1511, 1504, 1497, 1507, 1500, 1499, 1501, 1517, 1509, 1503, 1515, 1516, 1513, 1514, 1502, 1498, 1505, 1506, 1508, 1512, 1510, 1547, 1453, 1451, 1452, 1314, 1216, 1226, 1441, 1244, 1288, 1246, 1225, 1260, 1263, 1434, 1299, 1335, 1522, 1521, 1270, 1338, 1298, 1476, 1221, 1231, 1340, 1439, 1341, 1257, 1518, 1519, 1438, 1326, 1350, 1273, 1278, 1430, 1431, 1283, 1289, 1384, 1296, 1432, 1433, 1219, 1222, 1224, 1223, 1238, 1237, 1482, 1427, 1243, 1249, 1261, 1262, 1250, 1485, 1405, 1318, 1319, 1279, 1450, 1290, 1293, 1292, 1415, 1295, 1300, 1301, 1402, 1214, 1529, 1215, 1218, 1460, 1387, 1304, 1220, 1310, 1348, 1349, 1345, 1530, 1531, 1532, 1406, 1576, 1478, 1479, 1467, 1480, 1227, 1394, 1533, 1312, 1396, 1228, 1381, 1481, 1360, 1308, 1230, 1329, 1232, 1233, 1313, 1311, 1234, 1408, 1534, 1535, 1404, 1235, 1536, 1468, 1236, 1537, 1538, 1239, 1240, 1388, 1324, 1483, 1417, 1241, 1484, 1242, 1245, 1247, 1248, 1251, 1386, 1351, 1252, 1577, 1435, 1356, 1253, 1461, 1401, 1574, 1254, 1539, 1411, 1255, 1256, 1580, 1258, 1259, 1346, 1540, 1322, 1541, 1418, 1459, 1264, 1307, 1210, 1462, 1403, 1337, 1542, 1265, 1543, 1544, 1389, 1407, 1412, 1325, 1398, 1486, 1457, 1268, 1266, 1334, 1419, 1267, 1456, 1458, 1315, 1546, 1473, 1472, 1376, 1377, 1316, 1378, 1379, 1390, 1365, 1545, 1317, 1366, 1463, 1302, 1361, 1269, 1400, 1573, 1344, 1466, 1469, 1420, 1487, 1488, 1464, 1465, 1353, 1470, 1548, 1454, 1354, 1331, 1285, 1524, 1575, 1410, 1422, 1425, 1352, 1271, 1475, 1474, 1525, 1367, 1550, 1368, 1272, 1343, 1362, 1363, 1364, 1489, 1321, 1370, 1369, 1274, 1549, 1395, 1275, 1528, 1527, 1383, 1424, 1276, 1437, 1327, 1455, 1380, 1328, 1342, 1277, 1385, 1359, 1320, 1490, 1371, 1429, 1393, 1372, 1471, 1333, 1373, 1374, 1281, 1423, 1382, 1375, 1282, 1305, 1414, 1523, 1416, 1336, 1339, 1443, 1444, 1445, 1446, 1447, 1448, 1449, 1578, 1491, 1358, 1494, 1495, 1493, 1492, 1357, 1428, 1284, 1554, 1555, 1556, 1557, 1579, 1551, 1397, 1287, 1286, 1552, 1553, 1355, 1413, 1409, 1421, 1440, 1391, 1291, 1496, 1561, 1562, 1563, 1564, 1565, 1566, 1568, 1567, 1569, 1570, 1571, 1520, 1294, 1323, 1572, 1297, 1330, 1392, 1306, 1558, 1559, 1560, 1347, 1303, 1526, 1399, 523: 1584, 1212, 1213, 1211, 555: 2319}, + // 1160 + {6: 335, 335, 9: 335, 485: 335, 487: 335}, + {1309, 1332, 1217, 1442, 1436, 1426, 10: 1280, 1229, 1477, 1511, 1504, 1497, 1507, 1500, 1499, 1501, 1517, 1509, 1503, 1515, 1516, 1513, 1514, 1502, 1498, 1505, 1506, 1508, 1512, 1510, 1547, 1453, 1451, 1452, 1314, 1216, 1226, 1441, 1244, 1288, 1246, 1225, 1260, 1263, 1434, 1299, 1335, 1522, 1521, 1270, 1338, 1298, 1476, 1221, 1231, 1340, 1439, 1341, 1257, 1518, 1519, 1438, 1326, 1350, 1273, 1278, 1430, 1431, 1283, 1289, 1384, 1296, 1432, 1433, 1219, 1222, 1224, 1223, 1238, 1237, 1482, 1427, 1243, 1249, 1261, 1262, 1250, 1485, 1405, 1318, 1319, 1279, 1450, 1290, 1293, 1292, 1415, 1295, 1300, 1301, 1402, 1214, 1529, 1215, 1218, 1460, 1387, 1304, 1220, 1310, 1348, 1349, 1345, 1530, 1531, 1532, 1406, 1576, 1478, 1479, 1467, 1480, 1227, 1394, 1533, 1312, 1396, 1228, 1381, 1481, 1360, 1308, 1230, 1329, 1232, 1233, 1313, 1311, 1234, 1408, 1534, 1535, 1404, 1235, 1536, 1468, 1236, 1537, 1538, 1239, 1240, 1388, 1324, 1483, 1417, 1241, 1484, 1242, 1245, 1247, 1248, 1251, 1386, 1351, 1252, 1577, 1435, 1356, 1253, 1461, 1401, 1574, 1254, 1539, 1411, 1255, 1256, 1580, 1258, 1259, 1346, 1540, 1322, 1541, 1418, 1459, 1264, 1307, 1210, 1462, 1403, 1337, 1542, 1265, 1543, 1544, 1389, 1407, 1412, 1325, 1398, 1486, 1457, 1268, 1266, 1334, 1419, 1267, 
1456, 1458, 1315, 1546, 1473, 1472, 1376, 1377, 1316, 1378, 1379, 1390, 1365, 1545, 1317, 1366, 1463, 1302, 1361, 1269, 1400, 1573, 1344, 1466, 1469, 1420, 1487, 1488, 1464, 1465, 1353, 1470, 1548, 1454, 1354, 1331, 1285, 1524, 1575, 1410, 1422, 1425, 1352, 1271, 1475, 1474, 1525, 1367, 1550, 1368, 1272, 1343, 1362, 1363, 1364, 1489, 1321, 1370, 1369, 1274, 1549, 1395, 1275, 1528, 1527, 1383, 1424, 1276, 1437, 1327, 1455, 1380, 1328, 1342, 1277, 1385, 1359, 1320, 1490, 1371, 1429, 1393, 1372, 1471, 1333, 1373, 1374, 1281, 1423, 1382, 1375, 1282, 1305, 1414, 1523, 1416, 1336, 1339, 1443, 1444, 1445, 1446, 1447, 1448, 1449, 1578, 1491, 1358, 1494, 1495, 1493, 1492, 1357, 1428, 1284, 1554, 1555, 1556, 1557, 1579, 1551, 1397, 1287, 1286, 1552, 1553, 1355, 1413, 1409, 1421, 1440, 1391, 1291, 1496, 1561, 1562, 1563, 1564, 1565, 1566, 1568, 1567, 1569, 1570, 1571, 1520, 1294, 1323, 1572, 1297, 1330, 1392, 1306, 1558, 1559, 1560, 1347, 1303, 1526, 1399, 523: 2321, 1212, 1213, 1211}, + {385: 2322}, + {1309, 1332, 1217, 1442, 1436, 1426, 10: 1280, 1229, 1477, 1511, 1504, 1497, 1507, 1500, 1499, 1501, 1517, 1509, 1503, 1515, 1516, 1513, 1514, 1502, 1498, 1505, 1506, 1508, 1512, 1510, 1547, 1453, 1451, 1452, 1314, 1216, 1226, 1441, 1244, 1288, 1246, 1225, 1260, 1263, 1434, 1299, 1335, 1522, 1521, 1270, 1338, 1298, 1476, 1221, 1231, 1340, 1439, 1341, 1257, 1518, 1519, 1438, 1326, 1350, 1273, 1278, 1430, 1431, 1283, 1289, 1384, 1296, 1432, 1433, 1219, 1222, 1224, 1223, 1238, 1237, 1482, 1427, 1243, 1249, 1261, 1262, 1250, 1485, 1405, 1318, 1319, 1279, 1450, 1290, 1293, 1292, 1415, 1295, 1300, 1301, 1402, 1214, 1529, 1215, 1218, 1460, 1387, 1304, 1220, 1310, 1348, 1349, 1345, 1530, 1531, 1532, 1406, 1576, 1478, 1479, 1467, 1480, 1227, 1394, 1533, 1312, 1396, 1228, 1381, 1481, 1360, 1308, 1230, 1329, 1232, 1233, 1313, 1311, 1234, 1408, 1534, 1535, 1404, 1235, 1536, 1468, 1236, 1537, 1538, 1239, 1240, 1388, 1324, 1483, 1417, 1241, 1484, 1242, 1245, 1247, 1248, 1251, 1386, 1351, 1252, 1577, 1435, 1356, 1253, 1461, 1401, 1574, 1254, 1539, 1411, 1255, 1256, 1580, 1258, 1259, 1346, 1540, 1322, 1541, 1418, 1459, 1264, 1307, 1210, 1462, 1403, 1337, 1542, 1265, 1543, 1544, 1389, 1407, 1412, 1325, 1398, 1486, 1457, 1268, 1266, 1334, 1419, 1267, 1456, 1458, 1315, 1546, 1473, 1472, 1376, 1377, 1316, 1378, 1379, 1390, 1365, 1545, 1317, 1366, 1463, 1302, 1361, 1269, 1400, 1573, 1344, 1466, 1469, 1420, 1487, 1488, 1464, 1465, 1353, 1470, 1548, 1454, 1354, 1331, 1285, 1524, 1575, 1410, 1422, 1425, 1352, 1271, 1475, 1474, 1525, 1367, 1550, 1368, 1272, 1343, 1362, 1363, 1364, 1489, 1321, 1370, 1369, 1274, 1549, 1395, 1275, 1528, 1527, 1383, 1424, 1276, 1437, 1327, 1455, 1380, 1328, 1342, 1277, 1385, 1359, 1320, 1490, 1371, 1429, 1393, 1372, 1471, 1333, 1373, 1374, 1281, 1423, 1382, 1375, 1282, 1305, 1414, 1523, 1416, 1336, 1339, 1443, 1444, 1445, 1446, 1447, 1448, 1449, 1578, 1491, 1358, 1494, 1495, 1493, 1492, 1357, 1428, 1284, 1554, 1555, 1556, 1557, 1579, 1551, 1397, 1287, 1286, 1552, 1553, 1355, 1413, 1409, 1421, 1440, 1391, 1291, 1496, 1561, 1562, 1563, 1564, 1565, 1566, 1568, 1567, 1569, 1570, 1571, 1520, 1294, 1323, 1572, 1297, 1330, 1392, 1306, 1558, 1559, 1560, 1347, 1303, 1526, 1399, 523: 1584, 1212, 1213, 1211, 555: 2323}, + {6: 997, 997}, + // 1165 + {1309, 1332, 1217, 1442, 1436, 1426, 10: 1280, 1229, 1477, 1511, 1504, 1497, 1507, 1500, 1499, 1501, 1517, 1509, 1503, 1515, 1516, 1513, 1514, 1502, 1498, 1505, 1506, 1508, 1512, 1510, 1547, 1453, 1451, 1452, 1314, 1216, 1226, 1441, 1244, 1288, 1246, 1225, 1260, 
1263, 1434, 1299, 1335, 1522, 1521, 1270, 1338, 1298, 1476, 1221, 1231, 1340, 1439, 1341, 1257, 1518, 1519, 1438, 1326, 1350, 1273, 1278, 1430, 1431, 1283, 1289, 1384, 1296, 1432, 1433, 1219, 1222, 1224, 1223, 1238, 1237, 1482, 1427, 1243, 1249, 1261, 1262, 1250, 1485, 1405, 1318, 1319, 1279, 1450, 1290, 1293, 1292, 1415, 1295, 1300, 1301, 1402, 1214, 1529, 1215, 1218, 1460, 1387, 1304, 1220, 1310, 1348, 1349, 1345, 1530, 1531, 1532, 1406, 1576, 1478, 1479, 1467, 1480, 1227, 1394, 1533, 1312, 1396, 1228, 1381, 1481, 1360, 1308, 1230, 1329, 1232, 1233, 1313, 1311, 1234, 1408, 1534, 1535, 1404, 1235, 1536, 1468, 1236, 1537, 1538, 1239, 1240, 1388, 1324, 1483, 1417, 1241, 1484, 1242, 1245, 1247, 1248, 1251, 1386, 1351, 1252, 1577, 1435, 1356, 1253, 1461, 1401, 1574, 1254, 1539, 1411, 1255, 1256, 1580, 1258, 1259, 1346, 1540, 1322, 1541, 1418, 1459, 1264, 1307, 1210, 1462, 1403, 1337, 1542, 1265, 1543, 1544, 1389, 1407, 1412, 1325, 1398, 1486, 1457, 1268, 1266, 1334, 1419, 1267, 1456, 1458, 1315, 1546, 1473, 1472, 1376, 1377, 1316, 1378, 1379, 1390, 1365, 1545, 1317, 1366, 1463, 1302, 1361, 1269, 1400, 1573, 1344, 1466, 1469, 1420, 1487, 1488, 1464, 1465, 1353, 1470, 1548, 1454, 1354, 1331, 1285, 1524, 1575, 1410, 1422, 1425, 1352, 1271, 1475, 1474, 1525, 1367, 1550, 1368, 1272, 1343, 1362, 1363, 1364, 1489, 1321, 1370, 1369, 1274, 1549, 1395, 1275, 1528, 1527, 1383, 1424, 1276, 1437, 1327, 1455, 1380, 1328, 1342, 1277, 1385, 1359, 1320, 1490, 1371, 1429, 1393, 1372, 1471, 1333, 1373, 1374, 1281, 1423, 1382, 1375, 1282, 1305, 1414, 1523, 1416, 1336, 1339, 1443, 1444, 1445, 1446, 1447, 1448, 1449, 1578, 1491, 1358, 1494, 1495, 1493, 1492, 1357, 1428, 1284, 1554, 1555, 1556, 1557, 1579, 1551, 1397, 1287, 1286, 1552, 1553, 1355, 1413, 1409, 1421, 1440, 1391, 1291, 1496, 1561, 1562, 1563, 1564, 1565, 1566, 1568, 1567, 1569, 1570, 1571, 1520, 1294, 1323, 1572, 1297, 1330, 1392, 1306, 1558, 1559, 1560, 1347, 1303, 1526, 1399, 523: 1209, 1212, 1213, 1211, 592: 2325}, + {6: 998, 998}, + {68: 2328, 398: 328, 771: 2327}, + {398: 2329}, + {398: 327}, + // 1170 + {1309, 1332, 1217, 1442, 1436, 1426, 10: 1280, 1229, 1477, 1511, 1504, 1497, 1507, 1500, 1499, 1501, 1517, 1509, 1503, 1515, 1516, 1513, 1514, 1502, 1498, 1505, 1506, 1508, 1512, 1510, 1547, 1453, 1451, 1452, 1314, 1216, 1226, 1441, 1244, 1288, 1246, 1225, 1260, 1263, 1434, 1299, 1335, 1522, 1521, 1270, 1338, 1298, 1476, 1221, 1231, 1340, 1439, 1341, 1257, 1518, 1519, 1438, 1326, 1350, 1273, 1278, 1430, 1431, 1283, 1289, 1384, 1296, 1432, 1433, 1219, 1222, 1224, 1223, 1238, 1237, 1482, 1427, 1243, 1249, 1261, 1262, 1250, 1485, 1405, 1318, 1319, 1279, 1450, 1290, 1293, 1292, 1415, 1295, 1300, 1301, 1402, 1214, 1529, 1215, 1218, 1460, 1387, 1304, 1220, 1310, 1348, 1349, 1345, 1530, 1531, 1532, 1406, 1576, 1478, 1479, 1467, 1480, 1227, 1394, 1533, 1312, 1396, 1228, 1381, 1481, 1360, 1308, 1230, 1329, 1232, 1233, 1313, 1311, 1234, 1408, 1534, 1535, 1404, 1235, 1536, 1468, 1236, 1537, 1538, 1239, 1240, 1388, 1324, 1483, 1417, 1241, 1484, 1242, 1245, 1247, 1248, 1251, 1386, 1351, 1252, 1577, 1435, 1356, 1253, 1461, 1401, 1574, 1254, 1539, 1411, 1255, 1256, 1580, 1258, 1259, 1346, 1540, 1322, 1541, 1418, 1459, 1264, 1307, 1210, 1462, 1403, 1337, 1542, 1265, 1543, 1544, 1389, 1407, 1412, 1325, 1398, 1486, 1457, 1268, 1266, 1334, 1419, 1267, 1456, 1458, 1315, 1546, 1473, 1472, 1376, 1377, 1316, 1378, 1379, 1390, 1365, 1545, 1317, 1366, 1463, 1302, 1361, 1269, 1400, 1573, 1344, 1466, 1469, 1420, 1487, 1488, 1464, 1465, 1353, 1470, 1548, 1454, 1354, 1331, 
1285, 1524, 1575, 1410, 1422, 1425, 1352, 1271, 1475, 1474, 1525, 1367, 1550, 1368, 1272, 1343, 1362, 1363, 1364, 1489, 1321, 1370, 1369, 1274, 1549, 1395, 1275, 1528, 1527, 1383, 1424, 1276, 1437, 1327, 1455, 1380, 1328, 1342, 1277, 1385, 1359, 1320, 1490, 1371, 1429, 1393, 1372, 1471, 1333, 1373, 1374, 1281, 1423, 1382, 1375, 1282, 1305, 1414, 1523, 1416, 1336, 1339, 1443, 1444, 1445, 1446, 1447, 1448, 1449, 1578, 1491, 1358, 1494, 1495, 1493, 1492, 1357, 1428, 1284, 1554, 1555, 1556, 1557, 1579, 1551, 1397, 1287, 1286, 1552, 1553, 1355, 1413, 1409, 1421, 1440, 1391, 1291, 1496, 1561, 1562, 1563, 1564, 1565, 1566, 1568, 1567, 1569, 1570, 1571, 1520, 1294, 1323, 1572, 1297, 1330, 1392, 1306, 1558, 1559, 1560, 1347, 1303, 1526, 1399, 523: 1584, 1212, 1213, 1211, 555: 2330}, + {1309, 1332, 1217, 1442, 1436, 1426, 308, 308, 10: 1280, 1229, 1477, 1511, 1504, 1497, 1507, 1500, 1499, 1501, 1517, 1509, 1503, 1515, 1516, 1513, 1514, 1502, 1498, 1505, 1506, 1508, 1512, 1510, 1547, 1453, 1451, 1452, 1314, 1216, 1226, 1441, 1244, 1288, 1246, 1225, 1260, 1263, 1434, 1299, 1335, 1522, 1521, 1270, 1338, 1298, 1476, 1221, 1231, 1340, 1439, 1341, 1257, 1518, 1519, 1438, 1326, 1350, 1273, 1278, 1430, 1431, 1283, 1289, 1384, 1296, 1432, 1433, 1219, 1222, 1224, 1223, 1238, 1237, 1482, 1427, 1243, 1249, 1261, 1262, 1250, 1485, 1405, 1318, 1319, 1279, 1450, 1290, 1293, 1292, 1415, 1295, 1300, 1301, 1402, 1214, 1529, 1215, 1218, 1460, 1387, 1304, 1220, 1310, 1348, 1349, 1345, 1530, 1531, 1532, 1406, 1576, 1478, 1479, 1467, 1480, 1227, 1394, 1533, 1312, 1396, 1228, 1381, 1481, 1360, 1308, 1230, 1329, 1232, 1233, 1313, 1311, 1234, 1408, 1534, 1535, 1404, 1235, 1536, 1468, 1236, 1537, 1538, 1239, 1240, 1388, 1324, 1483, 1417, 1241, 1484, 1242, 1245, 1247, 1248, 1251, 1386, 1351, 1252, 1577, 1435, 1356, 1253, 1461, 1401, 1574, 1254, 1539, 1411, 1255, 1256, 1580, 1258, 1259, 1346, 1540, 1322, 1541, 1418, 1459, 1264, 1307, 1210, 1462, 1403, 1337, 1542, 1265, 1543, 1544, 1389, 1407, 1412, 1325, 1398, 1486, 1457, 1268, 1266, 1334, 1419, 1267, 1456, 1458, 1315, 1546, 1473, 1472, 1376, 1377, 1316, 1378, 1379, 1390, 1365, 1545, 1317, 1366, 1463, 1302, 1361, 1269, 1400, 1573, 1344, 1466, 1469, 1420, 1487, 1488, 1464, 1465, 1353, 1470, 1548, 1454, 1354, 1331, 1285, 1524, 1575, 1410, 1422, 1425, 1352, 1271, 1475, 1474, 1525, 1367, 1550, 1368, 1272, 1343, 1362, 1363, 1364, 1489, 1321, 1370, 1369, 1274, 1549, 1395, 1275, 1528, 1527, 1383, 1424, 1276, 1437, 1327, 1455, 1380, 1328, 1342, 1277, 1385, 1359, 1320, 1490, 1371, 1429, 1393, 1372, 1471, 1333, 1373, 1374, 1281, 1423, 1382, 1375, 1282, 1305, 1414, 1523, 1416, 1336, 1339, 1443, 1444, 1445, 1446, 1447, 1448, 1449, 1578, 1491, 1358, 1494, 1495, 1493, 1492, 1357, 1428, 1284, 1554, 1555, 1556, 1557, 1579, 1551, 1397, 1287, 1286, 1552, 1553, 1355, 1413, 1409, 1421, 1440, 1391, 1291, 1496, 1561, 1562, 1563, 1564, 1565, 1566, 1568, 1567, 1569, 1570, 1571, 1520, 1294, 1323, 1572, 1297, 1330, 1392, 1306, 1558, 1559, 1560, 1347, 1303, 1526, 1399, 375: 1987, 384: 308, 386: 308, 473: 308, 479: 308, 481: 308, 483: 308, 523: 1986, 1212, 1213, 1211, 643: 1993, 704: 2331}, + {6: 289, 289, 384: 289, 386: 289, 473: 289, 479: 1997, 481: 1995, 483: 1996, 632: 1999, 1998, 684: 2000, 2332}, + {6: 2, 2, 384: 2, 386: 2, 473: 1907, 605: 1908, 2333}, + {6: 474, 474, 384: 474, 386: 1945, 618: 1946, 2334}, + // 1175 + {6: 280, 280, 384: 2336, 760: 2335}, + {6: 1000, 1000}, + {406: 1904, 564: 1950, 577: 1951, 617: 2337}, + {6: 279, 279}, + {475: 2694}, + // 1180 + {475: 1020}, + {475: 1019}, + {475: 
1018}, + {913, 913, 913, 913, 913, 913, 10: 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 405: 1608, 587: 2677}, + {573: 2344}, + // 1185 + {913, 913, 913, 913, 913, 913, 10: 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 
913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 405: 1608, 587: 2345}, + {1309, 1332, 1217, 1442, 1436, 1426, 10: 1280, 1229, 1477, 1511, 1504, 1497, 1507, 1500, 1499, 1501, 1517, 1509, 1503, 1515, 1516, 1513, 1514, 1502, 1498, 1505, 1506, 1508, 1512, 1510, 1547, 1453, 1451, 1452, 1314, 1216, 1226, 1441, 1244, 1288, 1246, 1225, 1260, 1263, 1434, 1299, 1335, 1522, 1521, 1270, 1338, 1298, 1476, 1221, 1231, 1340, 1439, 1341, 1257, 1518, 1519, 1438, 1326, 1350, 1273, 1278, 1430, 1431, 1283, 1289, 1384, 1296, 1432, 1433, 1219, 1222, 1224, 1223, 1238, 1237, 1482, 1427, 1243, 1249, 1261, 1262, 1250, 1485, 1405, 1318, 1319, 1279, 1450, 1290, 1293, 1292, 1415, 1295, 1300, 1301, 1402, 1214, 1529, 1215, 1218, 1460, 1387, 1304, 1220, 1310, 1348, 1349, 1345, 1530, 1531, 1532, 1406, 1576, 1478, 1479, 1467, 1480, 1227, 1394, 1533, 1312, 1396, 1228, 1381, 1481, 1360, 1308, 1230, 1329, 1232, 1233, 1313, 1311, 1234, 1408, 1534, 1535, 1404, 1235, 1536, 1468, 1236, 1537, 1538, 1239, 1240, 1388, 1324, 1483, 1417, 1241, 1484, 1242, 1245, 1247, 1248, 1251, 1386, 1351, 1252, 1577, 1435, 1356, 1253, 1461, 1401, 1574, 1254, 1539, 1411, 1255, 1256, 1580, 1258, 1259, 1346, 1540, 1322, 1541, 1418, 1459, 1264, 1307, 1210, 1462, 1403, 1337, 1542, 1265, 1543, 1544, 1389, 1407, 1412, 1325, 1398, 1486, 1457, 1268, 1266, 1334, 1419, 1267, 1456, 1458, 1315, 1546, 1473, 1472, 1376, 1377, 1316, 1378, 1379, 1390, 1365, 1545, 1317, 1366, 1463, 1302, 1361, 1269, 1400, 1573, 1344, 1466, 1469, 1420, 1487, 1488, 1464, 1465, 1353, 1470, 1548, 1454, 1354, 1331, 1285, 1524, 1575, 1410, 1422, 1425, 1352, 1271, 1475, 1474, 1525, 1367, 1550, 1368, 1272, 1343, 1362, 1363, 1364, 1489, 1321, 1370, 1369, 1274, 1549, 1395, 1275, 1528, 1527, 1383, 1424, 1276, 1437, 1327, 1455, 1380, 1328, 1342, 1277, 1385, 1359, 1320, 1490, 1371, 1429, 1393, 1372, 1471, 1333, 1373, 1374, 1281, 1423, 1382, 1375, 1282, 1305, 1414, 1523, 1416, 1336, 1339, 1443, 1444, 1445, 1446, 1447, 1448, 1449, 1578, 1491, 1358, 1494, 1495, 1493, 1492, 1357, 1428, 1284, 1554, 1555, 1556, 1557, 1579, 1551, 1397, 1287, 1286, 1552, 1553, 1355, 1413, 1409, 1421, 1440, 1391, 1291, 1496, 1561, 1562, 1563, 1564, 1565, 1566, 1568, 1567, 1569, 1570, 1571, 1520, 1294, 1323, 1572, 1297, 1330, 1392, 1306, 1558, 1559, 1560, 1347, 1303, 1526, 1399, 523: 1584, 1212, 1213, 1211, 555: 2346}, + {6: 121, 121, 373: 2350, 375: 121, 508: 2349, 759: 2348, 790: 2347}, + {6: 1004, 1004, 375: 2676, 715: 2675}, + {6: 1007, 1007}, + // 1190 + {1309, 1332, 1217, 1442, 1436, 1426, 10: 1280, 1229, 1477, 1511, 1504, 1497, 1507, 1500, 1499, 1501, 1517, 1509, 1503, 1515, 1516, 1513, 1514, 1502, 1498, 1505, 1506, 1508, 1512, 1510, 1547, 1453, 1451, 1452, 1314, 1216, 1226, 1441, 1244, 1288, 1246, 1225, 1260, 1263, 1434, 1299, 1335, 1522, 1521, 1270, 1338, 1298, 1476, 1221, 1231, 1340, 1439, 1341, 1257, 1518, 1519, 1438, 1326, 1350, 1273, 1278, 1430, 1431, 1283, 1289, 1384, 1296, 1432, 1433, 1219, 1222, 1224, 1223, 1238, 1237, 1482, 1427, 1243, 1249, 1261, 1262, 1250, 1485, 1405, 1318, 1319, 1279, 1450, 1290, 1293, 1292, 1415, 1295, 1300, 1301, 1402, 1214, 1529, 1215, 1218, 1460, 1387, 1304, 1220, 1310, 1348, 1349, 1345, 1530, 1531, 1532, 1406, 1576, 1478, 1479, 1467, 1480, 1227, 1394, 1533, 1312, 1396, 1228, 1381, 1481, 1360, 1308, 1230, 1329, 1232, 1233, 1313, 1311, 1234, 1408, 1534, 1535, 1404, 1235, 1536, 1468, 1236, 1537, 1538, 1239, 1240, 1388, 1324, 1483, 1417, 
1241, 1484, 1242, 1245, 1247, 1248, 1251, 1386, 1351, 1252, 1577, 1435, 1356, 1253, 1461, 1401, 1574, 1254, 1539, 1411, 1255, 1256, 1580, 1258, 1259, 1346, 1540, 1322, 1541, 1418, 1459, 1264, 1307, 1210, 1462, 1403, 1337, 1542, 1265, 1543, 1544, 1389, 1407, 1412, 1325, 1398, 1486, 1457, 1268, 1266, 1334, 1419, 1267, 1456, 1458, 1315, 1546, 1473, 1472, 1376, 1377, 1316, 1378, 1379, 1390, 1365, 1545, 1317, 1366, 1463, 1302, 1361, 1269, 1400, 1573, 1344, 1466, 1469, 1420, 1487, 1488, 1464, 1465, 1353, 1470, 1548, 1454, 1354, 1331, 1285, 1524, 1575, 1410, 1422, 1425, 1352, 1271, 1475, 1474, 1525, 1367, 1550, 1368, 1272, 1343, 1362, 1363, 1364, 1489, 1321, 1370, 1369, 1274, 1549, 1395, 1275, 1528, 1527, 1383, 1424, 1276, 1437, 1327, 1455, 1380, 1328, 1342, 1277, 1385, 1359, 1320, 1490, 1371, 1429, 1393, 1372, 1471, 1333, 1373, 1374, 1281, 1423, 1382, 1375, 1282, 1305, 1414, 1523, 1416, 1336, 1339, 1443, 1444, 1445, 1446, 1447, 1448, 1449, 1578, 1491, 1358, 1494, 1495, 1493, 1492, 1357, 1428, 1284, 1554, 1555, 1556, 1557, 1579, 1551, 1397, 1287, 1286, 1552, 1553, 1355, 1413, 1409, 1421, 1440, 1391, 1291, 1496, 1561, 1562, 1563, 1564, 1565, 1566, 1568, 1567, 1569, 1570, 1571, 1520, 1294, 1323, 1572, 1297, 1330, 1392, 1306, 1558, 1559, 1560, 1347, 1303, 1526, 1399, 523: 1584, 1212, 1213, 1211, 555: 2674}, + {1309, 1332, 1217, 1442, 1436, 1426, 10: 1280, 1229, 1477, 1511, 1504, 1497, 1507, 1500, 1499, 1501, 1517, 1509, 1503, 1515, 1516, 1513, 1514, 1502, 1498, 1505, 1506, 1508, 1512, 1510, 1547, 1453, 1451, 1452, 1314, 1216, 1226, 1441, 1244, 1288, 1246, 1225, 1260, 1263, 1434, 1299, 1335, 1522, 1521, 1270, 1338, 1298, 1476, 1221, 1231, 1340, 1439, 1341, 1257, 1518, 1519, 1438, 1326, 1350, 1273, 1278, 1430, 1431, 1283, 1289, 1384, 1296, 1432, 1433, 1219, 1222, 1224, 1223, 1238, 1237, 1482, 1427, 1243, 1249, 1261, 1262, 1250, 1485, 1405, 1318, 1319, 1279, 1450, 1290, 1293, 1292, 1415, 1295, 1300, 1301, 1402, 1214, 1529, 1215, 1218, 1460, 1387, 1304, 1220, 1310, 1348, 1349, 1345, 1530, 1531, 1532, 1406, 1576, 1478, 1479, 1467, 1480, 1227, 1394, 1533, 1312, 1396, 1228, 1381, 1481, 1360, 1308, 1230, 1329, 1232, 1233, 1313, 1311, 1234, 1408, 1534, 1535, 1404, 1235, 1536, 1468, 1236, 1537, 1538, 1239, 1240, 1388, 1324, 1483, 1417, 1241, 1484, 1242, 1245, 1247, 1248, 1251, 1386, 1351, 1252, 1577, 1435, 1356, 1253, 1461, 1401, 1574, 1254, 1539, 1411, 1255, 1256, 1580, 1258, 1259, 1346, 1540, 1322, 1541, 1418, 1459, 1264, 1307, 1210, 1462, 1403, 1337, 1542, 1265, 1543, 1544, 1389, 1407, 1412, 1325, 1398, 1486, 1457, 1268, 1266, 1334, 1419, 1267, 1456, 1458, 1315, 1546, 1473, 1472, 1376, 1377, 1316, 1378, 1379, 1390, 1365, 1545, 1317, 1366, 1463, 1302, 1361, 1269, 1400, 1573, 1344, 1466, 1469, 1420, 1487, 1488, 1464, 1465, 1353, 1470, 1548, 1454, 1354, 1331, 1285, 1524, 1575, 1410, 1422, 1425, 1352, 1271, 1475, 1474, 1525, 1367, 1550, 1368, 1272, 1343, 1362, 1363, 1364, 1489, 1321, 1370, 1369, 1274, 1549, 1395, 1275, 1528, 1527, 1383, 1424, 1276, 1437, 1327, 1455, 1380, 1328, 1342, 1277, 1385, 1359, 1320, 1490, 1371, 1429, 1393, 1372, 1471, 1333, 1373, 1374, 1281, 1423, 1382, 1375, 1282, 1305, 1414, 1523, 1416, 1336, 1339, 1443, 1444, 1445, 1446, 1447, 1448, 1449, 1578, 1491, 1358, 1494, 1495, 1493, 1492, 1357, 1428, 1284, 1554, 1555, 1556, 1557, 1579, 1551, 1397, 1287, 1286, 1552, 1553, 1355, 1413, 1409, 1421, 1440, 1391, 1291, 1496, 1561, 1562, 1563, 1564, 1565, 1566, 1568, 1567, 1569, 1570, 1571, 1520, 1294, 1323, 1572, 1297, 1330, 1392, 1306, 1558, 1559, 1560, 1347, 1303, 1526, 1399, 382: 1114, 1114, 
387: 1114, 1114, 2351, 475: 1114, 486: 1114, 508: 2353, 523: 2248, 1212, 1213, 1211, 554: 2352, 578: 2354, 584: 2355, 610: 2356, 644: 2357, 705: 2358}, + {1309, 1332, 1217, 1442, 1436, 1426, 10: 1280, 1229, 1477, 1511, 1504, 1497, 1507, 1500, 1499, 1501, 1517, 1509, 1503, 1515, 1516, 1513, 1514, 1502, 1498, 1505, 1506, 1508, 1512, 1510, 1547, 1453, 1451, 1452, 1314, 1216, 1226, 1441, 1244, 1288, 1246, 1225, 1260, 1263, 1434, 1299, 1335, 1522, 1521, 1270, 1338, 1298, 1476, 1221, 1231, 1340, 1439, 1341, 1257, 1518, 1519, 1438, 1326, 1350, 1273, 1278, 1430, 1431, 1283, 1289, 1384, 1296, 1432, 1433, 1219, 1222, 1224, 1223, 1238, 1237, 1482, 1427, 1243, 1249, 1261, 1262, 1250, 1485, 1405, 1318, 1319, 1279, 1450, 1290, 1293, 1292, 1415, 1295, 1300, 1301, 1402, 1214, 1529, 1215, 1218, 1460, 1387, 1304, 1220, 1310, 1348, 1349, 1345, 1530, 1531, 1532, 1406, 1576, 1478, 1479, 1467, 1480, 1227, 1394, 1533, 1312, 1396, 1228, 1381, 1481, 1360, 1308, 1230, 1329, 1232, 1233, 1313, 1311, 1234, 1408, 1534, 1535, 1404, 1235, 1536, 1468, 1236, 1537, 1538, 1239, 1240, 1388, 1324, 1483, 1417, 1241, 1484, 1242, 1245, 1247, 1248, 1251, 1386, 1351, 1252, 1577, 1435, 1356, 1253, 1461, 1401, 1574, 1254, 1539, 1411, 1255, 1256, 1580, 1258, 1259, 1346, 1540, 1322, 1541, 1418, 1459, 1264, 1307, 1210, 1462, 1403, 1337, 1542, 1265, 1543, 1544, 1389, 1407, 1412, 1325, 1398, 1486, 1457, 1268, 1266, 1334, 1419, 1267, 1456, 1458, 1315, 1546, 1473, 1472, 1376, 1377, 1316, 1378, 1379, 1390, 1365, 1545, 1317, 1366, 1463, 1302, 1361, 1269, 1400, 1573, 1344, 1466, 1469, 1420, 1487, 1488, 1464, 1465, 1353, 1470, 1548, 1454, 1354, 1331, 1285, 1524, 1575, 1410, 1422, 1425, 1352, 1271, 1475, 1474, 1525, 1367, 1550, 1368, 1272, 1343, 1362, 1363, 1364, 1489, 1321, 1370, 1369, 1274, 1549, 1395, 1275, 1528, 1527, 1383, 1424, 1276, 1437, 1327, 1455, 1380, 1328, 1342, 1277, 1385, 1359, 1320, 1490, 1371, 1429, 1393, 1372, 1471, 1333, 1373, 1374, 1281, 1423, 1382, 1375, 1282, 1305, 1414, 1523, 1416, 1336, 1339, 1443, 1444, 1445, 1446, 1447, 1448, 1449, 1578, 1491, 1358, 1494, 1495, 1493, 1492, 1357, 1428, 1284, 1554, 1555, 1556, 1557, 1579, 1551, 1397, 1287, 1286, 1552, 1553, 1355, 1413, 1409, 1421, 1440, 1391, 1291, 1496, 1561, 1562, 1563, 1564, 1565, 1566, 1568, 1567, 1569, 1570, 1571, 1520, 1294, 1323, 1572, 1297, 1330, 1392, 1306, 1558, 1559, 1560, 1347, 1303, 1526, 1399, 382: 1113, 1113, 387: 1113, 1113, 475: 1113, 486: 1113, 523: 2673, 1212, 1213, 1211, 703: 2672}, + {1: 2446, 62: 2470, 66: 2485, 79: 2474, 2467, 2466, 2505, 2504, 87: 2483, 93: 2490, 2489, 96: 2495, 2501, 2506, 2507, 103: 2494, 470: 2487, 2488, 2479, 480: 2484, 489: 2492, 2491, 493: 2480, 495: 2465, 2497, 498: 2468, 2473, 2471, 2459, 2460, 2461, 2462, 2463, 2464, 2458, 509: 2486, 2499, 2503, 2498, 2457, 2502, 2469, 2493, 2472, 519: 2456, 2496, 2455, 2500, 711: 2477, 717: 2454, 2481, 2451, 2475, 730: 2449, 740: 2452, 2453, 757: 2450, 761: 2476, 2447, 2478, 789: 2448, 794: 2482, 2445, 802: 2508}, + {1309, 1332, 1217, 1442, 1436, 1426, 10: 1280, 1229, 1477, 1511, 1504, 1497, 1507, 1500, 1499, 1501, 1517, 1509, 1503, 1515, 1516, 1513, 1514, 1502, 1498, 1505, 1506, 1508, 1512, 1510, 1547, 1453, 1451, 1452, 1314, 1216, 1226, 1441, 1244, 1288, 1246, 1225, 1260, 1263, 1434, 1299, 1335, 1522, 1521, 1270, 1338, 1298, 1476, 1221, 1231, 1340, 1439, 1341, 1257, 1518, 1519, 1438, 1326, 1350, 1273, 1278, 1430, 1431, 1283, 1289, 1384, 1296, 1432, 1433, 1219, 1222, 1224, 1223, 1238, 1237, 1482, 1427, 1243, 1249, 1261, 1262, 1250, 1485, 1405, 1318, 1319, 1279, 1450, 1290, 1293, 1292, 
1415, 1295, 1300, 1301, 1402, 1214, 1529, 1215, 1218, 1460, 1387, 1304, 1220, 1310, 1348, 1349, 1345, 1530, 1531, 1532, 1406, 1576, 1478, 1479, 1467, 1480, 1227, 1394, 1533, 1312, 1396, 1228, 1381, 1481, 1360, 1308, 1230, 1329, 1232, 1233, 1313, 1311, 1234, 1408, 1534, 1535, 1404, 1235, 1536, 1468, 1236, 1537, 1538, 1239, 1240, 1388, 1324, 1483, 1417, 1241, 1484, 1242, 1245, 1247, 1248, 1251, 1386, 1351, 1252, 1577, 1435, 1356, 1253, 1461, 1401, 1574, 1254, 1539, 1411, 1255, 1256, 1580, 1258, 1259, 1346, 1540, 1322, 1541, 1418, 1459, 1264, 1307, 1210, 1462, 1403, 1337, 1542, 1265, 1543, 1544, 1389, 1407, 1412, 1325, 1398, 1486, 1457, 1268, 1266, 1334, 1419, 1267, 1456, 1458, 1315, 1546, 1473, 1472, 1376, 1377, 1316, 1378, 1379, 1390, 1365, 1545, 1317, 1366, 1463, 1302, 1361, 1269, 1400, 1573, 1344, 1466, 1469, 1420, 1487, 1488, 1464, 1465, 1353, 1470, 1548, 1454, 1354, 1331, 1285, 1524, 1575, 1410, 1422, 1425, 1352, 1271, 1475, 1474, 1525, 1367, 1550, 1368, 1272, 1343, 1362, 1363, 1364, 1489, 1321, 1370, 1369, 1274, 1549, 1395, 1275, 1528, 1527, 1383, 1424, 1276, 1437, 1327, 1455, 1380, 1328, 1342, 1277, 1385, 1359, 1320, 1490, 1371, 1429, 1393, 1372, 1471, 1333, 1373, 1374, 1281, 1423, 1382, 1375, 1282, 1305, 1414, 1523, 1416, 1336, 1339, 1443, 1444, 1445, 1446, 1447, 1448, 1449, 1578, 1491, 1358, 1494, 1495, 1493, 1492, 1357, 1428, 1284, 1554, 1555, 1556, 1557, 1579, 1551, 1397, 1287, 1286, 1552, 1553, 1355, 1413, 1409, 1421, 1440, 1391, 1291, 1496, 1561, 1562, 1563, 1564, 1565, 1566, 1568, 1567, 1569, 1570, 1571, 1520, 1294, 1323, 1572, 1297, 1330, 1392, 1306, 1558, 1559, 1560, 1347, 1303, 1526, 1399, 523: 1584, 1212, 1213, 1211, 555: 2443}, + // 1195 + {382: 2017, 2362, 387: 2366, 2365, 475: 2018, 486: 2363, 576: 2364, 726: 2367}, + {8: 125, 125}, + {8: 124, 124}, + {8: 123, 123}, + {8: 2360, 2359}, + // 1200 + {1309, 1332, 1217, 1442, 1436, 1426, 10: 1280, 1229, 1477, 1511, 1504, 1497, 1507, 1500, 1499, 1501, 1517, 1509, 1503, 1515, 1516, 1513, 1514, 1502, 1498, 1505, 1506, 1508, 1512, 1510, 1547, 1453, 1451, 1452, 1314, 1216, 1226, 1441, 1244, 1288, 1246, 1225, 1260, 1263, 1434, 1299, 1335, 1522, 1521, 1270, 1338, 1298, 1476, 1221, 1231, 1340, 1439, 1341, 1257, 1518, 1519, 1438, 1326, 1350, 1273, 1278, 1430, 1431, 1283, 1289, 1384, 1296, 1432, 1433, 1219, 1222, 1224, 1223, 1238, 1237, 1482, 1427, 1243, 1249, 1261, 1262, 1250, 1485, 1405, 1318, 1319, 1279, 1450, 1290, 1293, 1292, 1415, 1295, 1300, 1301, 1402, 1214, 1529, 1215, 1218, 1460, 1387, 1304, 1220, 1310, 1348, 1349, 1345, 1530, 1531, 1532, 1406, 1576, 1478, 1479, 1467, 1480, 1227, 1394, 1533, 1312, 1396, 1228, 1381, 1481, 1360, 1308, 1230, 1329, 1232, 1233, 1313, 1311, 1234, 1408, 1534, 1535, 1404, 1235, 1536, 1468, 1236, 1537, 1538, 1239, 1240, 1388, 1324, 1483, 1417, 1241, 1484, 1242, 1245, 1247, 1248, 1251, 1386, 1351, 1252, 1577, 1435, 1356, 1253, 1461, 1401, 1574, 1254, 1539, 1411, 1255, 1256, 1580, 1258, 1259, 1346, 1540, 1322, 1541, 1418, 1459, 1264, 1307, 1210, 1462, 1403, 1337, 1542, 1265, 1543, 1544, 1389, 1407, 1412, 1325, 1398, 1486, 1457, 1268, 1266, 1334, 1419, 1267, 1456, 1458, 1315, 1546, 1473, 1472, 1376, 1377, 1316, 1378, 1379, 1390, 1365, 1545, 1317, 1366, 1463, 1302, 1361, 1269, 1400, 1573, 1344, 1466, 1469, 1420, 1487, 1488, 1464, 1465, 1353, 1470, 1548, 1454, 1354, 1331, 1285, 1524, 1575, 1410, 1422, 1425, 1352, 1271, 1475, 1474, 1525, 1367, 1550, 1368, 1272, 1343, 1362, 1363, 1364, 1489, 1321, 1370, 1369, 1274, 1549, 1395, 1275, 1528, 1527, 1383, 1424, 1276, 1437, 1327, 1455, 1380, 1328, 1342, 1277, 
1385, 1359, 1320, 1490, 1371, 1429, 1393, 1372, 1471, 1333, 1373, 1374, 1281, 1423, 1382, 1375, 1282, 1305, 1414, 1523, 1416, 1336, 1339, 1443, 1444, 1445, 1446, 1447, 1448, 1449, 1578, 1491, 1358, 1494, 1495, 1493, 1492, 1357, 1428, 1284, 1554, 1555, 1556, 1557, 1579, 1551, 1397, 1287, 1286, 1552, 1553, 1355, 1413, 1409, 1421, 1440, 1391, 1291, 1496, 1561, 1562, 1563, 1564, 1565, 1566, 1568, 1567, 1569, 1570, 1571, 1520, 1294, 1323, 1572, 1297, 1330, 1392, 1306, 1558, 1559, 1560, 1347, 1303, 1526, 1399, 382: 1114, 1114, 387: 1114, 1114, 2351, 475: 1114, 486: 1114, 523: 2248, 1212, 1213, 1211, 554: 2352, 578: 2354, 584: 2355, 610: 2356, 644: 2361}, + {6: 120, 120, 375: 120}, + {8: 122, 122}, + {382: 2437}, + {1122, 1122, 1122, 1122, 1122, 1122, 10: 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 373: 1122, 382: 2017, 475: 2018, 576: 2376, 688: 2430}, + // 1205 + {913, 913, 913, 913, 913, 913, 10: 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 
913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 373: 913, 391: 913, 405: 1608, 587: 2424}, + {1122, 1122, 1122, 1122, 1122, 1122, 10: 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 1122, 373: 
1122, 382: 2017, 391: 1122, 475: 2018, 576: 2376, 688: 2377}, + {373: 2368}, + {6: 126, 126, 126, 126}, + {1309, 1332, 1217, 1442, 1436, 1426, 10: 1280, 1625, 1477, 1511, 1504, 1497, 1507, 1500, 1499, 1501, 1517, 1509, 1503, 1515, 1516, 1513, 1514, 1502, 1498, 1505, 1506, 1508, 1512, 1510, 1547, 1453, 1451, 1452, 1314, 1623, 1226, 1441, 1244, 1288, 1246, 1225, 1628, 1263, 1434, 1299, 1335, 1522, 1521, 1270, 1338, 1298, 1476, 1221, 1231, 1340, 1439, 1341, 1257, 1518, 1519, 1438, 1326, 1350, 1273, 1278, 1430, 1431, 1283, 1289, 1384, 1632, 1432, 1433, 1219, 1222, 1224, 1223, 1238, 1626, 1482, 1427, 1243, 1249, 1261, 1262, 1250, 1485, 1405, 1318, 1319, 1279, 1450, 1290, 1631, 1630, 1415, 1295, 1300, 1633, 1402, 1214, 1529, 1215, 1218, 1460, 1387, 1304, 1624, 1310, 1348, 1349, 1345, 1530, 1531, 1532, 1406, 1576, 1478, 1479, 1467, 1480, 1227, 1394, 1533, 1312, 1396, 1228, 1381, 1481, 1640, 1636, 1230, 1329, 1232, 1233, 1313, 1311, 1234, 1408, 1534, 1535, 1404, 1235, 1536, 1468, 1236, 1537, 1538, 1627, 1240, 1388, 1324, 1483, 1417, 1241, 1484, 1242, 1245, 1247, 1248, 1251, 1386, 1351, 1252, 1577, 1435, 1356, 1253, 1461, 1401, 1574, 1254, 1539, 1411, 1255, 1256, 1580, 1258, 1259, 1346, 1540, 1322, 1541, 1418, 1459, 1629, 1307, 1622, 1462, 1403, 1337, 1542, 1265, 1543, 1544, 1389, 1407, 1412, 1325, 1398, 1486, 1457, 1268, 1266, 1334, 1419, 1267, 1456, 1458, 1315, 1546, 1473, 1472, 1376, 1377, 1316, 1378, 1379, 1390, 1642, 1545, 1317, 1643, 1463, 1302, 1641, 1269, 1400, 1573, 1344, 1466, 1469, 1420, 1487, 1488, 1464, 1465, 1353, 1470, 1548, 1454, 1354, 1331, 1285, 1524, 1575, 1410, 1422, 1425, 1352, 1271, 1475, 1474, 1525, 1367, 1550, 1368, 1272, 1343, 1362, 1363, 1364, 1489, 1637, 1370, 1369, 1274, 1549, 1395, 1275, 1528, 1527, 1383, 1424, 1276, 1437, 1327, 1455, 1380, 1328, 1638, 1277, 1385, 1639, 1320, 1490, 1644, 1429, 1393, 1372, 1471, 1333, 1373, 1374, 1281, 1423, 1382, 1375, 1282, 1305, 1414, 1523, 1416, 1336, 1339, 1443, 1444, 1445, 1446, 1447, 1448, 1449, 1578, 1491, 1358, 1494, 1495, 1493, 1492, 1357, 1428, 1284, 1554, 1555, 1556, 1557, 1579, 1551, 1397, 1287, 1286, 1552, 1553, 1355, 1413, 1409, 1421, 1440, 1391, 1291, 1496, 1561, 1562, 1563, 1564, 1565, 1566, 1568, 1567, 1569, 1570, 1571, 1520, 1294, 1323, 1572, 1297, 1330, 1392, 1635, 1558, 1559, 1560, 1347, 1634, 1526, 1399, 1618, 1671, 1674, 376: 1646, 378: 1655, 1669, 1668, 1699, 400: 1658, 404: 1616, 1677, 1650, 410: 1682, 419: 1645, 1647, 429: 1675, 1649, 1648, 433: 1676, 435: 1654, 1680, 1689, 1711, 1653, 1690, 1691, 1652, 1666, 1667, 1705, 1697, 1700, 1706, 1707, 1702, 1703, 1708, 1701, 1704, 1695, 1673, 1685, 1686, 1688, 1684, 1678, 1679, 1670, 1681, 1683, 1672, 1687, 1692, 1693, 523: 1657, 1212, 1213, 1211, 1663, 1659, 1651, 1662, 1660, 1661, 1694, 1698, 1696, 1656, 1665, 1709, 1710, 1664, 1621, 1620, 1619, 2369}, + // 1210 + {8: 2370, 392: 1720, 1719, 395: 1718, 1717, 1715, 547: 1716, 1714}, + {6: 1086, 1086, 1086, 1086, 44: 2371, 372: 2372, 627: 2373, 670: 2374}, + {1088, 1088, 1088, 1088, 1088, 1088, 1088, 1088, 1088, 1088, 372: 1088, 374: 1088, 1088, 1088, 1088, 382: 1088, 1088, 385: 1088, 387: 1088, 1088, 1088, 1088}, + {44: 2375}, + {1085, 1085, 1085, 1085, 1085, 1085, 1085, 1085, 1085, 1085, 372: 1085, 374: 1085, 1085, 1085, 1085, 382: 1085, 1085, 385: 1085, 387: 1085, 1085, 1085, 1085}, + // 1215 + {6: 1047, 1047, 1047, 1047}, + {1087, 1087, 1087, 1087, 1087, 1087, 1087, 1087, 1087, 1087, 372: 1087, 374: 1087, 1087, 1087, 1087, 382: 1087, 1087, 385: 1087, 387: 1087, 1087, 1087, 1087}, + {1121, 1121, 1121, 1121, 1121, 
1121, 10: 1121, 1121, 1121, 1121, 1121, 1121, 1121, 1121, 1121, 1121, 1121, 1121, 1121, 1121, 1121, 1121, 1121, 1121, 1121, 1121, 1121, 1121, 1121, 1121, 1121, 1121, 1121, 1121, 1121, 1121, 1121, 1121, 1121, 1121, 1121, 1121, 1121, 1121, 1121, 1121, 1121, 1121, 1121, 1121, 1121, 1121, 1121, 1121, 1121, 1121, 1121, 1121, 1121, 1121, 1121, 1121, 1121, 1121, 1121, 1121, 1121, 1121, 1121, 1121, 1121, 1121, 1121, 1121, 1121, 1121, 1121, 1121, 1121, 1121, 1121, 1121, 1121, 1121, 1121, 1121, 1121, 1121, 1121, 1121, 1121, 1121, 1121, 1121, 1121, 1121, 1121, 1121, 1121, 1121, 1121, 1121, 1121, 1121, 1121, 1121, 1121, 1121, 1121, 1121, 1121, 1121, 1121, 1121, 1121, 1121, 1121, 1121, 1121, 1121, 1121, 1121, 1121, 1121, 1121, 1121, 1121, 1121, 1121, 1121, 1121, 1121, 1121, 1121, 1121, 1121, 1121, 1121, 1121, 1121, 1121, 1121, 1121, 1121, 1121, 1121, 1121, 1121, 1121, 1121, 1121, 1121, 1121, 1121, 1121, 1121, 1121, 1121, 1121, 1121, 1121, 1121, 1121, 1121, 1121, 1121, 1121, 1121, 1121, 1121, 1121, 1121, 1121, 1121, 1121, 1121, 1121, 1121, 1121, 1121, 1121, 1121, 1121, 1121, 1121, 1121, 1121, 1121, 1121, 1121, 1121, 1121, 1121, 1121, 1121, 1121, 1121, 1121, 1121, 1121, 1121, 1121, 1121, 1121, 1121, 1121, 1121, 1121, 1121, 1121, 1121, 1121, 1121, 1121, 1121, 1121, 1121, 1121, 1121, 1121, 1121, 1121, 1121, 1121, 1121, 1121, 1121, 1121, 1121, 1121, 1121, 1121, 1121, 1121, 1121, 1121, 1121, 1121, 1121, 1121, 1121, 1121, 1121, 1121, 1121, 1121, 1121, 1121, 1121, 1121, 1121, 1121, 1121, 1121, 1121, 1121, 1121, 1121, 1121, 1121, 1121, 1121, 1121, 1121, 1121, 1121, 1121, 1121, 1121, 1121, 1121, 1121, 1121, 1121, 1121, 1121, 1121, 1121, 1121, 1121, 1121, 1121, 1121, 1121, 1121, 1121, 1121, 1121, 1121, 1121, 1121, 1121, 1121, 1121, 1121, 1121, 1121, 1121, 1121, 1121, 1121, 1121, 1121, 1121, 1121, 1121, 1121, 1121, 1121, 1121, 1121, 1121, 1121, 1121, 1121, 1121, 1121, 1121, 1121, 1121, 1121, 1121, 1121, 1121, 1121, 1121, 1121, 1121, 1121, 1121, 1121, 1121, 1121, 1121, 1121, 1121, 1121, 1121, 1121, 1121, 1121, 1121, 1121, 1121, 1121, 1121, 1121, 1121, 1121, 1121, 1121, 1121, 1121, 1121, 1121, 1121, 1121, 1121, 1121, 1121, 1121, 1121, 1121, 1121, 1121, 1121, 1121, 1121, 373: 1121, 391: 1121}, + {1309, 1332, 1217, 1442, 1436, 1426, 10: 1280, 1229, 1477, 1511, 1504, 1497, 1507, 1500, 1499, 1501, 1517, 1509, 1503, 1515, 1516, 1513, 1514, 1502, 1498, 1505, 1506, 1508, 1512, 1510, 1547, 1453, 1451, 1452, 1314, 1216, 1226, 1441, 1244, 1288, 1246, 1225, 1260, 1263, 1434, 1299, 1335, 1522, 1521, 1270, 1338, 1298, 1476, 1221, 1231, 1340, 1439, 1341, 1257, 1518, 1519, 1438, 1326, 1350, 1273, 1278, 1430, 1431, 1283, 1289, 1384, 1296, 1432, 1433, 1219, 1222, 1224, 1223, 1238, 1237, 1482, 1427, 1243, 1249, 1261, 1262, 1250, 1485, 1405, 1318, 1319, 1279, 1450, 1290, 1293, 1292, 1415, 1295, 1300, 1301, 1402, 1214, 1529, 1215, 1218, 1460, 1387, 1304, 1220, 1310, 1348, 1349, 1345, 1530, 1531, 1532, 1406, 1576, 1478, 1479, 1467, 1480, 1227, 1394, 1533, 1312, 1396, 1228, 1381, 1481, 1360, 1308, 1230, 1329, 1232, 1233, 1313, 1311, 1234, 1408, 1534, 1535, 1404, 1235, 1536, 1468, 1236, 1537, 1538, 1239, 1240, 1388, 1324, 1483, 1417, 1241, 1484, 1242, 1245, 1247, 1248, 1251, 1386, 1351, 1252, 1577, 1435, 1356, 1253, 1461, 1401, 1574, 1254, 1539, 1411, 1255, 1256, 1580, 1258, 1259, 1346, 1540, 1322, 1541, 1418, 1459, 1264, 1307, 1210, 1462, 1403, 1337, 1542, 1265, 1543, 1544, 1389, 1407, 1412, 1325, 1398, 1486, 1457, 1268, 1266, 1334, 1419, 1267, 1456, 1458, 1315, 1546, 1473, 1472, 1376, 1377, 1316, 1378, 1379, 1390, 1365, 1545, 1317, 
1366, 1463, 1302, 1361, 1269, 1400, 1573, 1344, 1466, 1469, 1420, 1487, 1488, 1464, 1465, 1353, 1470, 1548, 1454, 1354, 1331, 1285, 1524, 1575, 1410, 1422, 1425, 1352, 1271, 1475, 1474, 1525, 1367, 1550, 1368, 1272, 1343, 1362, 1363, 1364, 1489, 1321, 1370, 1369, 1274, 1549, 1395, 1275, 1528, 1527, 1383, 1424, 1276, 1437, 1327, 1455, 1380, 1328, 1342, 1277, 1385, 1359, 1320, 1490, 1371, 1429, 1393, 1372, 1471, 1333, 1373, 1374, 1281, 1423, 1382, 1375, 1282, 1305, 1414, 1523, 1416, 1336, 1339, 1443, 1444, 1445, 1446, 1447, 1448, 1449, 1578, 1491, 1358, 1494, 1495, 1493, 1492, 1357, 1428, 1284, 1554, 1555, 1556, 1557, 1579, 1551, 1397, 1287, 1286, 1552, 1553, 1355, 1413, 1409, 1421, 1440, 1391, 1291, 1496, 1561, 1562, 1563, 1564, 1565, 1566, 1568, 1567, 1569, 1570, 1571, 1520, 1294, 1323, 1572, 1297, 1330, 1392, 1306, 1558, 1559, 1560, 1347, 1303, 1526, 1399, 373: 911, 391: 911, 523: 2379, 1212, 1213, 1211, 614: 2380, 634: 2378}, + {373: 2388}, + // 1220 + {35: 2386, 373: 910, 391: 910}, + {373: 902, 391: 2381}, + {45: 2383, 47: 2384, 2385, 616: 2382}, + {373: 901}, + {895, 6: 895, 895, 895, 895, 35: 895, 895, 895, 895, 373: 895, 385: 895, 391: 895, 474: 895}, + // 1225 + {894, 6: 894, 894, 894, 894, 35: 894, 894, 894, 894, 373: 894, 385: 894, 391: 894, 474: 894}, + {893, 6: 893, 893, 893, 893, 35: 893, 893, 893, 893, 373: 893, 385: 893, 391: 893, 474: 893}, + {45: 2383, 47: 2384, 2385, 616: 2387}, + {373: 900}, + {1309, 1332, 1217, 1442, 1436, 1426, 10: 1280, 1229, 1477, 1511, 1504, 1497, 1507, 1500, 1499, 1501, 1517, 1509, 1503, 1515, 1516, 1513, 1514, 1502, 1498, 1505, 1506, 1508, 1512, 1510, 1547, 1453, 1451, 1452, 1314, 1216, 1226, 1441, 1244, 1288, 1246, 1225, 1260, 1263, 1434, 1299, 1335, 1522, 1521, 1270, 1338, 1298, 1476, 1221, 1231, 1340, 1439, 1341, 1257, 1518, 1519, 1438, 1326, 1350, 1273, 1278, 1430, 1431, 1283, 1289, 1384, 1296, 1432, 1433, 1219, 1222, 1224, 1223, 1238, 1237, 1482, 1427, 1243, 1249, 1261, 1262, 1250, 1485, 1405, 1318, 1319, 1279, 1450, 1290, 1293, 1292, 1415, 1295, 1300, 1301, 1402, 1214, 1529, 1215, 1218, 1460, 1387, 1304, 1220, 1310, 1348, 1349, 1345, 1530, 1531, 1532, 1406, 1576, 1478, 1479, 1467, 1480, 1227, 1394, 1533, 1312, 1396, 1228, 1381, 1481, 1360, 1308, 1230, 1329, 1232, 1233, 1313, 1311, 1234, 1408, 1534, 1535, 1404, 1235, 1536, 1468, 1236, 1537, 1538, 1239, 1240, 1388, 1324, 1483, 1417, 1241, 1484, 1242, 1245, 1247, 1248, 1251, 1386, 1351, 1252, 1577, 1435, 1356, 1253, 1461, 1401, 1574, 1254, 1539, 1411, 1255, 1256, 1580, 1258, 1259, 1346, 1540, 1322, 1541, 1418, 1459, 1264, 1307, 1210, 1462, 1403, 1337, 1542, 1265, 1543, 1544, 1389, 1407, 1412, 1325, 1398, 1486, 1457, 1268, 1266, 1334, 1419, 1267, 1456, 1458, 1315, 1546, 1473, 1472, 1376, 1377, 1316, 1378, 1379, 1390, 1365, 1545, 1317, 1366, 1463, 1302, 1361, 1269, 1400, 1573, 1344, 1466, 1469, 1420, 1487, 1488, 1464, 1465, 1353, 1470, 1548, 1454, 1354, 1331, 1285, 1524, 1575, 1410, 1422, 1425, 1352, 1271, 1475, 1474, 1525, 1367, 1550, 1368, 1272, 1343, 1362, 1363, 1364, 1489, 1321, 1370, 1369, 1274, 1549, 1395, 1275, 1528, 1527, 1383, 1424, 1276, 1437, 1327, 1455, 1380, 1328, 1342, 1277, 1385, 1359, 1320, 1490, 1371, 1429, 1393, 1372, 1471, 1333, 1373, 1374, 1281, 1423, 1382, 1375, 1282, 1305, 1414, 1523, 1416, 1336, 1339, 1443, 1444, 1445, 1446, 1447, 1448, 1449, 1578, 1491, 1358, 1494, 1495, 1493, 1492, 1357, 1428, 1284, 1554, 1555, 1556, 1557, 1579, 1551, 1397, 1287, 1286, 1552, 1553, 1355, 1413, 1409, 1421, 1440, 1391, 1291, 1496, 1561, 1562, 1563, 1564, 1565, 1566, 1568, 1567, 1569, 1570, 
1571, 1520, 1294, 1323, 1572, 1297, 1330, 1392, 1306, 1558, 1559, 1560, 1347, 1303, 1526, 1399, 373: 2392, 523: 2248, 1212, 1213, 1211, 554: 2391, 589: 2390, 598: 2389}, + // 1230 + {8: 2402, 2403}, + {8: 1025, 1025}, + {8: 31, 31, 373: 2397, 407: 31, 31, 556: 2398, 565: 2396}, + {1309, 1332, 1217, 1442, 1436, 1426, 10: 1280, 1625, 1477, 1511, 1504, 1497, 1507, 1500, 1499, 1501, 1517, 1509, 1503, 1515, 1516, 1513, 1514, 1502, 1498, 1505, 1506, 1508, 1512, 1510, 1547, 1453, 1451, 1452, 1314, 1623, 1226, 1441, 1244, 1288, 1246, 1225, 1628, 1263, 1434, 1299, 1335, 1522, 1521, 1270, 1338, 1298, 1476, 1221, 1231, 1340, 1439, 1341, 1257, 1518, 1519, 1438, 1326, 1350, 1273, 1278, 1430, 1431, 1283, 1289, 1384, 1632, 1432, 1433, 1219, 1222, 1224, 1223, 1238, 1626, 1482, 1427, 1243, 1249, 1261, 1262, 1250, 1485, 1405, 1318, 1319, 1279, 1450, 1290, 1631, 1630, 1415, 1295, 1300, 1633, 1402, 1214, 1529, 1215, 1218, 1460, 1387, 1304, 1624, 1310, 1348, 1349, 1345, 1530, 1531, 1532, 1406, 1576, 1478, 1479, 1467, 1480, 1227, 1394, 1533, 1312, 1396, 1228, 1381, 1481, 1640, 1636, 1230, 1329, 1232, 1233, 1313, 1311, 1234, 1408, 1534, 1535, 1404, 1235, 1536, 1468, 1236, 1537, 1538, 1627, 1240, 1388, 1324, 1483, 1417, 1241, 1484, 1242, 1245, 1247, 1248, 1251, 1386, 1351, 1252, 1577, 1435, 1356, 1253, 1461, 1401, 1574, 1254, 1539, 1411, 1255, 1256, 1580, 1258, 1259, 1346, 1540, 1322, 1541, 1418, 1459, 1629, 1307, 1622, 1462, 1403, 1337, 1542, 1265, 1543, 1544, 1389, 1407, 1412, 1325, 1398, 1486, 1457, 1268, 1266, 1334, 1419, 1267, 1456, 1458, 1315, 1546, 1473, 1472, 1376, 1377, 1316, 1378, 1379, 1390, 1642, 1545, 1317, 1643, 1463, 1302, 1641, 1269, 1400, 1573, 1344, 1466, 1469, 1420, 1487, 1488, 1464, 1465, 1353, 1470, 1548, 1454, 1354, 1331, 1285, 1524, 1575, 1410, 1422, 1425, 1352, 1271, 1475, 1474, 1525, 1367, 1550, 1368, 1272, 1343, 1362, 1363, 1364, 1489, 1637, 1370, 1369, 1274, 1549, 1395, 1275, 1528, 1527, 1383, 1424, 1276, 1437, 1327, 1455, 1380, 1328, 1638, 1277, 1385, 1639, 1320, 1490, 1644, 1429, 1393, 1372, 1471, 1333, 1373, 1374, 1281, 1423, 1382, 1375, 1282, 1305, 1414, 1523, 1416, 1336, 1339, 1443, 1444, 1445, 1446, 1447, 1448, 1449, 1578, 1491, 1358, 1494, 1495, 1493, 1492, 1357, 1428, 1284, 1554, 1555, 1556, 1557, 1579, 1551, 1397, 1287, 1286, 1552, 1553, 1355, 1413, 1409, 1421, 1440, 1391, 1291, 1496, 1561, 1562, 1563, 1564, 1565, 1566, 1568, 1567, 1569, 1570, 1571, 1520, 1294, 1323, 1572, 1297, 1330, 1392, 1635, 1558, 1559, 1560, 1347, 1634, 1526, 1399, 1618, 1671, 1674, 376: 1646, 378: 1655, 1669, 1668, 1699, 400: 1658, 404: 1616, 1677, 1650, 410: 1682, 419: 1645, 1647, 429: 1675, 1649, 1648, 433: 1676, 435: 1654, 1680, 1689, 1711, 1653, 1690, 1691, 1652, 1666, 1667, 1705, 1697, 1700, 1706, 1707, 1702, 1703, 1708, 1701, 1704, 1695, 1673, 1685, 1686, 1688, 1684, 1678, 1679, 1670, 1681, 1683, 1672, 1687, 1692, 1693, 523: 1657, 1212, 1213, 1211, 1663, 1659, 1651, 1662, 1660, 1661, 1694, 1698, 1696, 1656, 1665, 1709, 1710, 1664, 1621, 1620, 1619, 2393}, + {8: 2394, 392: 1720, 1719, 395: 1718, 1717, 1715, 547: 1716, 1714}, + // 1235 + {8: 477, 477, 407: 1965, 1964, 637: 2395}, + {8: 1022, 1022}, + {8: 477, 477, 407: 1965, 1964, 637: 2401}, + {406: 1904, 564: 1950, 577: 2399}, + {30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 39: 30, 30, 30, 372: 30, 374: 30, 30, 30, 30, 382: 30, 30, 385: 30, 387: 30, 30, 30, 30, 407: 30, 30, 470: 30, 30, 30, 545: 30, 30}, + // 1240 + {8: 2400}, + {32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 39: 32, 32, 32, 372: 32, 374: 32, 32, 32, 32, 382: 32, 32, 385: 32, 
387: 32, 32, 32, 32, 407: 32, 32, 470: 32, 32, 32, 545: 32, 32}, + {8: 1023, 1023}, + {909, 6: 909, 909, 909, 909, 35: 909, 909, 909, 909, 391: 909, 474: 909, 597: 2405}, + {1309, 1332, 1217, 1442, 1436, 1426, 10: 1280, 1229, 1477, 1511, 1504, 1497, 1507, 1500, 1499, 1501, 1517, 1509, 1503, 1515, 1516, 1513, 1514, 1502, 1498, 1505, 1506, 1508, 1512, 1510, 1547, 1453, 1451, 1452, 1314, 1216, 1226, 1441, 1244, 1288, 1246, 1225, 1260, 1263, 1434, 1299, 1335, 1522, 1521, 1270, 1338, 1298, 1476, 1221, 1231, 1340, 1439, 1341, 1257, 1518, 1519, 1438, 1326, 1350, 1273, 1278, 1430, 1431, 1283, 1289, 1384, 1296, 1432, 1433, 1219, 1222, 1224, 1223, 1238, 1237, 1482, 1427, 1243, 1249, 1261, 1262, 1250, 1485, 1405, 1318, 1319, 1279, 1450, 1290, 1293, 1292, 1415, 1295, 1300, 1301, 1402, 1214, 1529, 1215, 1218, 1460, 1387, 1304, 1220, 1310, 1348, 1349, 1345, 1530, 1531, 1532, 1406, 1576, 1478, 1479, 1467, 1480, 1227, 1394, 1533, 1312, 1396, 1228, 1381, 1481, 1360, 1308, 1230, 1329, 1232, 1233, 1313, 1311, 1234, 1408, 1534, 1535, 1404, 1235, 1536, 1468, 1236, 1537, 1538, 1239, 1240, 1388, 1324, 1483, 1417, 1241, 1484, 1242, 1245, 1247, 1248, 1251, 1386, 1351, 1252, 1577, 1435, 1356, 1253, 1461, 1401, 1574, 1254, 1539, 1411, 1255, 1256, 1580, 1258, 1259, 1346, 1540, 1322, 1541, 1418, 1459, 1264, 1307, 1210, 1462, 1403, 1337, 1542, 1265, 1543, 1544, 1389, 1407, 1412, 1325, 1398, 1486, 1457, 1268, 1266, 1334, 1419, 1267, 1456, 1458, 1315, 1546, 1473, 1472, 1376, 1377, 1316, 1378, 1379, 1390, 1365, 1545, 1317, 1366, 1463, 1302, 1361, 1269, 1400, 1573, 1344, 1466, 1469, 1420, 1487, 1488, 1464, 1465, 1353, 1470, 1548, 1454, 1354, 1331, 1285, 1524, 1575, 1410, 1422, 1425, 1352, 1271, 1475, 1474, 1525, 1367, 1550, 1368, 1272, 1343, 1362, 1363, 1364, 1489, 1321, 1370, 1369, 1274, 1549, 1395, 1275, 1528, 1527, 1383, 1424, 1276, 1437, 1327, 1455, 1380, 1328, 1342, 1277, 1385, 1359, 1320, 1490, 1371, 1429, 1393, 1372, 1471, 1333, 1373, 1374, 1281, 1423, 1382, 1375, 1282, 1305, 1414, 1523, 1416, 1336, 1339, 1443, 1444, 1445, 1446, 1447, 1448, 1449, 1578, 1491, 1358, 1494, 1495, 1493, 1492, 1357, 1428, 1284, 1554, 1555, 1556, 1557, 1579, 1551, 1397, 1287, 1286, 1552, 1553, 1355, 1413, 1409, 1421, 1440, 1391, 1291, 1496, 1561, 1562, 1563, 1564, 1565, 1566, 1568, 1567, 1569, 1570, 1571, 1520, 1294, 1323, 1572, 1297, 1330, 1392, 1306, 1558, 1559, 1560, 1347, 1303, 1526, 1399, 373: 2392, 523: 2248, 1212, 1213, 1211, 554: 2391, 589: 2404}, + // 1245 + {8: 1024, 1024}, + {2410, 6: 1048, 1048, 1048, 1048, 35: 2413, 2415, 2414, 2407, 391: 2412, 474: 2409, 588: 2411, 590: 2408, 596: 2406}, + {908, 6: 908, 908, 908, 908, 35: 908, 908, 908, 908, 391: 908, 474: 908}, + {403: 2421, 406: 988, 613: 2422}, + {906, 6: 906, 906, 906, 906, 35: 906, 906, 906, 906, 391: 906, 474: 906}, + // 1250 + {769: 2419}, + {378: 2418}, + {903, 6: 903, 903, 903, 903, 35: 903, 903, 903, 903, 391: 903, 474: 903}, + {45: 2383, 47: 2384, 2385, 616: 2417}, + {45: 2383, 47: 2384, 2385, 616: 2416}, + // 1255 + {892, 6: 892, 892, 892, 892, 35: 892, 892, 892, 892, 391: 892, 474: 892}, + {891, 6: 891, 891, 891, 891, 35: 891, 891, 891, 891, 391: 891, 474: 891}, + {896, 6: 896, 896, 896, 896, 35: 896, 896, 896, 896, 385: 896, 391: 896, 474: 896}, + {897, 6: 897, 897, 897, 897, 35: 897, 897, 897, 897, 385: 897, 391: 897, 474: 897}, + {904, 6: 904, 904, 904, 904, 35: 904, 904, 904, 904, 391: 904, 474: 904}, + // 1260 + {1309, 1332, 1217, 1442, 1436, 1426, 10: 1280, 1229, 1477, 1511, 1504, 1497, 1507, 1500, 1499, 1501, 1517, 1509, 1503, 1515, 1516, 1513, 1514, 1502, 
1498, 1505, 1506, 1508, 1512, 1510, 1547, 1453, 1451, 1452, 1314, 1216, 1226, 1441, 1244, 1288, 1246, 1225, 1260, 1263, 1434, 1299, 1335, 1522, 1521, 1270, 1338, 1298, 1476, 1221, 1231, 1340, 1439, 1341, 1257, 1518, 1519, 1438, 1326, 1350, 1273, 1278, 1430, 1431, 1283, 1289, 1384, 1296, 1432, 1433, 1219, 1222, 1224, 1223, 1238, 1237, 1482, 1427, 1243, 1249, 1261, 1262, 1250, 1485, 1405, 1318, 1319, 1279, 1450, 1290, 1293, 1292, 1415, 1295, 1300, 1301, 1402, 1214, 1529, 1215, 1218, 1460, 1387, 1304, 1220, 1310, 1348, 1349, 1345, 1530, 1531, 1532, 1406, 1576, 1478, 1479, 1467, 1480, 1227, 1394, 1533, 1312, 1396, 1228, 1381, 1481, 1360, 1308, 1230, 1329, 1232, 1233, 1313, 1311, 1234, 1408, 1534, 1535, 1404, 1235, 1536, 1468, 1236, 1537, 1538, 1239, 1240, 1388, 1324, 1483, 1417, 1241, 1484, 1242, 1245, 1247, 1248, 1251, 1386, 1351, 1252, 1577, 1435, 1356, 1253, 1461, 1401, 1574, 1254, 1539, 1411, 1255, 1256, 1580, 1258, 1259, 1346, 1540, 1322, 1541, 1418, 1459, 1264, 1307, 1210, 1462, 1403, 1337, 1542, 1265, 1543, 1544, 1389, 1407, 1412, 1325, 1398, 1486, 1457, 1268, 1266, 1334, 1419, 1267, 1456, 1458, 1315, 1546, 1473, 1472, 1376, 1377, 1316, 1378, 1379, 1390, 1365, 1545, 1317, 1366, 1463, 1302, 1361, 1269, 1400, 1573, 1344, 1466, 1469, 1420, 1487, 1488, 1464, 1465, 1353, 1470, 1548, 1454, 1354, 1331, 1285, 1524, 1575, 1410, 1422, 1425, 1352, 1271, 1475, 1474, 1525, 1367, 1550, 1368, 1272, 1343, 1362, 1363, 1364, 1489, 1321, 1370, 1369, 1274, 1549, 1395, 1275, 1528, 1527, 1383, 1424, 1276, 1437, 1327, 1455, 1380, 1328, 1342, 1277, 1385, 1359, 1320, 1490, 1371, 1429, 1393, 1372, 1471, 1333, 1373, 1374, 1281, 1423, 1382, 1375, 1282, 1305, 1414, 1523, 1416, 1336, 1339, 1443, 1444, 1445, 1446, 1447, 1448, 1449, 1578, 1491, 1358, 1494, 1495, 1493, 1492, 1357, 1428, 1284, 1554, 1555, 1556, 1557, 1579, 1551, 1397, 1287, 1286, 1552, 1553, 1355, 1413, 1409, 1421, 1440, 1391, 1291, 1496, 1561, 1562, 1563, 1564, 1565, 1566, 1568, 1567, 1569, 1570, 1571, 1520, 1294, 1323, 1572, 1297, 1330, 1392, 1306, 1558, 1559, 1560, 1347, 1303, 1526, 1399, 523: 2420, 1212, 1213, 1211}, + {905, 6: 905, 905, 905, 905, 35: 905, 905, 905, 905, 391: 905, 474: 905}, + {987, 987, 987, 987, 987, 987, 10: 987, 987, 987, 987, 987, 987, 987, 987, 987, 987, 987, 987, 987, 987, 987, 987, 987, 987, 987, 987, 987, 987, 987, 987, 987, 987, 987, 987, 987, 987, 987, 987, 987, 987, 987, 987, 987, 987, 987, 987, 987, 987, 987, 987, 987, 987, 987, 987, 987, 987, 987, 987, 987, 987, 987, 987, 987, 987, 987, 987, 987, 987, 987, 987, 987, 987, 987, 987, 987, 987, 987, 987, 987, 987, 987, 987, 987, 987, 987, 987, 987, 987, 987, 987, 987, 987, 987, 987, 987, 987, 987, 987, 987, 987, 987, 987, 987, 987, 987, 987, 987, 987, 987, 987, 987, 987, 987, 987, 987, 987, 987, 987, 987, 987, 987, 987, 987, 987, 987, 987, 987, 987, 987, 987, 987, 987, 987, 987, 987, 987, 987, 987, 987, 987, 987, 987, 987, 987, 987, 987, 987, 987, 987, 987, 987, 987, 987, 987, 987, 987, 987, 987, 987, 987, 987, 987, 987, 987, 987, 987, 987, 987, 987, 987, 987, 987, 987, 987, 987, 987, 987, 987, 987, 987, 987, 987, 987, 987, 987, 987, 987, 987, 987, 987, 987, 987, 987, 987, 987, 987, 987, 987, 987, 987, 987, 987, 987, 987, 987, 987, 987, 987, 987, 987, 987, 987, 987, 987, 987, 987, 987, 987, 987, 987, 987, 987, 987, 987, 987, 987, 987, 987, 987, 987, 987, 987, 987, 987, 987, 987, 987, 987, 987, 987, 987, 987, 987, 987, 987, 987, 987, 987, 987, 987, 987, 987, 987, 987, 987, 987, 987, 987, 987, 987, 987, 987, 987, 987, 987, 987, 987, 987, 987, 987, 987, 987, 987, 987, 987, 
987, 987, 987, 987, 987, 987, 987, 987, 987, 987, 987, 987, 987, 987, 987, 987, 987, 987, 987, 987, 987, 987, 987, 987, 987, 987, 987, 987, 987, 987, 987, 987, 987, 987, 987, 987, 987, 987, 987, 987, 987, 987, 987, 987, 987, 987, 987, 987, 987, 987, 987, 987, 987, 987, 987, 987, 987, 987, 987, 987, 987, 987, 987, 987, 987, 987, 987, 987, 987, 987, 987, 987, 987, 987, 987, 987, 987, 987, 987, 987, 987, 987, 987, 987, 987, 987, 987, 987, 987, 987, 987, 987, 987, 378: 987, 406: 987, 472: 987}, + {406: 1904, 564: 1950, 577: 2423}, + {907, 6: 907, 907, 907, 907, 35: 907, 907, 907, 907, 391: 907, 474: 907}, + // 1265 + {1309, 1332, 1217, 1442, 1436, 1426, 10: 1280, 1229, 1477, 1511, 1504, 1497, 1507, 1500, 1499, 1501, 1517, 1509, 1503, 1515, 1516, 1513, 1514, 1502, 1498, 1505, 1506, 1508, 1512, 1510, 1547, 1453, 1451, 1452, 1314, 1216, 1226, 1441, 1244, 1288, 1246, 1225, 1260, 1263, 1434, 1299, 1335, 1522, 1521, 1270, 1338, 1298, 1476, 1221, 1231, 1340, 1439, 1341, 1257, 1518, 1519, 1438, 1326, 1350, 1273, 1278, 1430, 1431, 1283, 1289, 1384, 1296, 1432, 1433, 1219, 1222, 1224, 1223, 1238, 1237, 1482, 1427, 1243, 1249, 1261, 1262, 1250, 1485, 1405, 1318, 1319, 1279, 1450, 1290, 1293, 1292, 1415, 1295, 1300, 1301, 1402, 1214, 1529, 1215, 1218, 1460, 1387, 1304, 1220, 1310, 1348, 1349, 1345, 1530, 1531, 1532, 1406, 1576, 1478, 1479, 1467, 1480, 1227, 1394, 1533, 1312, 1396, 1228, 1381, 1481, 1360, 1308, 1230, 1329, 1232, 1233, 1313, 1311, 1234, 1408, 1534, 1535, 1404, 1235, 1536, 1468, 1236, 1537, 1538, 1239, 1240, 1388, 1324, 1483, 1417, 1241, 1484, 1242, 1245, 1247, 1248, 1251, 1386, 1351, 1252, 1577, 1435, 1356, 1253, 1461, 1401, 1574, 1254, 1539, 1411, 1255, 1256, 1580, 1258, 1259, 1346, 1540, 1322, 1541, 1418, 1459, 1264, 1307, 1210, 1462, 1403, 1337, 1542, 1265, 1543, 1544, 1389, 1407, 1412, 1325, 1398, 1486, 1457, 1268, 1266, 1334, 1419, 1267, 1456, 1458, 1315, 1546, 1473, 1472, 1376, 1377, 1316, 1378, 1379, 1390, 1365, 1545, 1317, 1366, 1463, 1302, 1361, 1269, 1400, 1573, 1344, 1466, 1469, 1420, 1487, 1488, 1464, 1465, 1353, 1470, 1548, 1454, 1354, 1331, 1285, 1524, 1575, 1410, 1422, 1425, 1352, 1271, 1475, 1474, 1525, 1367, 1550, 1368, 1272, 1343, 1362, 1363, 1364, 1489, 1321, 1370, 1369, 1274, 1549, 1395, 1275, 1528, 1527, 1383, 1424, 1276, 1437, 1327, 1455, 1380, 1328, 1342, 1277, 1385, 1359, 1320, 1490, 1371, 1429, 1393, 1372, 1471, 1333, 1373, 1374, 1281, 1423, 1382, 1375, 1282, 1305, 1414, 1523, 1416, 1336, 1339, 1443, 1444, 1445, 1446, 1447, 1448, 1449, 1578, 1491, 1358, 1494, 1495, 1493, 1492, 1357, 1428, 1284, 1554, 1555, 1556, 1557, 1579, 1551, 1397, 1287, 1286, 1552, 1553, 1355, 1413, 1409, 1421, 1440, 1391, 1291, 1496, 1561, 1562, 1563, 1564, 1565, 1566, 1568, 1567, 1569, 1570, 1571, 1520, 1294, 1323, 1572, 1297, 1330, 1392, 1306, 1558, 1559, 1560, 1347, 1303, 1526, 1399, 373: 911, 391: 911, 523: 2379, 1212, 1213, 1211, 614: 2380, 634: 2425}, + {373: 2426}, + {1309, 1332, 1217, 1442, 1436, 1426, 10: 1280, 1229, 1477, 1511, 1504, 1497, 1507, 1500, 1499, 1501, 1517, 1509, 1503, 1515, 1516, 1513, 1514, 1502, 1498, 1505, 1506, 1508, 1512, 1510, 1547, 1453, 1451, 1452, 1314, 1216, 1226, 1441, 1244, 1288, 1246, 1225, 1260, 1263, 1434, 1299, 1335, 1522, 1521, 1270, 1338, 1298, 1476, 1221, 1231, 1340, 1439, 1341, 1257, 1518, 1519, 1438, 1326, 1350, 1273, 1278, 1430, 1431, 1283, 1289, 1384, 1296, 1432, 1433, 1219, 1222, 1224, 1223, 1238, 1237, 1482, 1427, 1243, 1249, 1261, 1262, 1250, 1485, 1405, 1318, 1319, 1279, 1450, 1290, 1293, 1292, 1415, 1295, 1300, 1301, 1402, 1214, 1529, 1215, 
1218, 1460, 1387, 1304, 1220, 1310, 1348, 1349, 1345, 1530, 1531, 1532, 1406, 1576, 1478, 1479, 1467, 1480, 1227, 1394, 1533, 1312, 1396, 1228, 1381, 1481, 1360, 1308, 1230, 1329, 1232, 1233, 1313, 1311, 1234, 1408, 1534, 1535, 1404, 1235, 1536, 1468, 1236, 1537, 1538, 1239, 1240, 1388, 1324, 1483, 1417, 1241, 1484, 1242, 1245, 1247, 1248, 1251, 1386, 1351, 1252, 1577, 1435, 1356, 1253, 1461, 1401, 1574, 1254, 1539, 1411, 1255, 1256, 1580, 1258, 1259, 1346, 1540, 1322, 1541, 1418, 1459, 1264, 1307, 1210, 1462, 1403, 1337, 1542, 1265, 1543, 1544, 1389, 1407, 1412, 1325, 1398, 1486, 1457, 1268, 1266, 1334, 1419, 1267, 1456, 1458, 1315, 1546, 1473, 1472, 1376, 1377, 1316, 1378, 1379, 1390, 1365, 1545, 1317, 1366, 1463, 1302, 1361, 1269, 1400, 1573, 1344, 1466, 1469, 1420, 1487, 1488, 1464, 1465, 1353, 1470, 1548, 1454, 1354, 1331, 1285, 1524, 1575, 1410, 1422, 1425, 1352, 1271, 1475, 1474, 1525, 1367, 1550, 1368, 1272, 1343, 1362, 1363, 1364, 1489, 1321, 1370, 1369, 1274, 1549, 1395, 1275, 1528, 1527, 1383, 1424, 1276, 1437, 1327, 1455, 1380, 1328, 1342, 1277, 1385, 1359, 1320, 1490, 1371, 1429, 1393, 1372, 1471, 1333, 1373, 1374, 1281, 1423, 1382, 1375, 1282, 1305, 1414, 1523, 1416, 1336, 1339, 1443, 1444, 1445, 1446, 1447, 1448, 1449, 1578, 1491, 1358, 1494, 1495, 1493, 1492, 1357, 1428, 1284, 1554, 1555, 1556, 1557, 1579, 1551, 1397, 1287, 1286, 1552, 1553, 1355, 1413, 1409, 1421, 1440, 1391, 1291, 1496, 1561, 1562, 1563, 1564, 1565, 1566, 1568, 1567, 1569, 1570, 1571, 1520, 1294, 1323, 1572, 1297, 1330, 1392, 1306, 1558, 1559, 1560, 1347, 1303, 1526, 1399, 373: 2392, 523: 2248, 1212, 1213, 1211, 554: 2391, 589: 2390, 598: 2427}, + {8: 2428, 2403}, + {909, 6: 909, 909, 909, 909, 35: 909, 909, 909, 909, 391: 909, 474: 909, 597: 2429}, + // 1270 + {2410, 6: 1049, 1049, 1049, 1049, 35: 2413, 2415, 2414, 2407, 391: 2412, 474: 2409, 588: 2411, 590: 2408, 596: 2406}, + {1309, 1332, 1217, 1442, 1436, 1426, 10: 1280, 1229, 1477, 1511, 1504, 1497, 1507, 1500, 1499, 1501, 1517, 1509, 1503, 1515, 1516, 1513, 1514, 1502, 1498, 1505, 1506, 1508, 1512, 1510, 1547, 1453, 1451, 1452, 1314, 1216, 1226, 1441, 1244, 1288, 1246, 1225, 1260, 1263, 1434, 1299, 1335, 1522, 1521, 1270, 1338, 1298, 1476, 1221, 1231, 1340, 1439, 1341, 1257, 1518, 1519, 1438, 1326, 1350, 1273, 1278, 1430, 1431, 1283, 1289, 1384, 1296, 1432, 1433, 1219, 1222, 1224, 1223, 1238, 1237, 1482, 1427, 1243, 1249, 1261, 1262, 1250, 1485, 1405, 1318, 1319, 1279, 1450, 1290, 1293, 1292, 1415, 1295, 1300, 1301, 1402, 1214, 1529, 1215, 1218, 1460, 1387, 1304, 1220, 1310, 1348, 1349, 1345, 1530, 1531, 1532, 1406, 1576, 1478, 1479, 1467, 1480, 1227, 1394, 1533, 1312, 1396, 1228, 1381, 1481, 1360, 1308, 1230, 1329, 1232, 1233, 1313, 1311, 1234, 1408, 1534, 1535, 1404, 1235, 1536, 1468, 1236, 1537, 1538, 1239, 1240, 1388, 1324, 1483, 1417, 1241, 1484, 1242, 1245, 1247, 1248, 1251, 1386, 1351, 1252, 1577, 1435, 1356, 1253, 1461, 1401, 1574, 1254, 1539, 1411, 1255, 1256, 1580, 1258, 1259, 1346, 1540, 1322, 1541, 1418, 1459, 1264, 1307, 1210, 1462, 1403, 1337, 1542, 1265, 1543, 1544, 1389, 1407, 1412, 1325, 1398, 1486, 1457, 1268, 1266, 1334, 1419, 1267, 1456, 1458, 1315, 1546, 1473, 1472, 1376, 1377, 1316, 1378, 1379, 1390, 1365, 1545, 1317, 1366, 1463, 1302, 1361, 1269, 1400, 1573, 1344, 1466, 1469, 1420, 1487, 1488, 1464, 1465, 1353, 1470, 1548, 1454, 1354, 1331, 1285, 1524, 1575, 1410, 1422, 1425, 1352, 1271, 1475, 1474, 1525, 1367, 1550, 1368, 1272, 1343, 1362, 1363, 1364, 1489, 1321, 1370, 1369, 1274, 1549, 1395, 1275, 1528, 1527, 1383, 1424, 
1276, 1437, 1327, 1455, 1380, 1328, 1342, 1277, 1385, 1359, 1320, 1490, 1371, 1429, 1393, 1372, 1471, 1333, 1373, 1374, 1281, 1423, 1382, 1375, 1282, 1305, 1414, 1523, 1416, 1336, 1339, 1443, 1444, 1445, 1446, 1447, 1448, 1449, 1578, 1491, 1358, 1494, 1495, 1493, 1492, 1357, 1428, 1284, 1554, 1555, 1556, 1557, 1579, 1551, 1397, 1287, 1286, 1552, 1553, 1355, 1413, 1409, 1421, 1440, 1391, 1291, 1496, 1561, 1562, 1563, 1564, 1565, 1566, 1568, 1567, 1569, 1570, 1571, 1520, 1294, 1323, 1572, 1297, 1330, 1392, 1306, 1558, 1559, 1560, 1347, 1303, 1526, 1399, 373: 911, 523: 2432, 1212, 1213, 1211, 614: 2431}, + {373: 2433}, + {373: 910}, + {1309, 1332, 1217, 1442, 1436, 1426, 10: 1280, 1229, 1477, 1511, 1504, 1497, 1507, 1500, 1499, 1501, 1517, 1509, 1503, 1515, 1516, 1513, 1514, 1502, 1498, 1505, 1506, 1508, 1512, 1510, 1547, 1453, 1451, 1452, 1314, 1216, 1226, 1441, 1244, 1288, 1246, 1225, 1260, 1263, 1434, 1299, 1335, 1522, 1521, 1270, 1338, 1298, 1476, 1221, 1231, 1340, 1439, 1341, 1257, 1518, 1519, 1438, 1326, 1350, 1273, 1278, 1430, 1431, 1283, 1289, 1384, 1296, 1432, 1433, 1219, 1222, 1224, 1223, 1238, 1237, 1482, 1427, 1243, 1249, 1261, 1262, 1250, 1485, 1405, 1318, 1319, 1279, 1450, 1290, 1293, 1292, 1415, 1295, 1300, 1301, 1402, 1214, 1529, 1215, 1218, 1460, 1387, 1304, 1220, 1310, 1348, 1349, 1345, 1530, 1531, 1532, 1406, 1576, 1478, 1479, 1467, 1480, 1227, 1394, 1533, 1312, 1396, 1228, 1381, 1481, 1360, 1308, 1230, 1329, 1232, 1233, 1313, 1311, 1234, 1408, 1534, 1535, 1404, 1235, 1536, 1468, 1236, 1537, 1538, 1239, 1240, 1388, 1324, 1483, 1417, 1241, 1484, 1242, 1245, 1247, 1248, 1251, 1386, 1351, 1252, 1577, 1435, 1356, 1253, 1461, 1401, 1574, 1254, 1539, 1411, 1255, 1256, 1580, 1258, 1259, 1346, 1540, 1322, 1541, 1418, 1459, 1264, 1307, 1210, 1462, 1403, 1337, 1542, 1265, 1543, 1544, 1389, 1407, 1412, 1325, 1398, 1486, 1457, 1268, 1266, 1334, 1419, 1267, 1456, 1458, 1315, 1546, 1473, 1472, 1376, 1377, 1316, 1378, 1379, 1390, 1365, 1545, 1317, 1366, 1463, 1302, 1361, 1269, 1400, 1573, 1344, 1466, 1469, 1420, 1487, 1488, 1464, 1465, 1353, 1470, 1548, 1454, 1354, 1331, 1285, 1524, 1575, 1410, 1422, 1425, 1352, 1271, 1475, 1474, 1525, 1367, 1550, 1368, 1272, 1343, 1362, 1363, 1364, 1489, 1321, 1370, 1369, 1274, 1549, 1395, 1275, 1528, 1527, 1383, 1424, 1276, 1437, 1327, 1455, 1380, 1328, 1342, 1277, 1385, 1359, 1320, 1490, 1371, 1429, 1393, 1372, 1471, 1333, 1373, 1374, 1281, 1423, 1382, 1375, 1282, 1305, 1414, 1523, 1416, 1336, 1339, 1443, 1444, 1445, 1446, 1447, 1448, 1449, 1578, 1491, 1358, 1494, 1495, 1493, 1492, 1357, 1428, 1284, 1554, 1555, 1556, 1557, 1579, 1551, 1397, 1287, 1286, 1552, 1553, 1355, 1413, 1409, 1421, 1440, 1391, 1291, 1496, 1561, 1562, 1563, 1564, 1565, 1566, 1568, 1567, 1569, 1570, 1571, 1520, 1294, 1323, 1572, 1297, 1330, 1392, 1306, 1558, 1559, 1560, 1347, 1303, 1526, 1399, 373: 2392, 523: 2248, 1212, 1213, 1211, 554: 2391, 589: 2390, 598: 2434}, + // 1275 + {8: 2435, 2403}, + {909, 6: 909, 909, 909, 909, 35: 909, 909, 909, 909, 391: 909, 474: 909, 597: 2436}, + {2410, 6: 1050, 1050, 1050, 1050, 35: 2413, 2415, 2414, 2407, 391: 2412, 474: 2409, 588: 2411, 590: 2408, 596: 2406}, + {1309, 1332, 1217, 1442, 1436, 1426, 10: 1280, 1229, 1477, 1511, 1504, 1497, 1507, 1500, 1499, 1501, 1517, 1509, 1503, 1515, 1516, 1513, 1514, 1502, 1498, 1505, 1506, 1508, 1512, 1510, 1547, 1453, 1451, 1452, 1314, 1216, 1226, 1441, 1244, 1288, 1246, 1225, 1260, 1263, 1434, 1299, 1335, 1522, 1521, 1270, 1338, 1298, 1476, 1221, 1231, 1340, 1439, 1341, 1257, 1518, 1519, 1438, 1326, 1350, 
1273, 1278, 1430, 1431, 1283, 1289, 1384, 1296, 1432, 1433, 1219, 1222, 1224, 1223, 1238, 1237, 1482, 1427, 1243, 1249, 1261, 1262, 1250, 1485, 1405, 1318, 1319, 1279, 1450, 1290, 1293, 1292, 1415, 1295, 1300, 1301, 1402, 1214, 1529, 1215, 1218, 1460, 1387, 1304, 1220, 1310, 1348, 1349, 1345, 1530, 1531, 1532, 1406, 1576, 1478, 1479, 1467, 1480, 1227, 1394, 1533, 1312, 1396, 1228, 1381, 1481, 1360, 1308, 1230, 1329, 1232, 1233, 1313, 1311, 1234, 1408, 1534, 1535, 1404, 1235, 1536, 1468, 1236, 1537, 1538, 1239, 1240, 1388, 1324, 1483, 1417, 1241, 1484, 1242, 1245, 1247, 1248, 1251, 1386, 1351, 1252, 1577, 1435, 1356, 1253, 1461, 1401, 1574, 1254, 1539, 1411, 1255, 1256, 1580, 1258, 1259, 1346, 1540, 1322, 1541, 1418, 1459, 1264, 1307, 1210, 1462, 1403, 1337, 1542, 1265, 1543, 1544, 1389, 1407, 1412, 1325, 1398, 1486, 1457, 1268, 1266, 1334, 1419, 1267, 1456, 1458, 1315, 1546, 1473, 1472, 1376, 1377, 1316, 1378, 1379, 1390, 1365, 1545, 1317, 1366, 1463, 1302, 1361, 1269, 1400, 1573, 1344, 1466, 1469, 1420, 1487, 1488, 1464, 1465, 1353, 1470, 1548, 1454, 1354, 1331, 1285, 1524, 1575, 1410, 1422, 1425, 1352, 1271, 1475, 1474, 1525, 1367, 1550, 1368, 1272, 1343, 1362, 1363, 1364, 1489, 1321, 1370, 1369, 1274, 1549, 1395, 1275, 1528, 1527, 1383, 1424, 1276, 1437, 1327, 1455, 1380, 1328, 1342, 1277, 1385, 1359, 1320, 1490, 1371, 1429, 1393, 1372, 1471, 1333, 1373, 1374, 1281, 1423, 1382, 1375, 1282, 1305, 1414, 1523, 1416, 1336, 1339, 1443, 1444, 1445, 1446, 1447, 1448, 1449, 1578, 1491, 1358, 1494, 1495, 1493, 1492, 1357, 1428, 1284, 1554, 1555, 1556, 1557, 1579, 1551, 1397, 1287, 1286, 1552, 1553, 1355, 1413, 1409, 1421, 1440, 1391, 1291, 1496, 1561, 1562, 1563, 1564, 1565, 1566, 1568, 1567, 1569, 1570, 1571, 1520, 1294, 1323, 1572, 1297, 1330, 1392, 1306, 1558, 1559, 1560, 1347, 1303, 1526, 1399, 373: 911, 391: 911, 523: 2379, 1212, 1213, 1211, 614: 2380, 634: 2438}, + {373: 2439}, + // 1280 + {1309, 1332, 1217, 1442, 1436, 1426, 10: 1280, 1229, 1477, 1511, 1504, 1497, 1507, 1500, 1499, 1501, 1517, 1509, 1503, 1515, 1516, 1513, 1514, 1502, 1498, 1505, 1506, 1508, 1512, 1510, 1547, 1453, 1451, 1452, 1314, 1216, 1226, 1441, 1244, 1288, 1246, 1225, 1260, 1263, 1434, 1299, 1335, 1522, 1521, 1270, 1338, 1298, 1476, 1221, 1231, 1340, 1439, 1341, 1257, 1518, 1519, 1438, 1326, 1350, 1273, 1278, 1430, 1431, 1283, 1289, 1384, 1296, 1432, 1433, 1219, 1222, 1224, 1223, 1238, 1237, 1482, 1427, 1243, 1249, 1261, 1262, 1250, 1485, 1405, 1318, 1319, 1279, 1450, 1290, 1293, 1292, 1415, 1295, 1300, 1301, 1402, 1214, 1529, 1215, 1218, 1460, 1387, 1304, 1220, 1310, 1348, 1349, 1345, 1530, 1531, 1532, 1406, 1576, 1478, 1479, 1467, 1480, 1227, 1394, 1533, 1312, 1396, 1228, 1381, 1481, 1360, 1308, 1230, 1329, 1232, 1233, 1313, 1311, 1234, 1408, 1534, 1535, 1404, 1235, 1536, 1468, 1236, 1537, 1538, 1239, 1240, 1388, 1324, 1483, 1417, 1241, 1484, 1242, 1245, 1247, 1248, 1251, 1386, 1351, 1252, 1577, 1435, 1356, 1253, 1461, 1401, 1574, 1254, 1539, 1411, 1255, 1256, 1580, 1258, 1259, 1346, 1540, 1322, 1541, 1418, 1459, 1264, 1307, 1210, 1462, 1403, 1337, 1542, 1265, 1543, 1544, 1389, 1407, 1412, 1325, 1398, 1486, 1457, 1268, 1266, 1334, 1419, 1267, 1456, 1458, 1315, 1546, 1473, 1472, 1376, 1377, 1316, 1378, 1379, 1390, 1365, 1545, 1317, 1366, 1463, 1302, 1361, 1269, 1400, 1573, 1344, 1466, 1469, 1420, 1487, 1488, 1464, 1465, 1353, 1470, 1548, 1454, 1354, 1331, 1285, 1524, 1575, 1410, 1422, 1425, 1352, 1271, 1475, 1474, 1525, 1367, 1550, 1368, 1272, 1343, 1362, 1363, 1364, 1489, 1321, 1370, 1369, 1274, 1549, 1395, 1275, 
1528, 1527, 1383, 1424, 1276, 1437, 1327, 1455, 1380, 1328, 1342, 1277, 1385, 1359, 1320, 1490, 1371, 1429, 1393, 1372, 1471, 1333, 1373, 1374, 1281, 1423, 1382, 1375, 1282, 1305, 1414, 1523, 1416, 1336, 1339, 1443, 1444, 1445, 1446, 1447, 1448, 1449, 1578, 1491, 1358, 1494, 1495, 1493, 1492, 1357, 1428, 1284, 1554, 1555, 1556, 1557, 1579, 1551, 1397, 1287, 1286, 1552, 1553, 1355, 1413, 1409, 1421, 1440, 1391, 1291, 1496, 1561, 1562, 1563, 1564, 1565, 1566, 1568, 1567, 1569, 1570, 1571, 1520, 1294, 1323, 1572, 1297, 1330, 1392, 1306, 1558, 1559, 1560, 1347, 1303, 1526, 1399, 373: 2392, 523: 2248, 1212, 1213, 1211, 554: 2391, 589: 2390, 598: 2440}, + {8: 2441, 2403}, + {909, 6: 909, 909, 909, 909, 35: 909, 909, 909, 909, 391: 909, 474: 909, 597: 2442}, + {2410, 6: 1051, 1051, 1051, 1051, 35: 2413, 2415, 2414, 2407, 391: 2412, 474: 2409, 588: 2411, 590: 2408, 596: 2406}, + {8: 2444}, + // 1285 + {6: 1001, 1001}, + {2604, 2602, 2598, 2610, 2608, 2609, 1053, 1053, 1053, 1053, 372: 2596, 374: 2601, 1060, 2597, 2607, 382: 1090, 2595, 385: 2603, 387: 1114, 2600, 2351, 2611, 578: 2605, 624: 2612, 631: 2606, 638: 2599, 655: 2613, 2671}, + {2604, 2602, 2598, 2610, 2608, 2609, 1053, 1053, 1053, 1053, 372: 2596, 374: 2601, 1060, 2597, 2607, 382: 1090, 2595, 385: 2603, 387: 1114, 2600, 2351, 2611, 578: 2605, 624: 2612, 631: 2606, 638: 2599, 655: 2613, 2594}, + {116, 116, 116, 116, 116, 116, 116, 116, 116, 116, 372: 116, 374: 116, 116, 116, 116, 382: 116, 116, 385: 116, 387: 116, 116, 116, 116}, + {115, 115, 115, 115, 115, 115, 115, 115, 115, 115, 372: 115, 374: 115, 115, 115, 115, 382: 115, 115, 385: 115, 387: 115, 115, 115, 115}, + // 1290 + {114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 372: 114, 374: 114, 114, 114, 114, 382: 114, 114, 385: 114, 387: 114, 114, 114, 114}, + {31, 31, 31, 31, 31, 31, 31, 31, 31, 31, 31, 372: 31, 2397, 31, 31, 31, 31, 382: 31, 31, 385: 31, 387: 31, 31, 31, 31, 545: 31, 31, 556: 2398, 565: 2592}, + {26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 372: 26, 374: 26, 26, 26, 26, 382: 26, 26, 385: 26, 387: 26, 26, 26, 26, 545: 26, 26, 595: 2591}, + {24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 372: 24, 2581, 24, 24, 24, 24, 382: 24, 24, 385: 24, 387: 24, 24, 24, 24, 545: 24, 24, 556: 2582, 678: 2589, 695: 2583}, + {24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 372: 24, 2581, 24, 24, 24, 24, 382: 24, 24, 385: 24, 387: 24, 24, 24, 24, 545: 24, 24, 556: 2582, 678: 2580, 695: 2583}, + // 1295 + {31, 31, 31, 31, 31, 31, 31, 31, 31, 31, 372: 31, 2397, 31, 31, 31, 31, 382: 31, 31, 385: 31, 387: 31, 31, 31, 31, 556: 2398, 565: 2579}, + {108, 108, 108, 108, 108, 108, 108, 108, 108, 108, 108, 372: 108, 108, 108, 108, 108, 108, 382: 108, 108, 385: 108, 387: 108, 108, 108, 108, 545: 108, 108}, + {107, 107, 107, 107, 107, 107, 107, 107, 107, 107, 107, 372: 107, 107, 107, 107, 107, 107, 382: 107, 107, 385: 107, 387: 107, 107, 107, 107, 545: 107, 107}, + {106, 106, 106, 106, 106, 106, 106, 106, 106, 106, 106, 372: 106, 106, 106, 106, 106, 106, 382: 106, 106, 385: 106, 387: 106, 106, 106, 106, 545: 106, 106}, + {105, 105, 105, 105, 105, 105, 105, 105, 105, 105, 105, 372: 105, 105, 105, 105, 105, 105, 382: 105, 105, 385: 105, 387: 105, 105, 105, 105, 545: 105, 105}, + // 1300 + {104, 104, 104, 104, 104, 104, 104, 104, 104, 104, 104, 372: 104, 104, 104, 104, 104, 104, 382: 104, 104, 385: 104, 387: 104, 104, 104, 104, 545: 104, 104}, + {103, 103, 103, 103, 103, 103, 103, 103, 103, 103, 103, 372: 103, 103, 103, 103, 103, 103, 382: 103, 103, 385: 103, 387: 103, 103, 103, 103, 545: 103, 103}, + {102, 
102, 102, 102, 102, 102, 102, 102, 102, 102, 102, 372: 102, 102, 102, 102, 102, 102, 382: 102, 102, 385: 102, 387: 102, 102, 102, 102, 545: 102, 102}, + {101, 101, 101, 101, 101, 101, 101, 101, 101, 101, 101, 372: 101, 101, 101, 101, 101, 101, 382: 101, 101, 385: 101, 387: 101, 101, 101, 101, 545: 101, 101}, + {100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 372: 100, 100, 100, 100, 100, 100, 382: 100, 100, 385: 100, 387: 100, 100, 100, 100, 545: 100, 100}, + // 1305 + {99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 372: 99, 99, 99, 99, 99, 99, 382: 99, 99, 385: 99, 387: 99, 99, 99, 99, 545: 99, 99}, + {98, 98, 98, 98, 98, 98, 98, 98, 98, 98, 98, 372: 98, 98, 98, 98, 98, 98, 382: 98, 98, 385: 98, 387: 98, 98, 98, 98, 545: 98, 98}, + {97, 97, 97, 97, 97, 97, 97, 97, 97, 97, 97, 372: 97, 374: 97, 97, 97, 97, 382: 97, 97, 385: 97, 387: 97, 97, 97, 97, 545: 97, 97}, + {96, 96, 96, 96, 96, 96, 96, 96, 96, 96, 96, 372: 96, 374: 96, 96, 96, 96, 382: 96, 96, 385: 96, 387: 96, 96, 96, 96, 545: 96, 96}, + {92, 92, 92, 92, 92, 92, 92, 92, 92, 92, 92, 372: 92, 92, 92, 92, 92, 92, 382: 92, 92, 385: 92, 387: 92, 92, 92, 92, 545: 92, 92}, + // 1310 + {91, 91, 91, 91, 91, 91, 91, 91, 91, 91, 91, 372: 91, 91, 91, 91, 91, 91, 382: 91, 91, 385: 91, 387: 91, 91, 91, 91, 545: 91, 91}, + {90, 90, 90, 90, 90, 90, 90, 90, 90, 90, 90, 372: 90, 90, 90, 90, 90, 90, 382: 90, 90, 385: 90, 387: 90, 90, 90, 90, 545: 90, 90}, + {89, 89, 89, 89, 89, 89, 89, 89, 89, 89, 89, 372: 89, 89, 89, 89, 89, 89, 382: 89, 89, 385: 89, 387: 89, 89, 89, 89, 545: 89, 89}, + {88, 88, 88, 88, 88, 88, 88, 88, 88, 88, 88, 372: 88, 88, 88, 88, 88, 88, 382: 88, 88, 385: 88, 387: 88, 88, 88, 88, 545: 88, 88}, + {87, 87, 87, 87, 87, 87, 87, 87, 87, 87, 87, 372: 87, 87, 87, 87, 87, 87, 382: 87, 87, 385: 87, 387: 87, 87, 87, 87, 545: 87, 87, 770: 2578}, + // 1315 + {85, 85, 85, 85, 85, 85, 85, 85, 85, 85, 372: 85, 85, 85, 85, 85, 85, 382: 85, 85, 385: 85, 387: 85, 85, 85, 85}, + {18, 18, 18, 18, 18, 18, 18, 18, 18, 18, 11: 2542, 372: 18, 2397, 18, 18, 18, 18, 382: 18, 18, 385: 18, 387: 18, 18, 18, 18, 470: 2548, 2549, 2540, 556: 2575, 559: 2541, 572: 2576}, + {18, 18, 18, 18, 18, 18, 18, 18, 18, 18, 11: 2542, 372: 18, 2397, 18, 18, 18, 18, 382: 18, 18, 385: 18, 387: 18, 18, 18, 18, 470: 2548, 2549, 2540, 556: 2572, 559: 2541, 572: 2573}, + {373: 2397, 556: 2570}, + {373: 2397, 556: 2568}, + // 1320 + {31, 31, 31, 31, 31, 31, 31, 31, 31, 31, 372: 31, 2397, 31, 31, 31, 31, 382: 31, 31, 385: 31, 387: 31, 31, 31, 31, 556: 2398, 565: 2567}, + {373: 2397, 556: 2566}, + {76, 76, 76, 76, 76, 76, 76, 76, 76, 76, 372: 76, 374: 76, 76, 76, 76, 382: 76, 76, 385: 76, 387: 76, 76, 76, 76}, + {18, 18, 18, 18, 18, 18, 18, 18, 18, 18, 11: 2542, 39: 2537, 2539, 2538, 372: 18, 374: 18, 18, 18, 18, 382: 18, 18, 385: 18, 387: 18, 18, 18, 18, 470: 2548, 2549, 2540, 559: 2541, 572: 2536, 636: 2565}, + {373: 2561}, + // 1325 + {373: 2554}, + {72, 72, 72, 72, 72, 72, 72, 72, 72, 72, 372: 72, 374: 72, 72, 72, 72, 382: 72, 72, 385: 72, 387: 72, 72, 72, 72}, + {18, 18, 18, 18, 18, 18, 18, 18, 18, 18, 11: 2542, 39: 2537, 2539, 2538, 372: 18, 374: 18, 18, 18, 18, 382: 18, 18, 385: 18, 387: 18, 18, 18, 18, 470: 2533, 2534, 2540, 489: 2492, 2491, 493: 2535, 559: 2541, 572: 2536, 636: 2532, 711: 2531}, + {69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 11: 69, 372: 69, 69, 69, 69, 69, 69, 382: 69, 69, 385: 69, 387: 69, 69, 69, 69, 470: 69, 69, 69, 582: 2530}, + {68, 68, 68, 68, 68, 68, 68, 68, 68, 68, 11: 68, 372: 68, 68, 68, 68, 68, 68, 382: 68, 68, 385: 68, 387: 68, 68, 68, 68, 
470: 68, 68, 68, 582: 2529}, + // 1330 + {67, 67, 67, 67, 67, 67, 67, 67, 67, 67, 11: 67, 372: 67, 67, 67, 67, 67, 67, 382: 67, 67, 385: 67, 387: 67, 67, 67, 67, 470: 67, 67, 67, 489: 2527, 2526, 582: 2528}, + {470: 2520, 2521, 489: 2523, 2522}, + {62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 11: 62, 39: 62, 62, 62, 372: 62, 62, 62, 62, 62, 62, 382: 62, 62, 385: 62, 387: 62, 62, 62, 62, 470: 62, 62, 62}, + {61, 61, 61, 61, 61, 61, 61, 61, 61, 61, 11: 61, 39: 61, 61, 61, 372: 61, 61, 61, 61, 61, 61, 382: 61, 61, 385: 61, 387: 61, 61, 61, 61, 470: 61, 61, 61}, + {373: 58}, + // 1335 + {52, 52, 52, 52, 52, 52, 52, 52, 52, 52, 52, 372: 52, 52, 52, 52, 52, 52, 382: 52, 52, 385: 52, 387: 52, 52, 52, 52, 545: 52, 52}, + {51, 51, 51, 51, 51, 51, 51, 51, 51, 51, 51, 372: 51, 51, 51, 51, 51, 51, 382: 51, 51, 385: 51, 387: 51, 51, 51, 51, 545: 51, 51}, + {50, 50, 50, 50, 50, 50, 50, 50, 50, 50, 372: 50, 374: 50, 50, 50, 50, 382: 50, 50, 385: 50, 387: 50, 50, 50, 50}, + {31, 31, 31, 31, 31, 31, 31, 31, 31, 31, 372: 31, 2397, 31, 31, 31, 31, 382: 31, 31, 385: 31, 387: 31, 31, 31, 31, 556: 2398, 565: 2519}, + {48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 372: 48, 374: 48, 48, 48, 48, 382: 48, 48, 385: 48, 387: 48, 48, 48, 48}, + // 1340 + {47, 47, 47, 47, 47, 47, 47, 47, 47, 47, 372: 47, 374: 47, 47, 47, 47, 382: 47, 47, 385: 47, 387: 47, 47, 47, 47}, + {45, 45, 45, 45, 45, 45, 45, 45, 45, 45, 11: 45, 39: 45, 45, 45, 372: 45, 374: 45, 45, 45, 45, 382: 45, 45, 385: 45, 387: 45, 45, 45, 45, 470: 45, 45, 45}, + {31, 31, 31, 31, 31, 31, 31, 31, 31, 31, 11: 31, 39: 31, 31, 31, 372: 31, 2397, 31, 31, 31, 31, 382: 31, 31, 385: 31, 387: 31, 31, 31, 31, 470: 31, 31, 31, 556: 2398, 565: 2518}, + {43, 43, 43, 43, 43, 43, 43, 43, 43, 43, 11: 43, 39: 43, 43, 43, 372: 43, 374: 43, 43, 43, 43, 382: 43, 43, 385: 43, 387: 43, 43, 43, 43, 470: 43, 43, 43}, + {42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 11: 42, 39: 42, 42, 42, 372: 42, 374: 42, 42, 42, 42, 382: 42, 42, 385: 42, 387: 42, 42, 42, 42, 470: 42, 42, 42}, + // 1345 + {37, 37, 37, 37, 37, 37, 37, 37, 37, 37, 372: 37, 374: 37, 37, 37, 37, 382: 37, 37, 385: 37, 387: 37, 37, 37, 37}, + {31, 31, 31, 31, 31, 31, 31, 31, 31, 31, 372: 31, 2397, 31, 31, 31, 31, 382: 31, 31, 385: 31, 387: 31, 31, 31, 31, 556: 2398, 565: 2517}, + {31, 31, 31, 31, 31, 31, 31, 31, 31, 31, 372: 31, 2397, 31, 31, 31, 31, 382: 31, 31, 385: 31, 387: 31, 31, 31, 31, 556: 2398, 565: 2516}, + {31, 31, 31, 31, 31, 31, 31, 31, 31, 31, 372: 31, 2397, 31, 31, 31, 31, 382: 31, 31, 385: 31, 387: 31, 31, 31, 31, 556: 2398, 565: 2515}, + {31, 31, 31, 31, 31, 31, 31, 31, 31, 31, 31, 372: 31, 2397, 31, 31, 31, 31, 382: 31, 31, 385: 31, 387: 31, 31, 31, 31, 545: 31, 31, 556: 2398, 565: 2509}, + // 1350 + {26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 372: 26, 374: 26, 26, 26, 26, 382: 26, 26, 385: 26, 387: 26, 26, 26, 26, 545: 26, 26, 595: 2510}, + {33, 33, 33, 33, 33, 33, 33, 33, 33, 33, 2512, 372: 33, 374: 33, 33, 33, 33, 382: 33, 33, 385: 33, 387: 33, 33, 33, 33, 545: 2511, 2513, 594: 2514}, + {29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 372: 29, 374: 29, 29, 29, 29, 382: 29, 29, 385: 29, 387: 29, 29, 29, 29, 545: 29, 29}, + {28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 372: 28, 374: 28, 28, 28, 28, 382: 28, 28, 385: 28, 387: 28, 28, 28, 28, 545: 28, 28}, + {27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 372: 27, 374: 27, 27, 27, 27, 382: 27, 27, 385: 27, 387: 27, 27, 27, 27, 545: 27, 27}, + // 1355 + {25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 372: 25, 374: 25, 25, 25, 25, 382: 25, 25, 385: 25, 387: 25, 25, 25, 25, 545: 
25, 25}, + {34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 372: 34, 374: 34, 34, 34, 34, 382: 34, 34, 385: 34, 387: 34, 34, 34, 34}, + {35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 372: 35, 374: 35, 35, 35, 35, 382: 35, 35, 385: 35, 387: 35, 35, 35, 35}, + {36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 372: 36, 374: 36, 36, 36, 36, 382: 36, 36, 385: 36, 387: 36, 36, 36, 36}, + {44, 44, 44, 44, 44, 44, 44, 44, 44, 44, 11: 44, 39: 44, 44, 44, 372: 44, 374: 44, 44, 44, 44, 382: 44, 44, 385: 44, 387: 44, 44, 44, 44, 470: 44, 44, 44}, + // 1360 + {49, 49, 49, 49, 49, 49, 49, 49, 49, 49, 372: 49, 374: 49, 49, 49, 49, 382: 49, 49, 385: 49, 387: 49, 49, 49, 49}, + {66, 66, 66, 66, 66, 66, 66, 66, 66, 66, 11: 66, 372: 66, 66, 66, 66, 66, 66, 382: 66, 66, 385: 66, 387: 66, 66, 66, 66, 470: 66, 66, 66, 582: 2525}, + {65, 65, 65, 65, 65, 65, 65, 65, 65, 65, 11: 65, 372: 65, 65, 65, 65, 65, 65, 382: 65, 65, 385: 65, 387: 65, 65, 65, 65, 470: 65, 65, 65, 582: 2524}, + {373: 60}, + {373: 59}, + // 1365 + {373: 54}, + {373: 55}, + {373: 57}, + {373: 56}, + {373: 53}, + // 1370 + {63, 63, 63, 63, 63, 63, 63, 63, 63, 63, 11: 63, 39: 63, 63, 63, 372: 63, 63, 63, 63, 63, 63, 382: 63, 63, 385: 63, 387: 63, 63, 63, 63, 470: 63, 63, 63}, + {64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 11: 64, 39: 64, 64, 64, 372: 64, 64, 64, 64, 64, 64, 382: 64, 64, 385: 64, 387: 64, 64, 64, 64, 470: 64, 64, 64}, + {18, 18, 18, 18, 18, 18, 18, 18, 18, 18, 11: 2542, 39: 2537, 2539, 2538, 372: 18, 374: 18, 18, 18, 18, 382: 18, 18, 385: 18, 387: 18, 18, 18, 18, 470: 2548, 2549, 2540, 559: 2541, 572: 2536, 636: 2553}, + {70, 70, 70, 70, 70, 70, 70, 70, 70, 70, 372: 70, 374: 70, 70, 70, 70, 382: 70, 70, 385: 70, 387: 70, 70, 70, 70}, + {480: 2551, 582: 2530}, + // 1375 + {480: 2550, 582: 2529}, + {46, 46, 46, 46, 46, 46, 46, 46, 46, 46, 372: 46, 374: 46, 46, 46, 46, 382: 46, 46, 385: 46, 387: 46, 46, 46, 46}, + {41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 372: 41, 374: 41, 41, 41, 41, 382: 41, 41, 385: 41, 387: 41, 41, 41, 41}, + {40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 372: 40, 374: 40, 40, 40, 40, 382: 40, 40, 385: 40, 387: 40, 40, 40, 40}, + {39, 39, 39, 39, 39, 39, 39, 39, 39, 39, 372: 39, 374: 39, 39, 39, 39, 382: 39, 39, 385: 39, 387: 39, 39, 39, 39}, + // 1380 + {38, 38, 38, 38, 38, 38, 38, 38, 38, 38, 372: 38, 374: 38, 38, 38, 38, 382: 38, 38, 385: 38, 387: 38, 38, 38, 38}, + {15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 11: 2542, 372: 15, 374: 15, 15, 15, 15, 382: 15, 15, 385: 15, 387: 15, 15, 15, 15, 470: 2548, 2549, 559: 2547, 635: 2546}, + {1309, 1332, 1217, 1442, 1436, 1426, 10: 1280, 1229, 1477, 1511, 1504, 1497, 1507, 1500, 1499, 1501, 1517, 1509, 1503, 1515, 1516, 1513, 1514, 1502, 1498, 1505, 1506, 1508, 1512, 1510, 1547, 1453, 1451, 1452, 1314, 1216, 1226, 1441, 1244, 1288, 1246, 1225, 1260, 1263, 1434, 1299, 1335, 1522, 1521, 1270, 1338, 1298, 1476, 1221, 1231, 1340, 1439, 1341, 1257, 1518, 1519, 1438, 1326, 1350, 1273, 1278, 1430, 1431, 1283, 1289, 1384, 1296, 1432, 1433, 1219, 1222, 1224, 1223, 1238, 1237, 1482, 1427, 1243, 1249, 1261, 1262, 1250, 1485, 1405, 1318, 1319, 1279, 1450, 1290, 1293, 1292, 1415, 1295, 1300, 1301, 1402, 1214, 1529, 1215, 1218, 1460, 1387, 1304, 1220, 1310, 1348, 1349, 1345, 1530, 1531, 1532, 1406, 1576, 1478, 1479, 1467, 1480, 1227, 1394, 1533, 1312, 1396, 1228, 1381, 1481, 1360, 1308, 1230, 1329, 1232, 1233, 1313, 1311, 1234, 1408, 1534, 1535, 1404, 1235, 1536, 1468, 1236, 1537, 1538, 1239, 1240, 1388, 1324, 1483, 1417, 1241, 1484, 1242, 1245, 1247, 1248, 1251, 1386, 1351, 1252, 1577, 1435, 1356, 1253, 1461, 1401, 
1574, 1254, 1539, 1411, 1255, 1256, 1580, 1258, 1259, 1346, 1540, 1322, 1541, 1418, 1459, 1264, 1307, 1210, 1462, 1403, 1337, 1542, 1265, 1543, 1544, 1389, 1407, 1412, 1325, 1398, 1486, 1457, 1268, 1266, 1334, 1419, 1267, 1456, 1458, 1315, 1546, 1473, 1472, 1376, 1377, 1316, 1378, 1379, 1390, 1365, 1545, 1317, 1366, 1463, 1302, 1361, 1269, 1400, 1573, 1344, 1466, 1469, 1420, 1487, 1488, 1464, 1465, 1353, 1470, 1548, 1454, 1354, 1331, 1285, 1524, 1575, 1410, 1422, 1425, 1352, 1271, 1475, 1474, 1525, 1367, 1550, 1368, 1272, 1343, 1362, 1363, 1364, 1489, 1321, 1370, 1369, 1274, 1549, 1395, 1275, 1528, 1527, 1383, 1424, 1276, 1437, 1327, 1455, 1380, 1328, 1342, 1277, 1385, 1359, 1320, 1490, 1371, 1429, 1393, 1372, 1471, 1333, 1373, 1374, 1281, 1423, 1382, 1375, 1282, 1305, 1414, 1523, 1416, 1336, 1339, 1443, 1444, 1445, 1446, 1447, 1448, 1449, 1578, 1491, 1358, 1494, 1495, 1493, 1492, 1357, 1428, 1284, 1554, 1555, 1556, 1557, 1579, 1551, 1397, 1287, 1286, 1552, 1553, 1355, 1413, 1409, 1421, 1440, 1391, 1291, 1496, 1561, 1562, 1563, 1564, 1565, 1566, 1568, 1567, 1569, 1570, 1571, 1520, 1294, 1323, 1572, 1297, 1330, 1392, 1306, 1558, 1559, 1560, 1347, 1303, 1526, 1399, 378: 1831, 472: 1830, 523: 1832, 1212, 1213, 1211, 581: 1829, 609: 2543}, + {12, 12, 12, 12, 12, 12, 10: 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 378: 12, 403: 12, 472: 12}, + {20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 372: 20, 374: 20, 20, 20, 20, 382: 20, 20, 385: 20, 387: 20, 20, 20, 20, 472: 2544, 764: 2545}, + // 1385 + {19, 19, 19, 19, 19, 19, 19, 19, 19, 19, 372: 19, 374: 19, 19, 19, 19, 382: 19, 19, 385: 19, 387: 19, 19, 19, 19}, + {16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 372: 16, 374: 16, 16, 16, 16, 382: 16, 16, 385: 16, 387: 16, 16, 16, 16}, + {17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 372: 17, 374: 17, 17, 17, 17, 382: 17, 17, 385: 17, 387: 17, 17, 17, 17}, + {1309, 1332, 1217, 1442, 1436, 1426, 10: 1280, 1229, 1477, 1511, 1504, 1497, 1507, 1500, 1499, 1501, 1517, 1509, 1503, 1515, 1516, 1513, 1514, 1502, 1498, 1505, 1506, 1508, 1512, 1510, 1547, 1453, 1451, 1452, 1314, 1216, 1226, 1441, 1244, 1288, 1246, 1225, 1260, 1263, 1434, 1299, 1335, 
1522, 1521, 1270, 1338, 1298, 1476, 1221, 1231, 1340, 1439, 1341, 1257, 1518, 1519, 1438, 1326, 1350, 1273, 1278, 1430, 1431, 1283, 1289, 1384, 1296, 1432, 1433, 1219, 1222, 1224, 1223, 1238, 1237, 1482, 1427, 1243, 1249, 1261, 1262, 1250, 1485, 1405, 1318, 1319, 1279, 1450, 1290, 1293, 1292, 1415, 1295, 1300, 1301, 1402, 1214, 1529, 1215, 1218, 1460, 1387, 1304, 1220, 1310, 1348, 1349, 1345, 1530, 1531, 1532, 1406, 1576, 1478, 1479, 1467, 1480, 1227, 1394, 1533, 1312, 1396, 1228, 1381, 1481, 1360, 1308, 1230, 1329, 1232, 1233, 1313, 1311, 1234, 1408, 1534, 1535, 1404, 1235, 1536, 1468, 1236, 1537, 1538, 1239, 1240, 1388, 1324, 1483, 1417, 1241, 1484, 1242, 1245, 1247, 1248, 1251, 1386, 1351, 1252, 1577, 1435, 1356, 1253, 1461, 1401, 1574, 1254, 1539, 1411, 1255, 1256, 1580, 1258, 1259, 1346, 1540, 1322, 1541, 1418, 1459, 1264, 1307, 1210, 1462, 1403, 1337, 1542, 1265, 1543, 1544, 1389, 1407, 1412, 1325, 1398, 1486, 1457, 1268, 1266, 1334, 1419, 1267, 1456, 1458, 1315, 1546, 1473, 1472, 1376, 1377, 1316, 1378, 1379, 1390, 1365, 1545, 1317, 1366, 1463, 1302, 1361, 1269, 1400, 1573, 1344, 1466, 1469, 1420, 1487, 1488, 1464, 1465, 1353, 1470, 1548, 1454, 1354, 1331, 1285, 1524, 1575, 1410, 1422, 1425, 1352, 1271, 1475, 1474, 1525, 1367, 1550, 1368, 1272, 1343, 1362, 1363, 1364, 1489, 1321, 1370, 1369, 1274, 1549, 1395, 1275, 1528, 1527, 1383, 1424, 1276, 1437, 1327, 1455, 1380, 1328, 1342, 1277, 1385, 1359, 1320, 1490, 1371, 1429, 1393, 1372, 1471, 1333, 1373, 1374, 1281, 1423, 1382, 1375, 1282, 1305, 1414, 1523, 1416, 1336, 1339, 1443, 1444, 1445, 1446, 1447, 1448, 1449, 1578, 1491, 1358, 1494, 1495, 1493, 1492, 1357, 1428, 1284, 1554, 1555, 1556, 1557, 1579, 1551, 1397, 1287, 1286, 1552, 1553, 1355, 1413, 1409, 1421, 1440, 1391, 1291, 1496, 1561, 1562, 1563, 1564, 1565, 1566, 1568, 1567, 1569, 1570, 1571, 1520, 1294, 1323, 1572, 1297, 1330, 1392, 1306, 1558, 1559, 1560, 1347, 1303, 1526, 1399, 378: 1831, 472: 1830, 523: 1832, 1212, 1213, 1211, 581: 1829, 609: 2552}, + {480: 2551}, + // 1390 + {480: 2550}, + {11, 11, 11, 11, 11, 11, 10: 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 378: 11, 403: 11, 472: 11}, + {13, 
13, 13, 13, 13, 13, 10: 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 378: 13, 403: 13, 472: 13}, + {14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 372: 14, 374: 14, 14, 14, 14, 382: 14, 14, 385: 14, 387: 14, 14, 14, 14}, + {71, 71, 71, 71, 71, 71, 71, 71, 71, 71, 372: 71, 374: 71, 71, 71, 71, 382: 71, 71, 385: 71, 387: 71, 71, 71, 71}, + // 1395 + {378: 2556, 702: 2555}, + {8: 2557, 2558}, + {8: 8, 8}, + {15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 11: 2542, 372: 15, 374: 15, 15, 15, 15, 382: 15, 15, 385: 15, 387: 15, 15, 15, 15, 470: 2548, 2549, 559: 2547, 635: 2560}, + {378: 2559}, + // 1400 + {8: 7, 7}, + {73, 73, 73, 73, 73, 73, 73, 73, 73, 73, 372: 73, 374: 73, 73, 73, 73, 382: 73, 73, 385: 73, 387: 73, 73, 73, 73}, + {378: 2556, 702: 2562}, + {8: 2563, 2558}, + {15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 11: 2542, 372: 15, 374: 15, 15, 15, 15, 382: 15, 15, 385: 15, 387: 15, 15, 15, 15, 470: 2548, 2549, 559: 2547, 635: 2564}, + // 1405 + {74, 74, 74, 74, 74, 74, 74, 74, 74, 74, 372: 74, 374: 74, 74, 74, 74, 382: 74, 74, 385: 74, 387: 74, 74, 74, 74}, + {75, 75, 75, 75, 75, 75, 75, 75, 75, 75, 372: 75, 374: 75, 75, 75, 75, 382: 75, 75, 385: 75, 387: 75, 75, 75, 75}, + {77, 77, 77, 77, 77, 77, 77, 77, 77, 77, 372: 77, 374: 77, 77, 77, 77, 382: 77, 77, 385: 77, 387: 77, 77, 77, 77}, + {78, 78, 78, 78, 78, 78, 78, 78, 78, 78, 372: 78, 374: 78, 78, 78, 78, 382: 78, 78, 385: 78, 387: 78, 78, 78, 78}, + {18, 18, 18, 18, 18, 18, 18, 18, 18, 18, 11: 2542, 372: 18, 374: 18, 18, 18, 18, 382: 18, 18, 385: 18, 387: 18, 18, 18, 18, 470: 2548, 2549, 2540, 559: 2541, 572: 2569}, + // 1410 + {79, 79, 79, 79, 79, 79, 79, 79, 79, 79, 372: 79, 374: 79, 79, 79, 79, 382: 79, 79, 385: 79, 387: 79, 79, 79, 79}, + {18, 18, 18, 18, 18, 18, 18, 18, 18, 18, 11: 2542, 372: 18, 374: 18, 18, 18, 18, 382: 18, 18, 385: 18, 387: 18, 18, 18, 18, 470: 2548, 2549, 2540, 559: 2541, 572: 2571}, + {80, 80, 80, 80, 80, 80, 80, 80, 80, 80, 372: 80, 374: 80, 80, 80, 80, 382: 80, 80, 385: 80, 387: 80, 80, 80, 80}, + {18, 18, 18, 18, 18, 18, 18, 18, 18, 18, 11: 2542, 372: 18, 374: 18, 18, 18, 18, 382: 18, 18, 385: 18, 387: 18, 18, 18, 18, 470: 2548, 2549, 2540, 559: 
2541, 572: 2574}, + {81, 81, 81, 81, 81, 81, 81, 81, 81, 81, 372: 81, 374: 81, 81, 81, 81, 382: 81, 81, 385: 81, 387: 81, 81, 81, 81}, + // 1415 + {82, 82, 82, 82, 82, 82, 82, 82, 82, 82, 372: 82, 374: 82, 82, 82, 82, 382: 82, 82, 385: 82, 387: 82, 82, 82, 82}, + {18, 18, 18, 18, 18, 18, 18, 18, 18, 18, 11: 2542, 372: 18, 374: 18, 18, 18, 18, 382: 18, 18, 385: 18, 387: 18, 18, 18, 18, 470: 2548, 2549, 2540, 559: 2541, 572: 2577}, + {83, 83, 83, 83, 83, 83, 83, 83, 83, 83, 372: 83, 374: 83, 83, 83, 83, 382: 83, 83, 385: 83, 387: 83, 83, 83, 83}, + {84, 84, 84, 84, 84, 84, 84, 84, 84, 84, 372: 84, 374: 84, 84, 84, 84, 382: 84, 84, 385: 84, 387: 84, 84, 84, 84}, + {86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 372: 86, 86, 86, 86, 86, 86, 382: 86, 86, 385: 86, 387: 86, 86, 86, 86, 545: 86, 86}, + // 1420 + {109, 109, 109, 109, 109, 109, 109, 109, 109, 109, 372: 109, 374: 109, 109, 109, 109, 382: 109, 109, 385: 109, 387: 109, 109, 109, 109}, + {26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 372: 26, 374: 26, 26, 26, 26, 382: 26, 26, 385: 26, 387: 26, 26, 26, 26, 545: 26, 26, 595: 2588}, + {406: 1904, 564: 1950, 577: 2584}, + {23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 372: 23, 374: 23, 23, 23, 23, 382: 23, 23, 385: 23, 387: 23, 23, 23, 23, 545: 23, 23}, + {22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 372: 22, 374: 22, 22, 22, 22, 382: 22, 22, 385: 22, 387: 22, 22, 22, 22, 545: 22, 22}, + // 1425 + {8: 2400, 2585}, + {406: 1904, 564: 1950, 577: 2586}, + {8: 2587}, + {21, 21, 21, 21, 21, 21, 21, 21, 21, 21, 21, 372: 21, 374: 21, 21, 21, 21, 382: 21, 21, 385: 21, 387: 21, 21, 21, 21, 545: 21, 21}, + {110, 110, 110, 110, 110, 110, 110, 110, 110, 110, 2512, 372: 110, 374: 110, 110, 110, 110, 382: 110, 110, 385: 110, 387: 110, 110, 110, 110, 545: 2511, 2513, 594: 2514}, + // 1430 + {26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 372: 26, 374: 26, 26, 26, 26, 382: 26, 26, 385: 26, 387: 26, 26, 26, 26, 545: 26, 26, 595: 2590}, + {111, 111, 111, 111, 111, 111, 111, 111, 111, 111, 2512, 372: 111, 374: 111, 111, 111, 111, 382: 111, 111, 385: 111, 387: 111, 111, 111, 111, 545: 2511, 2513, 594: 2514}, + {112, 112, 112, 112, 112, 112, 112, 112, 112, 112, 2512, 372: 112, 374: 112, 112, 112, 112, 382: 112, 112, 385: 112, 387: 112, 112, 112, 112, 545: 2511, 2513, 594: 2514}, + {26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 372: 26, 374: 26, 26, 26, 26, 382: 26, 26, 385: 26, 387: 26, 26, 26, 26, 545: 26, 26, 595: 2593}, + {113, 113, 113, 113, 113, 113, 113, 113, 113, 113, 2512, 372: 113, 374: 113, 113, 113, 113, 382: 113, 113, 385: 113, 387: 113, 113, 113, 113, 545: 2511, 2513, 594: 2514}, + // 1435 + {6: 1099, 1099, 1099, 1099}, + {382: 1089}, + {376: 2670}, + {1081, 1081, 1081, 1081, 1081, 1081, 1081, 1081, 1081, 1081, 372: 1081, 374: 1081, 1081, 1081, 1081, 382: 1081, 1081, 385: 1081, 387: 1081, 1081, 1081, 1081}, + {1080, 1080, 1080, 1080, 1080, 1080, 1080, 1080, 1080, 1080, 372: 1080, 374: 1080, 1080, 1080, 1080, 382: 1080, 1080, 385: 1080, 387: 1080, 1080, 1080, 1080}, + // 1440 + {382: 2669}, + {1078, 1078, 1078, 1078, 1078, 1078, 1078, 1078, 1078, 1078, 372: 1078, 374: 1078, 1078, 1078, 1078, 382: 2668, 1078, 385: 1078, 387: 1078, 1078, 1078, 1078}, + {376: 1646, 378: 1655, 2661, 2662, 406: 1650, 419: 1645, 1647, 430: 1649, 1648, 435: 1654, 2650, 2647, 439: 1653, 2648, 2649, 1652, 527: 2660, 529: 1651, 690: 2645, 2646, 2658, 700: 2659, 732: 2657}, + {374: 2655}, + {796: 2643}, + // 1445 + {378: 2642}, + {387: 2634}, + {375: 2627}, + {1309, 1332, 1217, 1442, 1436, 1426, 10: 1280, 1229, 1477, 1511, 1504, 1497, 
1507, 1500, 1499, 1501, 1517, 1509, 1503, 1515, 1516, 1513, 1514, 1502, 1498, 1505, 1506, 1508, 1512, 1510, 1547, 1453, 1451, 1452, 1314, 1216, 1226, 1441, 1244, 1288, 1246, 1225, 1260, 1263, 1434, 1299, 1335, 1522, 1521, 1270, 1338, 1298, 1476, 1221, 1231, 1340, 1439, 1341, 1257, 1518, 1519, 1438, 1326, 1350, 1273, 1278, 1430, 1431, 1283, 1289, 1384, 1296, 1432, 1433, 1219, 1222, 1224, 1223, 1238, 1237, 1482, 1427, 1243, 1249, 1261, 1262, 1250, 1485, 1405, 1318, 1319, 1279, 1450, 1290, 1293, 1292, 1415, 1295, 1300, 1301, 1402, 1214, 1529, 1215, 1218, 1460, 1387, 1304, 1220, 1310, 1348, 1349, 1345, 1530, 1531, 1532, 1406, 1576, 1478, 1479, 1467, 1480, 1227, 1394, 1533, 1312, 1396, 1228, 1381, 1481, 1360, 1308, 1230, 1329, 1232, 1233, 1313, 1311, 1234, 1408, 1534, 1535, 1404, 1235, 1536, 1468, 1236, 1537, 1538, 1239, 1240, 1388, 1324, 1483, 1417, 1241, 1484, 1242, 1245, 1247, 1248, 1251, 1386, 1351, 1252, 1577, 1435, 1356, 1253, 1461, 1401, 1574, 1254, 1539, 1411, 1255, 1256, 1580, 1258, 1259, 1346, 1540, 1322, 1541, 1418, 1459, 1264, 1307, 1210, 1462, 1403, 1337, 1542, 1265, 1543, 1544, 1389, 1407, 1412, 1325, 1398, 1486, 1457, 1268, 1266, 1334, 1419, 1267, 1456, 1458, 1315, 1546, 1473, 1472, 1376, 1377, 1316, 1378, 1379, 1390, 1365, 1545, 1317, 1366, 1463, 1302, 1361, 1269, 1400, 1573, 1344, 1466, 1469, 1420, 1487, 1488, 1464, 1465, 1353, 1470, 1548, 1454, 1354, 1331, 1285, 1524, 1575, 1410, 1422, 1425, 1352, 1271, 1475, 1474, 1525, 1367, 1550, 1368, 1272, 1343, 1362, 1363, 1364, 1489, 1321, 1370, 1369, 1274, 1549, 1395, 1275, 1528, 1527, 1383, 1424, 1276, 1437, 1327, 1455, 1380, 1328, 1342, 1277, 1385, 1359, 1320, 1490, 1371, 1429, 1393, 1372, 1471, 1333, 1373, 1374, 1281, 1423, 1382, 1375, 1282, 1305, 1414, 1523, 1416, 1336, 1339, 1443, 1444, 1445, 1446, 1447, 1448, 1449, 1578, 1491, 1358, 1494, 1495, 1493, 1492, 1357, 1428, 1284, 1554, 1555, 1556, 1557, 1579, 1551, 1397, 1287, 1286, 1552, 1553, 1355, 1413, 1409, 1421, 1440, 1391, 1291, 1496, 1561, 1562, 1563, 1564, 1565, 1566, 1568, 1567, 1569, 1570, 1571, 1520, 1294, 1323, 1572, 1297, 1330, 1392, 1306, 1558, 1559, 1560, 1347, 1303, 1526, 1399, 378: 1831, 523: 1832, 1212, 1213, 1211, 581: 2626, 654: 2625}, + {62: 2623, 86: 2624, 374: 2622, 721: 2621}, + // 1450 + {85: 2619, 92: 2620, 374: 2618, 787: 2617}, + {31, 31, 31, 31, 31, 31, 31, 31, 31, 31, 372: 31, 2397, 31, 31, 31, 31, 382: 31, 31, 385: 31, 387: 31, 31, 31, 31, 556: 2398, 565: 2616}, + {78: 2615}, + {1055, 1055, 1055, 1055, 1055, 1055, 1055, 1055, 1055, 1055, 372: 1055, 374: 1055, 1055, 1055, 1055, 382: 1055, 1055, 385: 1055, 387: 1055, 1055, 1055, 1055}, + {2604, 2602, 2598, 2610, 2608, 2609, 1052, 1052, 1052, 1052, 372: 2596, 374: 2601, 1060, 2597, 2607, 382: 1090, 2595, 385: 2603, 387: 1114, 2600, 2351, 2611, 578: 2605, 624: 2614, 631: 2606, 638: 2599}, + // 1455 + {1054, 1054, 1054, 1054, 1054, 1054, 1054, 1054, 1054, 1054, 372: 1054, 374: 1054, 1054, 1054, 1054, 382: 1054, 1054, 385: 1054, 387: 1054, 1054, 1054, 1054}, + {375: 1059}, + {1067, 1067, 1067, 1067, 1067, 1067, 1067, 1067, 1067, 1067, 372: 1067, 374: 1067, 1067, 1067, 1067, 382: 1067, 1067, 385: 1067, 387: 1067, 1067, 1067, 1067}, + {1068, 1068, 1068, 1068, 1068, 1068, 1068, 1068, 1068, 1068, 372: 1068, 374: 1068, 1068, 1068, 1068, 382: 1068, 1068, 385: 1068, 387: 1068, 1068, 1068, 1068}, + {1066, 1066, 1066, 1066, 1066, 1066, 1066, 1066, 1066, 1066, 372: 1066, 374: 1066, 1066, 1066, 1066, 382: 1066, 1066, 385: 1066, 387: 1066, 1066, 1066, 1066}, + // 1460 + {1065, 1065, 1065, 1065, 1065, 1065, 1065, 1065, 
1065, 1065, 372: 1065, 374: 1065, 1065, 1065, 1065, 382: 1065, 1065, 385: 1065, 387: 1065, 1065, 1065, 1065}, + {1064, 1064, 1064, 1064, 1064, 1064, 1064, 1064, 1064, 1064, 372: 1064, 374: 1064, 1064, 1064, 1064, 382: 1064, 1064, 385: 1064, 387: 1064, 1064, 1064, 1064}, + {1069, 1069, 1069, 1069, 1069, 1069, 1069, 1069, 1069, 1069, 372: 1069, 374: 1069, 1069, 1069, 1069, 382: 1069, 1069, 385: 1069, 387: 1069, 1069, 1069, 1069}, + {1063, 1063, 1063, 1063, 1063, 1063, 1063, 1063, 1063, 1063, 372: 1063, 374: 1063, 1063, 1063, 1063, 382: 1063, 1063, 385: 1063, 387: 1063, 1063, 1063, 1063}, + {1062, 1062, 1062, 1062, 1062, 1062, 1062, 1062, 1062, 1062, 372: 1062, 374: 1062, 1062, 1062, 1062, 382: 1062, 1062, 385: 1062, 387: 1062, 1062, 1062, 1062}, + // 1465 + {1061, 1061, 1061, 1061, 1061, 1061, 1061, 1061, 1061, 1061, 372: 1061, 374: 1061, 1061, 1061, 1061, 382: 1061, 1061, 385: 1061, 387: 1061, 1061, 1061, 1061}, + {1070, 1070, 1070, 1070, 1070, 1070, 1070, 1070, 1070, 1070, 372: 1070, 374: 1070, 1070, 1070, 1070, 382: 1070, 1070, 385: 1070, 387: 1070, 1070, 1070, 1070}, + {191, 191, 191, 191, 191, 191, 191, 191, 191, 191, 11: 191, 42: 191, 372: 191, 374: 191, 191, 191, 191, 382: 191, 191, 385: 191, 387: 191, 191, 191, 191, 470: 191, 191}, + {373: 2628}, + {1309, 1332, 1217, 1442, 1436, 1426, 10: 1280, 1625, 1477, 1511, 1504, 1497, 1507, 1500, 1499, 1501, 1517, 1509, 1503, 1515, 1516, 1513, 1514, 1502, 1498, 1505, 1506, 1508, 1512, 1510, 1547, 1453, 1451, 1452, 1314, 1623, 1226, 1441, 1244, 1288, 1246, 1225, 1628, 1263, 1434, 1299, 1335, 1522, 1521, 1270, 1338, 1298, 1476, 1221, 1231, 1340, 1439, 1341, 1257, 1518, 1519, 1438, 1326, 1350, 1273, 1278, 1430, 1431, 1283, 1289, 1384, 1632, 1432, 1433, 1219, 1222, 1224, 1223, 1238, 1626, 1482, 1427, 1243, 1249, 1261, 1262, 1250, 1485, 1405, 1318, 1319, 1279, 1450, 1290, 1631, 1630, 1415, 1295, 1300, 1633, 1402, 1214, 1529, 1215, 1218, 1460, 1387, 1304, 1624, 1310, 1348, 1349, 1345, 1530, 1531, 1532, 1406, 1576, 1478, 1479, 1467, 1480, 1227, 1394, 1533, 1312, 1396, 1228, 1381, 1481, 1640, 1636, 1230, 1329, 1232, 1233, 1313, 1311, 1234, 1408, 1534, 1535, 1404, 1235, 1536, 1468, 1236, 1537, 1538, 1627, 1240, 1388, 1324, 1483, 1417, 1241, 1484, 1242, 1245, 1247, 1248, 1251, 1386, 1351, 1252, 1577, 1435, 1356, 1253, 1461, 1401, 1574, 1254, 1539, 1411, 1255, 1256, 1580, 1258, 1259, 1346, 1540, 1322, 1541, 1418, 1459, 1629, 1307, 1622, 1462, 1403, 1337, 1542, 1265, 1543, 1544, 1389, 1407, 1412, 1325, 1398, 1486, 1457, 1268, 1266, 1334, 1419, 1267, 1456, 1458, 1315, 1546, 1473, 1472, 1376, 1377, 1316, 1378, 1379, 1390, 1642, 1545, 1317, 1643, 1463, 1302, 1641, 1269, 1400, 1573, 1344, 1466, 1469, 1420, 1487, 1488, 1464, 1465, 1353, 1470, 1548, 1454, 1354, 1331, 1285, 1524, 1575, 1410, 1422, 1425, 1352, 1271, 1475, 1474, 1525, 1367, 1550, 1368, 1272, 1343, 1362, 1363, 1364, 1489, 1637, 1370, 1369, 1274, 1549, 1395, 1275, 1528, 1527, 1383, 1424, 1276, 1437, 1327, 1455, 1380, 1328, 1638, 1277, 1385, 1639, 1320, 1490, 1644, 1429, 1393, 1372, 1471, 1333, 1373, 1374, 1281, 1423, 1382, 1375, 1282, 1305, 1414, 1523, 1416, 1336, 1339, 1443, 1444, 1445, 1446, 1447, 1448, 1449, 1578, 1491, 1358, 1494, 1495, 1493, 1492, 1357, 1428, 1284, 1554, 1555, 1556, 1557, 1579, 1551, 1397, 1287, 1286, 1552, 1553, 1355, 1413, 1409, 1421, 1440, 1391, 1291, 1496, 1561, 1562, 1563, 1564, 1565, 1566, 1568, 1567, 1569, 1570, 1571, 1520, 1294, 1323, 1572, 1297, 1330, 1392, 1635, 1558, 1559, 1560, 1347, 1634, 1526, 1399, 1618, 1671, 1674, 376: 1646, 378: 1655, 1669, 1668, 1699, 400: 
1658, 404: 1616, 1677, 1650, 410: 1682, 419: 1645, 1647, 429: 1675, 1649, 1648, 433: 1676, 435: 1654, 1680, 1689, 1711, 1653, 1690, 1691, 1652, 1666, 1667, 1705, 1697, 1700, 1706, 1707, 1702, 1703, 1708, 1701, 1704, 1695, 1673, 1685, 1686, 1688, 1684, 1678, 1679, 1670, 1681, 1683, 1672, 1687, 1692, 1693, 523: 1657, 1212, 1213, 1211, 1663, 1659, 1651, 1662, 1660, 1661, 1694, 1698, 1696, 1656, 1665, 1709, 1710, 1664, 1621, 1620, 1619, 2629}, + // 1470 + {8: 2630, 392: 1720, 1719, 395: 1718, 1717, 1715, 547: 1716, 1714}, + {1058, 1058, 1058, 1058, 1058, 1058, 1058, 1058, 1058, 1058, 372: 1058, 374: 1058, 1058, 1058, 1058, 382: 1058, 1058, 385: 1058, 387: 1058, 1058, 1058, 1058, 788: 2633, 800: 2632, 2631}, + {1071, 1071, 1071, 1071, 1071, 1071, 1071, 1071, 1071, 1071, 372: 1071, 374: 1071, 1071, 1071, 1071, 382: 1071, 1071, 385: 1071, 387: 1071, 1071, 1071, 1071}, + {1057, 1057, 1057, 1057, 1057, 1057, 1057, 1057, 1057, 1057, 372: 1057, 374: 1057, 1057, 1057, 1057, 382: 1057, 1057, 385: 1057, 387: 1057, 1057, 1057, 1057}, + {1056, 1056, 1056, 1056, 1056, 1056, 1056, 1056, 1056, 1056, 372: 1056, 374: 1056, 1056, 1056, 1056, 382: 1056, 1056, 385: 1056, 387: 1056, 1056, 1056, 1056}, + // 1475 + {373: 2635}, + {1309, 1332, 1217, 1442, 1436, 1426, 10: 1280, 1625, 1477, 1511, 1504, 1497, 1507, 1500, 1499, 1501, 1517, 1509, 1503, 1515, 1516, 1513, 1514, 1502, 1498, 1505, 1506, 1508, 1512, 1510, 1547, 1453, 1451, 1452, 1314, 1623, 1226, 1441, 1244, 1288, 1246, 1225, 1628, 1263, 1434, 1299, 1335, 1522, 1521, 1270, 1338, 1298, 1476, 1221, 1231, 1340, 1439, 1341, 1257, 1518, 1519, 1438, 1326, 1350, 1273, 1278, 1430, 1431, 1283, 1289, 1384, 1632, 1432, 1433, 1219, 1222, 1224, 1223, 1238, 1626, 1482, 1427, 1243, 1249, 1261, 1262, 1250, 1485, 1405, 1318, 1319, 1279, 1450, 1290, 1631, 1630, 1415, 1295, 1300, 1633, 1402, 1214, 1529, 1215, 1218, 1460, 1387, 1304, 1624, 1310, 1348, 1349, 1345, 1530, 1531, 1532, 1406, 1576, 1478, 1479, 1467, 1480, 1227, 1394, 1533, 1312, 1396, 1228, 1381, 1481, 1640, 1636, 1230, 1329, 1232, 1233, 1313, 1311, 1234, 1408, 1534, 1535, 1404, 1235, 1536, 1468, 1236, 1537, 1538, 1627, 1240, 1388, 1324, 1483, 1417, 1241, 1484, 1242, 1245, 1247, 1248, 1251, 1386, 1351, 1252, 1577, 1435, 1356, 1253, 1461, 1401, 1574, 1254, 1539, 1411, 1255, 1256, 1580, 1258, 1259, 1346, 1540, 1322, 1541, 1418, 1459, 1629, 1307, 1622, 1462, 1403, 1337, 1542, 1265, 1543, 1544, 1389, 1407, 1412, 1325, 1398, 1486, 1457, 1268, 1266, 1334, 1419, 1267, 1456, 1458, 1315, 1546, 1473, 1472, 1376, 1377, 1316, 1378, 1379, 1390, 1642, 1545, 1317, 1643, 1463, 1302, 1641, 1269, 1400, 1573, 1344, 1466, 1469, 1420, 1487, 1488, 1464, 1465, 1353, 1470, 1548, 1454, 1354, 1331, 1285, 1524, 1575, 1410, 1422, 1425, 1352, 1271, 1475, 1474, 1525, 1367, 1550, 1368, 1272, 1343, 1362, 1363, 1364, 1489, 1637, 1370, 1369, 1274, 1549, 1395, 1275, 1528, 1527, 1383, 1424, 1276, 1437, 1327, 1455, 1380, 1328, 1638, 1277, 1385, 1639, 1320, 1490, 1644, 1429, 1393, 1372, 1471, 1333, 1373, 1374, 1281, 1423, 1382, 1375, 1282, 1305, 1414, 1523, 1416, 1336, 1339, 1443, 1444, 1445, 1446, 1447, 1448, 1449, 1578, 1491, 1358, 1494, 1495, 1493, 1492, 1357, 1428, 1284, 1554, 1555, 1556, 1557, 1579, 1551, 1397, 1287, 1286, 1552, 1553, 1355, 1413, 1409, 1421, 1440, 1391, 1291, 1496, 1561, 1562, 1563, 1564, 1565, 1566, 1568, 1567, 1569, 1570, 1571, 1520, 1294, 1323, 1572, 1297, 1330, 1392, 1635, 1558, 1559, 1560, 1347, 1634, 1526, 1399, 1618, 1671, 1674, 376: 1646, 378: 1655, 1669, 1668, 1699, 400: 1658, 404: 1616, 1677, 1650, 410: 1682, 419: 1645, 
1647, 429: 1675, 1649, 1648, 433: 1676, 435: 1654, 1680, 1689, 1711, 1653, 1690, 1691, 1652, 1666, 1667, 1705, 1697, 1700, 1706, 1707, 1702, 1703, 1708, 1701, 1704, 1695, 1673, 1685, 1686, 1688, 1684, 1678, 1679, 1670, 1681, 1683, 1672, 1687, 1692, 1693, 523: 1657, 1212, 1213, 1211, 1663, 1659, 1651, 1662, 1660, 1661, 1694, 1698, 1696, 1656, 1665, 1709, 1710, 1664, 1621, 1620, 1619, 2636}, + {8: 2637, 392: 1720, 1719, 395: 1718, 1717, 1715, 547: 1716, 1714}, + {1086, 1086, 1086, 1086, 1086, 1086, 1086, 1086, 1086, 1086, 44: 2371, 372: 2638, 374: 1086, 1086, 1086, 1086, 382: 1086, 1086, 385: 1086, 387: 1086, 1086, 1086, 1086, 627: 2373, 670: 2639, 736: 2640}, + {44: 2375, 376: 2641}, + // 1480 + {1083, 1083, 1083, 1083, 1083, 1083, 1083, 1083, 1083, 1083, 372: 1083, 374: 1083, 1083, 1083, 1083, 382: 1083, 1083, 385: 1083, 387: 1083, 1083, 1083, 1083}, + {1072, 1072, 1072, 1072, 1072, 1072, 1072, 1072, 1072, 1072, 372: 1072, 374: 1072, 1072, 1072, 1072, 382: 1072, 1072, 385: 1072, 387: 1072, 1072, 1072, 1072}, + {1084, 1084, 1084, 1084, 1084, 1084, 1084, 1084, 1084, 1084, 372: 1084, 374: 1084, 1084, 1084, 1084, 382: 1084, 1084, 385: 1084, 387: 1084, 1084, 1084, 1084}, + {1073, 1073, 1073, 1073, 1073, 1073, 1073, 1073, 1073, 1073, 372: 1073, 374: 1073, 1073, 1073, 1073, 382: 1073, 1073, 385: 1073, 387: 1073, 1073, 1073, 1073}, + {436: 2650, 2647, 440: 2648, 2649, 690: 2645, 2646, 2644}, + // 1485 + {1074, 1074, 1074, 1074, 1074, 1074, 1074, 1074, 1074, 1074, 372: 1074, 374: 1074, 1074, 1074, 1074, 382: 1074, 1074, 385: 1074, 387: 1074, 1074, 1074, 1074}, + {1044, 1044, 1044, 1044, 1044, 1044, 1044, 1044, 1044, 1044, 372: 1044, 374: 1044, 1044, 1044, 1044, 382: 1044, 1044, 385: 1044, 387: 1044, 1044, 1044, 1044}, + {373: 2651}, + {1037, 1037, 1037, 1037, 1037, 1037, 1037, 1037, 1037, 1037, 372: 1037, 1041, 1037, 1037, 1037, 1037, 382: 1037, 1037, 385: 1037, 387: 1037, 1037, 1037, 1037}, + {1036, 1036, 1036, 1036, 1036, 1036, 1036, 1036, 1036, 1036, 372: 1036, 1040, 1036, 1036, 1036, 1036, 382: 1036, 1036, 385: 1036, 387: 1036, 1036, 1036, 1036}, + // 1490 + {1035, 1035, 1035, 1035, 1035, 1035, 1035, 1035, 1035, 1035, 372: 1035, 1039, 1035, 1035, 1035, 1035, 382: 1035, 1035, 385: 1035, 387: 1035, 1035, 1035, 1035}, + {373: 1038}, + {8: 2652, 406: 1904, 564: 2653}, + {1043, 1043, 1043, 1043, 1043, 1043, 1043, 1043, 1043, 1043, 372: 1043, 374: 1043, 1043, 1043, 1043, 382: 1043, 1043, 385: 1043, 387: 1043, 1043, 1043, 1043}, + {8: 2654}, + // 1495 + {1042, 1042, 1042, 1042, 1042, 1042, 1042, 1042, 1042, 1042, 372: 1042, 374: 1042, 1042, 1042, 1042, 382: 1042, 1042, 385: 1042, 387: 1042, 1042, 1042, 1042}, + {49: 2656}, + {1075, 1075, 1075, 1075, 1075, 1075, 1075, 1075, 1075, 1075, 372: 1075, 374: 1075, 1075, 1075, 1075, 382: 1075, 1075, 385: 1075, 387: 1075, 1075, 1075, 1075}, + {1076, 1076, 1076, 1076, 1076, 1076, 1076, 1076, 1076, 1076, 372: 1076, 374: 1076, 1076, 1076, 1076, 382: 1076, 1076, 385: 1076, 387: 1076, 1076, 1076, 1076}, + {1046, 1046, 1046, 1046, 1046, 1046, 1046, 1046, 1046, 1046, 372: 1046, 374: 1046, 1046, 1046, 1046, 382: 1046, 1046, 385: 1046, 387: 1046, 1046, 1046, 1046}, + // 1500 + {1045, 1045, 1045, 1045, 1045, 1045, 1045, 1045, 1045, 1045, 372: 1045, 374: 1045, 1045, 1045, 1045, 382: 1045, 1045, 385: 1045, 387: 1045, 1045, 1045, 1045}, + {1034, 1034, 1034, 1034, 1034, 1034, 1034, 1034, 1034, 1034, 372: 1034, 374: 1034, 1034, 1034, 1034, 382: 1034, 1034, 385: 1034, 387: 1034, 1034, 1034, 1034}, + {406: 2664, 430: 2666, 2665, 693: 2667}, + {406: 2664, 430: 2666, 2665, 693: 
2663}, + {1032, 1032, 1032, 1032, 1032, 1032, 1032, 1032, 1032, 1032, 372: 1032, 374: 1032, 1032, 1032, 1032, 382: 1032, 1032, 385: 1032, 387: 1032, 1032, 1032, 1032}, + // 1505 + {1031, 1031, 1031, 1031, 1031, 1031, 1031, 1031, 1031, 1031, 372: 1031, 374: 1031, 1031, 1031, 1031, 382: 1031, 1031, 385: 1031, 387: 1031, 1031, 1031, 1031}, + {1030, 1030, 1030, 1030, 1030, 1030, 1030, 1030, 1030, 1030, 372: 1030, 374: 1030, 1030, 1030, 1030, 382: 1030, 1030, 385: 1030, 387: 1030, 1030, 1030, 1030}, + {1029, 1029, 1029, 1029, 1029, 1029, 1029, 1029, 1029, 1029, 372: 1029, 374: 1029, 1029, 1029, 1029, 382: 1029, 1029, 385: 1029, 387: 1029, 1029, 1029, 1029}, + {1033, 1033, 1033, 1033, 1033, 1033, 1033, 1033, 1033, 1033, 372: 1033, 374: 1033, 1033, 1033, 1033, 382: 1033, 1033, 385: 1033, 387: 1033, 1033, 1033, 1033}, + {1077, 1077, 1077, 1077, 1077, 1077, 1077, 1077, 1077, 1077, 372: 1077, 374: 1077, 1077, 1077, 1077, 382: 1077, 1077, 385: 1077, 387: 1077, 1077, 1077, 1077}, + // 1510 + {1079, 1079, 1079, 1079, 1079, 1079, 1079, 1079, 1079, 1079, 372: 1079, 374: 1079, 1079, 1079, 1079, 382: 1079, 1079, 385: 1079, 387: 1079, 1079, 1079, 1079}, + {1082, 1082, 1082, 1082, 1082, 1082, 1082, 1082, 1082, 1082, 372: 1082, 374: 1082, 1082, 1082, 1082, 382: 1082, 1082, 385: 1082, 387: 1082, 1082, 1082, 1082}, + {6: 1100, 1100, 1100, 1100}, + {382: 1112, 1112, 387: 1112, 1112, 475: 1112, 486: 1112}, + {6: 1111, 1111, 9: 1111, 382: 1111, 1111, 387: 1111, 1111, 475: 1111, 486: 1111}, + // 1515 + {6: 1002, 1002}, + {6: 1008, 1008}, + {6: 1003, 1003}, + {1309, 1332, 1217, 1442, 1436, 1426, 10: 1280, 1229, 1477, 1511, 1504, 1497, 1507, 1500, 1499, 1501, 1517, 1509, 1503, 1515, 1516, 1513, 1514, 1502, 1498, 1505, 1506, 1508, 1512, 1510, 1547, 1453, 1451, 1452, 1314, 1216, 1226, 1441, 1244, 1288, 1246, 1225, 1260, 1263, 1434, 1299, 1335, 1522, 1521, 1270, 1338, 1298, 1476, 1221, 1231, 1340, 1439, 1341, 1257, 1518, 1519, 1438, 1326, 1350, 1273, 1278, 1430, 1431, 1283, 1289, 1384, 1296, 1432, 1433, 1219, 1222, 1224, 1223, 1238, 1237, 1482, 1427, 1243, 1249, 1261, 1262, 1250, 1485, 1405, 1318, 1319, 1279, 1450, 1290, 1293, 1292, 1415, 1295, 1300, 1301, 1402, 1214, 1529, 1215, 1218, 1460, 1387, 1304, 1220, 1310, 1348, 1349, 1345, 1530, 1531, 1532, 1406, 1576, 1478, 1479, 1467, 1480, 1227, 1394, 1533, 1312, 1396, 1228, 1381, 1481, 1360, 1308, 1230, 1329, 1232, 1233, 1313, 1311, 1234, 1408, 1534, 1535, 1404, 1235, 1536, 1468, 1236, 1537, 1538, 1239, 1240, 1388, 1324, 1483, 1417, 1241, 1484, 1242, 1245, 1247, 1248, 1251, 1386, 1351, 1252, 1577, 1435, 1356, 1253, 1461, 1401, 1574, 1254, 1539, 1411, 1255, 1256, 1580, 1258, 1259, 1346, 1540, 1322, 1541, 1418, 1459, 1264, 1307, 1210, 1462, 1403, 1337, 1542, 1265, 1543, 1544, 1389, 1407, 1412, 1325, 1398, 1486, 1457, 1268, 1266, 1334, 1419, 1267, 1456, 1458, 1315, 1546, 1473, 1472, 1376, 1377, 1316, 1378, 1379, 1390, 1365, 1545, 1317, 1366, 1463, 1302, 1361, 1269, 1400, 1573, 1344, 1466, 1469, 1420, 1487, 1488, 1464, 1465, 1353, 1470, 1548, 1454, 1354, 1331, 1285, 1524, 1575, 1410, 1422, 1425, 1352, 1271, 1475, 1474, 1525, 1367, 1550, 1368, 1272, 1343, 1362, 1363, 1364, 1489, 1321, 1370, 1369, 1274, 1549, 1395, 1275, 1528, 1527, 1383, 1424, 1276, 1437, 1327, 1455, 1380, 1328, 1342, 1277, 1385, 1359, 1320, 1490, 1371, 1429, 1393, 1372, 1471, 1333, 1373, 1374, 1281, 1423, 1382, 1375, 1282, 1305, 1414, 1523, 1416, 1336, 1339, 1443, 1444, 1445, 1446, 1447, 1448, 1449, 1578, 1491, 1358, 1494, 1495, 1493, 1492, 1357, 1428, 1284, 1554, 1555, 1556, 1557, 1579, 1551, 1397, 1287, 
1286, 1552, 1553, 1355, 1413, 1409, 1421, 1440, 1391, 1291, 1496, 1561, 1562, 1563, 1564, 1565, 1566, 1568, 1567, 1569, 1570, 1571, 1520, 1294, 1323, 1572, 1297, 1330, 1392, 1306, 1558, 1559, 1560, 1347, 1303, 1526, 1399, 523: 1209, 1212, 1213, 1211, 592: 2678}, + {6: 1012, 1012, 11: 1006, 42: 1006, 374: 2683, 377: 1006, 470: 1006, 1006, 662: 2682, 664: 2680, 727: 2681, 2679}, + // 1520 + {6: 1017, 1017}, + {11: 2542, 42: 2687, 377: 2686, 470: 2548, 2549, 559: 2685}, + {6: 1011, 1011, 11: 1006, 42: 1006, 374: 2683, 377: 1006, 470: 1006, 1006, 662: 2684, 664: 2680}, + {6: 1010, 1010, 11: 1010, 42: 1010, 374: 1010, 377: 1010, 470: 1010, 1010}, + {11: 1005, 42: 1005, 377: 1005, 470: 1005, 1005}, + // 1525 + {6: 1009, 1009, 11: 1009, 42: 1009, 374: 1009, 377: 1009, 470: 1009, 1009}, + {988, 988, 988, 988, 988, 988, 10: 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 378: 988, 403: 2421, 472: 988, 613: 2692}, + {988, 988, 988, 988, 988, 988, 10: 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 
988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 988, 378: 988, 403: 2421, 613: 2690}, + {378: 988, 403: 2421, 613: 2688}, + {378: 2689}, + // 1530 + {6: 1013, 1013, 11: 1013, 42: 1013, 374: 1013, 377: 1013, 470: 1013, 1013}, + {1309, 1332, 1217, 1442, 1436, 1426, 10: 1280, 1229, 1477, 1511, 1504, 1497, 1507, 1500, 1499, 1501, 1517, 1509, 1503, 1515, 1516, 1513, 1514, 1502, 1498, 1505, 1506, 1508, 1512, 1510, 1547, 1453, 1451, 1452, 1314, 1216, 1226, 1441, 1244, 1288, 1246, 1225, 1260, 1263, 1434, 1299, 1335, 1522, 1521, 1270, 1338, 1298, 1476, 1221, 1231, 1340, 1439, 1341, 1257, 1518, 1519, 1438, 1326, 1350, 1273, 1278, 1430, 1431, 1283, 1289, 1384, 1296, 1432, 1433, 1219, 1222, 1224, 1223, 1238, 1237, 1482, 1427, 1243, 1249, 1261, 1262, 1250, 1485, 1405, 1318, 1319, 1279, 1450, 1290, 1293, 1292, 1415, 1295, 1300, 1301, 1402, 1214, 1529, 1215, 1218, 1460, 1387, 1304, 1220, 1310, 1348, 1349, 1345, 1530, 1531, 1532, 1406, 1576, 1478, 1479, 1467, 1480, 1227, 1394, 1533, 1312, 1396, 1228, 1381, 1481, 1360, 1308, 1230, 1329, 1232, 1233, 1313, 1311, 1234, 1408, 1534, 1535, 1404, 1235, 1536, 1468, 1236, 1537, 1538, 1239, 1240, 1388, 1324, 1483, 1417, 1241, 1484, 1242, 1245, 1247, 1248, 1251, 1386, 1351, 1252, 1577, 1435, 1356, 1253, 1461, 1401, 1574, 1254, 1539, 1411, 1255, 1256, 1580, 1258, 1259, 1346, 1540, 1322, 1541, 1418, 1459, 1264, 1307, 1210, 1462, 1403, 1337, 1542, 1265, 1543, 1544, 1389, 1407, 1412, 1325, 1398, 1486, 1457, 1268, 1266, 1334, 1419, 1267, 1456, 1458, 1315, 1546, 1473, 1472, 1376, 1377, 1316, 1378, 1379, 1390, 1365, 1545, 1317, 1366, 1463, 1302, 1361, 1269, 1400, 1573, 1344, 1466, 1469, 1420, 1487, 1488, 1464, 1465, 1353, 1470, 1548, 1454, 1354, 1331, 1285, 1524, 1575, 1410, 1422, 1425, 1352, 1271, 1475, 1474, 1525, 1367, 1550, 1368, 1272, 1343, 1362, 1363, 1364, 1489, 1321, 1370, 1369, 1274, 1549, 1395, 1275, 1528, 1527, 1383, 1424, 1276, 1437, 1327, 1455, 1380, 1328, 1342, 1277, 1385, 1359, 1320, 1490, 1371, 1429, 1393, 1372, 1471, 1333, 1373, 1374, 1281, 1423, 1382, 1375, 1282, 1305, 1414, 1523, 1416, 1336, 1339, 1443, 1444, 1445, 1446, 1447, 1448, 1449, 1578, 1491, 1358, 1494, 1495, 1493, 1492, 1357, 1428, 1284, 1554, 1555, 1556, 1557, 1579, 1551, 1397, 1287, 1286, 1552, 1553, 1355, 1413, 1409, 1421, 1440, 1391, 1291, 1496, 1561, 1562, 1563, 1564, 1565, 1566, 1568, 1567, 1569, 1570, 1571, 1520, 1294, 1323, 1572, 1297, 1330, 1392, 1306, 1558, 1559, 1560, 1347, 1303, 1526, 1399, 378: 1831, 523: 1832, 1212, 1213, 1211, 581: 2626, 654: 2691}, + {6: 1014, 1014, 11: 1014, 42: 1014, 374: 1014, 377: 1014, 470: 1014, 1014}, + {1309, 1332, 1217, 1442, 1436, 1426, 10: 1280, 
1229, 1477, 1511, 1504, 1497, 1507, 1500, 1499, 1501, 1517, 1509, 1503, 1515, 1516, 1513, 1514, 1502, 1498, 1505, 1506, 1508, 1512, 1510, 1547, 1453, 1451, 1452, 1314, 1216, 1226, 1441, 1244, 1288, 1246, 1225, 1260, 1263, 1434, 1299, 1335, 1522, 1521, 1270, 1338, 1298, 1476, 1221, 1231, 1340, 1439, 1341, 1257, 1518, 1519, 1438, 1326, 1350, 1273, 1278, 1430, 1431, 1283, 1289, 1384, 1296, 1432, 1433, 1219, 1222, 1224, 1223, 1238, 1237, 1482, 1427, 1243, 1249, 1261, 1262, 1250, 1485, 1405, 1318, 1319, 1279, 1450, 1290, 1293, 1292, 1415, 1295, 1300, 1301, 1402, 1214, 1529, 1215, 1218, 1460, 1387, 1304, 1220, 1310, 1348, 1349, 1345, 1530, 1531, 1532, 1406, 1576, 1478, 1479, 1467, 1480, 1227, 1394, 1533, 1312, 1396, 1228, 1381, 1481, 1360, 1308, 1230, 1329, 1232, 1233, 1313, 1311, 1234, 1408, 1534, 1535, 1404, 1235, 1536, 1468, 1236, 1537, 1538, 1239, 1240, 1388, 1324, 1483, 1417, 1241, 1484, 1242, 1245, 1247, 1248, 1251, 1386, 1351, 1252, 1577, 1435, 1356, 1253, 1461, 1401, 1574, 1254, 1539, 1411, 1255, 1256, 1580, 1258, 1259, 1346, 1540, 1322, 1541, 1418, 1459, 1264, 1307, 1210, 1462, 1403, 1337, 1542, 1265, 1543, 1544, 1389, 1407, 1412, 1325, 1398, 1486, 1457, 1268, 1266, 1334, 1419, 1267, 1456, 1458, 1315, 1546, 1473, 1472, 1376, 1377, 1316, 1378, 1379, 1390, 1365, 1545, 1317, 1366, 1463, 1302, 1361, 1269, 1400, 1573, 1344, 1466, 1469, 1420, 1487, 1488, 1464, 1465, 1353, 1470, 1548, 1454, 1354, 1331, 1285, 1524, 1575, 1410, 1422, 1425, 1352, 1271, 1475, 1474, 1525, 1367, 1550, 1368, 1272, 1343, 1362, 1363, 1364, 1489, 1321, 1370, 1369, 1274, 1549, 1395, 1275, 1528, 1527, 1383, 1424, 1276, 1437, 1327, 1455, 1380, 1328, 1342, 1277, 1385, 1359, 1320, 1490, 1371, 1429, 1393, 1372, 1471, 1333, 1373, 1374, 1281, 1423, 1382, 1375, 1282, 1305, 1414, 1523, 1416, 1336, 1339, 1443, 1444, 1445, 1446, 1447, 1448, 1449, 1578, 1491, 1358, 1494, 1495, 1493, 1492, 1357, 1428, 1284, 1554, 1555, 1556, 1557, 1579, 1551, 1397, 1287, 1286, 1552, 1553, 1355, 1413, 1409, 1421, 1440, 1391, 1291, 1496, 1561, 1562, 1563, 1564, 1565, 1566, 1568, 1567, 1569, 1570, 1571, 1520, 1294, 1323, 1572, 1297, 1330, 1392, 1306, 1558, 1559, 1560, 1347, 1303, 1526, 1399, 378: 1831, 472: 1830, 523: 1832, 1212, 1213, 1211, 581: 1829, 609: 2693}, + {6: 1015, 1015, 11: 1015, 42: 1015, 374: 1015, 377: 1015, 470: 1015, 1015}, + // 1535 + {913, 913, 913, 913, 913, 913, 10: 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 
913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 405: 1608, 587: 2695}, + {1309, 1332, 1217, 1442, 1436, 1426, 10: 1280, 1229, 1477, 1511, 1504, 1497, 1507, 1500, 1499, 1501, 1517, 1509, 1503, 1515, 1516, 1513, 1514, 1502, 1498, 1505, 1506, 1508, 1512, 1510, 1547, 1453, 1451, 1452, 1314, 1216, 1226, 1441, 1244, 1288, 1246, 1225, 1260, 1263, 1434, 1299, 1335, 1522, 1521, 1270, 1338, 1298, 1476, 1221, 1231, 1340, 1439, 1341, 1257, 1518, 1519, 1438, 1326, 1350, 1273, 1278, 1430, 1431, 1283, 1289, 1384, 1296, 1432, 1433, 1219, 1222, 1224, 1223, 1238, 1237, 1482, 1427, 1243, 1249, 1261, 1262, 1250, 1485, 1405, 1318, 1319, 1279, 1450, 1290, 1293, 1292, 1415, 1295, 1300, 1301, 1402, 1214, 1529, 1215, 1218, 1460, 1387, 1304, 1220, 1310, 1348, 1349, 1345, 1530, 1531, 1532, 1406, 1576, 1478, 1479, 1467, 1480, 1227, 1394, 1533, 1312, 1396, 1228, 1381, 1481, 1360, 1308, 1230, 1329, 1232, 1233, 1313, 1311, 1234, 1408, 1534, 1535, 1404, 1235, 1536, 1468, 1236, 1537, 1538, 1239, 1240, 1388, 1324, 1483, 1417, 1241, 1484, 1242, 1245, 1247, 1248, 1251, 1386, 1351, 1252, 1577, 1435, 1356, 1253, 1461, 1401, 1574, 1254, 1539, 1411, 1255, 1256, 1580, 1258, 1259, 1346, 1540, 1322, 1541, 1418, 1459, 1264, 1307, 1210, 1462, 1403, 1337, 1542, 1265, 1543, 1544, 1389, 1407, 1412, 1325, 1398, 1486, 1457, 1268, 1266, 1334, 1419, 1267, 1456, 1458, 1315, 1546, 1473, 1472, 1376, 1377, 1316, 1378, 1379, 1390, 1365, 1545, 1317, 1366, 1463, 1302, 1361, 1269, 1400, 1573, 1344, 1466, 1469, 1420, 1487, 1488, 1464, 1465, 1353, 1470, 1548, 1454, 1354, 1331, 1285, 1524, 1575, 1410, 1422, 1425, 1352, 1271, 1475, 1474, 1525, 1367, 1550, 1368, 1272, 1343, 1362, 1363, 1364, 1489, 1321, 1370, 1369, 1274, 1549, 1395, 1275, 1528, 1527, 1383, 1424, 1276, 1437, 1327, 1455, 1380, 1328, 1342, 1277, 1385, 1359, 1320, 1490, 1371, 1429, 1393, 1372, 1471, 1333, 1373, 1374, 1281, 1423, 1382, 1375, 1282, 1305, 1414, 1523, 1416, 1336, 1339, 1443, 1444, 1445, 1446, 1447, 1448, 1449, 1578, 1491, 1358, 1494, 1495, 1493, 1492, 1357, 1428, 1284, 1554, 1555, 1556, 1557, 1579, 1551, 1397, 1287, 1286, 1552, 1553, 1355, 1413, 1409, 1421, 1440, 1391, 1291, 1496, 1561, 1562, 1563, 1564, 1565, 1566, 1568, 1567, 1569, 1570, 1571, 1520, 1294, 1323, 1572, 1297, 1330, 1392, 1306, 1558, 1559, 1560, 1347, 1303, 1526, 1399, 523: 2696, 1212, 1213, 1211}, + {35: 2413, 385: 899, 391: 2412, 590: 2698, 755: 2697}, + {385: 2699}, + {385: 898}, + // 1540 + {1309, 1332, 1217, 1442, 1436, 1426, 10: 1280, 1229, 1477, 1511, 1504, 1497, 1507, 1500, 1499, 1501, 1517, 1509, 1503, 1515, 1516, 1513, 1514, 1502, 1498, 1505, 1506, 1508, 1512, 1510, 1547, 1453, 1451, 1452, 1314, 1216, 1226, 1441, 1244, 1288, 1246, 1225, 1260, 1263, 1434, 1299, 1335, 1522, 1521, 1270, 1338, 1298, 1476, 1221, 1231, 1340, 1439, 1341, 1257, 1518, 1519, 1438, 1326, 1350, 1273, 1278, 1430, 1431, 1283, 1289, 1384, 1296, 1432, 1433, 1219, 1222, 1224, 1223, 1238, 1237, 1482, 1427, 1243, 1249, 1261, 1262, 1250, 1485, 1405, 1318, 1319, 1279, 1450, 1290, 
1293, 1292, 1415, 1295, 1300, 1301, 1402, 1214, 1529, 1215, 1218, 1460, 1387, 1304, 1220, 1310, 1348, 1349, 1345, 1530, 1531, 1532, 1406, 1576, 1478, 1479, 1467, 1480, 1227, 1394, 1533, 1312, 1396, 1228, 1381, 1481, 1360, 1308, 1230, 1329, 1232, 1233, 1313, 1311, 1234, 1408, 1534, 1535, 1404, 1235, 1536, 1468, 1236, 1537, 1538, 1239, 1240, 1388, 1324, 1483, 1417, 1241, 1484, 1242, 1245, 1247, 1248, 1251, 1386, 1351, 1252, 1577, 1435, 1356, 1253, 1461, 1401, 1574, 1254, 1539, 1411, 1255, 1256, 1580, 1258, 1259, 1346, 1540, 1322, 1541, 1418, 1459, 1264, 1307, 1210, 1462, 1403, 1337, 1542, 1265, 1543, 1544, 1389, 1407, 1412, 1325, 1398, 1486, 1457, 1268, 1266, 1334, 1419, 1267, 1456, 1458, 1315, 1546, 1473, 1472, 1376, 1377, 1316, 1378, 1379, 1390, 1365, 1545, 1317, 1366, 1463, 1302, 1361, 1269, 1400, 1573, 1344, 1466, 1469, 1420, 1487, 1488, 1464, 1465, 1353, 1470, 1548, 1454, 1354, 1331, 1285, 1524, 1575, 1410, 1422, 1425, 1352, 1271, 1475, 1474, 1525, 1367, 1550, 1368, 1272, 1343, 1362, 1363, 1364, 1489, 1321, 1370, 1369, 1274, 1549, 1395, 1275, 1528, 1527, 1383, 1424, 1276, 1437, 1327, 1455, 1380, 1328, 1342, 1277, 1385, 1359, 1320, 1490, 1371, 1429, 1393, 1372, 1471, 1333, 1373, 1374, 1281, 1423, 1382, 1375, 1282, 1305, 1414, 1523, 1416, 1336, 1339, 1443, 1444, 1445, 1446, 1447, 1448, 1449, 1578, 1491, 1358, 1494, 1495, 1493, 1492, 1357, 1428, 1284, 1554, 1555, 1556, 1557, 1579, 1551, 1397, 1287, 1286, 1552, 1553, 1355, 1413, 1409, 1421, 1440, 1391, 1291, 1496, 1561, 1562, 1563, 1564, 1565, 1566, 1568, 1567, 1569, 1570, 1571, 1520, 1294, 1323, 1572, 1297, 1330, 1392, 1306, 1558, 1559, 1560, 1347, 1303, 1526, 1399, 523: 1584, 1212, 1213, 1211, 555: 2700}, + {373: 2701}, + {1309, 1332, 1217, 1442, 1436, 1426, 10: 1280, 1229, 1477, 1511, 1504, 1497, 1507, 1500, 1499, 1501, 1517, 1509, 1503, 1515, 1516, 1513, 1514, 1502, 1498, 1505, 1506, 1508, 1512, 1510, 1547, 1453, 1451, 1452, 1314, 1216, 1226, 1441, 1244, 1288, 1246, 1225, 1260, 1263, 1434, 1299, 1335, 1522, 1521, 1270, 1338, 1298, 1476, 1221, 1231, 1340, 1439, 1341, 1257, 1518, 1519, 1438, 1326, 1350, 1273, 1278, 1430, 1431, 1283, 1289, 1384, 1296, 1432, 1433, 1219, 1222, 1224, 1223, 1238, 1237, 1482, 1427, 1243, 1249, 1261, 1262, 1250, 1485, 1405, 1318, 1319, 1279, 1450, 1290, 1293, 1292, 1415, 1295, 1300, 1301, 1402, 1214, 1529, 1215, 1218, 1460, 1387, 1304, 1220, 1310, 1348, 1349, 1345, 1530, 1531, 1532, 1406, 1576, 1478, 1479, 1467, 1480, 1227, 1394, 1533, 1312, 1396, 1228, 1381, 1481, 1360, 1308, 1230, 1329, 1232, 1233, 1313, 1311, 1234, 1408, 1534, 1535, 1404, 1235, 1536, 1468, 1236, 1537, 1538, 1239, 1240, 1388, 1324, 1483, 1417, 1241, 1484, 1242, 1245, 1247, 1248, 1251, 1386, 1351, 1252, 1577, 1435, 1356, 1253, 1461, 1401, 1574, 1254, 1539, 1411, 1255, 1256, 1580, 1258, 1259, 1346, 1540, 1322, 1541, 1418, 1459, 1264, 1307, 1210, 1462, 1403, 1337, 1542, 1265, 1543, 1544, 1389, 1407, 1412, 1325, 1398, 1486, 1457, 1268, 1266, 1334, 1419, 1267, 1456, 1458, 1315, 1546, 1473, 1472, 1376, 1377, 1316, 1378, 1379, 1390, 1365, 1545, 1317, 1366, 1463, 1302, 1361, 1269, 1400, 1573, 1344, 1466, 1469, 1420, 1487, 1488, 1464, 1465, 1353, 1470, 1548, 1454, 1354, 1331, 1285, 1524, 1575, 1410, 1422, 1425, 1352, 1271, 1475, 1474, 1525, 1367, 1550, 1368, 1272, 1343, 1362, 1363, 1364, 1489, 1321, 1370, 1369, 1274, 1549, 1395, 1275, 1528, 1527, 1383, 1424, 1276, 1437, 1327, 1455, 1380, 1328, 1342, 1277, 1385, 1359, 1320, 1490, 1371, 1429, 1393, 1372, 1471, 1333, 1373, 1374, 1281, 1423, 1382, 1375, 1282, 1305, 1414, 1523, 1416, 1336, 1339, 1443, 1444, 
1445, 1446, 1447, 1448, 1449, 1578, 1491, 1358, 1494, 1495, 1493, 1492, 1357, 1428, 1284, 1554, 1555, 1556, 1557, 1579, 1551, 1397, 1287, 1286, 1552, 1553, 1355, 1413, 1409, 1421, 1440, 1391, 1291, 1496, 1561, 1562, 1563, 1564, 1565, 1566, 1568, 1567, 1569, 1570, 1571, 1520, 1294, 1323, 1572, 1297, 1330, 1392, 1306, 1558, 1559, 1560, 1347, 1303, 1526, 1399, 373: 2392, 523: 2248, 1212, 1213, 1211, 554: 2391, 589: 2390, 598: 2702}, + {8: 2703, 2403}, + {909, 6: 909, 909, 35: 909, 909, 909, 909, 391: 909, 474: 909, 597: 2704}, + // 1545 + {2410, 6: 1028, 1028, 35: 2413, 2415, 2414, 2407, 391: 2412, 474: 2409, 588: 2411, 590: 2408, 596: 2406}, + {6: 1103, 1103}, + {1309, 1332, 1217, 1442, 1436, 1426, 10: 1280, 1229, 1477, 1511, 1504, 1497, 1507, 1500, 1499, 1501, 1517, 1509, 1503, 1515, 1516, 1513, 1514, 1502, 1498, 1505, 1506, 1508, 1512, 1510, 1547, 1453, 1451, 1452, 1314, 1216, 1226, 1441, 1244, 1288, 1246, 1225, 1260, 1263, 1434, 1299, 1335, 1522, 1521, 1270, 1338, 1298, 1476, 1221, 1231, 1340, 1439, 1341, 1257, 1518, 1519, 1438, 1326, 1350, 1273, 1278, 1430, 1431, 1283, 1289, 1384, 1296, 1432, 1433, 1219, 1222, 1224, 1223, 1238, 1237, 1482, 1427, 1243, 1249, 1261, 1262, 1250, 1485, 1405, 1318, 1319, 1279, 1450, 1290, 1293, 1292, 1415, 1295, 1300, 1301, 1402, 1214, 1529, 1215, 1218, 1460, 1387, 1304, 1220, 1310, 1348, 1349, 1345, 1530, 1531, 1532, 1406, 1576, 1478, 1479, 1467, 1480, 1227, 1394, 1533, 1312, 1396, 1228, 1381, 1481, 1360, 1308, 1230, 1329, 1232, 1233, 1313, 1311, 1234, 1408, 1534, 1535, 1404, 1235, 1536, 1468, 1236, 1537, 1538, 1239, 1240, 1388, 1324, 1483, 1417, 1241, 1484, 1242, 1245, 1247, 1248, 1251, 1386, 1351, 1252, 1577, 1435, 1356, 1253, 1461, 1401, 1574, 1254, 1539, 1411, 1255, 1256, 1580, 1258, 1259, 1346, 1540, 1322, 1541, 1418, 1459, 1264, 1307, 1210, 1462, 1403, 1337, 1542, 1265, 1543, 1544, 1389, 1407, 1412, 1325, 1398, 1486, 1457, 1268, 1266, 1334, 1419, 1267, 1456, 1458, 1315, 1546, 1473, 1472, 1376, 1377, 1316, 1378, 1379, 1390, 1365, 1545, 1317, 1366, 1463, 1302, 1361, 1269, 1400, 1573, 1344, 1466, 1469, 1420, 1487, 1488, 1464, 1465, 1353, 1470, 1548, 1454, 1354, 1331, 1285, 1524, 1575, 1410, 1422, 1425, 1352, 1271, 1475, 1474, 1525, 1367, 1550, 1368, 1272, 1343, 1362, 1363, 1364, 1489, 1321, 1370, 1369, 1274, 1549, 1395, 1275, 1528, 1527, 1383, 1424, 1276, 1437, 1327, 1455, 1380, 1328, 1342, 1277, 1385, 1359, 1320, 1490, 1371, 1429, 1393, 1372, 1471, 1333, 1373, 1374, 1281, 1423, 1382, 1375, 1282, 1305, 1414, 1523, 1416, 1336, 1339, 1443, 1444, 1445, 1446, 1447, 1448, 1449, 1578, 1491, 1358, 1494, 1495, 1493, 1492, 1357, 1428, 1284, 1554, 1555, 1556, 1557, 1579, 1551, 1397, 1287, 1286, 1552, 1553, 1355, 1413, 1409, 1421, 1440, 1391, 1291, 1496, 1561, 1562, 1563, 1564, 1565, 1566, 1568, 1567, 1569, 1570, 1571, 1520, 1294, 1323, 1572, 1297, 1330, 1392, 1306, 1558, 1559, 1560, 1347, 1303, 1526, 1399, 523: 1584, 1212, 1213, 1211, 555: 2314, 706: 2707}, + {6: 1110, 1110, 9: 2318}, + {1309, 1332, 1217, 1442, 1436, 1426, 10: 1280, 1229, 1477, 1511, 1504, 1497, 1507, 1500, 1499, 1501, 1517, 1509, 1503, 1515, 1516, 1513, 1514, 1502, 1498, 1505, 1506, 1508, 1512, 1510, 1547, 1453, 1451, 1452, 1314, 1216, 1226, 1441, 1244, 1288, 1246, 1225, 1260, 1263, 1434, 1299, 1335, 1522, 1521, 1270, 1338, 1298, 1476, 1221, 1231, 1340, 1439, 1341, 1257, 1518, 1519, 1438, 1326, 1350, 1273, 1278, 1430, 1431, 1283, 1289, 1384, 1296, 1432, 1433, 1219, 1222, 1224, 1223, 1238, 1237, 1482, 1427, 1243, 1249, 1261, 1262, 1250, 1485, 1405, 1318, 1319, 1279, 1450, 1290, 1293, 1292, 1415, 
1295, 1300, 1301, 1402, 1214, 1529, 1215, 1218, 1460, 1387, 1304, 1220, 1310, 1348, 1349, 1345, 1530, 1531, 1532, 1406, 1576, 1478, 1479, 1467, 1480, 1227, 1394, 1533, 1312, 1396, 1228, 1381, 1481, 1360, 1308, 1230, 1329, 1232, 1233, 1313, 1311, 1234, 1408, 1534, 1535, 1404, 1235, 1536, 1468, 1236, 1537, 1538, 1239, 1240, 1388, 1324, 1483, 1417, 1241, 1484, 1242, 1245, 1247, 1248, 1251, 1386, 1351, 1252, 1577, 1435, 1356, 1253, 1461, 1401, 1574, 1254, 1539, 1411, 1255, 1256, 1580, 1258, 1259, 1346, 1540, 1322, 1541, 1418, 1459, 1264, 1307, 1210, 1462, 1403, 1337, 1542, 1265, 1543, 1544, 1389, 1407, 1412, 1325, 1398, 1486, 1457, 1268, 1266, 1334, 1419, 1267, 1456, 1458, 1315, 1546, 1473, 1472, 1376, 1377, 1316, 1378, 1379, 1390, 1365, 1545, 1317, 1366, 1463, 1302, 1361, 1269, 1400, 1573, 1344, 1466, 1469, 1420, 1487, 1488, 1464, 1465, 1353, 1470, 1548, 1454, 1354, 1331, 1285, 1524, 1575, 1410, 1422, 1425, 1352, 1271, 1475, 1474, 1525, 1367, 1550, 1368, 1272, 1343, 1362, 1363, 1364, 1489, 1321, 1370, 1369, 1274, 1549, 1395, 1275, 1528, 1527, 1383, 1424, 1276, 1437, 1327, 1455, 1380, 1328, 1342, 1277, 1385, 1359, 1320, 1490, 1371, 1429, 1393, 1372, 1471, 1333, 1373, 1374, 1281, 1423, 1382, 1375, 1282, 1305, 1414, 1523, 1416, 1336, 1339, 1443, 1444, 1445, 1446, 1447, 1448, 1449, 1578, 1491, 1358, 1494, 1495, 1493, 1492, 1357, 1428, 1284, 1554, 1555, 1556, 1557, 1579, 1551, 1397, 1287, 1286, 1552, 1553, 1355, 1413, 1409, 1421, 1440, 1391, 1291, 1496, 1561, 1562, 1563, 1564, 1565, 1566, 1568, 1567, 1569, 1570, 1571, 1520, 1294, 1323, 1572, 1297, 1330, 1392, 1306, 1558, 1559, 1560, 1347, 1303, 1526, 1399, 523: 1584, 1212, 1213, 1211, 555: 2709}, + // 1550 + {6: 1118, 1118, 59: 2715, 2714, 2716, 65: 2713, 67: 2717, 70: 2724, 2725, 77: 2723, 474: 2722, 479: 2721, 484: 2712, 491: 2719, 494: 2711, 497: 2718, 518: 2720, 648: 2727, 713: 2726, 2710}, + {6: 1157, 1157}, + {1120, 1120, 1120, 1120, 1120, 1120, 10: 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 
1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 373: 1120, 382: 1114, 1114, 387: 1114, 1114, 2351, 405: 1120, 475: 1114, 486: 1114, 578: 2354, 583: 2743, 591: 2783, 610: 2784}, + {1120, 1120, 1120, 1120, 1120, 1120, 10: 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 382: 2017, 2769, 387: 2772, 405: 1120, 475: 2018, 576: 2770, 583: 2743, 591: 2768, 742: 2771}, + {73: 2767}, + // 1555 + {73: 2766}, + {689: 2765}, + {689: 2764}, + {1120, 1120, 1120, 1120, 1120, 1120, 10: 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 
1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 405: 1120, 583: 2743, 591: 2761}, + {1120, 1120, 1120, 1120, 1120, 1120, 10: 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 
1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 405: 1120, 583: 2743, 591: 2757}, + // 1560 + {1120, 1120, 1120, 1120, 1120, 1120, 10: 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 1120, 387: 2741, 475: 2742, 583: 2743, 591: 2740}, + {382: 2017, 475: 2018, 576: 2733, 583: 2732}, + {6: 1136, 1136, 9: 1136}, + {76: 2731}, + {76: 2730}, + // 1565 + {6: 1133, 1133, 9: 1133}, + {6: 1132, 1132, 9: 1132}, + {6: 1117, 1117, 9: 2728}, + {6: 1116, 1116, 9: 1116}, + {59: 2715, 2714, 2716, 65: 2713, 67: 2717, 70: 2724, 2725, 77: 2723, 474: 2722, 479: 2721, 484: 2712, 491: 2719, 494: 2711, 497: 2718, 518: 2720, 648: 2729}, + // 1570 + {6: 1115, 1115, 9: 1115}, + {6: 1134, 1134, 9: 1134}, + {6: 1135, 1135, 9: 1135}, + {1309, 1332, 1217, 1442, 1436, 1426, 10: 1280, 1229, 1477, 1511, 1504, 1497, 1507, 1500, 1499, 1501, 1517, 1509, 1503, 1515, 1516, 1513, 1514, 1502, 1498, 1505, 1506, 1508, 1512, 1510, 1547, 1453, 1451, 1452, 1314, 1216, 1226, 1441, 1244, 1288, 1246, 1225, 1260, 1263, 1434, 1299, 1335, 1522, 1521, 1270, 1338, 1298, 1476, 1221, 1231, 1340, 1439, 
1341, 1257, 1518, 1519, 1438, 1326, 1350, 1273, 1278, 1430, 1431, 1283, 1289, 1384, 1296, 1432, 1433, 1219, 1222, 1224, 1223, 1238, 1237, 1482, 1427, 1243, 1249, 1261, 1262, 1250, 1485, 1405, 1318, 1319, 1279, 1450, 1290, 1293, 1292, 1415, 1295, 1300, 1301, 1402, 1214, 1529, 1215, 1218, 1460, 1387, 1304, 1220, 1310, 1348, 1349, 1345, 1530, 1531, 1532, 1406, 1576, 1478, 1479, 1467, 1480, 1227, 1394, 1533, 1312, 1396, 1228, 1381, 1481, 1360, 1308, 1230, 1329, 1232, 1233, 1313, 1311, 1234, 1408, 1534, 1535, 1404, 1235, 1536, 1468, 1236, 1537, 1538, 1239, 1240, 1388, 1324, 1483, 1417, 1241, 1484, 1242, 1245, 1247, 1248, 1251, 1386, 1351, 1252, 1577, 1435, 1356, 1253, 1461, 1401, 1574, 1254, 1539, 1411, 1255, 1256, 1580, 1258, 1259, 1346, 1540, 1322, 1541, 1418, 1459, 1264, 1307, 1210, 1462, 1403, 1337, 1542, 1265, 1543, 1544, 1389, 1407, 1412, 1325, 1398, 1486, 1457, 1268, 1266, 1334, 1419, 1267, 1456, 1458, 1315, 1546, 1473, 1472, 1376, 1377, 1316, 1378, 1379, 1390, 1365, 1545, 1317, 1366, 1463, 1302, 1361, 1269, 1400, 1573, 1344, 1466, 1469, 1420, 1487, 1488, 1464, 1465, 1353, 1470, 1548, 1454, 1354, 1331, 1285, 1524, 1575, 1410, 1422, 1425, 1352, 1271, 1475, 1474, 1525, 1367, 1550, 1368, 1272, 1343, 1362, 1363, 1364, 1489, 1321, 1370, 1369, 1274, 1549, 1395, 1275, 1528, 1527, 1383, 1424, 1276, 1437, 1327, 1455, 1380, 1328, 1342, 1277, 1385, 1359, 1320, 1490, 1371, 1429, 1393, 1372, 1471, 1333, 1373, 1374, 1281, 1423, 1382, 1375, 1282, 1305, 1414, 1523, 1416, 1336, 1339, 1443, 1444, 1445, 1446, 1447, 1448, 1449, 1578, 1491, 1358, 1494, 1495, 1493, 1492, 1357, 1428, 1284, 1554, 1555, 1556, 1557, 1579, 1551, 1397, 1287, 1286, 1552, 1553, 1355, 1413, 1409, 1421, 1440, 1391, 1291, 1496, 1561, 1562, 1563, 1564, 1565, 1566, 1568, 1567, 1569, 1570, 1571, 1520, 1294, 1323, 1572, 1297, 1330, 1392, 1306, 1558, 1559, 1560, 1347, 1303, 1526, 1399, 523: 2248, 1212, 1213, 1211, 554: 2737}, + {1309, 1332, 1217, 1442, 1436, 1426, 10: 1280, 1229, 1477, 1511, 1504, 1497, 1507, 1500, 1499, 1501, 1517, 1509, 1503, 1515, 1516, 1513, 1514, 1502, 1498, 1505, 1506, 1508, 1512, 1510, 1547, 1453, 1451, 1452, 1314, 1216, 1226, 1441, 1244, 1288, 1246, 1225, 1260, 1263, 1434, 1299, 1335, 1522, 1521, 1270, 1338, 1298, 1476, 1221, 1231, 1340, 1439, 1341, 1257, 1518, 1519, 1438, 1326, 1350, 1273, 1278, 1430, 1431, 1283, 1289, 1384, 1296, 1432, 1433, 1219, 1222, 1224, 1223, 1238, 1237, 1482, 1427, 1243, 1249, 1261, 1262, 1250, 1485, 1405, 1318, 1319, 1279, 1450, 1290, 1293, 1292, 1415, 1295, 1300, 1301, 1402, 1214, 1529, 1215, 1218, 1460, 1387, 1304, 1220, 1310, 1348, 1349, 1345, 1530, 1531, 1532, 1406, 1576, 1478, 1479, 1467, 1480, 1227, 1394, 1533, 1312, 1396, 1228, 1381, 1481, 1360, 1308, 1230, 1329, 1232, 1233, 1313, 1311, 1234, 1408, 1534, 1535, 1404, 1235, 1536, 1468, 1236, 1537, 1538, 1239, 1240, 1388, 1324, 1483, 1417, 1241, 1484, 1242, 1245, 1247, 1248, 1251, 1386, 1351, 1252, 1577, 1435, 1356, 1253, 1461, 1401, 1574, 1254, 1539, 1411, 1255, 1256, 1580, 1258, 1259, 1346, 1540, 1322, 1541, 1418, 1459, 1264, 1307, 1210, 1462, 1403, 1337, 1542, 1265, 1543, 1544, 1389, 1407, 1412, 1325, 1398, 1486, 1457, 1268, 1266, 1334, 1419, 1267, 1456, 1458, 1315, 1546, 1473, 1472, 1376, 1377, 1316, 1378, 1379, 1390, 1365, 1545, 1317, 1366, 1463, 1302, 1361, 1269, 1400, 1573, 1344, 1466, 1469, 1420, 1487, 1488, 1464, 1465, 1353, 1470, 1548, 1454, 1354, 1331, 1285, 1524, 1575, 1410, 1422, 1425, 1352, 1271, 1475, 1474, 1525, 1367, 1550, 1368, 1272, 1343, 1362, 1363, 1364, 1489, 1321, 1370, 1369, 1274, 1549, 1395, 1275, 1528, 1527, 
1383, 1424, 1276, 1437, 1327, 1455, 1380, 1328, 1342, 1277, 1385, 1359, 1320, 1490, 1371, 1429, 1393, 1372, 1471, 1333, 1373, 1374, 1281, 1423, 1382, 1375, 1282, 1305, 1414, 1523, 1416, 1336, 1339, 1443, 1444, 1445, 1446, 1447, 1448, 1449, 1578, 1491, 1358, 1494, 1495, 1493, 1492, 1357, 1428, 1284, 1554, 1555, 1556, 1557, 1579, 1551, 1397, 1287, 1286, 1552, 1553, 1355, 1413, 1409, 1421, 1440, 1391, 1291, 1496, 1561, 1562, 1563, 1564, 1565, 1566, 1568, 1567, 1569, 1570, 1571, 1520, 1294, 1323, 1572, 1297, 1330, 1392, 1306, 1558, 1559, 1560, 1347, 1303, 1526, 1399, 523: 2734, 1212, 1213, 1211}, + // 1575 + {492: 2735}, + {1309, 1332, 1217, 1442, 1436, 1426, 10: 1280, 1229, 1477, 1511, 1504, 1497, 1507, 1500, 1499, 1501, 1517, 1509, 1503, 1515, 1516, 1513, 1514, 1502, 1498, 1505, 1506, 1508, 1512, 1510, 1547, 1453, 1451, 1452, 1314, 1216, 1226, 1441, 1244, 1288, 1246, 1225, 1260, 1263, 1434, 1299, 1335, 1522, 1521, 1270, 1338, 1298, 1476, 1221, 1231, 1340, 1439, 1341, 1257, 1518, 1519, 1438, 1326, 1350, 1273, 1278, 1430, 1431, 1283, 1289, 1384, 1296, 1432, 1433, 1219, 1222, 1224, 1223, 1238, 1237, 1482, 1427, 1243, 1249, 1261, 1262, 1250, 1485, 1405, 1318, 1319, 1279, 1450, 1290, 1293, 1292, 1415, 1295, 1300, 1301, 1402, 1214, 1529, 1215, 1218, 1460, 1387, 1304, 1220, 1310, 1348, 1349, 1345, 1530, 1531, 1532, 1406, 1576, 1478, 1479, 1467, 1480, 1227, 1394, 1533, 1312, 1396, 1228, 1381, 1481, 1360, 1308, 1230, 1329, 1232, 1233, 1313, 1311, 1234, 1408, 1534, 1535, 1404, 1235, 1536, 1468, 1236, 1537, 1538, 1239, 1240, 1388, 1324, 1483, 1417, 1241, 1484, 1242, 1245, 1247, 1248, 1251, 1386, 1351, 1252, 1577, 1435, 1356, 1253, 1461, 1401, 1574, 1254, 1539, 1411, 1255, 1256, 1580, 1258, 1259, 1346, 1540, 1322, 1541, 1418, 1459, 1264, 1307, 1210, 1462, 1403, 1337, 1542, 1265, 1543, 1544, 1389, 1407, 1412, 1325, 1398, 1486, 1457, 1268, 1266, 1334, 1419, 1267, 1456, 1458, 1315, 1546, 1473, 1472, 1376, 1377, 1316, 1378, 1379, 1390, 1365, 1545, 1317, 1366, 1463, 1302, 1361, 1269, 1400, 1573, 1344, 1466, 1469, 1420, 1487, 1488, 1464, 1465, 1353, 1470, 1548, 1454, 1354, 1331, 1285, 1524, 1575, 1410, 1422, 1425, 1352, 1271, 1475, 1474, 1525, 1367, 1550, 1368, 1272, 1343, 1362, 1363, 1364, 1489, 1321, 1370, 1369, 1274, 1549, 1395, 1275, 1528, 1527, 1383, 1424, 1276, 1437, 1327, 1455, 1380, 1328, 1342, 1277, 1385, 1359, 1320, 1490, 1371, 1429, 1393, 1372, 1471, 1333, 1373, 1374, 1281, 1423, 1382, 1375, 1282, 1305, 1414, 1523, 1416, 1336, 1339, 1443, 1444, 1445, 1446, 1447, 1448, 1449, 1578, 1491, 1358, 1494, 1495, 1493, 1492, 1357, 1428, 1284, 1554, 1555, 1556, 1557, 1579, 1551, 1397, 1287, 1286, 1552, 1553, 1355, 1413, 1409, 1421, 1440, 1391, 1291, 1496, 1561, 1562, 1563, 1564, 1565, 1566, 1568, 1567, 1569, 1570, 1571, 1520, 1294, 1323, 1572, 1297, 1330, 1392, 1306, 1558, 1559, 1560, 1347, 1303, 1526, 1399, 523: 2736, 1212, 1213, 1211}, + {6: 1137, 1137, 9: 1137}, + {492: 2738}, + {1309, 1332, 1217, 1442, 1436, 1426, 10: 1280, 1229, 1477, 1511, 1504, 1497, 1507, 1500, 1499, 1501, 1517, 1509, 1503, 1515, 1516, 1513, 1514, 1502, 1498, 1505, 1506, 1508, 1512, 1510, 1547, 1453, 1451, 1452, 1314, 1216, 1226, 1441, 1244, 1288, 1246, 1225, 1260, 1263, 1434, 1299, 1335, 1522, 1521, 1270, 1338, 1298, 1476, 1221, 1231, 1340, 1439, 1341, 1257, 1518, 1519, 1438, 1326, 1350, 1273, 1278, 1430, 1431, 1283, 1289, 1384, 1296, 1432, 1433, 1219, 1222, 1224, 1223, 1238, 1237, 1482, 1427, 1243, 1249, 1261, 1262, 1250, 1485, 1405, 1318, 1319, 1279, 1450, 1290, 1293, 1292, 1415, 1295, 1300, 1301, 1402, 1214, 1529, 1215, 1218, 
1460, 1387, 1304, 1220, 1310, 1348, 1349, 1345, 1530, 1531, 1532, 1406, 1576, 1478, 1479, 1467, 1480, 1227, 1394, 1533, 1312, 1396, 1228, 1381, 1481, 1360, 1308, 1230, 1329, 1232, 1233, 1313, 1311, 1234, 1408, 1534, 1535, 1404, 1235, 1536, 1468, 1236, 1537, 1538, 1239, 1240, 1388, 1324, 1483, 1417, 1241, 1484, 1242, 1245, 1247, 1248, 1251, 1386, 1351, 1252, 1577, 1435, 1356, 1253, 1461, 1401, 1574, 1254, 1539, 1411, 1255, 1256, 1580, 1258, 1259, 1346, 1540, 1322, 1541, 1418, 1459, 1264, 1307, 1210, 1462, 1403, 1337, 1542, 1265, 1543, 1544, 1389, 1407, 1412, 1325, 1398, 1486, 1457, 1268, 1266, 1334, 1419, 1267, 1456, 1458, 1315, 1546, 1473, 1472, 1376, 1377, 1316, 1378, 1379, 1390, 1365, 1545, 1317, 1366, 1463, 1302, 1361, 1269, 1400, 1573, 1344, 1466, 1469, 1420, 1487, 1488, 1464, 1465, 1353, 1470, 1548, 1454, 1354, 1331, 1285, 1524, 1575, 1410, 1422, 1425, 1352, 1271, 1475, 1474, 1525, 1367, 1550, 1368, 1272, 1343, 1362, 1363, 1364, 1489, 1321, 1370, 1369, 1274, 1549, 1395, 1275, 1528, 1527, 1383, 1424, 1276, 1437, 1327, 1455, 1380, 1328, 1342, 1277, 1385, 1359, 1320, 1490, 1371, 1429, 1393, 1372, 1471, 1333, 1373, 1374, 1281, 1423, 1382, 1375, 1282, 1305, 1414, 1523, 1416, 1336, 1339, 1443, 1444, 1445, 1446, 1447, 1448, 1449, 1578, 1491, 1358, 1494, 1495, 1493, 1492, 1357, 1428, 1284, 1554, 1555, 1556, 1557, 1579, 1551, 1397, 1287, 1286, 1552, 1553, 1355, 1413, 1409, 1421, 1440, 1391, 1291, 1496, 1561, 1562, 1563, 1564, 1565, 1566, 1568, 1567, 1569, 1570, 1571, 1520, 1294, 1323, 1572, 1297, 1330, 1392, 1306, 1558, 1559, 1560, 1347, 1303, 1526, 1399, 523: 2248, 1212, 1213, 1211, 554: 2739}, + // 1580 + {6: 1138, 1138, 9: 1138}, + {1309, 1332, 1217, 1442, 1436, 1426, 10: 1280, 1229, 1477, 1511, 1504, 1497, 1507, 1500, 1499, 1501, 1517, 1509, 1503, 1515, 1516, 1513, 1514, 1502, 1498, 1505, 1506, 1508, 1512, 1510, 1547, 1453, 1451, 1452, 1314, 1216, 1226, 1441, 1244, 1288, 1246, 1225, 1260, 1263, 1434, 1299, 1335, 1522, 1521, 1270, 1338, 1298, 1476, 1221, 1231, 1340, 1439, 1341, 1257, 1518, 1519, 1438, 1326, 1350, 1273, 1278, 1430, 1431, 1283, 1289, 1384, 1296, 1432, 1433, 1219, 1222, 1224, 1223, 1238, 1237, 1482, 1427, 1243, 1249, 1261, 1262, 1250, 1485, 1405, 1318, 1319, 1279, 1450, 1290, 1293, 1292, 1415, 1295, 1300, 1301, 1402, 1214, 1529, 1215, 1218, 1460, 1387, 1304, 1220, 1310, 1348, 1349, 1345, 1530, 1531, 1532, 1406, 1576, 1478, 1479, 1467, 1480, 1227, 1394, 1533, 1312, 1396, 1228, 1381, 1481, 1360, 1308, 1230, 1329, 1232, 1233, 1313, 1311, 1234, 1408, 1534, 1535, 1404, 1235, 1536, 1468, 1236, 1537, 1538, 1239, 1240, 1388, 1324, 1483, 1417, 1241, 1484, 1242, 1245, 1247, 1248, 1251, 1386, 1351, 1252, 1577, 1435, 1356, 1253, 1461, 1401, 1574, 1254, 1539, 1411, 1255, 1256, 1580, 1258, 1259, 1346, 1540, 1322, 1541, 1418, 1459, 1264, 1307, 1210, 1462, 1403, 1337, 1542, 1265, 1543, 1544, 1389, 1407, 1412, 1325, 1398, 1486, 1457, 1268, 1266, 1334, 1419, 1267, 1456, 1458, 1315, 1546, 1473, 1472, 1376, 1377, 1316, 1378, 1379, 1390, 1365, 1545, 1317, 1366, 1463, 1302, 1361, 1269, 1400, 1573, 1344, 1466, 1469, 1420, 1487, 1488, 1464, 1465, 1353, 1470, 1548, 1454, 1354, 1331, 1285, 1524, 1575, 1410, 1422, 1425, 1352, 1271, 1475, 1474, 1525, 1367, 1550, 1368, 1272, 1343, 1362, 1363, 1364, 1489, 1321, 1370, 1369, 1274, 1549, 1395, 1275, 1528, 1527, 1383, 1424, 1276, 1437, 1327, 1455, 1380, 1328, 1342, 1277, 1385, 1359, 1320, 1490, 1371, 1429, 1393, 1372, 1471, 1333, 1373, 1374, 1281, 1423, 1382, 1375, 1282, 1305, 1414, 1523, 1416, 1336, 1339, 1443, 1444, 1445, 1446, 1447, 1448, 1449, 1578, 1491, 
1358, 1494, 1495, 1493, 1492, 1357, 1428, 1284, 1554, 1555, 1556, 1557, 1579, 1551, 1397, 1287, 1286, 1552, 1553, 1355, 1413, 1409, 1421, 1440, 1391, 1291, 1496, 1561, 1562, 1563, 1564, 1565, 1566, 1568, 1567, 1569, 1570, 1571, 1520, 1294, 1323, 1572, 1297, 1330, 1392, 1306, 1558, 1559, 1560, 1347, 1303, 1526, 1399, 523: 2248, 1212, 1213, 1211, 554: 2748}, + {1309, 1332, 1217, 1442, 1436, 1426, 10: 1280, 1229, 1477, 1511, 1504, 1497, 1507, 1500, 1499, 1501, 1517, 1509, 1503, 1515, 1516, 1513, 1514, 1502, 1498, 1505, 1506, 1508, 1512, 1510, 1547, 1453, 1451, 1452, 1314, 1216, 1226, 1441, 1244, 1288, 1246, 1225, 1260, 1263, 1434, 1299, 1335, 1522, 1521, 1270, 1338, 1298, 1476, 1221, 1231, 1340, 1439, 1341, 1257, 1518, 1519, 1438, 1326, 1350, 1273, 1278, 1430, 1431, 1283, 1289, 1384, 1296, 1432, 1433, 1219, 1222, 1224, 1223, 1238, 1237, 1482, 1427, 1243, 1249, 1261, 1262, 1250, 1485, 1405, 1318, 1319, 1279, 1450, 1290, 1293, 1292, 1415, 1295, 1300, 1301, 1402, 1214, 1529, 1215, 1218, 1460, 1387, 1304, 1220, 1310, 1348, 1349, 1345, 1530, 1531, 1532, 1406, 1576, 1478, 1479, 1467, 1480, 1227, 1394, 1533, 1312, 1396, 1228, 1381, 1481, 1360, 1308, 1230, 1329, 1232, 1233, 1313, 1311, 1234, 1408, 1534, 1535, 1404, 1235, 1536, 1468, 1236, 1537, 1538, 1239, 1240, 1388, 1324, 1483, 1417, 1241, 1484, 1242, 1245, 1247, 1248, 1251, 1386, 1351, 1252, 1577, 1435, 1356, 1253, 1461, 1401, 1574, 1254, 1539, 1411, 1255, 1256, 1580, 1258, 1259, 1346, 1540, 1322, 1541, 1418, 1459, 1264, 1307, 1210, 1462, 1403, 1337, 1542, 1265, 1543, 1544, 1389, 1407, 1412, 1325, 1398, 1486, 1457, 1268, 1266, 1334, 1419, 1267, 1456, 1458, 1315, 1546, 1473, 1472, 1376, 1377, 1316, 1378, 1379, 1390, 1365, 1545, 1317, 1366, 1463, 1302, 1361, 1269, 1400, 1573, 1344, 1466, 1469, 1420, 1487, 1488, 1464, 1465, 1353, 1470, 1548, 1454, 1354, 1331, 1285, 1524, 1575, 1410, 1422, 1425, 1352, 1271, 1475, 1474, 1525, 1367, 1550, 1368, 1272, 1343, 1362, 1363, 1364, 1489, 1321, 1370, 1369, 1274, 1549, 1395, 1275, 1528, 1527, 1383, 1424, 1276, 1437, 1327, 1455, 1380, 1328, 1342, 1277, 1385, 1359, 1320, 1490, 1371, 1429, 1393, 1372, 1471, 1333, 1373, 1374, 1281, 1423, 1382, 1375, 1282, 1305, 1414, 1523, 1416, 1336, 1339, 1443, 1444, 1445, 1446, 1447, 1448, 1449, 1578, 1491, 1358, 1494, 1495, 1493, 1492, 1357, 1428, 1284, 1554, 1555, 1556, 1557, 1579, 1551, 1397, 1287, 1286, 1552, 1553, 1355, 1413, 1409, 1421, 1440, 1391, 1291, 1496, 1561, 1562, 1563, 1564, 1565, 1566, 1568, 1567, 1569, 1570, 1571, 1520, 1294, 1323, 1572, 1297, 1330, 1392, 1306, 1558, 1559, 1560, 1347, 1303, 1526, 1399, 523: 2746, 1212, 1213, 1211}, + {1309, 1332, 1217, 1442, 1436, 1426, 10: 1280, 1229, 1477, 1511, 1504, 1497, 1507, 1500, 1499, 1501, 1517, 1509, 1503, 1515, 1516, 1513, 1514, 1502, 1498, 1505, 1506, 1508, 1512, 1510, 1547, 1453, 1451, 1452, 1314, 1216, 1226, 1441, 1244, 1288, 1246, 1225, 1260, 1263, 1434, 1299, 1335, 1522, 1521, 1270, 1338, 1298, 1476, 1221, 1231, 1340, 1439, 1341, 1257, 1518, 1519, 1438, 1326, 1350, 1273, 1278, 1430, 1431, 1283, 1289, 1384, 1296, 1432, 1433, 1219, 1222, 1224, 1223, 1238, 1237, 1482, 1427, 1243, 1249, 1261, 1262, 1250, 1485, 1405, 1318, 1319, 1279, 1450, 1290, 1293, 1292, 1415, 1295, 1300, 1301, 1402, 1214, 1529, 1215, 1218, 1460, 1387, 1304, 1220, 1310, 1348, 1349, 1345, 1530, 1531, 1532, 1406, 1576, 1478, 1479, 1467, 1480, 1227, 1394, 1533, 1312, 1396, 1228, 1381, 1481, 1360, 1308, 1230, 1329, 1232, 1233, 1313, 1311, 1234, 1408, 1534, 1535, 1404, 1235, 1536, 1468, 1236, 1537, 1538, 1239, 1240, 1388, 1324, 1483, 1417, 1241, 1484, 
1242, 1245, 1247, 1248, 1251, 1386, 1351, 1252, 1577, 1435, 1356, 1253, 1461, 1401, 1574, 1254, 1539, 1411, 1255, 1256, 1580, 1258, 1259, 1346, 1540, 1322, 1541, 1418, 1459, 1264, 1307, 1210, 1462, 1403, 1337, 1542, 1265, 1543, 1544, 1389, 1407, 1412, 1325, 1398, 1486, 1457, 1268, 1266, 1334, 1419, 1267, 1456, 1458, 1315, 1546, 1473, 1472, 1376, 1377, 1316, 1378, 1379, 1390, 1365, 1545, 1317, 1366, 1463, 1302, 1361, 1269, 1400, 1573, 1344, 1466, 1469, 1420, 1487, 1488, 1464, 1465, 1353, 1470, 1548, 1454, 1354, 1331, 1285, 1524, 1575, 1410, 1422, 1425, 1352, 1271, 1475, 1474, 1525, 1367, 1550, 1368, 1272, 1343, 1362, 1363, 1364, 1489, 1321, 1370, 1369, 1274, 1549, 1395, 1275, 1528, 1527, 1383, 1424, 1276, 1437, 1327, 1455, 1380, 1328, 1342, 1277, 1385, 1359, 1320, 1490, 1371, 1429, 1393, 1372, 1471, 1333, 1373, 1374, 1281, 1423, 1382, 1375, 1282, 1305, 1414, 1523, 1416, 1336, 1339, 1443, 1444, 1445, 1446, 1447, 1448, 1449, 1578, 1491, 1358, 1494, 1495, 1493, 1492, 1357, 1428, 1284, 1554, 1555, 1556, 1557, 1579, 1551, 1397, 1287, 1286, 1552, 1553, 1355, 1413, 1409, 1421, 1440, 1391, 1291, 1496, 1561, 1562, 1563, 1564, 1565, 1566, 1568, 1567, 1569, 1570, 1571, 1520, 1294, 1323, 1572, 1297, 1330, 1392, 1306, 1558, 1559, 1560, 1347, 1303, 1526, 1399, 523: 2744, 1212, 1213, 1211}, + {1119, 1119, 1119, 1119, 1119, 1119, 10: 1119, 1119, 1119, 1119, 1119, 1119, 1119, 1119, 1119, 1119, 1119, 1119, 1119, 1119, 1119, 1119, 1119, 1119, 1119, 1119, 1119, 1119, 1119, 1119, 1119, 1119, 1119, 1119, 1119, 1119, 1119, 1119, 1119, 1119, 1119, 1119, 1119, 1119, 1119, 1119, 1119, 1119, 1119, 1119, 1119, 1119, 1119, 1119, 1119, 1119, 1119, 1119, 1119, 1119, 1119, 1119, 1119, 1119, 1119, 1119, 1119, 1119, 1119, 1119, 1119, 1119, 1119, 1119, 1119, 1119, 1119, 1119, 1119, 1119, 1119, 1119, 1119, 1119, 1119, 1119, 1119, 1119, 1119, 1119, 1119, 1119, 1119, 1119, 1119, 1119, 1119, 1119, 1119, 1119, 1119, 1119, 1119, 1119, 1119, 1119, 1119, 1119, 1119, 1119, 1119, 1119, 1119, 1119, 1119, 1119, 1119, 1119, 1119, 1119, 1119, 1119, 1119, 1119, 1119, 1119, 1119, 1119, 1119, 1119, 1119, 1119, 1119, 1119, 1119, 1119, 1119, 1119, 1119, 1119, 1119, 1119, 1119, 1119, 1119, 1119, 1119, 1119, 1119, 1119, 1119, 1119, 1119, 1119, 1119, 1119, 1119, 1119, 1119, 1119, 1119, 1119, 1119, 1119, 1119, 1119, 1119, 1119, 1119, 1119, 1119, 1119, 1119, 1119, 1119, 1119, 1119, 1119, 1119, 1119, 1119, 1119, 1119, 1119, 1119, 1119, 1119, 1119, 1119, 1119, 1119, 1119, 1119, 1119, 1119, 1119, 1119, 1119, 1119, 1119, 1119, 1119, 1119, 1119, 1119, 1119, 1119, 1119, 1119, 1119, 1119, 1119, 1119, 1119, 1119, 1119, 1119, 1119, 1119, 1119, 1119, 1119, 1119, 1119, 1119, 1119, 1119, 1119, 1119, 1119, 1119, 1119, 1119, 1119, 1119, 1119, 1119, 1119, 1119, 1119, 1119, 1119, 1119, 1119, 1119, 1119, 1119, 1119, 1119, 1119, 1119, 1119, 1119, 1119, 1119, 1119, 1119, 1119, 1119, 1119, 1119, 1119, 1119, 1119, 1119, 1119, 1119, 1119, 1119, 1119, 1119, 1119, 1119, 1119, 1119, 1119, 1119, 1119, 1119, 1119, 1119, 1119, 1119, 1119, 1119, 1119, 1119, 1119, 1119, 1119, 1119, 1119, 1119, 1119, 1119, 1119, 1119, 1119, 1119, 1119, 1119, 1119, 1119, 1119, 1119, 1119, 1119, 1119, 1119, 1119, 1119, 1119, 1119, 1119, 1119, 1119, 1119, 1119, 1119, 1119, 1119, 1119, 1119, 1119, 1119, 1119, 1119, 1119, 1119, 1119, 1119, 1119, 1119, 1119, 1119, 1119, 1119, 1119, 1119, 1119, 1119, 1119, 1119, 1119, 1119, 1119, 1119, 1119, 1119, 1119, 1119, 1119, 1119, 1119, 1119, 1119, 1119, 1119, 1119, 1119, 1119, 1119, 1119, 1119, 1119, 1119, 1119, 1119, 373: 1119, 405: 1119}, + // 1585 + {36: 
2415, 2414, 588: 2745}, + {6: 1129, 1129, 9: 1129}, + {44: 2371, 372: 2372, 627: 2747}, + {6: 1131, 1131, 9: 1131}, + {480: 2749, 484: 2750}, + // 1590 + {374: 2752}, + {374: 2751}, + {6: 1139, 1139, 9: 1139}, + {373: 2754, 376: 1646, 378: 1655, 2661, 2662, 406: 1650, 419: 1645, 1647, 430: 1649, 1648, 435: 1654, 439: 1653, 442: 1652, 527: 2660, 529: 1651, 700: 2753}, + {6: 1141, 1141, 9: 1141}, + // 1595 + {1309, 1332, 1217, 1442, 1436, 1426, 10: 1280, 1625, 1477, 1511, 1504, 1497, 1507, 1500, 1499, 1501, 1517, 1509, 1503, 1515, 1516, 1513, 1514, 1502, 1498, 1505, 1506, 1508, 1512, 1510, 1547, 1453, 1451, 1452, 1314, 1623, 1226, 1441, 1244, 1288, 1246, 1225, 1628, 1263, 1434, 1299, 1335, 1522, 1521, 1270, 1338, 1298, 1476, 1221, 1231, 1340, 1439, 1341, 1257, 1518, 1519, 1438, 1326, 1350, 1273, 1278, 1430, 1431, 1283, 1289, 1384, 1632, 1432, 1433, 1219, 1222, 1224, 1223, 1238, 1626, 1482, 1427, 1243, 1249, 1261, 1262, 1250, 1485, 1405, 1318, 1319, 1279, 1450, 1290, 1631, 1630, 1415, 1295, 1300, 1633, 1402, 1214, 1529, 1215, 1218, 1460, 1387, 1304, 1624, 1310, 1348, 1349, 1345, 1530, 1531, 1532, 1406, 1576, 1478, 1479, 1467, 1480, 1227, 1394, 1533, 1312, 1396, 1228, 1381, 1481, 1640, 1636, 1230, 1329, 1232, 1233, 1313, 1311, 1234, 1408, 1534, 1535, 1404, 1235, 1536, 1468, 1236, 1537, 1538, 1627, 1240, 1388, 1324, 1483, 1417, 1241, 1484, 1242, 1245, 1247, 1248, 1251, 1386, 1351, 1252, 1577, 1435, 1356, 1253, 1461, 1401, 1574, 1254, 1539, 1411, 1255, 1256, 1580, 1258, 1259, 1346, 1540, 1322, 1541, 1418, 1459, 1629, 1307, 1622, 1462, 1403, 1337, 1542, 1265, 1543, 1544, 1389, 1407, 1412, 1325, 1398, 1486, 1457, 1268, 1266, 1334, 1419, 1267, 1456, 1458, 1315, 1546, 1473, 1472, 1376, 1377, 1316, 1378, 1379, 1390, 1642, 1545, 1317, 1643, 1463, 1302, 1641, 1269, 1400, 1573, 1344, 1466, 1469, 1420, 1487, 1488, 1464, 1465, 1353, 1470, 1548, 1454, 1354, 1331, 1285, 1524, 1575, 1410, 1422, 1425, 1352, 1271, 1475, 1474, 1525, 1367, 1550, 1368, 1272, 1343, 1362, 1363, 1364, 1489, 1637, 1370, 1369, 1274, 1549, 1395, 1275, 1528, 1527, 1383, 1424, 1276, 1437, 1327, 1455, 1380, 1328, 1638, 1277, 1385, 1639, 1320, 1490, 1644, 1429, 1393, 1372, 1471, 1333, 1373, 1374, 1281, 1423, 1382, 1375, 1282, 1305, 1414, 1523, 1416, 1336, 1339, 1443, 1444, 1445, 1446, 1447, 1448, 1449, 1578, 1491, 1358, 1494, 1495, 1493, 1492, 1357, 1428, 1284, 1554, 1555, 1556, 1557, 1579, 1551, 1397, 1287, 1286, 1552, 1553, 1355, 1413, 1409, 1421, 1440, 1391, 1291, 1496, 1561, 1562, 1563, 1564, 1565, 1566, 1568, 1567, 1569, 1570, 1571, 1520, 1294, 1323, 1572, 1297, 1330, 1392, 1635, 1558, 1559, 1560, 1347, 1634, 1526, 1399, 1618, 1671, 1674, 376: 1646, 378: 1655, 1669, 1668, 1699, 400: 1658, 404: 1616, 1677, 1650, 410: 1682, 419: 1645, 1647, 429: 1675, 1649, 1648, 433: 1676, 435: 1654, 1680, 1689, 1711, 1653, 1690, 1691, 1652, 1666, 1667, 1705, 1697, 1700, 1706, 1707, 1702, 1703, 1708, 1701, 1704, 1695, 1673, 1685, 1686, 1688, 1684, 1678, 1679, 1670, 1681, 1683, 1672, 1687, 1692, 1693, 523: 1657, 1212, 1213, 1211, 1663, 1659, 1651, 1662, 1660, 1661, 1694, 1698, 1696, 1656, 1665, 1709, 1710, 1664, 1621, 1620, 1619, 2755}, + {8: 2756, 392: 1720, 1719, 395: 1718, 1717, 1715, 547: 1716, 1714}, + {6: 1140, 1140, 9: 1140}, + {915, 915, 915, 915, 915, 915, 10: 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 
915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 405: 2311, 575: 2758}, + {1309, 1332, 1217, 1442, 1436, 1426, 10: 1280, 1229, 1477, 1511, 1504, 1497, 1507, 1500, 1499, 1501, 1517, 1509, 1503, 1515, 1516, 1513, 1514, 1502, 1498, 1505, 1506, 1508, 1512, 1510, 1547, 1453, 1451, 1452, 1314, 1216, 1226, 1441, 1244, 1288, 1246, 1225, 1260, 1263, 1434, 1299, 1335, 1522, 1521, 1270, 1338, 1298, 1476, 1221, 1231, 1340, 1439, 1341, 1257, 1518, 1519, 1438, 1326, 1350, 1273, 1278, 1430, 1431, 1283, 1289, 1384, 1296, 1432, 1433, 1219, 1222, 1224, 1223, 1238, 1237, 1482, 1427, 1243, 1249, 1261, 1262, 1250, 1485, 1405, 1318, 1319, 1279, 1450, 1290, 1293, 1292, 1415, 1295, 1300, 1301, 1402, 1214, 1529, 1215, 1218, 1460, 1387, 1304, 1220, 1310, 1348, 1349, 1345, 1530, 1531, 1532, 1406, 1576, 1478, 1479, 1467, 1480, 1227, 1394, 1533, 1312, 1396, 1228, 1381, 1481, 1360, 1308, 1230, 1329, 1232, 1233, 1313, 1311, 1234, 1408, 1534, 1535, 1404, 1235, 1536, 1468, 1236, 1537, 1538, 1239, 1240, 1388, 1324, 1483, 1417, 1241, 1484, 1242, 1245, 1247, 1248, 1251, 1386, 1351, 1252, 1577, 1435, 1356, 1253, 1461, 1401, 1574, 1254, 1539, 1411, 1255, 1256, 1580, 1258, 1259, 1346, 1540, 1322, 1541, 1418, 1459, 1264, 1307, 1210, 1462, 1403, 1337, 1542, 1265, 1543, 1544, 1389, 1407, 1412, 1325, 1398, 1486, 1457, 1268, 1266, 1334, 1419, 1267, 1456, 1458, 1315, 1546, 1473, 1472, 1376, 1377, 1316, 1378, 1379, 1390, 1365, 1545, 1317, 1366, 1463, 1302, 1361, 1269, 1400, 1573, 1344, 1466, 1469, 1420, 1487, 1488, 1464, 1465, 1353, 1470, 1548, 1454, 1354, 1331, 1285, 1524, 1575, 1410, 1422, 1425, 1352, 1271, 1475, 1474, 1525, 1367, 1550, 1368, 1272, 1343, 1362, 1363, 1364, 1489, 1321, 1370, 1369, 1274, 1549, 1395, 1275, 1528, 1527, 1383, 1424, 1276, 1437, 1327, 1455, 1380, 1328, 1342, 1277, 1385, 1359, 1320, 1490, 1371, 1429, 1393, 1372, 1471, 1333, 1373, 1374, 1281, 1423, 1382, 1375, 1282, 1305, 1414, 1523, 1416, 1336, 1339, 1443, 1444, 1445, 1446, 1447, 1448, 1449, 1578, 1491, 1358, 1494, 1495, 1493, 1492, 1357, 1428, 1284, 1554, 1555, 1556, 1557, 1579, 1551, 1397, 1287, 1286, 1552, 1553, 
1355, 1413, 1409, 1421, 1440, 1391, 1291, 1496, 1561, 1562, 1563, 1564, 1565, 1566, 1568, 1567, 1569, 1570, 1571, 1520, 1294, 1323, 1572, 1297, 1330, 1392, 1306, 1558, 1559, 1560, 1347, 1303, 1526, 1399, 523: 2248, 1212, 1213, 1211, 554: 2759}, + // 1600 + {1309, 1332, 1217, 1442, 1436, 1426, 10: 1280, 1229, 1477, 1511, 1504, 1497, 1507, 1500, 1499, 1501, 1517, 1509, 1503, 1515, 1516, 1513, 1514, 1502, 1498, 1505, 1506, 1508, 1512, 1510, 1547, 1453, 1451, 1452, 1314, 1216, 1226, 1441, 1244, 1288, 1246, 1225, 1260, 1263, 1434, 1299, 1335, 1522, 1521, 1270, 1338, 1298, 1476, 1221, 1231, 1340, 1439, 1341, 1257, 1518, 1519, 1438, 1326, 1350, 1273, 1278, 1430, 1431, 1283, 1289, 1384, 1296, 1432, 1433, 1219, 1222, 1224, 1223, 1238, 1237, 1482, 1427, 1243, 1249, 1261, 1262, 1250, 1485, 1405, 1318, 1319, 1279, 1450, 1290, 1293, 1292, 1415, 1295, 1300, 1301, 1402, 1214, 1529, 1215, 1218, 1460, 1387, 1304, 1220, 1310, 1348, 1349, 1345, 1530, 1531, 1532, 1406, 1576, 1478, 1479, 1467, 1480, 1227, 1394, 1533, 1312, 1396, 1228, 1381, 1481, 1360, 1308, 1230, 1329, 1232, 1233, 1313, 1311, 1234, 1408, 1534, 1535, 1404, 1235, 1536, 1468, 1236, 1537, 1538, 1239, 1240, 1388, 1324, 1483, 1417, 1241, 1484, 1242, 1245, 1247, 1248, 1251, 1386, 1351, 1252, 1577, 1435, 1356, 1253, 1461, 1401, 1574, 1254, 1539, 1411, 1255, 1256, 1580, 1258, 1259, 1346, 1540, 1322, 1541, 1418, 1459, 1264, 1307, 1210, 1462, 1403, 1337, 1542, 1265, 1543, 1544, 1389, 1407, 1412, 1325, 1398, 1486, 1457, 1268, 1266, 1334, 1419, 1267, 1456, 1458, 1315, 1546, 1473, 1472, 1376, 1377, 1316, 1378, 1379, 1390, 1365, 1545, 1317, 1366, 1463, 1302, 1361, 1269, 1400, 1573, 1344, 1466, 1469, 1420, 1487, 1488, 1464, 1465, 1353, 1470, 1548, 1454, 1354, 1331, 1285, 1524, 1575, 1410, 1422, 1425, 1352, 1271, 1475, 1474, 1525, 1367, 1550, 1368, 1272, 1343, 1362, 1363, 1364, 1489, 1321, 1370, 1369, 1274, 1549, 1395, 1275, 1528, 1527, 1383, 1424, 1276, 1437, 1327, 1455, 1380, 1328, 1342, 1277, 1385, 1359, 1320, 1490, 1371, 1429, 1393, 1372, 1471, 1333, 1373, 1374, 1281, 1423, 1382, 1375, 1282, 1305, 1414, 1523, 1416, 1336, 1339, 1443, 1444, 1445, 1446, 1447, 1448, 1449, 1578, 1491, 1358, 1494, 1495, 1493, 1492, 1357, 1428, 1284, 1554, 1555, 1556, 1557, 1579, 1551, 1397, 1287, 1286, 1552, 1553, 1355, 1413, 1409, 1421, 1440, 1391, 1291, 1496, 1561, 1562, 1563, 1564, 1565, 1566, 1568, 1567, 1569, 1570, 1571, 1520, 1294, 1323, 1572, 1297, 1330, 1392, 1306, 1558, 1559, 1560, 1347, 1303, 1526, 1399, 523: 2248, 1212, 1213, 1211, 554: 2352, 584: 2760}, + {6: 1142, 1142, 9: 1142}, + {915, 915, 915, 915, 915, 915, 10: 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 
915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 405: 2311, 575: 2762}, + {1309, 1332, 1217, 1442, 1436, 1426, 10: 1280, 1229, 1477, 1511, 1504, 1497, 1507, 1500, 1499, 1501, 1517, 1509, 1503, 1515, 1516, 1513, 1514, 1502, 1498, 1505, 1506, 1508, 1512, 1510, 1547, 1453, 1451, 1452, 1314, 1216, 1226, 1441, 1244, 1288, 1246, 1225, 1260, 1263, 1434, 1299, 1335, 1522, 1521, 1270, 1338, 1298, 1476, 1221, 1231, 1340, 1439, 1341, 1257, 1518, 1519, 1438, 1326, 1350, 1273, 1278, 1430, 1431, 1283, 1289, 1384, 1296, 1432, 1433, 1219, 1222, 1224, 1223, 1238, 1237, 1482, 1427, 1243, 1249, 1261, 1262, 1250, 1485, 1405, 1318, 1319, 1279, 1450, 1290, 1293, 1292, 1415, 1295, 1300, 1301, 1402, 1214, 1529, 1215, 1218, 1460, 1387, 1304, 1220, 1310, 1348, 1349, 1345, 1530, 1531, 1532, 1406, 1576, 1478, 1479, 1467, 1480, 1227, 1394, 1533, 1312, 1396, 1228, 1381, 1481, 1360, 1308, 1230, 1329, 1232, 1233, 1313, 1311, 1234, 1408, 1534, 1535, 1404, 1235, 1536, 1468, 1236, 1537, 1538, 1239, 1240, 1388, 1324, 1483, 1417, 1241, 1484, 1242, 1245, 1247, 1248, 1251, 1386, 1351, 1252, 1577, 1435, 1356, 1253, 1461, 1401, 1574, 1254, 1539, 1411, 1255, 1256, 1580, 1258, 1259, 1346, 1540, 1322, 1541, 1418, 1459, 1264, 1307, 1210, 1462, 1403, 1337, 1542, 1265, 1543, 1544, 1389, 1407, 1412, 1325, 1398, 1486, 1457, 1268, 1266, 1334, 1419, 1267, 1456, 1458, 1315, 1546, 1473, 1472, 1376, 1377, 1316, 1378, 1379, 1390, 1365, 1545, 1317, 1366, 1463, 1302, 1361, 1269, 1400, 1573, 1344, 1466, 1469, 1420, 1487, 1488, 1464, 1465, 1353, 1470, 1548, 1454, 1354, 1331, 1285, 1524, 1575, 1410, 1422, 1425, 1352, 1271, 1475, 1474, 1525, 1367, 1550, 1368, 1272, 1343, 1362, 1363, 1364, 1489, 1321, 1370, 1369, 1274, 1549, 1395, 1275, 1528, 1527, 1383, 1424, 1276, 1437, 1327, 1455, 1380, 1328, 1342, 1277, 1385, 1359, 1320, 1490, 1371, 1429, 1393, 1372, 1471, 1333, 1373, 1374, 1281, 1423, 1382, 1375, 1282, 1305, 1414, 1523, 1416, 1336, 1339, 1443, 1444, 1445, 1446, 1447, 1448, 1449, 1578, 1491, 1358, 1494, 1495, 1493, 1492, 1357, 1428, 1284, 1554, 1555, 1556, 1557, 1579, 1551, 1397, 1287, 1286, 1552, 1553, 1355, 1413, 1409, 1421, 1440, 1391, 1291, 1496, 1561, 1562, 1563, 1564, 1565, 1566, 1568, 1567, 1569, 1570, 1571, 1520, 1294, 1323, 1572, 1297, 1330, 1392, 1306, 1558, 1559, 1560, 1347, 1303, 1526, 1399, 523: 2248, 1212, 1213, 1211, 554: 2352, 584: 2763}, + {6: 1143, 1143, 9: 1143}, + // 1605 + {6: 1144, 1144, 9: 1144}, + {6: 1145, 1145, 9: 1145}, + {6: 1148, 1148, 9: 1148}, + {6: 1149, 1149, 9: 1149}, + {915, 915, 915, 915, 915, 915, 10: 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 
915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 405: 2311, 575: 2780}, + // 1610 + {382: 2779}, + {915, 915, 915, 915, 915, 915, 10: 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 405: 2311, 575: 2777}, + {382: 2774}, + {1309, 1332, 1217, 1442, 1436, 1426, 
10: 1280, 1229, 1477, 1511, 1504, 1497, 1507, 1500, 1499, 1501, 1517, 1509, 1503, 1515, 1516, 1513, 1514, 1502, 1498, 1505, 1506, 1508, 1512, 1510, 1547, 1453, 1451, 1452, 1314, 1216, 1226, 1441, 1244, 1288, 1246, 1225, 1260, 1263, 1434, 1299, 1335, 1522, 1521, 1270, 1338, 1298, 1476, 1221, 1231, 1340, 1439, 1341, 1257, 1518, 1519, 1438, 1326, 1350, 1273, 1278, 1430, 1431, 1283, 1289, 1384, 1296, 1432, 1433, 1219, 1222, 1224, 1223, 1238, 1237, 1482, 1427, 1243, 1249, 1261, 1262, 1250, 1485, 1405, 1318, 1319, 1279, 1450, 1290, 1293, 1292, 1415, 1295, 1300, 1301, 1402, 1214, 1529, 1215, 1218, 1460, 1387, 1304, 1220, 1310, 1348, 1349, 1345, 1530, 1531, 1532, 1406, 1576, 1478, 1479, 1467, 1480, 1227, 1394, 1533, 1312, 1396, 1228, 1381, 1481, 1360, 1308, 1230, 1329, 1232, 1233, 1313, 1311, 1234, 1408, 1534, 1535, 1404, 1235, 1536, 1468, 1236, 1537, 1538, 1239, 1240, 1388, 1324, 1483, 1417, 1241, 1484, 1242, 1245, 1247, 1248, 1251, 1386, 1351, 1252, 1577, 1435, 1356, 1253, 1461, 1401, 1574, 1254, 1539, 1411, 1255, 1256, 1580, 1258, 1259, 1346, 1540, 1322, 1541, 1418, 1459, 1264, 1307, 1210, 1462, 1403, 1337, 1542, 1265, 1543, 1544, 1389, 1407, 1412, 1325, 1398, 1486, 1457, 1268, 1266, 1334, 1419, 1267, 1456, 1458, 1315, 1546, 1473, 1472, 1376, 1377, 1316, 1378, 1379, 1390, 1365, 1545, 1317, 1366, 1463, 1302, 1361, 1269, 1400, 1573, 1344, 1466, 1469, 1420, 1487, 1488, 1464, 1465, 1353, 1470, 1548, 1454, 1354, 1331, 1285, 1524, 1575, 1410, 1422, 1425, 1352, 1271, 1475, 1474, 1525, 1367, 1550, 1368, 1272, 1343, 1362, 1363, 1364, 1489, 1321, 1370, 1369, 1274, 1549, 1395, 1275, 1528, 1527, 1383, 1424, 1276, 1437, 1327, 1455, 1380, 1328, 1342, 1277, 1385, 1359, 1320, 1490, 1371, 1429, 1393, 1372, 1471, 1333, 1373, 1374, 1281, 1423, 1382, 1375, 1282, 1305, 1414, 1523, 1416, 1336, 1339, 1443, 1444, 1445, 1446, 1447, 1448, 1449, 1578, 1491, 1358, 1494, 1495, 1493, 1492, 1357, 1428, 1284, 1554, 1555, 1556, 1557, 1579, 1551, 1397, 1287, 1286, 1552, 1553, 1355, 1413, 1409, 1421, 1440, 1391, 1291, 1496, 1561, 1562, 1563, 1564, 1565, 1566, 1568, 1567, 1569, 1570, 1571, 1520, 1294, 1323, 1572, 1297, 1330, 1392, 1306, 1558, 1559, 1560, 1347, 1303, 1526, 1399, 523: 2773, 1212, 1213, 1211}, + {6: 1130, 1130, 9: 1130}, + // 1615 + {915, 915, 915, 915, 915, 915, 10: 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 
915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 915, 405: 2311, 575: 2775}, + {1309, 1332, 1217, 1442, 1436, 1426, 10: 1280, 1229, 1477, 1511, 1504, 1497, 1507, 1500, 1499, 1501, 1517, 1509, 1503, 1515, 1516, 1513, 1514, 1502, 1498, 1505, 1506, 1508, 1512, 1510, 1547, 1453, 1451, 1452, 1314, 1216, 1226, 1441, 1244, 1288, 1246, 1225, 1260, 1263, 1434, 1299, 1335, 1522, 1521, 1270, 1338, 1298, 1476, 1221, 1231, 1340, 1439, 1341, 1257, 1518, 1519, 1438, 1326, 1350, 1273, 1278, 1430, 1431, 1283, 1289, 1384, 1296, 1432, 1433, 1219, 1222, 1224, 1223, 1238, 1237, 1482, 1427, 1243, 1249, 1261, 1262, 1250, 1485, 1405, 1318, 1319, 1279, 1450, 1290, 1293, 1292, 1415, 1295, 1300, 1301, 1402, 1214, 1529, 1215, 1218, 1460, 1387, 1304, 1220, 1310, 1348, 1349, 1345, 1530, 1531, 1532, 1406, 1576, 1478, 1479, 1467, 1480, 1227, 1394, 1533, 1312, 1396, 1228, 1381, 1481, 1360, 1308, 1230, 1329, 1232, 1233, 1313, 1311, 1234, 1408, 1534, 1535, 1404, 1235, 1536, 1468, 1236, 1537, 1538, 1239, 1240, 1388, 1324, 1483, 1417, 1241, 1484, 1242, 1245, 1247, 1248, 1251, 1386, 1351, 1252, 1577, 1435, 1356, 1253, 1461, 1401, 1574, 1254, 1539, 1411, 1255, 1256, 1580, 1258, 1259, 1346, 1540, 1322, 1541, 1418, 1459, 1264, 1307, 1210, 1462, 1403, 1337, 1542, 1265, 1543, 1544, 1389, 1407, 1412, 1325, 1398, 1486, 1457, 1268, 1266, 1334, 1419, 1267, 1456, 1458, 1315, 1546, 1473, 1472, 1376, 1377, 1316, 1378, 1379, 1390, 1365, 1545, 1317, 1366, 1463, 1302, 1361, 1269, 1400, 1573, 1344, 1466, 1469, 1420, 1487, 1488, 1464, 1465, 1353, 1470, 1548, 1454, 1354, 1331, 1285, 1524, 1575, 1410, 1422, 1425, 1352, 1271, 1475, 1474, 1525, 1367, 1550, 1368, 1272, 1343, 1362, 1363, 1364, 1489, 1321, 1370, 1369, 1274, 1549, 1395, 1275, 1528, 1527, 1383, 1424, 1276, 1437, 1327, 1455, 1380, 1328, 1342, 1277, 1385, 1359, 1320, 1490, 1371, 1429, 1393, 1372, 1471, 1333, 1373, 1374, 1281, 1423, 1382, 1375, 1282, 1305, 1414, 1523, 1416, 1336, 1339, 1443, 1444, 1445, 1446, 1447, 1448, 1449, 1578, 1491, 1358, 1494, 1495, 1493, 1492, 1357, 1428, 1284, 1554, 1555, 1556, 1557, 1579, 1551, 1397, 1287, 1286, 1552, 1553, 1355, 1413, 1409, 1421, 1440, 1391, 1291, 1496, 1561, 1562, 1563, 1564, 1565, 1566, 1568, 1567, 1569, 1570, 1571, 1520, 1294, 1323, 1572, 1297, 1330, 1392, 1306, 1558, 1559, 1560, 1347, 1303, 1526, 1399, 523: 2673, 1212, 1213, 1211, 703: 2776}, + {6: 1146, 1146, 9: 1146}, + {1309, 1332, 1217, 1442, 1436, 1426, 10: 1280, 1229, 1477, 1511, 1504, 1497, 1507, 1500, 1499, 1501, 1517, 1509, 1503, 1515, 1516, 1513, 1514, 1502, 1498, 1505, 1506, 1508, 1512, 1510, 1547, 1453, 1451, 1452, 1314, 1216, 1226, 1441, 1244, 1288, 1246, 1225, 1260, 1263, 1434, 1299, 1335, 1522, 1521, 1270, 1338, 1298, 1476, 1221, 1231, 1340, 1439, 1341, 1257, 1518, 1519, 1438, 1326, 1350, 1273, 1278, 1430, 1431, 1283, 1289, 1384, 1296, 1432, 1433, 1219, 1222, 1224, 1223, 1238, 1237, 1482, 1427, 1243, 1249, 1261, 1262, 1250, 1485, 1405, 1318, 1319, 1279, 1450, 1290, 1293, 1292, 1415, 1295, 1300, 1301, 1402, 1214, 1529, 1215, 1218, 1460, 1387, 1304, 1220, 1310, 1348, 1349, 1345, 1530, 1531, 1532, 
1406, 1576, 1478, 1479, 1467, 1480, 1227, 1394, 1533, 1312, 1396, 1228, 1381, 1481, 1360, 1308, 1230, 1329, 1232, 1233, 1313, 1311, 1234, 1408, 1534, 1535, 1404, 1235, 1536, 1468, 1236, 1537, 1538, 1239, 1240, 1388, 1324, 1483, 1417, 1241, 1484, 1242, 1245, 1247, 1248, 1251, 1386, 1351, 1252, 1577, 1435, 1356, 1253, 1461, 1401, 1574, 1254, 1539, 1411, 1255, 1256, 1580, 1258, 1259, 1346, 1540, 1322, 1541, 1418, 1459, 1264, 1307, 1210, 1462, 1403, 1337, 1542, 1265, 1543, 1544, 1389, 1407, 1412, 1325, 1398, 1486, 1457, 1268, 1266, 1334, 1419, 1267, 1456, 1458, 1315, 1546, 1473, 1472, 1376, 1377, 1316, 1378, 1379, 1390, 1365, 1545, 1317, 1366, 1463, 1302, 1361, 1269, 1400, 1573, 1344, 1466, 1469, 1420, 1487, 1488, 1464, 1465, 1353, 1470, 1548, 1454, 1354, 1331, 1285, 1524, 1575, 1410, 1422, 1425, 1352, 1271, 1475, 1474, 1525, 1367, 1550, 1368, 1272, 1343, 1362, 1363, 1364, 1489, 1321, 1370, 1369, 1274, 1549, 1395, 1275, 1528, 1527, 1383, 1424, 1276, 1437, 1327, 1455, 1380, 1328, 1342, 1277, 1385, 1359, 1320, 1490, 1371, 1429, 1393, 1372, 1471, 1333, 1373, 1374, 1281, 1423, 1382, 1375, 1282, 1305, 1414, 1523, 1416, 1336, 1339, 1443, 1444, 1445, 1446, 1447, 1448, 1449, 1578, 1491, 1358, 1494, 1495, 1493, 1492, 1357, 1428, 1284, 1554, 1555, 1556, 1557, 1579, 1551, 1397, 1287, 1286, 1552, 1553, 1355, 1413, 1409, 1421, 1440, 1391, 1291, 1496, 1561, 1562, 1563, 1564, 1565, 1566, 1568, 1567, 1569, 1570, 1571, 1520, 1294, 1323, 1572, 1297, 1330, 1392, 1306, 1558, 1559, 1560, 1347, 1303, 1526, 1399, 523: 2778, 1212, 1213, 1211}, + {6: 1147, 1147, 9: 1147}, + // 1620 + {6: 1150, 1150, 9: 1150}, + {1309, 1332, 1217, 1442, 1436, 1426, 10: 1280, 1229, 1477, 1511, 1504, 1497, 1507, 1500, 1499, 1501, 1517, 1509, 1503, 1515, 1516, 1513, 1514, 1502, 1498, 1505, 1506, 1508, 1512, 1510, 1547, 1453, 1451, 1452, 1314, 1216, 1226, 1441, 1244, 1288, 1246, 1225, 1260, 1263, 1434, 1299, 1335, 1522, 1521, 1270, 1338, 1298, 1476, 1221, 1231, 1340, 1439, 1341, 1257, 1518, 1519, 1438, 1326, 1350, 1273, 1278, 1430, 1431, 1283, 1289, 1384, 1296, 1432, 1433, 1219, 1222, 1224, 1223, 1238, 1237, 1482, 1427, 1243, 1249, 1261, 1262, 1250, 1485, 1405, 1318, 1319, 1279, 1450, 1290, 1293, 1292, 1415, 1295, 1300, 1301, 1402, 1214, 1529, 1215, 1218, 1460, 1387, 1304, 1220, 1310, 1348, 1349, 1345, 1530, 1531, 1532, 1406, 1576, 1478, 1479, 1467, 1480, 1227, 1394, 1533, 1312, 1396, 1228, 1381, 1481, 1360, 1308, 1230, 1329, 1232, 1233, 1313, 1311, 1234, 1408, 1534, 1535, 1404, 1235, 1536, 1468, 1236, 1537, 1538, 1239, 1240, 1388, 1324, 1483, 1417, 1241, 1484, 1242, 1245, 1247, 1248, 1251, 1386, 1351, 1252, 1577, 1435, 1356, 1253, 1461, 1401, 1574, 1254, 1539, 1411, 1255, 1256, 1580, 1258, 1259, 1346, 1540, 1322, 1541, 1418, 1459, 1264, 1307, 1210, 1462, 1403, 1337, 1542, 1265, 1543, 1544, 1389, 1407, 1412, 1325, 1398, 1486, 1457, 1268, 1266, 1334, 1419, 1267, 1456, 1458, 1315, 1546, 1473, 1472, 1376, 1377, 1316, 1378, 1379, 1390, 1365, 1545, 1317, 1366, 1463, 1302, 1361, 1269, 1400, 1573, 1344, 1466, 1469, 1420, 1487, 1488, 1464, 1465, 1353, 1470, 1548, 1454, 1354, 1331, 1285, 1524, 1575, 1410, 1422, 1425, 1352, 1271, 1475, 1474, 1525, 1367, 1550, 1368, 1272, 1343, 1362, 1363, 1364, 1489, 1321, 1370, 1369, 1274, 1549, 1395, 1275, 1528, 1527, 1383, 1424, 1276, 1437, 1327, 1455, 1380, 1328, 1342, 1277, 1385, 1359, 1320, 1490, 1371, 1429, 1393, 1372, 1471, 1333, 1373, 1374, 1281, 1423, 1382, 1375, 1282, 1305, 1414, 1523, 1416, 1336, 1339, 1443, 1444, 1445, 1446, 1447, 1448, 1449, 1578, 1491, 1358, 1494, 1495, 1493, 1492, 1357, 1428, 1284, 
1554, 1555, 1556, 1557, 1579, 1551, 1397, 1287, 1286, 1552, 1553, 1355, 1413, 1409, 1421, 1440, 1391, 1291, 1496, 1561, 1562, 1563, 1564, 1565, 1566, 1568, 1567, 1569, 1570, 1571, 1520, 1294, 1323, 1572, 1297, 1330, 1392, 1306, 1558, 1559, 1560, 1347, 1303, 1526, 1399, 523: 2248, 1212, 1213, 1211, 554: 2781}, + {6: 993, 993, 9: 993, 485: 2317, 487: 2316, 696: 2782}, + {6: 1151, 1151, 9: 1151}, + {913, 913, 913, 913, 913, 913, 10: 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 913, 373: 913, 405: 1608, 587: 2785}, + // 1625 + {6: 1152, 1152, 9: 1152}, + {1309, 1332, 1217, 1442, 1436, 1426, 10: 1280, 1229, 1477, 1511, 1504, 1497, 1507, 1500, 1499, 1501, 1517, 1509, 1503, 1515, 1516, 1513, 1514, 1502, 1498, 1505, 1506, 1508, 1512, 1510, 1547, 1453, 1451, 1452, 1314, 1216, 1226, 1441, 1244, 1288, 1246, 1225, 1260, 1263, 1434, 1299, 1335, 1522, 1521, 1270, 1338, 1298, 1476, 1221, 1231, 1340, 1439, 1341, 1257, 1518, 1519, 1438, 1326, 1350, 1273, 1278, 1430, 1431, 1283, 1289, 1384, 1296, 1432, 1433, 1219, 1222, 1224, 1223, 1238, 1237, 1482, 1427, 1243, 1249, 1261, 1262, 1250, 1485, 1405, 1318, 1319, 1279, 1450, 1290, 1293, 1292, 1415, 1295, 1300, 1301, 1402, 1214, 1529, 1215, 1218, 1460, 1387, 1304, 1220, 1310, 1348, 1349, 1345, 1530, 1531, 1532, 1406, 1576, 1478, 1479, 1467, 1480, 1227, 1394, 1533, 1312, 1396, 1228, 1381, 1481, 1360, 1308, 1230, 1329, 1232, 1233, 1313, 1311, 1234, 1408, 1534, 1535, 1404, 1235, 1536, 1468, 1236, 1537, 1538, 1239, 1240, 1388, 1324, 1483, 1417, 1241, 1484, 1242, 1245, 1247, 1248, 1251, 1386, 1351, 1252, 1577, 1435, 1356, 1253, 1461, 1401, 1574, 1254, 1539, 1411, 1255, 1256, 1580, 1258, 1259, 1346, 1540, 1322, 1541, 1418, 1459, 1264, 1307, 1210, 1462, 1403, 1337, 1542, 1265, 1543, 1544, 1389, 1407, 1412, 1325, 1398, 1486, 1457, 1268, 1266, 
1334, 1419, 1267, 1456, 1458, 1315, 1546, 1473, 1472, 1376, 1377, 1316, 1378, 1379, 1390, 1365, 1545, 1317, 1366, 1463, 1302, 1361, 1269, 1400, 1573, 1344, 1466, 1469, 1420, 1487, 1488, 1464, 1465, 1353, 1470, 1548, 1454, 1354, 1331, 1285, 1524, 1575, 1410, 1422, 1425, 1352, 1271, 1475, 1474, 1525, 1367, 1550, 1368, 1272, 1343, 1362, 1363, 1364, 1489, 1321, 1370, 1369, 1274, 1549, 1395, 1275, 1528, 1527, 1383, 1424, 1276, 1437, 1327, 1455, 1380, 1328, 1342, 1277, 1385, 1359, 1320, 1490, 1371, 1429, 1393, 1372, 1471, 1333, 1373, 1374, 1281, 1423, 1382, 1375, 1282, 1305, 1414, 1523, 1416, 1336, 1339, 1443, 1444, 1445, 1446, 1447, 1448, 1449, 1578, 1491, 1358, 1494, 1495, 1493, 1492, 1357, 1428, 1284, 1554, 1555, 1556, 1557, 1579, 1551, 1397, 1287, 1286, 1552, 1553, 1355, 1413, 1409, 1421, 1440, 1391, 1291, 1496, 1561, 1562, 1563, 1564, 1565, 1566, 1568, 1567, 1569, 1570, 1571, 1520, 1294, 1323, 1572, 1297, 1330, 1392, 1306, 1558, 1559, 1560, 1347, 1303, 1526, 1399, 373: 2787, 523: 2248, 1212, 1213, 1211, 554: 2352, 584: 2786}, + {6: 1154, 1154, 9: 1154}, + {1309, 1332, 1217, 1442, 1436, 1426, 10: 1280, 1229, 1477, 1511, 1504, 1497, 1507, 1500, 1499, 1501, 1517, 1509, 1503, 1515, 1516, 1513, 1514, 1502, 1498, 1505, 1506, 1508, 1512, 1510, 1547, 1453, 1451, 1452, 1314, 1216, 1226, 1441, 1244, 1288, 1246, 1225, 1260, 1263, 1434, 1299, 1335, 1522, 1521, 1270, 1338, 1298, 1476, 1221, 1231, 1340, 1439, 1341, 1257, 1518, 1519, 1438, 1326, 1350, 1273, 1278, 1430, 1431, 1283, 1289, 1384, 1296, 1432, 1433, 1219, 1222, 1224, 1223, 1238, 1237, 1482, 1427, 1243, 1249, 1261, 1262, 1250, 1485, 1405, 1318, 1319, 1279, 1450, 1290, 1293, 1292, 1415, 1295, 1300, 1301, 1402, 1214, 1529, 1215, 1218, 1460, 1387, 1304, 1220, 1310, 1348, 1349, 1345, 1530, 1531, 1532, 1406, 1576, 1478, 1479, 1467, 1480, 1227, 1394, 1533, 1312, 1396, 1228, 1381, 1481, 1360, 1308, 1230, 1329, 1232, 1233, 1313, 1311, 1234, 1408, 1534, 1535, 1404, 1235, 1536, 1468, 1236, 1537, 1538, 1239, 1240, 1388, 1324, 1483, 1417, 1241, 1484, 1242, 1245, 1247, 1248, 1251, 1386, 1351, 1252, 1577, 1435, 1356, 1253, 1461, 1401, 1574, 1254, 1539, 1411, 1255, 1256, 1580, 1258, 1259, 1346, 1540, 1322, 1541, 1418, 1459, 1264, 1307, 1210, 1462, 1403, 1337, 1542, 1265, 1543, 1544, 1389, 1407, 1412, 1325, 1398, 1486, 1457, 1268, 1266, 1334, 1419, 1267, 1456, 1458, 1315, 1546, 1473, 1472, 1376, 1377, 1316, 1378, 1379, 1390, 1365, 1545, 1317, 1366, 1463, 1302, 1361, 1269, 1400, 1573, 1344, 1466, 1469, 1420, 1487, 1488, 1464, 1465, 1353, 1470, 1548, 1454, 1354, 1331, 1285, 1524, 1575, 1410, 1422, 1425, 1352, 1271, 1475, 1474, 1525, 1367, 1550, 1368, 1272, 1343, 1362, 1363, 1364, 1489, 1321, 1370, 1369, 1274, 1549, 1395, 1275, 1528, 1527, 1383, 1424, 1276, 1437, 1327, 1455, 1380, 1328, 1342, 1277, 1385, 1359, 1320, 1490, 1371, 1429, 1393, 1372, 1471, 1333, 1373, 1374, 1281, 1423, 1382, 1375, 1282, 1305, 1414, 1523, 1416, 1336, 1339, 1443, 1444, 1445, 1446, 1447, 1448, 1449, 1578, 1491, 1358, 1494, 1495, 1493, 1492, 1357, 1428, 1284, 1554, 1555, 1556, 1557, 1579, 1551, 1397, 1287, 1286, 1552, 1553, 1355, 1413, 1409, 1421, 1440, 1391, 1291, 1496, 1561, 1562, 1563, 1564, 1565, 1566, 1568, 1567, 1569, 1570, 1571, 1520, 1294, 1323, 1572, 1297, 1330, 1392, 1306, 1558, 1559, 1560, 1347, 1303, 1526, 1399, 382: 1114, 1114, 387: 1114, 1114, 2351, 475: 1114, 486: 1114, 523: 2248, 1212, 1213, 1211, 554: 2352, 578: 2354, 584: 2355, 610: 2356, 644: 2357, 705: 2788}, + {8: 2789, 2359}, + // 1630 + {6: 1153, 1153, 9: 1153}, + {6: 986, 986, 56: 1182, 1164, 1166, 69: 1176, 72: 
1165, 75: 1207, 407: 1172, 410: 1175, 478: 1177, 480: 1181, 1208, 484: 1169, 491: 1162, 566: 1201, 1178, 1179, 1180, 1168, 1174, 593: 1190, 599: 1198, 601: 1200, 625: 1167, 641: 1183, 647: 1185, 649: 1186, 1163, 1187, 1188, 658: 1189, 1192, 1193, 1194, 665: 1171, 1195, 1196, 1197, 1184, 672: 1170, 1191, 1173, 697: 1199, 1202, 1203, 701: 2791, 708: 1204, 1205}, + {6: 127, 127}, + } +) + +var yyDebug = 0 + +type yyLexer interface { + Lex(lval *yySymType) int + Errorf(format string, a ...interface{}) error + AppendError(err error) + Errors() (warns []error, errs []error) +} + +type yyLexerEx interface { + yyLexer + Reduced(rule, state int, lval *yySymType) bool +} + +func yySymName(c int) (s string) { + x, ok := yyXLAT[c] + if ok { + return yySymNames[x] + } + + return __yyfmt__.Sprintf("%d", c) +} + +func yylex1(yylex yyLexer, lval *yySymType) (n int) { + n = yylex.Lex(lval) + if n <= 0 { + n = yyEOFCode + } + if yyDebug >= 3 { + __yyfmt__.Printf("\nlex %s(%#x %d), lval: %+v\n", yySymName(n), n, n, lval) + } + return n +} + +func yyParse(yylex yyLexer, parser *Parser) int { + const yyError = 737 + + yyEx, _ := yylex.(yyLexerEx) + var yyn int + parser.yylval = yySymType{} + yyS := parser.cache + + Nerrs := 0 /* number of errors */ + Errflag := 0 /* error recovery flag */ + yyerrok := func() { + if yyDebug >= 2 { + __yyfmt__.Printf("yyerrok()\n") + } + Errflag = 0 + } + _ = yyerrok + yystate := 0 + yychar := -1 + var yyxchar int + var yyshift int + yyp := -1 + goto yystack + +ret0: + return 0 + +ret1: + return 1 + +yystack: + /* put a state and value onto the stack */ + yyp++ + if yyp+1 >= len(yyS) { + nyys := make([]yySymType, len(yyS)*2) + copy(nyys, yyS) + yyS = nyys + parser.cache = yyS + } + parser.yyVAL = &yyS[yyp+1] + yyS[yyp].yys = yystate + +yynewstate: + if yychar < 0 { + yychar = yylex1(yylex, &parser.yylval) + var ok bool + if yyxchar, ok = yyXLAT[yychar]; !ok { + yyxchar = len(yySymNames) // > tab width + } + } + if yyDebug >= 4 { + var a []int + for _, v := range yyS[:yyp+1] { + a = append(a, v.yys) + } + __yyfmt__.Printf("state stack %v\n", a) + } + row := yyParseTab[yystate] + yyn = 0 + if yyxchar < len(row) { + if yyn = int(row[yyxchar]); yyn != 0 { + yyn += yyTabOfs + } + } + switch { + case yyn > 0: // shift + yychar = -1 + *parser.yyVAL = parser.yylval + yystate = yyn + yyshift = yyn + if yyDebug >= 2 { + __yyfmt__.Printf("shift, and goto state %d\n", yystate) + } + if Errflag > 0 { + Errflag-- + } + goto yystack + case yyn < 0: // reduce + case yystate == 1: // accept + if yyDebug >= 2 { + __yyfmt__.Println("accept") + } + goto ret0 + } + + if yyn == 0 { + /* error ... attempt to resume parsing */ + switch Errflag { + case 0: /* brand new error */ + if yyDebug >= 1 { + __yyfmt__.Printf("no action for %s in state %d\n", yySymName(yychar), yystate) + } + msg, ok := yyXErrors[yyXError{yystate, yyxchar}] + if !ok { + msg, ok = yyXErrors[yyXError{yystate, -1}] + } + if !ok && yyshift != 0 { + msg, ok = yyXErrors[yyXError{yyshift, yyxchar}] + } + if !ok { + msg, ok = yyXErrors[yyXError{yyshift, -1}] + } + if !ok || msg == "" { + msg = "syntax error" + } + // ignore goyacc error message + yylex.AppendError(yylex.Errorf("")) + Nerrs++ + fallthrough + + case 1, 2: /* incompletely recovered error ... 
try again */ + Errflag = 3 + + /* find a state where "error" is a legal shift action */ + for yyp >= 0 { + row := yyParseTab[yyS[yyp].yys] + if yyError < len(row) { + yyn = int(row[yyError]) + yyTabOfs + if yyn > 0 { // hit + if yyDebug >= 2 { + __yyfmt__.Printf("error recovery found error shift in state %d\n", yyS[yyp].yys) + } + yystate = yyn /* simulate a shift of "error" */ + goto yystack + } + } + + /* the current p has no shift on "error", pop stack */ + if yyDebug >= 2 { + __yyfmt__.Printf("error recovery pops state %d\n", yyS[yyp].yys) + } + yyp-- + } + /* there is no state on the stack with an error shift ... abort */ + if yyDebug >= 2 { + __yyfmt__.Printf("error recovery failed\n") + } + goto ret1 + + case 3: /* no shift yet; clobber input char */ + if yyDebug >= 2 { + __yyfmt__.Printf("error recovery discards %s\n", yySymName(yychar)) + } + if yychar == yyEOFCode { + goto ret1 + } + + yychar = -1 + goto yynewstate /* try again in the same state */ + } + } + + r := -yyn + x0 := yyReductions[r] + x, n := x0.xsym, x0.components + yypt := yyp + _ = yypt // guard against "declared and not used" + + yyp -= n + if yyp+1 >= len(yyS) { + nyys := make([]yySymType, len(yyS)*2) + copy(nyys, yyS) + yyS = nyys + parser.cache = yyS + } + parser.yyVAL = &yyS[yyp+1] + + /* consult goto table to find next state */ + exState := yystate + yystate = int(yyParseTab[yyS[yyp].yys][x]) + yyTabOfs + /* reduction by production r */ + if yyDebug >= 2 { + __yyfmt__.Printf("reduce using rule %v (%s), and goto state %d\n", r, yySymNames[x], yystate) + } + + switch r { + case 2: + { + specs := yyS[yypt-0].item.([]*ast.AlterTableSpec) + parser.yyVAL.statement = &ast.AlterTableStmt{ + Table: yyS[yypt-1].item.(*ast.TableName), + Specs: specs, + } + } + case 3: + { + parser.yyVAL.item = []string{} + } + case 4: + { + parser.yyVAL.item = yyS[yypt-0].item + } + case 5: + { + parser.yyVAL.item = &ast.AlterTableSpec{ + IfNotExists: yyS[yypt-1].item.(bool), + Tp: ast.AlterTableAddColumns, + NewColumns: []*ast.ColumnDef{yyS[yypt-0].item.(*ast.ColumnDef)}, + } + } + case 6: + { + tes := yyS[yypt-1].item.([]interface{}) + var columnDefs []*ast.ColumnDef + var constraints []*ast.Constraint + for _, te := range tes { + switch te := te.(type) { + case *ast.ColumnDef: + columnDefs = append(columnDefs, te) + case *ast.Constraint: + constraints = append(constraints, te) + } + } + parser.yyVAL.item = &ast.AlterTableSpec{ + IfNotExists: yyS[yypt-3].item.(bool), + Tp: ast.AlterTableAddColumns, + NewColumns: columnDefs, + NewConstraints: constraints, + } + } + case 7: + { + constraint := yyS[yypt-0].item.(*ast.Constraint) + parser.yyVAL.item = &ast.AlterTableSpec{ + Tp: ast.AlterTableAddConstraint, + Constraint: constraint, + } + } + case 8: + { + parser.yyVAL.item = &ast.AlterTableSpec{ + IfExists: yyS[yypt-2].item.(bool), + Tp: ast.AlterTableDropColumn, + OldColumnName: yyS[yypt-1].item.(*ast.ColumnName), + } + } + case 9: + { + parser.yyVAL.item = &ast.AlterTableSpec{Tp: ast.AlterTableDropPrimaryKey} + } + case 10: + { + ret := &ast.AlterTableSpec{ + Tp: ast.AlterTableImportTablespace, + } + parser.yyVAL.item = ret + yylex.AppendError(yylex.Errorf("The IMPORT TABLESPACE clause is parsed but ignored by all storage engines.")) + parser.lastErrorAsWarn() + } + case 11: + { + ret := &ast.AlterTableSpec{ + Tp: ast.AlterTableDiscardTablespace, + } + parser.yyVAL.item = ret + yylex.AppendError(yylex.Errorf("The DISCARD TABLESPACE clause is parsed but ignored by all storage engines.")) + parser.lastErrorAsWarn() + } + case 12: + { + 
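// Reduction for an ALTER TABLE ... DROP INDEX/KEY [IF EXISTS] specification (inferred from the fields set below): the optional IF EXISTS flag and the index name are recorded on the AlterTableSpec. +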
parser.yyVAL.item = &ast.AlterTableSpec{ + IfExists: yyS[yypt-1].item.(bool), + Tp: ast.AlterTableDropIndex, + Name: yyS[yypt-0].ident, + } + } + case 13: + { + parser.yyVAL.item = &ast.AlterTableSpec{ + IfExists: yyS[yypt-1].item.(bool), + Tp: ast.AlterTableDropForeignKey, + Name: yyS[yypt-0].item.(string), + } + } + case 14: + { + parser.yyVAL.item = &ast.AlterTableSpec{ + Tp: ast.AlterTableDisableKeys, + } + } + case 15: + { + parser.yyVAL.item = &ast.AlterTableSpec{ + Tp: ast.AlterTableEnableKeys, + } + } + case 16: + { + parser.yyVAL.item = &ast.AlterTableSpec{ + IfExists: yyS[yypt-1].item.(bool), + Tp: ast.AlterTableModifyColumn, + NewColumns: []*ast.ColumnDef{yyS[yypt-0].item.(*ast.ColumnDef)}, + } + } + case 17: + { + parser.yyVAL.item = &ast.AlterTableSpec{ + IfExists: yyS[yypt-2].item.(bool), + Tp: ast.AlterTableChangeColumn, + OldColumnName: yyS[yypt-1].item.(*ast.ColumnName), + NewColumns: []*ast.ColumnDef{yyS[yypt-0].item.(*ast.ColumnDef)}, + } + } + case 18: + { + option := &ast.ColumnOption{Expr: yyS[yypt-0].expr} + colDef := &ast.ColumnDef{ + Name: yyS[yypt-3].item.(*ast.ColumnName), + Options: []*ast.ColumnOption{option}, + } + parser.yyVAL.item = &ast.AlterTableSpec{ + Tp: ast.AlterTableAlterColumn, + NewColumns: []*ast.ColumnDef{colDef}, + } + } + case 19: + { + option := &ast.ColumnOption{Expr: yyS[yypt-1].expr} + colDef := &ast.ColumnDef{ + Name: yyS[yypt-5].item.(*ast.ColumnName), + Options: []*ast.ColumnOption{option}, + } + parser.yyVAL.item = &ast.AlterTableSpec{ + Tp: ast.AlterTableAlterColumn, + NewColumns: []*ast.ColumnDef{colDef}, + } + } + case 20: + { + colDef := &ast.ColumnDef{ + Name: yyS[yypt-2].item.(*ast.ColumnName), + } + parser.yyVAL.item = &ast.AlterTableSpec{ + Tp: ast.AlterTableAlterColumn, + NewColumns: []*ast.ColumnDef{colDef}, + } + } + case 21: + { + parser.yyVAL.item = &ast.AlterTableSpec{ + Tp: ast.AlterTableRenameColumn, + OldColumnName: yyS[yypt-2].item.(*ast.ColumnName), + NewColumnName: yyS[yypt-0].item.(*ast.ColumnName), + } + } + case 22: + { + parser.yyVAL.item = &ast.AlterTableSpec{ + Tp: ast.AlterTableRenameIndex, + FromKey: model.NewCIStr(yyS[yypt-2].ident), + ToKey: model.NewCIStr(yyS[yypt-0].ident), + } + } + case 23: + { + // Parse it and ignore it. Just for compatibility. + parser.yyVAL.item = &ast.AlterTableSpec{ + Tp: ast.AlterTableForce, + } + } + case 24: + { + // Parse it and ignore it. Just for compatibility. + parser.yyVAL.item = &ast.AlterTableSpec{ + Tp: ast.AlterTableWithValidation, + } + yylex.AppendError(yylex.Errorf("The WITH/WITHOUT VALIDATION clause is parsed but ignored by all storage engines.")) + parser.lastErrorAsWarn() + } + case 25: + { + // Parse it and ignore it. Just for compatibility. + parser.yyVAL.item = &ast.AlterTableSpec{ + Tp: ast.AlterTableWithoutValidation, + } + yylex.AppendError(yylex.Errorf("The WITH/WITHOUT VALIDATION clause is parsed but ignored by all storage engines.")) + parser.lastErrorAsWarn() + } + case 26: + { + // Parse it and ignore it. Just for compatibility. + parser.yyVAL.item = &ast.AlterTableSpec{ + Tp: ast.AlterTableSecondaryLoad, + } + yylex.AppendError(yylex.Errorf("The SECONDARY_LOAD clause is parsed but not implement yet.")) + parser.lastErrorAsWarn() + } + case 27: + { + // Parse it and ignore it. Just for compatibility. 
+ parser.yyVAL.item = &ast.AlterTableSpec{ + Tp: ast.AlterTableSecondaryUnload, + } + yylex.AppendError(yylex.Errorf("The SECONDARY_UNLOAD VALIDATION clause is parsed but not implement yet.")) + parser.lastErrorAsWarn() + } + case 28: + { + // Parse it and ignore it. Just for compatibility. + c := &ast.Constraint{ + Name: yyS[yypt-1].ident, + Enforced: yyS[yypt-0].item.(bool), + } + parser.yyVAL.item = &ast.AlterTableSpec{ + Tp: ast.AlterTableAlterCheck, + Constraint: c, + } + yylex.AppendError(yylex.Errorf("The ALTER CHECK clause is parsed but not implemented yet.")) + parser.lastErrorAsWarn() + } + case 29: + { + // Parse it and ignore it. Just for compatibility. + c := &ast.Constraint{ + Name: yyS[yypt-0].ident, + } + parser.yyVAL.item = &ast.AlterTableSpec{ + Tp: ast.AlterTableDropCheck, + Constraint: c, + } + yylex.AppendError(yylex.Errorf("The DROP CHECK clause is parsed but not implemented yet.")) + parser.lastErrorAsWarn() + } + case 30: + { + parser.yyVAL.item = &ast.AlterTableSpec{ + Tp: ast.AlterTableIndexInvisible, + Name: yyS[yypt-1].ident, + Visibility: yyS[yypt-0].item.(ast.IndexVisibility), + } + } + case 31: + { + parser.yyVAL.item = true + } + case 32: + { + parser.yyVAL.item = yyS[yypt-0].item + } + case 33: + { + parser.yyVAL.item = true + } + case 34: + { + parser.yyVAL.item = false + } + case 41: + { + parser.yyVAL.item = make([]*ast.AlterTableSpec, 0, 1) + } + case 42: + { + parser.yyVAL.item = yyS[yypt-0].item + } + case 43: + { + parser.yyVAL.item = []*ast.AlterTableSpec{yyS[yypt-0].item.(*ast.AlterTableSpec)} + } + case 44: + { + parser.yyVAL.item = append(yyS[yypt-2].item.([]*ast.AlterTableSpec), yyS[yypt-0].item.(*ast.AlterTableSpec)) + } + case 45: + { + parser.yyVAL.item = nil + } + case 46: + { + parser.yyVAL.item = nil + } + case 47: + { + parser.yyVAL.item = yyS[yypt-0].item.(string) + } + case 48: + { + parser.yyVAL.item = yyS[yypt-0].ident + } + case 49: + { + parser.yyVAL.statement = &ast.AnalyzeTableStmt{TableNames: yyS[yypt-0].item.([]*ast.TableName)} + } + case 50: + { + parser.yyVAL.item = &ast.Assignment{Column: yyS[yypt-2].item.(*ast.ColumnName), Expr: yyS[yypt-0].expr} + } + case 51: + { + parser.yyVAL.item = []*ast.Assignment{yyS[yypt-0].item.(*ast.Assignment)} + } + case 52: + { + parser.yyVAL.item = append(yyS[yypt-2].item.([]*ast.Assignment), yyS[yypt-0].item.(*ast.Assignment)) + } + case 53: + { + parser.yyVAL.item = []*ast.Assignment{} + } + case 55: + { + parser.yyVAL.statement = &ast.BeginStmt{} + } + case 56: + { + parser.yyVAL.statement = &ast.BeginStmt{} + } + case 57: + { + parser.yyVAL.item = []*ast.ColumnDef{yyS[yypt-0].item.(*ast.ColumnDef)} + } + case 58: + { + parser.yyVAL.item = append(yyS[yypt-2].item.([]*ast.ColumnDef), yyS[yypt-0].item.(*ast.ColumnDef)) + } + case 59: + { + colDef := &ast.ColumnDef{Name: yyS[yypt-2].item.(*ast.ColumnName), Tp: yyS[yypt-1].item.(*types.FieldType), Options: yyS[yypt-0].item.([]*ast.ColumnOption)} + if !colDef.Validate() { + yylex.AppendError(yylex.Errorf("Invalid column definition")) + return 1 + } + parser.yyVAL.item = colDef + } + case 60: + { + // TODO: check flen 0 + tp := types.NewFieldType(mysql.TypeLonglong) + options := []*ast.ColumnOption{{Tp: ast.ColumnOptionNotNull}, {Tp: ast.ColumnOptionAutoIncrement}, {Tp: ast.ColumnOptionUniqKey}} + options = append(options, yyS[yypt-0].item.([]*ast.ColumnOption)...) 
+ tp.Flag |= mysql.UnsignedFlag + colDef := &ast.ColumnDef{Name: yyS[yypt-2].item.(*ast.ColumnName), Tp: tp, Options: options} + if !colDef.Validate() { + yylex.AppendError(yylex.Errorf("Invalid column definition")) + return 1 + } + parser.yyVAL.item = colDef + } + case 61: + { + parser.yyVAL.item = &ast.ColumnName{Name: model.NewCIStr(yyS[yypt-0].ident)} + } + case 62: + { + parser.yyVAL.item = &ast.ColumnName{Table: model.NewCIStr(yyS[yypt-2].ident), Name: model.NewCIStr(yyS[yypt-0].ident)} + } + case 63: + { + parser.yyVAL.item = &ast.ColumnName{Schema: model.NewCIStr(yyS[yypt-4].ident), Table: model.NewCIStr(yyS[yypt-2].ident), Name: model.NewCIStr(yyS[yypt-0].ident)} + } + case 64: + { + parser.yyVAL.item = []*ast.ColumnName{yyS[yypt-0].item.(*ast.ColumnName)} + } + case 65: + { + parser.yyVAL.item = append(yyS[yypt-2].item.([]*ast.ColumnName), yyS[yypt-0].item.(*ast.ColumnName)) + } + case 66: + { + parser.yyVAL.item = []*ast.ColumnName{} + } + case 67: + { + parser.yyVAL.item = yyS[yypt-0].item.([]*ast.ColumnName) + } + case 68: + { + parser.yyVAL.statement = &ast.CommitStmt{} + } + case 71: + { + parser.yyVAL.item = true + } + case 72: + { + parser.yyVAL.item = false + } + case 73: + { + parser.yyVAL.item = true + } + case 74: + { + parser.yyVAL.item = yyS[yypt-0].item + } + case 75: + { + parser.yyVAL.item = 0 + } + case 76: + { + if yyS[yypt-0].item.(bool) { + parser.yyVAL.item = 1 + } else { + parser.yyVAL.item = 2 + } + } + case 77: + { + parser.yyVAL.item = &ast.ColumnOption{Tp: ast.ColumnOptionNotNull} + } + case 78: + { + parser.yyVAL.item = &ast.ColumnOption{Tp: ast.ColumnOptionNull} + } + case 79: + { + parser.yyVAL.item = &ast.ColumnOption{Tp: ast.ColumnOptionAutoIncrement} + } + case 80: + { + // KEY is normally a synonym for INDEX. The key attribute PRIMARY KEY + // can also be specified as just KEY when given in a column definition. + // See http://dev.mysql.com/doc/refman/5.7/en/create-table.html + parser.yyVAL.item = &ast.ColumnOption{Tp: ast.ColumnOptionPrimaryKey} + } + case 81: + { + parser.yyVAL.item = &ast.ColumnOption{Tp: ast.ColumnOptionUniqKey} + } + case 82: + { + parser.yyVAL.item = &ast.ColumnOption{Tp: ast.ColumnOptionUniqKey} + } + case 83: + { + parser.yyVAL.item = &ast.ColumnOption{Tp: ast.ColumnOptionDefaultValue, Expr: yyS[yypt-0].expr} + } + case 84: + { + parser.yyVAL.item = []*ast.ColumnOption{{Tp: ast.ColumnOptionNotNull}, {Tp: ast.ColumnOptionAutoIncrement}, {Tp: ast.ColumnOptionUniqKey}} + } + case 85: + { + parser.yyVAL.item = &ast.ColumnOption{Tp: ast.ColumnOptionOnUpdate, Expr: yyS[yypt-0].expr} + } + case 86: + { + parser.yyVAL.item = &ast.ColumnOption{Tp: ast.ColumnOptionComment, Expr: ast.NewValueExpr(yyS[yypt-0].ident)} + } + case 87: + { + // See https://dev.mysql.com/doc/refman/5.7/en/create-table.html + // The CHECK clause is parsed but ignored by all storage engines. + // See the branch named `EnforcedOrNotOrNotNullOpt`. 
+ + optionCheck := &ast.ColumnOption{ + Tp: ast.ColumnOptionCheck, + Expr: yyS[yypt-2].expr, + Enforced: true, + } + switch yyS[yypt-0].item.(int) { + case 0: + parser.yyVAL.item = []*ast.ColumnOption{optionCheck, {Tp: ast.ColumnOptionNotNull}} + case 1: + optionCheck.Enforced = true + parser.yyVAL.item = optionCheck + case 2: + optionCheck.Enforced = false + parser.yyVAL.item = optionCheck + default: + } + yylex.AppendError(yylex.Errorf("The CHECK clause is parsed but ignored by all storage engines.")) + parser.lastErrorAsWarn() + } + case 88: + { + startOffset := parser.startOffset(&yyS[yypt-2]) + endOffset := parser.endOffset(&yyS[yypt-1]) + expr := yyS[yypt-2].expr + expr.SetText(parser.src[startOffset:endOffset]) + + parser.yyVAL.item = &ast.ColumnOption{ + Tp: ast.ColumnOptionGenerated, + Expr: expr, + Stored: yyS[yypt-0].item.(bool), + } + } + case 89: + { + parser.yyVAL.item = &ast.ColumnOption{Tp: ast.ColumnOptionCollate, StrValue: yyS[yypt-0].item.(string)} + } + case 90: + { + parser.yyVAL.item = &ast.ColumnOption{Tp: ast.ColumnOptionColumnFormat, StrValue: yyS[yypt-0].item.(string)} + } + case 91: + { + parser.yyVAL.item = &ast.ColumnOption{Tp: ast.ColumnOptionStorage, StrValue: yyS[yypt-0].ident} + yylex.AppendError(yylex.Errorf("The STORAGE clause is parsed but ignored by all storage engines.")) + parser.lastErrorAsWarn() + } + case 92: + { + parser.yyVAL.item = &ast.ColumnOption{Tp: ast.ColumnOptionAutoRandom, AutoRandomBitLength: yyS[yypt-0].item.(int)} + } + case 96: + { + parser.yyVAL.item = "DEFAULT" + } + case 97: + { + parser.yyVAL.item = "FIXED" + } + case 98: + { + parser.yyVAL.item = "DYNAMIC" + } + case 101: + { + parser.yyVAL.item = false + } + case 102: + { + parser.yyVAL.item = false + } + case 103: + { + parser.yyVAL.item = true + } + case 104: + { + if columnOption, ok := yyS[yypt-0].item.(*ast.ColumnOption); ok { + parser.yyVAL.item = []*ast.ColumnOption{columnOption} + } else { + parser.yyVAL.item = yyS[yypt-0].item + } + } + case 105: + { + if columnOption, ok := yyS[yypt-0].item.(*ast.ColumnOption); ok { + parser.yyVAL.item = append(yyS[yypt-1].item.([]*ast.ColumnOption), columnOption) + } else { + parser.yyVAL.item = append(yyS[yypt-1].item.([]*ast.ColumnOption), yyS[yypt-0].item.([]*ast.ColumnOption)...) 
+ } + } + case 106: + { + parser.yyVAL.item = []*ast.ColumnOption{} + } + case 107: + { + parser.yyVAL.item = yyS[yypt-0].item.([]*ast.ColumnOption) + } + case 108: + { + c := &ast.Constraint{ + Tp: ast.ConstraintPrimaryKey, + Keys: yyS[yypt-2].item.([]*ast.IndexPartSpecification), + Name: yyS[yypt-4].item.([]interface{})[0].(string), + } + if yyS[yypt-0].item != nil { + c.Option = yyS[yypt-0].item.(*ast.IndexOption) + } + if indexType := yyS[yypt-4].item.([]interface{})[1]; indexType != nil { + if c.Option == nil { + c.Option = &ast.IndexOption{} + } + c.Option.Tp = indexType.(model.IndexType) + } + parser.yyVAL.item = c + } + case 109: + { + c := &ast.Constraint{ + Tp: ast.ConstraintFulltext, + Keys: yyS[yypt-2].item.([]*ast.IndexPartSpecification), + Name: yyS[yypt-4].item.(string), + } + if yyS[yypt-0].item != nil { + c.Option = yyS[yypt-0].item.(*ast.IndexOption) + } + parser.yyVAL.item = c + } + case 110: + { + c := &ast.Constraint{ + IfNotExists: yyS[yypt-5].item.(bool), + Tp: ast.ConstraintIndex, + Keys: yyS[yypt-2].item.([]*ast.IndexPartSpecification), + } + if yyS[yypt-0].item != nil { + c.Option = yyS[yypt-0].item.(*ast.IndexOption) + } + c.Name = yyS[yypt-4].item.([]interface{})[0].(string) + if indexType := yyS[yypt-4].item.([]interface{})[1]; indexType != nil { + if c.Option == nil { + c.Option = &ast.IndexOption{} + } + c.Option.Tp = indexType.(model.IndexType) + } + parser.yyVAL.item = c + } + case 111: + { + c := &ast.Constraint{ + Tp: ast.ConstraintUniq, + Keys: yyS[yypt-2].item.([]*ast.IndexPartSpecification), + } + if yyS[yypt-0].item != nil { + c.Option = yyS[yypt-0].item.(*ast.IndexOption) + } + c.Name = yyS[yypt-4].item.([]interface{})[0].(string) + if indexType := yyS[yypt-4].item.([]interface{})[1]; indexType != nil { + if c.Option == nil { + c.Option = &ast.IndexOption{} + } + c.Option.Tp = indexType.(model.IndexType) + } + parser.yyVAL.item = c + } + case 112: + { + parser.yyVAL.item = &ast.Constraint{ + Tp: ast.ConstraintCheck, + Expr: yyS[yypt-2].expr.(ast.ExprNode), + Enforced: yyS[yypt-0].item.(bool), + } + yylex.AppendError(yylex.Errorf("The CHECK clause is parsed but ignored by all storage engines.")) + parser.lastErrorAsWarn() + } + case 115: + { + parser.yyVAL.expr = &ast.FuncCallExpr{FnName: model.NewCIStr("CURRENT_TIMESTAMP")} + } + case 116: + { + parser.yyVAL.expr = &ast.FuncCallExpr{FnName: model.NewCIStr("CURRENT_TIMESTAMP")} + } + case 117: + { + parser.yyVAL.expr = &ast.FuncCallExpr{FnName: model.NewCIStr("CURRENT_TIMESTAMP"), Args: []ast.ExprNode{ast.NewValueExpr(yyS[yypt-1].item)}} + } + case 125: + { + parser.yyVAL.expr = ast.NewValueExpr(yyS[yypt-0].expr) + } + case 126: + { + parser.yyVAL.expr = &ast.UnaryOperationExpr{Op: opcode.Plus, V: ast.NewValueExpr(yyS[yypt-0].item)} + } + case 127: + { + parser.yyVAL.expr = &ast.UnaryOperationExpr{Op: opcode.Minus, V: ast.NewValueExpr(yyS[yypt-0].item)} + } + case 131: + { + var indexOption *ast.IndexOption + if yyS[yypt-0].item != nil { + indexOption = yyS[yypt-0].item.(*ast.IndexOption) + if indexOption.Tp == model.IndexTypeInvalid { + if yyS[yypt-6].item != nil { + indexOption.Tp = yyS[yypt-6].item.(model.IndexType) + } + } + } else { + indexOption = &ast.IndexOption{} + if yyS[yypt-6].item != nil { + indexOption.Tp = yyS[yypt-6].item.(model.IndexType) + } + } + parser.yyVAL.statement = &ast.CreateIndexStmt{ + IfNotExists: yyS[yypt-8].item.(bool), + IndexName: yyS[yypt-7].ident, + Table: yyS[yypt-4].item.(*ast.TableName), + IndexPartSpecifications: yyS[yypt-2].item.([]*ast.IndexPartSpecification), 
+ IndexOption: indexOption, + KeyType: yyS[yypt-10].item.(ast.IndexKeyType), + } + } + case 132: + { + parser.yyVAL.item = ([]*ast.IndexPartSpecification)(nil) + } + case 133: + { + parser.yyVAL.item = yyS[yypt-1].item + } + case 134: + { + parser.yyVAL.item = []*ast.IndexPartSpecification{yyS[yypt-0].item.(*ast.IndexPartSpecification)} + } + case 135: + { + parser.yyVAL.item = append(yyS[yypt-2].item.([]*ast.IndexPartSpecification), yyS[yypt-0].item.(*ast.IndexPartSpecification)) + } + case 136: + { + // Order is parsed but just ignored as MySQL did. + parser.yyVAL.item = &ast.IndexPartSpecification{Column: yyS[yypt-2].item.(*ast.ColumnName), Length: yyS[yypt-1].item.(int)} + } + case 137: + { + parser.yyVAL.item = &ast.IndexPartSpecification{Expr: yyS[yypt-2].expr} + } + case 138: + { + parser.yyVAL.item = ast.IndexKeyTypeNone + } + case 139: + { + parser.yyVAL.item = ast.IndexKeyTypeUnique + } + case 140: + { + parser.yyVAL.item = ast.IndexKeyTypeSpatial + } + case 141: + { + parser.yyVAL.item = ast.IndexKeyTypeFullText + } + case 142: + { + parser.yyVAL.statement = &ast.CreateDatabaseStmt{ + IfNotExists: yyS[yypt-2].item.(bool), + Name: yyS[yypt-1].item.(string), + Options: yyS[yypt-0].item.([]*ast.DatabaseOption), + } + } + case 143: + { + parser.yyVAL.item = yyS[yypt-0].ident + } + case 144: + { + parser.yyVAL.item = &ast.DatabaseOption{Tp: ast.DatabaseOptionCharset, Value: yyS[yypt-0].item.(string)} + } + case 145: + { + parser.yyVAL.item = &ast.DatabaseOption{Tp: ast.DatabaseOptionCollate, Value: yyS[yypt-0].item.(string)} + } + case 146: + { + parser.yyVAL.item = &ast.DatabaseOption{Tp: ast.DatabaseOptionEncryption, Value: yyS[yypt-0].ident} + } + case 147: + { + parser.yyVAL.item = []*ast.DatabaseOption{} + } + case 149: + { + parser.yyVAL.item = []*ast.DatabaseOption{yyS[yypt-0].item.(*ast.DatabaseOption)} + } + case 150: + { + parser.yyVAL.item = append(yyS[yypt-1].item.([]*ast.DatabaseOption), yyS[yypt-0].item.(*ast.DatabaseOption)) + } + case 151: + { + stmt := yyS[yypt-1].item.(*ast.CreateTableStmt) + stmt.Table = yyS[yypt-2].item.(*ast.TableName) + stmt.IfNotExists = yyS[yypt-3].item.(bool) + stmt.IsTemporary = yyS[yypt-5].item.(bool) + parser.yyVAL.statement = stmt + } + case 152: + { + parser.yyVAL.statement = &ast.CreateTableStmt{ + Table: yyS[yypt-1].item.(*ast.TableName), + ReferTable: yyS[yypt-0].item.(*ast.TableName), + IfNotExists: yyS[yypt-2].item.(bool), + IsTemporary: yyS[yypt-4].item.(bool), + } + } + case 157: + { + parser.yyVAL.item = yyS[yypt-0].item + } + case 158: + { + parser.yyVAL.item = yyS[yypt-1].item + } + case 159: + { + // Single Table + tn := yyS[yypt-5].item.(*ast.TableName) + tn.IndexHints = yyS[yypt-3].item.([]*ast.IndexHint) + join := &ast.Join{Left: &ast.TableSource{Source: tn, AsName: yyS[yypt-4].item.(model.CIStr)}, Right: nil} + x := &ast.DeleteStmt{ + TableRefs: &ast.TableRefsClause{TableRefs: join}, + Priority: yyS[yypt-8].item.(mysql.PriorityEnum), + Quick: yyS[yypt-7].item.(bool), + } + if yyS[yypt-2].item != nil { + x.Where = yyS[yypt-2].item.(ast.ExprNode) + } + if yyS[yypt-1].item != nil { + x.Order = yyS[yypt-1].item.(*ast.OrderByClause) + } + if yyS[yypt-0].item != nil { + x.Limit = yyS[yypt-0].item.(*ast.Limit) + } + + parser.yyVAL.statement = x + } + case 161: + { + parser.yyVAL.statement = &ast.DropDatabaseStmt{IfExists: yyS[yypt-1].item.(bool), Name: yyS[yypt-0].item.(string)} + } + case 162: + { + parser.yyVAL.statement = &ast.DropIndexStmt{IfExists: yyS[yypt-3].item.(bool), IndexName: yyS[yypt-2].ident, Table: 
yyS[yypt-0].item.(*ast.TableName)} + } + case 163: + { + parser.yyVAL.statement = &ast.DropTableStmt{IfExists: yyS[yypt-2].item.(bool), Tables: yyS[yypt-1].item.([]*ast.TableName), IsView: false, IsTemporary: yyS[yypt-4].item.(bool)} + } + case 164: + { + parser.yyVAL.item = false + } + case 165: + { + parser.yyVAL.item = true + yylex.AppendError(yylex.Errorf("TiDB doesn't support TEMPORARY TABLE, TEMPORARY will be parsed but ignored.")) + parser.lastErrorAsWarn() + } + case 173: + { + parser.yyVAL.statement = nil + } + case 177: + { + parser.yyVAL.statement = &ast.ExplainStmt{ + Stmt: yyS[yypt-0].statement, + Format: "row", + } + } + case 178: + { + parser.yyVAL.statement = &ast.ExplainStmt{ + Stmt: yyS[yypt-0].statement, + Format: yyS[yypt-1].ident, + } + } + case 179: + { + parser.yyVAL.statement = &ast.ExplainStmt{ + Stmt: yyS[yypt-0].statement, + Format: yyS[yypt-1].item.(string), + } + } + case 180: + { + parser.yyVAL.item = "row" + } + case 181: + { + parser.yyVAL.item = "json" + } + case 182: + { + parser.yyVAL.item = getUint64FromNUM(yyS[yypt-0].item) + } + case 184: + { + v := yyS[yypt-2].ident + v = strings.TrimPrefix(v, "@") + parser.yyVAL.expr = &ast.VariableExpr{ + Name: v, + IsGlobal: false, + IsSystem: false, + Value: yyS[yypt-0].expr, + } + } + case 185: + { + parser.yyVAL.expr = &ast.BinaryOperationExpr{Op: opcode.LogicOr, L: yyS[yypt-2].expr, R: yyS[yypt-0].expr} + } + case 186: + { + parser.yyVAL.expr = &ast.BinaryOperationExpr{Op: opcode.LogicXor, L: yyS[yypt-2].expr, R: yyS[yypt-0].expr} + } + case 187: + { + parser.yyVAL.expr = &ast.BinaryOperationExpr{Op: opcode.LogicAnd, L: yyS[yypt-2].expr, R: yyS[yypt-0].expr} + } + case 188: + { + parser.yyVAL.expr = &ast.UnaryOperationExpr{Op: opcode.Not, V: yyS[yypt-0].expr} + } + case 189: + { + /* https://dev.mysql.com/doc/refman/5.7/en/comparison-operators.html#operator_is */ + parser.yyVAL.expr = &ast.IsNullExpr{Expr: yyS[yypt-2].expr, Not: !yyS[yypt-1].item.(bool)} + } + case 195: + { + parser.yyVAL.item = []ast.ExprNode{yyS[yypt-0].expr} + } + case 196: + { + parser.yyVAL.item = append(yyS[yypt-2].item.([]ast.ExprNode), yyS[yypt-0].expr) + } + case 197: + { + parser.yyVAL.item = []ast.ExprNode{} + } + case 199: + { + parser.yyVAL.item = []ast.ExprNode{} + } + case 200: + { + parser.yyVAL.item = yyS[yypt-0].item + } + case 201: + { + expr := ast.NewValueExpr(yyS[yypt-0].item) + parser.yyVAL.item = []ast.ExprNode{expr} + } + case 202: + { + parser.yyVAL.expr = &ast.IsNullExpr{Expr: yyS[yypt-2].expr, Not: !yyS[yypt-1].item.(bool)} + } + case 203: + { + parser.yyVAL.expr = &ast.BinaryOperationExpr{Op: yyS[yypt-1].item.(opcode.Op), L: yyS[yypt-2].expr, R: yyS[yypt-0].expr} + } + case 204: + { + v := yyS[yypt-2].ident + v = strings.TrimPrefix(v, "@") + variable := &ast.VariableExpr{ + Name: v, + IsGlobal: false, + IsSystem: false, + Value: yyS[yypt-0].expr, + } + parser.yyVAL.expr = &ast.BinaryOperationExpr{Op: yyS[yypt-3].item.(opcode.Op), L: yyS[yypt-4].expr, R: variable} + } + case 206: + { + parser.yyVAL.item = opcode.GE + } + case 207: + { + parser.yyVAL.item = opcode.GT + } + case 208: + { + parser.yyVAL.item = opcode.LE + } + case 209: + { + parser.yyVAL.item = opcode.LT + } + case 210: + { + parser.yyVAL.item = opcode.NE + } + case 211: + { + parser.yyVAL.item = opcode.NE + } + case 212: + { + parser.yyVAL.item = opcode.EQ + } + case 213: + { + parser.yyVAL.item = opcode.NullEQ + } + case 214: + { + parser.yyVAL.item = true + } + case 215: + { + parser.yyVAL.item = false + } + case 216: + { + parser.yyVAL.item = true + 
} + case 217: + { + parser.yyVAL.item = false + } + case 218: + { + parser.yyVAL.item = true + } + case 219: + { + parser.yyVAL.item = false + } + case 220: + { + parser.yyVAL.item = false + } + case 221: + { + parser.yyVAL.item = false + } + case 222: + { + parser.yyVAL.item = true + } + case 223: + { + parser.yyVAL.expr = &ast.PatternInExpr{Expr: yyS[yypt-4].expr, Not: !yyS[yypt-3].item.(bool), List: yyS[yypt-1].item.([]ast.ExprNode)} + } + case 224: + { + parser.yyVAL.expr = &ast.BetweenExpr{ + Expr: yyS[yypt-4].expr, + Left: yyS[yypt-2].expr, + Right: yyS[yypt-0].expr, + Not: !yyS[yypt-3].item.(bool), + } + } + case 226: + { + parser.yyVAL.item = "\\" + } + case 227: + { + parser.yyVAL.item = yyS[yypt-0].ident + } + case 228: + { + parser.yyVAL.item = &ast.SelectField{WildCard: &ast.WildCardField{}} + } + case 229: + { + wildCard := &ast.WildCardField{Table: model.NewCIStr(yyS[yypt-2].ident)} + parser.yyVAL.item = &ast.SelectField{WildCard: wildCard} + } + case 230: + { + wildCard := &ast.WildCardField{Schema: model.NewCIStr(yyS[yypt-4].ident), Table: model.NewCIStr(yyS[yypt-2].ident)} + parser.yyVAL.item = &ast.SelectField{WildCard: wildCard} + } + case 231: + { + expr := yyS[yypt-1].expr + asName := yyS[yypt-0].item.(string) + parser.yyVAL.item = &ast.SelectField{Expr: expr, AsName: model.NewCIStr(asName)} + } + case 232: + { + /* + * ODBC escape syntax. + * See https://dev.mysql.com/doc/refman/5.7/en/expressions.html + */ + expr := yyS[yypt-2].expr + asName := yyS[yypt-0].item.(string) + parser.yyVAL.item = &ast.SelectField{Expr: expr, AsName: model.NewCIStr(asName)} + } + case 233: + { + parser.yyVAL.item = "" + } + case 234: + { + parser.yyVAL.item = yyS[yypt-0].item + } + case 235: + { + parser.yyVAL.item = yyS[yypt-0].ident + } + case 236: + { + parser.yyVAL.item = yyS[yypt-0].ident + } + case 237: + { + parser.yyVAL.item = yyS[yypt-0].ident + } + case 238: + { + parser.yyVAL.item = yyS[yypt-0].ident + } + case 239: + { + field := yyS[yypt-0].item.(*ast.SelectField) + field.Offset = parser.startOffset(&yyS[yypt]) + parser.yyVAL.item = []*ast.SelectField{field} + } + case 240: + { + + fl := yyS[yypt-2].item.([]*ast.SelectField) + last := fl[len(fl)-1] + if last.Expr != nil && last.AsName.O == "" { + lastEnd := parser.endOffset(&yyS[yypt-1]) + last.SetText(parser.src[last.Offset:lastEnd]) + } + newField := yyS[yypt-0].item.(*ast.SelectField) + newField.Offset = parser.startOffset(&yyS[yypt]) + parser.yyVAL.item = append(fl, newField) + } + case 241: + { + parser.yyVAL.item = &ast.GroupByClause{Items: yyS[yypt-0].item.([]*ast.ByItem)} + } + case 242: + { + parser.yyVAL.item = nil + } + case 243: + { + parser.yyVAL.item = &ast.HavingClause{Expr: yyS[yypt-0].expr} + } + case 244: + { + parser.yyVAL.item = false + } + case 245: + { + parser.yyVAL.item = true + } + case 246: + { + parser.yyVAL.item = false + } + case 247: + { + parser.yyVAL.item = true + } + case 248: + { + parser.yyVAL.item = "" + } + case 249: + { + //"index name" + parser.yyVAL.item = yyS[yypt-0].ident + } + case 250: + { + parser.yyVAL.item = nil + } + case 251: + { + // Merge the options + if yyS[yypt-1].item == nil { + parser.yyVAL.item = yyS[yypt-0].item + } else { + opt1 := yyS[yypt-1].item.(*ast.IndexOption) + opt2 := yyS[yypt-0].item.(*ast.IndexOption) + if len(opt2.Comment) > 0 { + opt1.Comment = opt2.Comment + } else if opt2.Tp != 0 { + opt1.Tp = opt2.Tp + } else if opt2.KeyBlockSize > 0 { + opt1.KeyBlockSize = opt2.KeyBlockSize + } else if len(opt2.ParserName.O) > 0 { + opt1.ParserName = opt2.ParserName + 
} else if opt2.Visibility != ast.IndexVisibilityDefault { + opt1.Visibility = opt2.Visibility + } + parser.yyVAL.item = opt1 + } + } + case 252: + { + parser.yyVAL.item = &ast.IndexOption{ + KeyBlockSize: yyS[yypt-0].item.(uint64), + } + } + case 253: + { + parser.yyVAL.item = &ast.IndexOption{ + Tp: yyS[yypt-0].item.(model.IndexType), + } + } + case 254: + { + parser.yyVAL.item = &ast.IndexOption{ + ParserName: model.NewCIStr(yyS[yypt-0].ident), + } + yylex.AppendError(yylex.Errorf("The WITH PARASER clause is parsed but ignored by all storage engines.")) + parser.lastErrorAsWarn() + } + case 255: + { + parser.yyVAL.item = &ast.IndexOption{ + Comment: yyS[yypt-0].ident, + } + } + case 256: + { + parser.yyVAL.item = &ast.IndexOption{ + Visibility: yyS[yypt-0].item.(ast.IndexVisibility), + } + } + case 257: + { + parser.yyVAL.item = []interface{}{yyS[yypt-0].item, nil} + } + case 258: + { + parser.yyVAL.item = []interface{}{yyS[yypt-2].item, yyS[yypt-0].item} + } + case 259: + { + parser.yyVAL.item = []interface{}{yyS[yypt-2].ident, yyS[yypt-0].item} + } + case 260: + { + parser.yyVAL.item = nil + } + case 261: + { + parser.yyVAL.item = yyS[yypt-0].item + } + case 262: + { + parser.yyVAL.item = yyS[yypt-0].item + } + case 263: + { + parser.yyVAL.item = yyS[yypt-0].item + } + case 264: + { + parser.yyVAL.item = model.IndexTypeBtree + } + case 265: + { + parser.yyVAL.item = model.IndexTypeHash + } + case 266: + { + parser.yyVAL.item = model.IndexTypeRtree + } + case 267: + { + parser.yyVAL.item = ast.IndexVisibilityVisible + } + case 268: + { + parser.yyVAL.item = ast.IndexVisibilityInvisible + } + case 640: + { + x := yyS[yypt-0].item.(*ast.InsertStmt) + x.Priority = yyS[yypt-3].item.(mysql.PriorityEnum) + // Wraps many layers here so that it can be processed the same way as select statement. 
+ ts := &ast.TableSource{Source: yyS[yypt-1].item.(*ast.TableName)} + x.Table = &ast.TableRefsClause{TableRefs: &ast.Join{Left: ts}} + parser.yyVAL.statement = x + } + case 643: + { + parser.yyVAL.item = &ast.InsertStmt{ + Columns: yyS[yypt-3].item.([]*ast.ColumnName), + Lists: yyS[yypt-0].item.([][]ast.ExprNode), + } + } + case 644: + { + parser.yyVAL.item = &ast.InsertStmt{Columns: yyS[yypt-2].item.([]*ast.ColumnName), Select: yyS[yypt-0].statement.(*ast.SelectStmt)} + } + case 645: + { + parser.yyVAL.item = &ast.InsertStmt{Columns: yyS[yypt-4].item.([]*ast.ColumnName), Select: yyS[yypt-1].statement.(*ast.SelectStmt)} + } + case 646: + { + parser.yyVAL.item = &ast.InsertStmt{Lists: yyS[yypt-0].item.([][]ast.ExprNode)} + } + case 647: + { + parser.yyVAL.item = &ast.InsertStmt{Select: yyS[yypt-1].statement.(*ast.SelectStmt)} + } + case 648: + { + parser.yyVAL.item = &ast.InsertStmt{Select: yyS[yypt-0].statement.(*ast.SelectStmt)} + } + case 649: + { + parser.yyVAL.item = &ast.InsertStmt{Setlist: yyS[yypt-0].item.([]*ast.Assignment)} + } + case 652: + { + parser.yyVAL.item = [][]ast.ExprNode{yyS[yypt-0].item.([]ast.ExprNode)} + } + case 653: + { + parser.yyVAL.item = append(yyS[yypt-2].item.([][]ast.ExprNode), yyS[yypt-0].item.([]ast.ExprNode)) + } + case 654: + { + parser.yyVAL.item = yyS[yypt-1].item + } + case 655: + { + parser.yyVAL.item = []ast.ExprNode{} + } + case 657: + { + parser.yyVAL.item = append(yyS[yypt-2].item.([]ast.ExprNode), yyS[yypt-0].expr) + } + case 658: + { + parser.yyVAL.item = []ast.ExprNode{yyS[yypt-0].expr} + } + case 660: + { + parser.yyVAL.expr = &ast.DefaultExpr{} + } + case 661: + { + parser.yyVAL.item = &ast.Assignment{ + Column: yyS[yypt-2].item.(*ast.ColumnName), + Expr: yyS[yypt-0].expr, + } + } + case 662: + { + parser.yyVAL.item = []*ast.Assignment{} + } + case 663: + { + parser.yyVAL.item = []*ast.Assignment{yyS[yypt-0].item.(*ast.Assignment)} + } + case 664: + { + parser.yyVAL.item = append(yyS[yypt-2].item.([]*ast.Assignment), yyS[yypt-0].item.(*ast.Assignment)) + } + case 665: + { + x := yyS[yypt-0].item.(*ast.InsertStmt) + x.IsReplace = true + x.Priority = yyS[yypt-3].item.(mysql.PriorityEnum) + ts := &ast.TableSource{Source: yyS[yypt-1].item.(*ast.TableName)} + x.Table = &ast.TableRefsClause{TableRefs: &ast.Join{Left: ts}} + parser.yyVAL.statement = x + } + case 666: + { + parser.yyVAL.expr = ast.NewValueExpr(false) + } + case 667: + { + parser.yyVAL.expr = ast.NewValueExpr(nil) + } + case 668: + { + parser.yyVAL.expr = ast.NewValueExpr(true) + } + case 669: + { + parser.yyVAL.expr = ast.NewValueExpr(yyS[yypt-0].item) + } + case 670: + { + parser.yyVAL.expr = ast.NewValueExpr(yyS[yypt-0].item) + } + case 671: + { + parser.yyVAL.expr = ast.NewValueExpr(yyS[yypt-0].item) + } + case 672: + { + parser.yyVAL.expr = yyS[yypt-0].expr + } + case 673: + { + // See https://dev.mysql.com/doc/refman/5.7/en/charset-literal.html + co, err := charset.GetDefaultCollation(yyS[yypt-1].ident) + if err != nil { + yylex.AppendError(yylex.Errorf("Get collation error for charset: %s", yyS[yypt-1].ident)) + return 1 + } + expr := ast.NewValueExpr(yyS[yypt-0].ident) + tp := expr.GetType() + tp.Charset = yyS[yypt-1].ident + tp.Collate = co + if tp.Collate == charset.CollationBin { + tp.Flag |= mysql.BinaryFlag + } + parser.yyVAL.expr = expr + } + case 674: + { + parser.yyVAL.expr = ast.NewValueExpr(yyS[yypt-0].item) + } + case 675: + { + parser.yyVAL.expr = ast.NewValueExpr(yyS[yypt-0].item) + } + case 676: + { + expr := ast.NewValueExpr(yyS[yypt-0].ident) + 
parser.yyVAL.expr = expr + } + case 677: + { + valExpr := yyS[yypt-1].expr.(ast.ValueExpr) + strLit := valExpr.GetString() + expr := ast.NewValueExpr(strLit + yyS[yypt-0].ident) + // Fix #4239, use first string literal as projection name. + if valExpr.GetProjectionOffset() >= 0 { + expr.SetProjectionOffset(valExpr.GetProjectionOffset()) + } else { + expr.SetProjectionOffset(len(strLit)) + } + parser.yyVAL.expr = expr + } + case 678: + { + parser.yyVAL.item = &ast.OrderByClause{Items: yyS[yypt-0].item.([]*ast.ByItem)} + } + case 679: + { + parser.yyVAL.item = []*ast.ByItem{yyS[yypt-0].item.(*ast.ByItem)} + } + case 680: + { + parser.yyVAL.item = append(yyS[yypt-2].item.([]*ast.ByItem), yyS[yypt-0].item.(*ast.ByItem)) + } + case 681: + { + parser.yyVAL.item = &ast.ByItem{Expr: yyS[yypt-1].expr, Desc: yyS[yypt-0].item.(bool)} + } + case 682: + { + parser.yyVAL.item = false // ASC by default + } + case 683: + { + parser.yyVAL.item = false + } + case 684: + { + parser.yyVAL.item = true + } + case 685: + { + parser.yyVAL.item = nil + } + case 686: + { + parser.yyVAL.item = yyS[yypt-0].item + } + case 687: + { + parser.yyVAL.expr = &ast.BinaryOperationExpr{Op: opcode.Or, L: yyS[yypt-2].expr, R: yyS[yypt-0].expr} + } + case 688: + { + parser.yyVAL.expr = &ast.BinaryOperationExpr{Op: opcode.And, L: yyS[yypt-2].expr, R: yyS[yypt-0].expr} + } + case 689: + { + parser.yyVAL.expr = &ast.BinaryOperationExpr{Op: opcode.LeftShift, L: yyS[yypt-2].expr, R: yyS[yypt-0].expr} + } + case 690: + { + parser.yyVAL.expr = &ast.BinaryOperationExpr{Op: opcode.RightShift, L: yyS[yypt-2].expr, R: yyS[yypt-0].expr} + } + case 691: + { + parser.yyVAL.expr = &ast.BinaryOperationExpr{Op: opcode.Plus, L: yyS[yypt-2].expr, R: yyS[yypt-0].expr} + } + case 692: + { + parser.yyVAL.expr = &ast.BinaryOperationExpr{Op: opcode.Minus, L: yyS[yypt-2].expr, R: yyS[yypt-0].expr} + } + case 693: + { + parser.yyVAL.expr = &ast.BinaryOperationExpr{Op: opcode.Mul, L: yyS[yypt-2].expr, R: yyS[yypt-0].expr} + } + case 694: + { + parser.yyVAL.expr = &ast.BinaryOperationExpr{Op: opcode.Div, L: yyS[yypt-2].expr, R: yyS[yypt-0].expr} + } + case 695: + { + parser.yyVAL.expr = &ast.BinaryOperationExpr{Op: opcode.Mod, L: yyS[yypt-2].expr, R: yyS[yypt-0].expr} + } + case 696: + { + parser.yyVAL.expr = &ast.BinaryOperationExpr{Op: opcode.IntDiv, L: yyS[yypt-2].expr, R: yyS[yypt-0].expr} + } + case 697: + { + parser.yyVAL.expr = &ast.BinaryOperationExpr{Op: opcode.Mod, L: yyS[yypt-2].expr, R: yyS[yypt-0].expr} + } + case 698: + { + parser.yyVAL.expr = &ast.BinaryOperationExpr{Op: opcode.Xor, L: yyS[yypt-2].expr, R: yyS[yypt-0].expr} + } + case 700: + { + parser.yyVAL.expr = &ast.ColumnNameExpr{Name: &ast.ColumnName{ + Name: model.NewCIStr(yyS[yypt-0].ident), + }} + } + case 701: + { + parser.yyVAL.expr = &ast.ColumnNameExpr{Name: &ast.ColumnName{ + Table: model.NewCIStr(yyS[yypt-2].ident), + Name: model.NewCIStr(yyS[yypt-0].ident), + }} + } + case 702: + { + parser.yyVAL.expr = &ast.ColumnNameExpr{Name: &ast.ColumnName{ + Table: model.NewCIStr(yyS[yypt-2].ident), + Name: model.NewCIStr(yyS[yypt-0].ident), + }} + } + case 703: + { + parser.yyVAL.expr = &ast.ColumnNameExpr{Name: &ast.ColumnName{ + Schema: model.NewCIStr(yyS[yypt-4].ident), + Table: model.NewCIStr(yyS[yypt-2].ident), + Name: model.NewCIStr(yyS[yypt-0].ident), + }} + } + case 708: + { + // TODO: Create a builtin function hold expr and collation. When do evaluation, convert expr result using the collation. 
+ parser.yyVAL.expr = yyS[yypt-2].expr + } + case 712: + { + parser.yyVAL.expr = &ast.UnaryOperationExpr{Op: opcode.Not, V: yyS[yypt-0].expr} + } + case 713: + { + parser.yyVAL.expr = &ast.UnaryOperationExpr{Op: opcode.BitNeg, V: yyS[yypt-0].expr} + } + case 714: + { + parser.yyVAL.expr = &ast.UnaryOperationExpr{Op: opcode.Minus, V: yyS[yypt-0].expr} + } + case 715: + { + parser.yyVAL.expr = &ast.UnaryOperationExpr{Op: opcode.Plus, V: yyS[yypt-0].expr} + } + case 716: + { + parser.yyVAL.expr = &ast.UnaryOperationExpr{Op: opcode.Not, V: yyS[yypt-0].expr} + } + case 717: + { + startOffset := parser.startOffset(&yyS[yypt-1]) + endOffset := parser.endOffset(&yyS[yypt]) + expr := yyS[yypt-1].expr + expr.SetText(parser.src[startOffset:endOffset]) + parser.yyVAL.expr = &ast.ParenthesesExpr{Expr: expr} + } + case 718: + { + values := append(yyS[yypt-3].item.([]ast.ExprNode), yyS[yypt-1].expr) + parser.yyVAL.expr = &ast.RowExpr{Values: values} + } + case 719: + { + values := append(yyS[yypt-3].item.([]ast.ExprNode), yyS[yypt-1].expr) + parser.yyVAL.expr = &ast.RowExpr{Values: values} + } + case 720: + { + // See https://dev.mysql.com/doc/refman/5.7/en/cast-functions.html#function_convert + charset1 := ast.NewValueExpr(yyS[yypt-1].item) + parser.yyVAL.expr = &ast.FuncCallExpr{ + FnName: model.NewCIStr(yyS[yypt-5].ident), + Args: []ast.ExprNode{yyS[yypt-3].expr, charset1}, + } + } + case 721: + { + parser.yyVAL.expr = &ast.DefaultExpr{Name: yyS[yypt-1].expr.(*ast.ColumnNameExpr).Name} + } + case 722: + { + parser.yyVAL.expr = &ast.ValuesExpr{Column: yyS[yypt-1].expr.(*ast.ColumnNameExpr)} + } + case 725: + { + parser.yyVAL.item = false + } + case 726: + { + parser.yyVAL.item = true + } + case 727: + { + parser.yyVAL.item = false + } + case 729: + { + parser.yyVAL.item = true + } + case 772: + { + parser.yyVAL.expr = &ast.FuncCallExpr{FnName: model.NewCIStr(yyS[yypt-3].ident), Args: yyS[yypt-1].item.([]ast.ExprNode)} + } + case 773: + { + parser.yyVAL.expr = &ast.FuncCallExpr{FnName: model.NewCIStr(yyS[yypt-3].ident), Args: yyS[yypt-1].item.([]ast.ExprNode)} + } + case 774: + { + parser.yyVAL.expr = &ast.FuncCallExpr{FnName: model.NewCIStr(yyS[yypt-1].ident)} + } + case 775: + { + parser.yyVAL.expr = &ast.FuncCallExpr{FnName: model.NewCIStr(yyS[yypt-2].ident)} + } + case 776: + { + args := []ast.ExprNode{} + if yyS[yypt-0].item != nil { + args = append(args, yyS[yypt-0].item.(ast.ExprNode)) + } + parser.yyVAL.expr = &ast.FuncCallExpr{FnName: model.NewCIStr(yyS[yypt-1].ident), Args: args} + } + case 777: + { + parser.yyVAL.expr = &ast.BinaryOperationExpr{Op: opcode.Mod, L: yyS[yypt-3].expr, R: yyS[yypt-1].expr} + } + case 778: + { + parser.yyVAL.expr = &ast.FuncCallExpr{FnName: model.NewCIStr(yyS[yypt-3].ident), Args: yyS[yypt-1].item.([]ast.ExprNode)} + } + case 779: + { + parser.yyVAL.expr = &ast.FuncCallExpr{FnName: model.NewCIStr(yyS[yypt-3].ident), Args: yyS[yypt-1].item.([]ast.ExprNode)} + } + case 780: + { + parser.yyVAL.expr = &ast.FuncCallExpr{FnName: model.NewCIStr(yyS[yypt-5].ident), Args: []ast.ExprNode{yyS[yypt-3].expr, yyS[yypt-1].expr}} + } + case 781: + { + parser.yyVAL.expr = &ast.FuncCallExpr{ + FnName: model.NewCIStr(yyS[yypt-5].ident), + Args: []ast.ExprNode{yyS[yypt-3].expr, yyS[yypt-1].expr}, + } + } + case 782: + { + parser.yyVAL.expr = &ast.FuncCallExpr{ + FnName: model.NewCIStr(yyS[yypt-5].ident), + Args: []ast.ExprNode{yyS[yypt-3].expr, yyS[yypt-1].expr}, + } + } + case 783: + { + parser.yyVAL.expr = &ast.FuncCallExpr{ + FnName: model.NewCIStr(yyS[yypt-7].ident), + Args: 
[]ast.ExprNode{yyS[yypt-5].expr, yyS[yypt-3].expr, yyS[yypt-1].expr}, + } + } + case 784: + { + parser.yyVAL.expr = &ast.FuncCallExpr{ + FnName: model.NewCIStr(yyS[yypt-7].ident), + Args: []ast.ExprNode{yyS[yypt-5].expr, yyS[yypt-3].expr, yyS[yypt-1].expr}, + } + } + case 785: + { + parser.yyVAL.expr = &ast.FuncCallExpr{ + FnName: model.NewCIStr(yyS[yypt-3].ident), + Args: []ast.ExprNode{yyS[yypt-1].expr}, + } + } + case 786: + { + parser.yyVAL.expr = &ast.FuncCallExpr{ + FnName: model.NewCIStr(yyS[yypt-5].ident), + Args: []ast.ExprNode{yyS[yypt-1].expr, yyS[yypt-3].expr}, + } + } + case 791: + { + parser.yyVAL.expr = &ast.AggregateFuncExpr{F: yyS[yypt-3].ident, Args: []ast.ExprNode{yyS[yypt-1].expr}} + } + case 792: + { + parser.yyVAL.expr = &ast.AggregateFuncExpr{F: yyS[yypt-3].ident, Args: []ast.ExprNode{yyS[yypt-1].expr}} + } + case 793: + { + args := []ast.ExprNode{ast.NewValueExpr(1)} + parser.yyVAL.expr = &ast.AggregateFuncExpr{F: yyS[yypt-3].ident, Args: args} + } + case 794: + { + parser.yyVAL.expr = &ast.AggregateFuncExpr{F: yyS[yypt-3].ident, Args: []ast.ExprNode{yyS[yypt-1].expr}} + } + case 795: + { + parser.yyVAL.expr = &ast.AggregateFuncExpr{F: yyS[yypt-3].ident, Args: []ast.ExprNode{yyS[yypt-1].expr}} + } + case 796: + { + parser.yyVAL.expr = &ast.AggregateFuncExpr{F: yyS[yypt-3].ident, Args: []ast.ExprNode{yyS[yypt-1].expr}} + } + case 797: + { + parser.yyVAL.item = ast.NewValueExpr(",") + } + case 798: + { + parser.yyVAL.item = ast.NewValueExpr(yyS[yypt-0].ident) + } + case 799: + { + parser.yyVAL.expr = &ast.FuncCallExpr{FnName: model.NewCIStr(yyS[yypt-3].ident), Args: yyS[yypt-1].item.([]ast.ExprNode)} + } + case 800: + { + parser.yyVAL.item = nil + } + case 801: + { + parser.yyVAL.item = nil + } + case 802: + { + expr := ast.NewValueExpr(yyS[yypt-1].item) + parser.yyVAL.item = expr + } + case 803: + { + parser.yyVAL.expr = nil + } + case 804: + { + parser.yyVAL.expr = yyS[yypt-0].expr + } + case 805: + { + x := types.NewFieldType(mysql.TypeVarString) + x.Flen = yyS[yypt-0].item.(int) // TODO: Flen should be the flen of expression + if x.Flen != types.UnspecifiedLength { + x.Tp = mysql.TypeString + } + x.Charset = charset.CharsetBin + x.Collate = charset.CollationBin + x.Flag |= mysql.BinaryFlag + parser.yyVAL.item = x + } + case 806: + { + x := types.NewFieldType(mysql.TypeVarString) + x.Flen = yyS[yypt-1].item.(int) // TODO: Flen should be the flen of expression + x.Charset = yyS[yypt-0].item.(*ast.OptBinary).Charset + if yyS[yypt-0].item.(*ast.OptBinary).IsBinary { + x.Flag |= mysql.BinaryFlag + } + if x.Charset == "" { + x.Charset = mysql.DefaultCharset + x.Collate = mysql.DefaultCollationName + } + parser.yyVAL.item = x + } + case 807: + { + x := types.NewFieldType(mysql.TypeDate) + x.Charset = charset.CharsetBin + x.Collate = charset.CollationBin + x.Flag |= mysql.BinaryFlag + parser.yyVAL.item = x + } + case 808: + { + x := types.NewFieldType(mysql.TypeDatetime) + x.Flen, _ = mysql.GetDefaultFieldLengthAndDecimalForCast(mysql.TypeDatetime) + x.Decimal = yyS[yypt-0].item.(int) + if x.Decimal > 0 { + x.Flen = x.Flen + 1 + x.Decimal + } + x.Charset = charset.CharsetBin + x.Collate = charset.CollationBin + x.Flag |= mysql.BinaryFlag + parser.yyVAL.item = x + } + case 809: + { + fopt := yyS[yypt-0].item.(*ast.FloatOpt) + x := types.NewFieldType(mysql.TypeNewDecimal) + x.Flen = fopt.Flen + x.Decimal = fopt.Decimal + x.Charset = charset.CharsetBin + x.Collate = charset.CollationBin + x.Flag |= mysql.BinaryFlag + parser.yyVAL.item = x + } + case 810: + { + x := 
types.NewFieldType(mysql.TypeDuration) + x.Flen, _ = mysql.GetDefaultFieldLengthAndDecimalForCast(mysql.TypeDuration) + x.Decimal = yyS[yypt-0].item.(int) + if x.Decimal > 0 { + x.Flen = x.Flen + 1 + x.Decimal + } + x.Charset = charset.CharsetBin + x.Collate = charset.CollationBin + x.Flag |= mysql.BinaryFlag + parser.yyVAL.item = x + } + case 811: + { + x := types.NewFieldType(mysql.TypeLonglong) + x.Charset = charset.CharsetBin + x.Collate = charset.CollationBin + x.Flag |= mysql.BinaryFlag + parser.yyVAL.item = x + } + case 812: + { + x := types.NewFieldType(mysql.TypeLonglong) + x.Flag |= mysql.UnsignedFlag | mysql.BinaryFlag + x.Charset = charset.CharsetBin + x.Collate = charset.CollationBin + parser.yyVAL.item = x + } + case 813: + { + x := types.NewFieldType(mysql.TypeJSON) + x.Flag |= mysql.BinaryFlag | (mysql.ParseToJSONFlag) + x.Charset = mysql.DefaultCharset + x.Collate = mysql.DefaultCollationName + parser.yyVAL.item = x + } + case 814: + { + x := types.NewFieldType(mysql.TypeDouble) + x.Flen, x.Decimal = mysql.GetDefaultFieldLengthAndDecimalForCast(mysql.TypeDouble) + x.Flag |= mysql.BinaryFlag + x.Charset = charset.CharsetBin + x.Collate = charset.CollationBin + parser.yyVAL.item = x + } + case 815: + { + x := types.NewFieldType(mysql.TypeFloat) + fopt := yyS[yypt-0].item.(*ast.FloatOpt) + if fopt.Flen >= 54 { + yylex.AppendError(ErrTooBigPrecision.GenWithStackByArgs(fopt.Flen, "CAST", 53)) + } else if fopt.Flen >= 25 { + x = types.NewFieldType(mysql.TypeDouble) + } + x.Flen, x.Decimal = mysql.GetDefaultFieldLengthAndDecimalForCast(x.Tp) + x.Flag |= mysql.BinaryFlag + x.Charset = charset.CharsetBin + x.Collate = charset.CollationBin + parser.yyVAL.item = x + } + case 816: + { + var x *types.FieldType + if parser.lexer.GetSQLMode().HasRealAsFloatMode() { + x = types.NewFieldType(mysql.TypeFloat) + } else { + x = types.NewFieldType(mysql.TypeDouble) + } + x.Flen, x.Decimal = mysql.GetDefaultFieldLengthAndDecimalForCast(x.Tp) + x.Flag |= mysql.BinaryFlag + x.Charset = charset.CharsetBin + x.Collate = charset.CollationBin + parser.yyVAL.item = x + } + case 817: + { + parser.yyVAL.item = mysql.NoPriority + } + case 818: + { + parser.yyVAL.item = mysql.LowPriority + } + case 819: + { + parser.yyVAL.item = mysql.HighPriority + } + case 820: + { + parser.yyVAL.item = mysql.DelayedPriority + } + case 821: + { + parser.yyVAL.item = &ast.TableName{Name: model.NewCIStr(yyS[yypt-0].ident)} + } + case 822: + { + parser.yyVAL.item = &ast.TableName{Schema: model.NewCIStr(yyS[yypt-2].ident), Name: model.NewCIStr(yyS[yypt-0].ident)} + } + case 823: + { + tbl := []*ast.TableName{yyS[yypt-0].item.(*ast.TableName)} + parser.yyVAL.item = tbl + } + case 824: + { + parser.yyVAL.item = append(yyS[yypt-2].item.([]*ast.TableName), yyS[yypt-0].item.(*ast.TableName)) + } + case 825: + { + parser.yyVAL.item = &ast.TableName{Name: model.NewCIStr(yyS[yypt-1].ident)} + } + case 826: + { + parser.yyVAL.item = &ast.TableName{Schema: model.NewCIStr(yyS[yypt-3].ident), Name: model.NewCIStr(yyS[yypt-1].ident)} + } + case 827: + { + tbl := []*ast.TableName{yyS[yypt-0].item.(*ast.TableName)} + parser.yyVAL.item = tbl + } + case 828: + { + parser.yyVAL.item = append(yyS[yypt-2].item.([]*ast.TableName), yyS[yypt-0].item.(*ast.TableName)) + } + case 831: + { + parser.yyVAL.item = false + } + case 832: + { + parser.yyVAL.item = true + } + case 833: + { + parser.yyVAL.statement = &ast.RollbackStmt{} + } + case 834: + { + st := &ast.SelectStmt{ + SelectStmtOpts: yyS[yypt-1].item.(*ast.SelectStmtOpts), + Distinct: 
yyS[yypt-1].item.(*ast.SelectStmtOpts).Distinct, + Fields: yyS[yypt-0].item.(*ast.FieldList), + } + if st.SelectStmtOpts.TableHints != nil { + st.TableHints = st.SelectStmtOpts.TableHints + } + parser.yyVAL.item = st + } + case 835: + { + st := yyS[yypt-2].item.(*ast.SelectStmt) + lastField := st.Fields.Fields[len(st.Fields.Fields)-1] + if lastField.Expr != nil && lastField.AsName.O == "" { + lastEnd := yyS[yypt-1].offset - 1 + lastField.SetText(parser.src[lastField.Offset:lastEnd]) + } + if yyS[yypt-0].item != nil { + st.Where = yyS[yypt-0].item.(ast.ExprNode) + } + } + case 836: + { + st := yyS[yypt-5].item.(*ast.SelectStmt) + st.From = yyS[yypt-3].item.(*ast.TableRefsClause) + lastField := st.Fields.Fields[len(st.Fields.Fields)-1] + if lastField.Expr != nil && lastField.AsName.O == "" { + lastEnd := parser.endOffset(&yyS[yypt-4]) + lastField.SetText(parser.src[lastField.Offset:lastEnd]) + } + if yyS[yypt-2].item != nil { + st.Where = yyS[yypt-2].item.(ast.ExprNode) + } + if yyS[yypt-1].item != nil { + st.GroupBy = yyS[yypt-1].item.(*ast.GroupByClause) + } + if yyS[yypt-0].item != nil { + st.Having = yyS[yypt-0].item.(*ast.HavingClause) + } + parser.yyVAL.item = st + } + case 837: + { + st := yyS[yypt-2].item.(*ast.SelectStmt) + lastField := st.Fields.Fields[len(st.Fields.Fields)-1] + if lastField.Expr != nil && lastField.AsName.O == "" { + src := parser.src + var lastEnd int + if yyS[yypt-1].item != nil { + lastEnd = yyS[yypt-1].offset - 1 + } else if yyS[yypt-0].item != nil { + lastEnd = yyS[yypt-0].offset - 1 + } else { + lastEnd = len(src) + if src[lastEnd-1] == ';' { + lastEnd-- + } + } + lastField.SetText(src[lastField.Offset:lastEnd]) + } + if yyS[yypt-1].item != nil { + st.OrderBy = yyS[yypt-1].item.(*ast.OrderByClause) + } + if yyS[yypt-0].item != nil { + st.Limit = yyS[yypt-0].item.(*ast.Limit) + } + parser.yyVAL.statement = st + } + case 838: + { + st := yyS[yypt-2].item.(*ast.SelectStmt) + if yyS[yypt-1].item != nil { + st.OrderBy = yyS[yypt-1].item.(*ast.OrderByClause) + } + if yyS[yypt-0].item != nil { + st.Limit = yyS[yypt-0].item.(*ast.Limit) + } + parser.yyVAL.statement = st + } + case 839: + { + st := yyS[yypt-2].item.(*ast.SelectStmt) + if yyS[yypt-1].item != nil { + st.OrderBy = yyS[yypt-1].item.(*ast.OrderByClause) + } + if yyS[yypt-0].item != nil { + st.Limit = yyS[yypt-0].item.(*ast.Limit) + } + parser.yyVAL.statement = st + } + case 841: + { + parser.yyVAL.item = &ast.TableRefsClause{TableRefs: yyS[yypt-0].item.(*ast.Join)} + } + case 842: + { + if j, ok := yyS[yypt-0].item.(*ast.Join); ok { + // if $1 is Join, use it directly + parser.yyVAL.item = j + } else { + parser.yyVAL.item = &ast.Join{Left: yyS[yypt-0].item.(ast.ResultSetNode), Right: nil} + } + } + case 843: + { + /* from a, b is default cross join */ + parser.yyVAL.item = &ast.Join{Left: yyS[yypt-2].item.(ast.ResultSetNode), Right: yyS[yypt-0].item.(ast.ResultSetNode), Tp: ast.CrossJoin} + } + case 844: + { + parser.yyVAL.item = yyS[yypt-0].item + } + case 845: + { + /* + * ODBC escape syntax for outer join is { OJ join_table } + * Use an Identifier for OJ + */ + parser.yyVAL.item = yyS[yypt-1].item + } + case 846: + { + parser.yyVAL.item = yyS[yypt-0].item + } + case 847: + { + parser.yyVAL.item = yyS[yypt-0].item + } + case 848: + { + tn := yyS[yypt-2].item.(*ast.TableName) + tn.IndexHints = yyS[yypt-0].item.([]*ast.IndexHint) + parser.yyVAL.item = &ast.TableSource{Source: tn, AsName: yyS[yypt-1].item.(model.CIStr)} + } + case 849: + { + st := yyS[yypt-2].statement.(*ast.SelectStmt) + endOffset := 
parser.endOffset(&yyS[yypt-1]) + parser.setLastSelectFieldText(st, endOffset) + parser.yyVAL.item = &ast.TableSource{Source: yyS[yypt-2].statement.(*ast.SelectStmt), AsName: yyS[yypt-0].item.(model.CIStr)} + } + case 850: + { + parser.yyVAL.item = yyS[yypt-1].item + } + case 851: + { + parser.yyVAL.item = model.CIStr{} + } + case 852: + { + parser.yyVAL.item = yyS[yypt-0].item + } + case 853: + { + parser.yyVAL.item = model.NewCIStr(yyS[yypt-0].ident) + } + case 854: + { + parser.yyVAL.item = model.NewCIStr(yyS[yypt-0].ident) + } + case 855: + { + parser.yyVAL.item = ast.HintUse + } + case 856: + { + parser.yyVAL.item = ast.HintIgnore + } + case 857: + { + parser.yyVAL.item = ast.HintForce + } + case 858: + { + parser.yyVAL.item = ast.HintForScan + } + case 859: + { + parser.yyVAL.item = ast.HintForJoin + } + case 860: + { + parser.yyVAL.item = ast.HintForOrderBy + } + case 861: + { + parser.yyVAL.item = ast.HintForGroupBy + } + case 862: + { + parser.yyVAL.item = &ast.IndexHint{ + IndexNames: yyS[yypt-1].item.([]model.CIStr), + HintType: yyS[yypt-4].item.(ast.IndexHintType), + HintScope: yyS[yypt-3].item.(ast.IndexHintScope), + } + } + case 863: + { + var nameList []model.CIStr + parser.yyVAL.item = nameList + } + case 864: + { + parser.yyVAL.item = []model.CIStr{model.NewCIStr(yyS[yypt-0].ident)} + } + case 865: + { + parser.yyVAL.item = append(yyS[yypt-2].item.([]model.CIStr), model.NewCIStr(yyS[yypt-0].ident)) + } + case 866: + { + parser.yyVAL.item = []model.CIStr{model.NewCIStr(yyS[yypt-0].ident)} + } + case 867: + { + parser.yyVAL.item = append(yyS[yypt-2].item.([]model.CIStr), model.NewCIStr(yyS[yypt-0].ident)) + } + case 868: + { + parser.yyVAL.item = []*ast.IndexHint{yyS[yypt-0].item.(*ast.IndexHint)} + } + case 869: + { + parser.yyVAL.item = append(yyS[yypt-1].item.([]*ast.IndexHint), yyS[yypt-0].item.(*ast.IndexHint)) + } + case 870: + { + var hintList []*ast.IndexHint + parser.yyVAL.item = hintList + } + case 871: + { + parser.yyVAL.item = yyS[yypt-0].item + } + case 872: + { + parser.yyVAL.item = &ast.Join{Left: yyS[yypt-2].item.(ast.ResultSetNode), Right: yyS[yypt-0].item.(ast.ResultSetNode), Tp: ast.CrossJoin} + } + case 873: + { + parser.yyVAL.item = ast.LeftJoin + } + case 874: + { + parser.yyVAL.item = ast.RightJoin + } + case 879: + { + parser.yyVAL.item = nil + } + case 880: + { + parser.yyVAL.item = &ast.Limit{Count: yyS[yypt-0].item.(ast.ValueExpr)} + } + case 881: + { + parser.yyVAL.item = ast.NewValueExpr(yyS[yypt-0].item) + } + case 882: + { + parser.yyVAL.item = nil + } + case 883: + { + parser.yyVAL.item = &ast.Limit{Count: yyS[yypt-0].item.(ast.ExprNode)} + } + case 884: + { + parser.yyVAL.item = &ast.Limit{Offset: yyS[yypt-2].item.(ast.ExprNode), Count: yyS[yypt-0].item.(ast.ExprNode)} + } + case 885: + { + parser.yyVAL.item = &ast.Limit{Offset: yyS[yypt-0].item.(ast.ExprNode), Count: yyS[yypt-2].item.(ast.ExprNode)} + } + case 886: + { + opt := &ast.SelectStmtOpts{} + if yyS[yypt-8].item != nil { + opt.TableHints = yyS[yypt-8].item.([]*ast.TableOptimizerHint) + } + if yyS[yypt-7].item != nil { + opt.Distinct = yyS[yypt-7].item.(bool) + } + if yyS[yypt-6].item != nil { + opt.Priority = yyS[yypt-6].item.(mysql.PriorityEnum) + } + if yyS[yypt-5].item != nil { + opt.SQLSmallResult = yyS[yypt-5].item.(bool) + } + if yyS[yypt-4].item != nil { + opt.SQLBigResult = yyS[yypt-4].item.(bool) + } + if yyS[yypt-3].item != nil { + opt.SQLBufferResult = yyS[yypt-3].item.(bool) + } + if yyS[yypt-2].item != nil { + opt.SQLCache = yyS[yypt-2].item.(bool) + } + if 
yyS[yypt-1].item != nil { + opt.CalcFoundRows = yyS[yypt-1].item.(bool) + } + if yyS[yypt-0].item != nil { + opt.StraightJoin = yyS[yypt-0].item.(bool) + } + + parser.yyVAL.item = opt + } + case 887: + { + parser.yyVAL.item = nil + } + case 888: + { + parser.yyVAL.item = yyS[yypt-1].item + } + case 889: + { + yyerrok() + parser.lastErrorAsWarn() + parser.yyVAL.item = nil + } + case 890: + { + parser.yyVAL.item = []*ast.TableOptimizerHint{yyS[yypt-0].item.(*ast.TableOptimizerHint)} + } + case 891: + { + parser.yyVAL.item = yyS[yypt-0].item.([]*ast.TableOptimizerHint) + } + case 892: + { + parser.yyVAL.item = append(yyS[yypt-1].item.([]*ast.TableOptimizerHint), yyS[yypt-0].item.(*ast.TableOptimizerHint)) + } + case 893: + { + parser.yyVAL.item = append(yyS[yypt-2].item.([]*ast.TableOptimizerHint), yyS[yypt-0].item.(*ast.TableOptimizerHint)) + } + case 894: + { + parser.yyVAL.item = append(yyS[yypt-1].item.([]*ast.TableOptimizerHint), yyS[yypt-0].item.([]*ast.TableOptimizerHint)...) + } + case 895: + { + parser.yyVAL.item = append(yyS[yypt-2].item.([]*ast.TableOptimizerHint), yyS[yypt-0].item.([]*ast.TableOptimizerHint)...) + } + case 896: + { + parser.yyVAL.item = &ast.TableOptimizerHint{ + HintName: model.NewCIStr(yyS[yypt-5].ident), + QBName: yyS[yypt-3].item.(model.CIStr), + Tables: []ast.HintTable{yyS[yypt-2].item.(ast.HintTable)}, + Indexes: yyS[yypt-1].item.([]model.CIStr), + } + } + case 897: + { + parser.yyVAL.item = &ast.TableOptimizerHint{ + HintName: model.NewCIStr(yyS[yypt-5].ident), + QBName: yyS[yypt-3].item.(model.CIStr), + Tables: []ast.HintTable{yyS[yypt-2].item.(ast.HintTable)}, + Indexes: yyS[yypt-1].item.([]model.CIStr), + } + } + case 898: + { + parser.yyVAL.item = &ast.TableOptimizerHint{HintName: model.NewCIStr(yyS[yypt-4].ident), QBName: yyS[yypt-2].item.(model.CIStr), Tables: yyS[yypt-1].item.([]ast.HintTable)} + } + case 899: + { + parser.yyVAL.item = &ast.TableOptimizerHint{HintName: model.NewCIStr(yyS[yypt-4].ident), QBName: yyS[yypt-2].item.(model.CIStr), Tables: yyS[yypt-1].item.([]ast.HintTable)} + } + case 900: + { + parser.yyVAL.item = &ast.TableOptimizerHint{HintName: model.NewCIStr(yyS[yypt-4].ident), QBName: yyS[yypt-2].item.(model.CIStr), Tables: yyS[yypt-1].item.([]ast.HintTable)} + } + case 901: + { + parser.yyVAL.item = &ast.TableOptimizerHint{HintName: model.NewCIStr(yyS[yypt-4].ident), QBName: yyS[yypt-2].item.(model.CIStr), Tables: yyS[yypt-1].item.([]ast.HintTable)} + } + case 902: + { + parser.yyVAL.item = &ast.TableOptimizerHint{HintName: model.NewCIStr(yyS[yypt-4].ident), QBName: yyS[yypt-2].item.(model.CIStr), Tables: yyS[yypt-1].item.([]ast.HintTable)} + } + case 903: + { + parser.yyVAL.item = &ast.TableOptimizerHint{HintName: model.NewCIStr(yyS[yypt-4].ident), QBName: yyS[yypt-2].item.(model.CIStr), Tables: yyS[yypt-1].item.([]ast.HintTable)} + } + case 904: + { + parser.yyVAL.item = &ast.TableOptimizerHint{HintName: model.NewCIStr(yyS[yypt-4].ident), QBName: yyS[yypt-2].item.(model.CIStr), Tables: yyS[yypt-1].item.([]ast.HintTable)} + } + case 905: + { + parser.yyVAL.item = &ast.TableOptimizerHint{ + HintName: model.NewCIStr(yyS[yypt-5].ident), + QBName: yyS[yypt-3].item.(model.CIStr), + Tables: []ast.HintTable{yyS[yypt-2].item.(ast.HintTable)}, + Indexes: yyS[yypt-1].item.([]model.CIStr), + } + } + case 906: + { + parser.yyVAL.item = &ast.TableOptimizerHint{HintName: model.NewCIStr(yyS[yypt-4].ident), QBName: yyS[yypt-2].item.(model.CIStr), HintFlag: yyS[yypt-1].item.(bool)} + } + case 907: + { + parser.yyVAL.item = 
&ast.TableOptimizerHint{HintName: model.NewCIStr(yyS[yypt-4].ident), QBName: yyS[yypt-2].item.(model.CIStr), HintFlag: yyS[yypt-1].item.(bool)} + } + case 908: + { + parser.yyVAL.item = &ast.TableOptimizerHint{HintName: model.NewCIStr(yyS[yypt-4].ident), QBName: yyS[yypt-2].item.(model.CIStr), MaxExecutionTime: getUint64FromNUM(yyS[yypt-1].item)} + } + case 909: + { + // arguments not decided yet. + parser.yyVAL.item = &ast.TableOptimizerHint{HintName: model.NewCIStr(yyS[yypt-3].ident), QBName: yyS[yypt-1].item.(model.CIStr)} + } + case 910: + { + parser.yyVAL.item = &ast.TableOptimizerHint{HintName: model.NewCIStr(yyS[yypt-4].ident), QBName: yyS[yypt-2].item.(model.CIStr), QueryType: model.NewCIStr(yyS[yypt-1].item.(string))} + } + case 911: + { + parser.yyVAL.item = &ast.TableOptimizerHint{HintName: model.NewCIStr(yyS[yypt-4].ident), QBName: yyS[yypt-2].item.(model.CIStr), MemoryQuota: yyS[yypt-1].item.(int64)} + } + case 912: + { + parser.yyVAL.item = &ast.TableOptimizerHint{HintName: model.NewCIStr(yyS[yypt-3].ident), QBName: yyS[yypt-1].item.(model.CIStr)} + } + case 913: + { + parser.yyVAL.item = &ast.TableOptimizerHint{HintName: model.NewCIStr(yyS[yypt-3].ident), QBName: yyS[yypt-1].item.(model.CIStr)} + } + case 914: + { + parser.yyVAL.item = &ast.TableOptimizerHint{HintName: model.NewCIStr(yyS[yypt-3].ident), QBName: yyS[yypt-1].item.(model.CIStr)} + } + case 915: + { + parser.yyVAL.item = &ast.TableOptimizerHint{HintName: model.NewCIStr(yyS[yypt-3].ident), QBName: yyS[yypt-1].item.(model.CIStr)} + } + case 916: + { + parser.yyVAL.item = &ast.TableOptimizerHint{HintName: model.NewCIStr(yyS[yypt-3].ident), QBName: yyS[yypt-1].item.(model.CIStr)} + } + case 917: + { + parser.yyVAL.item = &ast.TableOptimizerHint{HintName: model.NewCIStr(yyS[yypt-3].ident), QBName: model.NewCIStr(yyS[yypt-1].ident)} + } + case 918: + { + parser.yyVAL.item = yyS[yypt-1].item.([]*ast.TableOptimizerHint) + for _, hint := range parser.yyVAL.item.([]*ast.TableOptimizerHint) { + hint.HintName = model.NewCIStr(yyS[yypt-4].ident) + hint.QBName = yyS[yypt-2].item.(model.CIStr) + } + } + case 919: + { + parser.yyVAL.item = []*ast.TableOptimizerHint{yyS[yypt-0].item.(*ast.TableOptimizerHint)} + } + case 920: + { + parser.yyVAL.item = append(yyS[yypt-2].item.([]*ast.TableOptimizerHint), yyS[yypt-0].item.(*ast.TableOptimizerHint)) + } + case 921: + { + parser.yyVAL.item = &ast.TableOptimizerHint{ + StoreType: model.NewCIStr(yyS[yypt-3].item.(string)), + Tables: yyS[yypt-1].item.([]ast.HintTable), + } + } + case 922: + { + parser.yyVAL.item = model.NewCIStr("") + } + case 923: + { + parser.yyVAL.item = model.NewCIStr(yyS[yypt-0].ident) + } + case 924: + { + parser.yyVAL.item = ast.HintTable{TableName: model.NewCIStr(yyS[yypt-1].ident), QBName: yyS[yypt-0].item.(model.CIStr)} + } + case 925: + { + parser.yyVAL.item = ast.HintTable{DBName: model.NewCIStr(yyS[yypt-3].ident), TableName: model.NewCIStr(yyS[yypt-1].ident), QBName: yyS[yypt-0].item.(model.CIStr)} + } + case 926: + { + parser.yyVAL.item = []ast.HintTable{yyS[yypt-0].item.(ast.HintTable)} + } + case 927: + { + parser.yyVAL.item = append(yyS[yypt-2].item.([]ast.HintTable), yyS[yypt-0].item.(ast.HintTable)) + } + case 928: + { + parser.yyVAL.item = true + } + case 929: + { + parser.yyVAL.item = false + } + case 930: + { + parser.yyVAL.item = yyS[yypt-0].ident + } + case 931: + { + parser.yyVAL.item = yyS[yypt-0].ident + } + case 932: + { + parser.yyVAL.item = yyS[yypt-0].ident + } + case 933: + { + parser.yyVAL.item = yyS[yypt-0].ident + } + case 934: + { + 
switch model.NewCIStr(yyS[yypt-0].ident).L { + case "mb": + parser.yyVAL.item = yyS[yypt-1].item.(int64) * 1024 * 1024 + case "gb": + parser.yyVAL.item = yyS[yypt-1].item.(int64) * 1024 * 1024 * 1024 + default: + // Executor handle memory quota < 0 as no memory limit, here use it to trigger warning in TiDB. + parser.yyVAL.item = int64(-1) + } + } + case 935: + { + parser.yyVAL.item = false + } + case 936: + { + parser.yyVAL.item = true + } + case 937: + { + parser.yyVAL.item = false + } + case 938: + { + parser.yyVAL.item = true + } + case 939: + { + parser.yyVAL.item = false + } + case 940: + { + parser.yyVAL.item = true + } + case 941: + { + parser.yyVAL.item = true + } + case 942: + { + parser.yyVAL.item = true + } + case 943: + { + parser.yyVAL.item = false + } + case 944: + { + parser.yyVAL.item = false + } + case 945: + { + parser.yyVAL.item = true + } + case 946: + { + parser.yyVAL.item = false + } + case 947: + { + parser.yyVAL.item = true + } + case 948: + { + parser.yyVAL.item = &ast.FieldList{Fields: yyS[yypt-0].item.([]*ast.SelectField)} + } + case 949: + { + parser.yyVAL.item = nil + } + case 951: + { + parser.yyVAL.statement = &ast.SetStmt{Variables: yyS[yypt-0].item.([]*ast.VariableAssignment)} + } + case 952: + { + parser.yyVAL.expr = ast.NewValueExpr("ON") + } + case 957: + { + parser.yyVAL.ident = yyS[yypt-2].ident + "." + yyS[yypt-0].ident + } + case 958: + { + parser.yyVAL.item = &ast.VariableAssignment{Name: yyS[yypt-2].ident, Value: yyS[yypt-0].expr, IsSystem: true} + } + case 959: + { + parser.yyVAL.item = &ast.VariableAssignment{Name: yyS[yypt-2].ident, Value: yyS[yypt-0].expr, IsGlobal: true, IsSystem: true} + } + case 960: + { + parser.yyVAL.item = &ast.VariableAssignment{Name: yyS[yypt-2].ident, Value: yyS[yypt-0].expr, IsSystem: true} + } + case 961: + { + parser.yyVAL.item = &ast.VariableAssignment{Name: yyS[yypt-2].ident, Value: yyS[yypt-0].expr, IsSystem: true} + } + case 962: + { + v := strings.ToLower(yyS[yypt-2].ident) + var isGlobal bool + if strings.HasPrefix(v, "@@global.") { + isGlobal = true + v = strings.TrimPrefix(v, "@@global.") + } else if strings.HasPrefix(v, "@@session.") { + v = strings.TrimPrefix(v, "@@session.") + } else if strings.HasPrefix(v, "@@local.") { + v = strings.TrimPrefix(v, "@@local.") + } else if strings.HasPrefix(v, "@@") { + v = strings.TrimPrefix(v, "@@") + } + parser.yyVAL.item = &ast.VariableAssignment{Name: v, Value: yyS[yypt-0].expr, IsGlobal: isGlobal, IsSystem: true} + } + case 963: + { + v := yyS[yypt-2].ident + v = strings.TrimPrefix(v, "@") + parser.yyVAL.item = &ast.VariableAssignment{Name: v, Value: yyS[yypt-0].expr} + } + case 964: + { + parser.yyVAL.expr = ast.NewValueExpr(yyS[yypt-0].item.(string)) + } + case 965: + { + parser.yyVAL.expr = &ast.DefaultExpr{} + } + case 966: + { + // Validate input charset name to keep the same behavior as parser of MySQL. + name, _, err := charset.GetCharsetInfo(yyS[yypt-0].item.(string)) + if err != nil { + yylex.AppendError(ErrUnknownCharacterSet.GenWithStackByArgs(yyS[yypt-0].item)) + return 1 + } + // Use charset name returned from charset.GetCharsetInfo(), + // to keep lower case of input for generated column restore. 
+ parser.yyVAL.item = name + } + case 967: + { + parser.yyVAL.item = charset.CharsetBin + } + case 968: + { + info, err := charset.GetCollationByName(yyS[yypt-0].item.(string)) + if err != nil { + yylex.AppendError(err) + return 1 + } + parser.yyVAL.item = info.Name + } + case 969: + { + parser.yyVAL.item = []*ast.VariableAssignment{} + } + case 970: + { + parser.yyVAL.item = []*ast.VariableAssignment{yyS[yypt-0].item.(*ast.VariableAssignment)} + } + case 971: + { + parser.yyVAL.item = append(yyS[yypt-2].item.([]*ast.VariableAssignment), yyS[yypt-0].item.(*ast.VariableAssignment)) + } + case 974: + { + v := strings.ToLower(yyS[yypt-0].ident) + var isGlobal bool + explicitScope := true + if strings.HasPrefix(v, "@@global.") { + isGlobal = true + v = strings.TrimPrefix(v, "@@global.") + } else if strings.HasPrefix(v, "@@session.") { + v = strings.TrimPrefix(v, "@@session.") + } else if strings.HasPrefix(v, "@@local.") { + v = strings.TrimPrefix(v, "@@local.") + } else if strings.HasPrefix(v, "@@") { + v, explicitScope = strings.TrimPrefix(v, "@@"), false + } + parser.yyVAL.expr = &ast.VariableExpr{Name: v, IsGlobal: isGlobal, IsSystem: true, ExplicitScope: explicitScope} + } + case 975: + { + v := yyS[yypt-0].ident + v = strings.TrimPrefix(v, "@") + parser.yyVAL.expr = &ast.VariableExpr{Name: v, IsGlobal: false, IsSystem: false} + } + case 976: + { + parser.yyVAL.statement = &ast.AdminStmt{Tp: ast.AdminShowDDL} + } + case 977: + { + stmt := &ast.AdminStmt{Tp: ast.AdminShowDDLJobs} + if yyS[yypt-0].item != nil { + stmt.Where = yyS[yypt-0].item.(ast.ExprNode) + } + parser.yyVAL.statement = stmt + } + case 978: + { + stmt := &ast.AdminStmt{ + Tp: ast.AdminShowDDLJobs, + JobNumber: yyS[yypt-1].item.(int64), + } + if yyS[yypt-0].item != nil { + stmt.Where = yyS[yypt-0].item.(ast.ExprNode) + } + parser.yyVAL.statement = stmt + } + case 979: + { + stmt := yyS[yypt-1].item.(*ast.ShowStmt) + if yyS[yypt-0].item != nil { + stmt.Where = yyS[yypt-0].item.(ast.ExprNode) + } + parser.yyVAL.statement = stmt + } + case 980: + { + parser.yyVAL.statement = &ast.ShowStmt{ + Tp: ast.ShowCreateTable, + Table: yyS[yypt-0].item.(*ast.TableName), + } + } + case 981: + { + parser.yyVAL.statement = &ast.ShowStmt{ + Tp: ast.ShowCreateDatabase, + IfNotExists: yyS[yypt-1].item.(bool), + DBName: yyS[yypt-0].item.(string), + } + } + case 982: + { + parser.yyVAL.statement = &ast.ShowStmt{ + Tp: ast.ShowProcessList, + Full: yyS[yypt-1].item.(bool), + } + } + case 988: + { + parser.yyVAL.item = &ast.ShowStmt{Tp: ast.ShowDatabases} + } + case 989: + { + parser.yyVAL.item = &ast.ShowStmt{ + Tp: ast.ShowTables, + DBName: yyS[yypt-0].item.(string), + Full: yyS[yypt-2].item.(bool), + } + } + case 990: + { + parser.yyVAL.item = &ast.ShowStmt{Tp: ast.ShowWarnings} + } + case 991: + { + parser.yyVAL.item = &ast.ShowStmt{Tp: ast.ShowErrors} + } + case 992: + { + parser.yyVAL.item = &ast.ShowStmt{ + Tp: ast.ShowVariables, + GlobalScope: yyS[yypt-1].item.(bool), + } + } + case 993: + { + parser.yyVAL.item = nil + } + case 994: + { + parser.yyVAL.item = yyS[yypt-0].expr + } + case 995: + { + parser.yyVAL.item = false + } + case 996: + { + parser.yyVAL.item = true + } + case 997: + { + parser.yyVAL.item = false + } + case 998: + { + parser.yyVAL.item = false + } + case 999: + { + parser.yyVAL.item = true + } + case 1000: + { + parser.yyVAL.item = "" + } + case 1001: + { + parser.yyVAL.item = yyS[yypt-0].item.(string) + } + case 1002: + { + parser.yyVAL.item = yyS[yypt-0].item.(*ast.TableName) + } + case 1003: + { + parser.yyVAL.item = 
[]*ast.TableName{} + } + case 1004: + { + parser.yyVAL.item = yyS[yypt-0].item + } + case 1031: + { + if yyS[yypt-0].statement != nil { + s := yyS[yypt-0].statement + if lexer, ok := yylex.(stmtTexter); ok { + s.SetText(lexer.stmtText()) + } + parser.result = append(parser.result, s) + } + } + case 1032: + { + if yyS[yypt-0].statement != nil { + s := yyS[yypt-0].statement + if lexer, ok := yylex.(stmtTexter); ok { + s.SetText(lexer.stmtText()) + } + parser.result = append(parser.result, s) + } + } + case 1033: + { + cst := yyS[yypt-0].item.(*ast.Constraint) + if yyS[yypt-1].item != nil { + cst.Name = yyS[yypt-1].item.(string) + } + parser.yyVAL.item = cst + } + case 1034: + { + parser.yyVAL.item = yyS[yypt-0].item.(*ast.ColumnDef) + } + case 1035: + { + parser.yyVAL.item = yyS[yypt-0].item.(*ast.Constraint) + } + case 1036: + { + if yyS[yypt-0].item != nil { + parser.yyVAL.item = []interface{}{yyS[yypt-0].item.(interface{})} + } else { + parser.yyVAL.item = []interface{}{} + } + } + case 1037: + { + if yyS[yypt-0].item != nil { + parser.yyVAL.item = append(yyS[yypt-2].item.([]interface{}), yyS[yypt-0].item) + } else { + parser.yyVAL.item = yyS[yypt-2].item + } + } + case 1038: + { + var columnDefs []*ast.ColumnDef + var constraints []*ast.Constraint + parser.yyVAL.item = &ast.CreateTableStmt{ + Cols: columnDefs, + Constraints: constraints, + } + } + case 1039: + { + tes := yyS[yypt-1].item.([]interface{}) + var columnDefs []*ast.ColumnDef + var constraints []*ast.Constraint + for _, te := range tes { + switch te := te.(type) { + case *ast.ColumnDef: + columnDefs = append(columnDefs, te) + case *ast.Constraint: + constraints = append(constraints, te) + } + } + parser.yyVAL.item = &ast.CreateTableStmt{ + Cols: columnDefs, + Constraints: constraints, + } + } + case 1042: + { + parser.yyVAL.statement = &ast.TruncateTableStmt{Table: yyS[yypt-0].item.(*ast.TableName)} + } + case 1043: + { + parser.yyVAL.item = yyS[yypt-0].item + } + case 1044: + { + parser.yyVAL.item = yyS[yypt-0].item + } + case 1045: + { + parser.yyVAL.item = yyS[yypt-0].item + } + case 1046: + { + // TODO: check flen 0 + x := types.NewFieldType(yyS[yypt-2].item.(byte)) + x.Flen = yyS[yypt-1].item.(int) + for _, o := range yyS[yypt-0].item.([]*ast.TypeOpt) { + if o.IsUnsigned { + x.Flag |= mysql.UnsignedFlag + } + if o.IsZerofill { + x.Flag |= mysql.ZerofillFlag + } + } + parser.yyVAL.item = x + } + case 1047: + { + // TODO: check flen 0 + x := types.NewFieldType(yyS[yypt-1].item.(byte)) + x.Flen = 1 + for _, o := range yyS[yypt-0].item.([]*ast.TypeOpt) { + if o.IsUnsigned { + x.Flag |= mysql.UnsignedFlag + } + if o.IsZerofill { + x.Flag |= mysql.ZerofillFlag + } + } + parser.yyVAL.item = x + } + case 1048: + { + fopt := yyS[yypt-1].item.(*ast.FloatOpt) + x := types.NewFieldType(yyS[yypt-2].item.(byte)) + x.Flen = fopt.Flen + x.Decimal = fopt.Decimal + for _, o := range yyS[yypt-0].item.([]*ast.TypeOpt) { + if o.IsUnsigned { + x.Flag |= mysql.UnsignedFlag + } + if o.IsZerofill { + x.Flag |= mysql.ZerofillFlag + } + } + parser.yyVAL.item = x + } + case 1049: + { + fopt := yyS[yypt-1].item.(*ast.FloatOpt) + x := types.NewFieldType(yyS[yypt-2].item.(byte)) + x.Flen = fopt.Flen + if x.Tp == mysql.TypeFloat && fopt.Decimal == types.UnspecifiedLength && x.Flen <= mysql.MaxDoublePrecisionLength { + if x.Flen > mysql.MaxFloatPrecisionLength { + x.Tp = mysql.TypeDouble + } + x.Flen = types.UnspecifiedLength + } + x.Decimal = fopt.Decimal + for _, o := range yyS[yypt-0].item.([]*ast.TypeOpt) { + if o.IsUnsigned { + x.Flag |= 
mysql.UnsignedFlag + } + if o.IsZerofill { + x.Flag |= mysql.ZerofillFlag + } + } + parser.yyVAL.item = x + } + case 1050: + { + x := types.NewFieldType(yyS[yypt-1].item.(byte)) + x.Flen = yyS[yypt-0].item.(int) + if x.Flen == types.UnspecifiedLength { + x.Flen = 1 + } + parser.yyVAL.item = x + } + case 1051: + { + parser.yyVAL.item = mysql.TypeTiny + } + case 1052: + { + parser.yyVAL.item = mysql.TypeShort + } + case 1053: + { + parser.yyVAL.item = mysql.TypeInt24 + } + case 1054: + { + parser.yyVAL.item = mysql.TypeLong + } + case 1055: + { + parser.yyVAL.item = mysql.TypeTiny + } + case 1056: + { + parser.yyVAL.item = mysql.TypeShort + } + case 1057: + { + parser.yyVAL.item = mysql.TypeInt24 + } + case 1058: + { + parser.yyVAL.item = mysql.TypeLong + } + case 1059: + { + parser.yyVAL.item = mysql.TypeLonglong + } + case 1060: + { + parser.yyVAL.item = mysql.TypeLong + } + case 1061: + { + parser.yyVAL.item = mysql.TypeLonglong + } + case 1062: + { + parser.yyVAL.item = mysql.TypeTiny + } + case 1063: + { + parser.yyVAL.item = mysql.TypeTiny + } + case 1067: + { + parser.yyVAL.item = mysql.TypeNewDecimal + } + case 1068: + { + parser.yyVAL.item = mysql.TypeNewDecimal + } + case 1069: + { + parser.yyVAL.item = mysql.TypeNewDecimal + } + case 1070: + { + parser.yyVAL.item = mysql.TypeFloat + } + case 1071: + { + if parser.lexer.GetSQLMode().HasRealAsFloatMode() { + parser.yyVAL.item = mysql.TypeFloat + } else { + parser.yyVAL.item = mysql.TypeDouble + } + } + case 1072: + { + parser.yyVAL.item = mysql.TypeDouble + } + case 1073: + { + parser.yyVAL.item = mysql.TypeDouble + } + case 1074: + { + parser.yyVAL.item = mysql.TypeBit + } + case 1075: + { + x := types.NewFieldType(mysql.TypeString) + x.Flen = yyS[yypt-1].item.(int) + x.Charset = yyS[yypt-0].item.(*ast.OptBinary).Charset + if yyS[yypt-0].item.(*ast.OptBinary).IsBinary { + x.Flag |= mysql.BinaryFlag + } + parser.yyVAL.item = x + } + case 1076: + { + x := types.NewFieldType(mysql.TypeString) + x.Charset = yyS[yypt-0].item.(*ast.OptBinary).Charset + if yyS[yypt-0].item.(*ast.OptBinary).IsBinary { + x.Flag |= mysql.BinaryFlag + } + parser.yyVAL.item = x + } + case 1077: + { + x := types.NewFieldType(mysql.TypeString) + x.Flen = yyS[yypt-1].item.(int) + x.Charset = yyS[yypt-0].item.(*ast.OptBinary).Charset + if yyS[yypt-0].item.(*ast.OptBinary).IsBinary { + x.Flag |= mysql.BinaryFlag + } + parser.yyVAL.item = x + } + case 1078: + { + x := types.NewFieldType(mysql.TypeString) + x.Charset = yyS[yypt-0].item.(*ast.OptBinary).Charset + if yyS[yypt-0].item.(*ast.OptBinary).IsBinary { + x.Flag |= mysql.BinaryFlag + } + parser.yyVAL.item = x + } + case 1079: + { + x := types.NewFieldType(mysql.TypeVarchar) + x.Flen = yyS[yypt-1].item.(int) + x.Charset = yyS[yypt-0].item.(*ast.OptBinary).Charset + if yyS[yypt-0].item.(*ast.OptBinary).IsBinary { + x.Flag |= mysql.BinaryFlag + } + parser.yyVAL.item = x + } + case 1080: + { + x := types.NewFieldType(mysql.TypeVarchar) + x.Flen = yyS[yypt-1].item.(int) + x.Charset = yyS[yypt-0].item.(*ast.OptBinary).Charset + if yyS[yypt-0].item.(*ast.OptBinary).IsBinary { + x.Flag |= mysql.BinaryFlag + } + parser.yyVAL.item = x + } + case 1081: + { + x := types.NewFieldType(mysql.TypeString) + x.Flen = yyS[yypt-0].item.(int) + x.Charset = charset.CharsetBin + x.Collate = charset.CharsetBin + x.Flag |= mysql.BinaryFlag + parser.yyVAL.item = x + } + case 1082: + { + x := types.NewFieldType(mysql.TypeVarchar) + x.Flen = yyS[yypt-0].item.(int) + x.Charset = charset.CharsetBin + x.Collate = charset.CharsetBin + x.Flag 
|= mysql.BinaryFlag + parser.yyVAL.item = x + } + case 1083: + { + x := yyS[yypt-0].item.(*types.FieldType) + x.Charset = charset.CharsetBin + x.Collate = charset.CharsetBin + x.Flag |= mysql.BinaryFlag + parser.yyVAL.item = yyS[yypt-0].item.(*types.FieldType) + } + case 1084: + { + x := yyS[yypt-1].item.(*types.FieldType) + x.Charset = yyS[yypt-0].item.(*ast.OptBinary).Charset + if yyS[yypt-0].item.(*ast.OptBinary).IsBinary { + x.Flag |= mysql.BinaryFlag + } + parser.yyVAL.item = x + } + case 1085: + { + x := types.NewFieldType(mysql.TypeEnum) + x.Elems = yyS[yypt-2].item.([]string) + x.Charset = yyS[yypt-0].item.(string) + parser.yyVAL.item = x + } + case 1086: + { + x := types.NewFieldType(mysql.TypeSet) + x.Elems = yyS[yypt-2].item.([]string) + x.Charset = yyS[yypt-0].item.(string) + parser.yyVAL.item = x + } + case 1087: + { + x := types.NewFieldType(mysql.TypeJSON) + x.Decimal = 0 + x.Charset = charset.CharsetBin + x.Collate = charset.CollationBin + parser.yyVAL.item = x + } + case 1088: + { + x := types.NewFieldType(mysql.TypeMediumBlob) + x.Charset = yyS[yypt-0].item.(*ast.OptBinary).Charset + if yyS[yypt-0].item.(*ast.OptBinary).IsBinary { + x.Flag |= mysql.BinaryFlag + } + parser.yyVAL.item = x + } + case 1089: + { + x := types.NewFieldType(mysql.TypeMediumBlob) + x.Charset = yyS[yypt-0].item.(*ast.OptBinary).Charset + if yyS[yypt-0].item.(*ast.OptBinary).IsBinary { + x.Flag |= mysql.BinaryFlag + } + parser.yyVAL.item = x + } + case 1109: + { + x := types.NewFieldType(mysql.TypeTinyBlob) + parser.yyVAL.item = x + } + case 1110: + { + x := types.NewFieldType(mysql.TypeBlob) + x.Flen = yyS[yypt-0].item.(int) + parser.yyVAL.item = x + } + case 1111: + { + x := types.NewFieldType(mysql.TypeMediumBlob) + parser.yyVAL.item = x + } + case 1112: + { + x := types.NewFieldType(mysql.TypeLongBlob) + parser.yyVAL.item = x + } + case 1113: + { + x := types.NewFieldType(mysql.TypeMediumBlob) + parser.yyVAL.item = x + } + case 1114: + { + x := types.NewFieldType(mysql.TypeTinyBlob) + parser.yyVAL.item = x + + } + case 1115: + { + x := types.NewFieldType(mysql.TypeBlob) + x.Flen = yyS[yypt-0].item.(int) + parser.yyVAL.item = x + } + case 1116: + { + x := types.NewFieldType(mysql.TypeMediumBlob) + parser.yyVAL.item = x + } + case 1117: + { + x := types.NewFieldType(mysql.TypeLongBlob) + parser.yyVAL.item = x + } + case 1118: + { + parser.yyVAL.item = yyS[yypt-0].item + } + case 1119: + { + parser.yyVAL.item = &ast.OptBinary{ + IsBinary: false, + Charset: charset.CharsetLatin1, + } + } + case 1120: + { + name, _, err := charset.GetCharsetInfo("ucs2") + if err != nil { + yylex.AppendError(ErrUnknownCharacterSet.GenWithStackByArgs("ucs2")) + return 1 + } + parser.yyVAL.item = &ast.OptBinary{ + IsBinary: false, + Charset: name, + } + } + case 1121: + { + parser.yyVAL.item = &ast.OptBinary{ + IsBinary: false, + Charset: "", + } + } + case 1122: + { + x := types.NewFieldType(mysql.TypeDate) + parser.yyVAL.item = x + } + case 1123: + { + x := types.NewFieldType(mysql.TypeDatetime) + x.Flen = mysql.MaxDatetimeWidthNoFsp + x.Decimal = yyS[yypt-0].item.(int) + if x.Decimal > 0 { + x.Flen = x.Flen + 1 + x.Decimal + } + parser.yyVAL.item = x + } + case 1124: + { + x := types.NewFieldType(mysql.TypeTimestamp) + x.Flen = mysql.MaxDatetimeWidthNoFsp + x.Decimal = yyS[yypt-0].item.(int) + if x.Decimal > 0 { + x.Flen = x.Flen + 1 + x.Decimal + } + parser.yyVAL.item = x + } + case 1125: + { + x := types.NewFieldType(mysql.TypeDuration) + x.Flen = mysql.MaxDurationWidthNoFsp + x.Decimal = yyS[yypt-0].item.(int) + 
if x.Decimal > 0 { + x.Flen = x.Flen + 1 + x.Decimal + } + parser.yyVAL.item = x + } + case 1126: + { + x := types.NewFieldType(mysql.TypeYear) + x.Flen = yyS[yypt-1].item.(int) + if x.Flen != types.UnspecifiedLength && x.Flen != 4 { + yylex.AppendError(ErrInvalidYearColumnLength.GenWithStackByArgs()) + return -1 + } + parser.yyVAL.item = x + } + case 1127: + { + parser.yyVAL.item = int(yyS[yypt-1].item.(uint64)) + } + case 1128: + { + parser.yyVAL.item = types.UnspecifiedLength + } + case 1129: + { + parser.yyVAL.item = yyS[yypt-0].item.(int) + } + case 1130: + { + parser.yyVAL.item = &ast.TypeOpt{IsUnsigned: true} + } + case 1131: + { + parser.yyVAL.item = &ast.TypeOpt{IsUnsigned: false} + } + case 1132: + { + parser.yyVAL.item = &ast.TypeOpt{IsZerofill: true, IsUnsigned: true} + } + case 1133: + { + parser.yyVAL.item = []*ast.TypeOpt{} + } + case 1134: + { + parser.yyVAL.item = append(yyS[yypt-1].item.([]*ast.TypeOpt), yyS[yypt-0].item.(*ast.TypeOpt)) + } + case 1135: + { + parser.yyVAL.item = &ast.FloatOpt{Flen: types.UnspecifiedLength, Decimal: types.UnspecifiedLength} + } + case 1136: + { + parser.yyVAL.item = &ast.FloatOpt{Flen: yyS[yypt-0].item.(int), Decimal: types.UnspecifiedLength} + } + case 1137: + { + parser.yyVAL.item = yyS[yypt-0].item.(*ast.FloatOpt) + } + case 1138: + { + parser.yyVAL.item = &ast.FloatOpt{Flen: int(yyS[yypt-3].item.(uint64)), Decimal: int(yyS[yypt-1].item.(uint64))} + } + case 1139: + { + parser.yyVAL.item = false + } + case 1140: + { + parser.yyVAL.item = true + } + case 1141: + { + parser.yyVAL.item = &ast.OptBinary{ + IsBinary: false, + Charset: "", + } + } + case 1142: + { + parser.yyVAL.item = &ast.OptBinary{ + IsBinary: true, + Charset: yyS[yypt-0].item.(string), + } + } + case 1143: + { + parser.yyVAL.item = &ast.OptBinary{ + IsBinary: yyS[yypt-0].item.(bool), + Charset: yyS[yypt-1].item.(string), + } + } + case 1144: + { + parser.yyVAL.item = "" + } + case 1145: + { + parser.yyVAL.item = yyS[yypt-0].item.(string) + } + case 1149: + { + parser.yyVAL.item = "" + } + case 1150: + { + parser.yyVAL.item = yyS[yypt-0].item.(string) + } + case 1151: + { + parser.yyVAL.item = []string{yyS[yypt-0].ident} + } + case 1152: + { + parser.yyVAL.item = append(yyS[yypt-2].item.([]string), yyS[yypt-0].ident) + } + case 1153: + { + parser.yyVAL.item = yyS[yypt-0].ident + } + case 1154: + { + parser.yyVAL.item = yyS[yypt-0].ident + } + case 1155: + { + parser.yyVAL.statement = &ast.UseStmt{DBName: yyS[yypt-0].item.(string)} + } + case 1156: + { + parser.yyVAL.item = yyS[yypt-0].expr + } + case 1157: + { + parser.yyVAL.item = nil + } + case 1158: + { + parser.yyVAL.item = yyS[yypt-0].item + } + + } + + if yyEx != nil && yyEx.Reduced(r, exState, parser.yyVAL) { + return -1 + } + goto yystack /* stack new state and value */ +} diff --git a/parser/parser.y b/parser/parser.y new file mode 100644 index 0000000..3f66c71 --- /dev/null +++ b/parser/parser.y @@ -0,0 +1,5299 @@ +%{ +// Copyright 2013 The ql Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSES/QL-LICENSE file. + +// Copyright 2015 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +// Initial yacc source generated by ebnf2y[1] +// at 2013-10-04 23:10:47.861401015 +0200 CEST +// +// $ ebnf2y -o ql.y -oe ql.ebnf -start StatementList -pkg ql -p _ +// +// [1]: http://github.com/cznic/ebnf2y + +package parser + +import ( + "strings" + + "github.com/pingcap/tidb/parser/mysql" + "github.com/pingcap/tidb/parser/ast" + "github.com/pingcap/tidb/parser/model" + "github.com/pingcap/tidb/parser/opcode" + "github.com/pingcap/tidb/parser/charset" + "github.com/pingcap/tidb/parser/types" +) + +%} + +%union { + offset int // offset + item interface{} + ident string + expr ast.ExprNode + statement ast.StmtNode +} + +%token + /*yy:token "%c" */ identifier "identifier" + /*yy:token "_%c" */ underscoreCS "UNDERSCORE_CHARSET" + /*yy:token "\"%c\"" */ stringLit "string literal" + singleAtIdentifier "identifier with single leading at" + doubleAtIdentifier "identifier with double leading at" + invalid "a special token never used by parser, used by lexer to indicate error" + hintBegin "hintBegin is a virtual token for optimizer hint grammar" + hintEnd "hintEnd is a virtual token for optimizer hint grammar" + andand "&&" + pipes "||" + + /* The following tokens belong to ODBCDateTimeType. */ + odbcDateType "d" + odbcTimeType "t" + odbcTimestampType "ts" + + /* The following tokens belong to ReservedKeyword. Notice: make sure these tokens are contained in ReservedKeyword. */ + add "ADD" + all "ALL" + alter "ALTER" + analyze "ANALYZE" + and "AND" + as "AS" + asc "ASC" + between "BETWEEN" + bigIntType "BIGINT" + binaryType "BINARY" + blobType "BLOB" + both "BOTH" + by "BY" + cascade "CASCADE" + caseKwd "CASE" + change "CHANGE" + character "CHARACTER" + charType "CHAR" + check "CHECK" + collate "COLLATE" + column "COLUMN" + constraint "CONSTRAINT" + convert "CONVERT" + create "CREATE" + cross "CROSS" + currentDate "CURRENT_DATE" + currentTime "CURRENT_TIME" + currentTs "CURRENT_TIMESTAMP" + currentUser "CURRENT_USER" + currentRole "CURRENT_ROLE" + database "DATABASE" + databases "DATABASES" + dayHour "DAY_HOUR" + dayMicrosecond "DAY_MICROSECOND" + dayMinute "DAY_MINUTE" + daySecond "DAY_SECOND" + decimalType "DECIMAL" + defaultKwd "DEFAULT" + delayed "DELAYED" + deleteKwd "DELETE" + desc "DESC" + describe "DESCRIBE" + distinct "DISTINCT" + distinctRow "DISTINCTROW" + div "DIV" + doubleType "DOUBLE" + drop "DROP" + dual "DUAL" + elseKwd "ELSE" + enclosed "ENCLOSED" + escaped "ESCAPED" + exists "EXISTS" + explain "EXPLAIN" + except "EXCEPT" + falseKwd "FALSE" + floatType "FLOAT" + forKwd "FOR" + force "FORCE" + foreign "FOREIGN" + from "FROM" + fulltext "FULLTEXT" + generated "GENERATED" + grant "GRANT" + group "GROUP" + having "HAVING" + highPriority "HIGH_PRIORITY" + hourMicrosecond "HOUR_MICROSECOND" + hourMinute "HOUR_MINUTE" + hourSecond "HOUR_SECOND" + ifKwd "IF" + ignore "IGNORE" + in "IN" + index "INDEX" + infile "INFILE" + inner "INNER" + integerType "INTEGER" + interval "INTERVAL" + into "INTO" + is "IS" + insert "INSERT" + intType "INT" + int1Type "INT1" + int2Type "INT2" + int3Type "INT3" + int4Type "INT4" + int8Type "INT8" + join "JOIN" + key "KEY" + keys "KEYS" + kill "KILL" + language "LANGUAGE" + leading "LEADING" + left "LEFT" + like 
"LIKE" + limit "LIMIT" + lines "LINES" + linear "LINEAR" + load "LOAD" + localTime "LOCALTIME" + localTs "LOCALTIMESTAMP" + lock "LOCK" + longblobType "LONGBLOB" + longtextType "LONGTEXT" + lowPriority "LOW_PRIORITY" + match "MATCH" + maxValue "MAXVALUE" + mediumblobType "MEDIUMBLOB" + mediumIntType "MEDIUMINT" + mediumtextType "MEDIUMTEXT" + minuteMicrosecond "MINUTE_MICROSECOND" + minuteSecond "MINUTE_SECOND" + mod "MOD" + not "NOT" + noWriteToBinLog "NO_WRITE_TO_BINLOG" + null "NULL" + numericType "NUMERIC" + nvarcharType "NVARCHAR" + on "ON" + optimize "OPTIMIZE" + option "OPTION" + optionally "OPTIONALLY" + or "OR" + order "ORDER" + outer "OUTER" + packKeys "PACK_KEYS" + partition "PARTITION" + parser "PARSER" + precisionType "PRECISION" + primary "PRIMARY" + procedure "PROCEDURE" + shardRowIDBits "SHARD_ROW_ID_BITS" + preSplitRegions "PRE_SPLIT_REGIONS" + rangeKwd "RANGE" + read "READ" + realType "REAL" + references "REFERENCES" + regexpKwd "REGEXP" + rename "RENAME" + repeat "REPEAT" + replace "REPLACE" + require "REQUIRE" + restrict "RESTRICT" + revoke "REVOKE" + right "RIGHT" + rlike "RLIKE" + row "ROW" + secondMicrosecond "SECOND_MICROSECOND" + selectKwd "SELECT" + set "SET" + show "SHOW" + smallIntType "SMALLINT" + spatial "SPATIAL" + sql "SQL" + sqlBigResult "SQL_BIG_RESULT" + sqlCalcFoundRows "SQL_CALC_FOUND_ROWS" + sqlSmallResult "SQL_SMALL_RESULT" + ssl "SSL" + starting "STARTING" + straightJoin "STRAIGHT_JOIN" + tableKwd "TABLE" + stored "STORED" + terminated "TERMINATED" + then "THEN" + tinyblobType "TINYBLOB" + tinyIntType "TINYINT" + tinytextType "TINYTEXT" + to "TO" + trailing "TRAILING" + trigger "TRIGGER" + trueKwd "TRUE" + unique "UNIQUE" + union "UNION" + unlock "UNLOCK" + unsigned "UNSIGNED" + until "UNTIL" + update "UPDATE" + usage "USAGE" + use "USE" + using "USING" + utcDate "UTC_DATE" + utcTimestamp "UTC_TIMESTAMP" + utcTime "UTC_TIME" + values "VALUES" + long "LONG" + varcharType "VARCHAR" + varcharacter "VARCHARACTER" + varbinaryType "VARBINARY" + varying "VARYING" + virtual "VIRTUAL" + when "WHEN" + where "WHERE" + write "WRITE" + with "WITH" + xor "XOR" + yearMonth "YEAR_MONTH" + zerofill "ZEROFILL" + natural "NATURAL" + + /* The following tokens belong to UnReservedKeyword. Notice: make sure these tokens are contained in UnReservedKeyword. 
*/ + account "ACCOUNT" + action "ACTION" + advise "ADVISE" + after "AFTER" + against "AGAINST" + always "ALWAYS" + algorithm "ALGORITHM" + any "ANY" + ascii "ASCII" + autoIncrement "AUTO_INCREMENT" + autoRandom "AUTO_RANDOM" + avgRowLength "AVG_ROW_LENGTH" + avg "AVG" + begin "BEGIN" + binlog "BINLOG" + bitType "BIT" + block "BLOCK" + booleanType "BOOLEAN" + boolType "BOOL" + btree "BTREE" + byteType "BYTE" + cache "CACHE" + cascaded "CASCADED" + capture "CAPTURE" + charsetKwd "CHARSET" + checksum "CHECKSUM" + cipher "CIPHER" + cleanup "CLEANUP" + client "CLIENT" + coalesce "COALESCE" + collation "COLLATION" + columnFormat "COLUMN_FORMAT" + columns "COLUMNS" + comment "COMMENT" + commit "COMMIT" + committed "COMMITTED" + compact "COMPACT" + compressed "COMPRESSED" + compression "COMPRESSION" + connection "CONNECTION" + consistent "CONSISTENT" + context "CONTEXT" + cpu "CPU" + current "CURRENT" + cycle "CYCLE" + day "DAY" + data "DATA" + dateType "DATE" + datetimeType "DATETIME" + deallocate "DEALLOCATE" + definer "DEFINER" + delayKeyWrite "DELAY_KEY_WRITE" + directory "DIRECTORY" + disable "DISABLE" + discard "DISCARD" + disk "DISK" + do "DO" + duplicate "DUPLICATE" + dynamic "DYNAMIC" + enable "ENABLE" + encryption "ENCRYPTION" + end "END" + engine "ENGINE" + engines "ENGINES" + enum "ENUM" + event "EVENT" + events "EVENTS" + evolve "EVOLVE" + escape "ESCAPE" + exchange "EXCHANGE" + exclusive "EXCLUSIVE" + execute "EXECUTE" + expansion "EXPANSION" + expire "EXPIRE" + extended "EXTENDED" + faultsSym "FAULTS" + fields "FIELDS" + first "FIRST" + fixed "FIXED" + flush "FLUSH" + following "FOLLOWING" + format "FORMAT" + full "FULL" + function "FUNCTION" + grants "GRANTS" + hash "HASH" + history "HISTORY" + hosts "HOSTS" + hour "HOUR" + identified "IDENTIFIED" + importKwd "IMPORT" + insertMethod "INSERT_METHOD" + isolation "ISOLATION" + issuer "ISSUER" + increment "INCREMENT" + incremental "INCREMENTAL" + indexes "INDEXES" + invisible "INVISIBLE" + invoker "INVOKER" + io "IO" + ipc "IPC" + jsonType "JSON" + keyBlockSize "KEY_BLOCK_SIZE" + labels "LABELS" + last "LAST" + less "LESS" + level "LEVEL" + list "LIST" + local "LOCAL" + location "LOCATION" + logs "LOGS" + master "MASTER" + microsecond "MICROSECOND" + minute "MINUTE" + mode "MODE" + modify "MODIFY" + month "MONTH" + maxRows "MAX_ROWS" + maxConnectionsPerHour "MAX_CONNECTIONS_PER_HOUR" + maxQueriesPerHour "MAX_QUERIES_PER_HOUR" + maxUpdatesPerHour "MAX_UPDATES_PER_HOUR" + maxUserConnections "MAX_USER_CONNECTIONS" + memory "MEMORY" + merge "MERGE" + minRows "MIN_ROWS" + minValue "MINVALUE" + max_minutes "MAX_MINUTES" + max_idxnum "MAX_IDXNUM" + names "NAMES" + national "NATIONAL" + ncharType "NCHAR" + never "NEVER" + no "NO" + nocache "NOCACHE" + nocycle "NOCYCLE" + nodegroup "NODEGROUP" + nomaxvalue "NOMAXVALUE" + nominvalue "NOMINVALUE" + none "NONE" + noorder "NOORDER" + nulls "NULLS" + offset "OFFSET" + only "ONLY" + pageSym "PAGE" + password "PASSWORD" + partial "PARTIAL" + partitioning "PARTITIONING" + partitions "PARTITIONS" + pipesAsOr + plugins "PLUGINS" + preceding "PRECEDING" + prepare "PREPARE" + privileges "PRIVILEGES" + process "PROCESS" + processlist "PROCESSLIST" + profile "PROFILE" + profiles "PROFILES" + per_table "PER_TABLE" + per_db "PER_DB" + quarter "QUARTER" + query "QUERY" + queries "QUERIES" + quick "QUICK" + rebuild "REBUILD" + recover "RECOVER" + redundant "REDUNDANT" + reload "RELOAD" + remove "REMOVE" + reorganize "REORGANIZE" + repair "REPAIR" + repeatable "REPEATABLE" + respect "RESPECT" + replica "REPLICA" 
+ replication "REPLICATION" + reverse "REVERSE" + role "ROLE" + rollback "ROLLBACK" + routine "ROUTINE" + rowCount "ROW_COUNT" + rowFormat "ROW_FORMAT" + rtree "RTREE" + second "SECOND" + secondaryEngine "SECONDARY_ENGINE" + secondaryLoad "SECONDARY_LOAD" + secondaryUnload "SECONDARY_UNLOAD" + security "SECURITY" + separator "SEPARATOR" + sequence "SEQUENCE" + serial "SERIAL" + serializable "SERIALIZABLE" + session "SESSION" + share "SHARE" + shared "SHARED" + shutdown "SHUTDOWN" + signed "SIGNED" + simple "SIMPLE" + slave "SLAVE" + slow "SLOW" + snapshot "SNAPSHOT" + sqlBufferResult "SQL_BUFFER_RESULT" + sqlCache "SQL_CACHE" + sqlNoCache "SQL_NO_CACHE" + sqlTsiDay "SQL_TSI_DAY" + sqlTsiHour "SQL_TSI_HOUR" + sqlTsiMinute "SQL_TSI_MINUTE" + sqlTsiMonth "SQL_TSI_MONTH" + sqlTsiQuarter "SQL_TSI_QUARTER" + sqlTsiSecond "SQL_TSI_SECOND" + sqlTsiWeek "SQL_TSI_WEEK" + sqlTsiYear "SQL_TSI_YEAR" + start "START" + statsAutoRecalc "STATS_AUTO_RECALC" + statsPersistent "STATS_PERSISTENT" + statsSamplePages "STATS_SAMPLE_PAGES" + status "STATUS" + storage "STORAGE" + swaps "SWAPS" + switchesSym "SWITCHES" + systemTime "SYSTEM_TIME" + open "OPEN" + source "SOURCE" + subject "SUBJECT" + subpartition "SUBPARTITION" + subpartitions "SUBPARTITIONS" + super "SUPER" + some "SOME" + global "GLOBAL" + tableChecksum "TABLE_CHECKSUM" + tables "TABLES" + tablespace "TABLESPACE" + temporary "TEMPORARY" + temptable "TEMPTABLE" + textType "TEXT" + than "THAN" + timeType "TIME" + timestampType "TIMESTAMP" + trace "TRACE" + traditional "TRADITIONAL" + transaction "TRANSACTION" + triggers "TRIGGERS" + truncate "TRUNCATE" + tp "TYPE" + unbounded "UNBOUNDED" + uncommitted "UNCOMMITTED" + unicodeSym "UNICODE" + unknown "UNKNOWN" + user "USER" + undefined "UNDEFINED" + validation "VALIDATION" + value "VALUE" + variables "VARIABLES" + view "VIEW" + visible "VISIBLE" + binding "BINDING" + bindings "BINDINGS" + warnings "WARNINGS" + without "WITHOUT" + identSQLErrors "ERRORS" + week "WEEK" + yearType "YEAR" + x509 "X509" + enforced "ENFORCED" + nowait "NOWAIT" + + /* The following tokens belong to NotKeywordToken. Notice: make sure these tokens are contained in NotKeywordToken. */ + addDate "ADDDATE" + bitAnd "BIT_AND" + bitOr "BIT_OR" + bitXor "BIT_XOR" + bound "BOUND" + cast "CAST" + copyKwd "COPY" + count "COUNT" + curTime "CURTIME" + dateAdd "DATE_ADD" + dateSub "DATE_SUB" + exact "EXACT" + extract "EXTRACT" + flashback "FLASHBACK" + getFormat "GET_FORMAT" + groupConcat "GROUP_CONCAT" + next_row_id "NEXT_ROW_ID" + inplace "INPLACE" + instant "INSTANT" + internal "INTERNAL" + min "MIN" + max "MAX" + maxExecutionTime "MAX_EXECUTION_TIME" + now "NOW" + position "POSITION" + recent "RECENT" + staleness "STALENESS" + std "STD" + stddev "STDDEV" + stddevPop "STDDEV_POP" + stddevSamp "STDDEV_SAMP" + strong "STRONG" + subDate "SUBDATE" + sum "SUM" + substring "SUBSTRING" + timestampAdd "TIMESTAMPADD" + timestampDiff "TIMESTAMPDIFF" + tokudbDefault "TOKUDB_DEFAULT" + tokudbFast "TOKUDB_FAST" + tokudbLzma "TOKUDB_LZMA" + tokudbQuickLZ "TOKUDB_QUICKLZ" + tokudbSnappy "TOKUDB_SNAPPY" + tokudbSmall "TOKUDB_SMALL" + tokudbUncompressed "TOKUDB_UNCOMPRESSED" + tokudbZlib "TOKUDB_ZLIB" + top "TOP" + trim "TRIM" + variance "VARIANCE" + varPop "VAR_POP" + varSamp "VAR_SAMP" + exprPushdownBlacklist "EXPR_PUSHDOWN_BLACKLIST" + optRuleBlacklist "OPT_RULE_BLACKLIST" + + /* The following tokens belong to TiDBKeyword. Notice: make sure these tokens are contained in TiDBKeyword. 
*/ + admin "ADMIN" + buckets "BUCKETS" + builtins "BUILTINS" + cancel "CANCEL" + cmSketch "CMSKETCH" + ddl "DDL" + depth "DEPTH" + drainer "DRAINER" + jobs "JOBS" + job "JOB" + nodeID "NODE_ID" + nodeState "NODE_STATE" + optimistic "OPTIMISTIC" + pessimistic "PESSIMISTIC" + pump "PUMP" + samples "SAMPLES" + stats "STATS" + statsMeta "STATS_META" + statsHistograms "STATS_HISTOGRAMS" + statsBuckets "STATS_BUCKETS" + statsHealthy "STATS_HEALTHY" + tidb "TIDB" + hintAggToCop "AGG_TO_COP" + hintHJ "HASH_JOIN" + hintSMJ "SM_JOIN" + hintINLJ "INL_JOIN" + hintINLHJ "INL_HASH_JOIN" + hintINLMJ "INL_MERGE_JOIN" + hintSJI "SWAP_JOIN_INPUTS" + hintNSJI "NO_SWAP_JOIN_INPUTS" + hintHASHAGG "HASH_AGG" + hintSTREAMAGG "STREAM_AGG" + hintUseIndex "USE_INDEX" + hintIgnoreIndex "IGNORE_INDEX" + hintUseIndexMerge "USE_INDEX_MERGE" + hintNoIndexMerge "NO_INDEX_MERGE" + hintUseToja "USE_TOJA" + hintEnablePlanCache "ENABLE_PLAN_CACHE" + hintUsePlanCache "USE_PLAN_CACHE" + hintReadConsistentReplica "READ_CONSISTENT_REPLICA" + hintReadFromStorage "READ_FROM_STORAGE" + hintQBName "QB_NAME" + hintQueryType "QUERY_TYPE" + hintMemoryQuota "MEMORY_QUOTA" + hintOLAP "OLAP" + hintOLTP "OLTP" + hintTiKV "TIKV" + hintTiFlash "TIFLASH" + topn "TOPN" + split "SPLIT" + width "WIDTH" + regions "REGIONS" + region "REGION" + + builtinAddDate + builtinBitAnd + builtinBitOr + builtinBitXor + builtinCast + builtinCount + builtinCurDate + builtinCurTime + builtinDateAdd + builtinDateSub + builtinExtract + builtinGroupConcat + builtinMax + builtinMin + builtinNow + builtinPosition + builtinSubDate + builtinSubstring + builtinSum + builtinSysDate + builtinStddevPop + builtinStddevSamp + builtinTrim + builtinUser + builtinVarPop + builtinVarSamp + +%token + + /*yy:token "1.%d" */ floatLit "floating-point literal" + /*yy:token "1.%d" */ decLit "decimal literal" + /*yy:token "%d" */ intLit "integer literal" + /*yy:token "%x" */ hexLit "hexadecimal literal" + /*yy:token "%b" */ bitLit "bit literal" + + andnot "&^" + assignmentEq ":=" + eq "=" + ge ">=" + le "<=" + jss "->" + juss "->>" + lsh "<<" + neq "!=" + neqSynonym "<>" + nulleq "<=>" + rsh ">>" + +%token not2 + +%type + Expression "expression" + BoolPri "boolean primary expression" + ExprOrDefault "expression or default" + PredicateExpr "Predicate expression factor" + SetExpr "Set variable statement value's expression" + BitExpr "bit expression" + SimpleExpr "simple expression" + SimpleIdent "Simple Identifier expression" + SumExpr "aggregate functions" + FunctionCallGeneric "Function call with Identifier" + FunctionCallKeyword "Function call with keyword as function name" + FunctionCallNonKeyword "Function call with nonkeyword as function name" + Literal "literal value" + Variable "User or system variable" + SystemVariable "System defined variable name" + UserVariable "User defined variable name" + StringLiteral "text literal" + ExpressionOpt "Optional expression" + SignedLiteral "Literal or NumLiteral with sign" + DefaultValueExpr "DefaultValueExpr(Now or Signed Literal)" + NowSymOptionFraction "NowSym with optional fraction part" + CharsetNameOrDefault "Character set name or default" + +%type + AdminStmt "Check table statement or show ddl statement" + AlterTableStmt "Alter table statement" + AnalyzeTableStmt "Analyze table statement" + BeginTransactionStmt "BEGIN TRANSACTION statement" + + CommitStmt "COMMIT statement" + CreateTableStmt "CREATE TABLE statement" + CreateDatabaseStmt "Create Database Statement" + CreateIndexStmt "CREATE INDEX statement" + DropDatabaseStmt "DROP 
DATABASE statement" + DropIndexStmt "DROP INDEX statement" + DropTableStmt "DROP TABLE statement" + DeleteFromStmt "DELETE FROM statement" + EmptyStmt "empty statement" + ExplainStmt "EXPLAIN statement" + ExplainableStmt "explainable statement" + InsertIntoStmt "INSERT INTO statement" + SelectStmt "SELECT statement" + ReplaceIntoStmt "REPLACE INTO statement" + RollbackStmt "ROLLBACK statement" + SetStmt "Set variable statement" + ShowStmt "Show engines/databases/tables/user/columns/warnings/status statement" + Statement "statement" + TruncateTableStmt "TRUNCATE TABLE statement" + UseStmt "USE statement" + +%type + AlterTableSpec "Alter table specification" + AlterTableSpecList "Alter table specification list" + AlterTableSpecListOpt "Alter table specification list optional" + AnyOrAll "Any or All for subquery" + Assignment "assignment" + AssignmentList "assignment list" + AssignmentListOpt "assignment list opt" + OptionalBraces "optional braces" + CastType "Cast function target type" + CharsetName "Character set name" + CollationName "Collation name" + ColumnDef "table column definition" + ColumnDefList "table column definition list" + ColumnFormat "Column format" + ColumnName "column name" + ColumnNameList "column name list" + ColumnNameListOpt "column name list opt" + ColumnSetValue "insert statement set value by column name" + ColumnSetValueList "insert statement set value by column name list" + CompareOp "Compare opcode" + ColumnOption "column definition option" + ColumnOptionList "column definition option list" + VirtualOrStored "indicate generated column is stored or not" + ColumnOptionListOpt "optional column definition option list" + Constraint "table constraint" + ConstraintElem "table constraint element" + ConstraintKeywordOpt "Constraint Keyword or empty" + DatabaseOption "CREATE Database specification" + DatabaseOptionList "CREATE Database specification list" + DatabaseOptionListOpt "CREATE Database specification list opt" + DBName "Database Name" + DistinctOpt "Explicit distinct option" + DefaultFalseDistinctOpt "Distinct option which defaults to false" + DefaultTrueDistinctOpt "Distinct option which defaults to true" + EqOpt "= or empty" + EscapedTableRef "escaped table reference" + ExplainFormatType "explain format type" + ExpressionList "expression list" + ExpressionListOpt "expression list opt" + FuncDatetimePrecListOpt "Function datetime precision list opt" + FuncDatetimePrecList "Function datetime precision list" + Field "field expression" + FieldAsName "Field alias name" + FieldAsNameOpt "Field alias name opt" + FieldList "field expression list" + TableRefsClause "Table references clause" + FuncDatetimePrec "Function datetime precision" + GlobalScope "The scope of variable" + GroupByClause "GROUP BY clause" + HavingClause "HAVING clause" + IfExists "If Exists" + IfNotExists "If Not Exists" + IndexHint "index hint" + IndexHintList "index hint list" + IndexHintListOpt "index hint list opt" + IndexHintScope "index hint scope" + IndexHintType "index hint type" + IndexInvisible "index visible/invisible" + IndexKeyTypeOpt "index key type" + IndexName "index name" + IndexNameAndTypeOpt "index name and index type" + IndexNameList "index name list" + IndexOption "Index Option" + IndexOptionList "Index Option List or empty" + IndexType "index type" + IndexTypeName "index type name" + IndexTypeOpt "optional index type" + IndexPartSpecification "Index column name or expression" + IndexPartSpecificationList "List of index column name or expression" + IndexPartSpecificationListOpt 
"Optional list of index column name or expression" + InsertValues "Rest part of INSERT/REPLACE INTO statement" + JoinTable "join table" + JoinType "join type" + LocationLabelList "location label name list" + LikeEscapeOpt "like escape option" + LikeTableWithOrWithoutParen "LIKE table_name or ( LIKE table_name )" + LimitClause "LIMIT clause" + LimitOption "Limit option could be integer or parameter marker." + NumLiteral "Num/Int/Float/Decimal Literal" + OptFull "Full or empty" + OptTemporary "TEMPORARY or empty" + Order "ORDER BY clause optional collation specification" + OrderBy "ORDER BY clause" + ByItem "BY item" + OrderByOptional "Optional ORDER BY clause optional" + ByList "BY list" + QuickOptional "QUICK or empty" + QueryBlockOpt "Query block identifier optional" + PriorityOpt "Statement priority option" + OptGConcatSeparator "optional GROUP_CONCAT SEPARATOR" + RowValue "Row value" + SelectStmtCalcFoundRows "SELECT statement optional SQL_CALC_FOUND_ROWS" + SelectStmtSQLBigResult "SELECT statement optional SQL_BIG_RESULT" + SelectStmtSQLBufferResult "SELECT statement optional SQL_BUFFER_RESULT" + SelectStmtSQLCache "SELECT statement optional SQL_CAHCE/SQL_NO_CACHE" + SelectStmtSQLSmallResult "SELECT statement optional SQL_SMALL_RESULT" + SelectStmtStraightJoin "SELECT statement optional STRAIGHT_JOIN" + SelectStmtFieldList "SELECT statement field list" + SelectStmtLimit "SELECT statement optional LIMIT clause" + SelectStmtOpts "Select statement options" + SelectStmtBasic "SELECT statement from constant value" + SelectStmtFromDualTable "SELECT statement from dual table" + SelectStmtFromTable "SELECT statement from table" + SelectStmtGroup "SELECT statement optional GROUP BY clause" + ShowTargetFilterable "Show target that can be filtered by WHERE or LIKE" + ShowDatabaseNameOpt "Show tables/columns statement database name option" + ShowTableAliasOpt "Show table alias option" + ShowLikeOrWhereOpt "Show like or where clause option" + StatementList "statement list" + StringName "string literal or identifier" + StringList "string list" + Symbol "Constraint Symbol" + TableAliasRefList "table alias reference list" + TableAsName "table alias name" + TableAsNameOpt "table alias name optional" + TableElement "table definition element" + TableElementList "table definition element list" + TableElementListOpt "table definition element list optional" + TableFactor "table factor" + TableName "Table name" + TableNameOptWild "Table name with optional wildcard" + TableNameList "Table name list" + TableNameListOpt "Table name list opt" + TableRef "table reference" + TableRefs "table references" + + Values "values" + ValuesList "values list" + ValuesOpt "values optional" + VariableAssignment "set variable value" + VariableAssignmentList "set variable value list" + WhereClause "WHERE clause" + WhereClauseOptional "Optional WHERE clause" + WithValidation "with validation" + WithValidationOpt "optional with validation" + Type "Types" + + OptWild "Optional Wildcard" + + BetweenOrNotOp "Between predicate" + IsOrNotOp "Is predicate" + InOrNotOp "In predicate" + + NumericType "Numeric types" + IntegerType "Integer Types types" + BooleanType "Boolean Types types" + FixedPointType "Exact value types" + FloatingPointType "Approximate value types" + BitValueType "bit value types" + StringType "String types" + BlobType "Blob types" + TextType "Text types" + DateAndTimeType "Date and Time types" + + OptFieldLen "Field length or empty" + FieldLen "Field length" + FieldOpts "Field type definition option list" + FieldOpt 
"Field type definition option" + FloatOpt "Floating-point type option" + Precision "Floating-point precision option" + OptBinary "Optional BINARY" + OptBinMod "Optional BINARY mode" + OptCharsetWithOptBinary "Optional BINARY or ASCII or UNICODE or BYTE" + OptCharset "Optional Character setting" + OptCollate "Optional Collate setting" + NUM "A number" + LengthNum "Field length num(uint64)" + StorageOptimizerHintOpt "Storage level optimizer hint" + TableOptimizerHintOpt "Table level optimizer hint" + TableOptimizerHints "Table level optimizer hints" + OptimizerHintList "optimizer hint list" + HintTable "Table in optimizer hint" + HintTableList "Table list in optimizer hint" + HintStorageType "storage type in optimizer hint" + HintStorageTypeAndTable "storage type and tables in optimizer hint" + HintStorageTypeAndTableList "storage type and tables list in optimizer hint" + HintTrueOrFalse "True or false in optimizer hint" + HintQueryType "Query type in optimizer hint" + HintMemoryQuota "Memory quota in optimizer hint" + EnforcedOrNot "{ENFORCED|NOT ENFORCED}" + EnforcedOrNotOpt "Optional {ENFORCED|NOT ENFORCED}" + EnforcedOrNotOrNotNullOpt "{[ENFORCED|NOT ENFORCED|NOT NULL]}" + +%type + AsOpt "AS or EmptyString" + KeyOrIndex "{KEY|INDEX}" + ColumnKeywordOpt "Column keyword or empty" + PrimaryOpt "Optional primary keyword" + NowSym "CURRENT_TIMESTAMP/LOCALTIME/LOCALTIMESTAMP" + NowSymFunc "CURRENT_TIMESTAMP/LOCALTIME/LOCALTIMESTAMP/NOW" + DefaultKwdOpt "optional DEFAULT keyword" + DatabaseSym "DATABASE or SCHEMA" + ExplainSym "EXPLAIN or DESCRIBE or DESC" + IntoOpt "INTO or EmptyString" + ValueSym "Value or Values" + Char "{CHAR|CHARACTER}" + NChar "{NCHAR|NATIONAL CHARACTER|NATIONAL CHAR}" + Varchar "{VARCHAR|VARCHARACTER|CHARACTER VARYING|CHAR VARYING}" + NVarchar "{NATIONAL VARCHAR|NATIONAL VARCHARACTER|NVARCHAR|NCHAR VARCHAR|NATIONAL CHARACTER VARYING|NATIONAL CHAR VARYING|NCHAR VARYING}" + Year "{YEAR|SQL_TSI_YEAR}" + OuterOpt "optional OUTER clause" + CrossOpt "Cross join option" + ShowIndexKwd "Show index/indexs/key keyword" + DistinctKwd "DISTINCT/DISTINCTROW keyword" + FromOrIn "From or In" + OptTable "Optional table keyword" + OptInteger "Optional Integer keyword" + CharsetKw "charset or charater set" + CommaOpt "optional comma" + logAnd "logical and operator" + logOr "logical or operator" + StorageMedia "{DISK|MEMORY|DEFAULT}" + +%type + Identifier "identifier or unreserved keyword" + NotKeywordToken "Tokens not mysql keyword but treated specially" + UnReservedKeyword "MySQL unreserved keywords" + TiDBKeyword "TiDB added keywords" + FunctionNameConflict "Built-in function call names which are conflict with keywords" + FunctionNameOptionalBraces "Function with optional braces, all of them are reserved keywords." + FunctionNameDatetimePrecision "Function with optional datetime precision, all of them are reserved keywords." 
+ FunctionNameDateArith "Date arith function call names (date_add or date_sub)" + FunctionNameDateArithMultiForms "Date arith function call names (adddate or subdate)" + VariableName "A simple Identifier like xx or the xx.xx form" + +%precedence empty + +%precedence sqlBufferResult +%precedence sqlBigResult +%precedence sqlSmallResult +%precedence sqlCache sqlNoCache +%precedence lowerThanIntervalKeyword +%precedence interval +%precedence lowerThanStringLitToken +%precedence stringLit +%precedence lowerThanSetKeyword +%precedence set +%precedence lowerThanInsertValues +%precedence insertValues +%precedence lowerThanCreateTableSelect +%precedence createTableSelect +%precedence lowerThanCharsetKwd +%precedence charsetKwd +%precedence lowerThanKey +%precedence key +%precedence lowerThanLocal +%precedence local +%precedence lowerThanRemove +%precedence remove +%precedence lowerThenOrder +%precedence order + +%left join straightJoin inner cross left right full natural +/* A dummy token to force the priority of TableRef production in a join. */ +%left tableRefPriority +%precedence lowerThanOn +%precedence on using +%right assignmentEq +%left pipes or pipesAsOr +%left xor +%left andand and +%left between +%precedence lowerThanEq +%left eq ge le neq neqSynonym '>' '<' is like in +%left '|' +%left '&' +%left rsh lsh +%left '-' '+' +%left '*' '/' '%' div mod +%left '^' +%left '~' neg +%precedence lowerThanNot +%right not not2 +%right collate +%right encryption + +%left labels +%precedence '(' +%precedence quick +%precedence escape +%precedence lowerThanComma +%precedence ',' +%precedence higherThanComma + +%start Start + +%% + +Start: + StatementList + +/**************************************AlterTableStmt*************************************** + * See https://dev.mysql.com/doc/refman/5.7/en/alter-table.html + *******************************************************************************************/ +AlterTableStmt: + "ALTER" "TABLE" TableName AlterTableSpecListOpt + { + specs := $4.([]*ast.AlterTableSpec) + $$ = &ast.AlterTableStmt{ + Table: $3.(*ast.TableName), + Specs: specs, + } + } + +LocationLabelList: + { + $$ = []string{} + } +| "LOCATION" "LABELS" StringList + { + $$ = $3 + } + + +AlterTableSpec: + "ADD" ColumnKeywordOpt IfNotExists ColumnDef + { + $$ = &ast.AlterTableSpec{ + IfNotExists: $3.(bool), + Tp: ast.AlterTableAddColumns, + NewColumns: []*ast.ColumnDef{$4.(*ast.ColumnDef)}, + } + } +| "ADD" ColumnKeywordOpt IfNotExists '(' TableElementList ')' + { + tes := $5.([]interface {}) + var columnDefs []*ast.ColumnDef + var constraints []*ast.Constraint + for _, te := range tes { + switch te := te.(type) { + case *ast.ColumnDef: + columnDefs = append(columnDefs, te) + case *ast.Constraint: + constraints = append(constraints, te) + } + } + $$ = &ast.AlterTableSpec{ + IfNotExists: $3.(bool), + Tp: ast.AlterTableAddColumns, + NewColumns: columnDefs, + NewConstraints: constraints, + } + } +| "ADD" Constraint + { + constraint := $2.(*ast.Constraint) + $$ = &ast.AlterTableSpec{ + Tp: ast.AlterTableAddConstraint, + Constraint: constraint, + } + } +| "DROP" ColumnKeywordOpt IfExists ColumnName RestrictOrCascadeOpt + { + $$ = &ast.AlterTableSpec{ + IfExists: $3.(bool), + Tp: ast.AlterTableDropColumn, + OldColumnName: $4.(*ast.ColumnName), + } + } +| "DROP" "PRIMARY" "KEY" + { + $$ = &ast.AlterTableSpec{Tp: ast.AlterTableDropPrimaryKey} + } +| "IMPORT" "TABLESPACE" + { + ret := &ast.AlterTableSpec{ + Tp: ast.AlterTableImportTablespace, + } + $$ = ret + yylex.AppendError(yylex.Errorf("The IMPORT 
TABLESPACE clause is parsed but ignored by all storage engines.")) + parser.lastErrorAsWarn() + } +| "DISCARD" "TABLESPACE" + { + ret := &ast.AlterTableSpec{ + Tp: ast.AlterTableDiscardTablespace, + } + $$ = ret + yylex.AppendError(yylex.Errorf("The DISCARD TABLESPACE clause is parsed but ignored by all storage engines.")) + parser.lastErrorAsWarn() + } +| "DROP" KeyOrIndex IfExists Identifier + { + $$ = &ast.AlterTableSpec{ + IfExists: $3.(bool), + Tp: ast.AlterTableDropIndex, + Name: $4, + } + } +| "DROP" "FOREIGN" "KEY" IfExists Symbol + { + $$ = &ast.AlterTableSpec{ + IfExists: $4.(bool), + Tp: ast.AlterTableDropForeignKey, + Name: $5.(string), + } + } +| "DISABLE" "KEYS" + { + $$ = &ast.AlterTableSpec{ + Tp: ast.AlterTableDisableKeys, + } + } +| "ENABLE" "KEYS" + { + $$ = &ast.AlterTableSpec{ + Tp: ast.AlterTableEnableKeys, + } + } +| "MODIFY" ColumnKeywordOpt IfExists ColumnDef + { + $$ = &ast.AlterTableSpec{ + IfExists: $3.(bool), + Tp: ast.AlterTableModifyColumn, + NewColumns: []*ast.ColumnDef{$4.(*ast.ColumnDef)}, + } + } +| "CHANGE" ColumnKeywordOpt IfExists ColumnName ColumnDef + { + $$ = &ast.AlterTableSpec{ + IfExists: $3.(bool), + Tp: ast.AlterTableChangeColumn, + OldColumnName: $4.(*ast.ColumnName), + NewColumns: []*ast.ColumnDef{$5.(*ast.ColumnDef)}, + } + } +| "ALTER" ColumnKeywordOpt ColumnName "SET" "DEFAULT" SignedLiteral + { + option := &ast.ColumnOption{Expr: $6} + colDef := &ast.ColumnDef{ + Name: $3.(*ast.ColumnName), + Options: []*ast.ColumnOption{option}, + } + $$ = &ast.AlterTableSpec{ + Tp: ast.AlterTableAlterColumn, + NewColumns: []*ast.ColumnDef{colDef}, + } + } +| "ALTER" ColumnKeywordOpt ColumnName "SET" "DEFAULT" '(' Expression ')' + { + option := &ast.ColumnOption{Expr: $7} + colDef := &ast.ColumnDef{ + Name: $3.(*ast.ColumnName), + Options: []*ast.ColumnOption{option}, + } + $$ = &ast.AlterTableSpec{ + Tp: ast.AlterTableAlterColumn, + NewColumns: []*ast.ColumnDef{colDef}, + } + } +| "ALTER" ColumnKeywordOpt ColumnName "DROP" "DEFAULT" + { + colDef := &ast.ColumnDef{ + Name: $3.(*ast.ColumnName), + } + $$ = &ast.AlterTableSpec{ + Tp: ast.AlterTableAlterColumn, + NewColumns: []*ast.ColumnDef{colDef}, + } + } +| "RENAME" "COLUMN" ColumnName "TO" ColumnName + { + $$ = &ast.AlterTableSpec{ + Tp: ast.AlterTableRenameColumn, + OldColumnName: $3.(*ast.ColumnName), + NewColumnName: $5.(*ast.ColumnName), + } + } +| "RENAME" KeyOrIndex Identifier "TO" Identifier + { + $$ = &ast.AlterTableSpec{ + Tp: ast.AlterTableRenameIndex, + FromKey: model.NewCIStr($3), + ToKey: model.NewCIStr($5), + } + } +| "FORCE" + { + // Parse it and ignore it. Just for compatibility. + $$ = &ast.AlterTableSpec{ + Tp: ast.AlterTableForce, + } + } +| "WITH" "VALIDATION" + { + // Parse it and ignore it. Just for compatibility. + $$ = &ast.AlterTableSpec{ + Tp: ast.AlterTableWithValidation, + } + yylex.AppendError(yylex.Errorf("The WITH/WITHOUT VALIDATION clause is parsed but ignored by all storage engines.")) + parser.lastErrorAsWarn() + } +| "WITHOUT" "VALIDATION" + { + // Parse it and ignore it. Just for compatibility. + $$ = &ast.AlterTableSpec{ + Tp: ast.AlterTableWithoutValidation, + } + yylex.AppendError(yylex.Errorf("The WITH/WITHOUT VALIDATION clause is parsed but ignored by all storage engines.")) + parser.lastErrorAsWarn() + } +// Added in MySQL 8.0.13, see: https://dev.mysql.com/doc/refman/8.0/en/keywords.html for details +| "SECONDARY_LOAD" + { + // Parse it and ignore it. Just for compatibility. 
+ $$ = &ast.AlterTableSpec{ + Tp: ast.AlterTableSecondaryLoad, + } + yylex.AppendError(yylex.Errorf("The SECONDARY_LOAD clause is parsed but not implement yet.")) + parser.lastErrorAsWarn() + } +// Added in MySQL 8.0.13, see: https://dev.mysql.com/doc/refman/8.0/en/keywords.html for details +| "SECONDARY_UNLOAD" + { + // Parse it and ignore it. Just for compatibility. + $$ = &ast.AlterTableSpec{ + Tp: ast.AlterTableSecondaryUnload, + } + yylex.AppendError(yylex.Errorf("The SECONDARY_UNLOAD VALIDATION clause is parsed but not implement yet.")) + parser.lastErrorAsWarn() + } +| "ALTER" "CHECK" Identifier EnforcedOrNot + { + // Parse it and ignore it. Just for compatibility. + c := &ast.Constraint{ + Name: $3, + Enforced: $4.(bool), + } + $$ = &ast.AlterTableSpec{ + Tp: ast.AlterTableAlterCheck, + Constraint: c, + } + yylex.AppendError(yylex.Errorf("The ALTER CHECK clause is parsed but not implemented yet.")) + parser.lastErrorAsWarn() + } +| "DROP" "CHECK" Identifier + { + // Parse it and ignore it. Just for compatibility. + c := &ast.Constraint{ + Name: $3, + } + $$ = &ast.AlterTableSpec{ + Tp: ast.AlterTableDropCheck, + Constraint: c, + } + yylex.AppendError(yylex.Errorf("The DROP CHECK clause is parsed but not implemented yet.")) + parser.lastErrorAsWarn() + } +| "ALTER" "INDEX" Identifier IndexInvisible + { + $$ = &ast.AlterTableSpec{ + Tp: ast.AlterTableIndexInvisible, + Name: $3, + Visibility: $4.(ast.IndexVisibility), + } + } + +WithValidationOpt: + { + $$ = true + } +| WithValidation + { + $$ = $1 + } + +WithValidation: + "WITH" "VALIDATION" + { + $$ = true + } +| "WITHOUT" "VALIDATION" + { + $$ = false + } + +KeyOrIndex: "KEY" | "INDEX" + + +KeyOrIndexOpt: + {} +| KeyOrIndex + +ColumnKeywordOpt: + {} +| "COLUMN" + +AlterTableSpecListOpt: + /* empty */ + { + $$ = make([]*ast.AlterTableSpec, 0, 1) + } +| AlterTableSpecList + { + $$ = $1 + } + +AlterTableSpecList: + AlterTableSpec + { + $$ = []*ast.AlterTableSpec{$1.(*ast.AlterTableSpec)} + } +| AlterTableSpecList ',' AlterTableSpec + { + $$ = append($1.([]*ast.AlterTableSpec), $3.(*ast.AlterTableSpec)) + } + +ConstraintKeywordOpt: + { + $$ = nil + } +| "CONSTRAINT" + { + $$ = nil + } +| "CONSTRAINT" Symbol + { + $$ = $2.(string) + } + +Symbol: + Identifier + { + $$ = $1 + } + +/*******************************************************************************************/ + +AnalyzeTableStmt: + "ANALYZE" "TABLE" TableNameList + { + $$ = &ast.AnalyzeTableStmt{TableNames: $3.([]*ast.TableName)} + } + +/*******************************************************************************************/ +Assignment: + ColumnName eq ExprOrDefault + { + $$ = &ast.Assignment{Column: $1.(*ast.ColumnName), Expr:$3} + } + +AssignmentList: + Assignment + { + $$ = []*ast.Assignment{$1.(*ast.Assignment)} + } +| AssignmentList ',' Assignment + { + $$ = append($1.([]*ast.Assignment), $3.(*ast.Assignment)) + } + +AssignmentListOpt: + /* EMPTY */ + { + $$ = []*ast.Assignment{} + } +| AssignmentList + +BeginTransactionStmt: + "BEGIN" + { + $$ = &ast.BeginStmt{} + } +| "START" "TRANSACTION" + { + $$ = &ast.BeginStmt{} + } + +ColumnDefList: + ColumnDef + { + $$ = []*ast.ColumnDef{$1.(*ast.ColumnDef)} + } +| ColumnDefList ',' ColumnDef + { + $$ = append($1.([]*ast.ColumnDef), $3.(*ast.ColumnDef)) + } + +ColumnDef: + ColumnName Type ColumnOptionListOpt + { + colDef := &ast.ColumnDef{Name: $1.(*ast.ColumnName), Tp: $2.(*types.FieldType), Options: $3.([]*ast.ColumnOption)} + if !colDef.Validate() { + yylex.AppendError(yylex.Errorf("Invalid column definition")) + return 
1 + } + $$ = colDef + } +| ColumnName "SERIAL" ColumnOptionListOpt + { + // TODO: check flen 0 + tp := types.NewFieldType(mysql.TypeLonglong) + options := []*ast.ColumnOption{{Tp: ast.ColumnOptionNotNull}, {Tp: ast.ColumnOptionAutoIncrement}, {Tp: ast.ColumnOptionUniqKey}} + options = append(options, $3.([]*ast.ColumnOption)...) + tp.Flag |= mysql.UnsignedFlag + colDef := &ast.ColumnDef{Name: $1.(*ast.ColumnName), Tp: tp, Options: options} + if !colDef.Validate() { + yylex.AppendError(yylex.Errorf("Invalid column definition")) + return 1 + } + $$ = colDef + } + +ColumnName: + Identifier + { + $$ = &ast.ColumnName{Name: model.NewCIStr($1)} + } +| Identifier '.' Identifier + { + $$ = &ast.ColumnName{Table: model.NewCIStr($1), Name: model.NewCIStr($3)} + } +| Identifier '.' Identifier '.' Identifier + { + $$ = &ast.ColumnName{Schema: model.NewCIStr($1), Table: model.NewCIStr($3), Name: model.NewCIStr($5)} + } + +ColumnNameList: + ColumnName + { + $$ = []*ast.ColumnName{$1.(*ast.ColumnName)} + } +| ColumnNameList ',' ColumnName + { + $$ = append($1.([]*ast.ColumnName), $3.(*ast.ColumnName)) + } + +ColumnNameListOpt: + /* EMPTY */ + { + $$ = []*ast.ColumnName{} + } +| ColumnNameList + { + $$ = $1.([]*ast.ColumnName) + } + +CommitStmt: + "COMMIT" + { + $$ = &ast.CommitStmt{} + } + +PrimaryOpt: + {} +| "PRIMARY" + +EnforcedOrNot: + "ENFORCED" + { + $$ = true + } +| "NOT" "ENFORCED" + { + $$ = false + } + +EnforcedOrNotOpt: + { + $$ = true + } %prec lowerThanNot +| EnforcedOrNot + { + $$ = $1 + } + +EnforcedOrNotOrNotNullOpt: +// This branch is needed to workaround the need of a lookahead of 2 for the grammar: +// +// { [NOT] NULL | CHECK(...) [NOT] ENFORCED } ... + "NOT" "NULL" + { + $$ = 0 + } +| EnforcedOrNotOpt + { + if ($1.(bool)) { + $$ = 1 + } else { + $$ = 2 + } + } + +ColumnOption: + "NOT" "NULL" + { + $$ = &ast.ColumnOption{Tp: ast.ColumnOptionNotNull} + } +| "NULL" + { + $$ = &ast.ColumnOption{Tp: ast.ColumnOptionNull} + } +| "AUTO_INCREMENT" + { + $$ = &ast.ColumnOption{Tp: ast.ColumnOptionAutoIncrement} + } +| PrimaryOpt "KEY" + { + // KEY is normally a synonym for INDEX. The key attribute PRIMARY KEY + // can also be specified as just KEY when given in a column definition. + // See http://dev.mysql.com/doc/refman/5.7/en/create-table.html + $$ = &ast.ColumnOption{Tp: ast.ColumnOptionPrimaryKey} + } +| "UNIQUE" %prec lowerThanKey + { + $$ = &ast.ColumnOption{Tp: ast.ColumnOptionUniqKey} + } +| "UNIQUE" "KEY" + { + $$ = &ast.ColumnOption{Tp: ast.ColumnOptionUniqKey} + } +| "DEFAULT" DefaultValueExpr + { + $$ = &ast.ColumnOption{Tp: ast.ColumnOptionDefaultValue, Expr: $2} + } +| "SERIAL" "DEFAULT" "VALUE" + { + $$ = []*ast.ColumnOption{{Tp: ast.ColumnOptionNotNull}, {Tp: ast.ColumnOptionAutoIncrement}, {Tp: ast.ColumnOptionUniqKey}} + } +| "ON" "UPDATE" NowSymOptionFraction + { + $$ = &ast.ColumnOption{Tp: ast.ColumnOptionOnUpdate, Expr: $3} + } +| "COMMENT" stringLit + { + $$ = &ast.ColumnOption{Tp: ast.ColumnOptionComment, Expr: ast.NewValueExpr($2)} + } +| ConstraintKeywordOpt "CHECK" '(' Expression ')' EnforcedOrNotOrNotNullOpt + { + // See https://dev.mysql.com/doc/refman/5.7/en/create-table.html + // The CHECK clause is parsed but ignored by all storage engines. + // See the branch named `EnforcedOrNotOrNotNullOpt`. 
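+ // For illustration only (hypothetical column definitions, not quoted from the docs),
+ // the suffixes handled by the switch below are:
+ //   c INT CHECK (c > 0) NOT NULL       -> case 0: CHECK plus an extra NOT NULL option
+ //   c INT CHECK (c > 0) [ENFORCED]     -> case 1: enforced CHECK (the default)
+ //   c INT CHECK (c > 0) NOT ENFORCED   -> case 2: unenforced CHECK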
+ + optionCheck := &ast.ColumnOption{ + Tp: ast.ColumnOptionCheck, + Expr: $4, + Enforced: true, + } + switch $6.(int) { + case 0: + $$ = []*ast.ColumnOption{optionCheck, {Tp: ast.ColumnOptionNotNull}} + case 1: + optionCheck.Enforced = true + $$ = optionCheck + case 2: + optionCheck.Enforced = false + $$ = optionCheck + default: + } + yylex.AppendError(yylex.Errorf("The CHECK clause is parsed but ignored by all storage engines.")) + parser.lastErrorAsWarn() + } +| GeneratedAlways "AS" '(' Expression ')' VirtualOrStored + { + startOffset := parser.startOffset(&yyS[yypt-2]) + endOffset := parser.endOffset(&yyS[yypt-1]) + expr := $4 + expr.SetText(parser.src[startOffset:endOffset]) + + $$ = &ast.ColumnOption{ + Tp: ast.ColumnOptionGenerated, + Expr: expr, + Stored: $6.(bool), + } + } +| "COLLATE" CollationName + { + $$ = &ast.ColumnOption{Tp: ast.ColumnOptionCollate, StrValue: $2.(string)} + } +| "COLUMN_FORMAT" ColumnFormat + { + $$ = &ast.ColumnOption{Tp: ast.ColumnOptionColumnFormat, StrValue: $2.(string)} + } +| "STORAGE" StorageMedia + { + $$ = &ast.ColumnOption{Tp: ast.ColumnOptionStorage, StrValue: $2} + yylex.AppendError(yylex.Errorf("The STORAGE clause is parsed but ignored by all storage engines.")) + parser.lastErrorAsWarn() + } +| "AUTO_RANDOM" OptFieldLen + { + $$ = &ast.ColumnOption{Tp: ast.ColumnOptionAutoRandom, AutoRandomBitLength: $2.(int)} + } + +StorageMedia: + "DEFAULT" | "DISK" | "MEMORY" + +ColumnFormat: + "DEFAULT" + { + $$ = "DEFAULT" + } +| "FIXED" + { + $$ = "FIXED" + } +| "DYNAMIC" + { + $$ = "DYNAMIC" + } + +GeneratedAlways: | "GENERATED" "ALWAYS" + +VirtualOrStored: + { + $$ = false + } +| "VIRTUAL" + { + $$ = false + } +| "STORED" + { + $$ = true + } + +ColumnOptionList: + ColumnOption + { + if columnOption,ok := $1.(*ast.ColumnOption); ok { + $$ = []*ast.ColumnOption{columnOption} + } else { + $$ = $1 + } + } +| ColumnOptionList ColumnOption + { + if columnOption,ok := $2.(*ast.ColumnOption); ok { + $$ = append($1.([]*ast.ColumnOption), columnOption) + } else { + $$ = append($1.([]*ast.ColumnOption), $2.([]*ast.ColumnOption)...) 
+ } + } + +ColumnOptionListOpt: + { + $$ = []*ast.ColumnOption{} + } +| ColumnOptionList + { + $$ = $1.([]*ast.ColumnOption) + } + +ConstraintElem: + "PRIMARY" "KEY" IndexNameAndTypeOpt '(' IndexPartSpecificationList ')' IndexOptionList + { + c := &ast.Constraint{ + Tp: ast.ConstraintPrimaryKey, + Keys: $5.([]*ast.IndexPartSpecification), + Name: $3.([]interface{})[0].(string), + } + if $7 != nil { + c.Option = $7.(*ast.IndexOption) + } + if indexType := $3.([]interface{})[1]; indexType != nil { + if c.Option == nil { + c.Option = &ast.IndexOption{} + } + c.Option.Tp = indexType.(model.IndexType) + } + $$ = c + } +| "FULLTEXT" KeyOrIndexOpt IndexName '(' IndexPartSpecificationList ')' IndexOptionList + { + c := &ast.Constraint{ + Tp: ast.ConstraintFulltext, + Keys: $5.([]*ast.IndexPartSpecification), + Name: $3.(string), + } + if $7 != nil { + c.Option = $7.(*ast.IndexOption) + } + $$ = c + } +| KeyOrIndex IfNotExists IndexNameAndTypeOpt '(' IndexPartSpecificationList ')' IndexOptionList + { + c := &ast.Constraint{ + IfNotExists: $2.(bool), + Tp: ast.ConstraintIndex, + Keys: $5.([]*ast.IndexPartSpecification), + } + if $7 != nil { + c.Option = $7.(*ast.IndexOption) + } + c.Name = $3.([]interface{})[0].(string) + if indexType := $3.([]interface{})[1]; indexType != nil { + if c.Option == nil { + c.Option = &ast.IndexOption{} + } + c.Option.Tp = indexType.(model.IndexType) + } + $$ = c + } +| "UNIQUE" KeyOrIndexOpt IndexNameAndTypeOpt '(' IndexPartSpecificationList ')' IndexOptionList + { + c := &ast.Constraint{ + Tp: ast.ConstraintUniq, + Keys: $5.([]*ast.IndexPartSpecification), + } + if $7 != nil { + c.Option = $7.(*ast.IndexOption) + } + c.Name = $3.([]interface{})[0].(string) + if indexType := $3.([]interface{})[1]; indexType != nil { + if c.Option == nil { + c.Option = &ast.IndexOption{} + } + c.Option.Tp = indexType.(model.IndexType) + } + $$ = c + } +| "CHECK" '(' Expression ')' EnforcedOrNotOpt + { + $$ = &ast.Constraint{ + Tp: ast.ConstraintCheck, + Expr: $3.(ast.ExprNode), + Enforced: $5.(bool), + } + yylex.AppendError(yylex.Errorf("The CHECK clause is parsed but ignored by all storage engines.")) + parser.lastErrorAsWarn() + } + +/* + * The DEFAULT clause specifies a default value for a column. + * With one exception, the default value must be a constant; + * it cannot be a function or an expression. This means, for example, + * that you cannot set the default for a date column to be the value of + * a function such as NOW() or CURRENT_DATE. The exception is that you + * can specify CURRENT_TIMESTAMP as the default for a TIMESTAMP or DATETIME column. 
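+ *
+ * For illustration (hypothetical column definitions, not quoted from the manual):
+ *   ts TIMESTAMP DEFAULT CURRENT_TIMESTAMP   -- allowed: the documented exception
+ *   d  DATE      DEFAULT CURDATE()           -- not allowed: the default must be a constant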
+ * + * See http://dev.mysql.com/doc/refman/5.7/en/create-table.html + * https://github.com/mysql/mysql-server/blob/5.7/sql/sql_yacc.yy#L6832 + */ +DefaultValueExpr: + NowSymOptionFraction | SignedLiteral + +NowSymOptionFraction: + NowSym + { + $$ = &ast.FuncCallExpr{FnName: model.NewCIStr("CURRENT_TIMESTAMP")} + } +| NowSymFunc '(' ')' + { + $$ = &ast.FuncCallExpr{FnName: model.NewCIStr("CURRENT_TIMESTAMP")} + } +| NowSymFunc '(' NUM ')' + { + $$ = &ast.FuncCallExpr{FnName: model.NewCIStr("CURRENT_TIMESTAMP"), Args: []ast.ExprNode{ast.NewValueExpr($3)}} + } + +/* +* See https://dev.mysql.com/doc/refman/5.7/en/date-and-time-functions.html#function_localtime +* TODO: Process other three keywords +*/ +NowSymFunc: + "CURRENT_TIMESTAMP" | "LOCALTIME" | "LOCALTIMESTAMP" | builtinNow +NowSym: + "CURRENT_TIMESTAMP" | "LOCALTIME" | "LOCALTIMESTAMP" + + +SignedLiteral: + Literal + { + $$ = ast.NewValueExpr($1) + } +| '+' NumLiteral + { + $$ = &ast.UnaryOperationExpr{Op: opcode.Plus, V: ast.NewValueExpr($2)} + } +| '-' NumLiteral + { + $$ = &ast.UnaryOperationExpr{Op: opcode.Minus, V: ast.NewValueExpr($2)} + } + +NumLiteral: + intLit +| floatLit +| decLit + +/**************************************CreateIndexStmt*************************************** + * See https://dev.mysql.com/doc/refman/8.0/en/create-index.html + * + * TYPE type_name is recognized as a synonym for USING type_name. However, USING is the preferred form. + * + * CREATE [UNIQUE | FULLTEXT | SPATIAL] INDEX index_name + * [index_type] + * ON tbl_name (key_part,...) + * [index_option] + * [algorithm_option | lock_option] ... + * + * key_part: {col_name [(length)] | (expr)} [ASC | DESC] + * + * index_option: + * KEY_BLOCK_SIZE [=] value + * | index_type + * | WITH PARSER parser_name + * | COMMENT 'string' + * | {VISIBLE | INVISIBLE} + * + * index_type: + * USING {BTREE | HASH} + * + * algorithm_option: + * ALGORITHM [=] {DEFAULT | INPLACE | COPY} + * + * lock_option: + * LOCK [=] {DEFAULT | NONE | SHARED | EXCLUSIVE} + *******************************************************************************************/ +CreateIndexStmt: + "CREATE" IndexKeyTypeOpt "INDEX" IfNotExists Identifier IndexTypeOpt "ON" TableName '(' IndexPartSpecificationList ')' IndexOptionList + { + var indexOption *ast.IndexOption + if $12 != nil { + indexOption = $12.(*ast.IndexOption) + if indexOption.Tp == model.IndexTypeInvalid { + if $6 != nil { + indexOption.Tp = $6.(model.IndexType) + } + } + } else { + indexOption = &ast.IndexOption{} + if $6 != nil { + indexOption.Tp = $6.(model.IndexType) + } + } + $$ = &ast.CreateIndexStmt{ + IfNotExists: $4.(bool), + IndexName: $5, + Table: $8.(*ast.TableName), + IndexPartSpecifications: $10.([]*ast.IndexPartSpecification), + IndexOption: indexOption, + KeyType: $2.(ast.IndexKeyType), + } + } + +IndexPartSpecificationListOpt: + { + $$ = ([]*ast.IndexPartSpecification)(nil) + } +| '(' IndexPartSpecificationList ')' + { + $$ = $2 + } + +IndexPartSpecificationList: + IndexPartSpecification + { + $$ = []*ast.IndexPartSpecification{$1.(*ast.IndexPartSpecification)} + } +| IndexPartSpecificationList ',' IndexPartSpecification + { + $$ = append($1.([]*ast.IndexPartSpecification), $3.(*ast.IndexPartSpecification)) + } + +IndexPartSpecification: + ColumnName OptFieldLen Order + { + // Order is parsed but just ignored as MySQL did. 
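+ // e.g. for a key part written as `c1(10) DESC` (hypothetical), only the column and
+ // the prefix length are kept in the AST; the ASC/DESC keyword is dropped here.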
+ $$ = &ast.IndexPartSpecification{Column: $1.(*ast.ColumnName), Length: $2.(int)} + } +| '(' Expression ')' Order + { + $$ = &ast.IndexPartSpecification{Expr: $2} + } + +IndexKeyTypeOpt: + { + $$ = ast.IndexKeyTypeNone + } +| "UNIQUE" + { + $$ = ast.IndexKeyTypeUnique + } +| "SPATIAL" + { + $$ = ast.IndexKeyTypeSpatial + } +| "FULLTEXT" + { + $$ = ast.IndexKeyTypeFullText + } + +/******************************************************************* + * + * Create Database Statement + * CREATE {DATABASE | SCHEMA} [IF NOT EXISTS] db_name + * [create_specification] ... + * + * create_specification: + * [DEFAULT] CHARACTER SET [=] charset_name + * | [DEFAULT] COLLATE [=] collation_name + * | [DEFAULT] ENCRYPTION [=] {'Y' | 'N'} + *******************************************************************/ +CreateDatabaseStmt: + "CREATE" DatabaseSym IfNotExists DBName DatabaseOptionListOpt + { + $$ = &ast.CreateDatabaseStmt{ + IfNotExists: $3.(bool), + Name: $4.(string), + Options: $5.([]*ast.DatabaseOption), + } + } + +DBName: + Identifier + { + $$ = $1 + } + +DatabaseOption: + DefaultKwdOpt CharsetKw EqOpt CharsetName + { + $$ = &ast.DatabaseOption{Tp: ast.DatabaseOptionCharset, Value: $4.(string)} + } +| DefaultKwdOpt "COLLATE" EqOpt CollationName + { + $$ = &ast.DatabaseOption{Tp: ast.DatabaseOptionCollate, Value: $4.(string)} + } +| DefaultKwdOpt "ENCRYPTION" EqOpt stringLit + { + $$ = &ast.DatabaseOption{Tp: ast.DatabaseOptionEncryption, Value: $4} + } + +DatabaseOptionListOpt: + { + $$ = []*ast.DatabaseOption{} + } +| DatabaseOptionList + +DatabaseOptionList: + DatabaseOption + { + $$ = []*ast.DatabaseOption{$1.(*ast.DatabaseOption)} + } +| DatabaseOptionList DatabaseOption + { + $$ = append($1.([]*ast.DatabaseOption), $2.(*ast.DatabaseOption)) + } + +/******************************************************************* + * + * Create Table Statement + * + * Example: + * CREATE TABLE Persons + * ( + * P_Id int NOT NULL, + * LastName varchar(255) NOT NULL, + * FirstName varchar(255), + * Address varchar(255), + * City varchar(255), + * PRIMARY KEY (P_Id) + * ) + *******************************************************************/ + +CreateTableStmt: + "CREATE" OptTemporary "TABLE" IfNotExists TableName TableElementListOpt AsOpt + { + stmt := $6.(*ast.CreateTableStmt) + stmt.Table = $5.(*ast.TableName) + stmt.IfNotExists = $4.(bool) + stmt.IsTemporary = $2.(bool) + $$ = stmt + } +| "CREATE" OptTemporary "TABLE" IfNotExists TableName LikeTableWithOrWithoutParen + { + $$ = &ast.CreateTableStmt{ + Table: $5.(*ast.TableName), + ReferTable: $6.(*ast.TableName), + IfNotExists: $4.(bool), + IsTemporary: $2.(bool), + } + } + +DefaultKwdOpt: + %prec lowerThanCharsetKwd + {} +| "DEFAULT" + +AsOpt: + {} +| "AS" + {} + +LikeTableWithOrWithoutParen: + "LIKE" TableName + { + $$ = $2 + } +| + '(' "LIKE" TableName ')' + { + $$ = $3 + } + +/******************************************************************* + * + * Delete Statement + * + *******************************************************************/ +DeleteFromStmt: + "DELETE" PriorityOpt QuickOptional "FROM" TableName TableAsNameOpt IndexHintListOpt WhereClauseOptional OrderByOptional LimitClause + { + // Single Table + tn := $5.(*ast.TableName) + tn.IndexHints = $7.([]*ast.IndexHint) + join := &ast.Join{Left: &ast.TableSource{Source: tn, AsName: $6.(model.CIStr)}, Right: nil} + x := &ast.DeleteStmt{ + TableRefs: &ast.TableRefsClause{TableRefs: join}, + Priority: $2.(mysql.PriorityEnum), + Quick: $3.(bool), + } + if $8 != nil { + x.Where = $8.(ast.ExprNode) + 
} + if $9 != nil { + x.Order = $9.(*ast.OrderByClause) + } + if $10 != nil { + x.Limit = $10.(*ast.Limit) + } + + $$ = x + } + +DatabaseSym: +"DATABASE" + +DropDatabaseStmt: + "DROP" DatabaseSym IfExists DBName + { + $$ = &ast.DropDatabaseStmt{IfExists: $3.(bool), Name: $4.(string)} + } + +/****************************************************************** + * Drop Index Statement + * See https://dev.mysql.com/doc/refman/8.0/en/drop-index.html + * + * DROP INDEX index_name ON tbl_name + * [algorithm_option | lock_option] ... + * + * algorithm_option: + * ALGORITHM [=] {DEFAULT|INPLACE|COPY} + * + * lock_option: + * LOCK [=] {DEFAULT|NONE|SHARED|EXCLUSIVE} + ******************************************************************/ +DropIndexStmt: + "DROP" "INDEX" IfExists Identifier "ON" TableName + { + $$ = &ast.DropIndexStmt{IfExists: $3.(bool), IndexName: $4, Table: $6.(*ast.TableName)} + } + +DropTableStmt: + "DROP" OptTemporary TableOrTables IfExists TableNameList RestrictOrCascadeOpt + { + $$ = &ast.DropTableStmt{IfExists: $4.(bool), Tables: $5.([]*ast.TableName), IsView: false, IsTemporary: $2.(bool)} + } + +OptTemporary: + /* empty */ { $$ = false; } + | "TEMPORARY" + { + $$ = true + yylex.AppendError(yylex.Errorf("TiDB doesn't support TEMPORARY TABLE, TEMPORARY will be parsed but ignored.")) + parser.lastErrorAsWarn() + } + +RestrictOrCascadeOpt: + {} +| "RESTRICT" +| "CASCADE" + +TableOrTables: + "TABLE" +| "TABLES" + +EqOpt: + {} +| eq + +EmptyStmt: + /* EMPTY */ + { + $$ = nil + } + +ExplainSym: +"EXPLAIN" | "DESCRIBE" | "DESC" + +ExplainStmt: + ExplainSym ExplainableStmt + { + $$ = &ast.ExplainStmt{ + Stmt: $2, + Format: "row", + } + } +| ExplainSym "FORMAT" "=" stringLit ExplainableStmt + { + $$ = &ast.ExplainStmt{ + Stmt: $5, + Format: $4, + } + } +| ExplainSym "FORMAT" "=" ExplainFormatType ExplainableStmt + { + $$ = &ast.ExplainStmt{ + Stmt: $5, + Format: $4.(string), + } + } + +ExplainFormatType: + "TRADITIONAL" + { + $$ = "row" + } +| "JSON" + { + $$ = "json" + } + +LengthNum: + NUM + { + $$ = getUint64FromNUM($1) + } + +NUM: + intLit + +Expression: + singleAtIdentifier assignmentEq Expression %prec assignmentEq + { + v := $1 + v = strings.TrimPrefix(v, "@") + $$ = &ast.VariableExpr{ + Name: v, + IsGlobal: false, + IsSystem: false, + Value: $3, + } + } +| Expression logOr Expression %prec pipes + { + $$ = &ast.BinaryOperationExpr{Op: opcode.LogicOr, L: $1, R: $3} + } +| Expression "XOR" Expression %prec xor + { + $$ = &ast.BinaryOperationExpr{Op: opcode.LogicXor, L: $1, R: $3} + } +| Expression logAnd Expression %prec andand + { + $$ = &ast.BinaryOperationExpr{Op: opcode.LogicAnd, L: $1, R: $3} + } +| "NOT" Expression %prec not + { + $$ = &ast.UnaryOperationExpr{Op: opcode.Not, V: $2} + } +| BoolPri IsOrNotOp "UNKNOWN" %prec is + { + /* https://dev.mysql.com/doc/refman/5.7/en/comparison-operators.html#operator_is */ + $$ = &ast.IsNullExpr{Expr: $1, Not: !$2.(bool)} + } +| BoolPri + +logOr: + pipesAsOr +| "OR" + +logAnd: +"&&" | "AND" + +ExpressionList: + Expression + { + $$ = []ast.ExprNode{$1} + } +| ExpressionList ',' Expression + { + $$ = append($1.([]ast.ExprNode), $3) + } + +ExpressionListOpt: + { + $$ = []ast.ExprNode{} + } +| ExpressionList + +FuncDatetimePrecListOpt: + { + $$ = []ast.ExprNode{} + } +| FuncDatetimePrecList + { + $$ = $1 + } + +FuncDatetimePrecList: + intLit + { + expr := ast.NewValueExpr($1) + $$ = []ast.ExprNode{expr} + } + +BoolPri: + BoolPri IsOrNotOp "NULL" %prec is + { + $$ = &ast.IsNullExpr{Expr: $1, Not: !$2.(bool)} + } +| BoolPri CompareOp 
PredicateExpr %prec eq + { + $$ = &ast.BinaryOperationExpr{Op: $2.(opcode.Op), L: $1, R: $3} + } +| BoolPri CompareOp singleAtIdentifier assignmentEq PredicateExpr %prec assignmentEq + { + v := $3 + v = strings.TrimPrefix(v, "@") + variable := &ast.VariableExpr{ + Name: v, + IsGlobal: false, + IsSystem: false, + Value: $5, + } + $$ = &ast.BinaryOperationExpr{Op: $2.(opcode.Op), L: $1, R: variable} + } +| PredicateExpr + +CompareOp: + ">=" + { + $$ = opcode.GE + } +| '>' + { + $$ = opcode.GT + } +| "<=" + { + $$ = opcode.LE + } +| '<' + { + $$ = opcode.LT + } +| "!=" + { + $$ = opcode.NE + } +| "<>" + { + $$ = opcode.NE + } +| "=" + { + $$ = opcode.EQ + } +| "<=>" + { + $$ = opcode.NullEQ + } + +BetweenOrNotOp: + "BETWEEN" + { + $$ = true + } +| "NOT" "BETWEEN" + { + $$ = false + } + +IsOrNotOp: + "IS" + { + $$ = true + } +| "IS" "NOT" + { + $$ = false + } + +InOrNotOp: + "IN" + { + $$ = true + } +| "NOT" "IN" + { + $$ = false + } + +AnyOrAll: + "ANY" + { + $$ = false + } +| "SOME" + { + $$ = false + } +| "ALL" + { + $$ = true + } + +PredicateExpr: + BitExpr InOrNotOp '(' ExpressionList ')' + { + $$ = &ast.PatternInExpr{Expr: $1, Not: !$2.(bool), List: $4.([]ast.ExprNode)} + } +| BitExpr BetweenOrNotOp BitExpr "AND" PredicateExpr + { + $$ = &ast.BetweenExpr{ + Expr: $1, + Left: $3, + Right: $5, + Not: !$2.(bool), + } + } +| BitExpr + +LikeEscapeOpt: + %prec empty + { + $$ = "\\" + } +| "ESCAPE" stringLit + { + $$ = $2 + } + +Field: + '*' + { + $$ = &ast.SelectField{WildCard: &ast.WildCardField{}} + } +| Identifier '.' '*' + { + wildCard := &ast.WildCardField{Table: model.NewCIStr($1)} + $$ = &ast.SelectField{WildCard: wildCard} + } +| Identifier '.' Identifier '.' '*' + { + wildCard := &ast.WildCardField{Schema: model.NewCIStr($1), Table: model.NewCIStr($3)} + $$ = &ast.SelectField{WildCard: wildCard} + } +| Expression FieldAsNameOpt + { + expr := $1 + asName := $2.(string) + $$ = &ast.SelectField{Expr: expr, AsName: model.NewCIStr(asName)} + } +| '{' Identifier Expression '}' FieldAsNameOpt + { + /* + * ODBC escape syntax. 
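+ * e.g. `SELECT {d '2017-01-01'} FROM t` (hypothetical): the braces and the leading
+ * identifier are accepted for compatibility and only the inner expression is kept.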
+ * See https://dev.mysql.com/doc/refman/5.7/en/expressions.html
+ */
+ expr := $3
+ asName := $5.(string)
+ $$ = &ast.SelectField{Expr: expr, AsName: model.NewCIStr(asName)}
+ }
+
+FieldAsNameOpt:
+ /* EMPTY */
+ {
+ $$ = ""
+ }
+| FieldAsName
+ {
+ $$ = $1
+ }
+
+FieldAsName:
+ Identifier
+ {
+ $$ = $1
+ }
+| "AS" Identifier
+ {
+ $$ = $2
+ }
+| stringLit
+ {
+ $$ = $1
+ }
+| "AS" stringLit
+ {
+ $$ = $2
+ }
+
+FieldList:
+ Field
+ {
+ field := $1.(*ast.SelectField)
+ field.Offset = parser.startOffset(&yyS[yypt])
+ $$ = []*ast.SelectField{field}
+ }
+| FieldList ',' Field
+ {
+
+ fl := $1.([]*ast.SelectField)
+ last := fl[len(fl)-1]
+ if last.Expr != nil && last.AsName.O == "" {
+ lastEnd := parser.endOffset(&yyS[yypt-1])
+ last.SetText(parser.src[last.Offset:lastEnd])
+ }
+ newField := $3.(*ast.SelectField)
+ newField.Offset = parser.startOffset(&yyS[yypt])
+ $$ = append(fl, newField)
+ }
+
+GroupByClause:
+ "GROUP" "BY" ByList
+ {
+ $$ = &ast.GroupByClause{Items: $3.([]*ast.ByItem)}
+ }
+
+HavingClause:
+ {
+ $$ = nil
+ }
+| "HAVING" Expression
+ {
+ $$ = &ast.HavingClause{Expr: $2}
+ }
+
+IfExists:
+ {
+ $$ = false
+ }
+| "IF" "EXISTS"
+ {
+ $$ = true
+ }
+
+IfNotExists:
+ {
+ $$ = false
+ }
+| "IF" "NOT" "EXISTS"
+ {
+ $$ = true
+ }
+
+IndexName:
+ {
+ $$ = ""
+ }
+| Identifier
+ {
+ //"index name"
+ $$ = $1
+ }
+
+IndexOptionList:
+ {
+ $$ = nil
+ }
+| IndexOptionList IndexOption
+ {
+ // Merge the options
+ if $1 == nil {
+ $$ = $2
+ } else {
+ opt1 := $1.(*ast.IndexOption)
+ opt2 := $2.(*ast.IndexOption)
+ if len(opt2.Comment) > 0 {
+ opt1.Comment = opt2.Comment
+ } else if opt2.Tp != 0 {
+ opt1.Tp = opt2.Tp
+ } else if opt2.KeyBlockSize > 0 {
+ opt1.KeyBlockSize = opt2.KeyBlockSize
+ } else if len(opt2.ParserName.O) > 0 {
+ opt1.ParserName = opt2.ParserName
+ } else if opt2.Visibility != ast.IndexVisibilityDefault {
+ opt1.Visibility = opt2.Visibility
+ }
+ $$ = opt1
+ }
+ }
+
+IndexOption:
+ "KEY_BLOCK_SIZE" EqOpt LengthNum
+ {
+ $$ = &ast.IndexOption{
+ KeyBlockSize: $3.(uint64),
+ }
+ }
+| IndexType
+ {
+ $$ = &ast.IndexOption {
+ Tp: $1.(model.IndexType),
+ }
+ }
+| "WITH" "PARSER" Identifier
+ {
+ $$ = &ast.IndexOption {
+ ParserName: model.NewCIStr($3),
+ }
+ yylex.AppendError(yylex.Errorf("The WITH PARSER clause is parsed but ignored by all storage engines."))
+ parser.lastErrorAsWarn()
+ }
+| "COMMENT" stringLit
+ {
+ $$ = &ast.IndexOption {
+ Comment: $2,
+ }
+ }
+| IndexInvisible
+ {
+ $$ = &ast.IndexOption {
+ Visibility: $1.(ast.IndexVisibility),
+ }
+ }
+
+/*
+ See: https://github.com/mysql/mysql-server/blob/8.0/sql/sql_yacc.yy#L7179
+
+ The syntax for defining an index is:
+
+ ... INDEX [index_name] [USING|TYPE] ...
+
+ The problem is that whereas USING is a reserved word, TYPE is not. We can
+ still handle it if an index name is supplied, i.e.:
+
+ ... INDEX type TYPE ...
+
+ here the index's name is unambiguously 'type', but for this:
+
+ ... INDEX TYPE ...
+
+ it's impossible to know what this actually means - is 'type' the name or the
+ type? For this reason we accept the TYPE syntax only if a name is supplied. 
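+
+ For illustration (hypothetical definitions, not from the MySQL source):
+
+   ... INDEX idx TYPE BTREE ...   -- accepted: 'idx' is the name, BTREE the index type
+   ... INDEX TYPE BTREE ...       -- not accepted here: a name must precede TYPE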
+*/ +IndexNameAndTypeOpt: + IndexName + { + $$ = []interface{}{$1, nil} + } +| IndexName "USING" IndexTypeName + { + $$ = []interface{}{$1, $3} + } +| Identifier "TYPE" IndexTypeName + { + $$ = []interface{}{$1, $3} + } + +IndexTypeOpt: + { + $$ = nil + } +| IndexType + { + $$ = $1 + } + +IndexType: + "USING" IndexTypeName + { + $$ = $2 + } +| "TYPE" IndexTypeName + { + $$ = $2 + } + +IndexTypeName: + "BTREE" + { + $$ = model.IndexTypeBtree + } + | "HASH" + { + $$ = model.IndexTypeHash + } + | "RTREE" + { + $$ = model.IndexTypeRtree + } + +IndexInvisible: + "VISIBLE" + { + $$ = ast.IndexVisibilityVisible + } +| "INVISIBLE" + { + $$ = ast.IndexVisibilityInvisible + } + +/**********************************Identifier********************************************/ +Identifier: +identifier | UnReservedKeyword | NotKeywordToken | TiDBKeyword + +UnReservedKeyword: + "ACTION" | "ADVISE" |"ASCII" | "AUTO_INCREMENT" | "AFTER" | "ALWAYS" | "AVG" | "BEGIN" | "BIT" | "BOOL" | "BOOLEAN" | "BTREE" | "BYTE" | "CAPTURE" |"CLEANUP" | "CHARSET" +| "COLUMNS" | "COMMIT" | "COMPACT" | "COMPRESSED" | "CONSISTENT" | "CURRENT" | "DATA" | "DATE" %prec lowerThanStringLitToken| "DATETIME" | "DAY" | "DEALLOCATE" | "DO" | "DUPLICATE" +| "DYNAMIC" | "ENCRYPTION" | "END" | "ENFORCED" | "ENGINE" | "ENGINES" | "ENUM" | "ERRORS" | "ESCAPE" | "EVOLVE" | "EXECUTE" | "EXTENDED" | "FIELDS" | "FIRST" | "FIXED" | "FLUSH" | "FOLLOWING" | "FORMAT" | "FULL" |"GLOBAL" +| "HASH" | "HOUR" | "INSERT_METHOD" | "LESS" | "LOCAL" | "LAST" | "NAMES" | "OFFSET" | "PASSWORD" %prec lowerThanEq | "PREPARE" | "QUICK" | "REBUILD" | "REDUNDANT" | "REORGANIZE" +| "ROLE" |"ROLLBACK" | "SESSION" | "SIGNED" | "SHUTDOWN" | "SNAPSHOT" | "START" | "STATUS" | "OPEN"| "SUBPARTITIONS" | "SUBPARTITION" | "TABLES" | "TABLESPACE" | "TEXT" | "THAN" | "TIME" %prec lowerThanStringLitToken +| "TIMESTAMP" %prec lowerThanStringLitToken | "TRACE" | "TRANSACTION" | "TRUNCATE" | "UNBOUNDED" | "UNKNOWN" | "VALUE" | "WARNINGS" | "YEAR" | "MODE" | "WEEK" | "ANY" | "SOME" | "USER" | "IDENTIFIED" +| "COLLATION" | "COMMENT" | "AVG_ROW_LENGTH" | "CONNECTION" | "CHECKSUM" | "COMPRESSION" | "KEY_BLOCK_SIZE" | "MASTER" | "MAX_ROWS" +| "MIN_ROWS" | "NATIONAL" | "NCHAR" | "ROW_FORMAT" | "QUARTER" | "GRANTS" | "TRIGGERS" | "DELAY_KEY_WRITE" | "ISOLATION" | "JSON" +| "REPEATABLE" | "RESPECT" | "COMMITTED" | "UNCOMMITTED" | "ONLY" | "SERIAL" | "SERIALIZABLE" | "LEVEL" | "VARIABLES" | "SQL_CACHE" | "INDEXES" | "PROCESSLIST" +| "SQL_NO_CACHE" | "DISABLE" | "ENABLE" | "REVERSE" | "PRIVILEGES" | "NO" | "BINLOG" | "FUNCTION" | "VIEW" | "BINDING" | "BINDINGS" | "MODIFY" | "EVENTS" | "PARTITIONS" +| "NONE" | "NULLS" | "SUPER" | "EXCLUSIVE" | "STATS_PERSISTENT" | "STATS_AUTO_RECALC" | "ROW_COUNT" | "COALESCE" | "MONTH" | "PROCESS" | "PROFILE" | "PROFILES" +| "MICROSECOND" | "MINUTE" | "PLUGINS" | "PRECEDING" | "QUERY" | "QUERIES" | "SECOND" | "SEPARATOR" | "SHARE" | "SHARED" | "SLOW" | "MAX_CONNECTIONS_PER_HOUR" | "MAX_QUERIES_PER_HOUR" | "MAX_UPDATES_PER_HOUR" +| "MAX_USER_CONNECTIONS" | "REPLICATION" | "CLIENT" | "SLAVE" | "RELOAD" | "TEMPORARY" | "ROUTINE" | "EVENT" | "ALGORITHM" | "DEFINER" | "INVOKER" | "MERGE" | "TEMPTABLE" | "UNDEFINED" | "SECURITY" | "CASCADED" +| "RECOVER" | "CIPHER" | "SUBJECT" | "ISSUER" | "X509" | "NEVER" | "EXPIRE" | "ACCOUNT" | "INCREMENTAL" | "CPU" | "MEMORY" | "BLOCK" | "IO" | "CONTEXT" | "SWITCHES" | "PAGE" | "FAULTS" | "IPC" | "SWAPS" | "SOURCE" +| "TRADITIONAL" | "SQL_BUFFER_RESULT" | "DIRECTORY" | "HISTORY" | "LIST" | "NODEGROUP" | "SYSTEM_TIME" | "PARTIAL" 
| "SIMPLE" | "REMOVE" | "PARTITIONING" | "STORAGE" | "DISK" | "STATS_SAMPLE_PAGES" | "SECONDARY_ENGINE" | "SECONDARY_LOAD" | "SECONDARY_UNLOAD" | "VALIDATION" +| "WITHOUT" | "RTREE" | "EXCHANGE" | "COLUMN_FORMAT" | "REPAIR" | "IMPORT" | "DISCARD" | "TABLE_CHECKSUM" | "UNICODE" | "AUTO_RANDOM" +| "SQL_TSI_DAY" | "SQL_TSI_HOUR" | "SQL_TSI_MINUTE" | "SQL_TSI_MONTH" | "SQL_TSI_QUARTER" | "SQL_TSI_SECOND" | +"SQL_TSI_WEEK" | "SQL_TSI_YEAR" | "INVISIBLE" | "VISIBLE" | "TYPE" | "NOWAIT" | "REPLICA" | "LOCATION" | "LABELS" +| "LOGS" | "HOSTS" | "AGAINST" | "EXPANSION" | "INCREMENT" | "MINVALUE" | "NOMAXVALUE" | "NOMINVALUE" | "NOCACHE" | "CACHE" | "CYCLE" | "NOCYCLE" | "NOORDER" | "SEQUENCE" | "MAX_MINUTES" | "MAX_IDXNUM" | "PER_TABLE" | "PER_DB" + +TiDBKeyword: + "ADMIN" | "AGG_TO_COP" |"BUCKETS" | "BUILTINS" | "CANCEL" | "CMSKETCH" | "DDL" | "DEPTH" | "DRAINER" | "JOBS" | "JOB" | "NODE_ID" | "NODE_STATE" | "PUMP" | "SAMPLES" | "STATS" | "STATS_META" | "STATS_HISTOGRAMS" | "STATS_BUCKETS" | "STATS_HEALTHY" | "TIDB" +| "HASH_JOIN" | "SM_JOIN" | "INL_JOIN" | "INL_HASH_JOIN"| "INL_MERGE_JOIN" | "SWAP_JOIN_INPUTS" | "NO_SWAP_JOIN_INPUTS" | "HASH_AGG" | "STREAM_AGG" | "USE_INDEX" | "IGNORE_INDEX" | "USE_INDEX_MERGE" | "NO_INDEX_MERGE" | "USE_TOJA" | "ENABLE_PLAN_CACHE" | "USE_PLAN_CACHE" +| "READ_CONSISTENT_REPLICA" | "READ_FROM_STORAGE" | "QB_NAME" | "QUERY_TYPE" | "MEMORY_QUOTA" | "OLAP" | "OLTP" | "TOPN" | "TIKV" | "TIFLASH" | "SPLIT" | "OPTIMISTIC" | "PESSIMISTIC" | "WIDTH" | "REGIONS" | "REGION" + +NotKeywordToken: + "ADDDATE" | "BIT_AND" | "BIT_OR" | "BIT_XOR" | "CAST" | "COPY" | "COUNT" | "CURTIME" | "DATE_ADD" | "DATE_SUB" | "EXTRACT" | "GET_FORMAT" | "GROUP_CONCAT" +| "INPLACE" | "INSTANT" | "INTERNAL" |"MIN" | "MAX" | "MAX_EXECUTION_TIME" | "NOW" | "RECENT" | "POSITION" | "SUBDATE" | "SUBSTRING" | "SUM" +| "STD" | "STDDEV" | "STDDEV_POP" | "STDDEV_SAMP" | "VARIANCE" | "VAR_POP" | "VAR_SAMP" +| "TIMESTAMPADD" | "TIMESTAMPDIFF" | "TOKUDB_DEFAULT" | "TOKUDB_FAST" | "TOKUDB_LZMA" | "TOKUDB_QUICKLZ" | "TOKUDB_SNAPPY" | "TOKUDB_SMALL" | "TOKUDB_UNCOMPRESSED" | "TOKUDB_ZLIB" | "TOP" | "TRIM" | "NEXT_ROW_ID" +| "EXPR_PUSHDOWN_BLACKLIST" | "OPT_RULE_BLACKLIST" | "BOUND" | "EXACT" | "STALENESS" | "STRONG" | "FLASHBACK" + +/************************************************************************************ + * + * Insert Statements + * + **********************************************************************************/ +InsertIntoStmt: + "INSERT" PriorityOpt IntoOpt TableName InsertValues + { + x := $5.(*ast.InsertStmt) + x.Priority = $2.(mysql.PriorityEnum) + // Wraps many layers here so that it can be processed the same way as select statement. 
+ ts := &ast.TableSource{Source: $4.(*ast.TableName)} + x.Table = &ast.TableRefsClause{TableRefs: &ast.Join{Left: ts}} + $$ = x + } + +IntoOpt: + {} +| "INTO" + +InsertValues: + '(' ColumnNameListOpt ')' ValueSym ValuesList + { + $$ = &ast.InsertStmt{ + Columns: $2.([]*ast.ColumnName), + Lists: $5.([][]ast.ExprNode), + } + } +| '(' ColumnNameListOpt ')' SelectStmt + { + $$ = &ast.InsertStmt{Columns: $2.([]*ast.ColumnName), Select: $4.(*ast.SelectStmt)} + } +| '(' ColumnNameListOpt ')' '(' SelectStmt ')' + { + $$ = &ast.InsertStmt{Columns: $2.([]*ast.ColumnName), Select: $5.(*ast.SelectStmt)} + } +| ValueSym ValuesList %prec insertValues + { + $$ = &ast.InsertStmt{Lists: $2.([][]ast.ExprNode)} + } +| '(' SelectStmt ')' + { + $$ = &ast.InsertStmt{Select: $2.(*ast.SelectStmt)} + } +| SelectStmt + { + $$ = &ast.InsertStmt{Select: $1.(*ast.SelectStmt)} + } +| "SET" ColumnSetValueList + { + $$ = &ast.InsertStmt{Setlist: $2.([]*ast.Assignment)} + } + +ValueSym: +"VALUE" | "VALUES" + +ValuesList: + RowValue + { + $$ = [][]ast.ExprNode{$1.([]ast.ExprNode)} + } +| ValuesList ',' RowValue + { + $$ = append($1.([][]ast.ExprNode), $3.([]ast.ExprNode)) + } + +RowValue: + '(' ValuesOpt ')' + { + $$ = $2 + } + +ValuesOpt: + { + $$ = []ast.ExprNode{} + } +| Values + +Values: + Values ',' ExprOrDefault + { + $$ = append($1.([]ast.ExprNode), $3) + } +| ExprOrDefault + { + $$ = []ast.ExprNode{$1} + } + +ExprOrDefault: + Expression +| "DEFAULT" + { + $$ = &ast.DefaultExpr{} + } + +ColumnSetValue: + ColumnName eq ExprOrDefault + { + $$ = &ast.Assignment{ + Column: $1.(*ast.ColumnName), + Expr: $3, + } + } + +ColumnSetValueList: + { + $$ = []*ast.Assignment{} + } +| ColumnSetValue + { + $$ = []*ast.Assignment{$1.(*ast.Assignment)} + } +| ColumnSetValueList ',' ColumnSetValue + { + $$ = append($1.([]*ast.Assignment), $3.(*ast.Assignment)) + } + +/***********************************Insert Statements END************************************/ + +/************************************************************************************ + * Replace Statements + * See https://dev.mysql.com/doc/refman/5.7/en/replace.html + * + * TODO: support PARTITION + **********************************************************************************/ +ReplaceIntoStmt: + "REPLACE" PriorityOpt IntoOpt TableName InsertValues + { + x := $5.(*ast.InsertStmt) + x.IsReplace = true + x.Priority = $2.(mysql.PriorityEnum) + ts := &ast.TableSource{Source: $4.(*ast.TableName)} + x.Table = &ast.TableRefsClause{TableRefs: &ast.Join{Left: ts}} + $$ = x + } + +/***********************************Replace Statements END************************************/ + +Literal: + "FALSE" + { + $$ = ast.NewValueExpr(false) + } +| "NULL" + { + $$ = ast.NewValueExpr(nil) + } +| "TRUE" + { + $$ = ast.NewValueExpr(true) + } +| floatLit + { + $$ = ast.NewValueExpr($1) + } +| decLit + { + $$ = ast.NewValueExpr($1) + } +| intLit + { + $$ = ast.NewValueExpr($1) + } +| StringLiteral %prec lowerThanStringLitToken + { + $$ = $1 + } +| "UNDERSCORE_CHARSET" stringLit + { + // See https://dev.mysql.com/doc/refman/5.7/en/charset-literal.html + co, err := charset.GetDefaultCollation($1) + if err != nil { + yylex.AppendError(yylex.Errorf("Get collation error for charset: %s", $1)) + return 1 + } + expr := ast.NewValueExpr($2) + tp := expr.GetType() + tp.Charset = $1 + tp.Collate = co + if tp.Collate == charset.CollationBin { + tp.Flag |= mysql.BinaryFlag + } + $$ = expr + } +| hexLit + { + $$ = ast.NewValueExpr($1) + } +| bitLit + { + $$ = ast.NewValueExpr($1) + } + +StringLiteral: + 
stringLit + { + expr := ast.NewValueExpr($1) + $$ = expr + } +| StringLiteral stringLit + { + valExpr := $1.(ast.ValueExpr) + strLit := valExpr.GetString() + expr := ast.NewValueExpr(strLit+$2) + // Fix #4239, use first string literal as projection name. + if valExpr.GetProjectionOffset() >= 0 { + expr.SetProjectionOffset(valExpr.GetProjectionOffset()) + } else { + expr.SetProjectionOffset(len(strLit)) + } + $$ = expr + } + +OrderBy: + "ORDER" "BY" ByList + { + $$ = &ast.OrderByClause{Items: $3.([]*ast.ByItem)} + } + +ByList: + ByItem + { + $$ = []*ast.ByItem{$1.(*ast.ByItem)} + } +| ByList ',' ByItem + { + $$ = append($1.([]*ast.ByItem), $3.(*ast.ByItem)) + } + +ByItem: + Expression Order + { + $$ = &ast.ByItem{Expr: $1, Desc: $2.(bool)} + } + +Order: + /* EMPTY */ + { + $$ = false // ASC by default + } +| "ASC" + { + $$ = false + } +| "DESC" + { + $$ = true + } + +OrderByOptional: + { + $$ = nil + } +| OrderBy + { + $$ = $1 + } + +BitExpr: + BitExpr '|' BitExpr %prec '|' + { + $$ = &ast.BinaryOperationExpr{Op: opcode.Or, L: $1, R: $3} + } +| BitExpr '&' BitExpr %prec '&' + { + $$ = &ast.BinaryOperationExpr{Op: opcode.And, L: $1, R: $3} + } +| BitExpr "<<" BitExpr %prec lsh + { + $$ = &ast.BinaryOperationExpr{Op: opcode.LeftShift, L: $1, R: $3} + } +| BitExpr ">>" BitExpr %prec rsh + { + $$ = &ast.BinaryOperationExpr{Op: opcode.RightShift, L: $1, R: $3} + } +| BitExpr '+' BitExpr %prec '+' + { + $$ = &ast.BinaryOperationExpr{Op: opcode.Plus, L: $1, R: $3} + } +| BitExpr '-' BitExpr %prec '-' + { + $$ = &ast.BinaryOperationExpr{Op: opcode.Minus, L: $1, R: $3} + } +| BitExpr '*' BitExpr %prec '*' + { + $$ = &ast.BinaryOperationExpr{Op: opcode.Mul, L: $1, R: $3} + } +| BitExpr '/' BitExpr %prec '/' + { + $$ = &ast.BinaryOperationExpr{Op: opcode.Div, L: $1, R: $3} + } +| BitExpr '%' BitExpr %prec '%' + { + $$ = &ast.BinaryOperationExpr{Op: opcode.Mod, L: $1, R: $3} + } +| BitExpr "DIV" BitExpr %prec div + { + $$ = &ast.BinaryOperationExpr{Op: opcode.IntDiv, L: $1, R: $3} + } +| BitExpr "MOD" BitExpr %prec mod + { + $$ = &ast.BinaryOperationExpr{Op: opcode.Mod, L: $1, R: $3} + } +| BitExpr '^' BitExpr + { + $$ = &ast.BinaryOperationExpr{Op: opcode.Xor, L: $1, R: $3} + } +| SimpleExpr + +SimpleIdent: + Identifier + { + $$ = &ast.ColumnNameExpr{Name: &ast.ColumnName{ + Name: model.NewCIStr($1), + }} + } +| Identifier '.' Identifier + { + $$ = &ast.ColumnNameExpr{Name: &ast.ColumnName{ + Table: model.NewCIStr($1), + Name: model.NewCIStr($3), + }} + } +| '.' Identifier '.' Identifier + { + $$ = &ast.ColumnNameExpr{Name: &ast.ColumnName{ + Table: model.NewCIStr($2), + Name: model.NewCIStr($4), + }} + } +| Identifier '.' Identifier '.' Identifier + { + $$ = &ast.ColumnNameExpr{Name: &ast.ColumnName{ + Schema: model.NewCIStr($1), + Table: model.NewCIStr($3), + Name: model.NewCIStr($5), + }} + } + +SimpleExpr: + SimpleIdent +| FunctionCallKeyword +| FunctionCallNonKeyword +| FunctionCallGeneric +| SimpleExpr "COLLATE" StringName %prec neg + { + // TODO: Create a builtin function hold expr and collation. When do evaluation, convert expr result using the collation. + $$ = $1 + } +| Literal +| Variable +| SumExpr +| '!' 
SimpleExpr %prec neg + { + $$ = &ast.UnaryOperationExpr{Op: opcode.Not, V: $2} + } +| '~' SimpleExpr %prec neg + { + $$ = &ast.UnaryOperationExpr{Op: opcode.BitNeg, V: $2} + } +| '-' SimpleExpr %prec neg + { + $$ = &ast.UnaryOperationExpr{Op: opcode.Minus, V: $2} + } +| '+' SimpleExpr %prec neg + { + $$ = &ast.UnaryOperationExpr{Op: opcode.Plus, V: $2} + } +| not2 SimpleExpr %prec neg + { + $$ = &ast.UnaryOperationExpr{Op: opcode.Not, V: $2} + } +| '(' Expression ')' { + startOffset := parser.startOffset(&yyS[yypt-1]) + endOffset := parser.endOffset(&yyS[yypt]) + expr := $2 + expr.SetText(parser.src[startOffset:endOffset]) + $$ = &ast.ParenthesesExpr{Expr: expr} + } +| '(' ExpressionList ',' Expression ')' + { + values := append($2.([]ast.ExprNode), $4) + $$ = &ast.RowExpr{Values: values} + } +| "ROW" '(' ExpressionList ',' Expression ')' + { + values := append($3.([]ast.ExprNode), $5) + $$ = &ast.RowExpr{Values: values} + } +| "CONVERT" '(' Expression "USING" CharsetName ')' + { + // See https://dev.mysql.com/doc/refman/5.7/en/cast-functions.html#function_convert + charset1 := ast.NewValueExpr($5) + $$ = &ast.FuncCallExpr{ + FnName: model.NewCIStr($1), + Args: []ast.ExprNode{$3, charset1}, + } + } +| "DEFAULT" '(' SimpleIdent ')' + { + $$ = &ast.DefaultExpr{Name: $3.(*ast.ColumnNameExpr).Name} + } +| "VALUES" '(' SimpleIdent ')' %prec lowerThanInsertValues + { + $$ = &ast.ValuesExpr{Column: $3.(*ast.ColumnNameExpr)} + } + +DistinctKwd: + "DISTINCT" +| "DISTINCTROW" + +DistinctOpt: + "ALL" + { + $$ = false + } +| DistinctKwd + { + $$ = true + } + +DefaultFalseDistinctOpt: + { + $$ = false + } +| DistinctOpt + +DefaultTrueDistinctOpt: + { + $$ = true + } +| DistinctOpt + + +FunctionNameConflict: + "ASCII" +| "CHARSET" +| "COALESCE" +| "COLLATION" +| "DATE" +| "DATABASE" +| "DAY" +| "HOUR" +| "IF" +| "INTERVAL" %prec lowerThanIntervalKeyword +| "FORMAT" +| "LEFT" +| "MICROSECOND" +| "MINUTE" +| "MONTH" +| builtinNow +| "QUARTER" +| "REPEAT" +| "REPLACE" +| "REVERSE" +| "RIGHT" +| "ROW_COUNT" +| "SECOND" +| "TIME" +| "TIMESTAMP" +| "TRUNCATE" +| "USER" +| "WEEK" +| "YEAR" + +OptionalBraces: + {} | '(' ')' {} + +FunctionNameOptionalBraces: + "CURRENT_USER" +| "CURRENT_DATE" +| "CURRENT_ROLE" +| "UTC_DATE" + +FunctionNameDatetimePrecision: + "CURRENT_TIME" +| "CURRENT_TIMESTAMP" +| "LOCALTIME" +| "LOCALTIMESTAMP" +| "UTC_TIME" +| "UTC_TIMESTAMP" + +FunctionCallKeyword: + FunctionNameConflict '(' ExpressionListOpt ')' + { + $$ = &ast.FuncCallExpr{FnName: model.NewCIStr($1), Args: $3.([]ast.ExprNode)} + } +| builtinUser '(' ExpressionListOpt ')' + { + $$ = &ast.FuncCallExpr{FnName: model.NewCIStr($1), Args: $3.([]ast.ExprNode)} + } +| FunctionNameOptionalBraces OptionalBraces + { + $$ = &ast.FuncCallExpr{FnName: model.NewCIStr($1)} + } +| builtinCurDate '(' ')' + { + $$ = &ast.FuncCallExpr{FnName: model.NewCIStr($1)} + } +| FunctionNameDatetimePrecision FuncDatetimePrec + { + args := []ast.ExprNode{} + if $2 != nil { + args = append(args, $2.(ast.ExprNode)) + } + $$ = &ast.FuncCallExpr{FnName: model.NewCIStr($1), Args: args} + } +| "MOD" '(' BitExpr ',' BitExpr ')' + { + $$ = &ast.BinaryOperationExpr{Op: opcode.Mod, L: $3, R: $5} + } + +FunctionCallNonKeyword: + builtinCurTime '(' FuncDatetimePrecListOpt ')' + { + $$ = &ast.FuncCallExpr{FnName: model.NewCIStr($1), Args: $3.([]ast.ExprNode)} + } +| builtinSysDate '(' FuncDatetimePrecListOpt ')' + { + $$ = &ast.FuncCallExpr{FnName: model.NewCIStr($1), Args: $3.([]ast.ExprNode)} + } +| builtinPosition '(' BitExpr "IN" Expression ')' + { + $$ = 
&ast.FuncCallExpr{FnName: model.NewCIStr($1), Args: []ast.ExprNode{$3, $5}} + } +| builtinSubstring '(' Expression ',' Expression ')' + { + $$ = &ast.FuncCallExpr{ + FnName: model.NewCIStr($1), + Args: []ast.ExprNode{$3, $5}, + } + } +| builtinSubstring '(' Expression "FROM" Expression ')' + { + $$ = &ast.FuncCallExpr{ + FnName: model.NewCIStr($1), + Args: []ast.ExprNode{$3, $5}, + } + } +| builtinSubstring '(' Expression ',' Expression ',' Expression ')' + { + $$ = &ast.FuncCallExpr{ + FnName: model.NewCIStr($1), + Args: []ast.ExprNode{$3, $5, $7}, + } + } +| builtinSubstring '(' Expression "FROM" Expression "FOR" Expression ')' + { + $$ = &ast.FuncCallExpr{ + FnName: model.NewCIStr($1), + Args: []ast.ExprNode{$3, $5, $7}, + } + } +| builtinTrim '(' Expression ')' + { + $$ = &ast.FuncCallExpr{ + FnName: model.NewCIStr($1), + Args: []ast.ExprNode{$3}, + } + } +| builtinTrim '(' Expression "FROM" Expression ')' + { + $$ = &ast.FuncCallExpr{ + FnName: model.NewCIStr($1), + Args: []ast.ExprNode{$5, $3}, + } + } + +FunctionNameDateArith: + builtinDateAdd +| builtinDateSub + + +FunctionNameDateArithMultiForms: + builtinAddDate +| builtinSubDate + +SumExpr: + "AVG" '(' Expression ')' + { + $$ = &ast.AggregateFuncExpr{F: $1, Args: []ast.ExprNode{$3}} + } +| builtinCount '(' Expression ')' + { + $$ = &ast.AggregateFuncExpr{F: $1, Args: []ast.ExprNode{$3}} + } +| builtinCount '(' '*' ')' + { + args := []ast.ExprNode{ast.NewValueExpr(1)} + $$ = &ast.AggregateFuncExpr{F: $1, Args: args,} + } +| builtinMax '(' Expression ')' + { + $$ = &ast.AggregateFuncExpr{F: $1, Args: []ast.ExprNode{$3}} + } +| builtinMin '(' Expression ')' + { + $$ = &ast.AggregateFuncExpr{F: $1, Args: []ast.ExprNode{$3}} + } +| builtinSum '(' Expression ')' + { + $$ = &ast.AggregateFuncExpr{F: $1, Args: []ast.ExprNode{$3}} + } + +OptGConcatSeparator: + { + $$ = ast.NewValueExpr(",") + } +| "SEPARATOR" stringLit + { + $$ = ast.NewValueExpr($2) + } + + +FunctionCallGeneric: + identifier '(' ExpressionListOpt ')' + { + $$ = &ast.FuncCallExpr{FnName: model.NewCIStr($1), Args: $3.([]ast.ExprNode)} + } + +FuncDatetimePrec: + { + $$ = nil + } +| '(' ')' + { + $$ = nil + } +| '(' intLit ')' + { + expr := ast.NewValueExpr($2) + $$ = expr + } + +ExpressionOpt: + { + $$ = nil + } +| Expression + { + $$ = $1 + } + +CastType: + "BINARY" OptFieldLen + { + x := types.NewFieldType(mysql.TypeVarString) + x.Flen = $2.(int) // TODO: Flen should be the flen of expression + if x.Flen != types.UnspecifiedLength { + x.Tp = mysql.TypeString + } + x.Charset = charset.CharsetBin + x.Collate = charset.CollationBin + x.Flag |= mysql.BinaryFlag + $$ = x + } +| "CHAR" OptFieldLen OptBinary + { + x := types.NewFieldType(mysql.TypeVarString) + x.Flen = $2.(int) // TODO: Flen should be the flen of expression + x.Charset = $3.(*ast.OptBinary).Charset + if $3.(*ast.OptBinary).IsBinary{ + x.Flag |= mysql.BinaryFlag + } + if x.Charset == "" { + x.Charset = mysql.DefaultCharset + x.Collate = mysql.DefaultCollationName + } + $$ = x + } +| "DATE" + { + x := types.NewFieldType(mysql.TypeDate) + x.Charset = charset.CharsetBin + x.Collate = charset.CollationBin + x.Flag |= mysql.BinaryFlag + $$ = x + } +| "DATETIME" OptFieldLen + { + x := types.NewFieldType(mysql.TypeDatetime) + x.Flen, _ = mysql.GetDefaultFieldLengthAndDecimalForCast(mysql.TypeDatetime) + x.Decimal = $2.(int) + if x.Decimal > 0 { + x.Flen = x.Flen + 1 + x.Decimal + } + x.Charset = charset.CharsetBin + x.Collate = charset.CollationBin + x.Flag |= mysql.BinaryFlag + $$ = x + } +| "DECIMAL" FloatOpt + { + 
fopt := $2.(*ast.FloatOpt) + x := types.NewFieldType(mysql.TypeNewDecimal) + x.Flen = fopt.Flen + x.Decimal = fopt.Decimal + x.Charset = charset.CharsetBin + x.Collate = charset.CollationBin + x.Flag |= mysql.BinaryFlag + $$ = x + } +| "TIME" OptFieldLen + { + x := types.NewFieldType(mysql.TypeDuration) + x.Flen, _ = mysql.GetDefaultFieldLengthAndDecimalForCast(mysql.TypeDuration) + x.Decimal = $2.(int) + if x.Decimal > 0 { + x.Flen = x.Flen + 1 + x.Decimal + } + x.Charset = charset.CharsetBin + x.Collate = charset.CollationBin + x.Flag |= mysql.BinaryFlag + $$ = x + } +| "SIGNED" OptInteger + { + x := types.NewFieldType(mysql.TypeLonglong) + x.Charset = charset.CharsetBin + x.Collate = charset.CollationBin + x.Flag |= mysql.BinaryFlag + $$ = x + } +| "UNSIGNED" OptInteger + { + x := types.NewFieldType(mysql.TypeLonglong) + x.Flag |= mysql.UnsignedFlag | mysql.BinaryFlag + x.Charset = charset.CharsetBin + x.Collate = charset.CollationBin + $$ = x + } +| "JSON" + { + x := types.NewFieldType(mysql.TypeJSON) + x.Flag |= mysql.BinaryFlag | (mysql.ParseToJSONFlag) + x.Charset = mysql.DefaultCharset + x.Collate = mysql.DefaultCollationName + $$ = x + } +| "DOUBLE" + { + x := types.NewFieldType(mysql.TypeDouble) + x.Flen, x.Decimal = mysql.GetDefaultFieldLengthAndDecimalForCast(mysql.TypeDouble) + x.Flag |= mysql.BinaryFlag + x.Charset = charset.CharsetBin + x.Collate = charset.CollationBin + $$ = x + } +| "FLOAT" FloatOpt + { + x := types.NewFieldType(mysql.TypeFloat) + fopt := $2.(*ast.FloatOpt) + if fopt.Flen >= 54 { + yylex.AppendError(ErrTooBigPrecision.GenWithStackByArgs(fopt.Flen,"CAST",53)) + } else if fopt.Flen >= 25 { + x = types.NewFieldType(mysql.TypeDouble) + } + x.Flen, x.Decimal = mysql.GetDefaultFieldLengthAndDecimalForCast(x.Tp) + x.Flag |= mysql.BinaryFlag + x.Charset = charset.CharsetBin + x.Collate = charset.CollationBin + $$ = x + } +| "REAL" + { + var x *types.FieldType + if parser.lexer.GetSQLMode().HasRealAsFloatMode() { + x = types.NewFieldType(mysql.TypeFloat) + } else { + x = types.NewFieldType(mysql.TypeDouble) + } + x.Flen, x.Decimal = mysql.GetDefaultFieldLengthAndDecimalForCast(x.Tp) + x.Flag |= mysql.BinaryFlag + x.Charset = charset.CharsetBin + x.Collate = charset.CollationBin + $$ = x + } + +PriorityOpt: + { + $$ = mysql.NoPriority + } +| "LOW_PRIORITY" + { + $$ = mysql.LowPriority + } +| "HIGH_PRIORITY" + { + $$ = mysql.HighPriority + } +| "DELAYED" + { + $$ = mysql.DelayedPriority + } + +TableName: + Identifier + { + $$ = &ast.TableName{Name:model.NewCIStr($1)} + } +| Identifier '.' Identifier + { + $$ = &ast.TableName{Schema:model.NewCIStr($1), Name:model.NewCIStr($3)} + } + +TableNameList: + TableName + { + tbl := []*ast.TableName{$1.(*ast.TableName)} + $$ = tbl + } +| TableNameList ',' TableName + { + $$ = append($1.([]*ast.TableName), $3.(*ast.TableName)) + } + +TableNameOptWild: + Identifier OptWild + { + $$ = &ast.TableName{Name:model.NewCIStr($1)} + } +| Identifier '.' Identifier OptWild + { + $$ = &ast.TableName{Schema:model.NewCIStr($1), Name:model.NewCIStr($3)} + } + +TableAliasRefList: + TableNameOptWild + { + tbl := []*ast.TableName{$1.(*ast.TableName)} + $$ = tbl + } +| TableAliasRefList ',' TableNameOptWild + { + $$ = append($1.([]*ast.TableName), $3.(*ast.TableName)) + } + +OptWild: + %prec empty + { + } +| '.' 
'*' + { + } + +QuickOptional: + %prec empty + { + $$ = false + } +| "QUICK" + { + $$ = true + } + +RollbackStmt: + "ROLLBACK" + { + $$ = &ast.RollbackStmt{} + } + +SelectStmtBasic: + "SELECT" SelectStmtOpts SelectStmtFieldList + { + st := &ast.SelectStmt { + SelectStmtOpts: $2.(*ast.SelectStmtOpts), + Distinct: $2.(*ast.SelectStmtOpts).Distinct, + Fields: $3.(*ast.FieldList), + } + if st.SelectStmtOpts.TableHints != nil { + st.TableHints = st.SelectStmtOpts.TableHints + } + $$ = st + } + +SelectStmtFromDualTable: + SelectStmtBasic FromDual WhereClauseOptional + { + st := $1.(*ast.SelectStmt) + lastField := st.Fields.Fields[len(st.Fields.Fields)-1] + if lastField.Expr != nil && lastField.AsName.O == "" { + lastEnd := yyS[yypt-1].offset-1 + lastField.SetText(parser.src[lastField.Offset:lastEnd]) + } + if $3 != nil { + st.Where = $3.(ast.ExprNode) + } + } + +SelectStmtFromTable: + SelectStmtBasic "FROM" + TableRefsClause WhereClauseOptional SelectStmtGroup HavingClause + { + st := $1.(*ast.SelectStmt) + st.From = $3.(*ast.TableRefsClause) + lastField := st.Fields.Fields[len(st.Fields.Fields)-1] + if lastField.Expr != nil && lastField.AsName.O == "" { + lastEnd := parser.endOffset(&yyS[yypt-4]) + lastField.SetText(parser.src[lastField.Offset:lastEnd]) + } + if $4 != nil { + st.Where = $4.(ast.ExprNode) + } + if $5 != nil { + st.GroupBy = $5.(*ast.GroupByClause) + } + if $6 != nil { + st.Having = $6.(*ast.HavingClause) + } + $$ = st + } + +SelectStmt: + SelectStmtBasic OrderByOptional SelectStmtLimit + { + st := $1.(*ast.SelectStmt) + lastField := st.Fields.Fields[len(st.Fields.Fields)-1] + if lastField.Expr != nil && lastField.AsName.O == "" { + src := parser.src + var lastEnd int + if $2 != nil { + lastEnd = yyS[yypt-1].offset-1 + } else if $3 != nil { + lastEnd = yyS[yypt-0].offset-1 + } else { + lastEnd = len(src) + if src[lastEnd-1] == ';' { + lastEnd-- + } + } + lastField.SetText(src[lastField.Offset:lastEnd]) + } + if $2 != nil { + st.OrderBy = $2.(*ast.OrderByClause) + } + if $3 != nil { + st.Limit = $3.(*ast.Limit) + } + $$ = st + } +| SelectStmtFromDualTable OrderByOptional SelectStmtLimit + { + st := $1.(*ast.SelectStmt) + if $2 != nil { + st.OrderBy = $2.(*ast.OrderByClause) + } + if $3 != nil { + st.Limit = $3.(*ast.Limit) + } + $$ = st + } +| SelectStmtFromTable OrderByOptional SelectStmtLimit + { + st := $1.(*ast.SelectStmt) + if $2 != nil { + st.OrderBy = $2.(*ast.OrderByClause) + } + if $3 != nil { + st.Limit = $3.(*ast.Limit) + } + $$ = st + } + +FromDual: + "FROM" "DUAL" + +TableRefsClause: + TableRefs + { + $$ = &ast.TableRefsClause{TableRefs: $1.(*ast.Join)} + } + +TableRefs: + EscapedTableRef + { + if j, ok := $1.(*ast.Join); ok { + // if $1 is Join, use it directly + $$ = j + } else { + $$ = &ast.Join{Left: $1.(ast.ResultSetNode), Right: nil} + } + } +| TableRefs ',' EscapedTableRef + { + /* from a, b is default cross join */ + $$ = &ast.Join{Left: $1.(ast.ResultSetNode), Right: $3.(ast.ResultSetNode), Tp: ast.CrossJoin} + } + +EscapedTableRef: + TableRef %prec lowerThanSetKeyword + { + $$ = $1 + } +| '{' Identifier TableRef '}' + { + /* + * ODBC escape syntax for outer join is { OJ join_table } + * Use an Identifier for OJ + */ + $$ = $3 + } + +TableRef: + TableFactor + { + $$ = $1 + } +| JoinTable + { + $$ = $1 + } + +TableFactor: + TableName TableAsNameOpt IndexHintListOpt + { + tn := $1.(*ast.TableName) + tn.IndexHints = $3.([]*ast.IndexHint) + $$ = &ast.TableSource{Source: tn, AsName: $2.(model.CIStr)} + } +| '(' SelectStmt ')' TableAsName + { + st := 
$2.(*ast.SelectStmt) + endOffset := parser.endOffset(&yyS[yypt-1]) + parser.setLastSelectFieldText(st, endOffset) + $$ = &ast.TableSource{Source: $2.(*ast.SelectStmt), AsName: $4.(model.CIStr)} + } +| '(' TableRefs ')' + { + $$ = $2 + } + +TableAsNameOpt: + { + $$ = model.CIStr{} + } +| TableAsName + { + $$ = $1 + } + +TableAsName: + Identifier + { + $$ = model.NewCIStr($1) + } +| "AS" Identifier + { + $$ = model.NewCIStr($2) + } + +IndexHintType: + "USE" KeyOrIndex + { + $$ = ast.HintUse + } +| "IGNORE" KeyOrIndex + { + $$ = ast.HintIgnore + } +| "FORCE" KeyOrIndex + { + $$ = ast.HintForce + } + +IndexHintScope: + { + $$ = ast.HintForScan + } +| "FOR" "JOIN" + { + $$ = ast.HintForJoin + } +| "FOR" "ORDER" "BY" + { + $$ = ast.HintForOrderBy + } +| "FOR" "GROUP" "BY" + { + $$ = ast.HintForGroupBy + } + + +IndexHint: + IndexHintType IndexHintScope '(' IndexNameList ')' + { + $$ = &ast.IndexHint{ + IndexNames: $4.([]model.CIStr), + HintType: $1.(ast.IndexHintType), + HintScope: $2.(ast.IndexHintScope), + } + } + +IndexNameList: + { + var nameList []model.CIStr + $$ = nameList + } +| Identifier + { + $$ = []model.CIStr{model.NewCIStr($1)} + } +| IndexNameList ',' Identifier + { + $$ = append($1.([]model.CIStr), model.NewCIStr($3)) + } +| "PRIMARY" + { + $$ = []model.CIStr{model.NewCIStr($1)} + } +| IndexNameList ',' "PRIMARY" + { + $$ = append($1.([]model.CIStr), model.NewCIStr($3)) + } + +IndexHintList: + IndexHint + { + $$ = []*ast.IndexHint{$1.(*ast.IndexHint)} + } +| IndexHintList IndexHint + { + $$ = append($1.([]*ast.IndexHint), $2.(*ast.IndexHint)) + } + +IndexHintListOpt: + { + var hintList []*ast.IndexHint + $$ = hintList + } +| IndexHintList + { + $$ = $1 + } + +JoinTable: + /* Use %prec to evaluate production TableRef before cross join */ + TableRef CrossOpt TableRef %prec tableRefPriority + { + $$ = &ast.Join{Left: $1.(ast.ResultSetNode), Right: $3.(ast.ResultSetNode), Tp: ast.CrossJoin} + } + /* Your code here. 
*/ + +JoinType: + "LEFT" + { + $$ = ast.LeftJoin + } +| "RIGHT" + { + $$ = ast.RightJoin + } + +OuterOpt: + {} +| "OUTER" + +CrossOpt: + "JOIN" +| "INNER" "JOIN" + + +LimitClause: + { + $$ = nil + } +| "LIMIT" LimitOption + { + $$ = &ast.Limit{Count: $2.(ast.ValueExpr)} + } + +LimitOption: + LengthNum + { + $$ = ast.NewValueExpr($1) + } + +SelectStmtLimit: + { + $$ = nil + } +| "LIMIT" LimitOption + { + $$ = &ast.Limit{Count: $2.(ast.ExprNode)} + } +| "LIMIT" LimitOption ',' LimitOption + { + $$ = &ast.Limit{Offset: $2.(ast.ExprNode), Count: $4.(ast.ExprNode)} + } +| "LIMIT" LimitOption "OFFSET" LimitOption + { + $$ = &ast.Limit{Offset: $4.(ast.ExprNode), Count: $2.(ast.ExprNode)} + } + + +SelectStmtOpts: + TableOptimizerHints DefaultFalseDistinctOpt PriorityOpt SelectStmtSQLSmallResult SelectStmtSQLBigResult SelectStmtSQLBufferResult SelectStmtSQLCache SelectStmtCalcFoundRows SelectStmtStraightJoin + { + opt := &ast.SelectStmtOpts{} + if $1 != nil { + opt.TableHints = $1.([]*ast.TableOptimizerHint) + } + if $2 != nil { + opt.Distinct = $2.(bool) + } + if $3 != nil { + opt.Priority = $3.(mysql.PriorityEnum) + } + if $4 != nil { + opt.SQLSmallResult = $4.(bool) + } + if $5 != nil { + opt.SQLBigResult = $5.(bool) + } + if $6 != nil { + opt.SQLBufferResult = $6.(bool) + } + if $7 != nil { + opt.SQLCache = $7.(bool) + } + if $8 != nil { + opt.CalcFoundRows = $8.(bool) + } + if $9 != nil { + opt.StraightJoin = $9.(bool) + } + + $$ = opt + } + +TableOptimizerHints: + /* empty */ + { + $$ = nil + } +| hintBegin OptimizerHintList hintEnd + { + $$ = $2 + } +| hintBegin error hintEnd + { + yyerrok() + parser.lastErrorAsWarn() + $$ = nil + } + +OptimizerHintList: + TableOptimizerHintOpt + { + $$ = []*ast.TableOptimizerHint{$1.(*ast.TableOptimizerHint)} + } +| StorageOptimizerHintOpt + { + $$ = $1.([]*ast.TableOptimizerHint) + } +| OptimizerHintList TableOptimizerHintOpt + { + $$ = append($1.([]*ast.TableOptimizerHint), $2.(*ast.TableOptimizerHint)) + } +| OptimizerHintList ',' TableOptimizerHintOpt + { + $$ = append($1.([]*ast.TableOptimizerHint), $3.(*ast.TableOptimizerHint)) + } +| OptimizerHintList StorageOptimizerHintOpt + { + $$ = append($1.([]*ast.TableOptimizerHint), $2.([]*ast.TableOptimizerHint)...) + } +| OptimizerHintList ',' StorageOptimizerHintOpt + { + $$ = append($1.([]*ast.TableOptimizerHint), $3.([]*ast.TableOptimizerHint)...) 
+ } + +TableOptimizerHintOpt: + hintUseIndex '(' QueryBlockOpt HintTable IndexNameList ')' + { + $$ = &ast.TableOptimizerHint{ + HintName: model.NewCIStr($1), + QBName: $3.(model.CIStr), + Tables: []ast.HintTable{$4.(ast.HintTable)}, + Indexes: $5.([]model.CIStr), + } + } +| hintIgnoreIndex '(' QueryBlockOpt HintTable IndexNameList ')' + { + $$ = &ast.TableOptimizerHint{ + HintName: model.NewCIStr($1), + QBName: $3.(model.CIStr), + Tables: []ast.HintTable{$4.(ast.HintTable)}, + Indexes: $5.([]model.CIStr), + } + } +| hintSMJ '(' QueryBlockOpt HintTableList ')' + { + $$ = &ast.TableOptimizerHint{HintName: model.NewCIStr($1), QBName: $3.(model.CIStr), Tables: $4.([]ast.HintTable)} + } +| hintINLJ '(' QueryBlockOpt HintTableList ')' + { + $$ = &ast.TableOptimizerHint{HintName: model.NewCIStr($1), QBName: $3.(model.CIStr), Tables: $4.([]ast.HintTable)} + } +| hintINLHJ '(' QueryBlockOpt HintTableList ')' + { + $$ = &ast.TableOptimizerHint{HintName: model.NewCIStr($1), QBName: $3.(model.CIStr), Tables: $4.([]ast.HintTable)} + } +| hintSJI '(' QueryBlockOpt HintTableList ')' + { + $$ = &ast.TableOptimizerHint{HintName: model.NewCIStr($1), QBName: $3.(model.CIStr), Tables: $4.([]ast.HintTable)} + } +| hintNSJI '(' QueryBlockOpt HintTableList ')' + { + $$ = &ast.TableOptimizerHint{HintName: model.NewCIStr($1), QBName: $3.(model.CIStr), Tables: $4.([]ast.HintTable)} + } +| hintINLMJ '(' QueryBlockOpt HintTableList ')' + { + $$ = &ast.TableOptimizerHint{HintName: model.NewCIStr($1), QBName: $3.(model.CIStr), Tables: $4.([]ast.HintTable)} + } +| hintHJ '(' QueryBlockOpt HintTableList ')' + { + $$ = &ast.TableOptimizerHint{HintName: model.NewCIStr($1), QBName: $3.(model.CIStr), Tables: $4.([]ast.HintTable)} + } +| hintUseIndexMerge '(' QueryBlockOpt HintTable IndexNameList ')' + { + $$ = &ast.TableOptimizerHint{ + HintName: model.NewCIStr($1), + QBName: $3.(model.CIStr), + Tables: []ast.HintTable{$4.(ast.HintTable)}, + Indexes: $5.([]model.CIStr), + } + } +| hintUseToja '(' QueryBlockOpt HintTrueOrFalse ')' + { + $$ = &ast.TableOptimizerHint{HintName: model.NewCIStr($1), QBName: $3.(model.CIStr), HintFlag: $4.(bool)} + } +| hintEnablePlanCache '(' QueryBlockOpt HintTrueOrFalse ')' + { + $$ = &ast.TableOptimizerHint{HintName: model.NewCIStr($1), QBName: $3.(model.CIStr), HintFlag: $4.(bool)} + } +| maxExecutionTime '(' QueryBlockOpt NUM ')' + { + $$ = &ast.TableOptimizerHint{HintName: model.NewCIStr($1), QBName: $3.(model.CIStr), MaxExecutionTime: getUint64FromNUM($4)} + } +| hintUsePlanCache '(' QueryBlockOpt ')' + { + // arguments not decided yet. 
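+ // e.g. SELECT /*+ USE_PLAN_CACHE() */ c1 FROM t (hypothetical); only the hint name
+ // and the optional query block are recorded for now.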
+ $$ = &ast.TableOptimizerHint{HintName: model.NewCIStr($1), QBName: $3.(model.CIStr)} + } +| hintQueryType '(' QueryBlockOpt HintQueryType ')' + { + $$ = &ast.TableOptimizerHint{HintName: model.NewCIStr($1), QBName: $3.(model.CIStr), QueryType: model.NewCIStr($4.(string))} + } +| hintMemoryQuota '(' QueryBlockOpt HintMemoryQuota ')' + { + $$ = &ast.TableOptimizerHint{HintName: model.NewCIStr($1), QBName: $3.(model.CIStr), MemoryQuota: $4.(int64)} + } +| hintHASHAGG '(' QueryBlockOpt ')' + { + $$ = &ast.TableOptimizerHint{HintName: model.NewCIStr($1), QBName: $3.(model.CIStr)} + } +| hintSTREAMAGG '(' QueryBlockOpt ')' + { + $$ = &ast.TableOptimizerHint{HintName: model.NewCIStr($1), QBName: $3.(model.CIStr)} + } +| hintAggToCop '(' QueryBlockOpt ')' + { + $$ = &ast.TableOptimizerHint{HintName: model.NewCIStr($1), QBName: $3.(model.CIStr)} + } +| hintNoIndexMerge '(' QueryBlockOpt ')' + { + $$ = &ast.TableOptimizerHint{HintName: model.NewCIStr($1), QBName: $3.(model.CIStr)} + } +| hintReadConsistentReplica '(' QueryBlockOpt ')' + { + $$ = &ast.TableOptimizerHint{HintName: model.NewCIStr($1), QBName: $3.(model.CIStr)} + } +| hintQBName '(' Identifier ')' + { + $$ = &ast.TableOptimizerHint{HintName: model.NewCIStr($1), QBName: model.NewCIStr($3)} + } + +StorageOptimizerHintOpt: + hintReadFromStorage '(' QueryBlockOpt HintStorageTypeAndTableList ')' + { + $$ = $4.([]*ast.TableOptimizerHint) + for _, hint := range $$.([]*ast.TableOptimizerHint) { + hint.HintName = model.NewCIStr($1) + hint.QBName = $3.(model.CIStr) + } + } + +HintStorageTypeAndTableList: + HintStorageTypeAndTable + { + $$ = []*ast.TableOptimizerHint{$1.(*ast.TableOptimizerHint)} + } +| HintStorageTypeAndTableList ',' HintStorageTypeAndTable + { + $$ = append($1.([]*ast.TableOptimizerHint), $3.(*ast.TableOptimizerHint)) + } + +HintStorageTypeAndTable: + HintStorageType '[' HintTableList ']' + { + $$ = &ast.TableOptimizerHint{ + StoreType: model.NewCIStr($1.(string)), + Tables: $3.([]ast.HintTable), + } + } + +QueryBlockOpt: + { + $$ = model.NewCIStr("") + } +| singleAtIdentifier + { + $$ = model.NewCIStr($1) + } + +HintTable: + Identifier QueryBlockOpt + { + $$ = ast.HintTable{TableName: model.NewCIStr($1), QBName: $2.(model.CIStr)} + } + | Identifier '.' Identifier QueryBlockOpt + { + $$ = ast.HintTable{DBName: model.NewCIStr($1), TableName: model.NewCIStr($3), QBName: $4.(model.CIStr)} + } + +HintTableList: + HintTable + { + $$ = []ast.HintTable{$1.(ast.HintTable)} + } +| HintTableList ',' HintTable + { + $$ = append($1.([]ast.HintTable), $3.(ast.HintTable)) + } + +HintTrueOrFalse: + "TRUE" + { + $$ = true + } +| "FALSE" + { + $$ = false + } + +HintStorageType: + hintTiKV + { + $$ = $1 + } +| hintTiFlash + { + $$ = $1 + } + +HintQueryType: + hintOLAP + { + $$ = $1 + } +| hintOLTP + { + $$ = $1 + } + +HintMemoryQuota: + NUM Identifier + { + switch model.NewCIStr($2).L { + case "mb": + $$ = $1.(int64) * 1024 * 1024 + case "gb": + $$ = $1.(int64) * 1024 * 1024 * 1024 + default: + // Executor handle memory quota < 0 as no memory limit, here use it to trigger warning in TiDB. 
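+    // Illustrative example: MEMORY_QUOTA(16 MB) is converted above to 16 * 1024 * 1024 bytes,
+    // while an unrecognized unit falls through to this branch and yields -1, so the executor
+    // treats the hint as "no limit" and reports a warning instead of an error.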
+ $$ = int64(-1) + } + } + +SelectStmtCalcFoundRows: + { + $$ = false + } +| "SQL_CALC_FOUND_ROWS" + { + $$ = true + } +SelectStmtSQLBigResult: + %prec empty + { + $$ = false + } +| "SQL_BIG_RESULT" + { + $$ = true + } +SelectStmtSQLBufferResult: + %prec empty + { + $$ = false + } +| "SQL_BUFFER_RESULT" + { + $$ = true + } +SelectStmtSQLCache: + %prec empty + { + $$ = true + } +| "SQL_CACHE" + { + $$ = true + } +| "SQL_NO_CACHE" + { + $$ = false + } +SelectStmtSQLSmallResult: + %prec empty + { + $$ = false + } +| "SQL_SMALL_RESULT" + { + $$ = true + } +SelectStmtStraightJoin: + %prec empty + { + $$ = false + } +| "STRAIGHT_JOIN" + { + $$ = true + } + +SelectStmtFieldList: + FieldList + { + $$ = &ast.FieldList{Fields: $1.([]*ast.SelectField)} + } + +SelectStmtGroup: + /* EMPTY */ + { + $$ = nil + } +| GroupByClause + +/********************Set Statement*******************************/ +SetStmt: + "SET" VariableAssignmentList + { + $$ = &ast.SetStmt{Variables: $2.([]*ast.VariableAssignment)} + } + +SetExpr: + "ON" + { + $$ = ast.NewValueExpr("ON") + } +| ExprOrDefault + +EqOrAssignmentEq: + eq | assignmentEq + +VariableName: + Identifier +| Identifier '.' Identifier + { + $$ = $1 + "." + $3 + } + +VariableAssignment: + VariableName EqOrAssignmentEq SetExpr + { + $$ = &ast.VariableAssignment{Name: $1, Value: $3, IsSystem: true} + } +| "GLOBAL" VariableName EqOrAssignmentEq SetExpr + { + $$ = &ast.VariableAssignment{Name: $2, Value: $4, IsGlobal: true, IsSystem: true} + } +| "SESSION" VariableName EqOrAssignmentEq SetExpr + { + $$ = &ast.VariableAssignment{Name: $2, Value: $4, IsSystem: true} + } +| "LOCAL" VariableName EqOrAssignmentEq Expression + { + $$ = &ast.VariableAssignment{Name: $2, Value: $4, IsSystem: true} + } +| doubleAtIdentifier EqOrAssignmentEq SetExpr + { + v := strings.ToLower($1) + var isGlobal bool + if strings.HasPrefix(v, "@@global.") { + isGlobal = true + v = strings.TrimPrefix(v, "@@global.") + } else if strings.HasPrefix(v, "@@session.") { + v = strings.TrimPrefix(v, "@@session.") + } else if strings.HasPrefix(v, "@@local.") { + v = strings.TrimPrefix(v, "@@local.") + } else if strings.HasPrefix(v, "@@") { + v = strings.TrimPrefix(v, "@@") + } + $$ = &ast.VariableAssignment{Name: v, Value: $3, IsGlobal: isGlobal, IsSystem: true} + } +| singleAtIdentifier EqOrAssignmentEq Expression + { + v := $1 + v = strings.TrimPrefix(v, "@") + $$ = &ast.VariableAssignment{Name: v, Value: $3} + } + +CharsetNameOrDefault: + CharsetName + { + $$ = ast.NewValueExpr($1.(string)) + } +| "DEFAULT" + { + $$ = &ast.DefaultExpr{} + } + +CharsetName: + StringName + { + // Validate input charset name to keep the same behavior as parser of MySQL. + name, _, err := charset.GetCharsetInfo($1.(string)) + if err != nil { + yylex.AppendError(ErrUnknownCharacterSet.GenWithStackByArgs($1)) + return 1 + } + // Use charset name returned from charset.GetCharsetInfo(), + // to keep lower case of input for generated column restore. 
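+    // e.g. (illustrative) `CHARSET UTF8MB4` and `charset utf8mb4` both normalize to "utf8mb4" here.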
+ $$ = name + } +| binaryType + { + $$ = charset.CharsetBin + } + +CollationName: + StringName + { + info, err := charset.GetCollationByName($1.(string)) + if err != nil { + yylex.AppendError(err) + return 1 + } + $$ = info.Name + } + +VariableAssignmentList: + { + $$ = []*ast.VariableAssignment{} + } +| VariableAssignment + { + $$ = []*ast.VariableAssignment{$1.(*ast.VariableAssignment)} + } +| VariableAssignmentList ',' VariableAssignment + { + $$ = append($1.([]*ast.VariableAssignment), $3.(*ast.VariableAssignment)) + } + +Variable: + SystemVariable | UserVariable + +SystemVariable: + doubleAtIdentifier + { + v := strings.ToLower($1) + var isGlobal bool + explicitScope := true + if strings.HasPrefix(v, "@@global.") { + isGlobal = true + v = strings.TrimPrefix(v, "@@global.") + } else if strings.HasPrefix(v, "@@session.") { + v = strings.TrimPrefix(v, "@@session.") + } else if strings.HasPrefix(v, "@@local.") { + v = strings.TrimPrefix(v, "@@local.") + } else if strings.HasPrefix(v, "@@") { + v, explicitScope = strings.TrimPrefix(v, "@@"), false + } + $$ = &ast.VariableExpr{Name: v, IsGlobal: isGlobal, IsSystem: true, ExplicitScope: explicitScope} + } + +UserVariable: + singleAtIdentifier + { + v := $1 + v = strings.TrimPrefix(v, "@") + $$ = &ast.VariableExpr{Name: v, IsGlobal: false, IsSystem: false} + } + +/****************************Admin Statement*******************************/ +AdminStmt: + "ADMIN" "SHOW" "DDL" + { + $$ = &ast.AdminStmt{Tp: ast.AdminShowDDL} + } +| "ADMIN" "SHOW" "DDL" "JOBS" WhereClauseOptional + { + stmt := &ast.AdminStmt{Tp: ast.AdminShowDDLJobs} + if $5 != nil { + stmt.Where = $5.(ast.ExprNode) + } + $$ = stmt + } +| "ADMIN" "SHOW" "DDL" "JOBS" NUM WhereClauseOptional + { + stmt := &ast.AdminStmt{ + Tp: ast.AdminShowDDLJobs, + JobNumber: $5.(int64), + } + if $6 != nil { + stmt.Where = $6.(ast.ExprNode) + } + $$ = stmt + } + +/****************************Show Statement*******************************/ +ShowStmt: + "SHOW" ShowTargetFilterable ShowLikeOrWhereOpt + { + stmt := $2.(*ast.ShowStmt) + if $3 != nil { + stmt.Where = $3.(ast.ExprNode) + } + $$ = stmt + } +| "SHOW" "CREATE" "TABLE" TableName + { + $$ = &ast.ShowStmt{ + Tp: ast.ShowCreateTable, + Table: $4.(*ast.TableName), + } + } +| "SHOW" "CREATE" "DATABASE" IfNotExists DBName + { + $$ = &ast.ShowStmt{ + Tp: ast.ShowCreateDatabase, + IfNotExists: $4.(bool), + DBName: $5.(string), + } + } +| "SHOW" OptFull "PROCESSLIST" + { + $$ = &ast.ShowStmt{ + Tp: ast.ShowProcessList, + Full: $2.(bool), + } + } + +ShowIndexKwd: + "INDEX" +| "INDEXES" +| "KEYS" + +FromOrIn: +"FROM" | "IN" + +ShowTargetFilterable: + "DATABASES" + { + $$ = &ast.ShowStmt{Tp: ast.ShowDatabases} + } +| OptFull "TABLES" ShowDatabaseNameOpt + { + $$ = &ast.ShowStmt{ + Tp: ast.ShowTables, + DBName: $3.(string), + Full: $1.(bool), + } + } +| "WARNINGS" + { + $$ = &ast.ShowStmt{Tp: ast.ShowWarnings} + } +| "ERRORS" + { + $$ = &ast.ShowStmt{Tp: ast.ShowErrors} + } +| GlobalScope "VARIABLES" + { + $$ = &ast.ShowStmt{ + Tp: ast.ShowVariables, + GlobalScope: $1.(bool), + } + } + +ShowLikeOrWhereOpt: + { + $$ = nil + } +| "WHERE" Expression + { + $$ = $2 + } + +GlobalScope: + { + $$ = false + } +| "GLOBAL" + { + $$ = true + } +| "SESSION" + { + $$ = false + } + +OptFull: + { + $$ = false + } +| "FULL" + { + $$ = true + } + +ShowDatabaseNameOpt: + { + $$ = "" + } +| FromOrIn DBName + { + $$ = $2.(string) + } + +ShowTableAliasOpt: + FromOrIn TableName + { + $$ = $2.(*ast.TableName) + } + +TableNameListOpt: + %prec empty + { + $$ = []*ast.TableName{} + 
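+    // Minimal note (assuming `empty` is declared as a low-precedence pseudo-token earlier in
+    // this grammar): tagging the empty alternative with %prec empty makes the parser prefer
+    // shifting an actual TableNameList over reducing to the empty list when both are possible.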
} +| TableNameList + { + $$ = $1 + } + + +Statement: + EmptyStmt +| AdminStmt +| AlterTableStmt +| AnalyzeTableStmt +| BeginTransactionStmt +| CommitStmt +| DeleteFromStmt +| ExplainStmt +| CreateDatabaseStmt +| CreateIndexStmt +| CreateTableStmt +| DropDatabaseStmt +| DropIndexStmt +| DropTableStmt +| InsertIntoStmt +| RollbackStmt +| ReplaceIntoStmt +| SelectStmt +| SetStmt +| ShowStmt +| TruncateTableStmt +| UseStmt + +ExplainableStmt: + SelectStmt +| DeleteFromStmt +| InsertIntoStmt +| ReplaceIntoStmt + +StatementList: + Statement + { + if $1 != nil { + s := $1 + if lexer, ok := yylex.(stmtTexter); ok { + s.SetText(lexer.stmtText()) + } + parser.result = append(parser.result, s) + } + } +| StatementList ';' Statement + { + if $3 != nil { + s := $3 + if lexer, ok := yylex.(stmtTexter); ok { + s.SetText(lexer.stmtText()) + } + parser.result = append(parser.result, s) + } + } + +Constraint: + ConstraintKeywordOpt ConstraintElem + { + cst := $2.(*ast.Constraint) + if $1 != nil { + cst.Name = $1.(string) + } + $$ = cst + } + +TableElement: + ColumnDef + { + $$ = $1.(*ast.ColumnDef) + } +| Constraint + { + $$ = $1.(*ast.Constraint) + } + +TableElementList: + TableElement + { + if $1 != nil { + $$ = []interface{}{$1.(interface{})} + } else { + $$ = []interface{}{} + } + } +| TableElementList ',' TableElement + { + if $3 != nil { + $$ = append($1.([]interface{}), $3) + } else { + $$ = $1 + } + } + +TableElementListOpt: + /* empty */ %prec lowerThanCreateTableSelect + { + var columnDefs []*ast.ColumnDef + var constraints []*ast.Constraint + $$ = &ast.CreateTableStmt{ + Cols: columnDefs, + Constraints: constraints, + } + } +| '(' TableElementList ')' + { + tes := $2.([]interface {}) + var columnDefs []*ast.ColumnDef + var constraints []*ast.Constraint + for _, te := range tes { + switch te := te.(type) { + case *ast.ColumnDef: + columnDefs = append(columnDefs, te) + case *ast.Constraint: + constraints = append(constraints, te) + } + } + $$ = &ast.CreateTableStmt{ + Cols: columnDefs, + Constraints: constraints, + } + } + +OptTable: + {} +| "TABLE" + +TruncateTableStmt: + "TRUNCATE" OptTable TableName + { + $$ = &ast.TruncateTableStmt{Table: $3.(*ast.TableName)} + } + +/*************************************Type Begin***************************************/ +Type: + NumericType + { + $$ = $1 + } +| StringType + { + $$ = $1 + } +| DateAndTimeType + { + $$ = $1 + } + +NumericType: + IntegerType OptFieldLen FieldOpts + { + // TODO: check flen 0 + x := types.NewFieldType($1.(byte)) + x.Flen = $2.(int) + for _, o := range $3.([]*ast.TypeOpt) { + if o.IsUnsigned { + x.Flag |= mysql.UnsignedFlag + } + if o.IsZerofill { + x.Flag |= mysql.ZerofillFlag + } + } + $$ = x + } +| BooleanType FieldOpts + { + // TODO: check flen 0 + x := types.NewFieldType($1.(byte)) + x.Flen = 1 + for _, o := range $2.([]*ast.TypeOpt) { + if o.IsUnsigned { + x.Flag |= mysql.UnsignedFlag + } + if o.IsZerofill { + x.Flag |= mysql.ZerofillFlag + } + } + $$ = x + } +| FixedPointType FloatOpt FieldOpts + { + fopt := $2.(*ast.FloatOpt) + x := types.NewFieldType($1.(byte)) + x.Flen = fopt.Flen + x.Decimal = fopt.Decimal + for _, o := range $3.([]*ast.TypeOpt) { + if o.IsUnsigned { + x.Flag |= mysql.UnsignedFlag + } + if o.IsZerofill { + x.Flag |= mysql.ZerofillFlag + } + } + $$ = x + } +| FloatingPointType FloatOpt FieldOpts + { + fopt := $2.(*ast.FloatOpt) + x := types.NewFieldType($1.(byte)) + x.Flen = fopt.Flen + if x.Tp == mysql.TypeFloat && fopt.Decimal == types.UnspecifiedLength && x.Flen <= mysql.MaxDoublePrecisionLength { + if 
x.Flen > mysql.MaxFloatPrecisionLength { + x.Tp = mysql.TypeDouble + } + x.Flen = types.UnspecifiedLength + } + x.Decimal = fopt.Decimal + for _, o := range $3.([]*ast.TypeOpt) { + if o.IsUnsigned { + x.Flag |= mysql.UnsignedFlag + } + if o.IsZerofill { + x.Flag |= mysql.ZerofillFlag + } + } + $$ = x + } +| BitValueType OptFieldLen + { + x := types.NewFieldType($1.(byte)) + x.Flen = $2.(int) + if x.Flen == types.UnspecifiedLength { + x.Flen = 1 + } + $$ = x + } + +IntegerType: + "TINYINT" + { + $$ = mysql.TypeTiny + } +| "SMALLINT" + { + $$ = mysql.TypeShort + } +| "MEDIUMINT" + { + $$ = mysql.TypeInt24 + } +| "INT" + { + $$ = mysql.TypeLong + } +| "INT1" + { + $$ = mysql.TypeTiny + } +| "INT2" + { + $$ = mysql.TypeShort + } +| "INT3" + { + $$ = mysql.TypeInt24 + } +| "INT4" + { + $$ = mysql.TypeLong + } +| "INT8" + { + $$ = mysql.TypeLonglong + } +| "INTEGER" + { + $$ = mysql.TypeLong + } +| "BIGINT" + { + $$ = mysql.TypeLonglong + } + + +BooleanType: + "BOOL" + { + $$ = mysql.TypeTiny + } +| "BOOLEAN" + { + $$ = mysql.TypeTiny + } + +OptInteger: + {} +| "INTEGER" +| "INT" + +FixedPointType: + "DECIMAL" + { + $$ = mysql.TypeNewDecimal + } +| "NUMERIC" + { + $$ = mysql.TypeNewDecimal + } +| "FIXED" + { + $$ = mysql.TypeNewDecimal + } + +FloatingPointType: + "FLOAT" + { + $$ = mysql.TypeFloat + } +| "REAL" + { + if parser.lexer.GetSQLMode().HasRealAsFloatMode() { + $$ = mysql.TypeFloat + } else { + $$ = mysql.TypeDouble + } + } +| "DOUBLE" + { + $$ = mysql.TypeDouble + } +| "DOUBLE" "PRECISION" + { + $$ = mysql.TypeDouble + } + +BitValueType: + "BIT" + { + $$ = mysql.TypeBit + } + +StringType: + Char FieldLen OptBinary + { + x := types.NewFieldType(mysql.TypeString) + x.Flen = $2.(int) + x.Charset = $3.(*ast.OptBinary).Charset + if $3.(*ast.OptBinary).IsBinary { + x.Flag |= mysql.BinaryFlag + } + $$ = x + } +| Char OptBinary + { + x := types.NewFieldType(mysql.TypeString) + x.Charset = $2.(*ast.OptBinary).Charset + if $2.(*ast.OptBinary).IsBinary { + x.Flag |= mysql.BinaryFlag + } + $$ = x + } +| NChar FieldLen OptBinary + { + x := types.NewFieldType(mysql.TypeString) + x.Flen = $2.(int) + x.Charset = $3.(*ast.OptBinary).Charset + if $3.(*ast.OptBinary).IsBinary { + x.Flag |= mysql.BinaryFlag + } + $$ = x + } +| NChar OptBinary + { + x := types.NewFieldType(mysql.TypeString) + x.Charset = $2.(*ast.OptBinary).Charset + if $2.(*ast.OptBinary).IsBinary { + x.Flag |= mysql.BinaryFlag + } + $$ = x + } +| Varchar FieldLen OptBinary + { + x := types.NewFieldType(mysql.TypeVarchar) + x.Flen = $2.(int) + x.Charset = $3.(*ast.OptBinary).Charset + if $3.(*ast.OptBinary).IsBinary { + x.Flag |= mysql.BinaryFlag + } + $$ = x + } +| NVarchar FieldLen OptBinary + { + x := types.NewFieldType(mysql.TypeVarchar) + x.Flen = $2.(int) + x.Charset = $3.(*ast.OptBinary).Charset + if $3.(*ast.OptBinary).IsBinary { + x.Flag |= mysql.BinaryFlag + } + $$ = x + } +| "BINARY" OptFieldLen + { + x := types.NewFieldType(mysql.TypeString) + x.Flen = $2.(int) + x.Charset = charset.CharsetBin + x.Collate = charset.CharsetBin + x.Flag |= mysql.BinaryFlag + $$ = x + } +| "VARBINARY" FieldLen + { + x := types.NewFieldType(mysql.TypeVarchar) + x.Flen = $2.(int) + x.Charset = charset.CharsetBin + x.Collate = charset.CharsetBin + x.Flag |= mysql.BinaryFlag + $$ = x + } +| BlobType + { + x := $1.(*types.FieldType) + x.Charset = charset.CharsetBin + x.Collate = charset.CharsetBin + x.Flag |= mysql.BinaryFlag + $$ = $1.(*types.FieldType) + } +| TextType OptCharsetWithOptBinary + { + x := $1.(*types.FieldType) + x.Charset = 
$2.(*ast.OptBinary).Charset + if $2.(*ast.OptBinary).IsBinary { + x.Flag |= mysql.BinaryFlag + } + $$ = x + } +| "ENUM" '(' StringList ')' OptCharset + { + x := types.NewFieldType(mysql.TypeEnum) + x.Elems = $3.([]string) + x.Charset = $5.(string) + $$ = x + } +| "SET" '(' StringList ')' OptCharset + { + x := types.NewFieldType(mysql.TypeSet) + x.Elems = $3.([]string) + x.Charset = $5.(string) + $$ = x + } +| "JSON" + { + x := types.NewFieldType(mysql.TypeJSON) + x.Decimal = 0 + x.Charset = charset.CharsetBin + x.Collate = charset.CollationBin + $$ = x + } +| "LONG" Varchar OptCharsetWithOptBinary + { + x := types.NewFieldType(mysql.TypeMediumBlob) + x.Charset = $3.(*ast.OptBinary).Charset + if $3.(*ast.OptBinary).IsBinary { + x.Flag |= mysql.BinaryFlag + } + $$ = x + } +| "LONG" OptCharsetWithOptBinary + { + x := types.NewFieldType(mysql.TypeMediumBlob) + x.Charset = $2.(*ast.OptBinary).Charset + if $2.(*ast.OptBinary).IsBinary { + x.Flag |= mysql.BinaryFlag + } + $$ = x + } + +Char: + "CHARACTER" +| "CHAR" + +NChar: + "NCHAR" +| "NATIONAL" "CHARACTER" +| "NATIONAL" "CHAR" + +Varchar: + "CHARACTER" "VARYING" +| "CHAR" "VARYING" +| "VARCHAR" +| "VARCHARACTER" + +NVarchar: + "NATIONAL" "VARCHAR" +| "NATIONAL" "VARCHARACTER" +| "NVARCHAR" +| "NCHAR" "VARCHAR" +| "NCHAR" "VARCHARACTER" +| "NATIONAL" "CHARACTER" "VARYING" +| "NATIONAL" "CHAR" "VARYING" +| "NCHAR" "VARYING" + +Year: + "YEAR" +| "SQL_TSI_YEAR" + + +BlobType: + "TINYBLOB" + { + x := types.NewFieldType(mysql.TypeTinyBlob) + $$ = x + } +| "BLOB" OptFieldLen + { + x := types.NewFieldType(mysql.TypeBlob) + x.Flen = $2.(int) + $$ = x + } +| "MEDIUMBLOB" + { + x := types.NewFieldType(mysql.TypeMediumBlob) + $$ = x + } +| "LONGBLOB" + { + x := types.NewFieldType(mysql.TypeLongBlob) + $$ = x + } +| "LONG" "VARBINARY" + { + x := types.NewFieldType(mysql.TypeMediumBlob) + $$ = x + } + +TextType: + "TINYTEXT" + { + x := types.NewFieldType(mysql.TypeTinyBlob) + $$ = x + + } +| "TEXT" OptFieldLen + { + x := types.NewFieldType(mysql.TypeBlob) + x.Flen = $2.(int) + $$ = x + } +| "MEDIUMTEXT" + { + x := types.NewFieldType(mysql.TypeMediumBlob) + $$ = x + } +| "LONGTEXT" + { + x := types.NewFieldType(mysql.TypeLongBlob) + $$ = x + } + +OptCharsetWithOptBinary: + OptBinary + { + $$ = $1 + } +| "ASCII" + { + $$ = &ast.OptBinary{ + IsBinary: false, + Charset: charset.CharsetLatin1, + } + } +| "UNICODE" + { + name, _, err := charset.GetCharsetInfo("ucs2") + if err != nil { + yylex.AppendError(ErrUnknownCharacterSet.GenWithStackByArgs("ucs2")) + return 1 + } + $$ = &ast.OptBinary{ + IsBinary: false, + Charset: name, + } + } +| "BYTE" + { + $$ = &ast.OptBinary{ + IsBinary: false, + Charset: "", + } + } + +DateAndTimeType: + "DATE" + { + x := types.NewFieldType(mysql.TypeDate) + $$ = x + } +| "DATETIME" OptFieldLen + { + x := types.NewFieldType(mysql.TypeDatetime) + x.Flen = mysql.MaxDatetimeWidthNoFsp + x.Decimal = $2.(int) + if x.Decimal > 0 { + x.Flen = x.Flen + 1 + x.Decimal + } + $$ = x + } +| "TIMESTAMP" OptFieldLen + { + x := types.NewFieldType(mysql.TypeTimestamp) + x.Flen = mysql.MaxDatetimeWidthNoFsp + x.Decimal = $2.(int) + if x.Decimal > 0 { + x.Flen = x.Flen + 1 + x.Decimal + } + $$ = x + } +| "TIME" OptFieldLen + { + x := types.NewFieldType(mysql.TypeDuration) + x.Flen = mysql.MaxDurationWidthNoFsp + x.Decimal = $2.(int) + if x.Decimal > 0 { + x.Flen = x.Flen + 1 + x.Decimal + } + $$ = x + } +| Year OptFieldLen FieldOpts + { + x := types.NewFieldType(mysql.TypeYear) + x.Flen = $2.(int) + if x.Flen != types.UnspecifiedLength && x.Flen != 4 
{ + yylex.AppendError(ErrInvalidYearColumnLength.GenWithStackByArgs()) + return -1 + } + $$ = x + } + +FieldLen: + '(' LengthNum ')' + { + $$ = int($2.(uint64)) + } + +OptFieldLen: + { + $$ = types.UnspecifiedLength + } +| FieldLen + { + $$ = $1.(int) + } + +FieldOpt: + "UNSIGNED" + { + $$ = &ast.TypeOpt{IsUnsigned: true} + } +| "SIGNED" + { + $$ = &ast.TypeOpt{IsUnsigned: false} + } +| "ZEROFILL" + { + $$ = &ast.TypeOpt{IsZerofill: true, IsUnsigned: true} + } + +FieldOpts: + { + $$ = []*ast.TypeOpt{} + } +| FieldOpts FieldOpt + { + $$ = append($1.([]*ast.TypeOpt), $2.(*ast.TypeOpt)) + } + +FloatOpt: + { + $$ = &ast.FloatOpt{Flen: types.UnspecifiedLength, Decimal: types.UnspecifiedLength} + } +| FieldLen + { + $$ = &ast.FloatOpt{Flen: $1.(int), Decimal: types.UnspecifiedLength} + } +| Precision + { + $$ = $1.(*ast.FloatOpt) + } + +Precision: + '(' LengthNum ',' LengthNum ')' + { + $$ = &ast.FloatOpt{Flen: int($2.(uint64)), Decimal: int($4.(uint64))} + } + +OptBinMod: + { + $$ = false + } +| "BINARY" + { + $$ = true + } + +OptBinary: + { + $$ = &ast.OptBinary{ + IsBinary: false, + Charset: "", + } + } +| "BINARY" OptCharset + { + $$ = &ast.OptBinary{ + IsBinary: true, + Charset: $2.(string), + } + } +| CharsetKw CharsetName OptBinMod + { + $$ = &ast.OptBinary{ + IsBinary: $3.(bool), + Charset: $2.(string), + } + } + +OptCharset: + { + $$ = "" + } +| CharsetKw CharsetName + { + $$ = $2.(string) + } + +CharsetKw: + "CHARACTER" "SET" +| "CHARSET" +| "CHAR" "SET" + +OptCollate: + { + $$ = "" + } +| "COLLATE" CollationName + { + $$ = $2.(string) + } + +StringList: + stringLit + { + $$ = []string{$1} + } +| StringList ',' stringLit + { + $$ = append($1.([]string), $3) + } + +StringName: + stringLit + { + $$ = $1 + } +| Identifier + { + $$ = $1 + } + +UseStmt: + "USE" DBName + { + $$ = &ast.UseStmt{DBName: $2.(string)} + } + +WhereClause: + "WHERE" Expression + { + $$ = $2 + } + +WhereClauseOptional: + { + $$ = nil + } +| WhereClause + { + $$ = $1 + } + +CommaOpt: + {} +| ',' + {} diff --git a/parser/parser_test.go b/parser/parser_test.go new file mode 100644 index 0000000..701d39e --- /dev/null +++ b/parser/parser_test.go @@ -0,0 +1,1494 @@ +// Copyright 2015 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package parser_test + +import ( + "fmt" + "runtime" + "strings" + "testing" + + . 
"github.com/pingcap/check" + "github.com/pingcap/errors" + "github.com/pingcap/tidb/parser" + "github.com/pingcap/tidb/parser/ast" + "github.com/pingcap/tidb/parser/mysql" + "github.com/pingcap/tidb/parser/terror" + "github.com/pingcap/tidb/types" + driver "github.com/pingcap/tidb/types/parser_driver" +) + +func TestT(t *testing.T) { + CustomVerboseFlag = true + TestingT(t) +} + +var _ = Suite(&testParserSuite{}) + +type testParserSuite struct { +} + +func (s *testParserSuite) TestSimple(c *C) { + parser := parser.New() + + reservedKws := []string{ + "add", "all", "alter", "analyze", "and", "as", "asc", "between", "bigint", + "binary", "blob", "both", "by", "cascade", "case", "change", "character", "check", "collate", + "column", "constraint", "convert", "create", "cross", "current_date", "current_time", + "current_timestamp", "current_user", "database", "databases", "day_hour", "day_microsecond", + "day_minute", "day_second", "decimal", "default", "delete", "desc", "describe", + "distinct", "distinctRow", "div", "double", "drop", "dual", "else", "enclosed", "escaped", + "exists", "explain", "false", "float", "for", "force", "foreign", "from", + "fulltext", "grant", "group", "having", "hour_microsecond", "hour_minute", + "hour_second", "if", "ignore", "in", "index", "infile", "inner", "insert", "int", "into", "integer", + "interval", "is", "join", "key", "keys", "kill", "leading", "left", "like", "limit", "lines", "load", + "localtime", "localtimestamp", "lock", "longblob", "longtext", "mediumblob", "maxvalue", "mediumint", "mediumtext", + "minute_microsecond", "minute_second", "mod", "not", "no_write_to_binlog", "null", "numeric", + "on", "option", "optionally", "or", "order", "outer", "partition", "precision", "primary", "procedure", "range", "read", "real", + "references", "regexp", "rename", "repeat", "replace", "revoke", "restrict", "right", "rlike", + "schema", "schemas", "second_microsecond", "select", "set", "show", "smallint", + "starting", "table", "terminated", "then", "tinyblob", "tinyint", "tinytext", "to", + "trailing", "true", "union", "unique", "unlock", "unsigned", + "update", "use", "using", "utc_date", "values", "varbinary", "varchar", + "when", "where", "write", "xor", "year_month", "zerofill", + "generated", "virtual", "stored", "usage", + "delayed", "high_priority", "low_priority", + "cumeDist", "denseRank", "firstValue", "lag", "lastValue", "lead", "nthValue", "ntile", + "over", "percentRank", "rank", "row", "rows", "rowNumber", "window", "linear", + "match", "language", "until", + // TODO: support the following keywords + // "with", + } + for _, kw := range reservedKws { + src := fmt.Sprintf("SELECT * FROM db.%s;", kw) + _, err := parser.ParseOneStmt(src, "", "") + c.Assert(err, IsNil, Commentf("source %s", src)) + + src = fmt.Sprintf("SELECT * FROM %s.desc", kw) + _, err = parser.ParseOneStmt(src, "", "") + c.Assert(err, IsNil, Commentf("source %s", src)) + + src = fmt.Sprintf("SELECT t.%s FROM t", kw) + _, err = parser.ParseOneStmt(src, "", "") + c.Assert(err, IsNil, Commentf("source %s", src)) + } + + // Testcase for unreserved keywords + unreservedKws := []string{ + "auto_increment", "after", "begin", "bit", "bool", "boolean", "charset", "columns", "commit", + "date", "datediff", "datetime", "deallocate", "do", "from_days", "end", "engine", "engines", "execute", "extended", "first", "full", + "local", "names", "offset", "password", "prepare", "quick", "rollback", "session", "signed", + "start", "global", "tables", "tablespace", "text", "time", "timestamp", 
"tidb", "transaction", "truncate", "unknown", + "value", "warnings", "year", "now", "substr", "subpartition", "subpartitions", "substring", "mode", "any", "some", "user", "identified", + "collation", "comment", "avg_row_length", "checksum", "compression", "connection", "key_block_size", + "max_rows", "min_rows", "national", "quarter", "escape", "grants", "status", "fields", "triggers", + "delay_key_write", "isolation", "partitions", "repeatable", "committed", "uncommitted", "only", "serializable", "level", + "curtime", "variables", "dayname", "version", "btree", "hash", "row_format", "dynamic", "fixed", "compressed", + "compact", "redundant", "sql_no_cache sql_no_cache", "sql_cache sql_cache", "action", "round", + "enable", "disable", "reverse", "space", "privileges", "get_lock", "release_lock", "sleep", "no", "greatest", "least", + "binlog", "hex", "unhex", "function", "indexes", "from_unixtime", "processlist", "events", "less", "than", "timediff", + "ln", "log", "log2", "log10", "timestampdiff", "pi", "quote", "none", "super", "shared", "exclusive", + "always", "stats", "stats_meta", "stats_histogram", "stats_buckets", "stats_healthy", "tidb_version", "replication", "slave", "client", + "max_connections_per_hour", "max_queries_per_hour", "max_updates_per_hour", "max_user_connections", "event", "reload", "routine", "temporary", + "following", "preceding", "unbounded", "respect", "nulls", "current", "last", "against", "expansion", + } + for _, kw := range unreservedKws { + src := fmt.Sprintf("SELECT %s FROM tbl;", kw) + _, err := parser.ParseOneStmt(src, "", "") + c.Assert(err, IsNil, Commentf("source %s", src)) + } + + // Testcase for -- Comment and unary -- operator + src := "CREATE TABLE foo (a SMALLINT UNSIGNED, b INT UNSIGNED); -- foo\nSelect --1 from foo;" + stmts, _, err := parser.Parse(src, "", "") + c.Assert(err, IsNil) + c.Assert(stmts, HasLen, 2) + + // Testcase for /*! xx */ + // See http://dev.mysql.com/doc/refman/5.7/en/comments.html + // Fix: https://github.com/pingcap/tidb/issues/971 + src = "/*!40101 SET character_set_client = utf8 */;" + stmts, _, err = parser.Parse(src, "", "") + c.Assert(err, IsNil) + c.Assert(stmts, HasLen, 1) + stmt := stmts[0] + _, ok := stmt.(*ast.SetStmt) + c.Assert(ok, IsTrue) + + // for issue #2017 + src = "insert into blobtable (a) values ('/*! truncated */');" + stmt, err = parser.ParseOneStmt(src, "", "") + c.Assert(err, IsNil) + is, ok := stmt.(*ast.InsertStmt) + c.Assert(ok, IsTrue) + c.Assert(is.Lists, HasLen, 1) + c.Assert(is.Lists[0], HasLen, 1) + c.Assert(is.Lists[0][0].(ast.ValueExpr).GetDatumString(), Equals, "/*! 
truncated */") + + // for issue #961 + src = "create table t (c int key);" + st, err := parser.ParseOneStmt(src, "", "") + c.Assert(err, IsNil) + cs, ok := st.(*ast.CreateTableStmt) + c.Assert(ok, IsTrue) + c.Assert(cs.Cols, HasLen, 1) + c.Assert(cs.Cols[0].Options, HasLen, 1) + c.Assert(cs.Cols[0].Options[0].Tp, Equals, ast.ColumnOptionPrimaryKey) + + // for issue #4497 + src = "create table t1(a NVARCHAR(100));" + _, err = parser.ParseOneStmt(src, "", "") + c.Assert(err, IsNil) + + // for issue 2803 + src = "use quote;" + _, err = parser.ParseOneStmt(src, "", "") + c.Assert(err, IsNil) + + // issue #4354 + src = "select b'';" + _, err = parser.ParseOneStmt(src, "", "") + c.Assert(err, IsNil) + + src = "select B'';" + _, err = parser.ParseOneStmt(src, "", "") + c.Assert(err, IsNil) + + // src = "select 0b'';" + // _, err = parser.ParseOneStmt(src, "", "") + // c.Assert(err, NotNil) + + // for #4909, support numericType `signed` filedOpt. + src = "CREATE TABLE t(_sms smallint signed, _smu smallint unsigned);" + _, err = parser.ParseOneStmt(src, "", "") + c.Assert(err, IsNil) + + // for #7371, support NATIONAL CHARACTER + // reference link: https://dev.mysql.com/doc/refman/5.7/en/charset-national.html + src = "CREATE TABLE t(c1 NATIONAL CHARACTER(10));" + _, err = parser.ParseOneStmt(src, "", "") + c.Assert(err, IsNil) + + src = `CREATE TABLE t(a tinyint signed, + b smallint signed, + c mediumint signed, + d int signed, + e int1 signed, + f int2 signed, + g int3 signed, + h int4 signed, + i int8 signed, + j integer signed, + k bigint signed, + l bool signed, + m boolean signed + );` + + st, err = parser.ParseOneStmt(src, "", "") + c.Assert(err, IsNil) + ct, ok := st.(*ast.CreateTableStmt) + c.Assert(ok, IsTrue) + for _, col := range ct.Cols { + c.Assert(col.Tp.Flag&mysql.UnsignedFlag, Equals, uint(0)) + } + + // for issue #4006 + src = `insert into tb(v) (select v from tb);` + _, err = parser.ParseOneStmt(src, "", "") + c.Assert(err, IsNil) + + // for issue #9823 + src = "SELECT 9223372036854775807;" + st, err = parser.ParseOneStmt(src, "", "") + c.Assert(err, IsNil) + sel, ok := st.(*ast.SelectStmt) + c.Assert(ok, IsTrue) + expr := sel.Fields.Fields[0] + vExpr := expr.Expr.(*driver.ValueExpr) + c.Assert(vExpr.Kind(), Equals, types.KindInt64) + src = "SELECT 9223372036854775808;" + st, err = parser.ParseOneStmt(src, "", "") + c.Assert(err, IsNil) + sel, ok = st.(*ast.SelectStmt) + c.Assert(ok, IsTrue) + expr = sel.Fields.Fields[0] + vExpr = expr.Expr.(*driver.ValueExpr) + c.Assert(vExpr.Kind(), Equals, types.KindUint64) +} + +func (s *testParserSuite) TestSpecialComments(c *C) { + parser := parser.New() + + // 1. Make sure /*! ... */ respects the same SQL mode. + _, err := parser.ParseOneStmt(`SELECT /*! '\' */;`, "", "") + c.Assert(err, NotNil) + + parser.SetSQLMode(mysql.ModeNoBackslashEscapes) + st, err := parser.ParseOneStmt(`SELECT /*! '\' */;`, "", "") + c.Assert(err, IsNil) + c.Assert(st, FitsTypeOf, &ast.SelectStmt{}) + + // 2. Make sure multiple statements inside /*! ... */ will not crash + // (this is issue #330) + stmts, _, err := parser.Parse("/*! SET x = 1; SELECT 2 */", "", "") + c.Assert(err, IsNil) + c.Assert(stmts, HasLen, 2) + c.Assert(stmts[0], FitsTypeOf, &ast.SetStmt{}) + c.Assert(stmts[0].Text(), Equals, "SET x = 1;") + c.Assert(stmts[1], FitsTypeOf, &ast.SelectStmt{}) + c.Assert(stmts[1].Text(), Equals, "/*! SET x = 1; SELECT 2 */") + // ^ not sure if correct approach; having multiple statements in MySQL is a syntax error. + + // 3. 
Make sure invalid text won't cause infinite loop + // (this is issue #336) + st, err = parser.ParseOneStmt("SELECT /*+ 😅 */ SLEEP(1);", "", "") + c.Assert(err, IsNil) + sel, ok := st.(*ast.SelectStmt) + c.Assert(ok, IsTrue) + c.Assert(sel.TableHints, HasLen, 0) +} + +type testCase struct { + src string + ok bool + restore string +} + +type testErrMsgCase struct { + src string + ok bool + err error +} + +func (s *testParserSuite) RunTest(c *C, table []testCase) { + parser := parser.New() + for _, t := range table { + _, _, err := parser.Parse(t.src, "", "") + comment := Commentf("source %v", t.src) + if !t.ok { + c.Assert(err, NotNil, comment) + continue + } + c.Assert(err, IsNil, comment) + } +} + +func (s *testParserSuite) RunErrMsgTest(c *C, table []testErrMsgCase) { + parser := parser.New() + for _, t := range table { + _, _, err := parser.Parse(t.src, "", "") + comment := Commentf("source %v", t.src) + if t.err != nil { + c.Assert(terror.ErrorEqual(err, t.err), IsTrue, comment) + } else { + c.Assert(err, IsNil, comment) + } + } +} + +func (s *testParserSuite) TestDMLStmt(c *C) { + table := []testCase{ + {"", true, ""}, + {";", true, ""}, + {"INSERT INTO foo VALUES (1234)", true, "INSERT INTO `foo` VALUES (1234)"}, + {"INSERT INTO foo VALUES (1234, 5678)", true, "INSERT INTO `foo` VALUES (1234,5678)"}, + {"INSERT INTO t1 (SELECT * FROM t2)", true, "INSERT INTO `t1` SELECT * FROM `t2`"}, + // 15 + {"INSERT INTO foo VALUES (1 || 2)", true, "INSERT INTO `foo` VALUES (1 OR 2)"}, + {"INSERT INTO foo VALUES (1 | 2)", true, "INSERT INTO `foo` VALUES (1|2)"}, + {"INSERT INTO foo VALUES (false || true)", true, "INSERT INTO `foo` VALUES (FALSE OR TRUE)"}, + {"INSERT INTO foo VALUES (bar(5678))", true, "INSERT INTO `foo` VALUES (BAR(5678))"}, + // 20 + {"INSERT INTO foo VALUES ()", true, "INSERT INTO `foo` VALUES ()"}, + {"SELECT * FROM t", true, "SELECT * FROM `t`"}, + {"SELECT * FROM t AS u", true, "SELECT * FROM `t` AS `u`"}, + // 25 + {"SELECT * FROM t, v", true, "SELECT * FROM (`t`) JOIN `v`"}, + {"SELECT * FROM t AS u, v", true, "SELECT * FROM (`t` AS `u`) JOIN `v`"}, + {"SELECT * FROM t, v AS w", true, "SELECT * FROM (`t`) JOIN `v` AS `w`"}, + {"SELECT * FROM t AS u, v AS w", true, "SELECT * FROM (`t` AS `u`) JOIN `v` AS `w`"}, + {"SELECT * FROM foo, bar, foo", true, "SELECT * FROM ((`foo`) JOIN `bar`) JOIN `foo`"}, + // 30 + {"SELECT DISTINCTS * FROM t", false, ""}, + {"SELECT DISTINCT * FROM t", true, "SELECT DISTINCT * FROM `t`"}, + {"SELECT DISTINCTROW * FROM t", true, "SELECT DISTINCT * FROM `t`"}, + {"SELECT ALL * FROM t", true, "SELECT * FROM `t`"}, + {"SELECT DISTINCT ALL * FROM t", false, ""}, + {"SELECT DISTINCTROW ALL * FROM t", false, ""}, + {"INSERT INTO foo (a) VALUES (42)", true, "INSERT INTO `foo` (`a`) VALUES (42)"}, + {"INSERT INTO foo (a,) VALUES (42,)", false, ""}, + // 35 + {"INSERT INTO foo (a,b) VALUES (42,314)", true, "INSERT INTO `foo` (`a`,`b`) VALUES (42,314)"}, + {"INSERT INTO foo (a,b,) VALUES (42,314)", false, ""}, + {"INSERT INTO foo (a,b,) VALUES (42,314,)", false, ""}, + {"INSERT INTO foo () VALUES ()", true, "INSERT INTO `foo` () VALUES ()"}, + {"INSERT INTO foo VALUE ()", true, "INSERT INTO `foo` VALUES ()"}, + + // for issue 2402 + {"INSERT INTO tt VALUES (01000001783);", true, "INSERT INTO `tt` VALUES (1000001783)"}, + {"INSERT INTO tt VALUES (default);", true, "INSERT INTO `tt` VALUES (DEFAULT)"}, + + {"REPLACE INTO foo VALUES (1 || 2)", true, "REPLACE INTO `foo` VALUES (1 OR 2)"}, + {"REPLACE INTO foo VALUES (1 | 2)", true, "REPLACE INTO `foo` VALUES 
(1|2)"}, + {"REPLACE INTO foo VALUES (false || true)", true, "REPLACE INTO `foo` VALUES (FALSE OR TRUE)"}, + {"REPLACE INTO foo VALUES (bar(5678))", true, "REPLACE INTO `foo` VALUES (BAR(5678))"}, + {"REPLACE INTO foo VALUES ()", true, "REPLACE INTO `foo` VALUES ()"}, + {"REPLACE INTO foo (a,b) VALUES (42,314)", true, "REPLACE INTO `foo` (`a`,`b`) VALUES (42,314)"}, + {"REPLACE INTO foo (a,b,) VALUES (42,314)", false, ""}, + {"REPLACE INTO foo (a,b,) VALUES (42,314,)", false, ""}, + {"REPLACE INTO foo () VALUES ()", true, "REPLACE INTO `foo` () VALUES ()"}, + {"REPLACE INTO foo VALUE ()", true, "REPLACE INTO `foo` VALUES ()"}, + {"BEGIN", true, "START TRANSACTION"}, + // 45 + {"COMMIT", true, "COMMIT"}, + {"ROLLBACK", true, "ROLLBACK"}, + {`BEGIN; + INSERT INTO foo VALUES (42, 3.14); + INSERT INTO foo VALUES (-1, 2.78); + COMMIT;`, true, "START TRANSACTION; INSERT INTO `foo` VALUES (42,3.14); INSERT INTO `foo` VALUES (-1,2.78); COMMIT"}, + {`BEGIN; + INSERT INTO tmp SELECT * from bar; + SELECT * from tmp; + ROLLBACK;`, true, "START TRANSACTION; INSERT INTO `tmp` SELECT * FROM `bar`; SELECT * FROM `tmp`; ROLLBACK"}, + + // qualified select + {"SELECT a.b.c FROM t", true, "SELECT `a`.`b`.`c` FROM `t`"}, + {"SELECT a.b.*.c FROM t", false, ""}, + {"SELECT a.b.* FROM t", true, "SELECT `a`.`b`.* FROM `t`"}, + {"SELECT a FROM t", true, "SELECT `a` FROM `t`"}, + {"SELECT a.b.c.d FROM t", false, ""}, + + // from join + {"SELECT * from t1, t2, t3", true, "SELECT * FROM ((`t1`) JOIN `t2`) JOIN `t3`"}, + {"select * from t1 join t2 left join t3 on t2.id = t3.id", true, "SELECT * FROM (`t1` JOIN `t2`) LEFT JOIN `t3` ON `t2`.`id`=`t3`.`id`"}, + {"select * from t1 right join t2 on t1.id = t2.id left join t3 on t3.id = t2.id", true, "SELECT * FROM (`t1` RIGHT JOIN `t2` ON `t1`.`id`=`t2`.`id`) LEFT JOIN `t3` ON `t3`.`id`=`t2`.`id`"}, + {"select * from t1 right join t2 on t1.id = t2.id left join t3", false, ""}, + + // delete statement + // single table syntax + {"DELETE from t1", true, "DELETE FROM `t1`"}, + {"DELETE from t1.*", false, ""}, + {"DELETE LOW_priORITY from t1", true, "DELETE LOW_PRIORITY FROM `t1`"}, + {"DELETE quick from t1", true, "DELETE QUICK FROM `t1`"}, + {"DELETE FROM t1 WHERE t1.a > 0 ORDER BY t1.a", true, "DELETE FROM `t1` WHERE `t1`.`a`>0 ORDER BY `t1`.`a`"}, + {"delete from t1 where a=26", true, "DELETE FROM `t1` WHERE `a`=26"}, + {"DELETE from t1 where a=1 limit 1", true, "DELETE FROM `t1` WHERE `a`=1 LIMIT 1"}, + {"DELETE FROM t1 WHERE t1.a > 0 ORDER BY t1.a LIMIT 1", true, "DELETE FROM `t1` WHERE `t1`.`a`>0 ORDER BY `t1`.`a` LIMIT 1"}, + {"DELETE FROM x.y z WHERE z.a > 0", true, "DELETE FROM `x`.`y` AS `z` WHERE `z`.`a`>0"}, + {"DELETE FROM t1 AS w WHERE a > 0", true, "DELETE FROM `t1` AS `w` WHERE `a`>0"}, + + // for fail case + {"DELETE t1, t2 FROM t1 INNER JOIN t2 INNER JOIN t3 WHERE t1.id=t2.id AND t2.id=t3.id limit 10;", false, ""}, + {"DELETE t1, t2 FROM t1 INNER JOIN t2 INNER JOIN t3 WHERE t1.id=t2.id AND t2.id=t3.id order by t1.id;", false, ""}, + + // for admin + {"admin show ddl;", true, "ADMIN SHOW DDL"}, + {"admin show ddl jobs;", true, "ADMIN SHOW DDL JOBS"}, + {"admin show ddl jobs where id > 0;", true, "ADMIN SHOW DDL JOBS WHERE `id`>0"}, + {"admin show ddl jobs 20 where id=0;", true, "ADMIN SHOW DDL JOBS 20 WHERE `id`=0"}, + {"admin show ddl jobs -1;", false, ""}, + + // for insert ... 
set + {"INSERT INTO t SET a=1,b=2", true, "INSERT INTO `t` SET `a`=1,`b`=2"}, + {"INSERT INTO t (a) SET a=1", false, ""}, + + // for select with where clause + {"SELECT * FROM t WHERE 1 = 1", true, "SELECT * FROM `t` WHERE 1=1"}, + + // for dual + {"select 1 from dual", true, "SELECT 1"}, + {"select 1 from dual limit 1", true, "SELECT 1 LIMIT 1"}, + {"select 1 as a from dual order by a", true, "SELECT 1 AS `a` ORDER BY `a`"}, + {"select 1 order by 1", true, "SELECT 1 ORDER BY 1"}, + + // for https://github.com/pingcap/tidb/issues/1050 + {`SELECT /*!40001 SQL_NO_CACHE */ * FROM test WHERE 1 limit 0, 2000;`, true, "SELECT SQL_NO_CACHE * FROM `test` WHERE 1 LIMIT 0,2000"}, + + {`ANALYZE TABLE t`, true, "ANALYZE TABLE `t`"}, + + // for comments + {`/** 20180417 **/ show databases;`, true, "SHOW DATABASES"}, + {`/* 20180417 **/ show databases;`, true, "SHOW DATABASES"}, + {`/** 20180417 */ show databases;`, true, "SHOW DATABASES"}, + {`/** 20180417 ******/ show databases;`, true, "SHOW DATABASES"}, + } + s.RunTest(c, table) +} + +func (s *testParserSuite) TestSetVariable(c *C) { + table := []struct { + Input string + Name string + IsGlobal bool + IsSystem bool + }{ + + // Set system variable xx.xx, although xx.xx isn't a system variable, the parser should accept it. + {"set xx.xx = 666", "xx.xx", false, true}, + // Set session system variable xx.xx + {"set session xx.xx = 666", "xx.xx", false, true}, + {"set global xx.xx = 666", "xx.xx", true, true}, + + {"set @@xx.xx = 666", "xx.xx", false, true}, + {"set @@session.xx.xx = 666", "xx.xx", false, true}, + {"set @@global.xx.xx = 666", "xx.xx", true, true}, + + // Set user defined variable xx.xx + {"set @xx.xx = 666", "xx.xx", false, false}, + } + + parser := parser.New() + for _, t := range table { + stmt, err := parser.ParseOneStmt(t.Input, "", "") + c.Assert(err, IsNil) + + setStmt, ok := stmt.(*ast.SetStmt) + c.Assert(ok, IsTrue) + c.Assert(setStmt.Variables, HasLen, 1) + + v := setStmt.Variables[0] + c.Assert(v.Name, Equals, t.Name) + c.Assert(v.IsGlobal, Equals, t.IsGlobal) + c.Assert(v.IsSystem, Equals, t.IsSystem) + } + + _, err := parser.ParseOneStmt("set xx.xx.xx = 666", "", "") + c.Assert(err, NotNil) +} + +func (s *testParserSuite) TestExpression(c *C) { + table := []testCase{ + // sign expression + {"SELECT ++1", true, "SELECT ++1"}, + {"SELECT -*1", false, "SELECT -*1"}, + {"SELECT -+1", true, "SELECT -+1"}, + {"SELECT -1", true, "SELECT -1"}, + {"SELECT --1", true, "SELECT --1"}, + + // for string literal + {`select '''a''', """a"""`, true, "SELECT '''a''','\"a\"'"}, + {`select ''a''`, false, ""}, + {`select ""a""`, false, ""}, + {`select '''a''';`, true, "SELECT '''a'''"}, + {`select '\'a\'';`, true, "SELECT '''a'''"}, + {`select "\"a\"";`, true, "SELECT '\"a\"'"}, + {`select """a""";`, true, "SELECT '\"a\"'"}, + {`select _utf8"string";`, true, "SELECT _UTF8'string'"}, + {`select _binary"string";`, true, "SELECT _BINARY'string'"}, + {"select N'string'", true, "SELECT _UTF8'string'"}, + {"select n'string'", true, "SELECT _UTF8'string'"}, + // for comparison + {"select 1 <=> 0, 1 <=> null, 1 = null", true, "SELECT 1<=>0,1<=>NULL,1=NULL"}, + // for date literal + {"select date'1989-09-10'", true, "SELECT DATE '1989-09-10'"}, + {"select date 19890910", false, ""}, + // for time literal + {"select time '00:00:00.111'", true, "SELECT TIME '00:00:00.111'"}, + {"select time 19890910", false, ""}, + // for timestamp literal + {"select timestamp '1989-09-10 11:11:11'", true, "SELECT TIMESTAMP '1989-09-10 11:11:11'"}, + {"select timestamp 
19890910", false, ""}, + + // The ODBC syntax for time/date/timestamp literal. + // See: https://dev.mysql.com/doc/refman/5.7/en/date-and-time-literals.html + {"select {ts '1989-09-10 11:11:11'}", true, "SELECT '1989-09-10 11:11:11'"}, + {"select {d '1989-09-10'}", true, "SELECT '1989-09-10'"}, + {"select {t '00:00:00.111'}", true, "SELECT '00:00:00.111'"}, + // If the identifier is not in (t, d, ts), we just ignore it and consider the following expression as the value. + // See: https://dev.mysql.com/doc/refman/5.7/en/expressions.html + {"select {ts123 '1989-09-10 11:11:11'}", true, "SELECT '1989-09-10 11:11:11'"}, + {"select {ts123 123}", true, "SELECT 123"}, + {"select {ts123 1 xor 1}", true, "SELECT 1 XOR 1"}, + } + s.RunTest(c, table) +} + +func (s *testParserSuite) TestBuiltin(c *C) { + table := []testCase{ + // for builtin functions + {"SELECT POW(1, 2)", true, "SELECT POW(1, 2)"}, + {"SELECT POW(1, 2, 1)", true, "SELECT POW(1, 2, 1)"}, // illegal number of arguments shall pass too + {"SELECT POW(1, 0.5)", true, "SELECT POW(1, 0.5)"}, + {"SELECT POW(1, -1)", true, "SELECT POW(1, -1)"}, + {"SELECT POW(-1, 1)", true, "SELECT POW(-1, 1)"}, + {"SELECT RAND();", true, "SELECT RAND()"}, + {"SELECT RAND(1);", true, "SELECT RAND(1)"}, + {"SELECT MOD(10, 2);", true, "SELECT 10%2"}, + {"SELECT ROUND(-1.23);", true, "SELECT ROUND(-1.23)"}, + {"SELECT ROUND(1.23, 1);", true, "SELECT ROUND(1.23, 1)"}, + {"SELECT ROUND(1.23, 1, 1);", true, "SELECT ROUND(1.23, 1, 1)"}, + {"SELECT CEIL(-1.23);", true, "SELECT CEIL(-1.23)"}, + {"SELECT CEILING(1.23);", true, "SELECT CEILING(1.23)"}, + {"SELECT FLOOR(-1.23);", true, "SELECT FLOOR(-1.23)"}, + {"SELECT LN(1);", true, "SELECT LN(1)"}, + {"SELECT LN(1, 2);", true, "SELECT LN(1, 2)"}, + {"SELECT LOG(-2);", true, "SELECT LOG(-2)"}, + {"SELECT LOG(2, 65536);", true, "SELECT LOG(2, 65536)"}, + {"SELECT LOG(2, 65536, 1);", true, "SELECT LOG(2, 65536, 1)"}, + {"SELECT LOG2(2);", true, "SELECT LOG2(2)"}, + {"SELECT LOG2(2, 2);", true, "SELECT LOG2(2, 2)"}, + {"SELECT LOG10(10);", true, "SELECT LOG10(10)"}, + {"SELECT LOG10(10, 1);", true, "SELECT LOG10(10, 1)"}, + {"SELECT ABS(10, 1);", true, "SELECT ABS(10, 1)"}, + {"SELECT ABS(10);", true, "SELECT ABS(10)"}, + {"SELECT ABS();", true, "SELECT ABS()"}, + {"SELECT CONV(10+'10'+'10'+X'0a',10,10);", true, "SELECT CONV(10+'10'+'10'+x'0a', 10, 10)"}, + {"SELECT CONV();", true, "SELECT CONV()"}, + {"SELECT CRC32('MySQL');", true, "SELECT CRC32('MySQL')"}, + {"SELECT CRC32();", true, "SELECT CRC32()"}, + {"SELECT SIGN();", true, "SELECT SIGN()"}, + {"SELECT SIGN(0);", true, "SELECT SIGN(0)"}, + {"SELECT SQRT(0);", true, "SELECT SQRT(0)"}, + {"SELECT SQRT();", true, "SELECT SQRT()"}, + {"SELECT ACOS();", true, "SELECT ACOS()"}, + {"SELECT ACOS(1);", true, "SELECT ACOS(1)"}, + {"SELECT ACOS(1, 2);", true, "SELECT ACOS(1, 2)"}, + {"SELECT ASIN();", true, "SELECT ASIN()"}, + {"SELECT ASIN(1);", true, "SELECT ASIN(1)"}, + {"SELECT ASIN(1, 2);", true, "SELECT ASIN(1, 2)"}, + {"SELECT ATAN(0), ATAN(1), ATAN(1, 2);", true, "SELECT ATAN(0),ATAN(1),ATAN(1, 2)"}, + {"SELECT ATAN2(), ATAN2(1,2);", true, "SELECT ATAN2(),ATAN2(1, 2)"}, + {"SELECT COS(0);", true, "SELECT COS(0)"}, + {"SELECT COS(1);", true, "SELECT COS(1)"}, + {"SELECT COS(1, 2);", true, "SELECT COS(1, 2)"}, + {"SELECT COT();", true, "SELECT COT()"}, + {"SELECT COT(1);", true, "SELECT COT(1)"}, + {"SELECT COT(1, 2);", true, "SELECT COT(1, 2)"}, + {"SELECT DEGREES();", true, "SELECT DEGREES()"}, + {"SELECT DEGREES(0);", true, "SELECT DEGREES(0)"}, + {"SELECT EXP();", 
true, "SELECT EXP()"}, + {"SELECT EXP(1);", true, "SELECT EXP(1)"}, + {"SELECT PI();", true, "SELECT PI()"}, + {"SELECT PI(1);", true, "SELECT PI(1)"}, + {"SELECT RADIANS();", true, "SELECT RADIANS()"}, + {"SELECT RADIANS(1);", true, "SELECT RADIANS(1)"}, + {"SELECT SIN();", true, "SELECT SIN()"}, + {"SELECT SIN(1);", true, "SELECT SIN(1)"}, + {"SELECT TAN(1);", true, "SELECT TAN(1)"}, + {"SELECT TAN();", true, "SELECT TAN()"}, + {"SELECT TRUNCATE(1.223,1);", true, "SELECT TRUNCATE(1.223, 1)"}, + {"SELECT TRUNCATE();", true, "SELECT TRUNCATE()"}, + + {"SELECT SUBSTR('Quadratically',5);", true, "SELECT SUBSTR('Quadratically', 5)"}, + {"SELECT SUBSTR('Quadratically',5, 3);", true, "SELECT SUBSTR('Quadratically', 5, 3)"}, + {"SELECT SUBSTR('Quadratically' FROM 5);", true, "SELECT SUBSTR('Quadratically', 5)"}, + {"SELECT SUBSTR('Quadratically' FROM 5 FOR 3);", true, "SELECT SUBSTR('Quadratically', 5, 3)"}, + + {"SELECT SUBSTRING('Quadratically',5);", true, "SELECT SUBSTRING('Quadratically', 5)"}, + {"SELECT SUBSTRING('Quadratically',5, 3);", true, "SELECT SUBSTRING('Quadratically', 5, 3)"}, + {"SELECT SUBSTRING('Quadratically' FROM 5);", true, "SELECT SUBSTRING('Quadratically', 5)"}, + {"SELECT SUBSTRING('Quadratically' FROM 5 FOR 3);", true, "SELECT SUBSTRING('Quadratically', 5, 3)"}, + + {"SELECT LEAST(), LEAST(1, 2, 3);", true, "SELECT LEAST(),LEAST(1, 2, 3)"}, + // information functions + {"SELECT DATABASE();", true, "SELECT DATABASE()"}, + {"SELECT SCHEMA();", true, "SELECT SCHEMA()"}, + {"SELECT USER();", true, "SELECT USER()"}, + {"SELECT USER(1);", true, "SELECT USER(1)"}, + {"SELECT CURRENT_USER();", true, "SELECT CURRENT_USER()"}, + {"SELECT CURRENT_ROLE();", true, "SELECT CURRENT_ROLE()"}, + {"SELECT CURRENT_USER;", true, "SELECT CURRENT_USER()"}, + {"SELECT CONNECTION_ID();", true, "SELECT CONNECTION_ID()"}, + {"SELECT VERSION();", true, "SELECT VERSION()"}, + {"SELECT BENCHMARK(1000000, AES_ENCRYPT('text',UNHEX('F3229A0B371ED2D9441B830D21A390C3')));", true, "SELECT BENCHMARK(1000000, AES_ENCRYPT('text', UNHEX('F3229A0B371ED2D9441B830D21A390C3')))"}, + {"SELECT BENCHMARK(AES_ENCRYPT('text',UNHEX('F3229A0B371ED2D9441B830D21A390C3')));", true, "SELECT BENCHMARK(AES_ENCRYPT('text', UNHEX('F3229A0B371ED2D9441B830D21A390C3')))"}, + {"SELECT CHARSET('abc');", true, "SELECT CHARSET('abc')"}, + {"SELECT COERCIBILITY('abc');", true, "SELECT COERCIBILITY('abc')"}, + {"SELECT COERCIBILITY('abc', 'a');", true, "SELECT COERCIBILITY('abc', 'a')"}, + {"SELECT COLLATION('abc');", true, "SELECT COLLATION('abc')"}, + {"SELECT ROW_COUNT();", true, "SELECT ROW_COUNT()"}, + {"SELECT SESSION_USER();", true, "SELECT SESSION_USER()"}, + {"SELECT SYSTEM_USER();", true, "SELECT SYSTEM_USER()"}, + + {"SELECT SUBSTRING_INDEX('www.mysql.com', '.', 2);", true, "SELECT SUBSTRING_INDEX('www.mysql.com', '.', 2)"}, + {"SELECT SUBSTRING_INDEX('www.mysql.com', '.', -2);", true, "SELECT SUBSTRING_INDEX('www.mysql.com', '.', -2)"}, + + {`SELECT ASCII(), ASCII(""), ASCII("A"), ASCII(1);`, true, "SELECT ASCII(),ASCII(''),ASCII('A'),ASCII(1)"}, + + {`SELECT LOWER("A"), UPPER("a")`, true, "SELECT LOWER('A'),UPPER('a')"}, + {`SELECT LCASE("A"), UCASE("a")`, true, "SELECT LCASE('A'),UCASE('a')"}, + + {`SELECT REPLACE('www.mysql.com', 'w', 'Ww')`, true, "SELECT REPLACE('www.mysql.com', 'w', 'Ww')"}, + + {`SELECT LOCATE('bar', 'foobarbar');`, true, "SELECT LOCATE('bar', 'foobarbar')"}, + {`SELECT LOCATE('bar', 'foobarbar', 5);`, true, "SELECT LOCATE('bar', 'foobarbar', 5)"}, + + {`SELECT tidb_version();`, true, "SELECT 
TIDB_VERSION()"}, + {`SELECT tidb_is_ddl_owner();`, true, "SELECT TIDB_IS_DDL_OWNER()"}, + {`SELECT tidb_decode_plan();`, true, "SELECT TIDB_DECODE_PLAN()"}, + {`SELECT tidb_decode_key('abc');`, true, "SELECT TIDB_DECODE_KEY('abc')"}, + + // for time fsp + {"CREATE TABLE t( c1 TIME(2), c2 DATETIME(2), c3 TIMESTAMP(2) );", true, "CREATE TABLE `t` (`c1` TIME(2),`c2` DATETIME(2),`c3` TIMESTAMP(2))"}, + + // for row + {"select row(1)", false, ""}, + {"select row(1, 1,)", false, ""}, + {"select (1, 1,)", false, ""}, + {"select row(1, 1) > row(1, 1), row(1, 1, 1) > row(1, 1, 1)", true, "SELECT ROW(1,1)>ROW(1,1),ROW(1,1,1)>ROW(1,1,1)"}, + {"Select (1, 1) > (1, 1)", true, "SELECT ROW(1,1)>ROW(1,1)"}, + {"create table t (`row` int)", true, "CREATE TABLE `t` (`row` INT)"}, + {"create table t (row int)", false, ""}, + + // for last_insert_id + {"SELECT last_insert_id();", true, "SELECT LAST_INSERT_ID()"}, + {"SELECT last_insert_id(1);", true, "SELECT LAST_INSERT_ID(1)"}, + + // for bit_count + {`SELECT BIT_COUNT(1);`, true, "SELECT BIT_COUNT(1)"}, + + // select time + {"select current_timestamp", true, "SELECT CURRENT_TIMESTAMP()"}, + {"select current_timestamp()", true, "SELECT CURRENT_TIMESTAMP()"}, + {"select current_timestamp(6)", true, "SELECT CURRENT_TIMESTAMP(6)"}, + {"select current_timestamp(null)", false, ""}, + {"select current_timestamp(-1)", false, ""}, + {"select current_timestamp(1.0)", false, ""}, + {"select current_timestamp('2')", false, ""}, + {"select now()", true, "SELECT NOW()"}, + {"select now(6)", true, "SELECT NOW(6)"}, + {"select sysdate(), sysdate(6)", true, "SELECT SYSDATE(),SYSDATE(6)"}, + {"SELECT time('01:02:03');", true, "SELECT TIME('01:02:03')"}, + {"SELECT time('01:02:03.1')", true, "SELECT TIME('01:02:03.1')"}, + {"SELECT time('20.1')", true, "SELECT TIME('20.1')"}, + // select current_time + {"select current_time", true, "SELECT CURRENT_TIME()"}, + {"select current_time()", true, "SELECT CURRENT_TIME()"}, + {"select current_time(6)", true, "SELECT CURRENT_TIME(6)"}, + {"select current_time(-1)", false, ""}, + {"select current_time(1.0)", false, ""}, + {"select current_time('1')", false, ""}, + {"select current_time(null)", false, ""}, + {"select curtime()", true, "SELECT CURTIME()"}, + {"select curtime(6)", true, "SELECT CURTIME(6)"}, + {"select curtime(-1)", false, ""}, + {"select curtime(1.0)", false, ""}, + {"select curtime('1')", false, ""}, + {"select curtime(null)", false, ""}, + + // select utc_timestamp + {"select utc_timestamp", true, "SELECT UTC_TIMESTAMP()"}, + {"select utc_timestamp()", true, "SELECT UTC_TIMESTAMP()"}, + {"select utc_timestamp(6)", true, "SELECT UTC_TIMESTAMP(6)"}, + {"select utc_timestamp(-1)", false, ""}, + {"select utc_timestamp(1.0)", false, ""}, + {"select utc_timestamp('1')", false, ""}, + {"select utc_timestamp(null)", false, ""}, + + // select utc_time + {"select utc_time", true, "SELECT UTC_TIME()"}, + {"select utc_time()", true, "SELECT UTC_TIME()"}, + {"select utc_time(6)", true, "SELECT UTC_TIME(6)"}, + {"select utc_time(-1)", false, ""}, + {"select utc_time(1.0)", false, ""}, + {"select utc_time('1')", false, ""}, + {"select utc_time(null)", false, ""}, + + // for microsecond, second, minute, hour + {"SELECT MICROSECOND('2009-12-31 23:59:59.000010');", true, "SELECT MICROSECOND('2009-12-31 23:59:59.000010')"}, + {"SELECT SECOND('10:05:03');", true, "SELECT SECOND('10:05:03')"}, + {"SELECT MINUTE('2008-02-03 10:05:03');", true, "SELECT MINUTE('2008-02-03 10:05:03')"}, + {"SELECT HOUR(), HOUR('10:05:03');", true, "SELECT 
HOUR(),HOUR('10:05:03')"}, + + // for date, day, weekday + {"SELECT CURRENT_DATE, CURRENT_DATE(), CURDATE()", true, "SELECT CURRENT_DATE(),CURRENT_DATE(),CURDATE()"}, + {"SELECT CURRENT_DATE, CURRENT_DATE(), CURDATE(1)", false, ""}, + {"SELECT DATEDIFF('2003-12-31', '2003-12-30');", true, "SELECT DATEDIFF('2003-12-31', '2003-12-30')"}, + {"SELECT DATE('2003-12-31 01:02:03');", true, "SELECT DATE('2003-12-31 01:02:03')"}, + {"SELECT DATE();", true, "SELECT DATE()"}, + {"SELECT DATE('2003-12-31 01:02:03', '');", true, "SELECT DATE('2003-12-31 01:02:03', '')"}, + {`SELECT DATE_FORMAT('2003-12-31 01:02:03', '%W %M %Y');`, true, "SELECT DATE_FORMAT('2003-12-31 01:02:03', '%W %M %Y')"}, + {"SELECT DAY('2007-02-03');", true, "SELECT DAY('2007-02-03')"}, + {"SELECT DAYOFMONTH('2007-02-03');", true, "SELECT DAYOFMONTH('2007-02-03')"}, + {"SELECT DAYOFWEEK('2007-02-03');", true, "SELECT DAYOFWEEK('2007-02-03')"}, + {"SELECT DAYOFYEAR('2007-02-03');", true, "SELECT DAYOFYEAR('2007-02-03')"}, + {"SELECT DAYNAME('2007-02-03');", true, "SELECT DAYNAME('2007-02-03')"}, + {"SELECT FROM_DAYS(1423);", true, "SELECT FROM_DAYS(1423)"}, + {"SELECT WEEKDAY('2007-02-03');", true, "SELECT WEEKDAY('2007-02-03')"}, + + // for utc_date + {"SELECT UTC_DATE, UTC_DATE();", true, "SELECT UTC_DATE(),UTC_DATE()"}, + {"SELECT UTC_DATE(), UTC_DATE()+0", true, "SELECT UTC_DATE(),UTC_DATE()+0"}, + + // for week, month, year + {"SELECT WEEK();", true, "SELECT WEEK()"}, + {"SELECT WEEK('2007-02-03');", true, "SELECT WEEK('2007-02-03')"}, + {"SELECT WEEK('2007-02-03', 0);", true, "SELECT WEEK('2007-02-03', 0)"}, + {"SELECT WEEKOFYEAR('2007-02-03');", true, "SELECT WEEKOFYEAR('2007-02-03')"}, + {"SELECT MONTH('2007-02-03');", true, "SELECT MONTH('2007-02-03')"}, + {"SELECT MONTHNAME('2007-02-03');", true, "SELECT MONTHNAME('2007-02-03')"}, + {"SELECT YEAR('2007-02-03');", true, "SELECT YEAR('2007-02-03')"}, + {"SELECT YEARWEEK('2007-02-03');", true, "SELECT YEARWEEK('2007-02-03')"}, + {"SELECT YEARWEEK('2007-02-03', 0);", true, "SELECT YEARWEEK('2007-02-03', 0)"}, + + // for ADDTIME, SUBTIME + {"SELECT ADDTIME('01:00:00.999999', '02:00:00.999998');", true, "SELECT ADDTIME('01:00:00.999999', '02:00:00.999998')"}, + {"SELECT ADDTIME('02:00:00.999998');", true, "SELECT ADDTIME('02:00:00.999998')"}, + {"SELECT ADDTIME();", true, "SELECT ADDTIME()"}, + {"SELECT SUBTIME('01:00:00.999999', '02:00:00.999998');", true, "SELECT SUBTIME('01:00:00.999999', '02:00:00.999998')"}, + + // for CONVERT_TZ + {"SELECT CONVERT_TZ();", true, "SELECT CONVERT_TZ()"}, + {"SELECT CONVERT_TZ('2004-01-01 12:00:00','+00:00','+10:00');", true, "SELECT CONVERT_TZ('2004-01-01 12:00:00', '+00:00', '+10:00')"}, + {"SELECT CONVERT_TZ('2004-01-01 12:00:00','+00:00','+10:00', '+10:00');", true, "SELECT CONVERT_TZ('2004-01-01 12:00:00', '+00:00', '+10:00', '+10:00')"}, + // for LOCALTIME, LOCALTIMESTAMP + {"SELECT LOCALTIME(), LOCALTIME(1)", true, "SELECT LOCALTIME(),LOCALTIME(1)"}, + {"SELECT LOCALTIMESTAMP(), LOCALTIMESTAMP(2)", true, "SELECT LOCALTIMESTAMP(),LOCALTIMESTAMP(2)"}, + + // for MAKEDATE, MAKETIME + {"SELECT MAKEDATE(2011,31);", true, "SELECT MAKEDATE(2011, 31)"}, + {"SELECT MAKETIME(12,15,30);", true, "SELECT MAKETIME(12, 15, 30)"}, + {"SELECT MAKEDATE();", true, "SELECT MAKEDATE()"}, + {"SELECT MAKETIME();", true, "SELECT MAKETIME()"}, + + // for PERIOD_ADD, PERIOD_DIFF + {"SELECT PERIOD_ADD(200801,2)", true, "SELECT PERIOD_ADD(200801, 2)"}, + {"SELECT PERIOD_DIFF(200802,200703)", true, "SELECT PERIOD_DIFF(200802, 200703)"}, + + // for QUARTER + 
{"SELECT QUARTER('2008-04-01');", true, "SELECT QUARTER('2008-04-01')"}, + + // for SEC_TO_TIME + {"SELECT SEC_TO_TIME(2378)", true, "SELECT SEC_TO_TIME(2378)"}, + + // for TIME_FORMAT + {`SELECT TIME_FORMAT('100:00:00', '%H %k %h %I %l')`, true, "SELECT TIME_FORMAT('100:00:00', '%H %k %h %I %l')"}, + + // for TIME_TO_SEC + {"SELECT TIME_TO_SEC('22:23:00')", true, "SELECT TIME_TO_SEC('22:23:00')"}, + + // for TO_DAYS, TO_SECONDS + {"SELECT TO_DAYS('2007-10-07')", true, "SELECT TO_DAYS('2007-10-07')"}, + {"SELECT TO_SECONDS('2009-11-29')", true, "SELECT TO_SECONDS('2009-11-29')"}, + + // for LAST_DAY + {"SELECT LAST_DAY('2003-02-05');", true, "SELECT LAST_DAY('2003-02-05')"}, + + // for UTC_TIME + {"SELECT UTC_TIME(), UTC_TIME(1)", true, "SELECT UTC_TIME(),UTC_TIME(1)"}, + + // for from_unixtime + {`select from_unixtime(1447430881)`, true, "SELECT FROM_UNIXTIME(1447430881)"}, + {`select from_unixtime(1447430881.123456)`, true, "SELECT FROM_UNIXTIME(1447430881.123456)"}, + {`select from_unixtime(1447430881.1234567)`, true, "SELECT FROM_UNIXTIME(1447430881.1234567)"}, + {`select from_unixtime(1447430881.9999999)`, true, "SELECT FROM_UNIXTIME(1447430881.9999999)"}, + {`select from_unixtime(1447430881, "%Y %D %M %h:%i:%s %x")`, true, "SELECT FROM_UNIXTIME(1447430881, '%Y %D %M %h:%i:%s %x')"}, + {`select from_unixtime(1447430881.123456, "%Y %D %M %h:%i:%s %x")`, true, "SELECT FROM_UNIXTIME(1447430881.123456, '%Y %D %M %h:%i:%s %x')"}, + {`select from_unixtime(1447430881.1234567, "%Y %D %M %h:%i:%s %x")`, true, "SELECT FROM_UNIXTIME(1447430881.1234567, '%Y %D %M %h:%i:%s %x')"}, + + {`SELECT RPAD('hi', 6, 'c');`, true, "SELECT RPAD('hi', 6, 'c')"}, + {`SELECT BIT_LENGTH('hi');`, true, "SELECT BIT_LENGTH('hi')"}, + {`SELECT CHAR_LENGTH('abc');`, true, "SELECT CHAR_LENGTH('abc')"}, + {`SELECT CHARACTER_LENGTH('abc');`, true, "SELECT CHARACTER_LENGTH('abc')"}, + {`SELECT FIELD('ej', 'Hej', 'ej', 'Heja', 'hej', 'foo');`, true, "SELECT FIELD('ej', 'Hej', 'ej', 'Heja', 'hej', 'foo')"}, + {`SELECT FIND_IN_SET('foo', 'foo,bar')`, true, "SELECT FIND_IN_SET('foo', 'foo,bar')"}, + {`SELECT FIND_IN_SET('foo')`, true, "SELECT FIND_IN_SET('foo')"}, // illegal number of argument still pass + {`SELECT MAKE_SET(1,'a'), MAKE_SET(1,'a','b','c')`, true, "SELECT MAKE_SET(1, 'a'),MAKE_SET(1, 'a', 'b', 'c')"}, + {`SELECT MID('Sakila', -5, 3)`, true, "SELECT MID('Sakila', -5, 3)"}, + {`SELECT OCT(12)`, true, "SELECT OCT(12)"}, + {`SELECT OCTET_LENGTH('text')`, true, "SELECT OCTET_LENGTH('text')"}, + {`SELECT ORD('2')`, true, "SELECT ORD('2')"}, + {`SELECT POSITION('bar' IN 'foobarbar')`, true, "SELECT POSITION('bar' IN 'foobarbar')"}, + {`SELECT QUOTE('Don\'t!')`, true, "SELECT QUOTE('Don''t!')"}, + {`SELECT BIN(12)`, true, "SELECT BIN(12)"}, + {`SELECT ELT(1, 'ej', 'Heja', 'hej', 'foo')`, true, "SELECT ELT(1, 'ej', 'Heja', 'hej', 'foo')"}, + {`SELECT EXPORT_SET(5,'Y','N'), EXPORT_SET(5,'Y','N',','), EXPORT_SET(5,'Y','N',',',4)`, true, "SELECT EXPORT_SET(5, 'Y', 'N'),EXPORT_SET(5, 'Y', 'N', ','),EXPORT_SET(5, 'Y', 'N', ',', 4)"}, + {`SELECT FORMAT(), FORMAT(12332.2,2,'de_DE'), FORMAT(12332.123456, 4)`, true, "SELECT FORMAT(),FORMAT(12332.2, 2, 'de_DE'),FORMAT(12332.123456, 4)"}, + {`SELECT FROM_BASE64('abc')`, true, "SELECT FROM_BASE64('abc')"}, + {`SELECT TO_BASE64('abc')`, true, "SELECT TO_BASE64('abc')"}, + {`SELECT LOAD_FILE('/tmp/picture')`, true, "SELECT LOAD_FILE('/tmp/picture')"}, + {`SELECT LPAD('hi',4,'??')`, true, "SELECT LPAD('hi', 4, '??')"}, + {`SELECT LEFT("foobar", 3)`, true, "SELECT LEFT('foobar', 
3)"}, + {`SELECT RIGHT("foobar", 3)`, true, "SELECT RIGHT('foobar', 3)"}, + + // repeat + {`SELECT REPEAT("a", 10);`, true, "SELECT REPEAT('a', 10)"}, + + // for miscellaneous functions + {`SELECT SLEEP(10);`, true, "SELECT SLEEP(10)"}, + {`SELECT ANY_VALUE(@arg);`, true, "SELECT ANY_VALUE(@`arg`)"}, + {`SELECT INET_ATON('10.0.5.9');`, true, "SELECT INET_ATON('10.0.5.9')"}, + {`SELECT INET_NTOA(167773449);`, true, "SELECT INET_NTOA(167773449)"}, + {`SELECT INET6_ATON('fdfe::5a55:caff:fefa:9089');`, true, "SELECT INET6_ATON('fdfe::5a55:caff:fefa:9089')"}, + {`SELECT INET6_NTOA(INET_NTOA(167773449));`, true, "SELECT INET6_NTOA(INET_NTOA(167773449))"}, + {`SELECT IS_FREE_LOCK(@str);`, true, "SELECT IS_FREE_LOCK(@`str`)"}, + {`SELECT IS_IPV4('10.0.5.9');`, true, "SELECT IS_IPV4('10.0.5.9')"}, + {`SELECT IS_IPV4_COMPAT(INET6_ATON('::10.0.5.9'));`, true, "SELECT IS_IPV4_COMPAT(INET6_ATON('::10.0.5.9'))"}, + {`SELECT IS_IPV4_MAPPED(INET6_ATON('::10.0.5.9'));`, true, "SELECT IS_IPV4_MAPPED(INET6_ATON('::10.0.5.9'))"}, + {`SELECT IS_IPV6('10.0.5.9');`, true, "SELECT IS_IPV6('10.0.5.9')"}, + {`SELECT IS_USED_LOCK(@str);`, true, "SELECT IS_USED_LOCK(@`str`)"}, + {`SELECT MASTER_POS_WAIT(@log_name, @log_pos), MASTER_POS_WAIT(@log_name, @log_pos, @timeout), MASTER_POS_WAIT(@log_name, @log_pos, @timeout, @channel_name);`, true, "SELECT MASTER_POS_WAIT(@`log_name`, @`log_pos`),MASTER_POS_WAIT(@`log_name`, @`log_pos`, @`timeout`),MASTER_POS_WAIT(@`log_name`, @`log_pos`, @`timeout`, @`channel_name`)"}, + {`SELECT NAME_CONST('myname', 14);`, true, "SELECT NAME_CONST('myname', 14)"}, + {`SELECT RELEASE_ALL_LOCKS();`, true, "SELECT RELEASE_ALL_LOCKS()"}, + {`SELECT UUID();`, true, "SELECT UUID()"}, + {`SELECT UUID_SHORT()`, true, "SELECT UUID_SHORT()"}, + // test illegal arguments + {`SELECT SLEEP();`, true, "SELECT SLEEP()"}, + {`SELECT ANY_VALUE();`, true, "SELECT ANY_VALUE()"}, + {`SELECT INET_ATON();`, true, "SELECT INET_ATON()"}, + {`SELECT INET_NTOA();`, true, "SELECT INET_NTOA()"}, + {`SELECT INET6_ATON();`, true, "SELECT INET6_ATON()"}, + {`SELECT INET6_NTOA(INET_NTOA());`, true, "SELECT INET6_NTOA(INET_NTOA())"}, + {`SELECT IS_FREE_LOCK();`, true, "SELECT IS_FREE_LOCK()"}, + {`SELECT IS_IPV4();`, true, "SELECT IS_IPV4()"}, + {`SELECT IS_IPV4_COMPAT(INET6_ATON());`, true, "SELECT IS_IPV4_COMPAT(INET6_ATON())"}, + {`SELECT IS_IPV4_MAPPED(INET6_ATON());`, true, "SELECT IS_IPV4_MAPPED(INET6_ATON())"}, + {`SELECT IS_IPV6()`, true, "SELECT IS_IPV6()"}, + {`SELECT IS_USED_LOCK();`, true, "SELECT IS_USED_LOCK()"}, + {`SELECT MASTER_POS_WAIT();`, true, "SELECT MASTER_POS_WAIT()"}, + {`SELECT NAME_CONST();`, true, "SELECT NAME_CONST()"}, + {`SELECT RELEASE_ALL_LOCKS(1);`, true, "SELECT RELEASE_ALL_LOCKS(1)"}, + {`SELECT UUID(1);`, true, "SELECT UUID(1)"}, + {`SELECT UUID_SHORT(1)`, true, "SELECT UUID_SHORT(1)"}, + // for aggregate functions + {`select avg(), avg(c1,c2) from t;`, false, "SELECT AVG(),AVG(`c1`, `c2`) FROM `t`"}, + {`select avg(c2) from t;`, true, "SELECT AVG(`c2`) FROM `t`"}, + {`select max(c1,c2) from t;`, false, ""}, + {`select max(c2) from t;`, true, "SELECT MAX(`c2`) FROM `t`"}, + {`select min(c1,c2) from t;`, false, ""}, + {`select min(c2) from t;`, true, "SELECT MIN(`c2`) FROM `t`"}, + {`select sum(c1,c2) from t;`, false, ""}, + {`select sum(c2) from t;`, true, "SELECT SUM(`c2`) FROM `t`"}, + {`select count(c1) from t;`, true, "SELECT COUNT(`c1`) FROM `t`"}, + {`select count(*) from t;`, true, "SELECT COUNT(1) FROM `t`"}, + {`select count(c1, c2) from t;`, false, ""}, + } + s.RunTest(c, 
table) +} + +func (s *testParserSuite) TestIdentifier(c *C) { + table := []testCase{ + // for quote identifier + {"select `a`, `a.b`, `a b` from t", true, "SELECT `a`,`a.b`,`a b` FROM `t`"}, + // for unquoted identifier + {"create table MergeContextTest$Simple (value integer not null, primary key (value))", true, "CREATE TABLE `MergeContextTest$Simple` (`value` INT NOT NULL,PRIMARY KEY(`value`))"}, + // for as + {"select 1 as a, 1 as `a`, 1 as \"a\", 1 as 'a'", true, "SELECT 1 AS `a`,1 AS `a`,1 AS `a`,1 AS `a`"}, + {`select 1 as a, 1 as "a", 1 as 'a'`, true, "SELECT 1 AS `a`,1 AS `a`,1 AS `a`"}, + {`select 1 a, 1 "a", 1 'a'`, true, "SELECT 1 AS `a`,1 AS `a`,1 AS `a`"}, + {`select * from t as "a"`, false, ""}, + {`select * from t a`, true, "SELECT * FROM `t` AS `a`"}, + // reserved keyword can't be used as identifier directly, but A.B pattern is an exception + {`select COUNT from DESC`, false, ""}, + {`select COUNT from SELECT.DESC`, true, "SELECT `COUNT` FROM `SELECT`.`DESC`"}, + {"use `select`", true, "USE `select`"}, + {"use select", false, "USE `select`"}, + {`select * from t as a`, true, "SELECT * FROM `t` AS `a`"}, + {"select 1 full, 1 row, 1 abs", false, ""}, + {"select 1 full, 1 `row`, 1 abs", true, "SELECT 1 AS `full`,1 AS `row`,1 AS `abs`"}, + {"select * from t full, t1 row, t2 abs", false, ""}, + {"select * from t full, t1 `row`, t2 abs", true, "SELECT * FROM ((`t` AS `full`) JOIN `t1` AS `row`) JOIN `t2` AS `abs`"}, + // for issue 1878, identifiers may begin with digit. + {"create database 123test", true, "CREATE DATABASE `123test`"}, + {"create database 123", false, "CREATE DATABASE `123`"}, + {"create database `123`", true, "CREATE DATABASE `123`"}, + {"create database `12``3`", true, "CREATE DATABASE `12``3`"}, + {"create table `123` (123a1 int)", true, "CREATE TABLE `123` (`123a1` INT)"}, + {"create table 123 (123a1 int)", false, ""}, + {fmt.Sprintf("select * from t%cble", 0), false, ""}, + // for issue 3954, should NOT be recognized as identifiers. 
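+	// A leading dot begins a decimal literal, so `.78` restores as `0.78`; a quoted token
+	// immediately after it becomes a column alias (e.g. `select .78'123'` restores as SELECT 0.78 AS `123`).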
+ {`select .78+123`, true, "SELECT 0.78+123"}, + {`select .78+.21`, true, "SELECT 0.78+0.21"}, + {`select .78-123`, true, "SELECT 0.78-123"}, + {`select .78-.21`, true, "SELECT 0.78-0.21"}, + {`select .78--123`, true, "SELECT 0.78--123"}, + {`select .78*123`, true, "SELECT 0.78*123"}, + {`select .78*.21`, true, "SELECT 0.78*0.21"}, + {`select .78/123`, true, "SELECT 0.78/123"}, + {`select .78/.21`, true, "SELECT 0.78/0.21"}, + {`select .78,123`, true, "SELECT 0.78,123"}, + {`select .78,.21`, true, "SELECT 0.78,0.21"}, + {`select .78 , 123`, true, "SELECT 0.78,123"}, + {`select .78.123`, false, ""}, + {`select .78#123`, true, "SELECT 0.78"}, + {`insert float_test values(.67, 'string');`, true, "INSERT INTO `float_test` VALUES (0.67,'string')"}, + {`select .78'123'`, true, "SELECT 0.78 AS `123`"}, + {"select .78`123`", true, "SELECT 0.78 AS `123`"}, + {`select .78"123"`, true, "SELECT 0.78 AS `123`"}, + } + s.RunTest(c, table) +} + +func (s *testParserSuite) TestDDL(c *C) { + table := []testCase{ + {"CREATE", false, ""}, + {"CREATE TABLE", false, ""}, + {"CREATE TABLE foo (", false, ""}, + {"CREATE TABLE foo ()", false, ""}, + {"CREATE TABLE foo ();", false, ""}, + {"CREATE TABLE foo.* (a varchar(50), b int);", false, ""}, + {"CREATE TABLE foo (a varchar(50), b int);", true, "CREATE TABLE `foo` (`a` VARCHAR(50),`b` INT)"}, + {"CREATE TABLE foo (a TINYINT UNSIGNED);", true, "CREATE TABLE `foo` (`a` TINYINT UNSIGNED)"}, + {"CREATE TABLE foo (a SMALLINT UNSIGNED, b INT UNSIGNED)", true, "CREATE TABLE `foo` (`a` SMALLINT UNSIGNED,`b` INT UNSIGNED)"}, + {"CREATE TABLE foo (a bigint unsigned, b bool);", true, "CREATE TABLE `foo` (`a` BIGINT UNSIGNED,`b` TINYINT(1))"}, + {"CREATE TABLE foo (a TINYINT, b SMALLINT) CREATE TABLE bar (x INT, y int64)", false, ""}, + {"CREATE TABLE foo (a int, b float); CREATE TABLE bar (x double, y float)", true, "CREATE TABLE `foo` (`a` INT,`b` FLOAT); CREATE TABLE `bar` (`x` DOUBLE,`y` FLOAT)"}, + {"CREATE TABLE foo (a bytes)", false, ""}, + {"CREATE TABLE foo (a SMALLINT UNSIGNED, b INT UNSIGNED)", true, "CREATE TABLE `foo` (`a` SMALLINT UNSIGNED,`b` INT UNSIGNED)"}, + {"CREATE TABLE foo (a SMALLINT UNSIGNED, b INT UNSIGNED) -- foo", true, "CREATE TABLE `foo` (`a` SMALLINT UNSIGNED,`b` INT UNSIGNED)"}, + {"CREATE TABLE foo (a SMALLINT UNSIGNED, b INT UNSIGNED) // foo", false, ""}, + {"CREATE TABLE foo (a SMALLINT UNSIGNED, b INT UNSIGNED) /* foo */", true, "CREATE TABLE `foo` (`a` SMALLINT UNSIGNED,`b` INT UNSIGNED)"}, + {"CREATE TABLE foo /* foo */ (a SMALLINT UNSIGNED, b INT UNSIGNED) /* foo */", true, "CREATE TABLE `foo` (`a` SMALLINT UNSIGNED,`b` INT UNSIGNED)"}, + {"CREATE TABLE foo (name CHAR(50) BINARY);", true, "CREATE TABLE `foo` (`name` CHAR(50) BINARY)"}, + {"CREATE TABLE foo (name CHAR(50) COLLATE utf8_bin)", true, "CREATE TABLE `foo` (`name` CHAR(50) COLLATE utf8_bin)"}, + {"CREATE TABLE foo (id varchar(50) collate utf8_bin);", true, "CREATE TABLE `foo` (`id` VARCHAR(50) COLLATE utf8_bin)"}, + {"CREATE TABLE foo (name CHAR(50) CHARACTER SET UTF8)", true, "CREATE TABLE `foo` (`name` CHAR(50) CHARACTER SET UTF8)"}, + {"CREATE TABLE foo (name CHAR(50) CHARACTER SET utf8 BINARY)", true, "CREATE TABLE `foo` (`name` CHAR(50) BINARY CHARACTER SET UTF8)"}, + {"CREATE TABLE foo (name CHAR(50) CHARACTER SET utf8 BINARY CHARACTER set utf8)", false, ""}, + {"CREATE TABLE foo (name CHAR(50) BINARY CHARACTER SET utf8 COLLATE utf8_bin)", true, "CREATE TABLE `foo` (`name` CHAR(50) BINARY CHARACTER SET UTF8 COLLATE utf8_bin)"}, + {"CREATE TABLE foo (name CHAR(50) 
CHARACTER SET utf8 COLLATE utf8_bin COLLATE ascii_bin)", true, "CREATE TABLE `foo` (`name` CHAR(50) CHARACTER SET UTF8 COLLATE utf8_bin COLLATE ascii_bin)"}, + {"CREATE TABLE foo (name CHAR(50) COLLATE ascii_bin COLLATE latin1_bin)", true, "CREATE TABLE `foo` (`name` CHAR(50) COLLATE ascii_bin COLLATE latin1_bin)"}, + {"CREATE TABLE foo (name CHAR(50) COLLATE ascii_bin PRIMARY KEY COLLATE latin1_bin)", true, "CREATE TABLE `foo` (`name` CHAR(50) COLLATE ascii_bin PRIMARY KEY COLLATE latin1_bin)"}, + {"CREATE TABLE foo (a.b, b);", false, ""}, + {"CREATE TABLE foo (a, b.c);", false, ""}, + {"CREATE TABLE (name CHAR(50) BINARY)", false, ""}, + // for create temporary table + {"CREATE TEMPORARY TABLE t (a varchar(50), b int);", true, "CREATE TEMPORARY TABLE `t` (`a` VARCHAR(50),`b` INT)"}, + {"CREATE TEMPORARY TABLE t LIKE t1", true, "CREATE TEMPORARY TABLE `t` LIKE `t1`"}, + {"DROP TEMPORARY TABLE t", true, "DROP TEMPORARY TABLE `t`"}, + // test use key word as column name + {"CREATE TABLE foo (pump varchar(50), b int);", true, "CREATE TABLE `foo` (`pump` VARCHAR(50),`b` INT)"}, + {"CREATE TABLE foo (drainer varchar(50), b int);", true, "CREATE TABLE `foo` (`drainer` VARCHAR(50),`b` INT)"}, + {"CREATE TABLE foo (node_id varchar(50), b int);", true, "CREATE TABLE `foo` (`node_id` VARCHAR(50),`b` INT)"}, + {"CREATE TABLE foo (node_state varchar(50), b int);", true, "CREATE TABLE `foo` (`node_state` VARCHAR(50),`b` INT)"}, + + {"create database xxx", true, "CREATE DATABASE `xxx`"}, + {"create database if exists xxx", false, ""}, + {"create database if not exists xxx", true, "CREATE DATABASE IF NOT EXISTS `xxx`"}, + + {"create schema xxx", true, "CREATE DATABASE `xxx`"}, + {"create schema if exists xxx", false, ""}, + {"create schema if not exists xxx", true, "CREATE DATABASE IF NOT EXISTS `xxx`"}, + // for drop database/schema/table/view/stats + {"drop database xxx", true, "DROP DATABASE `xxx`"}, + {"drop database if exists xxx", true, "DROP DATABASE IF EXISTS `xxx`"}, + {"drop database if not exists xxx", false, ""}, + {"drop schema xxx", true, "DROP DATABASE `xxx`"}, + {"drop schema if exists xxx", true, "DROP DATABASE IF EXISTS `xxx`"}, + {"drop schema if not exists xxx", false, ""}, + {"drop table", false, "DROP TABLE"}, + {"drop table xxx", true, "DROP TABLE `xxx`"}, + {"drop table xxx, yyy", true, "DROP TABLE `xxx`, `yyy`"}, + {"drop tables xxx", true, "DROP TABLE `xxx`"}, + {"drop tables xxx, yyy", true, "DROP TABLE `xxx`, `yyy`"}, + {"drop table if exists xxx", true, "DROP TABLE IF EXISTS `xxx`"}, + {"drop table if exists xxx, yyy", true, "DROP TABLE IF EXISTS `xxx`, `yyy`"}, + {"drop table if not exists xxx", false, ""}, + {"drop table xxx restrict", true, "DROP TABLE `xxx`"}, + {"drop table xxx, yyy cascade", true, "DROP TABLE `xxx`, `yyy`"}, + {"drop table if exists xxx restrict", true, "DROP TABLE IF EXISTS `xxx`"}, + {`create table t (c int KEY);`, true, "CREATE TABLE `t` (`c` INT PRIMARY KEY)"}, + // for issue pingcap/parser#310 + {`CREATE TABLE t (a DECIMAL(20,0), b DECIMAL(30), c FLOAT(25,0))`, true, "CREATE TABLE `t` (`a` DECIMAL(20,0),`b` DECIMAL(30),`c` FLOAT(25,0))"}, + // Create table with multiple index options. 
+ {`create table t (c int, index ci (c) USING BTREE COMMENT "123");`, true, "CREATE TABLE `t` (`c` INT,INDEX `ci`(`c`) USING BTREE COMMENT '123')"}, + // for default value + {"CREATE TABLE sbtest (id INTEGER UNSIGNED NOT NULL AUTO_INCREMENT, k integer UNSIGNED DEFAULT '0' NOT NULL, c char(120) DEFAULT '' NOT NULL, pad char(60) DEFAULT '' NOT NULL, PRIMARY KEY (id) )", true, "CREATE TABLE `sbtest` (`id` INT UNSIGNED NOT NULL AUTO_INCREMENT,`k` INT UNSIGNED DEFAULT '0' NOT NULL,`c` CHAR(120) DEFAULT '' NOT NULL,`pad` CHAR(60) DEFAULT '' NOT NULL,PRIMARY KEY(`id`))"}, + {"create table test (create_date TIMESTAMP NOT NULL COMMENT '创建日期 create date' DEFAULT now());", true, "CREATE TABLE `test` (`create_date` TIMESTAMP NOT NULL COMMENT '创建日期 create date' DEFAULT CURRENT_TIMESTAMP())"}, + {"create table ts (t int, v timestamp(3) default CURRENT_TIMESTAMP(3));", true, "CREATE TABLE `ts` (`t` INT,`v` TIMESTAMP(3) DEFAULT CURRENT_TIMESTAMP(3))"}, //TODO: The number yacc in parentheses has not been implemented yet. + // Create table with primary key name. + {"create table if not exists `t` (`id` int not null auto_increment comment '消息ID', primary key `pk_id` (`id`) );", true, "CREATE TABLE IF NOT EXISTS `t` (`id` INT NOT NULL AUTO_INCREMENT COMMENT '消息ID',PRIMARY KEY `pk_id`(`id`))"}, + // Create table with no option is valid for parser + {"create table a", true, "CREATE TABLE `a` "}, + + {"create table t (a timestamp default now)", false, ""}, + {"create table t (a timestamp default now())", true, "CREATE TABLE `t` (`a` TIMESTAMP DEFAULT CURRENT_TIMESTAMP())"}, + {"create table t (a timestamp default now() on update now)", false, ""}, + {"create table t (a timestamp default now() on update now())", true, "CREATE TABLE `t` (`a` TIMESTAMP DEFAULT CURRENT_TIMESTAMP() ON UPDATE CURRENT_TIMESTAMP())"}, + {"ALTER TABLE t ADD COLUMN (a SMALLINT UNSIGNED)", true, "ALTER TABLE `t` ADD COLUMN (`a` SMALLINT UNSIGNED)"}, + {"ALTER TABLE t.* ADD COLUMN (a SMALLINT UNSIGNED)", false, ""}, + {"ALTER TABLE t ADD COLUMN IF NOT EXISTS (a SMALLINT UNSIGNED)", true, "ALTER TABLE `t` ADD COLUMN IF NOT EXISTS (`a` SMALLINT UNSIGNED)"}, + {"ALTER TABLE ADD COLUMN (a SMALLINT UNSIGNED)", false, ""}, + {"ALTER TABLE t ADD COLUMN (a SMALLINT UNSIGNED, b varchar(255))", true, "ALTER TABLE `t` ADD COLUMN (`a` SMALLINT UNSIGNED, `b` VARCHAR(255))"}, + {"ALTER TABLE t ADD COLUMN IF NOT EXISTS (a SMALLINT UNSIGNED, b varchar(255))", true, "ALTER TABLE `t` ADD COLUMN IF NOT EXISTS (`a` SMALLINT UNSIGNED, `b` VARCHAR(255))"}, + {"ALTER TABLE t ADD COLUMN a SMALLINT UNSIGNED", true, "ALTER TABLE `t` ADD COLUMN `a` SMALLINT UNSIGNED"}, + {"ALTER TABLE t DISABLE KEYS", true, "ALTER TABLE `t` DISABLE KEYS"}, + {"ALTER TABLE t ENABLE KEYS", true, "ALTER TABLE `t` ENABLE KEYS"}, + {"ALTER TABLE t MODIFY COLUMN a varchar(255)", true, "ALTER TABLE `t` MODIFY COLUMN `a` VARCHAR(255)"}, + {"ALTER TABLE t MODIFY COLUMN IF EXISTS a varchar(255)", true, "ALTER TABLE `t` MODIFY COLUMN IF EXISTS `a` VARCHAR(255)"}, + {"ALTER TABLE t CHANGE COLUMN a b varchar(255)", true, "ALTER TABLE `t` CHANGE COLUMN `a` `b` VARCHAR(255)"}, + {"ALTER TABLE t CHANGE COLUMN IF EXISTS a b varchar(255)", true, "ALTER TABLE `t` CHANGE COLUMN IF EXISTS `a` `b` VARCHAR(255)"}, + // For alter table rename column statement. 
+ {"ALTER TABLE t RENAME COLUMN a TO b", true, "ALTER TABLE `t` RENAME COLUMN `a` TO `b`"}, + + {"ALTER TABLE t ALTER COLUMN a SET DEFAULT 1", true, "ALTER TABLE `t` ALTER COLUMN `a` SET DEFAULT 1"}, + {"ALTER TABLE t ALTER a SET DEFAULT 1", true, "ALTER TABLE `t` ALTER COLUMN `a` SET DEFAULT 1"}, + {"ALTER TABLE t ALTER COLUMN a SET DEFAULT CURRENT_TIMESTAMP", false, ""}, + {"ALTER TABLE t ALTER COLUMN a SET DEFAULT NOW()", false, ""}, + {"ALTER TABLE t ALTER COLUMN a SET DEFAULT 1+1", false, ""}, + {"ALTER TABLE t ALTER COLUMN a SET DEFAULT (CURRENT_TIMESTAMP())", true, "ALTER TABLE `t` ALTER COLUMN `a` SET DEFAULT (CURRENT_TIMESTAMP())"}, + {"ALTER TABLE t ALTER COLUMN a SET DEFAULT (NOW())", true, "ALTER TABLE `t` ALTER COLUMN `a` SET DEFAULT (NOW())"}, + {"ALTER TABLE t ALTER COLUMN a SET DEFAULT (1+1)", true, "ALTER TABLE `t` ALTER COLUMN `a` SET DEFAULT (1+1)"}, + {"ALTER TABLE t ALTER COLUMN a SET DEFAULT (1)", true, "ALTER TABLE `t` ALTER COLUMN `a` SET DEFAULT 1"}, + {"ALTER TABLE t ALTER COLUMN a DROP DEFAULT", true, "ALTER TABLE `t` ALTER COLUMN `a` DROP DEFAULT"}, + {"ALTER TABLE t ALTER a DROP DEFAULT", true, "ALTER TABLE `t` ALTER COLUMN `a` DROP DEFAULT"}, + {"ALTER TABLE t ADD FULLTEXT KEY `FullText` (`name` ASC)", true, "ALTER TABLE `t` ADD FULLTEXT `FullText`(`name`)"}, + {"ALTER TABLE t ADD FULLTEXT `FullText` (`name` ASC)", true, "ALTER TABLE `t` ADD FULLTEXT `FullText`(`name`)"}, + {"ALTER TABLE t ADD FULLTEXT INDEX `FullText` (`name` ASC)", true, "ALTER TABLE `t` ADD FULLTEXT `FullText`(`name`)"}, + {"ALTER TABLE t ADD INDEX (a) USING BTREE COMMENT 'a'", true, "ALTER TABLE `t` ADD INDEX(`a`) USING BTREE COMMENT 'a'"}, + {"ALTER TABLE t ADD INDEX IF NOT EXISTS (a) USING BTREE COMMENT 'a'", true, "ALTER TABLE `t` ADD INDEX IF NOT EXISTS(`a`) USING BTREE COMMENT 'a'"}, + {"ALTER TABLE t ADD INDEX (a) USING RTREE COMMENT 'a'", true, "ALTER TABLE `t` ADD INDEX(`a`) USING RTREE COMMENT 'a'"}, + {"ALTER TABLE t ADD KEY (a) USING HASH COMMENT 'a'", true, "ALTER TABLE `t` ADD INDEX(`a`) USING HASH COMMENT 'a'"}, + {"ALTER TABLE t ADD KEY IF NOT EXISTS (a) USING HASH COMMENT 'a'", true, "ALTER TABLE `t` ADD INDEX IF NOT EXISTS(`a`) USING HASH COMMENT 'a'"}, + {"ALTER TABLE t ADD PRIMARY KEY ident USING RTREE ( a DESC , b )", true, "ALTER TABLE `t` ADD PRIMARY KEY `ident`(`a`, `b`) USING RTREE"}, + {"ALTER TABLE t ADD KEY USING RTREE ( a ) ", true, "ALTER TABLE `t` ADD INDEX(`a`) USING RTREE"}, + {"ALTER TABLE t ADD KEY USING RTREE ( ident ASC , ident ( 123 ) )", true, "ALTER TABLE `t` ADD INDEX(`ident`, `ident`(123)) USING RTREE"}, + {"ALTER TABLE t ADD PRIMARY KEY (a) COMMENT 'a'", true, "ALTER TABLE `t` ADD PRIMARY KEY(`a`) COMMENT 'a'"}, + {"ALTER TABLE t ADD UNIQUE (a) COMMENT 'a'", true, "ALTER TABLE `t` ADD UNIQUE(`a`) COMMENT 'a'"}, + {"ALTER TABLE t ADD UNIQUE KEY (a) COMMENT 'a'", true, "ALTER TABLE `t` ADD UNIQUE(`a`) COMMENT 'a'"}, + {"ALTER TABLE t ADD UNIQUE INDEX (a) COMMENT 'a'", true, "ALTER TABLE `t` ADD UNIQUE(`a`) COMMENT 'a'"}, + {"ALTER TABLE t FORCE", true, "ALTER TABLE `t` FORCE /* AlterTableForce is not supported */ "}, + {"ALTER TABLE t DROP INDEX;", false, "ALTER TABLE `t` DROP INDEX"}, + {"ALTER TABLE t DROP INDEX a", true, "ALTER TABLE `t` DROP INDEX `a`"}, + {"ALTER TABLE t DROP INDEX IF EXISTS a", true, "ALTER TABLE `t` DROP INDEX IF EXISTS `a`"}, + + // For alter table alter index statement + {"ALTER TABLE t ALTER INDEX a INVISIBLE", true, "ALTER TABLE `t` ALTER INDEX `a` INVISIBLE"}, + {"ALTER TABLE t ALTER INDEX a VISIBLE", true, "ALTER TABLE 
`t` ALTER INDEX `a` VISIBLE"}, + + {"ALTER TABLE t DROP FOREIGN KEY a", true, "ALTER TABLE `t` DROP FOREIGN KEY `a`"}, + {"ALTER TABLE t DROP FOREIGN KEY IF EXISTS a", true, "ALTER TABLE `t` DROP FOREIGN KEY IF EXISTS `a`"}, + {"ALTER TABLE t DROP COLUMN a CASCADE", true, "ALTER TABLE `t` DROP COLUMN `a`"}, + {"ALTER TABLE t DROP COLUMN IF EXISTS a CASCADE", true, "ALTER TABLE `t` DROP COLUMN IF EXISTS `a`"}, + + // For #6405 + {"ALTER TABLE t RENAME KEY a TO b;", true, "ALTER TABLE `t` RENAME INDEX `a` TO `b`"}, + {"ALTER TABLE t RENAME INDEX a TO b;", true, "ALTER TABLE `t` RENAME INDEX `a` TO `b`"}, + // For create index statement + {"CREATE INDEX idx ON t (a)", true, "CREATE INDEX `idx` ON `t` (`a`)"}, + {"CREATE INDEX IF NOT EXISTS idx ON t (a)", true, "CREATE INDEX IF NOT EXISTS `idx` ON `t` (`a`)"}, + {"CREATE UNIQUE INDEX idx ON t (a)", true, "CREATE UNIQUE INDEX `idx` ON `t` (`a`)"}, + {"CREATE UNIQUE INDEX IF NOT EXISTS idx ON t (a)", true, "CREATE UNIQUE INDEX IF NOT EXISTS `idx` ON `t` (`a`)"}, + {"CREATE UNIQUE INDEX ident ON d_n.t_n ( ident , ident ASC ) TYPE BTREE", true, "CREATE UNIQUE INDEX `ident` ON `d_n`.`t_n` (`ident`, `ident`) USING BTREE"}, + {"CREATE UNIQUE INDEX ident ON d_n.t_n ( ident , ident ASC ) TYPE HASH", true, "CREATE UNIQUE INDEX `ident` ON `d_n`.`t_n` (`ident`, `ident`) USING HASH"}, + {"CREATE UNIQUE INDEX ident ON d_n.t_n ( ident , ident ASC ) TYPE RTREE", true, "CREATE UNIQUE INDEX `ident` ON `d_n`.`t_n` (`ident`, `ident`) USING RTREE"}, + {"CREATE UNIQUE INDEX ident TYPE BTREE ON d_n.t_n ( ident , ident ASC )", true, "CREATE UNIQUE INDEX `ident` ON `d_n`.`t_n` (`ident`, `ident`) USING BTREE"}, + {"CREATE UNIQUE INDEX ident USING BTREE ON d_n.t_n ( ident , ident ASC )", true, "CREATE UNIQUE INDEX `ident` ON `d_n`.`t_n` (`ident`, `ident`) USING BTREE"}, + {"CREATE SPATIAL INDEX idx ON t (a)", true, "CREATE SPATIAL INDEX `idx` ON `t` (`a`)"}, + {"CREATE SPATIAL INDEX IF NOT EXISTS idx ON t (a)", true, "CREATE SPATIAL INDEX IF NOT EXISTS `idx` ON `t` (`a`)"}, + {"CREATE FULLTEXT INDEX idx ON t (a)", true, "CREATE FULLTEXT INDEX `idx` ON `t` (`a`)"}, + {"CREATE FULLTEXT INDEX IF NOT EXISTS idx ON t (a)", true, "CREATE FULLTEXT INDEX IF NOT EXISTS `idx` ON `t` (`a`)"}, + {"CREATE FULLTEXT INDEX idx ON t (a) WITH PARSER ident", true, "CREATE FULLTEXT INDEX `idx` ON `t` (`a`) WITH PARSER `ident`"}, + {"CREATE FULLTEXT INDEX idx ON t (a) WITH PARSER ident comment 'string'", true, "CREATE FULLTEXT INDEX `idx` ON `t` (`a`) WITH PARSER `ident` COMMENT 'string'"}, + {"CREATE FULLTEXT INDEX idx ON t (a) comment 'string' with parser ident", true, "CREATE FULLTEXT INDEX `idx` ON `t` (`a`) WITH PARSER `ident` COMMENT 'string'"}, + {"CREATE INDEX idx ON t (a) USING HASH", true, "CREATE INDEX `idx` ON `t` (`a`) USING HASH"}, + {"CREATE INDEX idx ON t (a) COMMENT 'foo'", true, "CREATE INDEX `idx` ON `t` (`a`) COMMENT 'foo'"}, + {"CREATE INDEX idx ON t (a) USING HASH COMMENT 'foo'", true, "CREATE INDEX `idx` ON `t` (`a`) USING HASH COMMENT 'foo'"}, + {"CREATE INDEX idx USING BTREE ON t (a) USING HASH COMMENT 'foo'", true, "CREATE INDEX `idx` ON `t` (`a`) USING HASH COMMENT 'foo'"}, + {"CREATE INDEX idx USING BTREE ON t (a)", true, "CREATE INDEX `idx` ON `t` (`a`) USING BTREE"}, + {"CREATE INDEX idx ON t ( a ) VISIBLE", true, "CREATE INDEX `idx` ON `t` (`a`) VISIBLE"}, + {"CREATE INDEX idx ON t ( a ) INVISIBLE", true, "CREATE INDEX `idx` ON `t` (`a`) INVISIBLE"}, + {"CREATE INDEX idx ON t ( a ) INVISIBLE VISIBLE", true, "CREATE INDEX `idx` ON `t` (`a`) VISIBLE"}, + 
{"CREATE INDEX idx ON t ( a ) VISIBLE INVISIBLE", true, "CREATE INDEX `idx` ON `t` (`a`) INVISIBLE"}, + {"CREATE INDEX idx ON t ( a ) USING HASH VISIBLE", true, "CREATE INDEX `idx` ON `t` (`a`) USING HASH VISIBLE"}, + {"CREATE INDEX idx ON t ( a ) USING HASH INVISIBLE", true, "CREATE INDEX `idx` ON `t` (`a`) USING HASH INVISIBLE"}, + + //For dorp index statement + {"drop index a on t", true, "DROP INDEX `a` ON `t`"}, + {"drop index a on db.t", true, "DROP INDEX `a` ON `db`.`t`"}, + {"drop index a on db.`tb-ttb`", true, "DROP INDEX `a` ON `db`.`tb-ttb`"}, + {"drop index if exists a on t", true, "DROP INDEX IF EXISTS `a` ON `t`"}, + {"drop index if exists a on db.t", true, "DROP INDEX IF EXISTS `a` ON `db`.`t`"}, + {"drop index if exists a on db.`tb-ttb`", true, "DROP INDEX IF EXISTS `a` ON `db`.`tb-ttb`"}, + + // for truncate statement + {"TRUNCATE TABLE t1", true, "TRUNCATE TABLE `t1`"}, + {"TRUNCATE t1", true, "TRUNCATE TABLE `t1`"}, + + // for empty alert table index + {"ALTER TABLE t ADD INDEX () ", false, ""}, + {"ALTER TABLE t ADD UNIQUE ()", false, ""}, + {"ALTER TABLE t ADD UNIQUE INDEX ()", false, ""}, + {"ALTER TABLE t ADD UNIQUE KEY ()", false, ""}, + + // for issue 4538 + {"create table a (process double)", true, "CREATE TABLE `a` (`process` DOUBLE)"}, + + // for issue 4740 + {"create table t (a int1, b int2, c int3, d int4, e int8)", true, "CREATE TABLE `t` (`a` TINYINT,`b` SMALLINT,`c` MEDIUMINT,`d` INT,`e` BIGINT)"}, + + // for issue 5918 + {"create table t (lv long varchar null)", true, "CREATE TABLE `t` (`lv` MEDIUMTEXT NULL)"}, + + // special table name + {"CREATE TABLE cdp_test.`test2-1` (id int(11) DEFAULT NULL,key(id));", true, "CREATE TABLE `cdp_test`.`test2-1` (`id` INT(11) DEFAULT NULL,INDEX(`id`))"}, + {"CREATE TABLE miantiao (`扁豆焖面` INT(11));", true, "CREATE TABLE `miantiao` (`扁豆焖面` INT(11))"}, + + // for generated column definition + {"create table t (a timestamp, b timestamp as (a) not null on update current_timestamp);", false, ""}, + {"create table t (a bigint, b bigint as (a) primary key auto_increment);", false, ""}, + {"create table t (a bigint, b bigint as (a) not null default 10);", false, ""}, + {"create table t (a bigint, b bigint as (a+1) not null);", true, "CREATE TABLE `t` (`a` BIGINT,`b` BIGINT GENERATED ALWAYS AS(`a`+1) VIRTUAL NOT NULL)"}, + {"create table t (a bigint, b bigint as (a+1) not null);", true, "CREATE TABLE `t` (`a` BIGINT,`b` BIGINT GENERATED ALWAYS AS(`a`+1) VIRTUAL NOT NULL)"}, + {"create table t (a bigint, b bigint as (a+1) not null comment 'ttt');", true, "CREATE TABLE `t` (`a` BIGINT,`b` BIGINT GENERATED ALWAYS AS(`a`+1) VIRTUAL NOT NULL COMMENT 'ttt')"}, + {"alter table t add column (f timestamp as (a+1) default '2019-01-01 11:11:11');", false, ""}, + {"alter table t modify column f int as (a+1) default 55;", false, ""}, + + // for column format + {"create table t (a int column_format fixed)", true, "CREATE TABLE `t` (`a` INT COLUMN_FORMAT FIXED)"}, + {"create table t (a int column_format default)", true, "CREATE TABLE `t` (`a` INT COLUMN_FORMAT DEFAULT)"}, + {"create table t (a int column_format dynamic)", true, "CREATE TABLE `t` (`a` INT COLUMN_FORMAT DYNAMIC)"}, + {"alter table t modify column a bigint column_format default", true, "ALTER TABLE `t` MODIFY COLUMN `a` BIGINT COLUMN_FORMAT DEFAULT"}, + + // for character vary syntax + {"create table t (a character varying(1));", true, "CREATE TABLE `t` (`a` VARCHAR(1))"}, + {"create table t (a character varying(255));", true, "CREATE TABLE `t` (`a` VARCHAR(255))"}, + {"create 
table t (a char varying(50));", true, "CREATE TABLE `t` (`a` VARCHAR(50))"}, + {"create table t (a varcharacter(1));", true, "CREATE TABLE `t` (`a` VARCHAR(1))"}, + {"create table t (a varcharacter(50));", true, "CREATE TABLE `t` (`a` VARCHAR(50))"}, + {"create table t (a varcharacter(1), b varcharacter(255));", true, "CREATE TABLE `t` (`a` VARCHAR(1),`b` VARCHAR(255))"}, + {"create table t (a char);", true, "CREATE TABLE `t` (`a` CHAR)"}, + {"create table t (a character);", true, "CREATE TABLE `t` (`a` CHAR)"}, + {"create table t (a character varying(50), b int);", true, "CREATE TABLE `t` (`a` VARCHAR(50),`b` INT)"}, + {"create table t (a character, b int);", true, "CREATE TABLE `t` (`a` CHAR,`b` INT)"}, + {"create table t (a national character varying(50));", true, "CREATE TABLE `t` (`a` VARCHAR(50))"}, + {"create table t (a national char varying(50));", true, "CREATE TABLE `t` (`a` VARCHAR(50))"}, + {"create table t (a national char);", true, "CREATE TABLE `t` (`a` CHAR)"}, + {"create table t (a national character);", true, "CREATE TABLE `t` (`a` CHAR)"}, + {"create table t (a nchar);", true, "CREATE TABLE `t` (`a` CHAR)"}, + {"create table t (a nchar varchar(50));", true, "CREATE TABLE `t` (`a` VARCHAR(50))"}, + {"create table t (a nchar varcharacter(50));", true, "CREATE TABLE `t` (`a` VARCHAR(50))"}, + {"create table t (a national varchar);", false, ""}, + {"create table t (a national varchar(50));", true, "CREATE TABLE `t` (`a` VARCHAR(50))"}, + {"create table t (a national varcharacter(50));", true, "CREATE TABLE `t` (`a` VARCHAR(50))"}, + {"create table t (a nchar varying(50));", true, "CREATE TABLE `t` (`a` VARCHAR(50))"}, + {"create table t (a nvarchar(50));", true, "CREATE TABLE `t` (`a` VARCHAR(50))"}, + {"create table nchar (a int);", true, "CREATE TABLE `nchar` (`a` INT)"}, + {"create table nchar (a int, b nchar);", true, "CREATE TABLE `nchar` (`a` INT,`b` CHAR)"}, + {"create table nchar (a int, b nchar(50));", true, "CREATE TABLE `nchar` (`a` INT,`b` CHAR(50))"}, + // Test keyword `SERIAL` + {"create table t (a serial);", true, "CREATE TABLE `t` (`a` BIGINT UNSIGNED NOT NULL AUTO_INCREMENT UNIQUE KEY)"}, + {"create table t (a serial null);", true, "CREATE TABLE `t` (`a` BIGINT UNSIGNED NOT NULL AUTO_INCREMENT UNIQUE KEY NULL)"}, + {"create table t (b int, a serial);", true, "CREATE TABLE `t` (`b` INT,`a` BIGINT UNSIGNED NOT NULL AUTO_INCREMENT UNIQUE KEY)"}, + {"create table t (a int serial default value);", true, "CREATE TABLE `t` (`a` INT NOT NULL AUTO_INCREMENT UNIQUE KEY)"}, + {"create table t (a int serial default value null);", true, "CREATE TABLE `t` (`a` INT NOT NULL AUTO_INCREMENT UNIQUE KEY NULL)"}, + {"create table t (a bigint serial default value);", true, "CREATE TABLE `t` (`a` BIGINT NOT NULL AUTO_INCREMENT UNIQUE KEY)"}, + {"create table t (a smallint serial default value);", true, "CREATE TABLE `t` (`a` SMALLINT NOT NULL AUTO_INCREMENT UNIQUE KEY)"}, + + // for LONG syntax + {"create table t (a long);", true, "CREATE TABLE `t` (`a` MEDIUMTEXT)"}, + {"create table t (a long varchar);", true, "CREATE TABLE `t` (`a` MEDIUMTEXT)"}, + {"create table t (a long varcharacter);", true, "CREATE TABLE `t` (`a` MEDIUMTEXT)"}, + {"create table t (a long char varying);", true, "CREATE TABLE `t` (`a` MEDIUMTEXT)"}, + {"create table t (a long character varying);", true, "CREATE TABLE `t` (`a` MEDIUMTEXT)"}, + {"create table t (a mediumtext, b long varchar, c long, d long varcharacter, e long char varying, f long character varying, g long);", true, "CREATE TABLE `t` (`a` 
MEDIUMTEXT,`b` MEDIUMTEXT,`c` MEDIUMTEXT,`d` MEDIUMTEXT,`e` MEDIUMTEXT,`f` MEDIUMTEXT,`g` MEDIUMTEXT)"}, + {"create table t (a long varbinary);", true, "CREATE TABLE `t` (`a` MEDIUMBLOB)"}, + {"create table t (a long char varying, b long varbinary);", true, "CREATE TABLE `t` (`a` MEDIUMTEXT,`b` MEDIUMBLOB)"}, + {"create table t (a long char set utf8);", true, "CREATE TABLE `t` (`a` MEDIUMTEXT CHARACTER SET UTF8)"}, + {"create table t (a long char varying char set utf8);", true, "CREATE TABLE `t` (`a` MEDIUMTEXT CHARACTER SET UTF8)"}, + {"create table t (a long character set utf8);", true, "CREATE TABLE `t` (`a` MEDIUMTEXT CHARACTER SET UTF8)"}, + {"create table t (a long character varying character set utf8);", true, "CREATE TABLE `t` (`a` MEDIUMTEXT CHARACTER SET UTF8)"}, + // for issue 501 + {"ALTER TABLE t IMPORT TABLESPACE;", true, "ALTER TABLE `t` IMPORT TABLESPACE"}, + {"ALTER TABLE t DISCARD TABLESPACE;", true, "ALTER TABLE `t` DISCARD TABLESPACE"}, + {"ALTER TABLE db.t IMPORT TABLESPACE;", true, "ALTER TABLE `db`.`t` IMPORT TABLESPACE"}, + {"ALTER TABLE db.t DISCARD TABLESPACE;", true, "ALTER TABLE `db`.`t` DISCARD TABLESPACE"}, + + // for CONSTRAINT syntax, see issue 413 + {"ALTER TABLE t ADD ( CHECK ( true ) )", true, "ALTER TABLE `t` ADD COLUMN (CHECK(TRUE) ENFORCED)"}, + {"ALTER TABLE t ADD ( CONSTRAINT CHECK ( true ) )", true, "ALTER TABLE `t` ADD COLUMN (CHECK(TRUE) ENFORCED)"}, + {"ALTER TABLE t ADD COLUMN ( CONSTRAINT ident CHECK ( 1>2 ) NOT ENFORCED )", true, "ALTER TABLE `t` ADD COLUMN (CONSTRAINT `ident` CHECK(1>2) NOT ENFORCED)"}, + {"alter table t add column (b int, constraint c unique key (b))", true, "ALTER TABLE `t` ADD COLUMN (`b` INT, UNIQUE `c`(`b`))"}, + {"ALTER TABLE t ADD COLUMN ( CONSTRAINT CHECK ( true ) )", true, "ALTER TABLE `t` ADD COLUMN (CHECK(TRUE) ENFORCED)"}, + {"ALTER TABLE t ADD COLUMN ( CONSTRAINT CHECK ( true ) ENFORCED , CHECK ( true ) )", true, "ALTER TABLE `t` ADD COLUMN (CHECK(TRUE) ENFORCED, CHECK(TRUE) ENFORCED)"}, + {"ALTER TABLE t ADD COLUMN (a1 int, CONSTRAINT b1 CHECK (a1>0))", true, "ALTER TABLE `t` ADD COLUMN (`a1` INT, CONSTRAINT `b1` CHECK(`a1`>0) ENFORCED)"}, + {"ALTER TABLE t ADD COLUMN (a1 int, a2 int, CONSTRAINT b1 CHECK (a1>0), CONSTRAINT b2 CHECK (a2<10))", true, "ALTER TABLE `t` ADD COLUMN (`a1` INT, `a2` INT, CONSTRAINT `b1` CHECK(`a1`>0) ENFORCED, CONSTRAINT `b2` CHECK(`a2`<10) ENFORCED)"}, + {"ALTER TABLE `t` ADD COLUMN (`a1` INT, PRIMARY KEY (`a1`))", true, "ALTER TABLE `t` ADD COLUMN (`a1` INT, PRIMARY KEY(`a1`))"}, + {"ALTER TABLE t ADD (a1 int, CONSTRAINT PRIMARY KEY (a1))", true, "ALTER TABLE `t` ADD COLUMN (`a1` INT, PRIMARY KEY(`a1`))"}, + {"ALTER TABLE t ADD (a1 int, a2 int, PRIMARY KEY (a1), UNIQUE (a2))", true, "ALTER TABLE `t` ADD COLUMN (`a1` INT, `a2` INT, PRIMARY KEY(`a1`), UNIQUE(`a2`))"}, + {"ALTER TABLE t ADD (a1 int, a2 int, PRIMARY KEY (a1), CONSTRAINT b2 UNIQUE (a2))", true, "ALTER TABLE `t` ADD COLUMN (`a1` INT, `a2` INT, PRIMARY KEY(`a1`), UNIQUE `b2`(`a2`))"}, + // for issue 537 + {"CREATE TABLE IF NOT EXISTS table_ident (a SQL_TSI_YEAR(4), b SQL_TSI_YEAR);", true, "CREATE TABLE IF NOT EXISTS `table_ident` (`a` YEAR(4),`b` YEAR)"}, + {`CREATE TABLE IF NOT EXISTS table_ident (ident1 BOOL COMMENT "text_string" unique, ident2 SQL_TSI_YEAR(4) ZEROFILL);`, true, "CREATE TABLE IF NOT EXISTS `table_ident` (`ident1` TINYINT(1) COMMENT 'text_string' UNIQUE KEY,`ident2` YEAR(4))"}, + {"create table t (y sql_tsi_year(4), y1 sql_tsi_year)", true, "CREATE TABLE `t` (`y` YEAR(4),`y1` YEAR)"}, + {"create table t (y 
sql_tsi_year(4) unsigned zerofill zerofill, y1 sql_tsi_year signed unsigned zerofill)", true, "CREATE TABLE `t` (`y` YEAR(4),`y1` YEAR)"}, + + // for issue 529 + {"create table t (a text byte ascii)", false, ""}, + {"create table t (a text byte charset latin1)", false, ""}, + {"create table t (a longtext ascii)", true, "CREATE TABLE `t` (`a` LONGTEXT CHARACTER SET LATIN1)"}, + {"create table t (a mediumtext ascii)", true, "CREATE TABLE `t` (`a` MEDIUMTEXT CHARACTER SET LATIN1)"}, + {"create table t (a tinytext ascii)", true, "CREATE TABLE `t` (`a` TINYTEXT CHARACTER SET LATIN1)"}, + {"create table t (a text byte)", true, "CREATE TABLE `t` (`a` TEXT)"}, + {"create table t (a long byte, b text ascii)", true, "CREATE TABLE `t` (`a` MEDIUMTEXT,`b` TEXT CHARACTER SET LATIN1)"}, + {"create table t (a text ascii, b mediumtext ascii, c int)", true, "CREATE TABLE `t` (`a` TEXT CHARACTER SET LATIN1,`b` MEDIUMTEXT CHARACTER SET LATIN1,`c` INT)"}, + {"create table t (a int, b text ascii, c mediumtext ascii)", true, "CREATE TABLE `t` (`a` INT,`b` TEXT CHARACTER SET LATIN1,`c` MEDIUMTEXT CHARACTER SET LATIN1)"}, + {"create table t (a long ascii, b long ascii)", true, "CREATE TABLE `t` (`a` MEDIUMTEXT CHARACTER SET LATIN1,`b` MEDIUMTEXT CHARACTER SET LATIN1)"}, + {"create table t (a long character set utf8mb4, b long charset utf8mb4, c long char set utf8mb4)", true, "CREATE TABLE `t` (`a` MEDIUMTEXT CHARACTER SET UTF8MB4,`b` MEDIUMTEXT CHARACTER SET UTF8MB4,`c` MEDIUMTEXT CHARACTER SET UTF8MB4)"}, + + {"create table t (a int STORAGE MEMORY, b varchar(255) STORAGE MEMORY)", true, "CREATE TABLE `t` (`a` INT STORAGE MEMORY,`b` VARCHAR(255) STORAGE MEMORY)"}, + {"create table t (a int storage DISK, b varchar(255) STORAGE DEFAULT)", true, "CREATE TABLE `t` (`a` INT STORAGE DISK,`b` VARCHAR(255) STORAGE DEFAULT)"}, + {"create table t (a int STORAGE DEFAULT, b varchar(255) STORAGE DISK)", true, "CREATE TABLE `t` (`a` INT STORAGE DEFAULT,`b` VARCHAR(255) STORAGE DISK)"}, + + // for issue 555 + {"create table t (a fixed(6, 3), b fixed key)", true, "CREATE TABLE `t` (`a` DECIMAL(6,3),`b` DECIMAL PRIMARY KEY)"}, + {"create table t (a numeric, b fixed(6))", true, "CREATE TABLE `t` (`a` DECIMAL,`b` DECIMAL(6))"}, + {"create table t (a fixed(65, 30) zerofill, b numeric, c fixed(65) unsigned zerofill)", true, "CREATE TABLE `t` (`a` DECIMAL(65,30) UNSIGNED ZEROFILL,`b` DECIMAL,`c` DECIMAL(65) UNSIGNED ZEROFILL)"}, + + // create table with expression index + {"create table a(a int, key(lower(a)));", false, ""}, + {"create table a(a int, key(a+1));", false, ""}, + {"create table a(a int, key(a, a+1));", false, ""}, + {"create table a(a int, b int, key((a+1), (b+1)));", true, "CREATE TABLE `a` (`a` INT,`b` INT,INDEX((`a`+1), (`b`+1)))"}, + {"create table a(a int, b int, key(a, (b+1)));", true, "CREATE TABLE `a` (`a` INT,`b` INT,INDEX(`a`, (`b`+1)))"}, + {"create table a(a int, b int, key((a+1), b));", true, "CREATE TABLE `a` (`a` INT,`b` INT,INDEX((`a`+1), `b`))"}, + {"create table a(a int, b int, key((a + 1) desc));", true, "CREATE TABLE `a` (`a` INT,`b` INT,INDEX((`a`+1)))"}, + + // for auto_random + {"create table t (a bigint auto_random(3) primary key, b varchar(255))", true, "CREATE TABLE `t` (`a` BIGINT AUTO_RANDOM(3) PRIMARY KEY,`b` VARCHAR(255))"}, + {"create table t (a bigint auto_random primary key, b varchar(255))", true, "CREATE TABLE `t` (`a` BIGINT AUTO_RANDOM PRIMARY KEY,`b` VARCHAR(255))"}, + {"create table t (a bigint primary key auto_random(4), b varchar(255))", true, "CREATE TABLE `t` (`a` BIGINT PRIMARY 
KEY AUTO_RANDOM(4),`b` VARCHAR(255))"}, + {"create table t (a bigint primary key auto_random(3) primary key unique, b varchar(255))", true, "CREATE TABLE `t` (`a` BIGINT PRIMARY KEY AUTO_RANDOM(3) PRIMARY KEY UNIQUE KEY,`b` VARCHAR(255))"}, + } + s.RunTest(c, table) +} + +func (s *testParserSuite) TestType(c *C) { + table := []testCase{ + // for time fsp + {"CREATE TABLE t( c1 TIME(2), c2 DATETIME(2), c3 TIMESTAMP(2) );", true, "CREATE TABLE `t` (`c1` TIME(2),`c2` DATETIME(2),`c3` TIMESTAMP(2))"}, + + // for hexadecimal + {"select x'0a', X'11', 0x11", true, "SELECT x'0a',x'11',x'11'"}, + {"select x'13181C76734725455A'", true, "SELECT x'13181c76734725455a'"}, + {"select x'0xaa'", false, ""}, + {"select 0X11", false, ""}, + {"select 0x4920616D2061206C6F6E672068657820737472696E67", true, "SELECT x'4920616d2061206c6f6e672068657820737472696e67'"}, + + // for bit + {"select 0b01, 0b0, b'11', B'11'", true, "SELECT b'1',b'0',b'11',b'11'"}, + // 0B01 and 0b21 are identifiers, the following two statement could parse. + // {"select 0B01", false, ""}, + // {"select 0b21", false, ""}, + + // for enum and set type + {"create table t (c1 enum('a', 'b'), c2 set('a', 'b'))", true, "CREATE TABLE `t` (`c1` ENUM('a','b'),`c2` SET('a','b'))"}, + {"create table t (c1 enum)", false, ""}, + {"create table t (c1 set)", false, ""}, + + // for blob and text field length + {"create table t (c1 blob(1024), c2 text(1024))", true, "CREATE TABLE `t` (`c1` BLOB(1024),`c2` TEXT(1024))"}, + + // for year + {"create table t (y year(4), y1 year)", true, "CREATE TABLE `t` (`y` YEAR(4),`y1` YEAR)"}, + {"create table t (y year(4) unsigned zerofill zerofill, y1 year signed unsigned zerofill)", true, "CREATE TABLE `t` (`y` YEAR(4),`y1` YEAR)"}, + + // for national + {"create table t (c1 national char(2), c2 national varchar(2))", true, "CREATE TABLE `t` (`c1` CHAR(2),`c2` VARCHAR(2))"}, + + // for json type + {`create table t (a JSON);`, true, "CREATE TABLE `t` (`a` JSON)"}, + } + s.RunTest(c, table) +} + +func (s *testParserSuite) TestCommentErrMsg(c *C) { + table := []testErrMsgCase{ + {"delete from t where a = 7 or 1=1/*' and b = 'p'", false, errors.New("near '/*' and b = 'p'' at line 1")}, + {"delete from t where a = 7 or\n 1=1/*' and b = 'p'", false, errors.New("near '/*' and b = 'p'' at line 2")}, + {"select 1/*", false, errors.New("near '/*' at line 1")}, + {"select 1/* comment */", false, nil}, + } + s.RunErrMsgTest(c, table) +} + +func (s *testParserSuite) TestSQLResult(c *C) { + table := []testCase{ + {`select SQL_BIG_RESULT c1 from t group by c1`, true, "SELECT SQL_BIG_RESULT `c1` FROM `t` GROUP BY `c1`"}, + {`select SQL_SMALL_RESULT c1 from t group by c1`, true, "SELECT SQL_SMALL_RESULT `c1` FROM `t` GROUP BY `c1`"}, + {`select SQL_BUFFER_RESULT * from t`, true, "SELECT SQL_BUFFER_RESULT * FROM `t`"}, + {`select sql_small_result sql_big_result sql_buffer_result 1`, true, "SELECT SQL_SMALL_RESULT SQL_BIG_RESULT SQL_BUFFER_RESULT 1"}, + } + s.RunTest(c, table) +} + +func (s *testParserSuite) TestSQLNoCache(c *C) { + table := []testCase{ + {`select SQL_NO_CACHE * from t`, false, ""}, + {`select SQL_CACHE * from t`, true, "SELECT * FROM `t`"}, + {`select * from t`, true, "SELECT * FROM `t`"}, + } + + parser := parser.New() + for _, tt := range table { + stmt, _, err := parser.Parse(tt.src, "", "") + c.Assert(err, IsNil) + + sel := stmt[0].(*ast.SelectStmt) + c.Assert(sel.SelectStmtOpts.SQLCache, Equals, tt.ok) + } +} + +func (s *testParserSuite) TestEscape(c *C) { + table := []testCase{ + {`select """;`, false, ""}, + 
{`select """";`, true, "SELECT '\"'"}, + {`select "汉字";`, true, "SELECT '汉字'"}, + {`select 'abc"def';`, true, "SELECT 'abc\"def'"}, + {`select 'a\r\n';`, true, "SELECT 'a\r\n'"}, + {`select "\a\r\n"`, true, "SELECT 'a\r\n'"}, + {`select "\xFF"`, true, "SELECT 'xFF'"}, + } + s.RunTest(c, table) +} + +func (s *testParserSuite) TestInsertStatementMemoryAllocation(c *C) { + sql := "insert t values (1)" + strings.Repeat(",(1)", 1000) + var oldStats, newStats runtime.MemStats + runtime.ReadMemStats(&oldStats) + _, err := parser.New().ParseOneStmt(sql, "", "") + c.Assert(err, IsNil) + runtime.ReadMemStats(&newStats) + c.Assert(int(newStats.TotalAlloc-oldStats.TotalAlloc), Less, 1024*500) +} + +func (s *testParserSuite) TestExplain(c *C) { + table := []testCase{ + {"explain select c1 from t1", true, "EXPLAIN FORMAT = 'row' SELECT `c1` FROM `t1`"}, + {"explain insert into t values (1), (2), (3)", true, "EXPLAIN FORMAT = 'row' INSERT INTO `t` VALUES (1),(2),(3)"}, + {"explain replace into foo values (1 || 2)", true, "EXPLAIN FORMAT = 'row' REPLACE INTO `foo` VALUES (1 OR 2)"}, + {"EXPLAIN FORMAT = 'dot' SELECT 1", true, "EXPLAIN FORMAT = 'dot' SELECT 1"}, + {"EXPLAIN FORMAT = 'row' SELECT 1", true, "EXPLAIN FORMAT = 'row' SELECT 1"}, + {"EXPLAIN FORMAT = 'ROW' SELECT 1", true, "EXPLAIN FORMAT = 'ROW' SELECT 1"}, + {"EXPLAIN SELECT 1", true, "EXPLAIN FORMAT = 'row' SELECT 1"}, + {"EXPLAIN FORMAT = JSON SELECT 1", true, "EXPLAIN FORMAT = 'json' SELECT 1"}, + {"EXPLAIN FORMAT = 'hint' SELECT 1", true, "EXPLAIN FORMAT = 'hint' SELECT 1"}, + } + s.RunTest(c, table) +} + +func (s *testParserSuite) TestSQLModeANSIQuotes(c *C) { + parser := parser.New() + parser.SetSQLMode(mysql.ModeANSIQuotes) + tests := []string{ + `CREATE TABLE "table" ("id" int)`, + `select * from t "tt"`, + } + for _, test := range tests { + _, _, err := parser.Parse(test, "", "") + c.Assert(err, IsNil) + } +} + +func (s *testParserSuite) TestAnalyze(c *C) { + table := []testCase{ + {"analyze table t1", true, "ANALYZE TABLE `t1`"}, + {"analyze table t1.*", false, ""}, + {"analyze table t,t1", true, "ANALYZE TABLE `t`,`t1`"}, + } + s.RunTest(c, table) +} + +func (s *testParserSuite) TestSideEffect(c *C) { + // This test cover a bug that parse an error SQL doesn't leave the parser in a + // clean state, cause the following SQL parse fail. 
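+	// The first statement below is intentionally malformed and must fail; the following
+	// `show tables` must still parse, showing that the parser state was reset.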
+ parser := parser.New() + _, err := parser.ParseOneStmt("create table t /*!50100 'abc', 'abc' */;", "", "") + c.Assert(err, NotNil) + + _, err = parser.ParseOneStmt("show tables;", "", "") + c.Assert(err, IsNil) +} + +// See https://github.com/pingcap/tidb/parser/issue/94 +func (s *testParserSuite) TestQuotedSystemVariables(c *C) { + parser := parser.New() + + st, err := parser.ParseOneStmt( + "select @@Sql_Mode, @@`SQL_MODE`, @@session.`sql_mode`, @@global.`s ql``mode`, @@session.'sql\\nmode', @@local.\"sql\\\"mode\";", + "", + "", + ) + c.Assert(err, IsNil) + ss := st.(*ast.SelectStmt) + expected := []*ast.VariableExpr{ + { + Name: "sql_mode", + IsGlobal: false, + IsSystem: true, + ExplicitScope: false, + }, + { + Name: "sql_mode", + IsGlobal: false, + IsSystem: true, + ExplicitScope: false, + }, + { + Name: "sql_mode", + IsGlobal: false, + IsSystem: true, + ExplicitScope: true, + }, + { + Name: "s ql`mode", + IsGlobal: true, + IsSystem: true, + ExplicitScope: true, + }, + { + Name: "sql\nmode", + IsGlobal: false, + IsSystem: true, + ExplicitScope: true, + }, + { + Name: `sql"mode`, + IsGlobal: false, + IsSystem: true, + ExplicitScope: true, + }, + } + + c.Assert(len(ss.Fields.Fields), Equals, len(expected)) + for i, field := range ss.Fields.Fields { + ve := field.Expr.(*ast.VariableExpr) + cmt := Commentf("field %d, ve = %v", i, ve) + c.Assert(ve.Name, Equals, expected[i].Name, cmt) + c.Assert(ve.IsGlobal, Equals, expected[i].IsGlobal, cmt) + c.Assert(ve.IsSystem, Equals, expected[i].IsSystem, cmt) + c.Assert(ve.ExplicitScope, Equals, expected[i].ExplicitScope, cmt) + } +} + +// See https://github.com/pingcap/tidb/parser/issue/95 +func (s *testParserSuite) TestQuotedVariableColumnName(c *C) { + parser := parser.New() + + st, err := parser.ParseOneStmt( + "select @abc, @`abc`, @'aBc', @\"AbC\", @6, @`6`, @'6', @\"6\", @@sql_mode, @@`sql_mode`, @;", + "", + "", + ) + c.Assert(err, IsNil) + ss := st.(*ast.SelectStmt) + expected := []string{ + "@abc", + "@`abc`", + "@'aBc'", + `@"AbC"`, + "@6", + "@`6`", + "@'6'", + `@"6"`, + "@@sql_mode", + "@@`sql_mode`", + "@", + } + + c.Assert(len(ss.Fields.Fields), Equals, len(expected)) + for i, field := range ss.Fields.Fields { + c.Assert(field.Text(), Equals, expected[i]) + } +} diff --git a/parser/terror/terror.go b/parser/terror/terror.go new file mode 100644 index 0000000..9469d92 --- /dev/null +++ b/parser/terror/terror.go @@ -0,0 +1,361 @@ +// Copyright 2015 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package terror + +import ( + "encoding/json" + "fmt" + "strconv" + + "github.com/pingcap/errors" + "github.com/pingcap/tidb/parser/mysql" + log "github.com/sirupsen/logrus" +) + +// Global error instances. +var ( + ErrCritical = ClassGlobal.New(CodeExecResultIsEmpty, "critical error %v") + ErrResultUndetermined = ClassGlobal.New(CodeResultUndetermined, "execution result undetermined") +) + +// ErrCode represents a specific error type in a error class. +// Same error code can be used in different error classes. +type ErrCode int + +const ( + // Executor error codes. 
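+	// These codes are terror-internal; ToSQLError translates them to MySQL error
+	// numbers via ErrClassToMySQLCodes.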
+ + // CodeUnknown is for errors of unknown reason. + CodeUnknown ErrCode = -1 + // CodeExecResultIsEmpty indicates execution result is empty. + CodeExecResultIsEmpty ErrCode = 3 + + // Expression error codes. + + // CodeMissConnectionID indicates connection id is missing. + CodeMissConnectionID ErrCode = 1 + + // Special error codes. + + // CodeResultUndetermined indicates the sql execution result is undetermined. + CodeResultUndetermined ErrCode = 2 +) + +// ErrClass represents a class of errors. +type ErrClass int + +// Error classes. +const ( + ClassAutoid ErrClass = iota + 1 + ClassDDL + ClassDomain + ClassEvaluator + ClassExecutor + ClassExpression + ClassAdmin + ClassKV + ClassMeta + ClassOptimizer + ClassParser + ClassPerfSchema + ClassPrivilege + ClassSchema + ClassServer + ClassStructure + ClassVariable + ClassXEval + ClassTable + ClassTypes + ClassGlobal + ClassMockTikv + ClassJSON + ClassTiKV + ClassSession + ClassPlugin + ClassUtil + // Add more as needed. +) + +var errClz2Str = map[ErrClass]string{ + ClassAutoid: "autoid", + ClassDDL: "ddl", + ClassDomain: "domain", + ClassExecutor: "executor", + ClassExpression: "expression", + ClassAdmin: "admin", + ClassMeta: "meta", + ClassKV: "kv", + ClassOptimizer: "planner", + ClassParser: "parser", + ClassPerfSchema: "perfschema", + ClassPrivilege: "privilege", + ClassSchema: "schema", + ClassServer: "server", + ClassStructure: "structure", + ClassVariable: "variable", + ClassTable: "table", + ClassTypes: "types", + ClassGlobal: "global", + ClassMockTikv: "mocktikv", + ClassJSON: "json", + ClassTiKV: "tikv", + ClassSession: "session", + ClassPlugin: "plugin", + ClassUtil: "util", +} + +// String implements fmt.Stringer interface. +func (ec ErrClass) String() string { + if s, exists := errClz2Str[ec]; exists { + return s + } + return strconv.Itoa(int(ec)) +} + +// EqualClass returns true if err is *Error with the same class. +func (ec ErrClass) EqualClass(err error) bool { + e := errors.Cause(err) + if e == nil { + return false + } + if te, ok := e.(*Error); ok { + return te.class == ec + } + return false +} + +// NotEqualClass returns true if err is not *Error with the same class. +func (ec ErrClass) NotEqualClass(err error) bool { + return !ec.EqualClass(err) +} + +// New creates an *Error with an error code and an error message. +// Usually used to create base *Error. +func (ec ErrClass) New(code ErrCode, message string) *Error { + return &Error{ + class: ec, + code: code, + message: message, + } +} + +// NewStd calls New using the standard message for the error code +func (ec ErrClass) NewStd(code ErrCode) *Error { + return ec.New(code, mysql.MySQLErrName[uint16(code)]) +} + +// Error implements error interface and adds integer Class and Code, so +// errors with different message can be compared. +type Error struct { + class ErrClass + code ErrCode + message string + args []interface{} + file string + line int +} + +// Class returns ErrClass +func (e *Error) Class() ErrClass { + return e.class +} + +// Code returns ErrCode +func (e *Error) Code() ErrCode { + return e.code +} + +// MarshalJSON implements json.Marshaler interface. +func (e *Error) MarshalJSON() ([]byte, error) { + return json.Marshal(&struct { + Class ErrClass `json:"class"` + Code ErrCode `json:"code"` + Msg string `json:"message"` + }{ + Class: e.class, + Code: e.code, + Msg: e.getMsg(), + }) +} + +// UnmarshalJSON implements json.Unmarshaler interface. 
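+// Only the class, code and rendered message survive a marshal/unmarshal round
+// trip; formatting args and the caller location are not serialized.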
+func (e *Error) UnmarshalJSON(data []byte) error { + err := &struct { + Class ErrClass `json:"class"` + Code ErrCode `json:"code"` + Msg string `json:"message"` + }{} + + if err := json.Unmarshal(data, &err); err != nil { + return errors.Trace(err) + } + + e.class = err.Class + e.code = err.Code + e.message = err.Msg + return nil +} + +// Location returns the location where the error is created, +// implements juju/errors locationer interface. +func (e *Error) Location() (file string, line int) { + return e.file, e.line +} + +// Error implements error interface. +func (e *Error) Error() string { + return fmt.Sprintf("[%s:%d]%s", e.class, e.code, e.getMsg()) +} + +func (e *Error) getMsg() string { + if len(e.args) > 0 { + return fmt.Sprintf(e.message, e.args...) + } + return e.message +} + +// GenWithStack generates a new *Error with the same class and code, and a new formatted message. +func (e *Error) GenWithStack(format string, args ...interface{}) error { + err := *e + err.message = format + err.args = args + return errors.AddStack(&err) +} + +// GenWithStackByArgs generates a new *Error with the same class and code, and new arguments. +func (e *Error) GenWithStackByArgs(args ...interface{}) error { + err := *e + err.args = args + return errors.AddStack(&err) +} + +// FastGen generates a new *Error with the same class and code, and a new formatted message. +// This will not call runtime.Caller to get file and line. +func (e *Error) FastGen(format string, args ...interface{}) error { + err := *e + err.message = format + err.args = args + return errors.SuspendStack(&err) +} + +// FastGen generates a new *Error with the same class and code, and a new arguments. +// This will not call runtime.Caller to get file and line. +func (e *Error) FastGenByArgs(args ...interface{}) error { + err := *e + err.args = args + return errors.SuspendStack(&err) +} + +// Equal checks if err is equal to e. +func (e *Error) Equal(err error) bool { + originErr := errors.Cause(err) + if originErr == nil { + return false + } + + if error(e) == originErr { + return true + } + inErr, ok := originErr.(*Error) + return ok && e.class == inErr.class && e.code == inErr.code +} + +// NotEqual checks if err is not equal to e. +func (e *Error) NotEqual(err error) bool { + return !e.Equal(err) +} + +// ToSQLError convert Error to mysql.SQLError. +func (e *Error) ToSQLError() *mysql.SQLError { + code := e.getMySQLErrorCode() + return mysql.NewErrf(code, "%s", e.getMsg()) +} + +var defaultMySQLErrorCode uint16 + +func (e *Error) getMySQLErrorCode() uint16 { + codeMap, ok := ErrClassToMySQLCodes[e.class] + if !ok { + log.Warnf("Unknown error class: %v", e.class) + return defaultMySQLErrorCode + } + code, ok := codeMap[e.code] + if !ok { + log.Debugf("Unknown error class: %v code: %v", e.class, e.code) + return defaultMySQLErrorCode + } + return code +} + +var ( + // ErrClassToMySQLCodes is the map of ErrClass to code-map. + ErrClassToMySQLCodes map[ErrClass]map[ErrCode]uint16 +) + +func init() { + ErrClassToMySQLCodes = make(map[ErrClass]map[ErrCode]uint16) + defaultMySQLErrorCode = mysql.ErrUnknown +} + +// ErrorEqual returns a boolean indicating whether err1 is equal to err2. 
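+// Two *Error values compare equal when their class and code match; for other
+// error types the error messages are compared instead.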
+func ErrorEqual(err1, err2 error) bool { + e1 := errors.Cause(err1) + e2 := errors.Cause(err2) + + if e1 == e2 { + return true + } + + if e1 == nil || e2 == nil { + return e1 == e2 + } + + te1, ok1 := e1.(*Error) + te2, ok2 := e2.(*Error) + if ok1 && ok2 { + return te1.class == te2.class && te1.code == te2.code + } + + return e1.Error() == e2.Error() +} + +// ErrorNotEqual returns a boolean indicating whether err1 isn't equal to err2. +func ErrorNotEqual(err1, err2 error) bool { + return !ErrorEqual(err1, err2) +} + +// MustNil cleans up and fatals if err is not nil. +func MustNil(err error, closeFuns ...func()) { + if err != nil { + for _, f := range closeFuns { + f() + } + log.Fatalf(errors.ErrorStack(err)) + } +} + +// Call executes a function and checks the returned err. +func Call(fn func() error) { + err := fn() + if err != nil { + log.Error(errors.ErrorStack(err)) + } +} + +// Log logs the error if it is not nil. +func Log(err error) { + if err != nil { + log.Error(errors.ErrorStack(err)) + } +} diff --git a/parser/terror/terror_test.go b/parser/terror/terror_test.go new file mode 100644 index 0000000..3b72ea9 --- /dev/null +++ b/parser/terror/terror_test.go @@ -0,0 +1,166 @@ +// Copyright 2015 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package terror + +import ( + "encoding/json" + "runtime" + "strings" + "testing" + + . "github.com/pingcap/check" + "github.com/pingcap/errors" +) + +func TestT(t *testing.T) { + CustomVerboseFlag = true + TestingT(t) +} + +var _ = Suite(&testTErrorSuite{}) + +type testTErrorSuite struct { +} + +func (s *testTErrorSuite) TestErrCode(c *C) { + c.Assert(CodeMissConnectionID, Equals, ErrCode(1)) + c.Assert(CodeResultUndetermined, Equals, ErrCode(2)) +} + +func (s *testTErrorSuite) TestTError(c *C) { + c.Assert(ClassParser.String(), Not(Equals), "") + c.Assert(ClassOptimizer.String(), Not(Equals), "") + c.Assert(ClassKV.String(), Not(Equals), "") + c.Assert(ClassServer.String(), Not(Equals), "") + + parserErr := ClassParser.New(ErrCode(1), "error 1") + c.Assert(parserErr.Error(), Not(Equals), "") + c.Assert(ClassParser.EqualClass(parserErr), IsTrue) + c.Assert(ClassParser.NotEqualClass(parserErr), IsFalse) + + c.Assert(ClassOptimizer.EqualClass(parserErr), IsFalse) + optimizerErr := ClassOptimizer.New(ErrCode(2), "abc") + c.Assert(ClassOptimizer.EqualClass(errors.New("abc")), IsFalse) + c.Assert(ClassOptimizer.EqualClass(nil), IsFalse) + c.Assert(optimizerErr.Equal(optimizerErr.GenWithStack("def")), IsTrue) + c.Assert(optimizerErr.Equal(nil), IsFalse) + c.Assert(optimizerErr.Equal(errors.New("abc")), IsFalse) + + // Test case for FastGen. 
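+	// Unlike GenWithStack, FastGen does not record the caller's stack, but the
+	// generated error still compares equal to its origin by class and code.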
+ c.Assert(optimizerErr.Equal(optimizerErr.FastGen("def")), IsTrue) + c.Assert(optimizerErr.Equal(optimizerErr.FastGen("def: %s", "def")), IsTrue) + kvErr := ClassKV.New(1062, "key already exist") + e := kvErr.FastGen("Duplicate entry '%d' for key 'PRIMARY'", 1) + c.Assert(e.Error(), Equals, "[kv:1062]Duplicate entry '1' for key 'PRIMARY'") + kvMySQLErrCodes := map[ErrCode]uint16{ + 1062: uint16(1062), + } + ErrClassToMySQLCodes[ClassKV] = kvMySQLErrCodes + sqlErr := errors.Cause(e).(*Error).ToSQLError() + c.Assert(sqlErr.Message, Equals, "Duplicate entry '1' for key 'PRIMARY'") + c.Assert(sqlErr.Code, Equals, uint16(1062)) + + err := errors.Trace(ErrCritical.GenWithStackByArgs("test")) + c.Assert(ErrCritical.Equal(err), IsTrue) + + err = errors.Trace(ErrCritical) + c.Assert(ErrCritical.Equal(err), IsTrue) +} + +func (s *testTErrorSuite) TestJson(c *C) { + prevTErr := &Error{ + class: ClassTable, + code: CodeExecResultIsEmpty, + message: "json test", + } + buf, err := json.Marshal(prevTErr) + c.Assert(err, IsNil) + var curTErr Error + err = json.Unmarshal(buf, &curTErr) + c.Assert(err, IsNil) + isEqual := prevTErr.Equal(&curTErr) + c.Assert(isEqual, IsTrue) +} + +var predefinedErr = ClassExecutor.New(ErrCode(123), "predefiend error") + +func example() error { + err := call() + return errors.Trace(err) +} + +func call() error { + return predefinedErr.GenWithStack("error message:%s", "abc") +} + +func (s *testTErrorSuite) TestTraceAndLocation(c *C) { + err := example() + stack := errors.ErrorStack(err) + lines := strings.Split(stack, "\n") + var sysStack = 0 + for _, line := range lines { + if strings.Contains(line, runtime.GOROOT()) { + sysStack++ + } + } + c.Assert(len(lines)-(2*sysStack), Equals, 15) + var containTerr bool + for _, v := range lines { + if strings.Contains(v, "terror_test.go") { + containTerr = true + break + } + } + c.Assert(containTerr, IsTrue) +} + +func (s *testTErrorSuite) TestErrorEqual(c *C) { + e1 := errors.New("test error") + c.Assert(e1, NotNil) + + e2 := errors.Trace(e1) + c.Assert(e2, NotNil) + + e3 := errors.Trace(e2) + c.Assert(e3, NotNil) + + c.Assert(errors.Cause(e2), Equals, e1) + c.Assert(errors.Cause(e3), Equals, e1) + c.Assert(errors.Cause(e2), Equals, errors.Cause(e3)) + + e4 := errors.New("test error") + c.Assert(errors.Cause(e4), Not(Equals), e1) + + e5 := errors.Errorf("test error") + c.Assert(errors.Cause(e5), Not(Equals), e1) + + c.Assert(ErrorEqual(e1, e2), IsTrue) + c.Assert(ErrorEqual(e1, e3), IsTrue) + c.Assert(ErrorEqual(e1, e4), IsTrue) + c.Assert(ErrorEqual(e1, e5), IsTrue) + + var e6 error + + c.Assert(ErrorEqual(nil, nil), IsTrue) + c.Assert(ErrorNotEqual(e1, e6), IsTrue) + code1 := ErrCode(1) + code2 := ErrCode(2) + te1 := ClassParser.New(code1, "abc") + te2 := ClassParser.New(code1, "def") + te3 := ClassKV.New(code1, "abc") + te4 := ClassKV.New(code2, "abc") + c.Assert(ErrorEqual(te1, te2), IsTrue) + c.Assert(ErrorEqual(te1, te3), IsFalse) + c.Assert(ErrorEqual(te3, te4), IsFalse) +} diff --git a/parser/types/etc.go b/parser/types/etc.go new file mode 100644 index 0000000..e9c6d19 --- /dev/null +++ b/parser/types/etc.go @@ -0,0 +1,108 @@ +// Copyright 2014 The ql Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSES/QL-LICENSE file. + +// Copyright 2015 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package types + +import ( + "strings" + + "github.com/pingcap/tidb/parser/mysql" + "github.com/pingcap/tidb/parser/terror" +) + +// IsTypeBlob returns a boolean indicating whether the tp is a blob type. +func IsTypeBlob(tp byte) bool { + switch tp { + case mysql.TypeTinyBlob, mysql.TypeMediumBlob, mysql.TypeBlob, mysql.TypeLongBlob: + return true + default: + return false + } +} + +// IsTypeChar returns a boolean indicating +// whether the tp is the char type like a string type or a varchar type. +func IsTypeChar(tp byte) bool { + return tp == mysql.TypeString || tp == mysql.TypeVarchar +} + +var type2Str = map[byte]string{ + mysql.TypeBit: "bit", + mysql.TypeBlob: "text", + mysql.TypeDate: "date", + mysql.TypeDatetime: "datetime", + mysql.TypeDecimal: "unspecified", + mysql.TypeNewDecimal: "decimal", + mysql.TypeDouble: "double", + mysql.TypeEnum: "enum", + mysql.TypeFloat: "float", + mysql.TypeGeometry: "geometry", + mysql.TypeInt24: "mediumint", + mysql.TypeJSON: "json", + mysql.TypeLong: "int", + mysql.TypeLonglong: "bigint", + mysql.TypeLongBlob: "longtext", + mysql.TypeMediumBlob: "mediumtext", + mysql.TypeNull: "null", + mysql.TypeSet: "set", + mysql.TypeShort: "smallint", + mysql.TypeString: "char", + mysql.TypeDuration: "time", + mysql.TypeTimestamp: "timestamp", + mysql.TypeTiny: "tinyint", + mysql.TypeTinyBlob: "tinytext", + mysql.TypeVarchar: "varchar", + mysql.TypeVarString: "var_string", + mysql.TypeYear: "year", +} + +// TypeStr converts tp to a string. +func TypeStr(tp byte) (r string) { + return type2Str[tp] +} + +// TypeToStr converts a field to a string. +// It is used for converting Text to Blob, +// or converting Char to Binary. +// Args: +// tp: type enum +// cs: charset +func TypeToStr(tp byte, cs string) (r string) { + ts := type2Str[tp] + if cs != "binary" { + return ts + } + if IsTypeBlob(tp) { + ts = strings.Replace(ts, "text", "blob", 1) + } else if IsTypeChar(tp) { + ts = strings.Replace(ts, "char", "binary", 1) + } + return ts +} + +var ( + dig2bytes = [10]int{0, 1, 1, 2, 2, 3, 3, 4, 4, 4} +) + +// constant values. +const ( + digitsPerWord = 9 // A word holds 9 digits. + wordSize = 4 // A word is 4 bytes int32. +) + +// ErrInvalidDefault is returned when meet a invalid default value. +var ErrInvalidDefault = terror.ClassTypes.New(mysql.ErrInvalidDefault, mysql.MySQLErrName[mysql.ErrInvalidDefault]) diff --git a/parser/types/eval_type.go b/parser/types/eval_type.go new file mode 100644 index 0000000..4777595 --- /dev/null +++ b/parser/types/eval_type.go @@ -0,0 +1,42 @@ +// Copyright 2017 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package types + +// EvalType indicates the specified types that arguments and result of a built-in function should be. 
+type EvalType byte + +const ( + // ETInt represents type INT in evaluation. + ETInt EvalType = iota + // ETReal represents type REAL in evaluation. + ETReal + // ETDecimal represents type DECIMAL in evaluation. + ETDecimal + // ETString represents type STRING in evaluation. + ETString + // ETDatetime represents type DATETIME in evaluation. + ETDatetime + // ETTimestamp represents type TIMESTAMP in evaluation. + ETTimestamp + // ETDuration represents type DURATION in evaluation. + ETDuration + // ETJson represents type JSON in evaluation. + ETJson +) + +// IsStringKind returns true for ETString, ETDatetime, ETTimestamp, ETDuration, ETJson EvalTypes. +func (et EvalType) IsStringKind() bool { + return et == ETString || et == ETDatetime || + et == ETTimestamp || et == ETDuration || et == ETJson +} diff --git a/parser/types/field_type.go b/parser/types/field_type.go new file mode 100644 index 0000000..08a90b9 --- /dev/null +++ b/parser/types/field_type.go @@ -0,0 +1,230 @@ +// Copyright 2015 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package types + +import ( + "fmt" + "strings" + + "github.com/pingcap/tidb/parser/charset" + "github.com/pingcap/tidb/parser/format" + "github.com/pingcap/tidb/parser/mysql" +) + +// UnspecifiedLength is unspecified length. +const ( + UnspecifiedLength = -1 +) + +// FieldType records field type information. +type FieldType struct { + Tp byte + Flag uint + Flen int + Decimal int + Charset string + Collate string + // Elems is the element list for enum and set type. + Elems []string +} + +// NewFieldType returns a FieldType, +// with a type and other information about field type. +func NewFieldType(tp byte) *FieldType { + return &FieldType{ + Tp: tp, + Flen: UnspecifiedLength, + Decimal: UnspecifiedLength, + } +} + +// Clone returns a copy of itself. +func (ft *FieldType) Clone() *FieldType { + ret := *ft + return &ret +} + +// Equal checks whether two FieldType objects are equal. +func (ft *FieldType) Equal(other *FieldType) bool { + // We do not need to compare whole `ft.Flag == other.Flag` when wrapping cast upon an Expression. + // but need compare unsigned_flag of ft.Flag. + partialEqual := ft.Tp == other.Tp && + ft.Flen == other.Flen && + ft.Decimal == other.Decimal && + ft.Charset == other.Charset && + ft.Collate == other.Collate && + mysql.HasUnsignedFlag(ft.Flag) == mysql.HasUnsignedFlag(other.Flag) + if !partialEqual || len(ft.Elems) != len(other.Elems) { + return false + } + for i := range ft.Elems { + if ft.Elems[i] != other.Elems[i] { + return false + } + } + return true +} + +// EvalType gets the type in evaluation. 
+func (ft *FieldType) EvalType() EvalType {
+	switch ft.Tp {
+	case mysql.TypeTiny, mysql.TypeShort, mysql.TypeInt24, mysql.TypeLong, mysql.TypeLonglong,
+		mysql.TypeBit, mysql.TypeYear:
+		return ETInt
+	case mysql.TypeFloat, mysql.TypeDouble:
+		return ETReal
+	case mysql.TypeNewDecimal:
+		return ETDecimal
+	case mysql.TypeDate, mysql.TypeDatetime:
+		return ETDatetime
+	case mysql.TypeTimestamp:
+		return ETTimestamp
+	case mysql.TypeDuration:
+		return ETDuration
+	case mysql.TypeJSON:
+		return ETJson
+	}
+	return ETString
+}
+
+// Hybrid checks whether a type is a hybrid type, which can represent different types of value in specific context.
+func (ft *FieldType) Hybrid() bool {
+	return ft.Tp == mysql.TypeEnum || ft.Tp == mysql.TypeBit || ft.Tp == mysql.TypeSet
+}
+
+// Init initializes the FieldType data.
+func (ft *FieldType) Init(tp byte) {
+	ft.Tp = tp
+	ft.Flen = UnspecifiedLength
+	ft.Decimal = UnspecifiedLength
+}
+
+// CompactStr only considers Tp/CharsetBin/Flen/Decimal.
+// This is used for showing column type in infoschema.
+func (ft *FieldType) CompactStr() string {
+	ts := TypeToStr(ft.Tp, ft.Charset)
+	suffix := ""
+
+	defaultFlen, defaultDecimal := mysql.GetDefaultFieldLengthAndDecimal(ft.Tp)
+	isDecimalNotDefault := ft.Decimal != defaultDecimal && ft.Decimal != 0 && ft.Decimal != UnspecifiedLength
+
+	// displayFlen and displayDecimal are flen and decimal values with `-1` substituted with default value.
+	displayFlen, displayDecimal := ft.Flen, ft.Decimal
+	if displayFlen == 0 || displayFlen == UnspecifiedLength {
+		displayFlen = defaultFlen
+	}
+	if displayDecimal == 0 || displayDecimal == UnspecifiedLength {
+		displayDecimal = defaultDecimal
+	}
+
+	switch ft.Tp {
+	case mysql.TypeEnum, mysql.TypeSet:
+		// Format is ENUM ('e1', 'e2') or SET ('e1', 'e2')
+		es := make([]string, 0, len(ft.Elems))
+		for _, e := range ft.Elems {
+			e = format.OutputFormat(e)
+			es = append(es, e)
+		}
+		suffix = fmt.Sprintf("('%s')", strings.Join(es, "','"))
+	case mysql.TypeTimestamp, mysql.TypeDatetime, mysql.TypeDuration:
+		if isDecimalNotDefault {
+			suffix = fmt.Sprintf("(%d)", displayDecimal)
+		}
+	case mysql.TypeDouble, mysql.TypeFloat:
+		// 1. Flen Not Default, Decimal Not Default -> Valid
+		// 2. Flen Not Default, Decimal Default (-1) -> Invalid
+		// 3. Flen Default, Decimal Not Default -> Valid
+		// 4. Flen Default, Decimal Default -> Valid (hide)
+		if isDecimalNotDefault {
+			suffix = fmt.Sprintf("(%d,%d)", displayFlen, displayDecimal)
+		}
+	case mysql.TypeNewDecimal:
+		suffix = fmt.Sprintf("(%d,%d)", displayFlen, displayDecimal)
+	case mysql.TypeBit, mysql.TypeShort, mysql.TypeTiny, mysql.TypeInt24, mysql.TypeLong, mysql.TypeLonglong, mysql.TypeVarchar, mysql.TypeString, mysql.TypeVarString:
+		// Flen is always shown.
+		suffix = fmt.Sprintf("(%d)", displayFlen)
+	case mysql.TypeYear:
+		suffix = fmt.Sprintf("(%d)", ft.Flen)
+	}
+	return ts + suffix
+}
+
+// InfoSchemaStr joins the CompactStr with unsigned flag and
+// returns a string.
+func (ft *FieldType) InfoSchemaStr() string {
+	suffix := ""
+	if mysql.HasUnsignedFlag(ft.Flag) {
+		suffix = " unsigned"
+	}
+	return ft.CompactStr() + suffix
+}
+
+// String joins the information of FieldType and returns a string.
+// Note: when flen or decimal is unspecified, this function will use the default value instead of -1.
+func (ft *FieldType) String() string { + strs := []string{ft.CompactStr()} + if mysql.HasUnsignedFlag(ft.Flag) { + strs = append(strs, "UNSIGNED") + } + if mysql.HasZerofillFlag(ft.Flag) { + strs = append(strs, "ZEROFILL") + } + if mysql.HasBinaryFlag(ft.Flag) && ft.Tp != mysql.TypeString { + strs = append(strs, "BINARY") + } + + if IsTypeChar(ft.Tp) || IsTypeBlob(ft.Tp) { + if ft.Charset != "" && ft.Charset != charset.CharsetBin { + strs = append(strs, fmt.Sprintf("CHARACTER SET %s", ft.Charset)) + } + if ft.Collate != "" && ft.Collate != charset.CharsetBin { + strs = append(strs, fmt.Sprintf("COLLATE %s", ft.Collate)) + } + } + + return strings.Join(strs, " ") +} + +// VarStorageLen indicates this column is a variable length column. +const VarStorageLen = -1 + +// StorageLength is the length of stored value for the type. +func (ft *FieldType) StorageLength() int { + switch ft.Tp { + case mysql.TypeTiny, mysql.TypeShort, mysql.TypeInt24, mysql.TypeLong, + mysql.TypeLonglong, mysql.TypeDouble, mysql.TypeFloat, mysql.TypeYear, mysql.TypeDuration, + mysql.TypeDate, mysql.TypeDatetime, mysql.TypeTimestamp, mysql.TypeEnum, mysql.TypeSet, + mysql.TypeBit: + // This may not be the accurate length, because we may encode them as varint. + return 8 + case mysql.TypeNewDecimal: + precision, frac := ft.Flen-ft.Decimal, ft.Decimal + return precision/digitsPerWord*wordSize + dig2bytes[precision%digitsPerWord] + frac/digitsPerWord*wordSize + dig2bytes[frac%digitsPerWord] + default: + return VarStorageLen + } +} + +// HasCharset indicates if a COLUMN has an associated charset. Returning false here prevents some information +// statements(like `SHOW CREATE TABLE`) from attaching a CHARACTER SET clause to the column. +func HasCharset(ft *FieldType) bool { + switch ft.Tp { + case mysql.TypeVarchar, mysql.TypeString, mysql.TypeVarString, mysql.TypeBlob, + mysql.TypeTinyBlob, mysql.TypeMediumBlob, mysql.TypeLongBlob: + return !mysql.HasBinaryFlag(ft.Flag) + case mysql.TypeEnum, mysql.TypeSet: + return true + } + return false +} diff --git a/parser/types/field_type_test.go b/parser/types/field_type_test.go new file mode 100644 index 0000000..d45268e --- /dev/null +++ b/parser/types/field_type_test.go @@ -0,0 +1,240 @@ +// Copyright 2019 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package types_test + +import ( + "fmt" + "testing" + + . "github.com/pingcap/check" + "github.com/pingcap/tidb/parser" + "github.com/pingcap/tidb/parser/ast" + "github.com/pingcap/tidb/parser/charset" + "github.com/pingcap/tidb/parser/mysql" + . 
"github.com/pingcap/tidb/parser/types" + _ "github.com/pingcap/tidb/types/parser_driver" +) + +func TestT(t *testing.T) { + CustomVerboseFlag = true + TestingT(t) +} + +var _ = Suite(&testFieldTypeSuite{}) + +type testFieldTypeSuite struct { +} + +func (s *testFieldTypeSuite) TestFieldType(c *C) { + ft := NewFieldType(mysql.TypeDuration) + c.Assert(ft.Flen, Equals, UnspecifiedLength) + c.Assert(ft.Decimal, Equals, UnspecifiedLength) + ft.Decimal = 5 + c.Assert(ft.String(), Equals, "time(5)") + c.Assert(HasCharset(ft), IsFalse) + + ft = NewFieldType(mysql.TypeLong) + ft.Flen = 5 + ft.Flag = mysql.UnsignedFlag | mysql.ZerofillFlag + c.Assert(ft.String(), Equals, "int(5) UNSIGNED ZEROFILL") + c.Assert(ft.InfoSchemaStr(), Equals, "int(5) unsigned") + c.Assert(HasCharset(ft), IsFalse) + + ft = NewFieldType(mysql.TypeFloat) + ft.Flen = 12 // Default + ft.Decimal = 3 // Not Default + c.Assert(ft.String(), Equals, "float(12,3)") + ft = NewFieldType(mysql.TypeFloat) + ft.Flen = 12 // Default + ft.Decimal = -1 // Default + c.Assert(ft.String(), Equals, "float") + ft = NewFieldType(mysql.TypeFloat) + ft.Flen = 5 // Not Default + ft.Decimal = -1 // Default + c.Assert(ft.String(), Equals, "float") + ft = NewFieldType(mysql.TypeFloat) + ft.Flen = 7 // Not Default + ft.Decimal = 3 // Not Default + c.Assert(ft.String(), Equals, "float(7,3)") + c.Assert(HasCharset(ft), IsFalse) + + ft = NewFieldType(mysql.TypeDouble) + ft.Flen = 22 // Default + ft.Decimal = 3 // Not Default + c.Assert(ft.String(), Equals, "double(22,3)") + ft = NewFieldType(mysql.TypeDouble) + ft.Flen = 22 // Default + ft.Decimal = -1 // Default + c.Assert(ft.String(), Equals, "double") + ft = NewFieldType(mysql.TypeDouble) + ft.Flen = 5 // Not Default + ft.Decimal = -1 // Default + c.Assert(ft.String(), Equals, "double") + ft = NewFieldType(mysql.TypeDouble) + ft.Flen = 7 // Not Default + ft.Decimal = 3 // Not Default + c.Assert(ft.String(), Equals, "double(7,3)") + c.Assert(HasCharset(ft), IsFalse) + + ft = NewFieldType(mysql.TypeBlob) + ft.Flen = 10 + ft.Charset = "UTF8" + ft.Collate = "UTF8_UNICODE_GI" + c.Assert(ft.String(), Equals, "text CHARACTER SET UTF8 COLLATE UTF8_UNICODE_GI") + c.Assert(HasCharset(ft), IsTrue) + + ft = NewFieldType(mysql.TypeVarchar) + ft.Flen = 10 + ft.Flag |= mysql.BinaryFlag + c.Assert(ft.String(), Equals, "varchar(10) BINARY") + c.Assert(HasCharset(ft), IsFalse) + + ft = NewFieldType(mysql.TypeString) + ft.Charset = charset.CollationBin + ft.Flag |= mysql.BinaryFlag + c.Assert(ft.String(), Equals, "binary(1)") + c.Assert(HasCharset(ft), IsFalse) + + ft = NewFieldType(mysql.TypeEnum) + ft.Elems = []string{"a", "b"} + c.Assert(ft.String(), Equals, "enum('a','b')") + c.Assert(HasCharset(ft), IsTrue) + + ft = NewFieldType(mysql.TypeEnum) + ft.Elems = []string{"'a'", "'b'"} + c.Assert(ft.String(), Equals, "enum('''a''','''b''')") + c.Assert(HasCharset(ft), IsTrue) + + ft = NewFieldType(mysql.TypeEnum) + ft.Elems = []string{"a\nb", "a\tb", "a\rb"} + c.Assert(ft.String(), Equals, "enum('a\\nb','a\tb','a\\rb')") + c.Assert(HasCharset(ft), IsTrue) + + ft = NewFieldType(mysql.TypeEnum) + ft.Elems = []string{"a\nb", "a'\t\r\nb", "a\rb"} + c.Assert(ft.String(), Equals, "enum('a\\nb','a'' \\r\\nb','a\\rb')") + c.Assert(HasCharset(ft), IsTrue) + + ft = NewFieldType(mysql.TypeSet) + ft.Elems = []string{"a", "b"} + c.Assert(ft.String(), Equals, "set('a','b')") + c.Assert(HasCharset(ft), IsTrue) + + ft = NewFieldType(mysql.TypeSet) + ft.Elems = []string{"'a'", "'b'"} + c.Assert(ft.String(), Equals, "set('''a''','''b''')") + 
c.Assert(HasCharset(ft), IsTrue) + + ft = NewFieldType(mysql.TypeSet) + ft.Elems = []string{"a\nb", "a'\t\r\nb", "a\rb"} + c.Assert(ft.String(), Equals, "set('a\\nb','a'' \\r\\nb','a\\rb')") + c.Assert(HasCharset(ft), IsTrue) + + ft = NewFieldType(mysql.TypeSet) + ft.Elems = []string{"a'\nb", "a'b\tc"} + c.Assert(ft.String(), Equals, "set('a''\\nb','a''b c')") + c.Assert(HasCharset(ft), IsTrue) + + ft = NewFieldType(mysql.TypeTimestamp) + ft.Flen = 8 + ft.Decimal = 2 + c.Assert(ft.String(), Equals, "timestamp(2)") + c.Assert(HasCharset(ft), IsFalse) + ft = NewFieldType(mysql.TypeTimestamp) + ft.Flen = 8 + ft.Decimal = 0 + c.Assert(ft.String(), Equals, "timestamp") + c.Assert(HasCharset(ft), IsFalse) + + ft = NewFieldType(mysql.TypeDatetime) + ft.Flen = 8 + ft.Decimal = 2 + c.Assert(ft.String(), Equals, "datetime(2)") + c.Assert(HasCharset(ft), IsFalse) + ft = NewFieldType(mysql.TypeDatetime) + ft.Flen = 8 + ft.Decimal = 0 + c.Assert(ft.String(), Equals, "datetime") + c.Assert(HasCharset(ft), IsFalse) + + ft = NewFieldType(mysql.TypeDate) + ft.Flen = 8 + ft.Decimal = 2 + c.Assert(ft.String(), Equals, "date") + c.Assert(HasCharset(ft), IsFalse) + ft = NewFieldType(mysql.TypeDate) + ft.Flen = 8 + ft.Decimal = 0 + c.Assert(ft.String(), Equals, "date") + c.Assert(HasCharset(ft), IsFalse) + + ft = NewFieldType(mysql.TypeYear) + ft.Flen = 4 + ft.Decimal = 0 + c.Assert(ft.String(), Equals, "year(4)") + c.Assert(HasCharset(ft), IsFalse) + ft = NewFieldType(mysql.TypeYear) + ft.Flen = 2 + ft.Decimal = 2 + c.Assert(ft.String(), Equals, "year(2)") // Note: Invalid year. + c.Assert(HasCharset(ft), IsFalse) +} + +func (s *testFieldTypeSuite) TestHasCharsetFromStmt(c *C) { + template := "CREATE TABLE t(a %s)" + + types := []struct { + strType string + hasCharset bool + }{ + {"int", false}, + {"real", false}, + {"float", false}, + {"bit", false}, + {"bool", false}, + {"char(1)", true}, + {"national char(1)", true}, + {"binary", false}, + {"varchar(1)", true}, + {"national varchar(1)", true}, + {"varbinary(1)", false}, + {"year", false}, + {"date", false}, + {"time", false}, + {"datetime", false}, + {"timestamp", false}, + {"blob", false}, + {"tinyblob", false}, + {"mediumblob", false}, + {"longblob", false}, + {"bit", false}, + {"text", true}, + {"tinytext", true}, + {"mediumtext", true}, + {"longtext", true}, + {"json", false}, + {"enum('1')", true}, + {"set('1')", true}, + } + + p := parser.New() + for _, t := range types { + sql := fmt.Sprintf(template, t.strType) + stmt, err := p.ParseOneStmt(sql, "", "") + c.Assert(err, IsNil) + + col := stmt.(*ast.CreateTableStmt).Cols[0] + c.Assert(HasCharset(col.Tp), Equals, t.hasCharset) + } +} diff --git a/parser/yy_parser.go b/parser/yy_parser.go new file mode 100644 index 0000000..4b32b4c --- /dev/null +++ b/parser/yy_parser.go @@ -0,0 +1,288 @@ +// Copyright 2015 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. 
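
As a quick illustration of how the FieldType helpers above fit together, the following is a rough, standalone sketch and not part of the patch itself: the package main wrapper, the chosen charset/collation literals, and the outputs noted in the comments are assumptions inferred from the CompactStr, InfoSchemaStr, and String implementations shown earlier.

package main

import (
	"fmt"

	"github.com/pingcap/tidb/parser/mysql"
	"github.com/pingcap/tidb/parser/types"
)

func main() {
	// An unsigned DECIMAL(10,2) column type.
	dec := types.NewFieldType(mysql.TypeNewDecimal)
	dec.Flen = 10
	dec.Decimal = 2
	dec.Flag |= mysql.UnsignedFlag
	fmt.Println(dec.CompactStr())    // expected: decimal(10,2)
	fmt.Println(dec.InfoSchemaStr()) // expected: decimal(10,2) unsigned
	fmt.Println(dec.String())        // expected: decimal(10,2) UNSIGNED
	fmt.Println(dec.EvalType() == types.ETDecimal, types.HasCharset(dec)) // expected: true false

	// A VARCHAR(20) column with an explicit character set.
	vc := types.NewFieldType(mysql.TypeVarchar)
	vc.Flen = 20
	vc.Charset = "utf8mb4"
	vc.Collate = "utf8mb4_bin"
	fmt.Println(vc.String())          // expected: varchar(20) CHARACTER SET utf8mb4 COLLATE utf8mb4_bin
	fmt.Println(types.HasCharset(vc)) // expected: true
}
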
+ +package parser + +import ( + "fmt" + "math" + "regexp" + "strconv" + "unicode" + + "github.com/pingcap/errors" + "github.com/pingcap/tidb/parser/ast" + "github.com/pingcap/tidb/parser/mysql" + "github.com/pingcap/tidb/parser/terror" +) + +var ( + // ErrSyntax returns for sql syntax error. + ErrSyntax = terror.ClassParser.New(mysql.ErrSyntax, mysql.MySQLErrName[mysql.ErrSyntax]) + // ErrParse returns for sql parse error. + ErrParse = terror.ClassParser.New(mysql.ErrParse, mysql.MySQLErrName[mysql.ErrParse]) + // ErrUnknownCharacterSet returns for no character set found error. + ErrUnknownCharacterSet = terror.ClassParser.New(mysql.ErrUnknownCharacterSet, mysql.MySQLErrName[mysql.ErrUnknownCharacterSet]) + // ErrInvalidYearColumnLength returns for illegal column length for year type. + ErrInvalidYearColumnLength = terror.ClassParser.New(mysql.ErrInvalidYearColumnLength, mysql.MySQLErrName[mysql.ErrInvalidYearColumnLength]) + // ErrWrongArguments returns for illegal argument. + ErrWrongArguments = terror.ClassParser.New(mysql.ErrWrongArguments, mysql.MySQLErrName[mysql.ErrWrongArguments]) + // ErrWrongFieldTerminators returns for illegal field terminators. + ErrWrongFieldTerminators = terror.ClassParser.New(mysql.ErrWrongFieldTerminators, mysql.MySQLErrName[mysql.ErrWrongFieldTerminators]) + // ErrTooBigDisplayWidth returns for data display width exceed limit . + ErrTooBigDisplayWidth = terror.ClassParser.New(mysql.ErrTooBigDisplaywidth, mysql.MySQLErrName[mysql.ErrTooBigDisplaywidth]) + // ErrTooBigPrecision returns for data precision exceed limit. + ErrTooBigPrecision = terror.ClassParser.New(mysql.ErrTooBigPrecision, mysql.MySQLErrName[mysql.ErrTooBigPrecision]) + // ErrUnknownAlterLock returns for no alter lock type found error. + ErrUnknownAlterLock = terror.ClassParser.New(mysql.ErrUnknownAlterLock, mysql.MySQLErrName[mysql.ErrUnknownAlterLock]) + // ErrUnknownAlterAlgorithm returns for no alter algorithm found error. + ErrUnknownAlterAlgorithm = terror.ClassParser.New(mysql.ErrUnknownAlterAlgorithm, mysql.MySQLErrName[mysql.ErrUnknownAlterAlgorithm]) + // SpecFieldPattern special result field pattern + SpecFieldPattern = regexp.MustCompile(`(\/\*!(M?[0-9]{5,6})?|\*\/)`) + specCodePattern = regexp.MustCompile(`\/\*!(M?[0-9]{5,6})?([^*]|\*+[^*/])*\*+\/`) + specCodeStart = regexp.MustCompile(`^\/\*!(M?[0-9]{5,6})?[ \t]*`) + specCodeEnd = regexp.MustCompile(`[ \t]*\*\/$`) + // SpecVersionCodePattern is a pattern for special comments with version. + SpecVersionCodePattern = regexp.MustCompile(`\/\*T![0-9]{5,6}([^*]|\*+[^*/])*\*+\/`) + specVersionCodeStart = regexp.MustCompile(`^\/\*T![0-9]{5,6}[ \t]*`) + specVersionCodeValue = regexp.MustCompile(`[0-9]{5,6}`) +) + +func init() { + parserMySQLErrCodes := map[terror.ErrCode]uint16{ + mysql.ErrSyntax: mysql.ErrSyntax, + mysql.ErrParse: mysql.ErrParse, + mysql.ErrUnknownCharacterSet: mysql.ErrUnknownCharacterSet, + mysql.ErrInvalidYearColumnLength: mysql.ErrInvalidYearColumnLength, + mysql.ErrWrongArguments: mysql.ErrWrongArguments, + mysql.ErrWrongFieldTerminators: mysql.ErrWrongFieldTerminators, + mysql.ErrTooBigDisplaywidth: mysql.ErrTooBigDisplaywidth, + mysql.ErrUnknownAlterLock: mysql.ErrUnknownAlterLock, + mysql.ErrUnknownAlterAlgorithm: mysql.ErrUnknownAlterAlgorithm, + mysql.ErrTooBigPrecision: mysql.ErrTooBigPrecision, + } + terror.ErrClassToMySQLCodes[terror.ClassParser] = parserMySQLErrCodes +} + +// TrimComment trim comment for special comment code of MySQL. 
+func TrimComment(txt string) string { + txt = specCodeStart.ReplaceAllString(txt, "") + return specCodeEnd.ReplaceAllString(txt, "") +} + +func TrimCodeVersionComment(txt string) string { + txt = specVersionCodeStart.ReplaceAllString(txt, "") + return specCodeEnd.ReplaceAllString(txt, "") +} + +// Parser represents a parser instance. Some temporary objects are stored in it to reduce object allocation during Parse function. +type Parser struct { + charset string + collation string + result []ast.StmtNode + src string + lexer Scanner + + // the following fields are used by yyParse to reduce allocation. + cache []yySymType + yylval yySymType + yyVAL *yySymType +} + +type stmtTexter interface { + stmtText() string +} + +// New returns a Parser object. +func New() *Parser { + if ast.NewValueExpr == nil || + ast.NewHexLiteral == nil || + ast.NewBitLiteral == nil { + panic("no parser driver (forgotten import?) https://github.com/pingcap/tidb/parser/issues/43") + } + + return &Parser{ + cache: make([]yySymType, 200), + } +} + +// Parse parses a query string to raw ast.StmtNode. +// If charset or collation is "", default charset and collation will be used. +func (parser *Parser) Parse(sql, charset, collation string) (stmt []ast.StmtNode, warns []error, err error) { + if charset == "" { + charset = mysql.DefaultCharset + } + if collation == "" { + collation = mysql.DefaultCollationName + } + parser.charset = charset + parser.collation = collation + parser.src = sql + parser.result = parser.result[:0] + + var l yyLexer + parser.lexer.reset(sql) + l = &parser.lexer + yyParse(l, parser) + + warns, errs := l.Errors() + if len(warns) > 0 { + warns = append([]error(nil), warns...) + } else { + warns = nil + } + if len(errs) != 0 { + return nil, warns, errors.Trace(errs[0]) + } + for _, stmt := range parser.result { + ast.SetFlag(stmt) + } + return parser.result, warns, nil +} + +func (parser *Parser) lastErrorAsWarn() { + if len(parser.lexer.errs) == 0 { + return + } + parser.lexer.warns = append(parser.lexer.warns, parser.lexer.errs[len(parser.lexer.errs)-1]) + parser.lexer.errs = parser.lexer.errs[:len(parser.lexer.errs)-1] +} + +// ParseOneStmt parses a query and returns an ast.StmtNode. +// The query must have one statement, otherwise ErrSyntax is returned. +func (parser *Parser) ParseOneStmt(sql, charset, collation string) (ast.StmtNode, error) { + stmts, _, err := parser.Parse(sql, charset, collation) + if err != nil { + return nil, errors.Trace(err) + } + if len(stmts) != 1 { + return nil, ErrSyntax + } + ast.SetFlag(stmts[0]) + return stmts[0], nil +} + +// SetSQLMode sets the SQL mode for parser. +func (parser *Parser) SetSQLMode(mode mysql.SQLMode) { + parser.lexer.SetSQLMode(mode) +} + +// ParseErrorWith returns "You have a syntax error near..." error message compatible with mysql. +func ParseErrorWith(errstr string, lineno int) error { + if len(errstr) > mysql.ErrTextLength { + errstr = errstr[:mysql.ErrTextLength] + } + return fmt.Errorf("near '%-.80s' at line %d", errstr, lineno) +} + +// The select statement is not at the end of the whole statement, if the last +// field text was set from its offset to the end of the src string, update +// the last field text. 
+func (parser *Parser) setLastSelectFieldText(st *ast.SelectStmt, lastEnd int) { + lastField := st.Fields.Fields[len(st.Fields.Fields)-1] + if lastField.Offset+len(lastField.Text()) >= len(parser.src)-1 { + lastField.SetText(parser.src[lastField.Offset:lastEnd]) + } +} + +func (parser *Parser) startOffset(v *yySymType) int { + return v.offset +} + +func (parser *Parser) endOffset(v *yySymType) int { + offset := v.offset + for offset > 0 && unicode.IsSpace(rune(parser.src[offset-1])) { + offset-- + } + return offset +} + +func toInt(l yyLexer, lval *yySymType, str string) int { + n, err := strconv.ParseUint(str, 10, 64) + if err != nil { + e := err.(*strconv.NumError) + if e.Err == strconv.ErrRange { + // TODO: toDecimal maybe out of range still. + // This kind of error should be throw to higher level, because truncated data maybe legal. + // For example, this SQL returns error: + // create table test (id decimal(30, 0)); + // insert into test values(123456789012345678901234567890123094839045793405723406801943850); + // While this SQL: + // select 1234567890123456789012345678901230948390457934057234068019438509023041874359081325875128590860234789847359871045943057; + // get value 99999999999999999999999999999999999999999999999999999999999999999 + return toDecimal(l, lval, str) + } + l.AppendError(l.Errorf("integer literal: %v", err)) + return int(unicode.ReplacementChar) + } + + switch { + case n <= math.MaxInt64: + lval.item = int64(n) + default: + lval.item = n + } + return intLit +} + +func toDecimal(l yyLexer, lval *yySymType, str string) int { + dec, err := ast.NewDecimal(str) + if err != nil { + l.AppendError(l.Errorf("decimal literal: %v", err)) + } + lval.item = dec + return decLit +} + +func toFloat(l yyLexer, lval *yySymType, str string) int { + n, err := strconv.ParseFloat(str, 64) + if err != nil { + l.AppendError(l.Errorf("float literal: %v", err)) + return int(unicode.ReplacementChar) + } + + lval.item = n + return floatLit +} + +// See https://dev.mysql.com/doc/refman/5.7/en/hexadecimal-literals.html +func toHex(l yyLexer, lval *yySymType, str string) int { + h, err := ast.NewHexLiteral(str) + if err != nil { + l.AppendError(l.Errorf("hex literal: %v", err)) + return int(unicode.ReplacementChar) + } + lval.item = h + return hexLit +} + +// See https://dev.mysql.com/doc/refman/5.7/en/bit-type.html +func toBit(l yyLexer, lval *yySymType, str string) int { + b, err := ast.NewBitLiteral(str) + if err != nil { + l.AppendError(l.Errorf("bit literal: %v", err)) + return int(unicode.ReplacementChar) + } + lval.item = b + return bitLit +} + +func getUint64FromNUM(num interface{}) uint64 { + switch v := num.(type) { + case int64: + return uint64(v) + case uint64: + return v + } + return 0 +} diff --git a/planner/cascades/enforcer_rules.go b/planner/cascades/enforcer_rules.go new file mode 100644 index 0000000..bc82671 --- /dev/null +++ b/planner/cascades/enforcer_rules.go @@ -0,0 +1,85 @@ +// Copyright 2018 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. 
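
To make the Parser entry points above concrete, here is a minimal usage sketch. It is not part of the patch: the package main wrapper, the sample SQL strings, and the counts noted in the comments are illustrative assumptions. The import paths, including the blank parser_driver import that New requires, mirror those used elsewhere in this diff.

package main

import (
	"fmt"

	"github.com/pingcap/tidb/parser"
	"github.com/pingcap/tidb/parser/ast"
	// Provides the value-expression constructors that parser.New checks for;
	// without a driver import, New panics.
	_ "github.com/pingcap/tidb/types/parser_driver"
)

func main() {
	p := parser.New()

	// Empty charset/collation fall back to mysql.DefaultCharset and
	// mysql.DefaultCollationName inside Parse.
	stmt, err := p.ParseOneStmt("SELECT a, b FROM t WHERE a > 1", "", "")
	if err != nil {
		panic(err)
	}
	if sel, ok := stmt.(*ast.SelectStmt); ok {
		fmt.Println(len(sel.Fields.Fields)) // expected: 2
	}

	// Parse returns every statement plus any warnings the scanner collected.
	stmts, warns, err := p.Parse("SELECT 1; SELECT 2;", "utf8mb4", "utf8mb4_bin")
	fmt.Println(len(stmts), len(warns), err) // typically: 2 0 <nil>
}
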
+ +package cascades + +import ( + "math" + + plannercore "github.com/pingcap/tidb/planner/core" + "github.com/pingcap/tidb/planner/implementation" + "github.com/pingcap/tidb/planner/memo" + "github.com/pingcap/tidb/planner/property" +) + +// Enforcer defines the interface for enforcer rules. +type Enforcer interface { + // NewProperty generates relaxed property with the help of enforcer. + NewProperty(prop *property.PhysicalProperty) (newProp *property.PhysicalProperty) + // OnEnforce adds physical operators on top of child implementation to satisfy + // required physical property. + OnEnforce(reqProp *property.PhysicalProperty, child memo.Implementation) (impl memo.Implementation) + // GetEnforceCost calculates cost of enforcing required physical property. + GetEnforceCost(g *memo.Group) float64 +} + +// GetEnforcerRules gets all candidate enforcer rules based +// on required physical property. +func GetEnforcerRules(g *memo.Group, prop *property.PhysicalProperty) (enforcers []Enforcer) { + if g.EngineType != memo.EngineTiDB { + return + } + if !prop.IsEmpty() { + enforcers = append(enforcers, orderEnforcer) + } + return +} + +// OrderEnforcer enforces order property on child implementation. +type OrderEnforcer struct { +} + +var orderEnforcer = &OrderEnforcer{} + +// NewProperty removes order property from required physical property. +func (e *OrderEnforcer) NewProperty(prop *property.PhysicalProperty) (newProp *property.PhysicalProperty) { + // Order property cannot be empty now. + newProp = &property.PhysicalProperty{ExpectedCnt: math.MaxFloat64} + return +} + +// OnEnforce adds sort operator to satisfy required order property. +func (e *OrderEnforcer) OnEnforce(reqProp *property.PhysicalProperty, child memo.Implementation) (impl memo.Implementation) { + childPlan := child.GetPlan() + sort := plannercore.PhysicalSort{ + ByItems: make([]*plannercore.ByItems, 0, len(reqProp.Items)), + }.Init(childPlan.SCtx(), childPlan.Stats(), &property.PhysicalProperty{ExpectedCnt: math.MaxFloat64}) + for _, item := range reqProp.Items { + item := &plannercore.ByItems{ + Expr: item.Col, + Desc: item.Desc, + } + sort.ByItems = append(sort.ByItems, item) + } + impl = implementation.NewSortImpl(sort).AttachChildren(child) + return +} + +// GetEnforceCost calculates cost of sort operator. +func (e *OrderEnforcer) GetEnforceCost(g *memo.Group) float64 { + // We need a SessionCtx to calculate the cost of a sort. + sctx := g.Equivalents.Front().Value.(*memo.GroupExpr).ExprNode.SCtx() + sort := plannercore.PhysicalSort{}.Init(sctx, nil, nil) + cost := sort.GetCost(g.Prop.Stats.RowCount) + return cost +} diff --git a/planner/cascades/enforcer_rules_test.go b/planner/cascades/enforcer_rules_test.go new file mode 100644 index 0000000..94cc96b --- /dev/null +++ b/planner/cascades/enforcer_rules_test.go @@ -0,0 +1,46 @@ +// Copyright 2018 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package cascades + +import ( + . 
"github.com/pingcap/check" + "github.com/pingcap/tidb/expression" + "github.com/pingcap/tidb/planner/memo" + "github.com/pingcap/tidb/planner/property" +) + +func (s *testCascadesSuite) TestGetEnforcerRules(c *C) { + prop := &property.PhysicalProperty{} + group := memo.NewGroupWithSchema(nil, expression.NewSchema()) + enforcers := GetEnforcerRules(group, prop) + c.Assert(enforcers, IsNil) + col := &expression.Column{} + prop.Items = append(prop.Items, property.Item{Col: col}) + enforcers = GetEnforcerRules(group, prop) + c.Assert(enforcers, NotNil) + c.Assert(len(enforcers), Equals, 1) + _, ok := enforcers[0].(*OrderEnforcer) + c.Assert(ok, IsTrue) +} + +func (s *testCascadesSuite) TestNewProperties(c *C) { + prop := &property.PhysicalProperty{} + col := &expression.Column{} + group := memo.NewGroupWithSchema(nil, expression.NewSchema()) + prop.Items = append(prop.Items, property.Item{Col: col}) + enforcers := GetEnforcerRules(group, prop) + orderEnforcer, _ := enforcers[0].(*OrderEnforcer) + newProp := orderEnforcer.NewProperty(prop) + c.Assert(newProp.Items, IsNil) +} diff --git a/planner/cascades/implementation_rules.go b/planner/cascades/implementation_rules.go new file mode 100644 index 0000000..fa4ffa4 --- /dev/null +++ b/planner/cascades/implementation_rules.go @@ -0,0 +1,431 @@ +// Copyright 2018 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package cascades + +import ( + "math" + + "github.com/pingcap/tidb/expression" + plannercore "github.com/pingcap/tidb/planner/core" + impl "github.com/pingcap/tidb/planner/implementation" + "github.com/pingcap/tidb/planner/memo" + "github.com/pingcap/tidb/planner/property" +) + +// ImplementationRule defines the interface for implementation rules. +type ImplementationRule interface { + // Match checks if current GroupExpr matches this rule under required physical property. + Match(expr *memo.GroupExpr, prop *property.PhysicalProperty) (matched bool) + // OnImplement generates physical plan using this rule for current GroupExpr. Note that + // childrenReqProps of generated physical plan should be set correspondingly in this function. + OnImplement(expr *memo.GroupExpr, reqProp *property.PhysicalProperty) (memo.Implementation, error) +} + +var defaultImplementationMap = map[memo.Operand][]ImplementationRule{ + memo.OperandTableDual: { + &ImplTableDual{}, + }, + memo.OperandProjection: { + &ImplProjection{}, + }, + memo.OperandTableScan: { + &ImplTableScan{}, + }, + memo.OperandIndexScan: { + &ImplIndexScan{}, + }, + memo.OperandTiKVSingleGather: { + &ImplTiKVSingleReadGather{}, + }, + memo.OperandShow: { + &ImplShow{}, + }, + memo.OperandSelection: { + &ImplSelection{}, + }, + memo.OperandSort: { + &ImplSort{}, + }, + memo.OperandAggregation: { + &ImplHashAgg{}, + }, + memo.OperandLimit: { + &ImplLimit{}, + }, + memo.OperandTopN: { + &ImplTopN{}, + &ImplTopNAsLimit{}, + }, + memo.OperandJoin: { + &ImplHashJoinBuildLeft{}, + &ImplHashJoinBuildRight{}, + }, +} + +// ImplTableDual implements LogicalTableDual as PhysicalTableDual. 
+type ImplTableDual struct { +} + +// Match implements ImplementationRule Match interface. +func (r *ImplTableDual) Match(expr *memo.GroupExpr, prop *property.PhysicalProperty) (matched bool) { + if !prop.IsEmpty() { + return false + } + return true +} + +// OnImplement implements ImplementationRule OnImplement interface. +func (r *ImplTableDual) OnImplement(expr *memo.GroupExpr, reqProp *property.PhysicalProperty) (memo.Implementation, error) { + logicProp := expr.Group.Prop + logicDual := expr.ExprNode.(*plannercore.LogicalTableDual) + dual := plannercore.PhysicalTableDual{RowCount: logicDual.RowCount}.Init(logicDual.SCtx(), logicProp.Stats) + dual.SetSchema(logicProp.Schema) + return impl.NewTableDualImpl(dual), nil +} + +// ImplProjection implements LogicalProjection as PhysicalProjection. +type ImplProjection struct { +} + +// Match implements ImplementationRule Match interface. +func (r *ImplProjection) Match(expr *memo.GroupExpr, prop *property.PhysicalProperty) (matched bool) { + return true +} + +// OnImplement implements ImplementationRule OnImplement interface. +func (r *ImplProjection) OnImplement(expr *memo.GroupExpr, reqProp *property.PhysicalProperty) (memo.Implementation, error) { + logicProp := expr.Group.Prop + logicProj := expr.ExprNode.(*plannercore.LogicalProjection) + childProp, ok := logicProj.TryToGetChildProp(reqProp) + if !ok { + return nil, nil + } + proj := plannercore.PhysicalProjection{ + Exprs: logicProj.Exprs, + }.Init(logicProj.SCtx(), logicProp.Stats.ScaleByExpectCnt(reqProp.ExpectedCnt), childProp) + proj.SetSchema(logicProp.Schema) + return impl.NewProjectionImpl(proj), nil +} + +// ImplTiKVSingleReadGather implements TiKVSingleGather +// as PhysicalTableReader or PhysicalIndexReader. +type ImplTiKVSingleReadGather struct { +} + +// Match implements ImplementationRule Match interface. +func (r *ImplTiKVSingleReadGather) Match(expr *memo.GroupExpr, prop *property.PhysicalProperty) (matched bool) { + return true +} + +// OnImplement implements ImplementationRule OnImplement interface. +func (r *ImplTiKVSingleReadGather) OnImplement(expr *memo.GroupExpr, reqProp *property.PhysicalProperty) (memo.Implementation, error) { + logicProp := expr.Group.Prop + sg := expr.ExprNode.(*plannercore.TiKVSingleGather) + if sg.IsIndexGather { + reader := sg.GetPhysicalIndexReader(logicProp.Schema, logicProp.Stats.ScaleByExpectCnt(reqProp.ExpectedCnt), reqProp) + return impl.NewIndexReaderImpl(reader, sg.Source.TblColHists), nil + } + reader := sg.GetPhysicalTableReader(logicProp.Schema, logicProp.Stats.ScaleByExpectCnt(reqProp.ExpectedCnt), reqProp) + return impl.NewTableReaderImpl(reader, sg.Source.TblColHists), nil +} + +// ImplTableScan implements TableScan as PhysicalTableScan. +type ImplTableScan struct { +} + +// Match implements ImplementationRule Match interface. +func (r *ImplTableScan) Match(expr *memo.GroupExpr, prop *property.PhysicalProperty) (matched bool) { + ts := expr.ExprNode.(*plannercore.LogicalTableScan) + return prop.IsEmpty() || (len(prop.Items) == 1 && ts.Handle != nil && prop.Items[0].Col.Equal(nil, ts.Handle)) +} + +// OnImplement implements ImplementationRule OnImplement interface. 
+func (r *ImplTableScan) OnImplement(expr *memo.GroupExpr, reqProp *property.PhysicalProperty) (memo.Implementation, error) { + logicProp := expr.Group.Prop + logicalScan := expr.ExprNode.(*plannercore.LogicalTableScan) + ts := logicalScan.GetPhysicalScan(logicProp.Schema, logicProp.Stats.ScaleByExpectCnt(reqProp.ExpectedCnt)) + if !reqProp.IsEmpty() { + ts.KeepOrder = true + ts.Desc = reqProp.Items[0].Desc + } + tblCols, tblColHists := logicalScan.Source.TblCols, logicalScan.Source.TblColHists + return impl.NewTableScanImpl(ts, tblCols, tblColHists), nil +} + +// ImplIndexScan implements IndexScan as PhysicalIndexScan. +type ImplIndexScan struct { +} + +// Match implements ImplementationRule Match interface. +func (r *ImplIndexScan) Match(expr *memo.GroupExpr, prop *property.PhysicalProperty) (matched bool) { + is := expr.ExprNode.(*plannercore.LogicalIndexScan) + return is.MatchIndexProp(prop) +} + +// OnImplement implements ImplementationRule OnImplement interface. +func (r *ImplIndexScan) OnImplement(expr *memo.GroupExpr, reqProp *property.PhysicalProperty) (memo.Implementation, error) { + logicalScan := expr.ExprNode.(*plannercore.LogicalIndexScan) + is := logicalScan.GetPhysicalIndexScan(expr.Group.Prop.Schema, expr.Group.Prop.Stats.ScaleByExpectCnt(reqProp.ExpectedCnt)) + if !reqProp.IsEmpty() { + is.KeepOrder = true + if reqProp.Items[0].Desc { + is.Desc = true + } + } + return impl.NewIndexScanImpl(is, logicalScan.Source.TblColHists), nil +} + +// ImplShow is the implementation rule which implements LogicalShow to +// PhysicalShow. +type ImplShow struct { +} + +// Match implements ImplementationRule Match interface. +func (r *ImplShow) Match(expr *memo.GroupExpr, prop *property.PhysicalProperty) (matched bool) { + return prop.IsEmpty() +} + +// OnImplement implements ImplementationRule OnImplement interface. +func (r *ImplShow) OnImplement(expr *memo.GroupExpr, reqProp *property.PhysicalProperty) (memo.Implementation, error) { + logicProp := expr.Group.Prop + show := expr.ExprNode.(*plannercore.LogicalShow) + + // TODO(zz-jason): unifying LogicalShow and PhysicalShow to a single + // struct. So that we don't need to create a new PhysicalShow object, which + // can help us to reduce the gc pressure of golang runtime and improve the + // overall performance. + showPhys := plannercore.PhysicalShow{ShowContents: show.ShowContents}.Init(show.SCtx()) + showPhys.SetSchema(logicProp.Schema) + return impl.NewShowImpl(showPhys), nil +} + +// ImplSelection is the implementation rule which implements LogicalSelection +// to PhysicalSelection. +type ImplSelection struct { +} + +// Match implements ImplementationRule Match interface. +func (r *ImplSelection) Match(expr *memo.GroupExpr, prop *property.PhysicalProperty) (matched bool) { + return true +} + +// OnImplement implements ImplementationRule OnImplement interface. 
+func (r *ImplSelection) OnImplement(expr *memo.GroupExpr, reqProp *property.PhysicalProperty) (memo.Implementation, error) { + logicalSel := expr.ExprNode.(*plannercore.LogicalSelection) + physicalSel := plannercore.PhysicalSelection{ + Conditions: logicalSel.Conditions, + }.Init(logicalSel.SCtx(), expr.Group.Prop.Stats.ScaleByExpectCnt(reqProp.ExpectedCnt), reqProp.Clone()) + switch expr.Group.EngineType { + case memo.EngineTiDB: + return impl.NewTiDBSelectionImpl(physicalSel), nil + case memo.EngineTiKV: + return impl.NewTiKVSelectionImpl(physicalSel), nil + default: + return nil, plannercore.ErrInternal.GenWithStack("Unsupported EngineType '%s' for Selection.", expr.Group.EngineType.String()) + } +} + +// ImplSort is the implementation rule which implements LogicalSort +// to PhysicalSort or NominalSort. +type ImplSort struct { +} + +// Match implements ImplementationRule match interface. +func (r *ImplSort) Match(expr *memo.GroupExpr, prop *property.PhysicalProperty) (matched bool) { + ls := expr.ExprNode.(*plannercore.LogicalSort) + return plannercore.MatchItems(prop, ls.ByItems) +} + +// OnImplement implements ImplementationRule OnImplement interface. +// If all of the sort items are columns, generate a NominalSort, otherwise +// generate a PhysicalSort. +func (r *ImplSort) OnImplement(expr *memo.GroupExpr, reqProp *property.PhysicalProperty) (memo.Implementation, error) { + ls := expr.ExprNode.(*plannercore.LogicalSort) + if newProp, canUseNominal := plannercore.GetPropByOrderByItems(ls.ByItems); canUseNominal { + newProp.ExpectedCnt = reqProp.ExpectedCnt + ns := plannercore.NominalSort{}.Init(ls.SCtx(), newProp) + return impl.NewNominalSortImpl(ns), nil + } + ps := plannercore.PhysicalSort{ByItems: ls.ByItems}.Init( + ls.SCtx(), + expr.Group.Prop.Stats.ScaleByExpectCnt(reqProp.ExpectedCnt), + &property.PhysicalProperty{ExpectedCnt: math.MaxFloat64}, + ) + return impl.NewSortImpl(ps), nil +} + +// ImplHashAgg is the implementation rule which implements LogicalAggregation +// to PhysicalHashAgg. +type ImplHashAgg struct { +} + +// Match implements ImplementationRule Match interface. +func (r *ImplHashAgg) Match(expr *memo.GroupExpr, prop *property.PhysicalProperty) (matched bool) { + // TODO: deal with the hints when we have implemented StreamAgg. + return prop.IsEmpty() +} + +// OnImplement implements ImplementationRule OnImplement interface. +func (r *ImplHashAgg) OnImplement(expr *memo.GroupExpr, reqProp *property.PhysicalProperty) (memo.Implementation, error) { + la := expr.ExprNode.(*plannercore.LogicalAggregation) + hashAgg := plannercore.NewPhysicalHashAgg( + la, + expr.Group.Prop.Stats.ScaleByExpectCnt(reqProp.ExpectedCnt), + &property.PhysicalProperty{ExpectedCnt: math.MaxFloat64}, + ) + hashAgg.SetSchema(expr.Group.Prop.Schema.Clone()) + switch expr.Group.EngineType { + case memo.EngineTiDB: + return impl.NewTiDBHashAggImpl(hashAgg), nil + case memo.EngineTiKV: + return impl.NewTiKVHashAggImpl(hashAgg), nil + default: + return nil, plannercore.ErrInternal.GenWithStack("Unsupported EngineType '%s' for HashAggregation.", expr.Group.EngineType.String()) + } +} + +// ImplLimit is the implementation rule which implements LogicalLimit +// to PhysicalLimit. +type ImplLimit struct { +} + +// Match implements ImplementationRule Match interface. +func (r *ImplLimit) Match(expr *memo.GroupExpr, prop *property.PhysicalProperty) (matched bool) { + return prop.IsEmpty() +} + +// OnImplement implements ImplementationRule OnImplement interface. 
+func (r *ImplLimit) OnImplement(expr *memo.GroupExpr, reqProp *property.PhysicalProperty) (memo.Implementation, error) { + logicalLimit := expr.ExprNode.(*plannercore.LogicalLimit) + newProp := &property.PhysicalProperty{ExpectedCnt: float64(logicalLimit.Count + logicalLimit.Offset)} + physicalLimit := plannercore.PhysicalLimit{ + Offset: logicalLimit.Offset, + Count: logicalLimit.Count, + }.Init(logicalLimit.SCtx(), expr.Group.Prop.Stats, newProp) + return impl.NewLimitImpl(physicalLimit), nil +} + +// ImplTopN is the implementation rule which implements LogicalTopN +// to PhysicalTopN. +type ImplTopN struct { +} + +// Match implements ImplementationRule Match interface. +func (r *ImplTopN) Match(expr *memo.GroupExpr, prop *property.PhysicalProperty) (matched bool) { + topN := expr.ExprNode.(*plannercore.LogicalTopN) + return plannercore.MatchItems(prop, topN.ByItems) +} + +// OnImplement implements ImplementationRule OnImplement interface. +func (r *ImplTopN) OnImplement(expr *memo.GroupExpr, reqProp *property.PhysicalProperty) (memo.Implementation, error) { + lt := expr.ExprNode.(*plannercore.LogicalTopN) + resultProp := &property.PhysicalProperty{ExpectedCnt: math.MaxFloat64} + topN := plannercore.PhysicalTopN{ + ByItems: lt.ByItems, + Count: lt.Count, + Offset: lt.Offset, + }.Init(lt.SCtx(), expr.Group.Prop.Stats, resultProp) + switch expr.Group.EngineType { + case memo.EngineTiDB: + return impl.NewTiDBTopNImpl(topN), nil + default: + // TODO: return TiKVTopNImpl after we have implemented push topN down gather. + return nil, plannercore.ErrInternal.GenWithStack("Unsupported EngineType '%s' for TopN.", expr.Group.EngineType.String()) + } +} + +// ImplTopNAsLimit is the implementation rule which implements LogicalTopN +// as PhysicalLimit with required order property. +type ImplTopNAsLimit struct { +} + +// Match implements ImplementationRule Match interface. +func (r *ImplTopNAsLimit) Match(expr *memo.GroupExpr, prop *property.PhysicalProperty) (matched bool) { + topN := expr.ExprNode.(*plannercore.LogicalTopN) + _, canUseLimit := plannercore.GetPropByOrderByItems(topN.ByItems) + return canUseLimit && plannercore.MatchItems(prop, topN.ByItems) +} + +// OnImplement implements ImplementationRule OnImplement interface. 
+func (r *ImplTopNAsLimit) OnImplement(expr *memo.GroupExpr, reqProp *property.PhysicalProperty) (memo.Implementation, error) { + lt := expr.ExprNode.(*plannercore.LogicalTopN) + newProp := &property.PhysicalProperty{ExpectedCnt: float64(lt.Count + lt.Offset)} + newProp.Items = make([]property.Item, len(lt.ByItems)) + for i, item := range lt.ByItems { + newProp.Items[i].Col = item.Expr.(*expression.Column) + newProp.Items[i].Desc = item.Desc + } + physicalLimit := plannercore.PhysicalLimit{ + Offset: lt.Offset, + Count: lt.Count, + }.Init(lt.SCtx(), expr.Group.Prop.Stats, newProp) + return impl.NewLimitImpl(physicalLimit), nil +} + +func getImplForHashJoin(expr *memo.GroupExpr, prop *property.PhysicalProperty, innerIdx int) memo.Implementation { + join := expr.ExprNode.(*plannercore.LogicalJoin) + chReqProps := make([]*property.PhysicalProperty, 2) + chReqProps[0] = &property.PhysicalProperty{ExpectedCnt: math.MaxFloat64} + chReqProps[1] = &property.PhysicalProperty{ExpectedCnt: math.MaxFloat64} + stats := expr.Group.Prop.Stats + if prop.ExpectedCnt < stats.RowCount { + expCntScale := prop.ExpectedCnt / stats.RowCount + chReqProps[1-innerIdx].ExpectedCnt = expr.Children[1-innerIdx].Prop.Stats.RowCount * expCntScale + } + hashJoin := plannercore.NewPhysicalHashJoin(join, innerIdx, stats.ScaleByExpectCnt(prop.ExpectedCnt), chReqProps...) + hashJoin.SetSchema(expr.Group.Prop.Schema) + return impl.NewHashJoinImpl(hashJoin) +} + +// ImplHashJoinBuildLeft implements LogicalJoin to PhysicalHashJoin which uses the left child to build hash table. +type ImplHashJoinBuildLeft struct { +} + +// Match implements ImplementationRule Match interface. +func (r *ImplHashJoinBuildLeft) Match(expr *memo.GroupExpr, prop *property.PhysicalProperty) (matched bool) { + return prop.IsEmpty() +} + +// OnImplement implements ImplementationRule OnImplement interface. +func (r *ImplHashJoinBuildLeft) OnImplement(expr *memo.GroupExpr, reqProp *property.PhysicalProperty) (memo.Implementation, error) { + join := expr.ExprNode.(*plannercore.LogicalJoin) + switch join.JoinType { + case plannercore.InnerJoin: + return getImplForHashJoin(expr, reqProp, 0), nil + default: + // TODO: deal with other join type. + return nil, nil + } +} + +// ImplHashJoinBuildRight implements LogicalJoin to PhysicalHashJoin which uses the right child to build hash table. +type ImplHashJoinBuildRight struct { +} + +// Match implements ImplementationRule Match interface. +func (r *ImplHashJoinBuildRight) Match(expr *memo.GroupExpr, prop *property.PhysicalProperty) (matched bool) { + return prop.IsEmpty() +} + +// OnImplement implements ImplementationRule OnImplement interface. +func (r *ImplHashJoinBuildRight) OnImplement(expr *memo.GroupExpr, reqProp *property.PhysicalProperty) (memo.Implementation, error) { + join := expr.ExprNode.(*plannercore.LogicalJoin) + switch join.JoinType { + case plannercore.InnerJoin: + return getImplForHashJoin(expr, reqProp, 1), nil + default: + // TODO: deal with other join type. + return nil, nil + } +} diff --git a/planner/cascades/integration_test.go b/planner/cascades/integration_test.go new file mode 100644 index 0000000..00071c3 --- /dev/null +++ b/planner/cascades/integration_test.go @@ -0,0 +1,212 @@ +// Copyright 2019 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package cascades_test + +import ( + . "github.com/pingcap/check" + "github.com/pingcap/tidb/kv" + "github.com/pingcap/tidb/session" + "github.com/pingcap/tidb/store/mockstore" + "github.com/pingcap/tidb/util/testkit" + "github.com/pingcap/tidb/util/testutil" +) + +var _ = Suite(&testIntegrationSuite{}) + +type testIntegrationSuite struct { + store kv.Storage + testData testutil.TestData +} + +func newStoreWithBootstrap() (kv.Storage, error) { + store, err := mockstore.NewMockTikvStore() + if err != nil { + return nil, err + } + _, err = session.BootstrapSession(store) + return store, err +} + +func (s *testIntegrationSuite) SetUpSuite(c *C) { + var err error + s.store, err = newStoreWithBootstrap() + c.Assert(err, IsNil) + s.testData, err = testutil.LoadTestSuiteData("testdata", "integration_suite") + c.Assert(err, IsNil) +} + +func (s *testIntegrationSuite) TearDownSuite(c *C) { + c.Assert(s.testData.GenerateOutputIfNeeded(), IsNil) + s.store.Close() +} + +func (s *testIntegrationSuite) TestSimpleProjDual(c *C) { + tk := testkit.NewTestKitWithInit(c, s.store) + tk.MustExec("set session tidb_enable_cascades_planner = 1") + tk.MustQuery("explain select 1").Check(testkit.Rows( + "Projection_3 1.00 root 1->Column#1", + "└─TableDual_4 1.00 root rows:1", + )) + tk.MustQuery("select 1").Check(testkit.Rows( + "1", + )) +} + +func (s *testIntegrationSuite) TestPKIsHandleRangeScan(c *C) { + tk := testkit.NewTestKitWithInit(c, s.store) + tk.MustExec("drop table if exists t") + tk.MustExec("create table t(a int primary key, b int)") + tk.MustExec("insert into t values(1,2),(3,4),(5,6)") + tk.MustExec("set session tidb_enable_cascades_planner = 1") + + var input []string + var output []struct { + SQL string + Plan []string + Result []string + } + s.testData.GetTestCases(c, &input, &output) + for i, sql := range input { + s.testData.OnRecord(func() { + output[i].SQL = sql + output[i].Plan = s.testData.ConvertRowsToStrings(tk.MustQuery("explain " + sql).Rows()) + output[i].Result = s.testData.ConvertRowsToStrings(tk.MustQuery(sql).Rows()) + }) + tk.MustQuery("explain " + sql).Check(testkit.Rows(output[i].Plan...)) + tk.MustQuery(sql).Check(testkit.Rows(output[i].Result...)) + } +} + +func (s *testIntegrationSuite) TestIndexScan(c *C) { + tk := testkit.NewTestKitWithInit(c, s.store) + tk.MustExec("drop table if exists t") + tk.MustExec("create table t(a int primary key, b int, c int, d int, index idx_b(b), index idx_c_b(c, b))") + tk.MustExec("insert into t values(1,2,3,100),(4,5,6,200),(7,8,9,300)") + tk.MustExec("set session tidb_enable_cascades_planner = 1") + var input []string + var output []struct { + SQL string + Plan []string + Result []string + } + s.testData.GetTestCases(c, &input, &output) + for i, sql := range input { + s.testData.OnRecord(func() { + output[i].SQL = sql + output[i].Plan = s.testData.ConvertRowsToStrings(tk.MustQuery("explain " + sql).Rows()) + output[i].Result = s.testData.ConvertRowsToStrings(tk.MustQuery(sql).Rows()) + }) + tk.MustQuery("explain " + sql).Check(testkit.Rows(output[i].Plan...)) + tk.MustQuery(sql).Check(testkit.Rows(output[i].Result...)) + } +} + +func (s *testIntegrationSuite) TestSort(c *C) { + tk := 
testkit.NewTestKitWithInit(c, s.store) + tk.MustExec("drop table if exists t") + tk.MustExec("create table t(a int primary key, b int)") + tk.MustExec("insert into t values (1, 11), (4, 44), (2, 22), (3, 33)") + tk.MustExec("set session tidb_enable_cascades_planner = 1") + var input []string + var output []struct { + SQL string + Plan []string + Result []string + } + s.testData.GetTestCases(c, &input, &output) + for i, sql := range input { + s.testData.OnRecord(func() { + output[i].SQL = sql + output[i].Plan = s.testData.ConvertRowsToStrings(tk.MustQuery("explain " + sql).Rows()) + output[i].Result = s.testData.ConvertRowsToStrings(tk.MustQuery(sql).Rows()) + }) + tk.MustQuery("explain " + sql).Check(testkit.Rows(output[i].Plan...)) + tk.MustQuery(sql).Check(testkit.Rows(output[i].Result...)) + } +} + +func (s *testIntegrationSuite) TestAggregation(c *C) { + tk := testkit.NewTestKitWithInit(c, s.store) + tk.MustExec("drop table if exists t") + tk.MustExec("create table t(a int primary key, b int)") + tk.MustExec("insert into t values (1, 11), (4, 44), (2, 22), (3, 33)") + tk.MustExec("set session tidb_enable_cascades_planner = 1") + var input []string + var output []struct { + SQL string + Plan []string + Result []string + } + s.testData.GetTestCases(c, &input, &output) + for i, sql := range input { + s.testData.OnRecord(func() { + output[i].SQL = sql + output[i].Plan = s.testData.ConvertRowsToStrings(tk.MustQuery("explain " + sql).Rows()) + output[i].Result = s.testData.ConvertRowsToStrings(tk.MustQuery(sql).Rows()) + }) + tk.MustQuery("explain " + sql).Check(testkit.Rows(output[i].Plan...)) + tk.MustQuery(sql).Check(testkit.Rows(output[i].Result...)) + } +} + +func (s *testIntegrationSuite) TestSimplePlans(c *C) { + tk := testkit.NewTestKitWithInit(c, s.store) + tk.MustExec("drop table if exists t") + tk.MustExec("create table t(a int primary key, b int)") + tk.MustExec("insert into t values (1, 11), (4, 44), (2, 22), (3, 33)") + tk.MustExec("set session tidb_enable_cascades_planner = 1") + var input []string + var output []struct { + SQL string + Plan []string + Result []string + } + s.testData.GetTestCases(c, &input, &output) + for i, sql := range input { + s.testData.OnRecord(func() { + output[i].SQL = sql + output[i].Plan = s.testData.ConvertRowsToStrings(tk.MustQuery("explain " + sql).Rows()) + output[i].Result = s.testData.ConvertRowsToStrings(tk.MustQuery(sql).Rows()) + }) + tk.MustQuery("explain " + sql).Check(testkit.Rows(output[i].Plan...)) + tk.MustQuery(sql).Check(testkit.Rows(output[i].Result...)) + } +} + +func (s *testIntegrationSuite) TestJoin(c *C) { + tk := testkit.NewTestKitWithInit(c, s.store) + tk.MustExec("drop table if exists t1") + tk.MustExec("drop table if exists t2") + tk.MustExec("create table t1(a int primary key, b int)") + tk.MustExec("create table t2(a int primary key, b int)") + tk.MustExec("insert into t1 values (1, 11), (4, 44), (2, 22), (3, 33)") + tk.MustExec("insert into t2 values (1, 111), (2, 222), (3, 333)") + tk.MustExec("set session tidb_enable_cascades_planner = 1") + var input []string + var output []struct { + SQL string + Plan []string + Result []string + } + s.testData.GetTestCases(c, &input, &output) + for i, sql := range input { + s.testData.OnRecord(func() { + output[i].SQL = sql + output[i].Plan = s.testData.ConvertRowsToStrings(tk.MustQuery("explain " + sql).Rows()) + output[i].Result = s.testData.ConvertRowsToStrings(tk.MustQuery(sql).Rows()) + }) + tk.MustQuery("explain " + sql).Check(testkit.Rows(output[i].Plan...)) + 
tk.MustQuery(sql).Check(testkit.Rows(output[i].Result...))
+ }
+}
diff --git a/planner/cascades/optimize.go b/planner/cascades/optimize.go
new file mode 100644
index 0000000..4a5f0ea
--- /dev/null
+++ b/planner/cascades/optimize.go
@@ -0,0 +1,390 @@
+// Copyright 2018 PingCAP, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package cascades
+
+import (
+ "container/list"
+ "math"
+
+ "github.com/pingcap/tidb/expression"
+ plannercore "github.com/pingcap/tidb/planner/core"
+ "github.com/pingcap/tidb/planner/memo"
+ "github.com/pingcap/tidb/planner/property"
+ "github.com/pingcap/tidb/sessionctx"
+)
+
+// DefaultOptimizer is the optimizer which contains all of the default
+// transformation and implementation rules.
+var DefaultOptimizer = NewOptimizer()
+
+// Optimizer is the struct for the cascades optimizer.
+type Optimizer struct {
+ transformationRuleMap map[memo.Operand][]Transformation
+ implementationRuleMap map[memo.Operand][]ImplementationRule
+}
+
+// NewOptimizer returns a cascades optimizer with default transformation
+// rules and implementation rules.
+func NewOptimizer() *Optimizer {
+ return &Optimizer{
+ transformationRuleMap: defaultTransformationMap,
+ implementationRuleMap: defaultImplementationMap,
+ }
+}
+
+// ResetTransformationRules resets the transformationRuleMap of the optimizer, and returns the optimizer.
+func (opt *Optimizer) ResetTransformationRules(rules map[memo.Operand][]Transformation) *Optimizer {
+ opt.transformationRuleMap = rules
+ return opt
+}
+
+// ResetImplementationRules resets the implementationRuleMap of the optimizer, and returns the optimizer.
+func (opt *Optimizer) ResetImplementationRules(rules map[memo.Operand][]ImplementationRule) *Optimizer {
+ opt.implementationRuleMap = rules
+ return opt
+}
+
+// GetTransformationRules gets all the candidate Transformation rules of the optimizer
+// based on the logical plan node.
+func (opt *Optimizer) GetTransformationRules(node plannercore.LogicalPlan) []Transformation {
+ return opt.transformationRuleMap[memo.GetOperand(node)]
+}
+
+// GetImplementationRules gets all the candidate implementation rules of the optimizer
+// for the logical plan node.
+func (opt *Optimizer) GetImplementationRules(node plannercore.LogicalPlan) []ImplementationRule {
+ return opt.implementationRuleMap[memo.GetOperand(node)]
+}
+
+// FindBestPlan is the optimization entrance of the cascades planner. The
+// optimization is composed of 3 phases: preprocessing, exploration and implementation.
+//
+//------------------------------------------------------------------------------
+// Phase 1: Preprocessing
+//------------------------------------------------------------------------------
+//
+// The target of this phase is to preprocess the plan tree by some heuristic
+// rules which should always be beneficial, for example Column Pruning.
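+//
+// In the current implementation this phase only performs column pruning
+// (see onPhasePreprocessing below).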
+//
+//------------------------------------------------------------------------------
+// Phase 2: Exploration
+//------------------------------------------------------------------------------
+//
+// The target of this phase is to explore all the logically equivalent
+// expressions by exploring all the equivalent group expressions of each group.
+//
+// At the very beginning, there is only one group expression in a Group. After
+// applying some transformation rules on certain expressions of the Group, all
+// the equivalent expressions are found and stored in the Group. This procedure
+// can be regarded as searching for a weakly connected component in a directed
+// graph, where nodes are expressions and directed edges are the transformation
+// rules.
+//
+//------------------------------------------------------------------------------
+// Phase 3: Implementation
+//------------------------------------------------------------------------------
+//
+// The target of this phase is to search for the best physical plan for a Group
+// which satisfies a certain required physical property.
+//
+// In this phase, we need to enumerate all the applicable implementation rules
+// for each expression in each group under the required physical property. A
+// memo structure is used for a group to reduce the repeated search on the same
+// required physical property.
+func (opt *Optimizer) FindBestPlan(sctx sessionctx.Context, logical plannercore.LogicalPlan) (p plannercore.PhysicalPlan, err error) {
+ logical, err = opt.onPhasePreprocessing(sctx, logical)
+ if err != nil {
+ return nil, err
+ }
+ rootGroup := memo.Convert2Group(logical)
+ err = opt.onPhaseExploration(sctx, rootGroup)
+ if err != nil {
+ return nil, err
+ }
+ p, err = opt.onPhaseImplementation(sctx, rootGroup)
+ if err != nil {
+ return nil, err
+ }
+ err = p.ResolveIndices()
+ return p, err
+}
+
+func (opt *Optimizer) onPhasePreprocessing(sctx sessionctx.Context, plan plannercore.LogicalPlan) (plannercore.LogicalPlan, error) {
+ err := plan.PruneColumns(plan.Schema().Columns)
+ if err != nil {
+ return nil, err
+ }
+ return plan, nil
+}
+
+func (opt *Optimizer) onPhaseExploration(sctx sessionctx.Context, g *memo.Group) error {
+ for !g.Explored {
+ err := opt.exploreGroup(g)
+ if err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func (opt *Optimizer) exploreGroup(g *memo.Group) error {
+ if g.Explored {
+ return nil
+ }
+ g.Explored = true
+
+ for elem := g.Equivalents.Front(); elem != nil; elem = elem.Next() {
+ curExpr := elem.Value.(*memo.GroupExpr)
+ if curExpr.Explored {
+ continue
+ }
+ curExpr.Explored = true
+
+ // Explore child groups first.
+ for _, childGroup := range curExpr.Children {
+ for !childGroup.Explored {
+ if err := opt.exploreGroup(childGroup); err != nil {
+ return err
+ }
+ }
+ }
+
+ eraseCur, err := opt.findMoreEquiv(g, elem)
+ if err != nil {
+ return err
+ }
+ if eraseCur {
+ g.Delete(curExpr)
+ }
+ }
+ return nil
+}
+
+// findMoreEquiv finds and applies the matched transformation rules.
+func (opt *Optimizer) findMoreEquiv(g *memo.Group, elem *list.Element) (eraseCur bool, err error) {
+ expr := elem.Value.(*memo.GroupExpr)
+ for _, rule := range opt.GetTransformationRules(expr.ExprNode) {
+ pattern := rule.GetPattern()
+ if !pattern.Operand.Match(memo.GetOperand(expr.ExprNode)) {
+ continue
+ }
+ // Create a binding of the current Group expression and the pattern of
+ // the transformation rule to enumerate all the possible expressions.
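+ // One GroupExpr may produce several bindings for the same pattern, since
+ // every child Group can hold multiple equivalent expressions; the iterator
+ // below enumerates all of them.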
+ iter := memo.NewExprIterFromGroupElem(elem, pattern)
+ for ; iter != nil && iter.Matched(); iter.Next() {
+ if !rule.Match(iter) {
+ continue
+ }
+
+ newExprs, eraseOld, eraseAll, err := rule.OnTransform(iter)
+ if err != nil {
+ return false, err
+ }
+
+ if eraseAll {
+ g.DeleteAll()
+ for _, e := range newExprs {
+ g.Insert(e)
+ }
+ // If we delete all of the other GroupExprs, we can break the search.
+ g.Explored = true
+ return false, nil
+ }
+
+ eraseCur = eraseCur || eraseOld
+ for _, e := range newExprs {
+ if !g.Insert(e) {
+ continue
+ }
+ // If the new Group expression is successfully inserted into the
+ // current Group, mark the Group as unexplored to enable the exploration
+ // on the new Group expressions.
+ g.Explored = false
+ }
+ }
+ }
+ return eraseCur, nil
+}
+
+// fillGroupStats computes Stats property for each Group recursively.
+func (opt *Optimizer) fillGroupStats(g *memo.Group) (err error) {
+ if g.Prop.Stats != nil {
+ return nil
+ }
+ // All GroupExprs in a Group should share the same LogicalProperty, so just use
+ // the first one to compute the Stats property.
+ elem := g.Equivalents.Front()
+ expr := elem.Value.(*memo.GroupExpr)
+ childStats := make([]*property.StatsInfo, len(expr.Children))
+ childSchema := make([]*expression.Schema, len(expr.Children))
+ for i, childGroup := range expr.Children {
+ err = opt.fillGroupStats(childGroup)
+ if err != nil {
+ return err
+ }
+ childStats[i] = childGroup.Prop.Stats
+ childSchema[i] = childGroup.Prop.Schema
+ }
+ planNode := expr.ExprNode
+ g.Prop.Stats, err = planNode.DeriveStats(childStats, g.Prop.Schema, childSchema)
+ return err
+}
+
+// onPhaseImplementation starts implementing physical operators from the given root Group.
+func (opt *Optimizer) onPhaseImplementation(sctx sessionctx.Context, g *memo.Group) (plannercore.PhysicalPlan, error) {
+ prop := &property.PhysicalProperty{
+ ExpectedCnt: math.MaxFloat64,
+ }
+ preparePossibleProperties(g, make(map[*memo.Group][][]*expression.Column))
+ // TODO: replace the MaxFloat64 costLimit with a variable from sctx, or other sources.
+ impl, err := opt.implGroup(g, prop, math.MaxFloat64)
+ if err != nil {
+ return nil, err
+ }
+ if impl == nil {
+ return nil, plannercore.ErrInternal.GenWithStackByArgs("Can't find a proper physical plan for this query")
+ }
+ return impl.GetPlan(), nil
+}
+
+// implGroup finds the best Implementation which satisfies the required
+// physical property for a Group. The best Implementation should have the
+// lowest cost among all the applicable Implementations.
+//
+// g: the Group to be implemented.
+// reqPhysProp: the required physical property.
+// costLimit: the maximum cost of all the Implementations.
+func (opt *Optimizer) implGroup(g *memo.Group, reqPhysProp *property.PhysicalProperty, costLimit float64) (memo.Implementation, error) {
+ groupImpl := g.GetImpl(reqPhysProp)
+ if groupImpl != nil {
+ if groupImpl.GetCost() <= costLimit {
+ return groupImpl, nil
+ }
+ return nil, nil
+ }
+ // Handle implementation rules for each equivalent GroupExpr.
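+ // For every candidate Implementation, implement its child Groups recursively
+ // under the properties it requires, give up once the accumulated cost exceeds
+ // costLimit, and keep the cheapest result as the Group's Implementation.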
+ var cumCost float64
+ var childImpls []memo.Implementation
+ err := opt.fillGroupStats(g)
+ if err != nil {
+ return nil, err
+ }
+ outCount := math.Min(g.Prop.Stats.RowCount, reqPhysProp.ExpectedCnt)
+ for elem := g.Equivalents.Front(); elem != nil; elem = elem.Next() {
+ curExpr := elem.Value.(*memo.GroupExpr)
+ impls, err := opt.implGroupExpr(curExpr, reqPhysProp)
+ if err != nil {
+ return nil, err
+ }
+ for _, impl := range impls {
+ cumCost = 0.0
+ childImpls = childImpls[:0]
+ for i, childGroup := range curExpr.Children {
+ childImpl, err := opt.implGroup(childGroup, impl.GetPlan().GetChildReqProps(i), impl.ScaleCostLimit(costLimit)-cumCost)
+ if err != nil {
+ return nil, err
+ }
+ if childImpl == nil {
+ impl.SetCost(math.MaxFloat64)
+ break
+ }
+ cumCost += childImpl.GetCost()
+ childImpls = append(childImpls, childImpl)
+ }
+ if impl.GetCost() == math.MaxFloat64 {
+ continue
+ }
+ cumCost = impl.CalcCost(outCount, childImpls...)
+ if cumCost > costLimit {
+ continue
+ }
+ if groupImpl == nil || groupImpl.GetCost() > cumCost {
+ groupImpl = impl.AttachChildren(childImpls...)
+ costLimit = cumCost
+ }
+ }
+ }
+ // Handle enforcer rules for the required physical property.
+ for _, rule := range GetEnforcerRules(g, reqPhysProp) {
+ newReqPhysProp := rule.NewProperty(reqPhysProp)
+ enforceCost := rule.GetEnforceCost(g)
+ childImpl, err := opt.implGroup(g, newReqPhysProp, costLimit-enforceCost)
+ if err != nil {
+ return nil, err
+ }
+ if childImpl == nil {
+ continue
+ }
+ impl := rule.OnEnforce(reqPhysProp, childImpl)
+ cumCost = enforceCost + childImpl.GetCost()
+ impl.SetCost(cumCost)
+ if groupImpl == nil || groupImpl.GetCost() > cumCost {
+ groupImpl = impl
+ costLimit = cumCost
+ }
+ }
+ if groupImpl == nil || groupImpl.GetCost() == math.MaxFloat64 {
+ return nil, nil
+ }
+ g.InsertImpl(reqPhysProp, groupImpl)
+ return groupImpl, nil
+}
+
+func (opt *Optimizer) implGroupExpr(cur *memo.GroupExpr, reqPhysProp *property.PhysicalProperty) (impls []memo.Implementation, err error) {
+ for _, rule := range opt.GetImplementationRules(cur.ExprNode) {
+ if !rule.Match(cur, reqPhysProp) {
+ continue
+ }
+ impl, err := rule.OnImplement(cur, reqPhysProp)
+ if err != nil {
+ return nil, err
+ }
+ if impl != nil {
+ impls = append(impls, impl)
+ }
+ }
+ return impls, nil
+}
+
+// preparePossibleProperties recursively calls the LogicalPlan PreparePossibleProperties
+// interface. It fulfills the possible properties fields of LogicalAggregation
+// and LogicalJoin.
+func preparePossibleProperties(g *memo.Group, propertyMap map[*memo.Group][][]*expression.Column) [][]*expression.Column {
+ if prop, ok := propertyMap[g]; ok {
+ return prop
+ }
+ groupPropertyMap := make(map[string][]*expression.Column)
+ for elem := g.Equivalents.Front(); elem != nil; elem = elem.Next() {
+ expr := elem.Value.(*memo.GroupExpr)
+ childrenProperties := make([][][]*expression.Column, len(expr.Children))
+ for i, child := range expr.Children {
+ childrenProperties[i] = preparePossibleProperties(child, propertyMap)
+ }
+ exprProperties := expr.ExprNode.PreparePossibleProperties(expr.Schema(), childrenProperties...)
+ for _, newPropCols := range exprProperties {
+ // Check if the prop is already in `groupPropertyMap`.
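+ // The property's hash code serves as the deduplication key, so an ordering
+ // produced by several GroupExprs is recorded only once for this Group.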
+ newProp := property.PhysicalProperty{Items: property.ItemsFromCols(newPropCols, true)} + key := newProp.HashCode() + if _, ok := groupPropertyMap[string(key)]; !ok { + groupPropertyMap[string(key)] = newPropCols + } + } + } + resultProps := make([][]*expression.Column, 0, len(groupPropertyMap)) + for _, prop := range groupPropertyMap { + resultProps = append(resultProps, prop) + } + propertyMap[g] = resultProps + return resultProps +} diff --git a/planner/cascades/optimize_test.go b/planner/cascades/optimize_test.go new file mode 100644 index 0000000..566f7a8 --- /dev/null +++ b/planner/cascades/optimize_test.go @@ -0,0 +1,166 @@ +// Copyright 2018 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package cascades + +import ( + "context" + "math" + "testing" + + . "github.com/pingcap/check" + "github.com/pingcap/tidb/expression" + "github.com/pingcap/tidb/infoschema" + "github.com/pingcap/tidb/parser" + "github.com/pingcap/tidb/parser/model" + plannercore "github.com/pingcap/tidb/planner/core" + "github.com/pingcap/tidb/planner/memo" + "github.com/pingcap/tidb/planner/property" + "github.com/pingcap/tidb/sessionctx" + "github.com/pingcap/tidb/util/testleak" +) + +func TestT(t *testing.T) { + CustomVerboseFlag = true + TestingT(t) +} + +var _ = Suite(&testCascadesSuite{}) + +type testCascadesSuite struct { + *parser.Parser + is infoschema.InfoSchema + sctx sessionctx.Context + optimizer *Optimizer +} + +func (s *testCascadesSuite) SetUpSuite(c *C) { + testleak.BeforeTest() + s.is = infoschema.MockInfoSchema([]*model.TableInfo{plannercore.MockSignedTable()}) + s.sctx = plannercore.MockContext() + s.Parser = parser.New() + s.optimizer = NewOptimizer() +} + +func (s *testCascadesSuite) TearDownSuite(c *C) { + testleak.AfterTest(c)() +} + +func (s *testCascadesSuite) TestImplGroupZeroCost(c *C) { + stmt, err := s.ParseOneStmt("select t1.a, t2.a from t as t1 left join t as t2 on t1.a = t2.a where t1.a < 1.0", "", "") + c.Assert(err, IsNil) + p, _, err := plannercore.BuildLogicalPlan(context.Background(), s.sctx, stmt, s.is) + c.Assert(err, IsNil) + logic, ok := p.(plannercore.LogicalPlan) + c.Assert(ok, IsTrue) + rootGroup := memo.Convert2Group(logic) + prop := &property.PhysicalProperty{ + ExpectedCnt: math.MaxFloat64, + } + impl, err := s.optimizer.implGroup(rootGroup, prop, 0.0) + c.Assert(impl, IsNil) + c.Assert(err, IsNil) +} + +func (s *testCascadesSuite) TestInitGroupSchema(c *C) { + stmt, err := s.ParseOneStmt("select a from t", "", "") + c.Assert(err, IsNil) + p, _, err := plannercore.BuildLogicalPlan(context.Background(), s.sctx, stmt, s.is) + c.Assert(err, IsNil) + logic, ok := p.(plannercore.LogicalPlan) + c.Assert(ok, IsTrue) + g := memo.Convert2Group(logic) + c.Assert(g, NotNil) + c.Assert(g.Prop, NotNil) + c.Assert(g.Prop.Schema.Len(), Equals, 1) + c.Assert(g.Prop.Stats, IsNil) +} + +func (s *testCascadesSuite) TestFillGroupStats(c *C) { + stmt, err := s.ParseOneStmt("select * from t t1 join t t2 on t1.a = t2.a", "", "") + c.Assert(err, IsNil) + p, _, err := plannercore.BuildLogicalPlan(context.Background(), s.sctx, stmt, s.is) + 
c.Assert(err, IsNil) + logic, ok := p.(plannercore.LogicalPlan) + c.Assert(ok, IsTrue) + rootGroup := memo.Convert2Group(logic) + err = s.optimizer.fillGroupStats(rootGroup) + c.Assert(err, IsNil) + c.Assert(rootGroup.Prop.Stats, NotNil) +} + +func (s *testCascadesSuite) TestPreparePossibleProperties(c *C) { + s.optimizer.ResetTransformationRules(map[memo.Operand][]Transformation{ + memo.OperandDataSource: { + NewRuleEnumeratePaths(), + }, + }) + defer func() { + s.optimizer.ResetTransformationRules(defaultTransformationMap) + }() + stmt, err := s.ParseOneStmt("select f, sum(a) from t group by f", "", "") + c.Assert(err, IsNil) + p, _, err := plannercore.BuildLogicalPlan(context.Background(), s.sctx, stmt, s.is) + c.Assert(err, IsNil) + logic, ok := p.(plannercore.LogicalPlan) + c.Assert(ok, IsTrue) + logic, err = s.optimizer.onPhasePreprocessing(s.sctx, logic) + c.Assert(err, IsNil) + // collect the target columns: f, a + ds, ok := logic.Children()[0].Children()[0].(*plannercore.DataSource) + c.Assert(ok, IsTrue) + var columnF, columnA *expression.Column + for i, col := range ds.Columns { + if col.Name.L == "f" { + columnF = ds.Schema().Columns[i] + } else if col.Name.L == "a" { + columnA = ds.Schema().Columns[i] + } + } + c.Assert(columnF, NotNil) + c.Assert(columnA, NotNil) + + agg, ok := logic.Children()[0].(*plannercore.LogicalAggregation) + c.Assert(ok, IsTrue) + group := memo.Convert2Group(agg) + err = s.optimizer.onPhaseExploration(s.sctx, group) + c.Assert(err, IsNil) + // The memo looks like this: + // Group#0 Schema:[Column#13,test.t.f] + // Aggregation_2 input:[Group#1], group by:test.t.f, funcs:sum(test.t.a), firstrow(test.t.f) + // Group#1 Schema:[test.t.a,test.t.f] + // TiKVSingleGather_5 input:[Group#2], table:t + // TiKVSingleGather_9 input:[Group#3], table:t, index:f_g + // TiKVSingleGather_7 input:[Group#4], table:t, index:f + // Group#2 Schema:[test.t.a,test.t.f] + // TableScan_4 table:t, pk col:test.t.a + // Group#3 Schema:[test.t.a,test.t.f] + // IndexScan_8 table:t, index:f, g + // Group#4 Schema:[test.t.a,test.t.f] + // IndexScan_6 table:t, index:f + propMap := make(map[*memo.Group][][]*expression.Column) + aggProp := preparePossibleProperties(group, propMap) + // We only have one prop for Group0 : f + c.Assert(len(aggProp), Equals, 1) + c.Assert(aggProp[0][0].Equal(nil, columnF), IsTrue) + + gatherGroup := group.Equivalents.Front().Value.(*memo.GroupExpr).Children[0] + gatherProp, ok := propMap[gatherGroup] + c.Assert(ok, IsTrue) + // We have 2 props for Group1: [f], [a] + c.Assert(len(gatherProp), Equals, 2) + for _, prop := range gatherProp { + c.Assert(len(prop), Equals, 1) + c.Assert(prop[0].Equal(nil, columnA) || prop[0].Equal(nil, columnF), IsTrue) + } +} diff --git a/planner/cascades/stringer.go b/planner/cascades/stringer.go new file mode 100644 index 0000000..5a78c02 --- /dev/null +++ b/planner/cascades/stringer.go @@ -0,0 +1,117 @@ +// Copyright 2019 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package cascades + +import ( + "bytes" + "fmt" + "strings" + + "github.com/pingcap/tidb/planner/memo" +) + +// ToString stringifies a Group Tree. +func ToString(g *memo.Group) []string { + idMap := make(map[*memo.Group]int) + idMap[g] = 0 + return toString(g, idMap, map[*memo.Group]struct{}{}, []string{}) +} + +// toString recursively stringifies a Group Tree using a preorder traversal method. +func toString(g *memo.Group, idMap map[*memo.Group]int, visited map[*memo.Group]struct{}, strs []string) []string { + if _, exists := visited[g]; exists { + return strs + } + visited[g] = struct{}{} + // Add new Groups to idMap. + for item := g.Equivalents.Front(); item != nil; item = item.Next() { + expr := item.Value.(*memo.GroupExpr) + for _, childGroup := range expr.Children { + if _, exists := idMap[childGroup]; !exists { + idMap[childGroup] = len(idMap) + } + } + } + // Visit self first. + strs = append(strs, groupToString(g, idMap)...) + // Visit children then. + for item := g.Equivalents.Front(); item != nil; item = item.Next() { + expr := item.Value.(*memo.GroupExpr) + for _, childGroup := range expr.Children { + strs = toString(childGroup, idMap, visited, strs) + } + } + return strs +} + +// groupToString only stringifies a single Group. +// Format: +// Group#1 Column: [Column#1,Column#2,Column#13] Unique key: [] +// Selection_4 input:[Group#2], eq(Column#13, Column#2), gt(Column#1, 10) +// Projection_15 input:Group#3 Column#1, Column#2 +func groupToString(g *memo.Group, idMap map[*memo.Group]int) []string { + schema := g.Prop.Schema + colStrs := make([]string, 0, len(schema.Columns)) + for _, col := range schema.Columns { + colStrs = append(colStrs, col.String()) + } + + groupLine := bytes.NewBufferString("") + fmt.Fprintf(groupLine, "Group#%d Schema:[%s]", idMap[g], strings.Join(colStrs, ",")) + + if len(g.Prop.Schema.Keys) > 0 { + ukStrs := make([]string, 0, len(schema.Keys)) + for _, key := range schema.Keys { + ukColStrs := make([]string, 0, len(key)) + for _, col := range key { + ukColStrs = append(ukColStrs, col.String()) + } + ukStrs = append(ukStrs, strings.Join(ukColStrs, ",")) + } + fmt.Fprintf(groupLine, ", UniqueKey:[%s]", strings.Join(ukStrs, ",")) + } + + result := make([]string, 0, g.Equivalents.Len()+1) + result = append(result, groupLine.String()) + for item := g.Equivalents.Front(); item != nil; item = item.Next() { + expr := item.Value.(*memo.GroupExpr) + result = append(result, " "+groupExprToString(expr, idMap)) + } + return result +} + +// groupExprToString stringifies a groupExpr(or a LogicalPlan). 
+// Format: +// Selection_13 input:Group#2 gt(Column#1, Column#4) +func groupExprToString(expr *memo.GroupExpr, idMap map[*memo.Group]int) string { + buffer := bytes.NewBufferString(expr.ExprNode.ExplainID().String()) + if len(expr.Children) == 0 { + fmt.Fprintf(buffer, " %s", expr.ExprNode.ExplainInfo()) + } else { + fmt.Fprintf(buffer, " %s", getChildrenGroupID(expr, idMap)) + explainInfo := expr.ExprNode.ExplainInfo() + if len(explainInfo) != 0 { + fmt.Fprintf(buffer, ", %s", explainInfo) + } + } + return buffer.String() +} + +func getChildrenGroupID(expr *memo.GroupExpr, idMap map[*memo.Group]int) string { + children := make([]string, 0, len(expr.Children)) + for _, child := range expr.Children { + children = append(children, fmt.Sprintf("Group#%d", idMap[child])) + } + return "input:[" + strings.Join(children, ",") + "]" +} diff --git a/planner/cascades/stringer_test.go b/planner/cascades/stringer_test.go new file mode 100644 index 0000000..1eeae08 --- /dev/null +++ b/planner/cascades/stringer_test.go @@ -0,0 +1,91 @@ +// Copyright 2019 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package cascades + +import ( + "context" + + . "github.com/pingcap/check" + "github.com/pingcap/tidb/infoschema" + "github.com/pingcap/tidb/parser" + "github.com/pingcap/tidb/parser/model" + plannercore "github.com/pingcap/tidb/planner/core" + "github.com/pingcap/tidb/planner/memo" + "github.com/pingcap/tidb/sessionctx" + "github.com/pingcap/tidb/util/testutil" +) + +var _ = Suite(&testStringerSuite{}) + +type testStringerSuite struct { + *parser.Parser + is infoschema.InfoSchema + sctx sessionctx.Context + testData testutil.TestData + optimizer *Optimizer +} + +func (s *testStringerSuite) SetUpSuite(c *C) { + s.is = infoschema.MockInfoSchema([]*model.TableInfo{plannercore.MockSignedTable()}) + s.sctx = plannercore.MockContext() + s.Parser = parser.New() + s.optimizer = NewOptimizer() + var err error + s.testData, err = testutil.LoadTestSuiteData("testdata", "stringer_suite") + c.Assert(err, IsNil) +} + +func (s *testStringerSuite) TearDownSuite(c *C) { + c.Assert(s.testData.GenerateOutputIfNeeded(), IsNil) +} + +func (s *testStringerSuite) TestGroupStringer(c *C) { + s.optimizer.ResetTransformationRules(map[memo.Operand][]Transformation{ + memo.OperandSelection: { + NewRulePushSelDownTiKVSingleGather(), + NewRulePushSelDownTableScan(), + }, + memo.OperandDataSource: { + NewRuleEnumeratePaths(), + }, + }) + defer func() { + s.optimizer.ResetTransformationRules(defaultTransformationMap) + }() + var input []string + var output []struct { + SQL string + Result []string + } + s.testData.GetTestCases(c, &input, &output) + for i, sql := range input { + stmt, err := s.ParseOneStmt(sql, "", "") + c.Assert(err, IsNil) + p, _, err := plannercore.BuildLogicalPlan(context.Background(), s.sctx, stmt, s.is) + c.Assert(err, IsNil) + logic, ok := p.(plannercore.LogicalPlan) + c.Assert(ok, IsTrue) + logic, err = s.optimizer.onPhasePreprocessing(s.sctx, logic) + c.Assert(err, IsNil) + group := memo.Convert2Group(logic) + err = s.optimizer.onPhaseExploration(s.sctx, group) + 
c.Assert(err, IsNil) + group.BuildKeyInfo() + s.testData.OnRecord(func() { + output[i].SQL = sql + output[i].Result = ToString(group) + }) + c.Assert(ToString(group), DeepEquals, output[i].Result) + } +} diff --git a/planner/cascades/testdata/integration_suite_in.json b/planner/cascades/testdata/integration_suite_in.json new file mode 100644 index 0000000..31526e1 --- /dev/null +++ b/planner/cascades/testdata/integration_suite_in.json @@ -0,0 +1,57 @@ +[ + { + "name": "TestPKIsHandleRangeScan", + "cases": [ + "select b from t where a > 1", + "select b from t where a > 1 and a < 3", + "select b from t where a > 1 and b < 6", + "select a from t where a * 3 + 1 > 9 and a < 5" + ] + }, + { + "name": "TestSort", + "cases": [ + "select a from t order by a", + "select b from t order by b", + "select b from t order by a+b", + "select b from t order by b, a+b, a" + ] + }, + { + "name": "TestAggregation", + "cases": [ + "select sum(a) from t", + "select max(a), min(b) from t", + "select b, sum(a) from t group by b order by b", + "select max(a+b) from t" + ] + }, + { + "name": "TestSimplePlans", + "cases": [ + "select a from t limit 2", + "select a from t limit 1 offset 2", + "select b from t order by b limit 3", + "select a from t order by a limit 1 offset 2" + ] + }, + { + "name": "TestIndexScan", + "cases": [ + "select b from t", + "select a from t order by b", + "select c from t", + "select a from t order by c", + "select a, b from t where b > 5 order by b", + "select a, b, c from t where c = 3 and b > 1 order by b", + "select a, b from t where c > 1 and b > 1 order by c" + ] + }, + { + "name": "TestJoin", + "cases": [ + "select t1.a, t1.b from t1, t2 where t1.a = t2.a and t1.a > 2", + "select t1.a, t1.b from t1, t2 where t1.a > t2.a and t2.b > 200" + ] + } +] diff --git a/planner/cascades/testdata/integration_suite_out.json b/planner/cascades/testdata/integration_suite_out.json new file mode 100644 index 0000000..ef5d94e --- /dev/null +++ b/planner/cascades/testdata/integration_suite_out.json @@ -0,0 +1,354 @@ +[ + { + "Name": "TestPKIsHandleRangeScan", + "Cases": [ + { + "SQL": "select b from t where a > 1", + "Plan": [ + "Projection_8 3333.33 root test.t.b", + "└─TableReader_9 3333.33 root data:TableScan_10", + " └─TableScan_10 3333.33 cop table:t, range:(1,+inf], keep order:false, stats:pseudo" + ], + "Result": [ + "4", + "6" + ] + }, + { + "SQL": "select b from t where a > 1 and a < 3", + "Plan": [ + "Projection_8 2.00 root test.t.b", + "└─TableReader_9 2.00 root data:TableScan_10", + " └─TableScan_10 2.00 cop table:t, range:(1,3), keep order:false, stats:pseudo" + ], + "Result": null + }, + { + "SQL": "select b from t where a > 1 and b < 6", + "Plan": [ + "Projection_9 2666.67 root test.t.b", + "└─TableReader_10 2666.67 root data:Selection_11", + " └─Selection_11 2666.67 cop lt(test.t.b, 6)", + " └─TableScan_12 3333.33 cop table:t, range:(1,+inf], keep order:false, stats:pseudo" + ], + "Result": [ + "4" + ] + }, + { + "SQL": "select a from t where a * 3 + 1 > 9 and a < 5", + "Plan": [ + "TableReader_9 4.00 root data:Selection_10", + "└─Selection_10 4.00 cop gt(plus(mul(test.t.a, 3), 1), 9)", + " └─TableScan_11 5.00 cop table:t, range:[-inf,5), keep order:false, stats:pseudo" + ], + "Result": [ + "3" + ] + } + ] + }, + { + "Name": "TestSort", + "Cases": [ + { + "SQL": "select a from t order by a", + "Plan": [ + "TableReader_7 10000.00 root data:TableScan_8", + "└─TableScan_8 10000.00 cop table:t, range:[-inf,+inf], keep order:true, stats:pseudo" + ], + "Result": [ + "1", + "2", + "3", + "4" 
+ ] + }, + { + "SQL": "select b from t order by b", + "Plan": [ + "Sort_11 10000.00 root test.t.b:asc", + "└─TableReader_9 10000.00 root data:TableScan_10", + " └─TableScan_10 10000.00 cop table:t, range:[-inf,+inf], keep order:false, stats:pseudo" + ], + "Result": [ + "11", + "22", + "33", + "44" + ] + }, + { + "SQL": "select b from t order by a+b", + "Plan": [ + "Projection_7 10000.00 root test.t.b", + "└─Projection_12 10000.00 root test.t.b, test.t.a", + " └─Sort_8 10000.00 root Column#4:asc", + " └─Projection_13 10000.00 root test.t.b, test.t.a, plus(test.t.a, test.t.b)->Column#4", + " └─Projection_9 10000.00 root test.t.b, test.t.a", + " └─TableReader_10 10000.00 root data:TableScan_11", + " └─TableScan_11 10000.00 cop table:t, range:[-inf,+inf], keep order:false, stats:pseudo" + ], + "Result": [ + "11", + "22", + "33", + "44" + ] + }, + { + "SQL": "select b from t order by b, a+b, a", + "Plan": [ + "Projection_7 10000.00 root test.t.b", + "└─Projection_12 10000.00 root test.t.b, test.t.a", + " └─Sort_8 10000.00 root test.t.b:asc, Column#4:asc, test.t.a:asc", + " └─Projection_13 10000.00 root test.t.b, test.t.a, plus(test.t.a, test.t.b)->Column#4", + " └─Projection_9 10000.00 root test.t.b, test.t.a", + " └─TableReader_10 10000.00 root data:TableScan_11", + " └─TableScan_11 10000.00 cop table:t, range:[-inf,+inf], keep order:false, stats:pseudo" + ], + "Result": [ + "11", + "22", + "33", + "44" + ] + } + ] + }, + { + "Name": "TestAggregation", + "Cases": [ + { + "SQL": "select sum(a) from t", + "Plan": [ + "HashAgg_11 1.00 root funcs:sum(Column#4)->Column#3", + "└─TableReader_12 1.00 root data:HashAgg_13", + " └─HashAgg_13 1.00 cop funcs:sum(test.t.a)->Column#4", + " └─TableScan_10 10000.00 cop table:t, range:[-inf,+inf], keep order:false, stats:pseudo" + ], + "Result": [ + "10" + ] + }, + { + "SQL": "select max(a), min(b) from t", + "Plan": [ + "HashAgg_11 1.00 root funcs:max(Column#5)->Column#3, funcs:min(Column#6)->Column#4", + "└─TableReader_12 1.00 root data:HashAgg_13", + " └─HashAgg_13 1.00 cop funcs:max(test.t.a)->Column#5, funcs:min(test.t.b)->Column#6", + " └─TableScan_10 10000.00 cop table:t, range:[-inf,+inf], keep order:false, stats:pseudo" + ], + "Result": [ + "4 11" + ] + }, + { + "SQL": "select b, sum(a) from t group by b order by b", + "Plan": [ + "Projection_10 8000.00 root test.t.b, Column#3", + "└─Sort_18 8000.00 root test.t.b:asc", + " └─HashAgg_15 8000.00 root group by:test.t.b, funcs:sum(Column#4)->Column#3, funcs:firstrow(test.t.b)->test.t.b", + " └─TableReader_16 8000.00 root data:HashAgg_17", + " └─HashAgg_17 8000.00 cop group by:test.t.b, funcs:sum(test.t.a)->Column#4", + " └─TableScan_14 10000.00 cop table:t, range:[-inf,+inf], keep order:false, stats:pseudo" + ], + "Result": [ + "11 1", + "22 2", + "33 3", + "44 4" + ] + }, + { + "SQL": "select max(a+b) from t", + "Plan": [ + "HashAgg_12 1.00 root funcs:max(Column#4)->Column#3", + "└─TableReader_13 1.00 root data:HashAgg_14", + " └─HashAgg_14 1.00 cop funcs:max(plus(test.t.a, test.t.b))->Column#4", + " └─TableScan_10 10000.00 cop table:t, range:[-inf,+inf], keep order:false, stats:pseudo" + ], + "Result": [ + "48" + ] + } + ] + }, + { + "Name": "TestSimplePlans", + "Cases": [ + { + "SQL": "select a from t limit 2", + "Plan": [ + "Limit_6 2.00 root offset:0, count:2", + "└─TableReader_7 2.00 root data:TableScan_8", + " └─TableScan_8 2.00 cop table:t, range:[-inf,+inf], keep order:false, stats:pseudo" + ], + "Result": [ + "1", + "2" + ] + }, + { + "SQL": "select a from t limit 1 offset 2", + "Plan": [ + 
"Limit_6 1.00 root offset:2, count:1", + "└─TableReader_7 3.00 root data:TableScan_8", + " └─TableScan_8 3.00 cop table:t, range:[-inf,+inf], keep order:false, stats:pseudo" + ], + "Result": [ + "3" + ] + }, + { + "SQL": "select b from t order by b limit 3", + "Plan": [ + "TopN_8 3.00 root test.t.b:asc, offset:0, count:3", + "└─TableReader_10 10000.00 root data:TableScan_11", + " └─TableScan_11 10000.00 cop table:t, range:[-inf,+inf], keep order:false, stats:pseudo" + ], + "Result": [ + "11", + "22", + "33" + ] + }, + { + "SQL": "select a from t order by a limit 1 offset 2", + "Plan": [ + "Limit_9 1.00 root offset:2, count:1", + "└─TableReader_12 3.00 root data:TableScan_13", + " └─TableScan_13 3.00 cop table:t, range:[-inf,+inf], keep order:true, stats:pseudo" + ], + "Result": [ + "3" + ] + } + ] + }, + { + "Name": "TestIndexScan", + "Cases": [ + { + "SQL": "select b from t", + "Plan": [ + "IndexReader_11 10000.00 root index:IndexScan_12", + "└─IndexScan_12 10000.00 cop table:t, index:b, range:[NULL,+inf], keep order:false, stats:pseudo" + ], + "Result": [ + "2", + "5", + "8" + ] + }, + { + "SQL": "select a from t order by b", + "Plan": [ + "Projection_11 10000.00 root test.t.a", + "└─IndexReader_14 10000.00 root index:IndexScan_15", + " └─IndexScan_15 10000.00 cop table:t, index:b, range:[NULL,+inf], keep order:true, stats:pseudo" + ], + "Result": [ + "1", + "4", + "7" + ] + }, + { + "SQL": "select c from t", + "Plan": [ + "IndexReader_9 10000.00 root index:IndexScan_10", + "└─IndexScan_10 10000.00 cop table:t, index:c, b, range:[NULL,+inf], keep order:false, stats:pseudo" + ], + "Result": [ + "3", + "6", + "9" + ] + }, + { + "SQL": "select a from t order by c", + "Plan": [ + "Projection_9 10000.00 root test.t.a", + "└─IndexReader_12 10000.00 root index:IndexScan_13", + " └─IndexScan_13 10000.00 cop table:t, index:c, b, range:[NULL,+inf], keep order:true, stats:pseudo" + ], + "Result": [ + "1", + "4", + "7" + ] + }, + { + "SQL": "select a, b from t where b > 5 order by b", + "Plan": [ + "IndexReader_20 8000.00 root index:IndexScan_21", + "└─IndexScan_21 3333.33 cop table:t, index:b, range:(5,+inf], keep order:true, stats:pseudo" + ], + "Result": [ + "7 8" + ] + }, + { + "SQL": "select a, b, c from t where c = 3 and b > 1 order by b", + "Plan": [ + "IndexReader_15 8000.00 root index:IndexScan_16", + "└─IndexScan_16 33.33 cop table:t, index:c, b, range:(3 1,3 +inf], keep order:true, stats:pseudo" + ], + "Result": [ + "1 2 3" + ] + }, + { + "SQL": "select a, b from t where c > 1 and b > 1 order by c", + "Plan": [ + "Projection_14 8000.00 root test.t.a, test.t.b", + "└─IndexReader_18 8000.00 root index:Selection_19", + " └─Selection_19 2666.67 cop gt(test.t.b, 1)", + " └─IndexScan_20 3333.33 cop table:t, index:c, b, range:(1,+inf], keep order:true, stats:pseudo" + ], + "Result": [ + "1 2", + "4 5", + "7 8" + ] + } + ] + }, + { + "Name": "TestJoin", + "Cases": [ + { + "SQL": "select t1.a, t1.b from t1, t2 where t1.a = t2.a and t1.a > 2", + "Plan": [ + "Projection_16 4166.67 root test.t1.a, test.t1.b", + "└─HashRightJoin_17 4166.67 root inner join, equal:[eq(test.t1.a, test.t2.a)]", + " ├─TableReader_19 3333.33 root data:TableScan_20", + " │ └─TableScan_20 3333.33 cop table:t1, range:(2,+inf], keep order:false, stats:pseudo", + " └─TableReader_21 3333.33 root data:TableScan_22", + " └─TableScan_22 3333.33 cop table:t2, range:(2,+inf], keep order:false, stats:pseudo" + ], + "Result": [ + "3 33" + ] + }, + { + "SQL": "select t1.a, t1.b from t1, t2 where t1.a > t2.a and t2.b > 200", + "Plan": [ + 
"Projection_12 80000000.00 root test.t1.a, test.t1.b", + "└─HashLeftJoin_14 80000000.00 root CARTESIAN inner join, other cond:gt(test.t1.a, test.t2.a)", + " ├─TableReader_15 10000.00 root data:TableScan_16", + " │ └─TableScan_16 10000.00 cop table:t1, range:[-inf,+inf], keep order:false, stats:pseudo", + " └─TableReader_17 8000.00 root data:Selection_18", + " └─Selection_18 8000.00 cop gt(test.t2.b, 200)", + " └─TableScan_19 10000.00 cop table:t2, range:[-inf,+inf], keep order:false, stats:pseudo" + ], + "Result": [ + "3 33", + "4 44", + "4 44" + ] + } + ] + } +] diff --git a/planner/cascades/testdata/stringer_suite_in.json b/planner/cascades/testdata/stringer_suite_in.json new file mode 100644 index 0000000..e7debca --- /dev/null +++ b/planner/cascades/testdata/stringer_suite_in.json @@ -0,0 +1,23 @@ +[ + { + "name": "TestGroupStringer", + "cases": [ + // Simple query. + "select b from t where a > 1 and b < 1", + // TableDual. + "select 3 * 10 + 1", + // Join. + "select t1.b from t t1, t t2 where t2.a=t1.b and t1.a > 10", + // Aggregation. + "select max(b), sum(a) from t where c > 10 group by d", + // Aggregation without group by. + "select avg(b) from t where b > 10", + // Join + Agg. + "select sum(t1.a) from t t1, t t2 where t1.a = t2.b", + // Limit. + "select a from t where a > 10 limit 3", + // Order by. + "select a from t where b > 1 order by c" + ] + } +] diff --git a/planner/cascades/testdata/stringer_suite_out.json b/planner/cascades/testdata/stringer_suite_out.json new file mode 100644 index 0000000..99940c7 --- /dev/null +++ b/planner/cascades/testdata/stringer_suite_out.json @@ -0,0 +1,195 @@ +[ + { + "Name": "TestGroupStringer", + "Cases": [ + { + "SQL": "select b from t where a > 1 and b < 1", + "Result": [ + "Group#0 Schema:[test.t.b]", + " Projection_3 input:[Group#1], test.t.b", + "Group#1 Schema:[test.t.a,test.t.b], UniqueKey:[test.t.a]", + " TiKVSingleGather_5 input:[Group#2], table:t", + "Group#2 Schema:[test.t.a,test.t.b], UniqueKey:[test.t.a]", + " Selection_8 input:[Group#3], lt(test.t.b, 1)", + "Group#3 Schema:[test.t.a,test.t.b], UniqueKey:[test.t.a]", + " TableScan_7 table:t, pk col:test.t.a, cond:[gt(test.t.a, 1)]" + ] + }, + { + "SQL": "select 3 * 10 + 1", + "Result": [ + "Group#0 Schema:[Column#1]", + " Projection_2 input:[Group#1], 31->Column#1", + "Group#1 Schema:[]", + " TableDual_1 rowcount:1" + ] + }, + { + "SQL": "select t1.b from t t1, t t2 where t2.a=t1.b and t1.a > 10", + "Result": [ + "Group#0 Schema:[test.t.b]", + " Projection_5 input:[Group#1], test.t.b", + "Group#1 Schema:[test.t.a,test.t.b,test.t.a]", + " Selection_4 input:[Group#2], eq(test.t.a, test.t.b), gt(test.t.a, 10)", + "Group#2 Schema:[test.t.a,test.t.b,test.t.a]", + " Join_3 input:[Group#3,Group#4], inner join", + "Group#3 Schema:[test.t.a,test.t.b], UniqueKey:[test.t.a]", + " TiKVSingleGather_7 input:[Group#5], table:t1", + "Group#5 Schema:[test.t.a,test.t.b], UniqueKey:[test.t.a]", + " TableScan_6 table:t1, pk col:test.t.a", + "Group#4 Schema:[test.t.a], UniqueKey:[test.t.a]", + " TiKVSingleGather_9 input:[Group#6], table:t2", + " TiKVSingleGather_21 input:[Group#7], table:t2, index:e_d_c_str_prefix", + " TiKVSingleGather_19 input:[Group#8], table:t2, index:c_d_e_str", + " TiKVSingleGather_17 input:[Group#9], table:t2, index:f_g", + " TiKVSingleGather_15 input:[Group#10], table:t2, index:g", + " TiKVSingleGather_13 input:[Group#11], table:t2, index:f", + " TiKVSingleGather_11 input:[Group#12], table:t2, index:c_d_e", + "Group#6 Schema:[test.t.a], UniqueKey:[test.t.a]", + " TableScan_8 
table:t2, pk col:test.t.a", + "Group#7 Schema:[test.t.a]", + " IndexScan_20 table:t2, index:e_str, d_str, c_str", + "Group#8 Schema:[test.t.a]", + " IndexScan_18 table:t2, index:c_str, d_str, e_str", + "Group#9 Schema:[test.t.a]", + " IndexScan_16 table:t2, index:f, g", + "Group#10 Schema:[test.t.a]", + " IndexScan_14 table:t2, index:g", + "Group#11 Schema:[test.t.a]", + " IndexScan_12 table:t2, index:f", + "Group#12 Schema:[test.t.a]", + " IndexScan_10 table:t2, index:c, d, e" + ] + }, + { + "SQL": "select max(b), sum(a) from t where c > 10 group by d", + "Result": [ + "Group#0 Schema:[Column#13,Column#14]", + " Projection_4 input:[Group#1], Column#13, Column#14", + "Group#1 Schema:[Column#13,Column#14]", + " Aggregation_3 input:[Group#2], group by:test.t.d, funcs:max(test.t.b), sum(test.t.a)", + "Group#2 Schema:[test.t.a,test.t.b,test.t.c,test.t.d], UniqueKey:[test.t.a]", + " TiKVSingleGather_6 input:[Group#3], table:t", + "Group#3 Schema:[test.t.a,test.t.b,test.t.c,test.t.d], UniqueKey:[test.t.a]", + " Selection_7 input:[Group#4], gt(test.t.c, 10)", + "Group#4 Schema:[test.t.a,test.t.b,test.t.c,test.t.d], UniqueKey:[test.t.a]", + " TableScan_5 table:t, pk col:test.t.a" + ] + }, + { + "SQL": "select avg(b) from t where b > 10", + "Result": [ + "Group#0 Schema:[Column#13]", + " Projection_4 input:[Group#1], Column#13", + "Group#1 Schema:[Column#13]", + " Aggregation_3 input:[Group#2], funcs:avg(test.t.b)", + "Group#2 Schema:[test.t.b]", + " TiKVSingleGather_6 input:[Group#3], table:t", + "Group#3 Schema:[test.t.b]", + " Selection_7 input:[Group#4], gt(test.t.b, 10)", + "Group#4 Schema:[test.t.b]", + " TableScan_5 table:t" + ] + }, + { + "SQL": "select sum(t1.a) from t t1, t t2 where t1.a = t2.b", + "Result": [ + "Group#0 Schema:[Column#25]", + " Projection_6 input:[Group#1], Column#25", + "Group#1 Schema:[Column#25]", + " Aggregation_5 input:[Group#2], funcs:sum(test.t.a)", + "Group#2 Schema:[test.t.a,test.t.b]", + " Selection_4 input:[Group#3], eq(test.t.a, test.t.b)", + "Group#3 Schema:[test.t.a,test.t.b]", + " Join_3 input:[Group#4,Group#5], inner join", + "Group#4 Schema:[test.t.a], UniqueKey:[test.t.a]", + " TiKVSingleGather_8 input:[Group#6], table:t1", + " TiKVSingleGather_20 input:[Group#7], table:t1, index:e_d_c_str_prefix", + " TiKVSingleGather_18 input:[Group#8], table:t1, index:c_d_e_str", + " TiKVSingleGather_16 input:[Group#9], table:t1, index:f_g", + " TiKVSingleGather_14 input:[Group#10], table:t1, index:g", + " TiKVSingleGather_12 input:[Group#11], table:t1, index:f", + " TiKVSingleGather_10 input:[Group#12], table:t1, index:c_d_e", + "Group#6 Schema:[test.t.a], UniqueKey:[test.t.a]", + " TableScan_7 table:t1, pk col:test.t.a", + "Group#7 Schema:[test.t.a]", + " IndexScan_19 table:t1, index:e_str, d_str, c_str", + "Group#8 Schema:[test.t.a]", + " IndexScan_17 table:t1, index:c_str, d_str, e_str", + "Group#9 Schema:[test.t.a]", + " IndexScan_15 table:t1, index:f, g", + "Group#10 Schema:[test.t.a]", + " IndexScan_13 table:t1, index:g", + "Group#11 Schema:[test.t.a]", + " IndexScan_11 table:t1, index:f", + "Group#12 Schema:[test.t.a]", + " IndexScan_9 table:t1, index:c, d, e", + "Group#5 Schema:[test.t.b]", + " TiKVSingleGather_22 input:[Group#13], table:t2", + "Group#13 Schema:[test.t.b]", + " TableScan_21 table:t2" + ] + }, + { + "SQL": "select a from t where a > 10 limit 3", + "Result": [ + "Group#0 Schema:[test.t.a], UniqueKey:[test.t.a]", + " Limit_4 input:[Group#1], offset:0, count:3", + "Group#1 Schema:[test.t.a], UniqueKey:[test.t.a]", + " Projection_3 
input:[Group#2], test.t.a", + "Group#2 Schema:[test.t.a], UniqueKey:[test.t.a]", + " TiKVSingleGather_6 input:[Group#3], table:t", + " TiKVSingleGather_8 input:[Group#4], table:t, index:c_d_e", + " TiKVSingleGather_10 input:[Group#5], table:t, index:f", + " TiKVSingleGather_12 input:[Group#6], table:t, index:g", + " TiKVSingleGather_14 input:[Group#7], table:t, index:f_g", + " TiKVSingleGather_16 input:[Group#8], table:t, index:c_d_e_str", + " TiKVSingleGather_18 input:[Group#9], table:t, index:e_d_c_str_prefix", + "Group#3 Schema:[test.t.a], UniqueKey:[test.t.a]", + " TableScan_26 table:t, pk col:test.t.a, cond:[gt(test.t.a, 10)]", + "Group#4 Schema:[test.t.a]", + " Selection_25 input:[Group#10], gt(test.t.a, 10)", + "Group#10 Schema:[test.t.a]", + " IndexScan_7 table:t, index:c, d, e", + "Group#5 Schema:[test.t.a]", + " Selection_24 input:[Group#11], gt(test.t.a, 10)", + "Group#11 Schema:[test.t.a]", + " IndexScan_9 table:t, index:f", + "Group#6 Schema:[test.t.a]", + " Selection_23 input:[Group#12], gt(test.t.a, 10)", + "Group#12 Schema:[test.t.a]", + " IndexScan_11 table:t, index:g", + "Group#7 Schema:[test.t.a]", + " Selection_22 input:[Group#13], gt(test.t.a, 10)", + "Group#13 Schema:[test.t.a]", + " IndexScan_13 table:t, index:f, g", + "Group#8 Schema:[test.t.a]", + " Selection_21 input:[Group#14], gt(test.t.a, 10)", + "Group#14 Schema:[test.t.a]", + " IndexScan_15 table:t, index:c_str, d_str, e_str", + "Group#9 Schema:[test.t.a]", + " Selection_20 input:[Group#15], gt(test.t.a, 10)", + "Group#15 Schema:[test.t.a]", + " IndexScan_17 table:t, index:e_str, d_str, c_str" + ] + }, + { + "SQL": "select a from t where b > 1 order by c", + "Result": [ + "Group#0 Schema:[test.t.a], UniqueKey:[test.t.a]", + " Projection_5 input:[Group#1], test.t.a", + "Group#1 Schema:[test.t.a,test.t.c], UniqueKey:[test.t.a]", + " Sort_4 input:[Group#2], test.t.c:asc", + "Group#2 Schema:[test.t.a,test.t.c], UniqueKey:[test.t.a]", + " Projection_3 input:[Group#3], test.t.a, test.t.c", + "Group#3 Schema:[test.t.a,test.t.b,test.t.c], UniqueKey:[test.t.a]", + " TiKVSingleGather_7 input:[Group#4], table:t", + "Group#4 Schema:[test.t.a,test.t.b,test.t.c], UniqueKey:[test.t.a]", + " Selection_8 input:[Group#5], gt(test.t.b, 1)", + "Group#5 Schema:[test.t.a,test.t.b,test.t.c], UniqueKey:[test.t.a]", + " TableScan_6 table:t, pk col:test.t.a" + ] + } + ] + } +] diff --git a/planner/cascades/testdata/transformation_rules_suite_in.json b/planner/cascades/testdata/transformation_rules_suite_in.json new file mode 100644 index 0000000..87d2712 --- /dev/null +++ b/planner/cascades/testdata/transformation_rules_suite_in.json @@ -0,0 +1,46 @@ +[ + { + "name": "TestPredicatePushDown", + "cases": [ + "select a, max(b) from t group by a having a > 1", + "select a, avg(b) from t group by a having a > 1 and max(b) > 10", + "select t1.a, t1.b, t2.b from t t1, t t2 where t1.a = t2.a and t2.b = t1.b and t1.a > 10 and t2.b > 10 and t1.a > t2.b", + "select t1.a, t1.b from t t1, t t2 where t1.a = t2.a and t1.a = 10 and t2.a = 5", + "select a, f from t where f > 1", + "select a, f from t where g > 1 and f > 1", + "select t1.a, t1.b from t t1, t t2 where t1.a = t2.a and t1.a = 10 and t2.a = 5" + ] + }, + { + "name": "TestAggPushDownGather", + "cases": [ + "select b, sum(a) from t group by b", + "select b, sum(a) from t group by c, b" + ] + }, + { + "name": "TestTopNRules", + "cases": [ + "select b from t order by a limit 2", + "select a+b from t order by a limit 1 offset 2", + "select c from t order by t.a limit 1", + "select c from t 
order by t.a + t.b limit 1" + ] + }, + { + "name": "TestProjectionElimination", + "cases": [ + "select a, b from (select a, b from t) as t2", + "select a+b from (select a, b from t) as t2", + "select a from (select a, b from (select a, b, c from t) as t2) as t3" + ] + }, + { + "name": "TestMergeAggregationProjection", + "cases": [ + "select b, max(a) from (select a, c+d as b from t as t1) as t2 group by b", + "select max(a) from (select c+d as b, a+c as a from t as t1) as t2", + "select b, max(a) from (select a, c+d as b, @i:=0 from t as t1) as t2 group by b" + ] + } +] diff --git a/planner/cascades/testdata/transformation_rules_suite_out.json b/planner/cascades/testdata/transformation_rules_suite_out.json new file mode 100644 index 0000000..1990b15 --- /dev/null +++ b/planner/cascades/testdata/transformation_rules_suite_out.json @@ -0,0 +1,290 @@ +[ + { + "Name": "TestPredicatePushDown", + "Cases": [ + { + "SQL": "select a, max(b) from t group by a having a > 1", + "Result": [ + "Group#0 Schema:[test.t.a,Column#13]", + " Projection_3 input:[Group#1], test.t.a, Column#13", + "Group#1 Schema:[Column#13,test.t.a]", + " Aggregation_2 input:[Group#2], group by:test.t.a, funcs:max(test.t.b), firstrow(test.t.a)", + "Group#2 Schema:[test.t.a,test.t.b]", + " TiKVSingleGather_6 input:[Group#3], table:t", + "Group#3 Schema:[test.t.a,test.t.b]", + " TableScan_10 table:t, pk col:test.t.a, cond:[gt(test.t.a, 1)]" + ] + }, + { + "SQL": "select a, avg(b) from t group by a having a > 1 and max(b) > 10", + "Result": [ + "Group#0 Schema:[test.t.a,Column#16]", + " Projection_5 input:[Group#1], test.t.a, Column#13", + "Group#1 Schema:[test.t.a,Column#13,Column#14]", + " Projection_3 input:[Group#2], test.t.a, Column#13, Column#14", + "Group#2 Schema:[Column#13,Column#14,test.t.a]", + " Selection_10 input:[Group#3], gt(Column#14, 10)", + "Group#3 Schema:[Column#13,Column#14,test.t.a]", + " Aggregation_2 input:[Group#4], group by:test.t.a, funcs:avg(test.t.b), max(test.t.b), firstrow(test.t.a)", + "Group#4 Schema:[test.t.a,test.t.b]", + " TiKVSingleGather_7 input:[Group#5], table:t", + "Group#5 Schema:[test.t.a,test.t.b]", + " TableScan_12 table:t, pk col:test.t.a, cond:[gt(test.t.a, 1)]" + ] + }, + { + "SQL": "select t1.a, t1.b, t2.b from t t1, t t2 where t1.a = t2.a and t2.b = t1.b and t1.a > 10 and t2.b > 10 and t1.a > t2.b", + "Result": [ + "Group#0 Schema:[test.t.a,test.t.b,test.t.b]", + " Projection_5 input:[Group#1], test.t.a, test.t.b, test.t.b", + "Group#1 Schema:[test.t.a,test.t.b,test.t.a,test.t.b]", + " Join_3 input:[Group#2,Group#3], inner join, equal:[eq(test.t.a, test.t.a) eq(test.t.b, test.t.b)], other cond:gt(test.t.a, test.t.b)", + "Group#2 Schema:[test.t.a,test.t.b]", + " TiKVSingleGather_7 input:[Group#4], table:t1", + "Group#4 Schema:[test.t.a,test.t.b]", + " Selection_14 input:[Group#5], gt(test.t.a, test.t.b), gt(test.t.b, 10)", + "Group#5 Schema:[test.t.a,test.t.b]", + " TableScan_13 table:t1, pk col:test.t.a, cond:[gt(test.t.a, 10)]", + "Group#3 Schema:[test.t.a,test.t.b]", + " TiKVSingleGather_9 input:[Group#6], table:t2", + "Group#6 Schema:[test.t.a,test.t.b]", + " Selection_17 input:[Group#7], gt(test.t.a, test.t.b), gt(test.t.b, 10)", + "Group#7 Schema:[test.t.a,test.t.b]", + " TableScan_16 table:t2, pk col:test.t.a, cond:[gt(test.t.a, 10)]" + ] + }, + { + "SQL": "select t1.a, t1.b from t t1, t t2 where t1.a = t2.a and t1.a = 10 and t2.a = 5", + "Result": [ + "Group#0 Schema:[test.t.a,test.t.b]", + " Projection_5 input:[Group#1], test.t.a, test.t.b", + "Group#1 
Schema:[test.t.a,test.t.b,test.t.a]", + " TableDual_22 rowcount:0" + ] + }, + { + "SQL": "select a, f from t where f > 1", + "Result": [ + "Group#0 Schema:[test.t.a,test.t.f]", + " Projection_3 input:[Group#1], test.t.a, test.t.f", + "Group#1 Schema:[test.t.a,test.t.f]", + " TiKVSingleGather_5 input:[Group#2], table:t", + " TiKVSingleGather_7 input:[Group#3], table:t, index:f", + " TiKVSingleGather_9 input:[Group#4], table:t, index:f_g", + "Group#2 Schema:[test.t.a,test.t.f]", + " Selection_10 input:[Group#5], gt(test.t.f, 1)", + "Group#5 Schema:[test.t.a,test.t.f]", + " TableScan_4 table:t, pk col:test.t.a", + "Group#3 Schema:[test.t.a,test.t.f]", + " IndexScan_13 table:t, index:f, cond:[gt(test.t.f, 1)]", + "Group#4 Schema:[test.t.a,test.t.f]", + " IndexScan_14 table:t, index:f, g, cond:[gt(test.t.f, 1)]" + ] + }, + { + "SQL": "select a, f from t where g > 1 and f > 1", + "Result": [ + "Group#0 Schema:[test.t.a,test.t.f]", + " Projection_3 input:[Group#1], test.t.a, test.t.f", + "Group#1 Schema:[test.t.a,test.t.f,test.t.g]", + " TiKVSingleGather_5 input:[Group#2], table:t", + " TiKVSingleGather_7 input:[Group#3], table:t, index:f_g", + "Group#2 Schema:[test.t.a,test.t.f,test.t.g]", + " Selection_8 input:[Group#4], gt(test.t.f, 1), gt(test.t.g, 1)", + "Group#4 Schema:[test.t.a,test.t.f,test.t.g]", + " TableScan_4 table:t, pk col:test.t.a", + "Group#3 Schema:[test.t.a,test.t.f,test.t.g]", + " Selection_11 input:[Group#5], gt(test.t.g, 1)", + "Group#5 Schema:[test.t.a,test.t.f,test.t.g]", + " IndexScan_10 table:t, index:f, g, cond:[gt(test.t.f, 1)]" + ] + }, + { + "SQL": "select t1.a, t1.b from t t1, t t2 where t1.a = t2.a and t1.a = 10 and t2.a = 5", + "Result": [ + "Group#0 Schema:[test.t.a,test.t.b]", + " Projection_5 input:[Group#1], test.t.a, test.t.b", + "Group#1 Schema:[test.t.a,test.t.b,test.t.a]", + " TableDual_22 rowcount:0" + ] + } + ] + }, + { + "Name": "TestAggPushDownGather", + "Cases": [ + { + "SQL": "select b, sum(a) from t group by b", + "Result": [ + "Group#0 Schema:[test.t.b,Column#13], UniqueKey:[test.t.b]", + " Projection_3 input:[Group#1], test.t.b, Column#13", + "Group#1 Schema:[Column#13,test.t.b], UniqueKey:[test.t.b]", + " Aggregation_2 input:[Group#2], group by:test.t.b, funcs:sum(test.t.a), firstrow(test.t.b)", + " Aggregation_7 input:[Group#3], group by:test.t.b, funcs:sum(Column#14), firstrow(test.t.b)", + "Group#2 Schema:[test.t.a,test.t.b], UniqueKey:[test.t.a]", + " TiKVSingleGather_5 input:[Group#4], table:t", + "Group#4 Schema:[test.t.a,test.t.b], UniqueKey:[test.t.a]", + " TableScan_4 table:t, pk col:test.t.a", + "Group#3 Schema:[Column#14,test.t.b]", + " TiKVSingleGather_5 input:[Group#5], table:t", + "Group#5 Schema:[Column#14,test.t.b]", + " Aggregation_6 input:[Group#4], group by:test.t.b, funcs:sum(test.t.a)" + ] + }, + { + "SQL": "select b, sum(a) from t group by c, b", + "Result": [ + "Group#0 Schema:[test.t.b,Column#13]", + " Projection_3 input:[Group#1], test.t.b, Column#13", + "Group#1 Schema:[Column#13,test.t.b]", + " Aggregation_2 input:[Group#2], group by:test.t.b, test.t.c, funcs:sum(test.t.a), firstrow(test.t.b)", + " Aggregation_7 input:[Group#3], group by:test.t.b, test.t.c, funcs:sum(Column#14), firstrow(test.t.b)", + "Group#2 Schema:[test.t.a,test.t.b,test.t.c], UniqueKey:[test.t.a]", + " TiKVSingleGather_5 input:[Group#4], table:t", + "Group#4 Schema:[test.t.a,test.t.b,test.t.c], UniqueKey:[test.t.a]", + " TableScan_4 table:t, pk col:test.t.a", + "Group#3 Schema:[Column#14,test.t.c,test.t.b]", + " TiKVSingleGather_5 input:[Group#5], 
table:t", + "Group#5 Schema:[Column#14,test.t.c,test.t.b]", + " Aggregation_6 input:[Group#4], group by:test.t.b, test.t.c, funcs:sum(test.t.a)" + ] + } + ] + }, + { + "Name": "TestTopNRules", + "Cases": [ + { + "SQL": "select b from t order by a limit 2", + "Result": [ + "Group#0 Schema:[test.t.b]", + " Projection_5 input:[Group#1], test.t.b", + "Group#1 Schema:[test.t.b,test.t.a]", + " Projection_2 input:[Group#2], test.t.b, test.t.a", + "Group#2 Schema:[test.t.a,test.t.b]", + " TopN_9 input:[Group#3], test.t.a:asc, offset:0, count:2", + "Group#3 Schema:[test.t.a,test.t.b]", + " TiKVSingleGather_7 input:[Group#4], table:t", + "Group#4 Schema:[test.t.a,test.t.b]", + " TableScan_6 table:t, pk col:test.t.a" + ] + }, + { + "SQL": "select a+b from t order by a limit 1 offset 2", + "Result": [ + "Group#0 Schema:[Column#14]", + " Projection_5 input:[Group#1], Column#13", + "Group#1 Schema:[Column#13,test.t.a]", + " Projection_2 input:[Group#2], plus(test.t.a, test.t.b)->Column#13, test.t.a", + "Group#2 Schema:[test.t.a,test.t.b]", + " TopN_9 input:[Group#3], test.t.a:asc, offset:2, count:1", + "Group#3 Schema:[test.t.a,test.t.b]", + " TiKVSingleGather_7 input:[Group#4], table:t", + "Group#4 Schema:[test.t.a,test.t.b]", + " TableScan_6 table:t, pk col:test.t.a" + ] + }, + { + "SQL": "select c from t order by t.a limit 1", + "Result": [ + "Group#0 Schema:[test.t.c]", + " Projection_5 input:[Group#1], test.t.c", + "Group#1 Schema:[test.t.c,test.t.a]", + " Projection_2 input:[Group#2], test.t.c, test.t.a", + "Group#2 Schema:[test.t.a,test.t.c]", + " TopN_11 input:[Group#3], test.t.a:asc, offset:0, count:1", + "Group#3 Schema:[test.t.a,test.t.c]", + " TiKVSingleGather_7 input:[Group#4], table:t", + " TiKVSingleGather_9 input:[Group#5], table:t, index:c_d_e", + "Group#4 Schema:[test.t.a,test.t.c]", + " TableScan_6 table:t, pk col:test.t.a", + "Group#5 Schema:[test.t.a,test.t.c]", + " IndexScan_8 table:t, index:c, d, e" + ] + }, + { + "SQL": "select c from t order by t.a + t.b limit 1", + "Result": [ + "Group#0 Schema:[test.t.c]", + " Projection_5 input:[Group#1], test.t.c", + "Group#1 Schema:[test.t.c,test.t.a,test.t.b]", + " Projection_2 input:[Group#2], test.t.c, test.t.a, test.t.b", + "Group#2 Schema:[test.t.a,test.t.b,test.t.c]", + " TopN_9 input:[Group#3], plus(test.t.a, test.t.b):asc, offset:0, count:1", + "Group#3 Schema:[test.t.a,test.t.b,test.t.c]", + " TiKVSingleGather_7 input:[Group#4], table:t", + "Group#4 Schema:[test.t.a,test.t.b,test.t.c]", + " TableScan_6 table:t, pk col:test.t.a" + ] + } + ] + }, + { + "Name": "TestProjectionElimination", + "Cases": [ + { + "SQL": "select a, b from (select a, b from t) as t2", + "Result": [ + "Group#0 Schema:[test.t.a,test.t.b]", + " TableScan_1 table:t" + ] + }, + { + "SQL": "select a+b from (select a, b from t) as t2", + "Result": [ + "Group#0 Schema:[Column#13]", + " Projection_3 input:[Group#1], plus(test.t.a, test.t.b)->Column#13", + "Group#1 Schema:[test.t.a,test.t.b]", + " TableScan_1 table:t" + ] + }, + { + "SQL": "select a from (select a, b from (select a, b, c from t) as t2) as t3", + "Result": [ + "Group#0 Schema:[test.t.a]", + " TableScan_1 table:t" + ] + } + ] + }, + { + "Name": "TestMergeAggregationProjection", + "Cases": [ + { + "SQL": "select b, max(a) from (select a, c+d as b from t as t1) as t2 group by b", + "Result": [ + "Group#0 Schema:[Column#13,Column#14]", + " Projection_4 input:[Group#1], Column#13, Column#14", + "Group#1 Schema:[Column#14,Column#13]", + " Aggregation_5 input:[Group#2], group by:plus(test.t.c, test.t.d), 
funcs:max(test.t.a), firstrow(plus(test.t.c, test.t.d))", + "Group#2 Schema:[test.t.a,test.t.c,test.t.d]", + " TableScan_1 table:t1" + ] + }, + { + "SQL": "select max(a) from (select c+d as b, a+c as a from t as t1) as t2", + "Result": [ + "Group#0 Schema:[Column#15]", + " Projection_4 input:[Group#1], Column#15", + "Group#1 Schema:[Column#15]", + " Aggregation_5 input:[Group#2], funcs:max(plus(test.t.a, test.t.c))", + "Group#2 Schema:[test.t.a,test.t.c]", + " TableScan_1 table:t1" + ] + }, + { + "SQL": "select b, max(a) from (select a, c+d as b, @i:=0 from t as t1) as t2 group by b", + "Result": [ + "Group#0 Schema:[Column#13,Column#15]", + " Projection_4 input:[Group#1], Column#13, Column#15", + "Group#1 Schema:[Column#15,Column#13]", + " Aggregation_3 input:[Group#2], group by:Column#13, funcs:max(test.t.a), firstrow(Column#13)", + "Group#2 Schema:[test.t.a,Column#13,Column#14]", + " Projection_2 input:[Group#3], test.t.a, plus(test.t.c, test.t.d)->Column#13, setvar(i, 0)->Column#14", + "Group#3 Schema:[test.t.a,test.t.c,test.t.d]", + " TableScan_1 table:t1" + ] + } + ] + } +] diff --git a/planner/cascades/transformation_rules.go b/planner/cascades/transformation_rules.go new file mode 100644 index 0000000..52c1d2b --- /dev/null +++ b/planner/cascades/transformation_rules.go @@ -0,0 +1,802 @@ +// Copyright 2018 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package cascades + +import ( + "github.com/pingcap/tidb/expression" + "github.com/pingcap/tidb/expression/aggregation" + plannercore "github.com/pingcap/tidb/planner/core" + "github.com/pingcap/tidb/planner/memo" + "github.com/pingcap/tidb/util/ranger" +) + +// Transformation defines the interface for the transformation rules. +type Transformation interface { + // GetPattern gets the cached pattern of the rule. + GetPattern() *memo.Pattern + // Match is used to check whether the GroupExpr satisfies all the requirements of the transformation rule. + // + // The pattern only identifies the operator type, some transformation rules also need + // detailed information for certain plan operators to decide whether it is applicable. + Match(expr *memo.ExprIter) bool + // OnTransform does the real work of the optimization rule. + // + // newExprs indicates the new GroupExprs generated by the transformationrule. Multiple GroupExprs may be + // returned, e.g, EnumeratePath would convert DataSource to several possible assess paths. + // + // eraseOld indicates that the returned GroupExpr must be better than the old one, so we can remove it from Group. + // + // eraseAll indicates that the returned GroupExpr must be better than all other candidates in the Group, e.g, we can + // prune all other access paths if we found the filter is constantly false. 
+ OnTransform(old *memo.ExprIter) (newExprs []*memo.GroupExpr, eraseOld bool, eraseAll bool, err error) +} + +var defaultTransformationMap = map[memo.Operand][]Transformation{ + memo.OperandSelection: { + NewRulePushSelDownTableScan(), + NewRulePushSelDownTiKVSingleGather(), + NewRulePushSelDownSort(), + NewRulePushSelDownProjection(), + NewRulePushSelDownAggregation(), + NewRulePushSelDownJoin(), + NewRulePushSelDownIndexScan(), + }, + memo.OperandDataSource: { + NewRuleEnumeratePaths(), + }, + memo.OperandAggregation: { + NewRulePushAggDownGather(), + NewRuleMergeAggregationProjection(), + }, + memo.OperandLimit: { + NewRuleTransformLimitToTopN(), + }, + memo.OperandProjection: { + NewRuleEliminateProjection(), + NewRuleMergeAdjacentProjection(), + }, + memo.OperandTopN: { + NewRulePushTopNDownProjection(), + }, +} + +type baseRule struct { + pattern *memo.Pattern +} + +// Match implements Transformation Interface. +func (r *baseRule) Match(expr *memo.ExprIter) bool { + return true +} + +// GetPattern implements Transformation Interface. +func (r *baseRule) GetPattern() *memo.Pattern { + return r.pattern +} + +// PushSelDownTableScan pushes the selection down to TableScan. +type PushSelDownTableScan struct { + baseRule +} + +// NewRulePushSelDownTableScan creates a new Transformation PushSelDownTableScan. +// The pattern of this rule is: `Selection -> TableScan` +func NewRulePushSelDownTableScan() Transformation { + rule := &PushSelDownTableScan{} + ts := memo.NewPattern(memo.OperandTableScan, memo.EngineTiKVOnly) + p := memo.BuildPattern(memo.OperandSelection, memo.EngineTiKVOnly, ts) + rule.pattern = p + return rule +} + +// OnTransform implements Transformation interface. +// +// It transforms `sel -> ts` to one of the following new exprs: +// 1. `newSel -> newTS` +// 2. `newTS` +// +// Filters of the old `sel` operator are removed if they are used to calculate +// the key ranges of the `ts` operator. +func (r *PushSelDownTableScan) OnTransform(old *memo.ExprIter) (newExprs []*memo.GroupExpr, eraseOld bool, eraseAll bool, err error) { + sel := old.GetExpr().ExprNode.(*plannercore.LogicalSelection) + ts := old.Children[0].GetExpr().ExprNode.(*plannercore.LogicalTableScan) + if ts.Handle == nil { + return nil, false, false, nil + } + accesses, remained := ranger.DetachCondsForColumn(ts.SCtx(), sel.Conditions, ts.Handle) + if accesses == nil { + return nil, false, false, nil + } + newTblScan := plannercore.LogicalTableScan{ + Source: ts.Source, + Handle: ts.Handle, + AccessConds: ts.AccessConds.Shallow(), + }.Init(ts.SCtx()) + newTblScan.AccessConds = append(newTblScan.AccessConds, accesses...) + tblScanExpr := memo.NewGroupExpr(newTblScan) + if len(remained) == 0 { + // `sel -> ts` is transformed to `newTS`. + return []*memo.GroupExpr{tblScanExpr}, true, false, nil + } + schema := old.GetExpr().Group.Prop.Schema + tblScanGroup := memo.NewGroupWithSchema(tblScanExpr, schema) + newSel := plannercore.LogicalSelection{Conditions: remained}.Init(sel.SCtx()) + selExpr := memo.NewGroupExpr(newSel) + selExpr.Children = append(selExpr.Children, tblScanGroup) + // `sel -> ts` is transformed to `newSel ->newTS`. + return []*memo.GroupExpr{selExpr}, true, false, nil +} + +// PushSelDownIndexScan pushes a Selection down to IndexScan. +type PushSelDownIndexScan struct { + baseRule +} + +// NewRulePushSelDownIndexScan creates a new Transformation PushSelDownIndexScan. +// The pattern of this rule is `Selection -> IndexScan`. 
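// The Transformation interface above steers the search through three return
// values: newExprs, eraseOld and eraseAll. The sketch below is one plausible,
// heavily simplified driver loop showing how those flags could be interpreted;
// sketchExpr, sketchGroup and sketchRule are illustrative stand-ins, not the
// real memo.GroupExpr/memo.Group types or the optimizer's actual control flow.
type sketchExpr struct{ name string }

type sketchGroup struct{ exprs []*sketchExpr }

type sketchRule interface {
	Match(e *sketchExpr) bool
	OnTransform(e *sketchExpr) (newExprs []*sketchExpr, eraseOld, eraseAll bool, err error)
}

// applySketchRule fires one rule on one expression of a group. eraseAll drops
// every other candidate in the group (e.g. after proving a filter constantly
// false), while eraseOld only removes the expression the rule matched on.
func applySketchRule(g *sketchGroup, old *sketchExpr, r sketchRule) error {
	if !r.Match(old) {
		return nil
	}
	newExprs, eraseOld, eraseAll, err := r.OnTransform(old)
	if err != nil {
		return err
	}
	switch {
	case eraseAll:
		g.exprs = g.exprs[:0]
	case eraseOld:
		kept := g.exprs[:0]
		for _, e := range g.exprs {
			if e != old {
				kept = append(kept, e)
			}
		}
		g.exprs = kept
	}
	g.exprs = append(g.exprs, newExprs...)
	return nil
}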
+func NewRulePushSelDownIndexScan() Transformation { + rule := &PushSelDownIndexScan{} + rule.pattern = memo.BuildPattern( + memo.OperandSelection, + memo.EngineTiKVOnly, + memo.NewPattern(memo.OperandIndexScan, memo.EngineTiKVOnly), + ) + return rule +} + +// OnTransform implements Transformation interface. +// It will transform `Selection -> IndexScan` to: +// `IndexScan(with a new access range)` or +// `Selection -> IndexScan(with a new access range)` +// or just keep the two GroupExprs unchanged. +func (r *PushSelDownIndexScan) OnTransform(old *memo.ExprIter) (newExprs []*memo.GroupExpr, eraseOld bool, eraseAll bool, err error) { + sel := old.GetExpr().ExprNode.(*plannercore.LogicalSelection) + is := old.Children[0].GetExpr().ExprNode.(*plannercore.LogicalIndexScan) + if len(is.IdxCols) == 0 { + return nil, false, false, nil + } + conditions := sel.Conditions + if is.AccessConds != nil { + // If we have already pushed some conditions down here, + // we merge old AccessConds with new conditions, + // to make sure this rule can be applied more than once. + conditions = make([]expression.Expression, len(sel.Conditions)+len(is.AccessConds)) + copy(conditions, sel.Conditions) + copy(conditions[len(sel.Conditions):], is.AccessConds) + } + res, err := ranger.DetachCondAndBuildRangeForIndex(is.SCtx(), conditions, is.IdxCols, is.IdxColLens) + if err != nil { + return nil, false, false, err + } + if len(res.AccessConds) == len(is.AccessConds) { + // There is no condition can be pushed down as range, + // or the pushed down conditions are the same with before. + sameConds := true + for i := range res.AccessConds { + if !res.AccessConds[i].Equal(is.SCtx(), is.AccessConds[i]) { + sameConds = false + break + } + } + if sameConds { + return nil, false, false, nil + } + } + // TODO: `res` still has some unused fields: EqOrInCount, IsDNFCond. + newIs := plannercore.LogicalIndexScan{ + Source: is.Source, + IsDoubleRead: is.IsDoubleRead, + EqCondCount: res.EqCondCount, + AccessConds: res.AccessConds, + Ranges: res.Ranges, + Index: is.Index, + Columns: is.Columns, + FullIdxCols: is.FullIdxCols, + FullIdxColLens: is.FullIdxColLens, + IdxCols: is.IdxCols, + IdxColLens: is.IdxColLens, + }.Init(is.SCtx()) + isExpr := memo.NewGroupExpr(newIs) + + if len(res.RemainedConds) == 0 { + return []*memo.GroupExpr{isExpr}, true, false, nil + } + isGroup := memo.NewGroupWithSchema(isExpr, old.Children[0].GetExpr().Group.Prop.Schema) + newSel := plannercore.LogicalSelection{Conditions: res.RemainedConds}.Init(sel.SCtx()) + selExpr := memo.NewGroupExpr(newSel) + selExpr.SetChildren(isGroup) + return []*memo.GroupExpr{selExpr}, true, false, nil +} + +// PushSelDownTiKVSingleGather pushes the selection down to child of TiKVSingleGather. +type PushSelDownTiKVSingleGather struct { + baseRule +} + +// NewRulePushSelDownTiKVSingleGather creates a new Transformation PushSelDownTiKVSingleGather. +// The pattern of this rule is `Selection -> TiKVSingleGather -> Any`. +func NewRulePushSelDownTiKVSingleGather() Transformation { + any := memo.NewPattern(memo.OperandAny, memo.EngineTiKVOnly) + tg := memo.BuildPattern(memo.OperandTiKVSingleGather, memo.EngineTiDBOnly, any) + p := memo.BuildPattern(memo.OperandSelection, memo.EngineTiDBOnly, tg) + + rule := &PushSelDownTiKVSingleGather{} + rule.pattern = p + return rule +} + +// OnTransform implements Transformation interface. +// +// It transforms `oldSel -> oldTg -> any` to one of the following new exprs: +// 1. `newTg -> pushedSel -> any` +// 2. 
`remainedSel -> newTg -> pushedSel -> any` +func (r *PushSelDownTiKVSingleGather) OnTransform(old *memo.ExprIter) (newExprs []*memo.GroupExpr, eraseOld bool, eraseAll bool, err error) { + sel := old.GetExpr().ExprNode.(*plannercore.LogicalSelection) + sg := old.Children[0].GetExpr().ExprNode.(*plannercore.TiKVSingleGather) + childGroup := old.Children[0].Children[0].Group + var pushed, remained []expression.Expression + sctx := sg.SCtx() + _, pushed, remained = expression.ExpressionsToPB(sctx.GetSessionVars().StmtCtx, sel.Conditions, sctx.GetClient()) + if len(pushed) == 0 { + return nil, false, false, nil + } + pushedSel := plannercore.LogicalSelection{Conditions: pushed}.Init(sctx) + pushedSelExpr := memo.NewGroupExpr(pushedSel) + pushedSelExpr.Children = append(pushedSelExpr.Children, childGroup) + pushedSelGroup := memo.NewGroupWithSchema(pushedSelExpr, childGroup.Prop.Schema).SetEngineType(childGroup.EngineType) + // The field content of TiKVSingleGather would not be modified currently, so we + // just reference the same tg instead of making a copy of it. + // + // TODO: if we save pushed filters later in TiKVSingleGather, in order to do partition + // pruning or skyline pruning, we need to make a copy of the TiKVSingleGather here. + tblGatherExpr := memo.NewGroupExpr(sg) + tblGatherExpr.Children = append(tblGatherExpr.Children, pushedSelGroup) + if len(remained) == 0 { + // `oldSel -> oldTg -> any` is transformed to `newTg -> pushedSel -> any`. + return []*memo.GroupExpr{tblGatherExpr}, true, false, nil + } + tblGatherGroup := memo.NewGroupWithSchema(tblGatherExpr, pushedSelGroup.Prop.Schema) + remainedSel := plannercore.LogicalSelection{Conditions: remained}.Init(sel.SCtx()) + remainedSelExpr := memo.NewGroupExpr(remainedSel) + remainedSelExpr.Children = append(remainedSelExpr.Children, tblGatherGroup) + // `oldSel -> oldTg -> any` is transformed to `remainedSel -> newTg -> pushedSel -> any`. + return []*memo.GroupExpr{remainedSelExpr}, true, false, nil +} + +// EnumeratePaths converts DataSource to table scan and index scans. +type EnumeratePaths struct { + baseRule +} + +// NewRuleEnumeratePaths creates a new Transformation EnumeratePaths. +// The pattern of this rule is: `DataSource`. +func NewRuleEnumeratePaths() Transformation { + rule := &EnumeratePaths{} + rule.pattern = memo.NewPattern(memo.OperandDataSource, memo.EngineTiDBOnly) + return rule +} + +// OnTransform implements Transformation interface. +func (r *EnumeratePaths) OnTransform(old *memo.ExprIter) (newExprs []*memo.GroupExpr, eraseOld bool, eraseAll bool, err error) { + ds := old.GetExpr().ExprNode.(*plannercore.DataSource) + gathers := ds.Convert2Gathers() + for _, gather := range gathers { + expr := memo.Convert2GroupExpr(gather) + expr.Children[0].SetEngineType(memo.EngineTiKV) + newExprs = append(newExprs, expr) + } + return newExprs, true, false, nil +} + +// PushAggDownGather splits Aggregation to two stages, final and partial1, +// and pushed the partial Aggregation down to the child of TiKVSingleGather. +type PushAggDownGather struct { + baseRule +} + +// NewRulePushAggDownGather creates a new Transformation PushAggDownGather. +// The pattern of this rule is: `Aggregation -> TiKVSingleGather`. +func NewRulePushAggDownGather() Transformation { + rule := &PushAggDownGather{} + rule.pattern = memo.BuildPattern( + memo.OperandAggregation, + memo.EngineTiDBOnly, + memo.NewPattern(memo.OperandTiKVSingleGather, memo.EngineTiDBOnly), + ) + return rule +} + +// Match implements Transformation interface. 
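// PushSelDownTiKVSingleGather above relies on expression.ExpressionsToPB to
// decide which filters TiKV can evaluate: only those become a new Selection
// below the gather, while the rest stay in a Selection above it. The helper
// below is an import-free sketch of that partitioning step; canRunInCop is a
// made-up stand-in for the protobuf-conversion check, and the string
// conditions are purely illustrative.
func splitCopConditions(conds []string, canRunInCop func(string) bool) (pushed, remained []string) {
	for _, c := range conds {
		if canRunInCop(c) {
			pushed = append(pushed, c) // evaluated inside TiKV, below the gather
		} else {
			remained = append(remained, c) // evaluated in TiDB, above the gather
		}
	}
	return pushed, remained
}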
+func (r *PushAggDownGather) Match(expr *memo.ExprIter) bool { + agg := expr.GetExpr().ExprNode.(*plannercore.LogicalAggregation) + for _, aggFunc := range agg.AggFuncs { + if aggFunc.Mode != aggregation.CompleteMode { + return false + } + } + return plannercore.CheckAggCanPushCop(agg.SCtx(), agg.AggFuncs, agg.GroupByItems) +} + +// OnTransform implements Transformation interface. +// It will transform `Agg->Gather` to `Agg(Final) -> Gather -> Agg(Partial1)`. +func (r *PushAggDownGather) OnTransform(old *memo.ExprIter) (newExprs []*memo.GroupExpr, eraseOld bool, eraseAll bool, err error) { + agg := old.GetExpr().ExprNode.(*plannercore.LogicalAggregation) + aggSchema := old.GetExpr().Group.Prop.Schema + gather := old.Children[0].GetExpr().ExprNode.(*plannercore.TiKVSingleGather) + childGroup := old.Children[0].GetExpr().Children[0] + // The old Aggregation should stay unchanged for other transformation. + // So we build a new LogicalAggregation for the partialAgg. + partialAggFuncs := make([]*aggregation.AggFuncDesc, len(agg.AggFuncs)) + for i, aggFunc := range agg.AggFuncs { + newAggFunc := &aggregation.AggFuncDesc{ + Mode: aggregation.Partial1Mode, + } + newAggFunc.Name = aggFunc.Name + newAggFunc.RetTp = aggFunc.RetTp + // The args will be changed below, so that we have to build a new slice for it. + newArgs := make([]expression.Expression, len(aggFunc.Args)) + copy(newArgs, aggFunc.Args) + newAggFunc.Args = newArgs + partialAggFuncs[i] = newAggFunc + } + partialGbyItems := make([]expression.Expression, len(agg.GroupByItems)) + copy(partialGbyItems, agg.GroupByItems) + partialAgg := plannercore.LogicalAggregation{ + AggFuncs: partialAggFuncs, + GroupByItems: partialGbyItems, + }.Init(agg.SCtx()) + + finalAggFuncs, finalGbyItems, partialSchema := + plannercore.BuildFinalModeAggregation(partialAgg.SCtx(), partialAgg.AggFuncs, partialAgg.GroupByItems, aggSchema) + // Remove unnecessary FirstRow. + partialAgg.AggFuncs = + plannercore.RemoveUnnecessaryFirstRow(partialAgg.SCtx(), finalAggFuncs, finalGbyItems, partialAgg.AggFuncs, partialAgg.GroupByItems, partialSchema) + finalAgg := plannercore.LogicalAggregation{ + AggFuncs: finalAggFuncs, + GroupByItems: finalGbyItems, + }.Init(agg.SCtx()) + + partialAggExpr := memo.NewGroupExpr(partialAgg) + partialAggExpr.SetChildren(childGroup) + partialAggGroup := memo.NewGroupWithSchema(partialAggExpr, partialSchema).SetEngineType(childGroup.EngineType) + gatherExpr := memo.NewGroupExpr(gather) + gatherExpr.SetChildren(partialAggGroup) + gatherGroup := memo.NewGroupWithSchema(gatherExpr, partialSchema) + finalAggExpr := memo.NewGroupExpr(finalAgg) + finalAggExpr.SetChildren(gatherGroup) + // We don't erase the old complete mode Aggregation because + // this transformation would not always be better. + return []*memo.GroupExpr{finalAggExpr}, false, false, nil +} + +// PushSelDownSort pushes the Selection down to the child of Sort. +type PushSelDownSort struct { + baseRule +} + +// NewRulePushSelDownSort creates a new Transformation PushSelDownSort. +// The pattern of this rule is: `Selection -> Sort`. +func NewRulePushSelDownSort() Transformation { + rule := &PushSelDownSort{} + rule.pattern = memo.BuildPattern( + memo.OperandSelection, + memo.EngineTiDBOnly, + memo.NewPattern(memo.OperandSort, memo.EngineTiDBOnly), + ) + return rule +} + +// OnTransform implements Transformation interface. +// It will transform `sel->sort->x` to `sort->sel->x`. 
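// PushAggDownGather above splits one complete-mode aggregation into a partial
// stage executed below the gather and a final stage that merges the partial
// results. The two functions below sketch the arithmetic for `sum(...) group
// by key`; sketchAggRow and the notion of one partial result per "region" are
// illustrative assumptions, not the real aggregation framework.
type sketchAggRow struct {
	Key string
	Val int64
}

// partialSumSketch is the Partial1-style stage: it aggregates the rows of one
// region and emits one partial sum per group key.
func partialSumSketch(rows []sketchAggRow) map[string]int64 {
	out := make(map[string]int64, len(rows))
	for _, r := range rows {
		out[r.Key] += r.Val
	}
	return out
}

// finalSumSketch is the Final-style stage: it merges the per-region partial
// sums into the result the complete-mode aggregation would have produced.
func finalSumSketch(partials []map[string]int64) map[string]int64 {
	out := make(map[string]int64)
	for _, p := range partials {
		for k, v := range p {
			out[k] += v
		}
	}
	return out
}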
+func (r *PushSelDownSort) OnTransform(old *memo.ExprIter) (newExprs []*memo.GroupExpr, eraseOld bool, eraseAll bool, err error) { + sel := old.GetExpr().ExprNode.(*plannercore.LogicalSelection) + sort := old.Children[0].GetExpr().ExprNode.(*plannercore.LogicalSort) + childGroup := old.Children[0].GetExpr().Children[0] + + newSelExpr := memo.NewGroupExpr(sel) + newSelExpr.Children = append(newSelExpr.Children, childGroup) + newSelGroup := memo.NewGroupWithSchema(newSelExpr, childGroup.Prop.Schema) + + newSortExpr := memo.NewGroupExpr(sort) + newSortExpr.Children = append(newSortExpr.Children, newSelGroup) + return []*memo.GroupExpr{newSortExpr}, true, false, nil +} + +// PushSelDownProjection pushes the Selection down to the child of Projection. +type PushSelDownProjection struct { + baseRule +} + +// NewRulePushSelDownProjection creates a new Transformation PushSelDownProjection. +// The pattern of this rule is: `Selection -> Projection`. +func NewRulePushSelDownProjection() Transformation { + rule := &PushSelDownProjection{} + rule.pattern = memo.BuildPattern( + memo.OperandSelection, + memo.EngineTiDBOnly, + memo.NewPattern(memo.OperandProjection, memo.EngineTiDBOnly), + ) + return rule +} + +// OnTransform implements Transformation interface. +// It will transform `selection -> projection -> x` to +// 1. `projection -> selection -> x` or +// 2. `selection -> projection -> selection -> x` or +// 3. just keep unchanged. +func (r *PushSelDownProjection) OnTransform(old *memo.ExprIter) (newExprs []*memo.GroupExpr, eraseOld bool, eraseAll bool, err error) { + sel := old.GetExpr().ExprNode.(*plannercore.LogicalSelection) + proj := old.Children[0].GetExpr().ExprNode.(*plannercore.LogicalProjection) + projSchema := old.Children[0].Prop.Schema + childGroup := old.Children[0].GetExpr().Children[0] + for _, expr := range proj.Exprs { + if expression.HasAssignSetVarFunc(expr) { + return nil, false, false, nil + } + } + canBePushed := make([]expression.Expression, 0, len(sel.Conditions)) + canNotBePushed := make([]expression.Expression, 0, len(sel.Conditions)) + for _, cond := range sel.Conditions { + if !expression.HasGetSetVarFunc(cond) { + canBePushed = append(canBePushed, expression.ColumnSubstitute(cond, projSchema, proj.Exprs)) + } else { + canNotBePushed = append(canNotBePushed, cond) + } + } + if len(canBePushed) == 0 { + return nil, false, false, nil + } + newBottomSel := plannercore.LogicalSelection{Conditions: canBePushed}.Init(sel.SCtx()) + newBottomSelExpr := memo.NewGroupExpr(newBottomSel) + newBottomSelExpr.SetChildren(childGroup) + newBottomSelGroup := memo.NewGroupWithSchema(newBottomSelExpr, childGroup.Prop.Schema) + newProjExpr := memo.NewGroupExpr(proj) + newProjExpr.SetChildren(newBottomSelGroup) + if len(canNotBePushed) == 0 { + return []*memo.GroupExpr{newProjExpr}, true, false, nil + } + newProjGroup := memo.NewGroupWithSchema(newProjExpr, projSchema) + newTopSel := plannercore.LogicalSelection{Conditions: canNotBePushed}.Init(sel.SCtx()) + newTopSelExpr := memo.NewGroupExpr(newTopSel) + newTopSelExpr.SetChildren(newProjGroup) + return []*memo.GroupExpr{newTopSelExpr}, true, false, nil +} + +// PushSelDownAggregation pushes Selection down to the child of Aggregation. +type PushSelDownAggregation struct { + baseRule +} + +// NewRulePushSelDownAggregation creates a new Transformation PushSelDownAggregation. +// The pattern of this rule is `Selection -> Aggregation`. 
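// PushSelDownProjection above does two things per condition: it rewrites the
// condition in terms of the projection's input via expression.ColumnSubstitute,
// and it refuses to push anything that reads or writes a user variable. The
// sketch below models that with a tiny condition type; sketchCond, its fields
// and projDefs (output column name -> defining input expression, as strings)
// are illustrative assumptions rather than the real expression types.
type sketchCond struct {
	Col, Op, Value string
	UsesUserVar    bool // stand-in for expression.HasGetSetVarFunc
}

func splitCondsThroughProjection(conds []sketchCond, projDefs map[string]string) (pushed, kept []sketchCond) {
	for _, c := range conds {
		if c.UsesUserVar {
			kept = append(kept, c) // must stay above the projection
			continue
		}
		if def, ok := projDefs[c.Col]; ok {
			c.Col = def // stand-in for expression.ColumnSubstitute
		}
		pushed = append(pushed, c)
	}
	return pushed, kept
}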
+func NewRulePushSelDownAggregation() Transformation { + rule := &PushSelDownAggregation{} + rule.pattern = memo.BuildPattern( + memo.OperandSelection, + memo.EngineAll, + memo.NewPattern(memo.OperandAggregation, memo.EngineAll), + ) + return rule +} + +// OnTransform implements Transformation interface. +// It will transform `sel->agg->x` to `agg->sel->x` or `sel->agg->sel->x` +// or just keep the selection unchanged. +func (r *PushSelDownAggregation) OnTransform(old *memo.ExprIter) (newExprs []*memo.GroupExpr, eraseOld bool, eraseAll bool, err error) { + // TODO: implement the algo according to the header comment. + return []*memo.GroupExpr{old.GetExpr()}, false, false, nil +} + +// TransformLimitToTopN transforms Limit+Sort to TopN. +type TransformLimitToTopN struct { + baseRule +} + +// NewRuleTransformLimitToTopN creates a new Transformation TransformLimitToTopN. +// The pattern of this rule is `Limit -> Sort`. +func NewRuleTransformLimitToTopN() Transformation { + rule := &TransformLimitToTopN{} + rule.pattern = memo.BuildPattern( + memo.OperandLimit, + memo.EngineTiDBOnly, + memo.NewPattern(memo.OperandSort, memo.EngineTiDBOnly), + ) + return rule +} + +// OnTransform implements Transformation interface. +// This rule will transform `Limit -> Sort -> x` to `TopN -> x`. +func (r *TransformLimitToTopN) OnTransform(old *memo.ExprIter) (newExprs []*memo.GroupExpr, eraseOld bool, eraseAll bool, err error) { + limit := old.GetExpr().ExprNode.(*plannercore.LogicalLimit) + sort := old.Children[0].GetExpr().ExprNode.(*plannercore.LogicalSort) + childGroup := old.Children[0].GetExpr().Children[0] + topN := plannercore.LogicalTopN{ + ByItems: sort.ByItems, + Offset: limit.Offset, + Count: limit.Count, + }.Init(limit.SCtx()) + topNExpr := memo.NewGroupExpr(topN) + topNExpr.SetChildren(childGroup) + return []*memo.GroupExpr{topNExpr}, true, false, nil +} + +// PushSelDownJoin pushes Selection through Join. +type PushSelDownJoin struct { + baseRule +} + +// NewRulePushSelDownJoin creates a new Transformation PushSelDownJoin. +// The pattern of this rule is `Selection -> Join`. +func NewRulePushSelDownJoin() Transformation { + rule := &PushSelDownJoin{} + rule.pattern = memo.BuildPattern( + memo.OperandSelection, + memo.EngineTiDBOnly, + memo.NewPattern(memo.OperandJoin, memo.EngineTiDBOnly), + ) + return rule +} + +// buildChildSelectionGroup builds a new childGroup if the pushed down condition is not empty. +func buildChildSelectionGroup( + oldSel *plannercore.LogicalSelection, + conditions []expression.Expression, + childGroup *memo.Group) *memo.Group { + if len(conditions) == 0 { + return childGroup + } + newSel := plannercore.LogicalSelection{Conditions: conditions}.Init(oldSel.SCtx()) + groupExpr := memo.NewGroupExpr(newSel) + groupExpr.SetChildren(childGroup) + newChild := memo.NewGroupWithSchema(groupExpr, childGroup.Prop.Schema) + return newChild +} + +// OnTransform implements Transformation interface. +// This rule tries to pushes the Selection through Join. Besides, this rule fulfills the `XXXConditions` field of Join. +func (r *PushSelDownJoin) OnTransform(old *memo.ExprIter) (newExprs []*memo.GroupExpr, eraseOld bool, eraseAll bool, err error) { + sel := old.GetExpr().ExprNode.(*plannercore.LogicalSelection) + joinExpr := old.Children[0].GetExpr() + // TODO: we need to create a new LogicalJoin here. 
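	// For an inner join, the branch below:
	//   1. gathers the join's own Left/Right/Equal/Other conditions together
	//      with the Selection's conditions,
	//   2. propagates constants and, if the combined filter is constantly
	//      false or null, collapses the whole subtree to a TableDual
	//      (returned with eraseAll set to true),
	//   3. re-derives the equal/other conditions via ExtractOnCondition, and
	//   4. pushes the remaining per-side filters into new Selections built on
	//      top of each child group.
	// Other join types are not handled yet (see the empty default branch and
	// the TODO below).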
+ join := joinExpr.ExprNode.(*plannercore.LogicalJoin) + sctx := sel.SCtx() + leftGroup := old.Children[0].GetExpr().Children[0] + rightGroup := old.Children[0].GetExpr().Children[1] + var equalCond []*expression.ScalarFunction + var leftPushCond, rightPushCond, otherCond, leftCond, rightCond []expression.Expression + switch join.JoinType { + case plannercore.InnerJoin: + tempCond := make([]expression.Expression, 0, + len(join.LeftConditions)+len(join.RightConditions)+len(join.EqualConditions)+len(join.OtherConditions)+len(sel.Conditions)) + tempCond = append(tempCond, join.LeftConditions...) + tempCond = append(tempCond, join.RightConditions...) + tempCond = append(tempCond, expression.ScalarFuncs2Exprs(join.EqualConditions)...) + tempCond = append(tempCond, join.OtherConditions...) + tempCond = append(tempCond, sel.Conditions...) + tempCond = expression.ExtractFiltersFromDNFs(sctx, tempCond) + tempCond = expression.PropagateConstant(sctx, tempCond) + // Return table dual when filter is constant false or null. + dual := plannercore.Conds2TableDual(join, tempCond) + if dual != nil { + return []*memo.GroupExpr{memo.NewGroupExpr(dual)}, false, true, nil + } + equalCond, leftPushCond, rightPushCond, otherCond = join.ExtractOnCondition(tempCond, leftGroup.Prop.Schema, rightGroup.Prop.Schema, true, true) + join.LeftConditions = nil + join.RightConditions = nil + join.EqualConditions = equalCond + join.OtherConditions = otherCond + leftCond = leftPushCond + rightCond = rightPushCond + default: + // TODO: Enhance this rule to deal with LeftOuter/RightOuter/Semi/SmiAnti/LeftOuterSemi/LeftOuterSemiAnti Joins. + } + leftCond = expression.RemoveDupExprs(sctx, leftCond) + rightCond = expression.RemoveDupExprs(sctx, rightCond) + for _, eqCond := range join.EqualConditions { + join.LeftJoinKeys = append(join.LeftJoinKeys, eqCond.GetArgs()[0].(*expression.Column)) + join.RightJoinKeys = append(join.RightJoinKeys, eqCond.GetArgs()[1].(*expression.Column)) + } + // TODO: Update EqualConditions like what we have done in the method join.updateEQCond() before. + leftGroup = buildChildSelectionGroup(sel, leftCond, joinExpr.Children[0]) + rightGroup = buildChildSelectionGroup(sel, rightCond, joinExpr.Children[1]) + newJoinExpr := memo.NewGroupExpr(join) + newJoinExpr.SetChildren(leftGroup, rightGroup) + return []*memo.GroupExpr{newJoinExpr}, true, false, nil +} + +// EliminateProjection eliminates the projection. +type EliminateProjection struct { + baseRule +} + +// NewRuleEliminateProjection creates a new Transformation EliminateProjection. +// The pattern of this rule is `Projection -> Any`. +func NewRuleEliminateProjection() Transformation { + rule := &EliminateProjection{} + rule.pattern = memo.BuildPattern( + memo.OperandProjection, + memo.EngineTiDBOnly, + memo.NewPattern(memo.OperandAny, memo.EngineTiDBOnly), + ) + return rule +} + +// OnTransform implements Transformation interface. +// This rule tries to eliminate the projection whose output columns are the same with its child. +func (r *EliminateProjection) OnTransform(old *memo.ExprIter) (newExprs []*memo.GroupExpr, eraseOld bool, eraseAll bool, err error) { + child := old.Children[0] + if child.Group.Prop.Schema.Len() != old.GetExpr().Group.Prop.Schema.Len() { + return nil, false, false, nil + } + + oldCols := old.GetExpr().Group.Prop.Schema.Columns + for i, col := range child.Group.Prop.Schema.Columns { + if !col.Equal(nil, oldCols[i]) { + return nil, false, false, nil + } + } + + // Promote the children group's expression. 
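	// Returning every equivalent expression of the child group, together with
	// eraseOld set to true, lets the caller replace this no-op Projection's
	// GroupExpr with the child's alternatives, effectively merging the two
	// groups in the memo.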
+ finalGroupExprs := make([]*memo.GroupExpr, 0, child.Group.Equivalents.Len()) + for elem := child.Group.Equivalents.Front(); elem != nil; elem = elem.Next() { + finalGroupExprs = append(finalGroupExprs, elem.Value.(*memo.GroupExpr)) + } + return finalGroupExprs, true, false, nil +} + +// MergeAdjacentProjection merge the adjacent projection. +type MergeAdjacentProjection struct { + baseRule +} + +// NewRuleMergeAdjacentProjection creates a new Transformation MergeAdjacentProjection. +// The pattern of this rule is `Projection -> Projection`. +func NewRuleMergeAdjacentProjection() Transformation { + rule := &MergeAdjacentProjection{} + rule.pattern = memo.BuildPattern( + memo.OperandProjection, + memo.EngineTiDBOnly, + memo.NewPattern(memo.OperandProjection, memo.EngineTiDBOnly), + ) + return rule +} + +// OnTransform implements Transformation interface. +// It will transform `proj->proj->x` to `proj->x` +// or just keep the adjacent projections unchanged. +func (r *MergeAdjacentProjection) OnTransform(old *memo.ExprIter) (newExprs []*memo.GroupExpr, eraseOld bool, eraseAll bool, err error) { + proj := old.GetExpr().ExprNode.(*plannercore.LogicalProjection) + childGroup := old.Children[0].Group + child := old.Children[0].GetExpr().ExprNode.(*plannercore.LogicalProjection) + if plannercore.ExprsHasSideEffects(child.Exprs) { + return nil, false, false, nil + } + + replace := make(map[string]*expression.Column) + for i, col := range childGroup.Prop.Schema.Columns { + if colOrigin, ok := child.Exprs[i].(*expression.Column); ok { + replace[string(col.HashCode(nil))] = colOrigin + } + } + + newProj := plannercore.LogicalProjection{Exprs: make([]expression.Expression, len(proj.Exprs))}.Init(proj.SCtx()) + newProj.SetSchema(old.GetExpr().Group.Prop.Schema) + for i, expr := range proj.Exprs { + newExpr := expr.Clone() + plannercore.ResolveExprAndReplace(newExpr, replace) + newProj.Exprs[i] = plannercore.ReplaceColumnOfExpr(newExpr, child, childGroup.Prop.Schema) + } + + newProjExpr := memo.NewGroupExpr(newProj) + newProjExpr.SetChildren(old.Children[0].GetExpr().Children[0]) + return []*memo.GroupExpr{newProjExpr}, true, false, nil +} + +// PushTopNDownProjection pushes TopN to Projection. +type PushTopNDownProjection struct { + baseRule +} + +// NewRulePushTopNDownProjection creates a new Transformation PushTopNDownProjection. +// The pattern of this rule is `TopN->Projection->X` to `Projection->TopN->X`. +func NewRulePushTopNDownProjection() Transformation { + rule := &PushTopNDownProjection{} + rule.pattern = memo.BuildPattern( + memo.OperandTopN, + memo.EngineTiDBOnly, + memo.NewPattern(memo.OperandProjection, memo.EngineTiDBOnly), + ) + return rule +} + +// Match implements Transformation interface. +func (r *PushTopNDownProjection) Match(expr *memo.ExprIter) bool { + proj := expr.Children[0].GetExpr().ExprNode.(*plannercore.LogicalProjection) + for _, expr := range proj.Exprs { + if expression.HasAssignSetVarFunc(expr) { + return false + } + } + return true +} + +// OnTransform implements Transformation interface. +// This rule tries to pushes the TopN through Projection. 
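// PushTopNDownProjection above substitutes the projection's expressions into
// the TopN sort items and then drops items that have become constants. The
// sketch below models that with string expressions; sketchByItem, projDefs
// (output column -> defining expression) and isConstant are illustrative
// stand-ins for ByItems, proj.Exprs and the *expression.Constant type check.
type sketchByItem struct {
	Expr string
	Desc bool
}

func pushTopNItemsThroughProjection(items []sketchByItem, projDefs map[string]string, isConstant func(string) bool) []sketchByItem {
	out := make([]sketchByItem, 0, len(items))
	for _, it := range items {
		if def, ok := projDefs[it.Expr]; ok {
			it.Expr = def // stand-in for expression.ColumnSubstitute
		}
		if isConstant(it.Expr) {
			continue // a constant sort key cannot affect the ordering
		}
		out = append(out, it)
	}
	return out
}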
+func (r *PushTopNDownProjection) OnTransform(old *memo.ExprIter) (newExprs []*memo.GroupExpr, eraseOld bool, eraseAll bool, err error) { + topN := old.GetExpr().ExprNode.(*plannercore.LogicalTopN) + proj := old.Children[0].GetExpr().ExprNode.(*plannercore.LogicalProjection) + childGroup := old.Children[0].GetExpr().Children[0] + + newTopN := plannercore.LogicalTopN{ + Offset: topN.Offset, + Count: topN.Count, + }.Init(topN.SCtx()) + + newTopN.ByItems = make([]*plannercore.ByItems, 0, len(topN.ByItems)) + for _, by := range topN.ByItems { + newTopN.ByItems = append(newTopN.ByItems, &plannercore.ByItems{ + Expr: expression.ColumnSubstitute(by.Expr, old.Children[0].Group.Prop.Schema, proj.Exprs), + Desc: by.Desc, + }) + } + + // remove meaningless constant sort items. + for i := len(newTopN.ByItems) - 1; i >= 0; i-- { + switch newTopN.ByItems[i].Expr.(type) { + case *expression.Constant: + topN.ByItems = append(newTopN.ByItems[:i], newTopN.ByItems[i+1:]...) + } + } + projExpr := memo.NewGroupExpr(proj) + topNExpr := memo.NewGroupExpr(newTopN) + topNExpr.SetChildren(childGroup) + topNGroup := memo.NewGroupWithSchema(topNExpr, childGroup.Prop.Schema) + projExpr.SetChildren(topNGroup) + return []*memo.GroupExpr{projExpr}, true, false, nil +} + +// MergeAggregationProjection merges the Projection below an Aggregation as a new Aggregation. +// The Projection may be regenerated in the ImplementationPhase. But this rule allows the +// Aggregation to match other rules, such as MergeAdjacentAggregation. +type MergeAggregationProjection struct { + baseRule +} + +// NewRuleMergeAggregationProjection creates a new Transformation MergeAggregationProjection. +// The pattern of this rule is: `Aggregation -> Projection`. +func NewRuleMergeAggregationProjection() Transformation { + rule := &MergeAggregationProjection{} + rule.pattern = memo.BuildPattern( + memo.OperandAggregation, + memo.EngineTiDBOnly, + memo.NewPattern(memo.OperandProjection, memo.EngineTiDBOnly), + ) + return rule +} + +// Match implements Transformation interface. +func (r *MergeAggregationProjection) Match(old *memo.ExprIter) bool { + proj := old.Children[0].GetExpr().ExprNode.(*plannercore.LogicalProjection) + if plannercore.ExprsHasSideEffects(proj.Exprs) { + return false + } + return true +} + +// OnTransform implements Transformation interface. +// It will transform `Aggregation->Projection->X` to `Aggregation->X`. +func (r *MergeAggregationProjection) OnTransform(old *memo.ExprIter) (newExprs []*memo.GroupExpr, eraseOld bool, eraseAll bool, err error) { + // TODO: implement the body according to the header comment. + return []*memo.GroupExpr{old.GetExpr()}, false, false, nil +} diff --git a/planner/cascades/transformation_rules_test.go b/planner/cascades/transformation_rules_test.go new file mode 100644 index 0000000..ffa0bc2 --- /dev/null +++ b/planner/cascades/transformation_rules_test.go @@ -0,0 +1,200 @@ +// Copyright 2019 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package cascades + +import ( + "context" + + . 
"github.com/pingcap/check" + "github.com/pingcap/tidb/infoschema" + "github.com/pingcap/tidb/parser" + "github.com/pingcap/tidb/parser/model" + plannercore "github.com/pingcap/tidb/planner/core" + "github.com/pingcap/tidb/planner/memo" + "github.com/pingcap/tidb/sessionctx" + "github.com/pingcap/tidb/util/testutil" +) + +var _ = Suite(&testTransformationRuleSuite{}) + +type testTransformationRuleSuite struct { + *parser.Parser + is infoschema.InfoSchema + sctx sessionctx.Context + testData testutil.TestData + optimizer *Optimizer +} + +func (s *testTransformationRuleSuite) SetUpSuite(c *C) { + s.is = infoschema.MockInfoSchema([]*model.TableInfo{plannercore.MockSignedTable()}) + s.sctx = plannercore.MockContext() + s.Parser = parser.New() + s.optimizer = NewOptimizer() + var err error + s.testData, err = testutil.LoadTestSuiteData("testdata", "transformation_rules_suite") + c.Assert(err, IsNil) +} + +func (s *testTransformationRuleSuite) TearDownSuite(c *C) { + c.Assert(s.testData.GenerateOutputIfNeeded(), IsNil) +} + +func testGroupToString(input []string, output []struct { + SQL string + Result []string +}, s *testTransformationRuleSuite, c *C) { + for i, sql := range input { + stmt, err := s.ParseOneStmt(sql, "", "") + c.Assert(err, IsNil) + p, _, err := plannercore.BuildLogicalPlan(context.Background(), s.sctx, stmt, s.is) + c.Assert(err, IsNil) + logic, ok := p.(plannercore.LogicalPlan) + c.Assert(ok, IsTrue) + logic, err = s.optimizer.onPhasePreprocessing(s.sctx, logic) + c.Assert(err, IsNil) + group := memo.Convert2Group(logic) + err = s.optimizer.onPhaseExploration(s.sctx, group) + c.Assert(err, IsNil) + s.testData.OnRecord(func() { + output[i].SQL = sql + output[i].Result = ToString(group) + }) + c.Assert(ToString(group), DeepEquals, output[i].Result) + } +} + +func (s *testTransformationRuleSuite) TestAggPushDownGather(c *C) { + s.optimizer.ResetTransformationRules(map[memo.Operand][]Transformation{ + memo.OperandAggregation: { + NewRulePushAggDownGather(), + }, + memo.OperandDataSource: { + NewRuleEnumeratePaths(), + }, + }) + defer func() { + s.optimizer.ResetTransformationRules(defaultTransformationMap) + }() + var input []string + var output []struct { + SQL string + Result []string + } + s.testData.GetTestCases(c, &input, &output) + for i, sql := range input { + stmt, err := s.ParseOneStmt(sql, "", "") + c.Assert(err, IsNil) + p, _, err := plannercore.BuildLogicalPlan(context.Background(), s.sctx, stmt, s.is) + c.Assert(err, IsNil) + logic, ok := p.(plannercore.LogicalPlan) + c.Assert(ok, IsTrue) + logic, err = s.optimizer.onPhasePreprocessing(s.sctx, logic) + c.Assert(err, IsNil) + group := memo.Convert2Group(logic) + err = s.optimizer.onPhaseExploration(s.sctx, group) + c.Assert(err, IsNil) + // BuildKeyInfo here to test the KeyInfo for partialAgg. 
+ group.BuildKeyInfo() + s.testData.OnRecord(func() { + output[i].SQL = sql + output[i].Result = ToString(group) + }) + c.Assert(ToString(group), DeepEquals, output[i].Result) + } +} + +func (s *testTransformationRuleSuite) TestPredicatePushDown(c *C) { + s.optimizer.ResetTransformationRules(map[memo.Operand][]Transformation{ + memo.OperandSelection: { + NewRulePushSelDownTableScan(), + NewRulePushSelDownTiKVSingleGather(), + NewRulePushSelDownSort(), + NewRulePushSelDownProjection(), + NewRulePushSelDownAggregation(), + NewRulePushSelDownJoin(), + NewRulePushSelDownIndexScan(), + }, + memo.OperandDataSource: { + NewRuleEnumeratePaths(), + }, + }) + defer func() { + s.optimizer.ResetTransformationRules(defaultTransformationMap) + }() + var input []string + var output []struct { + SQL string + Result []string + } + s.testData.GetTestCases(c, &input, &output) + testGroupToString(input, output, s, c) +} + +func (s *testTransformationRuleSuite) TestTopNRules(c *C) { + s.optimizer.ResetTransformationRules(map[memo.Operand][]Transformation{ + memo.OperandLimit: { + NewRuleTransformLimitToTopN(), + }, + memo.OperandDataSource: { + NewRuleEnumeratePaths(), + }, + memo.OperandTopN: { + NewRulePushTopNDownProjection(), + }, + }) + var input []string + var output []struct { + SQL string + Result []string + } + s.testData.GetTestCases(c, &input, &output) + testGroupToString(input, output, s, c) +} + +func (s *testTransformationRuleSuite) TestProjectionElimination(c *C) { + s.optimizer.ResetTransformationRules(map[memo.Operand][]Transformation{ + memo.OperandProjection: { + NewRuleEliminateProjection(), + NewRuleMergeAdjacentProjection(), + }, + }) + defer func() { + s.optimizer.ResetTransformationRules(defaultTransformationMap) + }() + var input []string + var output []struct { + SQL string + Result []string + } + s.testData.GetTestCases(c, &input, &output) + testGroupToString(input, output, s, c) +} + +func (s *testTransformationRuleSuite) TestMergeAggregationProjection(c *C) { + s.optimizer.ResetTransformationRules(map[memo.Operand][]Transformation{ + memo.OperandAggregation: { + NewRuleMergeAggregationProjection(), + }, + }) + defer func() { + s.optimizer.ResetTransformationRules(defaultTransformationMap) + }() + var input []string + var output []struct { + SQL string + Result []string + } + s.testData.GetTestCases(c, &input, &output) + testGroupToString(input, output, s, c) +} diff --git a/planner/core/cbo_test.go b/planner/core/cbo_test.go new file mode 100644 index 0000000..d32315d --- /dev/null +++ b/planner/core/cbo_test.go @@ -0,0 +1,328 @@ +// Copyright 2017 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package core_test + +import ( + "context" + "fmt" + "testing" + + . 
"github.com/pingcap/check" + "github.com/pingcap/errors" + "github.com/pingcap/tidb/domain" + "github.com/pingcap/tidb/kv" + "github.com/pingcap/tidb/planner" + "github.com/pingcap/tidb/planner/core" + "github.com/pingcap/tidb/session" + "github.com/pingcap/tidb/sessionctx" + "github.com/pingcap/tidb/store/mockstore" + "github.com/pingcap/tidb/util/testkit" + "github.com/pingcap/tidb/util/testleak" + "github.com/pingcap/tidb/util/testutil" +) + +var _ = Suite(&testAnalyzeSuite{}) + +type testAnalyzeSuite struct { + testData testutil.TestData +} + +func (s *testAnalyzeSuite) SetUpSuite(c *C) { + var err error + s.testData, err = testutil.LoadTestSuiteData("testdata", "analyze_suite") + c.Assert(err, IsNil) +} + +func (s *testAnalyzeSuite) TearDownSuite(c *C) { + c.Assert(s.testData.GenerateOutputIfNeeded(), IsNil) +} + +func (s *testAnalyzeSuite) TestTableDual(c *C) { + defer testleak.AfterTest(c)() + store, dom, err := newStoreWithBootstrap() + c.Assert(err, IsNil) + defer func() { + dom.Close() + store.Close() + }() + + testKit := testkit.NewTestKit(c, store) + testKit.MustExec(`use test`) + testKit.MustExec(`create table t(a int)`) + testKit.MustExec("insert into t values (1), (2), (3), (4), (5), (6), (7), (8), (9), (10)") + testKit.MustExec("analyze table t") + + testKit.MustQuery(`explain select * from t where 1 = 0`).Check(testkit.Rows( + `TableDual_6 0.00 root rows:0`, + )) + + testKit.MustQuery(`explain select * from t where 1 = 1 limit 0`).Check(testkit.Rows( + `TableDual_5 0.00 root rows:0`, + )) +} + +func constructInsertSQL(i, n int) string { + sql := "insert into t (a,b,c,e)values " + for j := 0; j < n; j++ { + sql += fmt.Sprintf("(%d, %d, '%d', %d)", i*n+j, i, i+j, i*n+j) + if j != n-1 { + sql += ", " + } + } + return sql +} + +func (s *testAnalyzeSuite) TestEmptyTable(c *C) { + defer testleak.AfterTest(c)() + store, dom, err := newStoreWithBootstrap() + c.Assert(err, IsNil) + testKit := testkit.NewTestKit(c, store) + defer func() { + dom.Close() + store.Close() + }() + testKit.MustExec("use test") + testKit.MustExec("drop table if exists t, t1") + testKit.MustExec("create table t (c1 int)") + testKit.MustExec("create table t1 (c1 int)") + testKit.MustExec("analyze table t, t1") + var input, output []string + s.testData.GetTestCases(c, &input, &output) + for i, tt := range input { + ctx := testKit.Se.(sessionctx.Context) + stmts, err := session.Parse(ctx, tt) + c.Assert(err, IsNil) + c.Assert(stmts, HasLen, 1) + stmt := stmts[0] + is := domain.GetDomain(ctx).InfoSchema() + err = core.Preprocess(ctx, stmt, is) + c.Assert(err, IsNil) + p, _, err := planner.Optimize(context.TODO(), ctx, stmt, is) + c.Assert(err, IsNil) + planString := core.ToString(p) + s.testData.OnRecord(func() { + output[i] = planString + }) + c.Assert(planString, Equals, output[i], Commentf("for %s", tt)) + } +} + +func (s *testAnalyzeSuite) TestNullCount(c *C) { + defer testleak.AfterTest(c)() + store, dom, err := newStoreWithBootstrap() + c.Assert(err, IsNil) + testKit := testkit.NewTestKit(c, store) + defer func() { + dom.Close() + store.Close() + }() + testKit.MustExec("use test") + testKit.MustExec("drop table if exists t") + testKit.MustExec("create table t (a int, b int, index idx(a))") + testKit.MustExec("insert into t values (null, null), (null, null)") + testKit.MustExec("analyze table t") + var input []string + var output [][]string + s.testData.GetTestCases(c, &input, &output) + for i := 0; i < 2; i++ { + s.testData.OnRecord(func() { + output[i] = 
s.testData.ConvertRowsToStrings(testKit.MustQuery(input[i]).Rows()) + }) + testKit.MustQuery(input[i]).Check(testkit.Rows(output[i]...)) + } + h := dom.StatsHandle() + h.Clear() + c.Assert(h.Update(dom.InfoSchema()), IsNil) + for i := 2; i < 4; i++ { + s.testData.OnRecord(func() { + output[i] = s.testData.ConvertRowsToStrings(testKit.MustQuery(input[i]).Rows()) + }) + testKit.MustQuery(input[i]).Check(testkit.Rows(output[i]...)) + } +} + +func newStoreWithBootstrap() (kv.Storage, *domain.Domain, error) { + store, err := mockstore.NewMockTikvStore() + if err != nil { + return nil, nil, errors.Trace(err) + } + + session.SetSchemaLease(0) + session.DisableStats4Test() + + dom, err := session.BootstrapSession(store) + if err != nil { + return nil, nil, err + } + + return store, dom, errors.Trace(err) +} + +func BenchmarkOptimize(b *testing.B) { + c := &C{} + store, dom, err := newStoreWithBootstrap() + c.Assert(err, IsNil) + defer func() { + dom.Close() + store.Close() + }() + + testKit := testkit.NewTestKit(c, store) + testKit.MustExec("use test") + testKit.MustExec("drop table if exists t") + testKit.MustExec("create table t (a int primary key, b int, c varchar(200), d datetime DEFAULT CURRENT_TIMESTAMP, e int, ts timestamp DEFAULT CURRENT_TIMESTAMP)") + testKit.MustExec("create index b on t (b)") + testKit.MustExec("create index d on t (d)") + testKit.MustExec("create index e on t (e)") + testKit.MustExec("create index b_c on t (b,c)") + testKit.MustExec("create index ts on t (ts)") + for i := 0; i < 100; i++ { + testKit.MustExec(constructInsertSQL(i, 100)) + } + testKit.MustExec("analyze table t") + tests := []struct { + sql string + best string + }{ + { + sql: "select count(*) from t group by e", + best: "IndexReader(Index(t.e)[[NULL,+inf]])->StreamAgg", + }, + { + sql: "select count(*) from t where e <= 10 group by e", + best: "IndexReader(Index(t.e)[[-inf,10]])->StreamAgg", + }, + { + sql: "select count(*) from t where e <= 50", + best: "IndexReader(Index(t.e)[[-inf,50]]->HashAgg)->HashAgg", + }, + { + sql: "select count(*) from t where c > '1' group by b", + best: "IndexReader(Index(t.b_c)[[NULL,+inf]]->Sel([gt(test.t.c, 1)]))->StreamAgg", + }, + { + sql: "select count(*) from t where e = 1 group by b", + best: "IndexLookUp(Index(t.e)[[1,1]], Table(t)->HashAgg)->HashAgg", + }, + { + sql: "select count(*) from t where e > 1 group by b", + best: "TableReader(Table(t)->Sel([gt(test.t.e, 1)])->HashAgg)->HashAgg", + }, + { + sql: "select count(e) from t where t.b <= 20", + best: "IndexLookUp(Index(t.b)[[-inf,20]], Table(t)->HashAgg)->HashAgg", + }, + { + sql: "select count(e) from t where t.b <= 30", + best: "IndexLookUp(Index(t.b)[[-inf,30]], Table(t)->HashAgg)->HashAgg", + }, + { + sql: "select count(e) from t where t.b <= 40", + best: "IndexLookUp(Index(t.b)[[-inf,40]], Table(t)->HashAgg)->HashAgg", + }, + { + sql: "select count(e) from t where t.b <= 50", + best: "TableReader(Table(t)->Sel([le(test.t.b, 50)])->HashAgg)->HashAgg", + }, + { + sql: "select * from t where t.b <= 40", + best: "IndexLookUp(Index(t.b)[[-inf,40]], Table(t))", + }, + { + sql: "select * from t where t.b <= 50", + best: "TableReader(Table(t)->Sel([le(test.t.b, 50)]))", + }, + // test panic + { + sql: "select * from t where 1 and t.b <= 50", + best: "TableReader(Table(t)->Sel([le(test.t.b, 50)]))", + }, + { + sql: "select * from t where t.b <= 100 order by t.a limit 1", + best: "TableReader(Table(t)->Sel([le(test.t.b, 100)])->Limit)->Limit", + }, + { + sql: "select * from t where t.b <= 1 order by t.a limit 10", + 
best: "IndexLookUp(Index(t.b)[[-inf,1]]->TopN([test.t.a],0,10), Table(t))->TopN([test.t.a],0,10)", + }, + { + sql: "select * from t use index(b) where b = 1 order by a", + best: "IndexLookUp(Index(t.b)[[1,1]], Table(t))->Sort", + }, + // test datetime + { + sql: "select * from t where d < cast('1991-09-05' as datetime)", + best: "IndexLookUp(Index(t.d)[[-inf,1991-09-05 00:00:00)], Table(t))", + }, + // test timestamp + { + sql: "select * from t where ts < '1991-09-05'", + best: "IndexLookUp(Index(t.ts)[[-inf,1991-09-05 00:00:00)], Table(t))", + }, + } + for _, tt := range tests { + ctx := testKit.Se.(sessionctx.Context) + stmts, err := session.Parse(ctx, tt.sql) + c.Assert(err, IsNil) + c.Assert(stmts, HasLen, 1) + stmt := stmts[0] + is := domain.GetDomain(ctx).InfoSchema() + err = core.Preprocess(ctx, stmt, is) + c.Assert(err, IsNil) + + b.Run(tt.sql, func(b *testing.B) { + b.ResetTimer() + for i := 0; i < b.N; i++ { + _, _, err := planner.Optimize(context.TODO(), ctx, stmt, is) + c.Assert(err, IsNil) + } + b.ReportAllocs() + }) + } +} + +func (s *testAnalyzeSuite) TestIssue9562(c *C) { + defer testleak.AfterTest(c)() + store, dom, err := newStoreWithBootstrap() + c.Assert(err, IsNil) + tk := testkit.NewTestKit(c, store) + defer func() { + dom.Close() + store.Close() + }() + + tk.MustExec("use test") + var input [][]string + var output []struct { + SQL []string + Plan []string + } + s.testData.GetTestCases(c, &input, &output) + for i, ts := range input { + for j, tt := range ts { + if j != len(ts)-1 { + tk.MustExec(tt) + } + s.testData.OnRecord(func() { + output[i].SQL = ts + if j == len(ts)-1 { + output[i].Plan = s.testData.ConvertRowsToStrings(tk.MustQuery(tt).Rows()) + } + }) + if j == len(ts)-1 { + tk.MustQuery(tt).Check(testkit.Rows(output[i].Plan...)) + } + } + } +} diff --git a/planner/core/common_plans.go b/planner/core/common_plans.go new file mode 100644 index 0000000..84b800b --- /dev/null +++ b/planner/core/common_plans.go @@ -0,0 +1,346 @@ +// Copyright 2015 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package core + +import ( + "bytes" + "fmt" + "strconv" + "strings" + + "github.com/pingcap/errors" + "github.com/pingcap/tidb/expression" + "github.com/pingcap/tidb/parser/ast" + "github.com/pingcap/tidb/parser/model" + "github.com/pingcap/tidb/parser/mysql" + "github.com/pingcap/tidb/table" + "github.com/pingcap/tidb/types" +) + +// ShowDDL is for showing DDL information. +type ShowDDL struct { + baseSchemaProducer +} + +// Set represents a plan for set stmt. +type Set struct { + baseSchemaProducer + + VarAssigns []*expression.VarAssignment +} + +// Simple represents a simple statement plan which doesn't need any optimization. +type Simple struct { + baseSchemaProducer + + Statement ast.StmtNode +} + +// Insert represents an insert plan. 
+type Insert struct { + baseSchemaProducer + + Table table.Table + tableSchema *expression.Schema + tableColNames types.NameSlice + Columns []*ast.ColumnName + Lists [][]expression.Expression + SetList []*expression.Assignment + + IsReplace bool + + // NeedFillDefaultValue is true when expr in value list reference other column. + NeedFillDefaultValue bool + + SelectPlan PhysicalPlan + + AllAssignmentsAreConstant bool +} + +// Delete represents a delete plan. +type Delete struct { + baseSchemaProducer + + SelectPlan PhysicalPlan + + TblColPosInfos TblColPosInfoSlice +} + +// analyzeInfo is used to store the database name, table name and partition name of analyze task. +type analyzeInfo struct { + DBName string + TableName string + PhysicalTableID int64 +} + +// AnalyzeColumnsTask is used for analyze columns. +type AnalyzeColumnsTask struct { + PKInfo *model.ColumnInfo + ColsInfo []*model.ColumnInfo + TblInfo *model.TableInfo + analyzeInfo +} + +// AnalyzeIndexTask is used for analyze index. +type AnalyzeIndexTask struct { + IndexInfo *model.IndexInfo + TblInfo *model.TableInfo + analyzeInfo +} + +// Analyze represents an analyze plan +type Analyze struct { + baseSchemaProducer + + ColTasks []AnalyzeColumnsTask + IdxTasks []AnalyzeIndexTask +} + +// DDL represents a DDL statement plan. +type DDL struct { + baseSchemaProducer + + Statement ast.DDLNode +} + +// Explain represents a explain plan. +type Explain struct { + baseSchemaProducer + + TargetPlan Plan + Format string + ExecStmt ast.StmtNode + + Rows [][]string + explainedPlans map[int]bool +} + +// prepareSchema prepares explain's result schema. +func (e *Explain) prepareSchema() error { + var fieldNames []string + format := strings.ToLower(e.Format) + + switch { + case format == ast.ExplainFormatROW: + fieldNames = []string{"id", "count", "task", "operator info"} + case format == ast.ExplainFormatDOT: + fieldNames = []string{"dot contents"} + default: + return errors.Errorf("explain format '%s' is not supported now", e.Format) + } + + cwn := &columnsWithNames{ + cols: make([]*expression.Column, 0, len(fieldNames)), + names: make([]*types.FieldName, 0, len(fieldNames)), + } + + for _, fieldName := range fieldNames { + cwn.Append(buildColumnWithName("", fieldName, mysql.TypeString, mysql.MaxBlobWidth)) + } + e.SetSchema(cwn.col2Schema()) + e.names = cwn.names + return nil +} + +// RenderResult renders the explain result as specified format. +func (e *Explain) RenderResult() error { + if e.TargetPlan == nil { + return nil + } + switch strings.ToLower(e.Format) { + case ast.ExplainFormatROW: + e.explainedPlans = map[int]bool{} + err := e.explainPlanInRowFormat(e.TargetPlan, "root", "", true) + if err != nil { + return err + } + case ast.ExplainFormatDOT: + e.prepareDotInfo(e.TargetPlan.(PhysicalPlan)) + default: + return errors.Errorf("explain format '%s' is not supported now", e.Format) + } + return nil +} + +// explainPlanInRowFormat generates explain information for root-tasks. +func (e *Explain) explainPlanInRowFormat(p Plan, taskType, indent string, isLastChild bool) (err error) { + e.prepareOperatorInfo(p, taskType, indent, isLastChild) + e.explainedPlans[p.ID()] = true + + // For every child we create a new sub-tree rooted by it. 
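	// Indent4Child appends one more tree column (TreeBody + TreeGap). When the
	// current operator is the last child, it first converts the closest
	// TreeBody in the inherited indent into a TreeGap, so the parent's branch
	// stops at this level in the rendered tree.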
+ childIndent := Indent4Child(indent, isLastChild) + + if physPlan, ok := p.(PhysicalPlan); ok { + for i, child := range physPlan.Children() { + if e.explainedPlans[child.ID()] { + continue + } + err = e.explainPlanInRowFormat(child, taskType, childIndent, i == len(physPlan.Children())-1) + if err != nil { + return + } + } + } + + switch x := p.(type) { + case *PhysicalTableReader: + err = e.explainPlanInRowFormat(x.tablePlan, "cop", childIndent, true) + case *PhysicalIndexReader: + err = e.explainPlanInRowFormat(x.indexPlan, "cop", childIndent, true) + case *PhysicalIndexLookUpReader: + err = e.explainPlanInRowFormat(x.indexPlan, "cop", childIndent, false) + err = e.explainPlanInRowFormat(x.tablePlan, "cop", childIndent, true) + case *Insert: + if x.SelectPlan != nil { + err = e.explainPlanInRowFormat(x.SelectPlan, "root", childIndent, true) + } + case *Delete: + if x.SelectPlan != nil { + err = e.explainPlanInRowFormat(x.SelectPlan, "root", childIndent, true) + } + } + return +} + +const ( + // TreeBody indicates the current operator sub-tree is not finished, still + // has child operators to be attached on. + TreeBody = '│' + // TreeMiddleNode indicates this operator is not the last child of the + // current sub-tree rooted by its parent. + TreeMiddleNode = '├' + // TreeLastNode indicates this operator is the last child of the current + // sub-tree rooted by its parent. + TreeLastNode = '└' + // TreeGap is used to represent the gap between the branches of the tree. + TreeGap = ' ' + // TreeNodeIdentifier is used to replace the treeGap once we need to attach + // a node to a sub-tree. + TreeNodeIdentifier = '─' +) + +// Indent4Child appends more blank to the `indent` string +func Indent4Child(indent string, isLastChild bool) string { + if !isLastChild { + return string(append([]rune(indent), TreeBody, TreeGap)) + } + + // If the current node is the last node of the current operator tree, we + // need to end this sub-tree by changing the closest treeBody to a treeGap. + indentBytes := []rune(indent) + for i := len(indentBytes) - 1; i >= 0; i-- { + if indentBytes[i] == TreeBody { + indentBytes[i] = TreeGap + break + } + } + + return string(append(indentBytes, TreeBody, TreeGap)) +} + +// PrettyIdentifier returns a pretty identifier which contains indent and tree node hierarchy indicator +func PrettyIdentifier(id, indent string, isLastChild bool) string { + if len(indent) == 0 { + return id + } + + indentBytes := []rune(indent) + for i := len(indentBytes) - 1; i >= 0; i-- { + if indentBytes[i] != TreeBody { + continue + } + + // Here we attach a new node to the current sub-tree by changing + // the closest treeBody to a: + // 1. treeLastNode, if this operator is the last child. + // 2. treeMiddleNode, if this operator is not the last child.. + if isLastChild { + indentBytes[i] = TreeLastNode + } else { + indentBytes[i] = TreeMiddleNode + } + break + } + + // Replace the treeGap between the treeBody and the node to a + // treeNodeIdentifier. + indentBytes[len(indentBytes)-1] = TreeNodeIdentifier + return string(indentBytes) + id +} + +// prepareOperatorInfo generates the following information for every plan: +// operator id, task type, operator info, and the estemated row count. 
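+// For example (values are illustrative only), a cop-task IndexScan that is the
+// last child of its parent could contribute the row
+//   []string{"└─IndexScan_5", "10.00", "cop", "table:t, index:b, range:[1,1], keep order:false"}
+// which is appended to e.Rows.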
+func (e *Explain) prepareOperatorInfo(p Plan, taskType string, indent string, isLastChild bool) { + operatorInfo := p.ExplainInfo() + count := "N/A" + if si := p.statsInfo(); si != nil { + count = strconv.FormatFloat(si.RowCount, 'f', 2, 64) + } + explainID := p.ExplainID().String() + row := []string{PrettyIdentifier(explainID, indent, isLastChild), count, taskType, operatorInfo} + e.Rows = append(e.Rows, row) +} + +func (e *Explain) prepareDotInfo(p PhysicalPlan) { + buffer := bytes.NewBufferString("") + fmt.Fprintf(buffer, "\ndigraph %s {\n", p.ExplainID()) + e.prepareTaskDot(p, "root", buffer) + buffer.WriteString("}\n") + + e.Rows = append(e.Rows, []string{buffer.String()}) +} + +func (e *Explain) prepareTaskDot(p PhysicalPlan, taskTp string, buffer *bytes.Buffer) { + fmt.Fprintf(buffer, "subgraph cluster%v{\n", p.ID()) + buffer.WriteString("node [style=filled, color=lightgrey]\n") + buffer.WriteString("color=black\n") + fmt.Fprintf(buffer, "label = \"%s\"\n", taskTp) + + if len(p.Children()) == 0 { + if taskTp == "cop" { + fmt.Fprintf(buffer, "\"%s\"\n}\n", p.ExplainID()) + return + } + fmt.Fprintf(buffer, "\"%s\"\n", p.ExplainID()) + } + + var copTasks []PhysicalPlan + var pipelines []string + + for planQueue := []PhysicalPlan{p}; len(planQueue) > 0; planQueue = planQueue[1:] { + curPlan := planQueue[0] + switch copPlan := curPlan.(type) { + case *PhysicalTableReader: + pipelines = append(pipelines, fmt.Sprintf("\"%s\" -> \"%s\"\n", copPlan.ExplainID(), copPlan.tablePlan.ExplainID())) + copTasks = append(copTasks, copPlan.tablePlan) + case *PhysicalIndexReader: + pipelines = append(pipelines, fmt.Sprintf("\"%s\" -> \"%s\"\n", copPlan.ExplainID(), copPlan.indexPlan.ExplainID())) + copTasks = append(copTasks, copPlan.indexPlan) + case *PhysicalIndexLookUpReader: + pipelines = append(pipelines, fmt.Sprintf("\"%s\" -> \"%s\"\n", copPlan.ExplainID(), copPlan.tablePlan.ExplainID())) + pipelines = append(pipelines, fmt.Sprintf("\"%s\" -> \"%s\"\n", copPlan.ExplainID(), copPlan.indexPlan.ExplainID())) + copTasks = append(copTasks, copPlan.tablePlan) + copTasks = append(copTasks, copPlan.indexPlan) + } + } + buffer.WriteString("}\n") + + for _, cop := range copTasks { + e.prepareTaskDot(cop.(PhysicalPlan), "cop", buffer) + } + + for i := range pipelines { + buffer.WriteString(pipelines[i]) + } +} diff --git a/planner/core/errors.go b/planner/core/errors.go new file mode 100644 index 0000000..5119d50 --- /dev/null +++ b/planner/core/errors.go @@ -0,0 +1,115 @@ +// Copyright 2018 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package core + +import ( + "github.com/pingcap/tidb/parser/mysql" + "github.com/pingcap/tidb/parser/terror" +) + +// error definitions. 
+var ( + ErrUnsupportedType = terror.ClassOptimizer.New(mysql.ErrUnsupportedType, mysql.MySQLErrName[mysql.ErrUnsupportedType]) + ErrAnalyzeMissIndex = terror.ClassOptimizer.New(mysql.ErrAnalyzeMissIndex, mysql.MySQLErrName[mysql.ErrAnalyzeMissIndex]) + ErrWrongParamCount = terror.ClassOptimizer.New(mysql.ErrWrongParamCount, mysql.MySQLErrName[mysql.ErrWrongParamCount]) + ErrSchemaChanged = terror.ClassOptimizer.New(mysql.ErrSchemaChanged, mysql.MySQLErrName[mysql.ErrSchemaChanged]) + ErrTablenameNotAllowedHere = terror.ClassOptimizer.New(mysql.ErrTablenameNotAllowedHere, mysql.MySQLErrName[mysql.ErrTablenameNotAllowedHere]) + ErrNotSupportedYet = terror.ClassOptimizer.New(mysql.ErrNotSupportedYet, mysql.MySQLErrName[mysql.ErrNotSupportedYet]) + ErrWrongUsage = terror.ClassOptimizer.New(mysql.ErrWrongUsage, mysql.MySQLErrName[mysql.ErrWrongUsage]) + ErrUnknown = terror.ClassOptimizer.New(mysql.ErrUnknown, mysql.MySQLErrName[mysql.ErrUnknown]) + ErrUnknownTable = terror.ClassOptimizer.New(mysql.ErrUnknownTable, mysql.MySQLErrName[mysql.ErrUnknownTable]) + ErrWrongArguments = terror.ClassOptimizer.New(mysql.ErrWrongArguments, mysql.MySQLErrName[mysql.ErrWrongArguments]) + ErrWrongNumberOfColumnsInSelect = terror.ClassOptimizer.New(mysql.ErrWrongNumberOfColumnsInSelect, mysql.MySQLErrName[mysql.ErrWrongNumberOfColumnsInSelect]) + ErrBadGeneratedColumn = terror.ClassOptimizer.New(mysql.ErrBadGeneratedColumn, mysql.MySQLErrName[mysql.ErrBadGeneratedColumn]) + ErrFieldNotInGroupBy = terror.ClassOptimizer.New(mysql.ErrFieldNotInGroupBy, mysql.MySQLErrName[mysql.ErrFieldNotInGroupBy]) + ErrBadTable = terror.ClassOptimizer.New(mysql.ErrBadTable, mysql.MySQLErrName[mysql.ErrBadTable]) + ErrKeyDoesNotExist = terror.ClassOptimizer.New(mysql.ErrKeyDoesNotExist, mysql.MySQLErrName[mysql.ErrKeyDoesNotExist]) + ErrOperandColumns = terror.ClassOptimizer.New(mysql.ErrOperandColumns, mysql.MySQLErrName[mysql.ErrOperandColumns]) + ErrInvalidGroupFuncUse = terror.ClassOptimizer.New(mysql.ErrInvalidGroupFuncUse, mysql.MySQLErrName[mysql.ErrInvalidGroupFuncUse]) + ErrIllegalReference = terror.ClassOptimizer.New(mysql.ErrIllegalReference, mysql.MySQLErrName[mysql.ErrIllegalReference]) + ErrNoDB = terror.ClassOptimizer.New(mysql.ErrNoDB, mysql.MySQLErrName[mysql.ErrNoDB]) + ErrUnknownExplainFormat = terror.ClassOptimizer.New(mysql.ErrUnknownExplainFormat, mysql.MySQLErrName[mysql.ErrUnknownExplainFormat]) + ErrWrongGroupField = terror.ClassOptimizer.New(mysql.ErrWrongGroupField, mysql.MySQLErrName[mysql.ErrWrongGroupField]) + ErrDupFieldName = terror.ClassOptimizer.New(mysql.ErrDupFieldName, mysql.MySQLErrName[mysql.ErrDupFieldName]) + ErrNonUpdatableTable = terror.ClassOptimizer.New(mysql.ErrNonUpdatableTable, mysql.MySQLErrName[mysql.ErrNonUpdatableTable]) + ErrInternal = terror.ClassOptimizer.New(mysql.ErrInternal, mysql.MySQLErrName[mysql.ErrInternal]) + ErrNonUniqTable = terror.ClassOptimizer.New(mysql.ErrNonuniqTable, mysql.MySQLErrName[mysql.ErrNonuniqTable]) + ErrPartitionClauseOnNonpartitioned = terror.ClassOptimizer.New(mysql.ErrPartitionClauseOnNonpartitioned, mysql.MySQLErrName[mysql.ErrPartitionClauseOnNonpartitioned]) + ErrUnsupportedOnGeneratedColumn = terror.ClassOptimizer.New(mysql.ErrUnsupportedOnGeneratedColumn, mysql.MySQLErrName[mysql.ErrUnsupportedOnGeneratedColumn]) + ErrPrivilegeCheckFail = terror.ClassOptimizer.New(mysql.ErrPrivilegeCheckFail, mysql.MySQLErrName[mysql.ErrPrivilegeCheckFail]) + ErrInvalidWildCard = terror.ClassOptimizer.New(mysql.ErrInvalidWildCard, 
mysql.MySQLErrName[mysql.ErrInvalidWildCard]) + ErrMixOfGroupFuncAndFields = terror.ClassOptimizer.New(mysql.ErrMixOfGroupFuncAndFieldsIncompatible, mysql.MySQLErrName[mysql.ErrMixOfGroupFuncAndFieldsIncompatible]) + ErrDBaccessDenied = terror.ClassOptimizer.New(mysql.ErrDBaccessDenied, mysql.MySQLErrName[mysql.ErrDBaccessDenied]) + ErrTableaccessDenied = terror.ClassOptimizer.New(mysql.ErrTableaccessDenied, mysql.MySQLErrName[mysql.ErrTableaccessDenied]) + ErrSpecificAccessDenied = terror.ClassOptimizer.New(mysql.ErrSpecificAccessDenied, mysql.MySQLErrName[mysql.ErrSpecificAccessDenied]) + ErrViewNoExplain = terror.ClassOptimizer.New(mysql.ErrViewNoExplain, mysql.MySQLErrName[mysql.ErrViewNoExplain]) + ErrWrongValueCountOnRow = terror.ClassOptimizer.New(mysql.ErrWrongValueCountOnRow, mysql.MySQLErrName[mysql.ErrWrongValueCountOnRow]) + ErrViewInvalid = terror.ClassOptimizer.New(mysql.ErrViewInvalid, mysql.MySQLErrName[mysql.ErrViewInvalid]) + ErrNoSuchThread = terror.ClassOptimizer.New(mysql.ErrNoSuchThread, mysql.MySQLErrName[mysql.ErrNoSuchThread]) + ErrUnknownColumn = terror.ClassOptimizer.New(mysql.ErrBadField, mysql.MySQLErrName[mysql.ErrBadField]) + ErrCartesianProductUnsupported = terror.ClassOptimizer.New(mysql.ErrCartesianProductUnsupported, mysql.MySQLErrName[mysql.ErrCartesianProductUnsupported]) + ErrStmtNotFound = terror.ClassOptimizer.New(mysql.ErrPreparedStmtNotFound, mysql.MySQLErrName[mysql.ErrPreparedStmtNotFound]) + ErrAmbiguous = terror.ClassOptimizer.New(mysql.ErrNonUniq, mysql.MySQLErrName[mysql.ErrNonUniq]) + // Since we cannot know if user loggined with a password, use message of ErrAccessDeniedNoPassword instead + ErrAccessDenied = terror.ClassOptimizer.New(mysql.ErrAccessDenied, mysql.MySQLErrName[mysql.ErrAccessDeniedNoPassword]) +) + +func init() { + mysqlErrCodeMap := map[terror.ErrCode]uint16{ + mysql.ErrViewInvalid: mysql.ErrViewInvalid, + mysql.ErrUnknown: mysql.ErrUnknown, + mysql.ErrTablenameNotAllowedHere: mysql.ErrTablenameNotAllowedHere, + mysql.ErrUnsupportedType: mysql.ErrUnsupportedType, + mysql.ErrAnalyzeMissIndex: mysql.ErrAnalyzeMissIndex, + mysql.ErrWrongParamCount: mysql.ErrWrongParamCount, + mysql.ErrSchemaChanged: mysql.ErrSchemaChanged, + mysql.ErrNotSupportedYet: mysql.ErrNotSupportedYet, + mysql.ErrWrongUsage: mysql.ErrWrongUsage, + mysql.ErrUnknownTable: mysql.ErrUnknownTable, + mysql.ErrWrongArguments: mysql.ErrWrongArguments, + mysql.ErrBadGeneratedColumn: mysql.ErrBadGeneratedColumn, + mysql.ErrFieldNotInGroupBy: mysql.ErrFieldNotInGroupBy, + mysql.ErrBadTable: mysql.ErrBadTable, + mysql.ErrKeyDoesNotExist: mysql.ErrKeyDoesNotExist, + mysql.ErrOperandColumns: mysql.ErrOperandColumns, + mysql.ErrInvalidGroupFuncUse: mysql.ErrInvalidGroupFuncUse, + mysql.ErrIllegalReference: mysql.ErrIllegalReference, + mysql.ErrNoDB: mysql.ErrNoDB, + mysql.ErrUnknownExplainFormat: mysql.ErrUnknownExplainFormat, + mysql.ErrWrongGroupField: mysql.ErrWrongGroupField, + mysql.ErrDupFieldName: mysql.ErrDupFieldName, + mysql.ErrNonUpdatableTable: mysql.ErrNonUpdatableTable, + mysql.ErrInternal: mysql.ErrInternal, + mysql.ErrMixOfGroupFuncAndFieldsIncompatible: mysql.ErrMixOfGroupFuncAndFieldsIncompatible, + mysql.ErrWrongNumberOfColumnsInSelect: mysql.ErrWrongNumberOfColumnsInSelect, + mysql.ErrWrongValueCountOnRow: mysql.ErrWrongValueCountOnRow, + mysql.ErrPartitionClauseOnNonpartitioned: mysql.ErrPartitionClauseOnNonpartitioned, + mysql.ErrDBaccessDenied: mysql.ErrDBaccessDenied, + mysql.ErrTableaccessDenied: mysql.ErrTableaccessDenied, + 
mysql.ErrSpecificAccessDenied: mysql.ErrSpecificAccessDenied, + mysql.ErrViewNoExplain: mysql.ErrViewNoExplain, + mysql.ErrUnsupportedOnGeneratedColumn: mysql.ErrUnsupportedOnGeneratedColumn, + mysql.ErrNoSuchThread: mysql.ErrNoSuchThread, + mysql.ErrAccessDenied: mysql.ErrAccessDenied, + mysql.ErrPrivilegeCheckFail: mysql.ErrPrivilegeCheckFail, + mysql.ErrCartesianProductUnsupported: mysql.ErrCartesianProductUnsupported, + mysql.ErrPreparedStmtNotFound: mysql.ErrPreparedStmtNotFound, + mysql.ErrNonUniq: mysql.ErrNonUniq, + mysql.ErrBadField: mysql.ErrBadField, + mysql.ErrNonuniqTable: mysql.ErrNonuniqTable, + mysql.ErrTooBigPrecision: mysql.ErrTooBigPrecision, + mysql.ErrInvalidWildCard: mysql.ErrInvalidWildCard, + } + terror.ErrClassToMySQLCodes[terror.ClassOptimizer] = mysqlErrCodeMap +} diff --git a/planner/core/errors_test.go b/planner/core/errors_test.go new file mode 100644 index 0000000..1d75b2a --- /dev/null +++ b/planner/core/errors_test.go @@ -0,0 +1,72 @@ +// Copyright 2019 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package core + +import ( + . "github.com/pingcap/check" + "github.com/pingcap/tidb/parser/mysql" + "github.com/pingcap/tidb/parser/terror" +) + +type testErrorSuite struct{} + +var _ = Suite(testErrorSuite{}) + +func (s testErrorSuite) TestError(c *C) { + kvErrs := []*terror.Error{ + ErrUnsupportedType, + ErrAnalyzeMissIndex, + ErrWrongParamCount, + ErrSchemaChanged, + ErrTablenameNotAllowedHere, + ErrNotSupportedYet, + ErrWrongUsage, + ErrUnknownTable, + ErrWrongArguments, + ErrWrongNumberOfColumnsInSelect, + ErrBadGeneratedColumn, + ErrFieldNotInGroupBy, + ErrBadTable, + ErrKeyDoesNotExist, + ErrOperandColumns, + ErrInvalidGroupFuncUse, + ErrIllegalReference, + ErrNoDB, + ErrUnknownExplainFormat, + ErrWrongGroupField, + ErrDupFieldName, + ErrNonUpdatableTable, + ErrInternal, + ErrNonUniqTable, + ErrPartitionClauseOnNonpartitioned, + ErrUnsupportedOnGeneratedColumn, + ErrPrivilegeCheckFail, + ErrInvalidWildCard, + ErrMixOfGroupFuncAndFields, + ErrDBaccessDenied, + ErrTableaccessDenied, + ErrSpecificAccessDenied, + ErrViewNoExplain, + ErrWrongValueCountOnRow, + ErrNoSuchThread, + ErrUnknownColumn, + ErrCartesianProductUnsupported, + ErrStmtNotFound, + ErrAmbiguous, + } + for _, err := range kvErrs { + code := err.ToSQLError().Code + c.Assert(code != mysql.ErrUnknown && code == uint16(err.Code()), IsTrue, Commentf("err: %v", err)) + } +} diff --git a/planner/core/exhaust_physical_plans.go b/planner/core/exhaust_physical_plans.go new file mode 100644 index 0000000..af73237 --- /dev/null +++ b/planner/core/exhaust_physical_plans.go @@ -0,0 +1,453 @@ +// Copyright 2017 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package core + +import ( + "math" + + "github.com/pingcap/tidb/expression" + "github.com/pingcap/tidb/planner/property" + "github.com/pingcap/tidb/util/set" +) + +func (p *LogicalUnionScan) exhaustPhysicalPlans(prop *property.PhysicalProperty) []PhysicalPlan { + childProp := prop.Clone() + us := PhysicalUnionScan{ + Conditions: p.conditions, + HandleCol: p.handleCol, + }.Init(p.ctx, p.stats, childProp) + return []PhysicalPlan{us} +} + +func getMaxSortPrefix(sortCols, allCols []*expression.Column) []int { + tmpSchema := expression.NewSchema(allCols...) + sortColOffsets := make([]int, 0, len(sortCols)) + for _, sortCol := range sortCols { + offset := tmpSchema.ColumnIndex(sortCol) + if offset == -1 { + return sortColOffsets + } + sortColOffsets = append(sortColOffsets, offset) + } + return sortColOffsets +} + +func findMaxPrefixLen(candidates [][]*expression.Column, keys []*expression.Column) int { + maxLen := 0 + for _, candidateKeys := range candidates { + matchedLen := 0 + for i := range keys { + if i < len(candidateKeys) && keys[i].Equal(nil, candidateKeys[i]) { + matchedLen++ + } else { + break + } + } + if matchedLen > maxLen { + maxLen = matchedLen + } + } + return maxLen +} + +func (p *LogicalJoin) moveEqualToOtherConditions(offsets []int) []expression.Expression { + // Construct used equal condition set based on the equal condition offsets. + usedEqConds := set.NewIntSet() + for _, eqCondIdx := range offsets { + usedEqConds.Insert(eqCondIdx) + } + + // Construct otherConds, which is composed of the original other conditions + // and the remained unused equal conditions. + numOtherConds := len(p.OtherConditions) + len(p.EqualConditions) - len(offsets) + otherConds := make([]expression.Expression, len(p.OtherConditions), numOtherConds) + copy(otherConds, p.OtherConditions) + for eqCondIdx := range p.EqualConditions { + if !usedEqConds.Exist(eqCondIdx) { + otherConds = append(otherConds, p.EqualConditions[eqCondIdx]) + } + } + + return otherConds +} + +// Only if the input required prop is the prefix fo join keys, we can pass through this property. +func (p *PhysicalMergeJoin) tryToGetChildReqProp(prop *property.PhysicalProperty) ([]*property.PhysicalProperty, bool) { + lProp := property.NewPhysicalProperty(property.RootTaskType, p.LeftJoinKeys, false, math.MaxFloat64, false) + rProp := property.NewPhysicalProperty(property.RootTaskType, p.RightJoinKeys, false, math.MaxFloat64, false) + if !prop.IsEmpty() { + // sort merge join fits the cases of massive ordered data, so desc scan is always expensive. + all, desc := prop.AllSameOrder() + if !all || desc { + return nil, false + } + if !prop.IsPrefix(lProp) && !prop.IsPrefix(rProp) { + return nil, false + } + if prop.IsPrefix(rProp) && p.JoinType == LeftOuterJoin { + return nil, false + } + if prop.IsPrefix(lProp) && p.JoinType == RightOuterJoin { + return nil, false + } + } + + return []*property.PhysicalProperty{lProp, rProp}, true +} + +func (p *LogicalJoin) getMergeJoin(prop *property.PhysicalProperty) []PhysicalPlan { + joins := make([]PhysicalPlan, 0, len(p.leftProperties)+1) + // The leftProperties caches all the possible properties that are provided by its children. 
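+	// Rough example (column names are hypothetical): if the left child can
+	// provide rows ordered by (a, b) and the join keys are a = x AND b = y,
+	// getMaxSortPrefix yields offsets [0, 1]; if some right-side property also
+	// starts with (x, y), a merge join on that common key prefix is generated,
+	// and any equal conditions outside the prefix are moved to the
+	// other-conditions list.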
+ for _, lhsChildProperty := range p.leftProperties { + offsets := getMaxSortPrefix(lhsChildProperty, p.LeftJoinKeys) + if len(offsets) == 0 { + continue + } + + leftKeys := lhsChildProperty[:len(offsets)] + rightKeys := expression.NewSchema(p.RightJoinKeys...).ColumnsByIndices(offsets) + + prefixLen := findMaxPrefixLen(p.rightProperties, rightKeys) + if prefixLen == 0 { + continue + } + + leftKeys = leftKeys[:prefixLen] + rightKeys = rightKeys[:prefixLen] + offsets = offsets[:prefixLen] + baseJoin := basePhysicalJoin{ + JoinType: p.JoinType, + LeftConditions: p.LeftConditions, + RightConditions: p.RightConditions, + DefaultValues: p.DefaultValues, + LeftJoinKeys: leftKeys, + RightJoinKeys: rightKeys, + } + mergeJoin := PhysicalMergeJoin{basePhysicalJoin: baseJoin}.Init(p.ctx, p.stats.ScaleByExpectCnt(prop.ExpectedCnt)) + mergeJoin.SetSchema(p.schema) + mergeJoin.OtherConditions = p.moveEqualToOtherConditions(offsets) + mergeJoin.initCompareFuncs() + if reqProps, ok := mergeJoin.tryToGetChildReqProp(prop); ok { + // Adjust expected count for children nodes. + if prop.ExpectedCnt < p.stats.RowCount { + expCntScale := prop.ExpectedCnt / p.stats.RowCount + reqProps[0].ExpectedCnt = p.children[0].statsInfo().RowCount * expCntScale + reqProps[1].ExpectedCnt = p.children[1].statsInfo().RowCount * expCntScale + } + mergeJoin.childrenReqProps = reqProps + joins = append(joins, mergeJoin) + } + } + // If TiDB_SMJ hint is existed, it should consider enforce merge join, + // because we can't trust lhsChildProperty completely. + if (p.preferJoinType & preferMergeJoin) > 0 { + joins = append(joins, p.getEnforcedMergeJoin(prop)...) + } + + return joins +} + +// Change JoinKeys order, by offsets array +// offsets array is generate by prop check +func getNewJoinKeysByOffsets(oldJoinKeys []*expression.Column, offsets []int) []*expression.Column { + newKeys := make([]*expression.Column, 0, len(oldJoinKeys)) + for _, offset := range offsets { + newKeys = append(newKeys, oldJoinKeys[offset]) + } + for pos, key := range oldJoinKeys { + isExist := false + for _, p := range offsets { + if p == pos { + isExist = true + break + } + } + if !isExist { + newKeys = append(newKeys, key) + } + } + return newKeys +} + +func (p *LogicalJoin) getEnforcedMergeJoin(prop *property.PhysicalProperty) []PhysicalPlan { + // Check whether SMJ can satisfy the required property + offsets := make([]int, 0, len(p.LeftJoinKeys)) + all, desc := prop.AllSameOrder() + if !all { + return nil + } + for _, item := range prop.Items { + isExist := false + for joinKeyPos := 0; joinKeyPos < len(p.LeftJoinKeys); joinKeyPos++ { + var key *expression.Column + if item.Col.Equal(p.ctx, p.LeftJoinKeys[joinKeyPos]) { + key = p.LeftJoinKeys[joinKeyPos] + } + if item.Col.Equal(p.ctx, p.RightJoinKeys[joinKeyPos]) { + key = p.RightJoinKeys[joinKeyPos] + } + if key == nil { + continue + } + for i := 0; i < len(offsets); i++ { + if offsets[i] == joinKeyPos { + isExist = true + break + } + } + if !isExist { + offsets = append(offsets, joinKeyPos) + } + isExist = true + break + } + if !isExist { + return nil + } + } + // Generate the enforced sort merge join + leftKeys := getNewJoinKeysByOffsets(p.LeftJoinKeys, offsets) + rightKeys := getNewJoinKeysByOffsets(p.RightJoinKeys, offsets) + lProp := property.NewPhysicalProperty(property.RootTaskType, leftKeys, desc, math.MaxFloat64, true) + rProp := property.NewPhysicalProperty(property.RootTaskType, rightKeys, desc, math.MaxFloat64, true) + baseJoin := basePhysicalJoin{ + JoinType: p.JoinType, + LeftConditions: 
p.LeftConditions, + RightConditions: p.RightConditions, + DefaultValues: p.DefaultValues, + LeftJoinKeys: leftKeys, + RightJoinKeys: rightKeys, + OtherConditions: p.OtherConditions, + } + enforcedPhysicalMergeJoin := PhysicalMergeJoin{basePhysicalJoin: baseJoin}.Init(p.ctx, p.stats.ScaleByExpectCnt(prop.ExpectedCnt)) + enforcedPhysicalMergeJoin.SetSchema(p.schema) + enforcedPhysicalMergeJoin.childrenReqProps = []*property.PhysicalProperty{lProp, rProp} + enforcedPhysicalMergeJoin.initCompareFuncs() + return []PhysicalPlan{enforcedPhysicalMergeJoin} +} + +func (p *PhysicalMergeJoin) initCompareFuncs() { + p.CompareFuncs = make([]expression.CompareFunc, 0, len(p.LeftJoinKeys)) + for i := range p.LeftJoinKeys { + p.CompareFuncs = append(p.CompareFuncs, expression.GetCmpFunction(p.LeftJoinKeys[i], p.RightJoinKeys[i])) + } +} + +func (p *LogicalJoin) getHashJoins(prop *property.PhysicalProperty) []PhysicalPlan { + if !prop.IsEmpty() { // hash join doesn't promise any orders + return nil + } + joins := make([]PhysicalPlan, 0, 2) + switch p.JoinType { + case LeftOuterJoin: + joins = append(joins, p.getHashJoin(prop, 1)) + case RightOuterJoin: + joins = append(joins, p.getHashJoin(prop, 0)) + case InnerJoin: + joins = append(joins, p.getHashJoin(prop, 1)) + joins = append(joins, p.getHashJoin(prop, 0)) + } + return joins +} + +func (p *LogicalJoin) getHashJoin(prop *property.PhysicalProperty, innerIdx int) *PhysicalHashJoin { + chReqProps := make([]*property.PhysicalProperty, 2) + chReqProps[innerIdx] = &property.PhysicalProperty{ExpectedCnt: math.MaxFloat64} + chReqProps[1-innerIdx] = &property.PhysicalProperty{ExpectedCnt: math.MaxFloat64} + if prop.ExpectedCnt < p.stats.RowCount { + expCntScale := prop.ExpectedCnt / p.stats.RowCount + chReqProps[1-innerIdx].ExpectedCnt = p.children[1-innerIdx].statsInfo().RowCount * expCntScale + } + hashJoin := NewPhysicalHashJoin(p, innerIdx, p.stats.ScaleByExpectCnt(prop.ExpectedCnt), chReqProps...) + hashJoin.SetSchema(p.schema) + return hashJoin +} + +// LogicalJoin can generates hash join, index join and sort merge join. +// Firstly we check the hint, if hint is figured by user, we force to choose the corresponding physical plan. +// If the hint is not matched, it will get other candidates. +// If the hint is not figured, we will pick all candidates. +func (p *LogicalJoin) exhaustPhysicalPlans(prop *property.PhysicalProperty) []PhysicalPlan { + mergeJoins := p.getMergeJoin(prop) + if (p.preferJoinType & preferMergeJoin) > 0 { + return mergeJoins + } + joins := make([]PhysicalPlan, 0, 3) + joins = append(joins, mergeJoins...) + + hashJoins := p.getHashJoins(prop) + if (p.preferJoinType & preferHashJoin) > 0 { + return hashJoins + } + joins = append(joins, hashJoins...) + return joins +} + +// TryToGetChildProp will check if this sort property can be pushed or not. +// When a sort column will be replaced by scalar function, we refuse it. +// When a sort column will be replaced by a constant, we just remove it. 
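+// For example (table and aliases are made up): over
+// `SELECT a + 1 AS c1, b AS c2, 3 AS c3 FROM t`, a required order on c1 cannot
+// be pushed down because c1 is a scalar function of a, an order on c2 becomes
+// an order on t.b, and c3 is simply dropped from the required property because
+// it is a constant.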
+func (p *LogicalProjection) TryToGetChildProp(prop *property.PhysicalProperty) (*property.PhysicalProperty, bool) { + newProp := &property.PhysicalProperty{TaskTp: property.RootTaskType, ExpectedCnt: prop.ExpectedCnt} + newCols := make([]property.Item, 0, len(prop.Items)) + for _, col := range prop.Items { + idx := p.schema.ColumnIndex(col.Col) + switch expr := p.Exprs[idx].(type) { + case *expression.Column: + newCols = append(newCols, property.Item{Col: expr, Desc: col.Desc}) + case *expression.ScalarFunction: + return nil, false + } + } + newProp.Items = newCols + return newProp, true +} + +func (p *LogicalProjection) exhaustPhysicalPlans(prop *property.PhysicalProperty) []PhysicalPlan { + newProp, ok := p.TryToGetChildProp(prop) + if !ok { + return nil + } + proj := PhysicalProjection{ + Exprs: p.Exprs, + }.Init(p.ctx, p.stats.ScaleByExpectCnt(prop.ExpectedCnt), newProp) + proj.SetSchema(p.schema) + return []PhysicalPlan{proj} +} + +func (lt *LogicalTopN) getPhysTopN() []PhysicalPlan { + ret := make([]PhysicalPlan, 0, 3) + for _, tp := range wholeTaskTypes { + resultProp := &property.PhysicalProperty{TaskTp: tp, ExpectedCnt: math.MaxFloat64} + topN := PhysicalTopN{ + ByItems: lt.ByItems, + Count: lt.Count, + Offset: lt.Offset, + }.Init(lt.ctx, lt.stats, resultProp) + ret = append(ret, topN) + } + return ret +} + +func (lt *LogicalTopN) getPhysLimits() []PhysicalPlan { + prop, canPass := GetPropByOrderByItems(lt.ByItems) + if !canPass { + return nil + } + ret := make([]PhysicalPlan, 0, 3) + for _, tp := range wholeTaskTypes { + resultProp := &property.PhysicalProperty{TaskTp: tp, ExpectedCnt: float64(lt.Count + lt.Offset), Items: prop.Items} + limit := PhysicalLimit{ + Count: lt.Count, + Offset: lt.Offset, + }.Init(lt.ctx, lt.stats, resultProp) + ret = append(ret, limit) + } + return ret +} + +// MatchItems checks if this prop's columns can match by items totally. +func MatchItems(p *property.PhysicalProperty, items []*ByItems) bool { + if len(items) < len(p.Items) { + return false + } + for i, col := range p.Items { + sortItem := items[i] + if sortItem.Desc != col.Desc || !sortItem.Expr.Equal(nil, col.Col) { + return false + } + } + return true +} + +func (lt *LogicalTopN) exhaustPhysicalPlans(prop *property.PhysicalProperty) []PhysicalPlan { + if MatchItems(prop, lt.ByItems) { + return append(lt.getPhysTopN(), lt.getPhysLimits()...) + } + return nil +} + +// exhaustPhysicalPlans is only for implementing interface. DataSource and Dual generate task in `findBestTask` directly. 
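+// Reaching the panic below therefore indicates a logical operator that forgot
+// to override exhaustPhysicalPlans, not a user-facing error condition.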
+func (p *baseLogicalPlan) exhaustPhysicalPlans(_ *property.PhysicalProperty) []PhysicalPlan { + panic("baseLogicalPlan.exhaustPhysicalPlans() should never be called.") +} + +func (la *LogicalAggregation) getHashAggs(prop *property.PhysicalProperty) []PhysicalPlan { + if !prop.IsEmpty() { + return nil + } + hashAggs := make([]PhysicalPlan, 0, len(wholeTaskTypes)) + taskTypes := []property.TaskType{property.CopSingleReadTaskType, property.CopDoubleReadTaskType, property.RootTaskType} + for _, taskTp := range taskTypes { + agg := NewPhysicalHashAgg(la, la.stats.ScaleByExpectCnt(prop.ExpectedCnt), &property.PhysicalProperty{ExpectedCnt: math.MaxFloat64, TaskTp: taskTp}) + agg.SetSchema(la.schema.Clone()) + hashAggs = append(hashAggs, agg) + } + return hashAggs +} + +func (la *LogicalAggregation) exhaustPhysicalPlans(prop *property.PhysicalProperty) []PhysicalPlan { + return la.getHashAggs(prop) +} + +func (p *LogicalSelection) exhaustPhysicalPlans(prop *property.PhysicalProperty) []PhysicalPlan { + childProp := prop.Clone() + sel := PhysicalSelection{ + Conditions: p.Conditions, + }.Init(p.ctx, p.stats.ScaleByExpectCnt(prop.ExpectedCnt), childProp) + return []PhysicalPlan{sel} +} + +func (p *LogicalLimit) exhaustPhysicalPlans(prop *property.PhysicalProperty) []PhysicalPlan { + if !prop.IsEmpty() { + return nil + } + ret := make([]PhysicalPlan, 0, len(wholeTaskTypes)) + for _, tp := range wholeTaskTypes { + resultProp := &property.PhysicalProperty{TaskTp: tp, ExpectedCnt: float64(p.Count + p.Offset)} + limit := PhysicalLimit{ + Offset: p.Offset, + Count: p.Count, + }.Init(p.ctx, p.stats, resultProp) + ret = append(ret, limit) + } + return ret +} + +func (ls *LogicalSort) getPhysicalSort(prop *property.PhysicalProperty) *PhysicalSort { + ps := PhysicalSort{ByItems: ls.ByItems}.Init(ls.ctx, ls.stats.ScaleByExpectCnt(prop.ExpectedCnt), &property.PhysicalProperty{ExpectedCnt: math.MaxFloat64}) + return ps +} + +func (ls *LogicalSort) getNominalSort(reqProp *property.PhysicalProperty) *NominalSort { + prop, canPass := GetPropByOrderByItems(ls.ByItems) + if !canPass { + return nil + } + prop.ExpectedCnt = reqProp.ExpectedCnt + ps := NominalSort{}.Init(ls.ctx, prop) + return ps +} + +func (ls *LogicalSort) exhaustPhysicalPlans(prop *property.PhysicalProperty) []PhysicalPlan { + if MatchItems(prop, ls.ByItems) { + ret := make([]PhysicalPlan, 0, 2) + ret = append(ret, ls.getPhysicalSort(prop)) + ns := ls.getNominalSort(prop) + if ns != nil { + ret = append(ret, ns) + } + return ret + } + return nil +} diff --git a/planner/core/explain.go b/planner/core/explain.go new file mode 100644 index 0000000..eea904d --- /dev/null +++ b/planner/core/explain.go @@ -0,0 +1,460 @@ +// Copyright 2017 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package core + +import ( + "bytes" + "fmt" + "strings" + + "github.com/pingcap/tidb/expression" + "github.com/pingcap/tidb/expression/aggregation" + "github.com/pingcap/tidb/statistics" +) + +// ExplainInfo implements Plan interface. 
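+// A typical result looks roughly like
+//   table:t, index:b, range:[1,1], keep order:false, stats:pseudo
+// (table and index names here are illustrative); in the normalized form the
+// range part is masked as [?,?] and the stats:pseudo suffix is omitted.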
+func (p *PhysicalIndexScan) ExplainInfo() string { + return p.explainInfo(false) +} + +func (p *PhysicalIndexScan) explainInfo(normalized bool) string { + buffer := bytes.NewBufferString("") + tblName := p.Table.Name.O + if p.TableAsName != nil && p.TableAsName.O != "" { + tblName = p.TableAsName.O + } + fmt.Fprintf(buffer, "table:%s", tblName) + if len(p.Index.Columns) > 0 { + buffer.WriteString(", index:") + for i, idxCol := range p.Index.Columns { + buffer.WriteString(idxCol.Name.O) + if i+1 < len(p.Index.Columns) { + buffer.WriteString(", ") + } + } + } + if len(p.Ranges) > 0 { + if normalized { + fmt.Fprint(buffer, ", range:[?,?]") + } else { + fmt.Fprint(buffer, ", range:") + for i, idxRange := range p.Ranges { + fmt.Fprint(buffer, idxRange.String()) + if i+1 < len(p.Ranges) { + fmt.Fprint(buffer, ", ") + } + } + } + } + fmt.Fprintf(buffer, ", keep order:%v", p.KeepOrder) + if p.Desc { + buffer.WriteString(", desc") + } + if p.stats.StatsVersion == statistics.PseudoVersion && !normalized { + buffer.WriteString(", stats:pseudo") + } + return buffer.String() +} + +// ExplainNormalizedInfo implements Plan interface. +func (p *PhysicalIndexScan) ExplainNormalizedInfo() string { + return p.explainInfo(true) +} + +// ExplainInfo implements Plan interface. +func (p *PhysicalTableScan) ExplainInfo() string { + return p.explainInfo(false) +} + +// ExplainNormalizedInfo implements Plan interface. +func (p *PhysicalTableScan) ExplainNormalizedInfo() string { + return p.explainInfo(true) +} + +func (p *PhysicalTableScan) explainInfo(normalized bool) string { + buffer := bytes.NewBufferString("") + tblName := p.Table.Name.O + if p.TableAsName != nil && p.TableAsName.O != "" { + tblName = p.TableAsName.O + } + fmt.Fprintf(buffer, "table:%s", tblName) + if p.pkCol != nil { + fmt.Fprintf(buffer, ", pk col:%s", p.pkCol.ExplainInfo()) + } + if len(p.Ranges) > 0 { + if normalized { + fmt.Fprint(buffer, ", range:[?,?]") + } else { + fmt.Fprint(buffer, ", range:") + for i, idxRange := range p.Ranges { + fmt.Fprint(buffer, idxRange.String()) + if i+1 < len(p.Ranges) { + fmt.Fprint(buffer, ", ") + } + } + } + } + fmt.Fprintf(buffer, ", keep order:%v", p.KeepOrder) + if p.Desc { + buffer.WriteString(", desc") + } + if p.stats.StatsVersion == statistics.PseudoVersion && !normalized { + buffer.WriteString(", stats:pseudo") + } + return buffer.String() +} + +// ExplainInfo implements Plan interface. +func (p *PhysicalTableReader) ExplainInfo() string { + return "data:" + p.tablePlan.ExplainID().String() +} + +// ExplainNormalizedInfo implements Plan interface. +func (p *PhysicalTableReader) ExplainNormalizedInfo() string { + return p.ExplainInfo() +} + +// ExplainInfo implements Plan interface. +func (p *PhysicalIndexReader) ExplainInfo() string { + return "index:" + p.indexPlan.ExplainID().String() +} + +// ExplainNormalizedInfo implements Plan interface. +func (p *PhysicalIndexReader) ExplainNormalizedInfo() string { + return p.ExplainInfo() +} + +// ExplainInfo implements Plan interface. +func (p *PhysicalIndexLookUpReader) ExplainInfo() string { + return "" +} + +// ExplainInfo implements Plan interface. +func (p *PhysicalUnionScan) ExplainInfo() string { + return string(expression.SortedExplainExpressionList(p.Conditions)) +} + +// ExplainInfo implements Plan interface. +func (p *PhysicalSelection) ExplainInfo() string { + return string(expression.SortedExplainExpressionList(p.Conditions)) +} + +// ExplainNormalizedInfo implements Plan interface. 
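+// As understood here, the normalized form is intended for plan digests:
+// constant operands in the conditions are masked (e.g. rendered as "?") so
+// that structurally identical plans compare equal; see the expression package
+// for the authoritative behavior of SortedExplainNormalizedExpressionList.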
+func (p *PhysicalSelection) ExplainNormalizedInfo() string { + return string(expression.SortedExplainNormalizedExpressionList(p.Conditions)) +} + +// ExplainInfo implements Plan interface. +func (p *PhysicalProjection) ExplainInfo() string { + return expression.ExplainExpressionList(p.Exprs, p.schema) +} + +// ExplainNormalizedInfo implements Plan interface. +func (p *PhysicalProjection) ExplainNormalizedInfo() string { + return string(expression.SortedExplainNormalizedExpressionList(p.Exprs)) +} + +// ExplainInfo implements Plan interface. +func (p *PhysicalTableDual) ExplainInfo() string { + return fmt.Sprintf("rows:%v", p.RowCount) +} + +// ExplainInfo implements Plan interface. +func (p *PhysicalSort) ExplainInfo() string { + buffer := bytes.NewBufferString("") + return explainByItems(buffer, p.ByItems).String() +} + +// ExplainInfo implements Plan interface. +func (p *PhysicalLimit) ExplainInfo() string { + return fmt.Sprintf("offset:%v, count:%v", p.Offset, p.Count) +} + +// ExplainInfo implements Plan interface. +func (p *basePhysicalAgg) ExplainInfo() string { + return p.explainInfo(false) +} + +func (p *basePhysicalAgg) explainInfo(normalized bool) string { + sortedExplainExpressionList := expression.SortedExplainExpressionList + if normalized { + sortedExplainExpressionList = expression.SortedExplainNormalizedExpressionList + } + + builder := &strings.Builder{} + if len(p.GroupByItems) > 0 { + fmt.Fprintf(builder, "group by:%s, ", + sortedExplainExpressionList(p.GroupByItems)) + } + for i := 0; i < len(p.AggFuncs); i++ { + builder.WriteString("funcs:") + fmt.Fprintf(builder, "%v->%v", aggregation.ExplainAggFunc(p.AggFuncs[i]), p.schema.Columns[i]) + if i+1 < len(p.AggFuncs) { + builder.WriteString(", ") + } + } + return builder.String() +} + +// ExplainNormalizedInfo implements Plan interface. +func (p *basePhysicalAgg) ExplainNormalizedInfo() string { + return p.explainInfo(true) +} + +// ExplainInfo implements Plan interface. +func (p *PhysicalHashJoin) ExplainInfo() string { + return p.explainInfo(false) +} + +// ExplainNormalizedInfo implements Plan interface. +func (p *PhysicalHashJoin) ExplainNormalizedInfo() string { + return p.explainInfo(true) +} + +func (p *PhysicalHashJoin) explainInfo(normalized bool) string { + sortedExplainExpressionList := expression.SortedExplainExpressionList + if normalized { + sortedExplainExpressionList = expression.SortedExplainNormalizedExpressionList + } + + buffer := new(bytes.Buffer) + + if len(p.EqualConditions) == 0 { + buffer.WriteString("CARTESIAN ") + } + + buffer.WriteString(p.JoinType.String()) + if len(p.EqualConditions) > 0 { + if normalized { + fmt.Fprintf(buffer, ", equal:%s", expression.SortedExplainNormalizedScalarFuncList(p.EqualConditions)) + } else { + fmt.Fprintf(buffer, ", equal:%v", p.EqualConditions) + } + } + if len(p.LeftConditions) > 0 { + if normalized { + fmt.Fprintf(buffer, ", left cond:%s", expression.SortedExplainNormalizedExpressionList(p.LeftConditions)) + } else { + fmt.Fprintf(buffer, ", left cond:%s", p.LeftConditions) + } + } + if len(p.RightConditions) > 0 { + fmt.Fprintf(buffer, ", right cond:%s", + sortedExplainExpressionList(p.RightConditions)) + } + if len(p.OtherConditions) > 0 { + fmt.Fprintf(buffer, ", other cond:%s", + sortedExplainExpressionList(p.OtherConditions)) + } + return buffer.String() +} + +// ExplainInfo implements Plan interface. 
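+// For instance (column names are made up), an inner merge join on t1.a = t2.a
+// could be described as
+//   inner join, left key:test.t1.a, right key:test.t2.a
+// with left/right/other conditions appended only when present.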
+func (p *PhysicalMergeJoin) ExplainInfo() string { + return p.explainInfo(false) +} + +func (p *PhysicalMergeJoin) explainInfo(normalized bool) string { + sortedExplainExpressionList := expression.SortedExplainExpressionList + if normalized { + sortedExplainExpressionList = expression.SortedExplainNormalizedExpressionList + } + + buffer := bytes.NewBufferString(p.JoinType.String()) + if len(p.LeftJoinKeys) > 0 { + fmt.Fprintf(buffer, ", left key:%s", + expression.ExplainColumnList(p.LeftJoinKeys)) + } + if len(p.RightJoinKeys) > 0 { + fmt.Fprintf(buffer, ", right key:%s", + expression.ExplainColumnList(p.RightJoinKeys)) + } + if len(p.LeftConditions) > 0 { + if normalized { + fmt.Fprintf(buffer, ", left cond:%s", expression.SortedExplainNormalizedExpressionList(p.LeftConditions)) + } else { + fmt.Fprintf(buffer, ", left cond:%s", p.LeftConditions) + } + } + if len(p.RightConditions) > 0 { + fmt.Fprintf(buffer, ", right cond:%s", + sortedExplainExpressionList(p.RightConditions)) + } + if len(p.OtherConditions) > 0 { + fmt.Fprintf(buffer, ", other cond:%s", + sortedExplainExpressionList(p.OtherConditions)) + } + return buffer.String() +} + +// ExplainNormalizedInfo implements Plan interface. +func (p *PhysicalMergeJoin) ExplainNormalizedInfo() string { + return p.explainInfo(true) +} + +// ExplainInfo implements Plan interface. +func (p *PhysicalTopN) ExplainInfo() string { + buffer := bytes.NewBufferString("") + buffer = explainByItems(buffer, p.ByItems) + fmt.Fprintf(buffer, ", offset:%v, count:%v", p.Offset, p.Count) + return buffer.String() +} + +// ExplainInfo implements Plan interface. +func (p *LogicalJoin) ExplainInfo() string { + buffer := bytes.NewBufferString(p.JoinType.String()) + if len(p.EqualConditions) > 0 { + fmt.Fprintf(buffer, ", equal:%v", p.EqualConditions) + } + if len(p.LeftConditions) > 0 { + fmt.Fprintf(buffer, ", left cond:%s", + expression.SortedExplainExpressionList(p.LeftConditions)) + } + if len(p.RightConditions) > 0 { + fmt.Fprintf(buffer, ", right cond:%s", + expression.SortedExplainExpressionList(p.RightConditions)) + } + if len(p.OtherConditions) > 0 { + fmt.Fprintf(buffer, ", other cond:%s", + expression.SortedExplainExpressionList(p.OtherConditions)) + } + return buffer.String() +} + +// ExplainInfo implements Plan interface. +func (p *LogicalAggregation) ExplainInfo() string { + buffer := bytes.NewBufferString("") + if len(p.GroupByItems) > 0 { + fmt.Fprintf(buffer, "group by:%s, ", + expression.SortedExplainExpressionList(p.GroupByItems)) + } + if len(p.AggFuncs) > 0 { + buffer.WriteString("funcs:") + for i, agg := range p.AggFuncs { + buffer.WriteString(aggregation.ExplainAggFunc(agg)) + if i+1 < len(p.AggFuncs) { + buffer.WriteString(", ") + } + } + } + return buffer.String() +} + +// ExplainInfo implements Plan interface. +func (p *LogicalProjection) ExplainInfo() string { + return expression.ExplainExpressionList(p.Exprs, p.schema) +} + +// ExplainInfo implements Plan interface. +func (p *LogicalSelection) ExplainInfo() string { + return string(expression.SortedExplainExpressionList(p.Conditions)) +} + +// ExplainInfo implements Plan interface. +func (p *LogicalTableDual) ExplainInfo() string { + return fmt.Sprintf("rowcount:%d", p.RowCount) +} + +// ExplainInfo implements Plan interface. 
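+// At the logical DataSource level only the (possibly aliased) table name is
+// shown, e.g. "table:t1"; access paths and ranges appear on the physical scans.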
+func (p *DataSource) ExplainInfo() string { + buffer := bytes.NewBufferString("") + tblName := p.tableInfo.Name.O + if p.TableAsName != nil && p.TableAsName.O != "" { + tblName = p.TableAsName.O + } + fmt.Fprintf(buffer, "table:%s", tblName) + return buffer.String() +} + +// ExplainInfo implements Plan interface. +func (p *LogicalUnionScan) ExplainInfo() string { + buffer := bytes.NewBufferString("") + fmt.Fprintf(buffer, "conds:%s", + expression.SortedExplainExpressionList(p.conditions)) + fmt.Fprintf(buffer, ", handle:%s", p.handleCol.ExplainInfo()) + return buffer.String() +} + +func explainByItems(buffer *bytes.Buffer, byItems []*ByItems) *bytes.Buffer { + for i, item := range byItems { + order := "asc" + if item.Desc { + order = "desc" + } + fmt.Fprintf(buffer, "%s:%s", item.Expr.ExplainInfo(), order) + if i+1 < len(byItems) { + buffer.WriteString(", ") + } + } + return buffer +} + +// ExplainInfo implements Plan interface. +func (p *LogicalSort) ExplainInfo() string { + buffer := bytes.NewBufferString("") + return explainByItems(buffer, p.ByItems).String() +} + +// ExplainInfo implements Plan interface. +func (p *LogicalTopN) ExplainInfo() string { + buffer := bytes.NewBufferString("") + buffer = explainByItems(buffer, p.ByItems) + fmt.Fprintf(buffer, ", offset:%v, count:%v", p.Offset, p.Count) + return buffer.String() +} + +// ExplainInfo implements Plan interface. +func (p *LogicalLimit) ExplainInfo() string { + return fmt.Sprintf("offset:%v, count:%v", p.Offset, p.Count) +} + +// ExplainInfo implements Plan interface. +func (p *LogicalTableScan) ExplainInfo() string { + buffer := bytes.NewBufferString(p.Source.ExplainInfo()) + if p.Source.handleCol != nil { + fmt.Fprintf(buffer, ", pk col:%s", p.Source.handleCol.ExplainInfo()) + } + if len(p.AccessConds) > 0 { + fmt.Fprintf(buffer, ", cond:%v", p.AccessConds) + } + return buffer.String() +} + +// ExplainInfo implements Plan interface. +func (p *LogicalIndexScan) ExplainInfo() string { + buffer := bytes.NewBufferString(p.Source.ExplainInfo()) + index := p.Index + if len(index.Columns) > 0 { + buffer.WriteString(", index:") + for i, idxCol := range index.Columns { + buffer.WriteString(idxCol.Name.O) + if i+1 < len(index.Columns) { + buffer.WriteString(", ") + } + } + } + if len(p.AccessConds) > 0 { + fmt.Fprintf(buffer, ", cond:%v", p.AccessConds) + } + return buffer.String() +} + +// ExplainInfo implements Plan interface. +func (p *TiKVSingleGather) ExplainInfo() string { + buffer := bytes.NewBufferString(p.Source.ExplainInfo()) + if p.IsIndexGather { + buffer.WriteString(", index:" + p.Index.Name.String()) + } + return buffer.String() +} diff --git a/planner/core/expression_rewriter.go b/planner/core/expression_rewriter.go new file mode 100644 index 0000000..c213c77 --- /dev/null +++ b/planner/core/expression_rewriter.go @@ -0,0 +1,696 @@ +// Copyright 2016 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package core + +import ( + "context" + "strings" + + "github.com/pingcap/errors" + "github.com/pingcap/tidb/expression" + "github.com/pingcap/tidb/infoschema" + "github.com/pingcap/tidb/parser/ast" + "github.com/pingcap/tidb/parser/model" + "github.com/pingcap/tidb/parser/mysql" + "github.com/pingcap/tidb/parser/opcode" + "github.com/pingcap/tidb/sessionctx" + "github.com/pingcap/tidb/sessionctx/variable" + "github.com/pingcap/tidb/table" + "github.com/pingcap/tidb/types" + driver "github.com/pingcap/tidb/types/parser_driver" + "github.com/pingcap/tidb/util/chunk" +) + +// evalAstExpr evaluates ast expression directly. +func evalAstExpr(sctx sessionctx.Context, expr ast.ExprNode) (types.Datum, error) { + if val, ok := expr.(*driver.ValueExpr); ok { + return val.Datum, nil + } + var is infoschema.InfoSchema + if sctx.GetSessionVars().TxnCtx.InfoSchema != nil { + is = sctx.GetSessionVars().TxnCtx.InfoSchema.(infoschema.InfoSchema) + } + b := NewPlanBuilder(sctx, is) + fakePlan := LogicalTableDual{}.Init(sctx) + newExpr, _, err := b.rewrite(context.TODO(), expr, fakePlan, nil, true) + if err != nil { + return types.Datum{}, err + } + return newExpr.Eval(chunk.Row{}) +} + +// rewrite function rewrites ast expr to expression.Expression. +// aggMapper maps ast.AggregateFuncExpr to the columns offset in p's output schema. +// asScalar means whether this expression must be treated as a scalar expression. +// And this function returns a result expression, a new plan that may have apply or semi-join. +func (b *PlanBuilder) rewrite(ctx context.Context, exprNode ast.ExprNode, p LogicalPlan, aggMapper map[*ast.AggregateFuncExpr]int, asScalar bool) (expression.Expression, LogicalPlan, error) { + expr, resultPlan, err := b.rewriteWithPreprocess(ctx, exprNode, p, aggMapper, asScalar, nil) + return expr, resultPlan, err +} + +// rewriteWithPreprocess is for handling the situation that we need to adjust the input ast tree +// before really using its node in `expressionRewriter.Leave`. In that case, we first call +// er.preprocess(expr), which returns a new expr. Then we use the new expr in `Leave`. +func (b *PlanBuilder) rewriteWithPreprocess( + ctx context.Context, + exprNode ast.ExprNode, + p LogicalPlan, aggMapper map[*ast.AggregateFuncExpr]int, + asScalar bool, + preprocess func(ast.Node) ast.Node, +) (expression.Expression, LogicalPlan, error) { + b.rewriterCounter++ + defer func() { b.rewriterCounter-- }() + + rewriter := b.getExpressionRewriter(ctx, p) + // The rewriter maybe is obtained from "b.rewriterPool", "rewriter.err" is + // not nil means certain previous procedure has not handled this error. + // Here we give us one more chance to make a correct behavior by handling + // this missed error. 
+ if rewriter.err != nil { + return nil, nil, rewriter.err + } + + rewriter.aggrMap = aggMapper + rewriter.asScalar = asScalar + rewriter.preprocess = preprocess + + expr, resultPlan, err := b.rewriteExprNode(rewriter, exprNode, asScalar) + return expr, resultPlan, err +} + +func (b *PlanBuilder) getExpressionRewriter(ctx context.Context, p LogicalPlan) (rewriter *expressionRewriter) { + defer func() { + if p != nil { + rewriter.schema = p.Schema() + rewriter.names = p.OutputNames() + } + }() + + if len(b.rewriterPool) < b.rewriterCounter { + rewriter = &expressionRewriter{p: p, b: b, sctx: b.ctx, ctx: ctx} + b.rewriterPool = append(b.rewriterPool, rewriter) + return + } + + rewriter = b.rewriterPool[b.rewriterCounter-1] + rewriter.p = p + rewriter.asScalar = false + rewriter.aggrMap = nil + rewriter.preprocess = nil + rewriter.insertPlan = nil + rewriter.ctxStack = rewriter.ctxStack[:0] + rewriter.ctxNameStk = rewriter.ctxNameStk[:0] + rewriter.ctx = ctx + return +} + +func (b *PlanBuilder) rewriteExprNode(rewriter *expressionRewriter, exprNode ast.ExprNode, asScalar bool) (expression.Expression, LogicalPlan, error) { + exprNode.Accept(rewriter) + if rewriter.err != nil { + return nil, nil, errors.Trace(rewriter.err) + } + if !asScalar && len(rewriter.ctxStack) == 0 { + return nil, rewriter.p, nil + } + if len(rewriter.ctxStack) != 1 { + return nil, nil, errors.Errorf("context len %v is invalid", len(rewriter.ctxStack)) + } + rewriter.err = expression.CheckArgsNotMultiColumnRow(rewriter.ctxStack[0]) + if rewriter.err != nil { + return nil, nil, errors.Trace(rewriter.err) + } + return rewriter.ctxStack[0], rewriter.p, nil +} + +type expressionRewriter struct { + ctxStack []expression.Expression + ctxNameStk []*types.FieldName + p LogicalPlan + schema *expression.Schema + names []*types.FieldName + err error + aggrMap map[*ast.AggregateFuncExpr]int + b *PlanBuilder + sctx sessionctx.Context + ctx context.Context + + // asScalar indicates the return value must be a scalar value. + // NOTE: This value can be changed during expression rewritten. + asScalar bool + + // preprocess is called for every ast.Node in Leave. + preprocess func(ast.Node) ast.Node + + // insertPlan is only used to rewrite the expressions inside the assignment + // of the "INSERT" statement. + insertPlan *Insert +} + +func (er *expressionRewriter) ctxStackLen() int { + return len(er.ctxStack) +} + +func (er *expressionRewriter) ctxStackPop(num int) { + l := er.ctxStackLen() + er.ctxStack = er.ctxStack[:l-num] + er.ctxNameStk = er.ctxNameStk[:l-num] +} + +func (er *expressionRewriter) ctxStackAppend(col expression.Expression, name *types.FieldName) { + er.ctxStack = append(er.ctxStack, col) + er.ctxNameStk = append(er.ctxNameStk, name) +} + +// constructBinaryOpFunction converts binary operator functions +// 1. If op are EQ or NE or NullEQ, constructBinaryOpFunctions converts (a0,a1,a2) op (b0,b1,b2) to (a0 op b0) and (a1 op b1) and (a2 op b2) +// 2. 
Else constructBinaryOpFunctions converts (a0,a1,a2) op (b0,b1,b2) to +// `IF( a0 NE b0, a0 op b0, +// IF ( isNull(a0 NE b0), Null, +// IF ( a1 NE b1, a1 op b1, +// IF ( isNull(a1 NE b1), Null, a2 op b2))))` +func (er *expressionRewriter) constructBinaryOpFunction(l expression.Expression, r expression.Expression, op string) (expression.Expression, error) { + lLen, rLen := expression.GetRowLen(l), expression.GetRowLen(r) + if lLen == 1 && rLen == 1 { + return er.newFunction(op, types.NewFieldType(mysql.TypeTiny), l, r) + } else if rLen != lLen { + return nil, expression.ErrOperandColumns.GenWithStackByArgs(lLen) + } + switch op { + case ast.EQ, ast.NE: + funcs := make([]expression.Expression, lLen) + for i := 0; i < lLen; i++ { + var err error + funcs[i], err = er.constructBinaryOpFunction(expression.GetFuncArg(l, i), expression.GetFuncArg(r, i), op) + if err != nil { + return nil, err + } + } + if op == ast.NE { + return expression.ComposeDNFCondition(er.sctx, funcs...), nil + } + return expression.ComposeCNFCondition(er.sctx, funcs...), nil + default: + larg0, rarg0 := expression.GetFuncArg(l, 0), expression.GetFuncArg(r, 0) + var expr1, expr2, expr3, expr4, expr5 expression.Expression + expr1 = expression.NewFunctionInternal(er.sctx, ast.NE, types.NewFieldType(mysql.TypeTiny), larg0, rarg0) + expr2 = expression.NewFunctionInternal(er.sctx, op, types.NewFieldType(mysql.TypeTiny), larg0, rarg0) + expr3 = expression.NewFunctionInternal(er.sctx, ast.IsNull, types.NewFieldType(mysql.TypeTiny), expr1) + var err error + l, err = expression.PopRowFirstArg(er.sctx, l) + if err != nil { + return nil, err + } + r, err = expression.PopRowFirstArg(er.sctx, r) + if err != nil { + return nil, err + } + expr4, err = er.constructBinaryOpFunction(l, r, op) + if err != nil { + return nil, err + } + expr5, err = er.newFunction(ast.If, types.NewFieldType(mysql.TypeTiny), expr3, expression.Null, expr4) + if err != nil { + return nil, err + } + return er.newFunction(ast.If, types.NewFieldType(mysql.TypeTiny), expr1, expr2, expr5) + } +} + +// Enter implements Visitor interface. +func (er *expressionRewriter) Enter(inNode ast.Node) (ast.Node, bool) { + switch v := inNode.(type) { + case *ast.AggregateFuncExpr: + index, ok := -1, false + if er.aggrMap != nil { + index, ok = er.aggrMap[v] + } + if !ok { + er.err = ErrInvalidGroupFuncUse + return inNode, true + } + er.ctxStackAppend(er.schema.Columns[index], er.names[index]) + return inNode, true + case *ast.ColumnNameExpr: + if index, ok := er.b.colMapper[v]; ok { + er.ctxStackAppend(er.schema.Columns[index], er.names[index]) + return inNode, true + } + case *ast.PatternInExpr: + if len(v.List) != 1 { + break + } + // For 10 in ((select * from t)), the parser won't set v.Sel. + // So we must process this case here. + x := v.List[0] + for { + switch y := x.(type) { + case *ast.ParenthesesExpr: + x = y.Expr + default: + return inNode, false + } + } + case *ast.ParenthesesExpr: + case *ast.ValuesExpr: + schema, names := er.schema, er.names + // NOTE: "er.insertPlan != nil" means that we are rewriting the + // expressions inside the assignment of "INSERT" statement. we have to + // use the "tableSchema" of that "insertPlan". 
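+			// For example, in `INSERT ... ON DUPLICATE KEY UPDATE c = VALUES(c)`,
+			// VALUES(c) refers to the value that would have been inserted, so the
+			// column must be resolved against the insert plan's table schema here.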
+ if er.insertPlan != nil { + schema = er.insertPlan.tableSchema + names = er.insertPlan.tableColNames + } + idx, err := expression.FindFieldName(names, v.Column.Name) + if err != nil { + er.err = err + return inNode, false + } + if idx < 0 { + er.err = ErrUnknownColumn.GenWithStackByArgs(v.Column.Name.OrigColName(), "field list") + return inNode, false + } + col := schema.Columns[idx] + er.ctxStackAppend(expression.NewValuesFunc(er.sctx, col.Index, col.RetType), types.EmptyName) + return inNode, true + case *ast.FuncCallExpr: + default: + er.asScalar = true + } + return inNode, false +} + +// Leave implements Visitor interface. +func (er *expressionRewriter) Leave(originInNode ast.Node) (retNode ast.Node, ok bool) { + if er.err != nil { + return retNode, false + } + var inNode = originInNode + if er.preprocess != nil { + inNode = er.preprocess(inNode) + } + switch v := inNode.(type) { + case *ast.AggregateFuncExpr, *ast.ColumnNameExpr, *ast.ParenthesesExpr, *ast.ValuesExpr: + case *driver.ValueExpr: + value := &expression.Constant{Value: v.Datum, RetType: &v.Type} + er.ctxStackAppend(value, types.EmptyName) + case *ast.VariableExpr: + er.rewriteVariable(v) + case *ast.FuncCallExpr: + er.funcCallToExpression(v) + case *ast.ColumnName: + er.toColumn(v) + case *ast.UnaryOperationExpr: + er.unaryOpToExpression(v) + case *ast.BinaryOperationExpr: + er.binaryOpToExpression(v) + case *ast.BetweenExpr: + er.betweenToExpression(v) + case *ast.RowExpr: + er.rowToScalarFunc(v) + case *ast.PatternInExpr: + er.inToExpression(len(v.List), v.Not, &v.Type) + case *ast.IsNullExpr: + er.isNullToExpression(v) + case *ast.DefaultExpr: + er.evalDefaultExpr(v) + default: + er.err = errors.Errorf("UnknownType: %T", v) + return retNode, false + } + + if er.err != nil { + return retNode, false + } + return originInNode, true +} + +func (er *expressionRewriter) newFunction(funcName string, retType *types.FieldType, args ...expression.Expression) (expression.Expression, error) { + return expression.NewFunction(er.sctx, funcName, retType, args...) +} + +func (er *expressionRewriter) rewriteVariable(v *ast.VariableExpr) { + stkLen := len(er.ctxStack) + name := strings.ToLower(v.Name) + sessionVars := er.b.ctx.GetSessionVars() + if !v.IsSystem { + if v.Value != nil { + er.ctxStack[stkLen-1], er.err = er.newFunction(ast.SetVar, + er.ctxStack[stkLen-1].GetType(), + expression.DatumToConstant(types.NewDatum(name), mysql.TypeString), + er.ctxStack[stkLen-1]) + er.ctxNameStk[stkLen-1] = types.EmptyName + return + } + f, err := er.newFunction(ast.GetVar, + // TODO: Here is wrong, the sessionVars should store a name -> Datum map. Will fix it later. + types.NewFieldType(mysql.TypeString), + expression.DatumToConstant(types.NewStringDatum(name), mysql.TypeString)) + if err != nil { + er.err = err + return + } + er.ctxStackAppend(f, types.EmptyName) + return + } + var val string + var err error + if v.ExplicitScope { + err = variable.ValidateGetSystemVar(name, v.IsGlobal) + if err != nil { + er.err = err + return + } + } + sysVar := variable.SysVars[name] + if sysVar == nil { + er.err = variable.ErrUnknownSystemVar.GenWithStackByArgs(name) + return + } + // Variable is @@gobal.variable_name or variable is only global scope variable. 
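+	// For example, `SELECT @@global.autocommit` always reads the global value,
+	// while `SELECT @@autocommit` reads the session value unless the variable
+	// exists only at global scope (the variable name is just an example).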
+ if v.IsGlobal || sysVar.Scope == variable.ScopeGlobal { + val, err = variable.GetGlobalSystemVar(sessionVars, name) + } else { + val, err = variable.GetSessionSystemVar(sessionVars, name) + } + if err != nil { + er.err = err + return + } + e := expression.DatumToConstant(types.NewStringDatum(val), mysql.TypeVarString) + e.GetType().Charset, _ = er.sctx.GetSessionVars().GetSystemVar(variable.CharacterSetConnection) + e.GetType().Collate, _ = er.sctx.GetSessionVars().GetSystemVar(variable.CollationConnection) + er.ctxStackAppend(e, types.EmptyName) +} + +func (er *expressionRewriter) unaryOpToExpression(v *ast.UnaryOperationExpr) { + stkLen := len(er.ctxStack) + var op string + switch v.Op { + case opcode.Plus: + // expression (+ a) is equal to a + return + case opcode.Minus: + op = ast.UnaryMinus + case opcode.Not: + op = ast.UnaryNot + default: + er.err = errors.Errorf("Unknown Unary Op %T", v.Op) + return + } + if expression.GetRowLen(er.ctxStack[stkLen-1]) != 1 { + er.err = expression.ErrOperandColumns.GenWithStackByArgs(1) + return + } + er.ctxStack[stkLen-1], er.err = er.newFunction(op, &v.Type, er.ctxStack[stkLen-1]) + er.ctxNameStk[stkLen-1] = types.EmptyName +} + +func (er *expressionRewriter) binaryOpToExpression(v *ast.BinaryOperationExpr) { + stkLen := len(er.ctxStack) + var function expression.Expression + switch v.Op { + case opcode.EQ, opcode.NE, opcode.NullEQ, opcode.GT, opcode.GE, opcode.LT, opcode.LE: + function, er.err = er.constructBinaryOpFunction(er.ctxStack[stkLen-2], er.ctxStack[stkLen-1], + v.Op.String()) + default: + lLen := expression.GetRowLen(er.ctxStack[stkLen-2]) + rLen := expression.GetRowLen(er.ctxStack[stkLen-1]) + if lLen != 1 || rLen != 1 { + er.err = expression.ErrOperandColumns.GenWithStackByArgs(1) + return + } + function, er.err = er.newFunction(v.Op.String(), types.NewFieldType(mysql.TypeUnspecified), er.ctxStack[stkLen-2:]...) + } + if er.err != nil { + return + } + er.ctxStackPop(2) + er.ctxStackAppend(function, types.EmptyName) +} + +func (er *expressionRewriter) notToExpression(hasNot bool, op string, tp *types.FieldType, + args ...expression.Expression) expression.Expression { + opFunc, err := er.newFunction(op, tp, args...) + if err != nil { + er.err = err + return nil + } + if !hasNot { + return opFunc + } + + opFunc, err = er.newFunction(ast.UnaryNot, tp, opFunc) + if err != nil { + er.err = err + return nil + } + return opFunc +} + +func (er *expressionRewriter) isNullToExpression(v *ast.IsNullExpr) { + stkLen := len(er.ctxStack) + if expression.GetRowLen(er.ctxStack[stkLen-1]) != 1 { + er.err = expression.ErrOperandColumns.GenWithStackByArgs(1) + return + } + function := er.notToExpression(v.Not, ast.IsNull, &v.Type, er.ctxStack[stkLen-1]) + er.ctxStackPop(1) + er.ctxStackAppend(function, types.EmptyName) +} + +// inToExpression converts in expression to a scalar function. The argument lLen means the length of in list. +// The argument not means if the expression is not in. The tp stands for the expression type, which is always bool. +// a in (b, c, d) will be rewritten as `(a = b) or (a = c) or (a = d)`. 
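+// When the left side is a row, the same rule applies element-wise: for example,
+// (a, b) in ((1, 2), (3, 4)) is rewritten as ((a = 1) and (b = 2)) or ((a = 3) and (b = 4)).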
+func (er *expressionRewriter) inToExpression(lLen int, not bool, tp *types.FieldType) { + stkLen := len(er.ctxStack) + l := expression.GetRowLen(er.ctxStack[stkLen-lLen-1]) + for i := 0; i < lLen; i++ { + if l != expression.GetRowLen(er.ctxStack[stkLen-lLen+i]) { + er.err = expression.ErrOperandColumns.GenWithStackByArgs(l) + return + } + } + args := er.ctxStack[stkLen-lLen-1:] + leftFt := args[0].GetType() + leftEt, leftIsNull := leftFt.EvalType(), leftFt.Tp == mysql.TypeNull + if leftIsNull { + er.ctxStackPop(lLen + 1) + er.ctxStackAppend(expression.Null.Clone(), types.EmptyName) + return + } + allSameType := true + for _, arg := range args[1:] { + if arg.GetType().Tp != mysql.TypeNull && expression.GetAccurateCmpType(args[0], arg) != leftEt { + allSameType = false + break + } + } + var function expression.Expression + if allSameType && l == 1 && lLen > 1 { + function = er.notToExpression(not, ast.In, tp, er.ctxStack[stkLen-lLen-1:]...) + } else { + eqFunctions := make([]expression.Expression, 0, lLen) + for i := stkLen - lLen; i < stkLen; i++ { + expr, err := er.constructBinaryOpFunction(args[0], er.ctxStack[i], ast.EQ) + if err != nil { + er.err = err + return + } + eqFunctions = append(eqFunctions, expr) + } + function = expression.ComposeDNFCondition(er.sctx, eqFunctions...) + if not { + var err error + function, err = er.newFunction(ast.UnaryNot, tp, function) + if err != nil { + er.err = err + return + } + } + } + er.ctxStackPop(lLen + 1) + er.ctxStackAppend(function, types.EmptyName) +} + +func (er *expressionRewriter) rowToScalarFunc(v *ast.RowExpr) { + stkLen := len(er.ctxStack) + length := len(v.Values) + rows := make([]expression.Expression, 0, length) + for i := stkLen - length; i < stkLen; i++ { + rows = append(rows, er.ctxStack[i]) + } + er.ctxStackPop(length) + function, err := er.newFunction(ast.RowFunc, rows[0].GetType(), rows...) + if err != nil { + er.err = err + return + } + er.ctxStackAppend(function, types.EmptyName) +} + +func (er *expressionRewriter) betweenToExpression(v *ast.BetweenExpr) { + stkLen := len(er.ctxStack) + er.err = expression.CheckArgsNotMultiColumnRow(er.ctxStack[stkLen-3:]...) + if er.err != nil { + return + } + + expr, lexp, rexp := er.ctxStack[stkLen-3], er.ctxStack[stkLen-2], er.ctxStack[stkLen-1] + + var op string + var l, r expression.Expression + l, er.err = er.newFunction(ast.GE, &v.Type, expr, lexp) + if er.err == nil { + r, er.err = er.newFunction(ast.LE, &v.Type, expr, rexp) + } + op = ast.LogicAnd + if er.err != nil { + return + } + function, err := er.newFunction(op, &v.Type, l, r) + if err != nil { + er.err = err + return + } + if v.Not { + function, err = er.newFunction(ast.UnaryNot, &v.Type, function) + if err != nil { + er.err = err + return + } + } + er.ctxStackPop(3) + er.ctxStackAppend(function, types.EmptyName) +} + +// rewriteFuncCall handles a FuncCallExpr and generates a customized function. +// It should return true if for the given FuncCallExpr a rewrite is performed so that original behavior is skipped. +// Otherwise it should return false to indicate (the caller) that original behavior needs to be performed. +func (er *expressionRewriter) rewriteFuncCall(v *ast.FuncCallExpr) bool { + switch v.FnName.L { + // when column is not null, ifnull on such column is not necessary. 
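+	// For example, given `create table t(a int not null)`, IFNULL(a, 0) can never
+	// see a NULL argument, so it is rewritten to just the column `a` while keeping
+	// the original output name.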
+ case ast.Ifnull: + if len(v.Args) != 2 { + er.err = expression.ErrIncorrectParameterCount.GenWithStackByArgs(v.FnName.O) + return true + } + stackLen := len(er.ctxStack) + arg1 := er.ctxStack[stackLen-2] + col, isColumn := arg1.(*expression.Column) + // if expr1 is a column and column has not null flag, then we can eliminate ifnull on + // this column. + if isColumn && mysql.HasNotNullFlag(col.RetType.Flag) { + name := er.ctxNameStk[stackLen-2] + newCol := col.Clone().(*expression.Column) + er.ctxStackPop(len(v.Args)) + er.ctxStackAppend(newCol, name) + return true + } + + return false + default: + return false + } +} + +func (er *expressionRewriter) funcCallToExpression(v *ast.FuncCallExpr) { + stackLen := len(er.ctxStack) + args := er.ctxStack[stackLen-len(v.Args):] + er.err = expression.CheckArgsNotMultiColumnRow(args...) + if er.err != nil { + return + } + + if er.rewriteFuncCall(v) { + return + } + + var function expression.Expression + er.ctxStackPop(len(v.Args)) + function, er.err = er.newFunction(v.FnName.L, &v.Type, args...) + er.ctxStackAppend(function, types.EmptyName) +} + +func (er *expressionRewriter) toColumn(v *ast.ColumnName) { + idx, err := expression.FindFieldName(er.names, v) + if err != nil { + er.err = ErrAmbiguous.GenWithStackByArgs(v.Name, clauseMsg[fieldList]) + return + } + if idx >= 0 { + column := er.schema.Columns[idx] + er.ctxStackAppend(column, er.names[idx]) + return + } + if er.b.curClause == globalOrderByClause { + er.b.curClause = orderByClause + } + er.err = ErrUnknownColumn.GenWithStackByArgs(v.String(), clauseMsg[er.b.curClause]) +} + +func (er *expressionRewriter) evalDefaultExpr(v *ast.DefaultExpr) { + stkLen := len(er.ctxStack) + name := er.ctxNameStk[stkLen-1] + switch er.ctxStack[stkLen-1].(type) { + case *expression.Column: + default: + idx, err := expression.FindFieldName(er.names, v.Name) + if err != nil { + er.err = err + return + } + if er.err != nil { + return + } + if idx < 0 { + er.err = ErrUnknownColumn.GenWithStackByArgs(v.Name.OrigColName(), "field_list") + return + } + } + dbName := name.DBName + if dbName.O == "" { + // if database name is not specified, use current database name + dbName = model.NewCIStr(er.sctx.GetSessionVars().CurrentDB) + } + if name.OrigTblName.O == "" { + // column is evaluated by some expressions, for example: + // `select default(c) from (select (a+1) as c from t) as t0` + // in such case, a 'no default' error is returned + er.err = table.ErrNoDefaultValue.GenWithStackByArgs(name.ColName) + return + } + var tbl table.Table + tbl, er.err = er.b.is.TableByName(dbName, name.OrigTblName) + if er.err != nil { + return + } + colName := name.OrigColName.O + if colName == "" { + // in some cases, OrigColName is empty, use ColName instead + colName = name.ColName.O + } + col := table.FindCol(tbl.Cols(), colName) + if col == nil { + er.err = ErrUnknownColumn.GenWithStackByArgs(v.Name, "field_list") + return + } + var val *expression.Constant + // for other columns, just use what it is + val, er.err = er.b.getDefaultValue(col) + if er.err != nil { + return + } + er.ctxStackPop(1) + er.ctxStackAppend(val, types.EmptyName) +} diff --git a/planner/core/expression_rewriter_test.go b/planner/core/expression_rewriter_test.go new file mode 100644 index 0000000..bedd132 --- /dev/null +++ b/planner/core/expression_rewriter_test.go @@ -0,0 +1,60 @@ +// Copyright 2018 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package core_test + +import ( + . "github.com/pingcap/check" + "github.com/pingcap/tidb/util/testkit" + "github.com/pingcap/tidb/util/testleak" +) + +var _ = Suite(&testExpressionRewriterSuite{}) + +type testExpressionRewriterSuite struct { +} + +func (s *testExpressionRewriterSuite) TestIfNullEliminateColName(c *C) { + defer testleak.AfterTest(c)() + store, dom, err := newStoreWithBootstrap() + c.Assert(err, IsNil) + tk := testkit.NewTestKit(c, store) + defer func() { + dom.Close() + store.Close() + }() + tk.MustExec("use test") + tk.MustExec("drop table if exists t") + tk.MustExec("create table t(a int not null, b int not null)") + rs, err := tk.Exec("select ifnull(a,b) from t") + c.Assert(err, IsNil) + fields := rs.Fields() + c.Assert(fields[0].Column.Name.L, Equals, "ifnull(a,b)") +} + +func (s *testExpressionRewriterSuite) TestBinaryOpFunction(c *C) { + defer testleak.AfterTest(c)() + store, dom, err := newStoreWithBootstrap() + c.Assert(err, IsNil) + tk := testkit.NewTestKit(c, store) + defer func() { + dom.Close() + store.Close() + }() + tk.MustExec("use test") + tk.MustExec("drop table if exists t") + tk.MustExec("CREATE TABLE t(a int, b int, c int);") + tk.MustExec("INSERT INTO t VALUES (1, 2, 3), (NULL, 2, 3 ), (1, NULL, 3),(1, 2, NULL),(NULL, 2, 3+1), (1, NULL, 3+1), (1, 2+1, NULL),(NULL, 2, 3-1), (1, NULL, 3-1), (1, 2-1, NULL)") + tk.MustQuery("SELECT * FROM t WHERE (a,b,c) <= (1,2,3) order by b").Check(testkit.Rows("1 1 ", "1 2 3")) + tk.MustQuery("SELECT * FROM t WHERE (a,b,c) > (1,2,3) order by b").Check(testkit.Rows("1 3 ")) +} diff --git a/planner/core/expression_test.go b/planner/core/expression_test.go new file mode 100644 index 0000000..7be9753 --- /dev/null +++ b/planner/core/expression_test.go @@ -0,0 +1,184 @@ +// Copyright 2016 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package core + +import ( + "fmt" + + . 
"github.com/pingcap/check" + "github.com/pingcap/tidb/parser" + "github.com/pingcap/tidb/parser/ast" + "github.com/pingcap/tidb/sessionctx" + "github.com/pingcap/tidb/util/mock" + "github.com/pingcap/tidb/util/testleak" +) + +var _ = Suite(&testExpressionSuite{}) + +type testExpressionSuite struct { + *parser.Parser + ctx sessionctx.Context +} + +func (s *testExpressionSuite) SetUpSuite(c *C) { + s.Parser = parser.New() + s.ctx = mock.NewContext() +} + +func (s *testExpressionSuite) TearDownSuite(c *C) { +} + +func (s *testExpressionSuite) parseExpr(c *C, expr string) ast.ExprNode { + st, err := s.ParseOneStmt("select "+expr, "", "") + c.Assert(err, IsNil) + stmt := st.(*ast.SelectStmt) + return stmt.Fields.Fields[0].Expr +} + +type testCase struct { + exprStr string + resultStr string +} + +func (s *testExpressionSuite) runTests(c *C, tests []testCase) { + for _, tt := range tests { + expr := s.parseExpr(c, tt.exprStr) + val, err := evalAstExpr(s.ctx, expr) + c.Assert(err, IsNil) + valStr := fmt.Sprintf("%v", val.GetValue()) + c.Assert(valStr, Equals, tt.resultStr, Commentf("for %s", tt.exprStr)) + } +} + +func (s *testExpressionSuite) TestBetween(c *C) { + defer testleak.AfterTest(c)() + tests := []testCase{ + {exprStr: "1 between 2 and 3", resultStr: "0"}, + {exprStr: "1 not between 2 and 3", resultStr: "1"}, + } + s.runTests(c, tests) +} + +func (s *testExpressionSuite) TestPatternIn(c *C) { + defer testleak.AfterTest(c)() + tests := []testCase{ + { + exprStr: "1 not in (1, 2, 3)", + resultStr: "0", + }, + { + exprStr: "1 in (1, 2, 3)", + resultStr: "1", + }, + { + exprStr: "1 in (2, 3)", + resultStr: "0", + }, + { + exprStr: "NULL in (2, 3)", + resultStr: "", + }, + { + exprStr: "NULL not in (2, 3)", + resultStr: "", + }, + { + exprStr: "NULL in (NULL, 3)", + resultStr: "", + }, + { + exprStr: "1 in (1, NULL)", + resultStr: "1", + }, + { + exprStr: "1 in (NULL, 1)", + resultStr: "1", + }, + { + exprStr: "2 in (1, NULL)", + resultStr: "", + }, + { + exprStr: "(-(23)++46/51*+51) in (+23)", + resultStr: "0", + }, + } + s.runTests(c, tests) +} + +func (s *testExpressionSuite) TestIsNull(c *C) { + defer testleak.AfterTest(c)() + tests := []testCase{ + { + exprStr: "1 IS NULL", + resultStr: "0", + }, + { + exprStr: "1 IS NOT NULL", + resultStr: "1", + }, + { + exprStr: "NULL IS NULL", + resultStr: "1", + }, + { + exprStr: "NULL IS NOT NULL", + resultStr: "0", + }, + } + s.runTests(c, tests) +} + +func (s *testExpressionSuite) TestCompareRow(c *C) { + defer testleak.AfterTest(c)() + tests := []testCase{ + { + exprStr: "row(1,2,3)=row(1,2,3)", + resultStr: "1", + }, + { + exprStr: "row(1,2,3)=row(1+3,2,3)", + resultStr: "0", + }, + { + exprStr: "row(1,2,3)<>row(1,2,3)", + resultStr: "0", + }, + { + exprStr: "row(1,2,3)<>row(1+3,2,3)", + resultStr: "1", + }, + { + exprStr: "row(1+3,2,3)<>row(1+3,2,3)", + resultStr: "0", + }, + { + exprStr: "row(1,2,3)", + }, + { + exprStr: "row(1,2,3)=row(0,NULL,3)", + resultStr: "1", + }, + { + exprStr: "row(1,2,3)<=row(2,NULL,3)", + resultStr: "1", + }, + } + s.runTests(c, tests) +} diff --git a/planner/core/find_best_task.go b/planner/core/find_best_task.go new file mode 100644 index 0000000..c2062a3 --- /dev/null +++ b/planner/core/find_best_task.go @@ -0,0 +1,705 @@ +// Copyright 2017 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package core + +import ( + "math" + + "github.com/pingcap/tidb/expression" + "github.com/pingcap/tidb/parser/ast" + "github.com/pingcap/tidb/parser/model" + "github.com/pingcap/tidb/parser/mysql" + "github.com/pingcap/tidb/planner/property" + "github.com/pingcap/tidb/planner/util" + "github.com/pingcap/tidb/types" + "github.com/pingcap/tidb/util/chunk" + "golang.org/x/tools/container/intsets" +) + +const ( + selectionFactor = 0.8 + distinctFactor = 0.8 +) + +var aggFuncFactor = map[string]float64{ + ast.AggFuncCount: 1.0, + ast.AggFuncSum: 1.0, + ast.AggFuncAvg: 2.0, + ast.AggFuncFirstRow: 0.1, + ast.AggFuncMax: 1.0, + ast.AggFuncMin: 1.0, + "default": 1.5, +} + +// wholeTaskTypes records all possible kinds of task that a plan can return. For Agg, TopN and Limit, we will try to get +// these tasks one by one. +var wholeTaskTypes = [...]property.TaskType{property.CopSingleReadTaskType, property.CopDoubleReadTaskType, property.RootTaskType} + +var invalidTask = &rootTask{cst: math.MaxFloat64} + +// GetPropByOrderByItems will check if this sort property can be pushed or not. In order to simplify the problem, we only +// consider the case that all expression are columns. +func GetPropByOrderByItems(items []*ByItems) (*property.PhysicalProperty, bool) { + propItems := make([]property.Item, 0, len(items)) + for _, item := range items { + col, ok := item.Expr.(*expression.Column) + if !ok { + return nil, false + } + propItems = append(propItems, property.Item{Col: col, Desc: item.Desc}) + } + return &property.PhysicalProperty{Items: propItems}, true +} + +func (p *LogicalTableDual) findBestTask(prop *property.PhysicalProperty) (task, error) { + if !prop.IsEmpty() { + return invalidTask, nil + } + dual := PhysicalTableDual{ + RowCount: p.RowCount, + }.Init(p.ctx, p.stats) + dual.SetSchema(p.schema) + return &rootTask{p: dual}, nil +} + +func (p *LogicalShow) findBestTask(prop *property.PhysicalProperty) (task, error) { + if !prop.IsEmpty() { + return invalidTask, nil + } + pShow := PhysicalShow{ShowContents: p.ShowContents}.Init(p.ctx) + pShow.SetSchema(p.schema) + return &rootTask{p: pShow}, nil +} + +func (p *LogicalShowDDLJobs) findBestTask(prop *property.PhysicalProperty) (task, error) { + if !prop.IsEmpty() { + return invalidTask, nil + } + pShow := PhysicalShowDDLJobs{JobNumber: p.JobNumber}.Init(p.ctx) + pShow.SetSchema(p.schema) + return &rootTask{p: pShow}, nil +} + +// findBestTask implements LogicalPlan interface. +func (p *baseLogicalPlan) findBestTask(prop *property.PhysicalProperty) (bestTask task, err error) { + // Look up the task with this prop in the task map. + // It's used to reduce double counting. + bestTask = p.getTask(prop) + if bestTask != nil { + return bestTask, nil + } + + if prop.TaskTp != property.RootTaskType { + // Currently all plan cannot totally push down. 
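+		// In other words, operators built on baseLogicalPlan (Sort, Projection, Join, ...)
+		// are only enumerated as root tasks here; if the parent asks for a cop task,
+		// an invalid task is recorded so the requirement fails fast.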
+ p.storeTask(prop, invalidTask) + return invalidTask, nil + } + + bestTask = invalidTask + childTasks := make([]task, 0, len(p.children)) + + // If prop.enforced is true, cols of prop as parameter in exhaustPhysicalPlans should be nil + // And reset it for enforcing task prop and storing map + oldPropCols := prop.Items + if prop.Enforced { + // First, get the bestTask without enforced prop + prop.Enforced = false + bestTask, err = p.findBestTask(prop) + if err != nil { + return nil, err + } + prop.Enforced = true + // Next, get the bestTask with enforced prop + prop.Items = []property.Item{} + } + physicalPlans := p.self.exhaustPhysicalPlans(prop) + prop.Items = oldPropCols + + for _, pp := range physicalPlans { + // find best child tasks firstly. + childTasks = childTasks[:0] + for i, child := range p.children { + childTask, err := child.findBestTask(pp.GetChildReqProps(i)) + if err != nil { + return nil, err + } + if childTask != nil && childTask.invalid() { + break + } + childTasks = append(childTasks, childTask) + } + + // This check makes sure that there is no invalid child task. + if len(childTasks) != len(p.children) { + continue + } + + // combine best child tasks with parent physical plan. + curTask := pp.attach2Task(childTasks...) + + // enforce curTask property + if prop.Enforced { + curTask = enforceProperty(prop, curTask, p.basePlan.ctx) + } + + // get the most efficient one. + if curTask.cost() < bestTask.cost() { + bestTask = curTask + } + } + + p.storeTask(prop, bestTask) + return bestTask, nil +} + +func (p *LogicalMemTable) findBestTask(prop *property.PhysicalProperty) (t task, err error) { + if !prop.IsEmpty() { + return invalidTask, nil + } + memTable := PhysicalMemTable{ + DBName: p.dbName, + Table: p.tableInfo, + Columns: p.tableInfo.Columns, + }.Init(p.ctx, p.stats) + memTable.SetSchema(p.schema) + return &rootTask{p: memTable}, nil +} + +// tryToGetDualTask will check if the push down predicate has false constant. If so, it will return table dual. +func (ds *DataSource) tryToGetDualTask() (task, error) { + for _, cond := range ds.pushedDownConds { + if _, ok := cond.(*expression.Constant); ok { + result, _, err := expression.EvalBool(ds.ctx, []expression.Expression{cond}, chunk.Row{}) + if err != nil { + return nil, err + } + if !result { + dual := PhysicalTableDual{}.Init(ds.ctx, ds.stats) + dual.SetSchema(ds.schema) + return &rootTask{ + p: dual, + }, nil + } + } + } + return nil, nil +} + +// candidatePath is used to maintain required info for skyline pruning. +type candidatePath struct { + path *util.AccessPath + columnSet *intsets.Sparse // columnSet is the set of columns that occurred in the access conditions. + isSingleScan bool + isMatchProp bool +} + +// compareCandidates is the core of skyline pruning. It compares the two candidate paths on three dimensions: +// (1): the set of columns that occurred in the access condition, +// (2): whether or not it matches the physical property +// (3): does it require a double scan. +// If `x` is not worse than `y` at all factors, +// and there exists one factor that `x` is better than `y`, then `x` is better than `y`. +func compareCandidates(lhs, rhs *candidatePath) int { + // TODO: implement the content according to the header comment. 
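+	// One possible sketch (not the required implementation): compare the two
+	// candidates on the three factors listed above, and only prefer one path when
+	// it is no worse on every factor and strictly better on at least one, e.g.
+	//
+	//	accessResult := compareColumnSet(lhs.columnSet, rhs.columnSet) // subset/superset test
+	//	scanResult := compareBool(lhs.isSingleScan, rhs.isSingleScan)
+	//	matchResult := compareBool(lhs.isMatchProp, rhs.isMatchProp)
+	//	sum := accessResult + scanResult + matchResult
+	//	if accessResult >= 0 && scanResult >= 0 && matchResult >= 0 && sum > 0 { return 1 }
+	//	if accessResult <= 0 && scanResult <= 0 && matchResult <= 0 && sum < 0 { return -1 }
+	//	return 0
+	//
+	// where compareColumnSet and compareBool are hypothetical helpers returning
+	// 1, 0 or -1 for "better", "equal/incomparable" and "worse" respectively.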
+ return 0 +} + +func (ds *DataSource) getTableCandidate(path *util.AccessPath, prop *property.PhysicalProperty) *candidatePath { + candidate := &candidatePath{path: path} + pkCol := ds.getPKIsHandleCol() + if len(prop.Items) == 1 && pkCol != nil { + candidate.isMatchProp = prop.Items[0].Col.Equal(nil, pkCol) + } + candidate.columnSet = expression.ExtractColumnSet(path.AccessConds) + candidate.isSingleScan = true + return candidate +} + +func (ds *DataSource) getIndexCandidate(path *util.AccessPath, prop *property.PhysicalProperty, isSingleScan bool) *candidatePath { + candidate := &candidatePath{path: path} + all, _ := prop.AllSameOrder() + // When the prop is empty or `all` is false, `isMatchProp` is better to be `false` because + // it needs not to keep order for index scan. + if !prop.IsEmpty() && all { + for i, col := range path.IdxCols { + if col.Equal(nil, prop.Items[0].Col) { + candidate.isMatchProp = matchIndicesProp(path.IdxCols[i:], path.IdxColLens[i:], prop.Items) + break + } else if i >= path.EqCondCount { + break + } + } + } + candidate.columnSet = expression.ExtractColumnSet(path.AccessConds) + candidate.isSingleScan = isSingleScan + return candidate +} + +// skylinePruning prunes access paths according to different factors. An access path can be pruned only if +// there exists a path that is not worse than it at all factors and there is at least one better factor. +func (ds *DataSource) skylinePruning(prop *property.PhysicalProperty) []*candidatePath { + candidates := make([]*candidatePath, 0, 4) + for _, path := range ds.possibleAccessPaths { + // if we already know the range of the scan is empty, just return a TableDual + if len(path.Ranges) == 0 { + return []*candidatePath{{path: path}} + } + var currentCandidate *candidatePath + if path.IsTablePath { + currentCandidate = ds.getTableCandidate(path, prop) + } else { + coveredByIdx := isCoveringIndex(ds.schema.Columns, path.FullIdxCols, path.FullIdxColLens, ds.tableInfo.PKIsHandle) + if len(path.AccessConds) > 0 || !prop.IsEmpty() || path.Forced || coveredByIdx { + // We will use index to generate physical plan if any of the following conditions is satisfied: + // 1. This path's access cond is not nil. + // 2. We have a non-empty prop to match. + // 3. This index is forced to choose. + // 4. The needed columns are all covered by index columns(and handleCol). + currentCandidate = ds.getIndexCandidate(path, prop, coveredByIdx) + } else { + continue + } + } + // TODO: Here is the pruning phase. Will prune the access path which is must worse than others. + // You'll need to implement the content in function `compareCandidates`. + // And use it to prune unnecessary paths. + candidates = append(candidates, currentCandidate) + } + return candidates +} + +// findBestTask implements the PhysicalPlan interface. +// It will enumerate all the available indices and choose a plan with least cost. +func (ds *DataSource) findBestTask(prop *property.PhysicalProperty) (t task, err error) { + t = ds.getTask(prop) + if t != nil { + return + } + // If prop.enforced is true, the prop.cols need to be set nil for ds.findBestTask. + // Before function return, reset it for enforcing task prop and storing map. 
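+	// For example, if the parent requires rows ordered by a column that no access
+	// path can provide, the enforced branch first finds the cheapest unordered task
+	// and enforceProperty then adds a Sort on top of it to satisfy the requirement.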
+ oldPropCols := prop.Items + if prop.Enforced { + // First, get the bestTask without enforced prop + prop.Enforced = false + t, err = ds.findBestTask(prop) + if err != nil { + return nil, err + } + prop.Enforced = true + if t != invalidTask { + ds.storeTask(prop, t) + return + } + // Next, get the bestTask with enforced prop + prop.Items = []property.Item{} + } + defer func() { + if err != nil { + return + } + if prop.Enforced { + prop.Items = oldPropCols + t = enforceProperty(prop, t, ds.basePlan.ctx) + } + ds.storeTask(prop, t) + }() + + t, err = ds.tryToGetDualTask() + if err != nil || t != nil { + return t, err + } + + t = invalidTask + candidates := ds.skylinePruning(prop) + + for _, candidate := range candidates { + path := candidate.path + // if we already know the range of the scan is empty, just return a TableDual + if len(path.Ranges) == 0 { + dual := PhysicalTableDual{}.Init(ds.ctx, ds.stats) + dual.SetSchema(ds.schema) + return &rootTask{ + p: dual, + }, nil + } + if path.IsTablePath { + tblTask, err := ds.convertToTableScan(prop, candidate) + if err != nil { + return nil, err + } + if tblTask.cost() < t.cost() { + t = tblTask + } + continue + } + idxTask, err := ds.convertToIndexScan(prop, candidate) + if err != nil { + return nil, err + } + if idxTask.cost() < t.cost() { + t = idxTask + } + } + + return +} + +func isCoveringIndex(columns, indexColumns []*expression.Column, idxColLens []int, pkIsHandle bool) bool { + for _, col := range columns { + if pkIsHandle && mysql.HasPriKeyFlag(col.RetType.Flag) { + continue + } + if col.ID == model.ExtraHandleID { + continue + } + isIndexColumn := false + for i, indexCol := range indexColumns { + isFullLen := idxColLens[i] == types.UnspecifiedLength || idxColLens[i] == col.RetType.Flen + // We use col.OrigColName instead of col.ColName. + // Related issue: https://github.com/pingcap/tidb/issues/9636. + if indexCol != nil && col.Equal(nil, indexCol) && isFullLen { + isIndexColumn = true + break + } + } + if !isIndexColumn { + return false + } + } + return true +} + +// If there is a table reader which needs to keep order, we should append a pk to table scan. +func (ts *PhysicalTableScan) appendExtraHandleCol(ds *DataSource) (*expression.Column, bool) { + handleCol := ds.handleCol + if handleCol != nil { + return handleCol, false + } + handleCol = ds.newExtraHandleSchemaCol() + ts.schema.Append(handleCol) + ts.Columns = append(ts.Columns, model.NewExtraHandleColInfo()) + return handleCol, true +} + +// convertToIndexScan converts the DataSource to index scan with idx. +func (ds *DataSource) convertToIndexScan(prop *property.PhysicalProperty, candidate *candidatePath) (task task, err error) { + if !candidate.isSingleScan { + // If it's parent requires single read task, return max cost. + if prop.TaskTp == property.CopSingleReadTaskType { + return invalidTask, nil + } + } else if prop.TaskTp == property.CopDoubleReadTaskType { + // If it's parent requires double read task, return max cost. + return invalidTask, nil + } + if !prop.IsEmpty() && !candidate.isMatchProp { + return invalidTask, nil + } + path := candidate.path + is, cost, _ := ds.getOriginalPhysicalIndexScan(prop, path, candidate.isMatchProp, candidate.isSingleScan) + cop := &copTask{ + indexPlan: is, + tblColHists: ds.TblColHists, + tblCols: ds.TblCols, + } + if !candidate.isSingleScan { + // On this way, it's double read case. 
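+		// i.e. an IndexLookUp: the index side returns handles, and the table scan
+		// built below fetches the full rows for those handles in a second read.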
+ ts := PhysicalTableScan{ + Columns: ds.Columns, + Table: is.Table, + TableAsName: ds.TableAsName, + }.Init(ds.ctx) + ts.SetSchema(ds.schema.Clone()) + cop.tablePlan = ts + } + cop.cst = cost + task = cop + if candidate.isMatchProp { + if cop.tablePlan != nil { + col, isNew := cop.tablePlan.(*PhysicalTableScan).appendExtraHandleCol(ds) + cop.extraHandleCol = col + cop.doubleReadNeedProj = isNew + } + cop.keepOrder = true + } + // prop.IsEmpty() would always return true when coming to here, + // so we can just use prop.ExpectedCnt as parameter of addPushedDownSelection. + finalStats := ds.stats.ScaleByExpectCnt(prop.ExpectedCnt) + is.addPushedDownSelection(cop, ds, path, finalStats) + if prop.TaskTp == property.RootTaskType { + task = finishCopTask(ds.ctx, task) + } else if _, ok := task.(*rootTask); ok { + return invalidTask, nil + } + return task, nil +} + +func (is *PhysicalIndexScan) indexScanRowSize(idx *model.IndexInfo, ds *DataSource, isForScan bool) float64 { + scanCols := make([]*expression.Column, 0, len(idx.Columns)+1) + // If `initSchema` has already appended the handle column in schema, just use schema columns, otherwise, add extra handle column. + if len(idx.Columns) == len(is.schema.Columns) { + scanCols = append(scanCols, is.schema.Columns...) + handleCol := ds.getPKIsHandleCol() + if handleCol != nil { + scanCols = append(scanCols, handleCol) + } + } else { + scanCols = is.schema.Columns + } + if isForScan { + return ds.TblColHists.GetIndexAvgRowSize(scanCols, is.Index.Unique) + } + return ds.TblColHists.GetAvgRowSize(scanCols, true) +} + +func (is *PhysicalIndexScan) initSchema(idx *model.IndexInfo, idxExprCols []*expression.Column, isDoubleRead bool) { + indexCols := make([]*expression.Column, len(is.IdxCols), len(idx.Columns)+1) + copy(indexCols, is.IdxCols) + for i := len(is.IdxCols); i < len(idx.Columns); i++ { + if idxExprCols[i] != nil { + indexCols = append(indexCols, idxExprCols[i]) + } else { + // TODO: try to reuse the col generated when building the DataSource. + indexCols = append(indexCols, &expression.Column{ + ID: is.Table.Columns[idx.Columns[i].Offset].ID, + RetType: &is.Table.Columns[idx.Columns[i].Offset].FieldType, + UniqueID: is.ctx.GetSessionVars().AllocPlanColumnID(), + }) + } + } + setHandle := len(indexCols) > len(idx.Columns) + if !setHandle { + for i, col := range is.Columns { + if (mysql.HasPriKeyFlag(col.Flag) && is.Table.PKIsHandle) || col.ID == model.ExtraHandleID { + indexCols = append(indexCols, is.dataSourceSchema.Columns[i]) + setHandle = true + break + } + } + } + // If it's double read case, the first index must return handle. So we should add extra handle column + // if there isn't a handle column. + if isDoubleRead && !setHandle { + indexCols = append(indexCols, &expression.Column{ + RetType: types.NewFieldType(mysql.TypeLonglong), + ID: model.ExtraHandleID, + UniqueID: is.ctx.GetSessionVars().AllocPlanColumnID(), + }) + } + is.SetSchema(expression.NewSchema(indexCols...)) +} + +func (is *PhysicalIndexScan) addPushedDownSelection(copTask *copTask, p *DataSource, path *util.AccessPath, finalStats *property.StatsInfo) { + // Add filter condition to table plan now. 
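+	// Conditions that only need index columns (path.IndexFilters) become a Selection
+	// above the index plan, while the remaining ones (path.TableFilters) can only be
+	// evaluated after the row lookup and are attached above the table plan.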
+ indexConds, tableConds := path.IndexFilters, path.TableFilters + + sessVars := is.ctx.GetSessionVars() + if indexConds != nil { + copTask.cst += copTask.count() * sessVars.CopCPUFactor + var selectivity float64 + if path.CountAfterAccess > 0 { + selectivity = path.CountAfterIndex / path.CountAfterAccess + } + count := is.stats.RowCount * selectivity + stats := p.tableStats.ScaleByExpectCnt(count) + indexSel := PhysicalSelection{Conditions: indexConds}.Init(is.ctx, stats) + indexSel.SetChildren(is) + copTask.indexPlan = indexSel + } + if len(tableConds) > 0 { + copTask.finishIndexPlan() + copTask.cst += copTask.count() * sessVars.CopCPUFactor + tableSel := PhysicalSelection{Conditions: tableConds}.Init(is.ctx, finalStats) + tableSel.SetChildren(copTask.tablePlan) + copTask.tablePlan = tableSel + } +} + +func matchIndicesProp(idxCols []*expression.Column, colLens []int, propItems []property.Item) bool { + if len(idxCols) < len(propItems) { + return false + } + for i, item := range propItems { + if colLens[i] != types.UnspecifiedLength || !item.Col.Equal(nil, idxCols[i]) { + return false + } + } + return true +} + +func splitIndexFilterConditions(conditions []expression.Expression, indexColumns []*expression.Column, idxColLens []int, + table *model.TableInfo) (indexConds, tableConds []expression.Expression) { + var indexConditions, tableConditions []expression.Expression + for _, cond := range conditions { + if isCoveringIndex(expression.ExtractColumns(cond), indexColumns, idxColLens, table.PKIsHandle) { + indexConditions = append(indexConditions, cond) + } else { + tableConditions = append(tableConditions, cond) + } + } + return indexConditions, tableConditions +} + +// GetPhysicalScan returns PhysicalTableScan for the LogicalTableScan. +func (s *LogicalTableScan) GetPhysicalScan(schema *expression.Schema, stats *property.StatsInfo) *PhysicalTableScan { + ds := s.Source + ts := PhysicalTableScan{ + Table: ds.tableInfo, + Columns: ds.Columns, + TableAsName: ds.TableAsName, + DBName: ds.DBName, + Ranges: s.Ranges, + AccessCondition: s.AccessConds, + }.Init(s.ctx) + ts.stats = stats + ts.SetSchema(schema.Clone()) + return ts +} + +// GetPhysicalIndexScan returns PhysicalIndexScan for the logical IndexScan. +func (s *LogicalIndexScan) GetPhysicalIndexScan(schema *expression.Schema, stats *property.StatsInfo) *PhysicalIndexScan { + ds := s.Source + is := PhysicalIndexScan{ + Table: ds.tableInfo, + TableAsName: ds.TableAsName, + DBName: ds.DBName, + Columns: s.Columns, + Index: s.Index, + IdxCols: s.IdxCols, + IdxColLens: s.IdxColLens, + AccessCondition: s.AccessConds, + Ranges: s.Ranges, + dataSourceSchema: ds.schema, + }.Init(ds.ctx) + is.stats = stats + is.initSchema(s.Index, s.FullIdxCols, s.IsDoubleRead) + return is +} + +// convertToTableScan converts the DataSource to table scan. +func (ds *DataSource) convertToTableScan(prop *property.PhysicalProperty, candidate *candidatePath) (task task, err error) { + // It will be handled in convertToIndexScan. 
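+	// A plain table scan can never produce the index side of a double read, so
+	// that task type is simply rejected here.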
+ if prop.TaskTp == property.CopDoubleReadTaskType { + return invalidTask, nil + } + if !prop.IsEmpty() && !candidate.isMatchProp { + return invalidTask, nil + } + ts, cost, _ := ds.getOriginalPhysicalTableScan(prop, candidate.path, candidate.isMatchProp) + copTask := &copTask{ + tablePlan: ts, + indexPlanFinished: true, + tblColHists: ds.TblColHists, + cst: cost, + } + task = copTask + if candidate.isMatchProp { + copTask.keepOrder = true + } + ts.addPushedDownSelection(copTask, ds.stats.ScaleByExpectCnt(prop.ExpectedCnt)) + if prop.TaskTp == property.RootTaskType { + task = finishCopTask(ds.ctx, task) + } else if _, ok := task.(*rootTask); ok { + return invalidTask, nil + } + return task, nil +} + +func (ts *PhysicalTableScan) addPushedDownSelection(copTask *copTask, stats *property.StatsInfo) { + // Add filter condition to table plan now. + sessVars := ts.ctx.GetSessionVars() + if len(ts.filterCondition) > 0 { + copTask.cst += copTask.count() * sessVars.CopCPUFactor + sel := PhysicalSelection{Conditions: ts.filterCondition}.Init(ts.ctx, stats) + sel.SetChildren(ts) + copTask.tablePlan = sel + } +} + +func (ds *DataSource) getOriginalPhysicalTableScan(prop *property.PhysicalProperty, path *util.AccessPath, isMatchProp bool) (*PhysicalTableScan, float64, float64) { + ts := PhysicalTableScan{ + Table: ds.tableInfo, + Columns: ds.Columns, + TableAsName: ds.TableAsName, + DBName: ds.DBName, + Ranges: path.Ranges, + AccessCondition: path.AccessConds, + filterCondition: path.TableFilters, + }.Init(ds.ctx) + ts.SetSchema(ds.schema.Clone()) + rowCount := path.CountAfterAccess + // Only use expectedCnt when it's smaller than the count we calculated. + // e.g. IndexScan(count1)->After Filter(count2). The `ds.stats.RowCount` is count2. count1 is the one we need to calculate + // If expectedCnt and count2 are both zero and we go into the below `if` block, the count1 will be set to zero though it's shouldn't be. + if (isMatchProp || prop.IsEmpty()) && prop.ExpectedCnt < ds.stats.RowCount { + selectivity := ds.stats.RowCount / rowCount + rowCount = math.Min(prop.ExpectedCnt/selectivity, rowCount) + } + // We need NDV of columns since it may be used in cost estimation of join. Precisely speaking, + // we should track NDV of each histogram bucket, and sum up the NDV of buckets we actually need + // to scan, but this would only help improve accuracy of NDV for one column, for other columns, + // we still need to assume values are uniformly distributed. For simplicity, we use uniform-assumption + // for all columns now, as we do in `deriveStatsByFilter`. 
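+	// As a rough, purely illustrative example: with rowCount = 1000, rowSize = 40
+	// and ScanFactor = 1.5 (hypothetical values), the scan cost below would be
+	// 1000 * 40 * 1.5 = 60000, plus one SeekFactor per range in ts.Ranges.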
+ ts.stats = ds.tableStats.ScaleByExpectCnt(rowCount) + rowSize := ds.TblColHists.GetTableAvgRowSize(ds.TblCols) + sessVars := ds.ctx.GetSessionVars() + cost := rowCount * rowSize * sessVars.ScanFactor + if isMatchProp { + if prop.Items[0].Desc { + ts.Desc = true + cost = rowCount * rowSize * sessVars.DescScanFactor + } + ts.KeepOrder = true + } + cost += float64(len(ts.Ranges)) * sessVars.SeekFactor + return ts, cost, rowCount +} + +func (ds *DataSource) getOriginalPhysicalIndexScan(prop *property.PhysicalProperty, path *util.AccessPath, isMatchProp bool, isSingleScan bool) (*PhysicalIndexScan, float64, float64) { + idx := path.Index + is := PhysicalIndexScan{ + Table: ds.tableInfo, + TableAsName: ds.TableAsName, + DBName: ds.DBName, + Columns: ds.Columns, + Index: idx, + IdxCols: path.IdxCols, + IdxColLens: path.IdxColLens, + AccessCondition: path.AccessConds, + Ranges: path.Ranges, + dataSourceSchema: ds.schema, + }.Init(ds.ctx) + rowCount := path.CountAfterAccess + is.initSchema(idx, path.FullIdxCols, !isSingleScan) + // Only use expectedCnt when it's smaller than the count we calculated. + // e.g. IndexScan(count1)->After Filter(count2). The `ds.stats.RowCount` is count2. count1 is the one we need to calculate + // If expectedCnt and count2 are both zero and we go into the below `if` block, the count1 will be set to zero though it's shouldn't be. + if (isMatchProp || prop.IsEmpty()) && prop.ExpectedCnt < ds.stats.RowCount { + selectivity := ds.stats.RowCount / path.CountAfterAccess + rowCount = math.Min(prop.ExpectedCnt/selectivity, rowCount) + } + is.stats = ds.tableStats.ScaleByExpectCnt(rowCount) + rowSize := is.indexScanRowSize(idx, ds, true) + sessVars := ds.ctx.GetSessionVars() + cost := rowCount * rowSize * sessVars.ScanFactor + if isMatchProp { + if prop.Items[0].Desc { + is.Desc = true + cost = rowCount * rowSize * sessVars.DescScanFactor + } + is.KeepOrder = true + } + cost += float64(len(is.Ranges)) * sessVars.SeekFactor + return is, cost, rowCount +} diff --git a/planner/core/initialize.go b/planner/core/initialize.go new file mode 100644 index 0000000..a3a9ce6 --- /dev/null +++ b/planner/core/initialize.go @@ -0,0 +1,356 @@ +// Copyright 2017 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package core + +import ( + "github.com/pingcap/tidb/planner/property" + "github.com/pingcap/tidb/sessionctx" +) + +const ( + // TypeSel is the type of Selection. + TypeSel = "Selection" + // TypeProj is the type of Projection. + TypeProj = "Projection" + // TypeAgg is the type of Aggregation. + TypeAgg = "Aggregation" + // TypeHashAgg is the type of HashAgg. + TypeHashAgg = "HashAgg" + // TypeShow is the type of show. + TypeShow = "Show" + // TypeJoin is the type of Join. + TypeJoin = "Join" + // TypeTableScan is the type of TableScan. + TypeTableScan = "TableScan" + // TypeMemTableScan is the type of TableScan. + TypeMemTableScan = "MemTableScan" + // TypeUnionScan is the type of UnionScan. + TypeUnionScan = "UnionScan" + // TypeIdxScan is the type of IndexScan. + TypeIdxScan = "IndexScan" + // TypeSort is the type of Sort. 
+ TypeSort = "Sort" + // TypeTopN is the type of TopN. + TypeTopN = "TopN" + // TypeLimit is the type of Limit. + TypeLimit = "Limit" + // TypeHashLeftJoin is the type of left hash join. + TypeHashLeftJoin = "HashLeftJoin" + // TypeHashRightJoin is the type of right hash join. + TypeHashRightJoin = "HashRightJoin" + // TypeMergeJoin is the type of merge join. + TypeMergeJoin = "MergeJoin" + // TypeApply is the type of Apply. + TypeApply = "Apply" + // TypeMaxOneRow is the type of MaxOneRow. + TypeMaxOneRow = "MaxOneRow" + // TypeDual is the type of TableDual. + TypeDual = "TableDual" + // TypeInsert is the type of Insert + TypeInsert = "Insert" + // TypeDelete is the type of Delete. + TypeDelete = "Delete" + // TypeIndexLookUp is the type of IndexLookUp. + TypeIndexLookUp = "IndexLookUp" + // TypeTableReader is the type of TableReader. + TypeTableReader = "TableReader" + // TypeIndexReader is the type of IndexReader. + TypeIndexReader = "IndexReader" + // TypeTiKVSingleGather is the type of TiKVSingleGather. + TypeTiKVSingleGather = "TiKVSingleGather" + // TypeShowDDLJobs is the type of show ddl jobs. + TypeShowDDLJobs = "ShowDDLJobs" +) + +// Init initializes LogicalAggregation. +func (la LogicalAggregation) Init(ctx sessionctx.Context) *LogicalAggregation { + la.baseLogicalPlan = newBaseLogicalPlan(ctx, TypeAgg, &la) + return &la +} + +// Init initializes LogicalJoin. +func (p LogicalJoin) Init(ctx sessionctx.Context) *LogicalJoin { + p.baseLogicalPlan = newBaseLogicalPlan(ctx, TypeJoin, &p) + return &p +} + +// Init initializes DataSource. +func (ds DataSource) Init(ctx sessionctx.Context) *DataSource { + ds.baseLogicalPlan = newBaseLogicalPlan(ctx, TypeTableScan, &ds) + return &ds +} + +// Init initializes TiKVSingleGather. +func (sg TiKVSingleGather) Init(ctx sessionctx.Context) *TiKVSingleGather { + sg.baseLogicalPlan = newBaseLogicalPlan(ctx, TypeTiKVSingleGather, &sg) + return &sg +} + +// Init initializes LogicalTableScan. +func (ts LogicalTableScan) Init(ctx sessionctx.Context) *LogicalTableScan { + ts.baseLogicalPlan = newBaseLogicalPlan(ctx, TypeTableScan, &ts) + return &ts +} + +// Init initializes LogicalIndexScan. +func (is LogicalIndexScan) Init(ctx sessionctx.Context) *LogicalIndexScan { + is.baseLogicalPlan = newBaseLogicalPlan(ctx, TypeIdxScan, &is) + return &is +} + +// Init initializes LogicalSelection. +func (p LogicalSelection) Init(ctx sessionctx.Context) *LogicalSelection { + p.baseLogicalPlan = newBaseLogicalPlan(ctx, TypeSel, &p) + return &p +} + +// Init initializes PhysicalSelection. +func (p PhysicalSelection) Init(ctx sessionctx.Context, stats *property.StatsInfo, props ...*property.PhysicalProperty) *PhysicalSelection { + p.basePhysicalPlan = newBasePhysicalPlan(ctx, TypeSel, &p) + p.childrenReqProps = props + p.stats = stats + return &p +} + +// Init initializes LogicalUnionScan. +func (p LogicalUnionScan) Init(ctx sessionctx.Context) *LogicalUnionScan { + p.baseLogicalPlan = newBaseLogicalPlan(ctx, TypeUnionScan, &p) + return &p +} + +// Init initializes LogicalProjection. +func (p LogicalProjection) Init(ctx sessionctx.Context) *LogicalProjection { + p.baseLogicalPlan = newBaseLogicalPlan(ctx, TypeProj, &p) + return &p +} + +// Init initializes PhysicalProjection. 
+func (p PhysicalProjection) Init(ctx sessionctx.Context, stats *property.StatsInfo, props ...*property.PhysicalProperty) *PhysicalProjection { + p.basePhysicalPlan = newBasePhysicalPlan(ctx, TypeProj, &p) + p.childrenReqProps = props + p.stats = stats + return &p +} + +// Init initializes LogicalSort. +func (ls LogicalSort) Init(ctx sessionctx.Context) *LogicalSort { + ls.baseLogicalPlan = newBaseLogicalPlan(ctx, TypeSort, &ls) + return &ls +} + +// Init initializes PhysicalSort. +func (p PhysicalSort) Init(ctx sessionctx.Context, stats *property.StatsInfo, props ...*property.PhysicalProperty) *PhysicalSort { + p.basePhysicalPlan = newBasePhysicalPlan(ctx, TypeSort, &p) + p.childrenReqProps = props + p.stats = stats + return &p +} + +// Init initializes NominalSort. +func (p NominalSort) Init(ctx sessionctx.Context, props ...*property.PhysicalProperty) *NominalSort { + p.basePhysicalPlan = newBasePhysicalPlan(ctx, TypeSort, &p) + p.childrenReqProps = props + return &p +} + +// Init initializes LogicalTopN. +func (lt LogicalTopN) Init(ctx sessionctx.Context) *LogicalTopN { + lt.baseLogicalPlan = newBaseLogicalPlan(ctx, TypeTopN, <) + return < +} + +// Init initializes PhysicalTopN. +func (p PhysicalTopN) Init(ctx sessionctx.Context, stats *property.StatsInfo, props ...*property.PhysicalProperty) *PhysicalTopN { + p.basePhysicalPlan = newBasePhysicalPlan(ctx, TypeTopN, &p) + p.childrenReqProps = props + p.stats = stats + return &p +} + +// Init initializes LogicalLimit. +func (p LogicalLimit) Init(ctx sessionctx.Context) *LogicalLimit { + p.baseLogicalPlan = newBaseLogicalPlan(ctx, TypeLimit, &p) + return &p +} + +// Init initializes PhysicalLimit. +func (p PhysicalLimit) Init(ctx sessionctx.Context, stats *property.StatsInfo, props ...*property.PhysicalProperty) *PhysicalLimit { + p.basePhysicalPlan = newBasePhysicalPlan(ctx, TypeLimit, &p) + p.childrenReqProps = props + p.stats = stats + return &p +} + +// Init initializes LogicalTableDual. +func (p LogicalTableDual) Init(ctx sessionctx.Context) *LogicalTableDual { + p.baseLogicalPlan = newBaseLogicalPlan(ctx, TypeDual, &p) + return &p +} + +// Init initializes PhysicalTableDual. +func (p PhysicalTableDual) Init(ctx sessionctx.Context, stats *property.StatsInfo) *PhysicalTableDual { + p.basePhysicalPlan = newBasePhysicalPlan(ctx, TypeDual, &p) + p.stats = stats + return &p +} + +// Init initializes Delete. +func (p Delete) Init(ctx sessionctx.Context) *Delete { + p.basePlan = newBasePlan(ctx, TypeDelete) + return &p +} + +// Init initializes Insert. +func (p Insert) Init(ctx sessionctx.Context) *Insert { + p.basePlan = newBasePlan(ctx, TypeInsert) + return &p +} + +// Init initializes LogicalShow. +func (p LogicalShow) Init(ctx sessionctx.Context) *LogicalShow { + p.baseLogicalPlan = newBaseLogicalPlan(ctx, TypeShow, &p) + return &p +} + +// Init initializes LogicalShowDDLJobs. +func (p LogicalShowDDLJobs) Init(ctx sessionctx.Context) *LogicalShowDDLJobs { + p.baseLogicalPlan = newBaseLogicalPlan(ctx, TypeShowDDLJobs, &p) + return &p +} + +// Init initializes PhysicalShow. +func (p PhysicalShow) Init(ctx sessionctx.Context) *PhysicalShow { + p.basePhysicalPlan = newBasePhysicalPlan(ctx, TypeShow, &p) + // Just use pseudo stats to avoid panic. + p.stats = &property.StatsInfo{RowCount: 1} + return &p +} + +// Init initializes PhysicalShowDDLJobs. +func (p PhysicalShowDDLJobs) Init(ctx sessionctx.Context) *PhysicalShowDDLJobs { + p.basePhysicalPlan = newBasePhysicalPlan(ctx, TypeShowDDLJobs, &p) + // Just use pseudo stats to avoid panic. 
+ p.stats = &property.StatsInfo{RowCount: 1} + return &p +} + +// Init initializes PhysicalTableScan. +func (p PhysicalTableScan) Init(ctx sessionctx.Context) *PhysicalTableScan { + p.basePhysicalPlan = newBasePhysicalPlan(ctx, TypeTableScan, &p) + return &p +} + +// Init initializes PhysicalIndexScan. +func (p PhysicalIndexScan) Init(ctx sessionctx.Context) *PhysicalIndexScan { + p.basePhysicalPlan = newBasePhysicalPlan(ctx, TypeIdxScan, &p) + return &p +} + +// Init initializes LogicalMemTable. +func (p LogicalMemTable) Init(ctx sessionctx.Context) *LogicalMemTable { + p.baseLogicalPlan = newBaseLogicalPlan(ctx, TypeMemTableScan, &p) + return &p +} + +// Init initializes PhysicalMemTable. +func (p PhysicalMemTable) Init(ctx sessionctx.Context, stats *property.StatsInfo) *PhysicalMemTable { + p.basePhysicalPlan = newBasePhysicalPlan(ctx, TypeMemTableScan, &p) + p.stats = stats + return &p +} + +// Init initializes PhysicalHashJoin. +func (p PhysicalHashJoin) Init(ctx sessionctx.Context, stats *property.StatsInfo, props ...*property.PhysicalProperty) *PhysicalHashJoin { + tp := TypeHashRightJoin + if p.InnerChildIdx == 1 { + tp = TypeHashLeftJoin + } + p.basePhysicalPlan = newBasePhysicalPlan(ctx, tp, &p) + p.childrenReqProps = props + p.stats = stats + return &p +} + +// Init initializes PhysicalMergeJoin. +func (p PhysicalMergeJoin) Init(ctx sessionctx.Context, stats *property.StatsInfo) *PhysicalMergeJoin { + p.basePhysicalPlan = newBasePhysicalPlan(ctx, TypeMergeJoin, &p) + p.stats = stats + return &p +} + +// Init initializes basePhysicalAgg. +func (base basePhysicalAgg) Init(ctx sessionctx.Context, stats *property.StatsInfo) *basePhysicalAgg { + base.basePhysicalPlan = newBasePhysicalPlan(ctx, TypeHashAgg, &base) + base.stats = stats + return &base +} + +func (base basePhysicalAgg) initForHash(ctx sessionctx.Context, stats *property.StatsInfo, props ...*property.PhysicalProperty) *PhysicalHashAgg { + p := &PhysicalHashAgg{base} + p.basePhysicalPlan = newBasePhysicalPlan(ctx, TypeHashAgg, p) + p.childrenReqProps = props + p.stats = stats + return p +} + +// Init initializes PhysicalUnionScan. +func (p PhysicalUnionScan) Init(ctx sessionctx.Context, stats *property.StatsInfo, props ...*property.PhysicalProperty) *PhysicalUnionScan { + p.basePhysicalPlan = newBasePhysicalPlan(ctx, TypeUnionScan, &p) + p.childrenReqProps = props + p.stats = stats + return &p +} + +// Init initializes PhysicalIndexLookUpReader. +func (p PhysicalIndexLookUpReader) Init(ctx sessionctx.Context) *PhysicalIndexLookUpReader { + p.basePhysicalPlan = newBasePhysicalPlan(ctx, TypeIndexLookUp, &p) + p.TablePlans = flattenPushDownPlan(p.tablePlan) + p.IndexPlans = flattenPushDownPlan(p.indexPlan) + p.schema = p.tablePlan.Schema() + return &p +} + +// Init initializes PhysicalTableReader. +func (p PhysicalTableReader) Init(ctx sessionctx.Context) *PhysicalTableReader { + p.basePhysicalPlan = newBasePhysicalPlan(ctx, TypeTableReader, &p) + if p.tablePlan != nil { + p.TablePlans = flattenPushDownPlan(p.tablePlan) + p.schema = p.tablePlan.Schema() + } + return &p +} + +// Init initializes PhysicalIndexReader. +func (p PhysicalIndexReader) Init(ctx sessionctx.Context) *PhysicalIndexReader { + p.basePhysicalPlan = newBasePhysicalPlan(ctx, TypeIndexReader, &p) + p.SetSchema(nil) + return &p +} + +// flattenPushDownPlan converts a plan tree to a list, whose head is the leaf node like table scan. 
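+// For example, a pushed-down chain Selection -> TableScan is returned as
+// [TableScan, Selection]: the plans are collected root-first and then reversed.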
+func flattenPushDownPlan(p PhysicalPlan) []PhysicalPlan { + plans := make([]PhysicalPlan, 0, 5) + for { + plans = append(plans, p) + if len(p.Children()) == 0 { + break + } + p = p.Children()[0] + } + for i := 0; i < len(plans)/2; i++ { + j := len(plans) - i - 1 + plans[i], plans[j] = plans[j], plans[i] + } + return plans +} diff --git a/planner/core/integration_test.go b/planner/core/integration_test.go new file mode 100644 index 0000000..9980cbf --- /dev/null +++ b/planner/core/integration_test.go @@ -0,0 +1,90 @@ +// Copyright 2019 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package core_test + +import ( + . "github.com/pingcap/check" + "github.com/pingcap/tidb/domain" + "github.com/pingcap/tidb/kv" + "github.com/pingcap/tidb/parser/mysql" + "github.com/pingcap/tidb/util/testkit" + "github.com/pingcap/tidb/util/testutil" +) + +var _ = Suite(&testIntegrationSuite{}) + +type testIntegrationSuite struct { + testData testutil.TestData + store kv.Storage + dom *domain.Domain +} + +func (s *testIntegrationSuite) SetUpSuite(c *C) { + var err error + s.testData, err = testutil.LoadTestSuiteData("testdata", "integration_suite") + c.Assert(err, IsNil) +} + +func (s *testIntegrationSuite) TearDownSuite(c *C) { + c.Assert(s.testData.GenerateOutputIfNeeded(), IsNil) +} + +func (s *testIntegrationSuite) SetUpTest(c *C) { + var err error + s.store, s.dom, err = newStoreWithBootstrap() + c.Assert(err, IsNil) +} + +func (s *testIntegrationSuite) TearDownTest(c *C) { + s.dom.Close() + err := s.store.Close() + c.Assert(err, IsNil) +} + +func (s *testIntegrationSuite) TestBitColErrorMessage(c *C) { + tk := testkit.NewTestKit(c, s.store) + + tk.MustExec("use test") + tk.MustExec("drop table if exists bit_col_t") + tk.MustExec("create table bit_col_t (a bit(64))") + tk.MustExec("drop table bit_col_t") + tk.MustExec("create table bit_col_t (a bit(1))") + tk.MustExec("drop table bit_col_t") + tk.MustGetErrCode("create table bit_col_t (a bit(0))", mysql.ErrInvalidFieldSize) + tk.MustGetErrCode("create table bit_col_t (a bit(65))", mysql.ErrTooBigDisplaywidth) +} + +func (s *testIntegrationSuite) TestPushLimitDownIndexLookUpReader(c *C) { + tk := testkit.NewTestKit(c, s.store) + + tk.MustExec("use test") + tk.MustExec("drop table if exists tbl") + tk.MustExec("create table tbl(a int, b int, c int, key idx_b_c(b,c))") + tk.MustExec("insert into tbl values(1,1,1),(2,2,2),(3,3,3),(4,4,4),(5,5,5)") + tk.MustExec("analyze table tbl") + + var input []string + var output []struct { + SQL string + Plan []string + } + s.testData.GetTestCases(c, &input, &output) + for i, tt := range input { + s.testData.OnRecord(func() { + output[i].SQL = tt + output[i].Plan = s.testData.ConvertRowsToStrings(tk.MustQuery(tt).Rows()) + }) + tk.MustQuery(tt).Check(testkit.Rows(output[i].Plan...)) + } +} diff --git a/planner/core/logical_plan_builder.go b/planner/core/logical_plan_builder.go new file mode 100644 index 0000000..047ca39 --- /dev/null +++ b/planner/core/logical_plan_builder.go @@ -0,0 +1,1680 @@ +// Copyright 2016 PingCAP, Inc. 
+// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distribute under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package core + +import ( + "context" + "fmt" + "math" + "math/bits" + "sort" + "strings" + "unicode" + + "github.com/pingcap/errors" + "github.com/pingcap/tidb/domain" + "github.com/pingcap/tidb/expression" + "github.com/pingcap/tidb/expression/aggregation" + "github.com/pingcap/tidb/parser" + "github.com/pingcap/tidb/parser/ast" + "github.com/pingcap/tidb/parser/model" + "github.com/pingcap/tidb/parser/mysql" + "github.com/pingcap/tidb/parser/opcode" + "github.com/pingcap/tidb/sessionctx" + "github.com/pingcap/tidb/statistics" + "github.com/pingcap/tidb/table" + "github.com/pingcap/tidb/types" + driver "github.com/pingcap/tidb/types/parser_driver" + "github.com/pingcap/tidb/util/chunk" +) + +const ( + // TiDBMergeJoin is hint enforce merge join. + TiDBMergeJoin = "tidb_smj" + // HintSMJ is hint enforce merge join. + HintSMJ = "sm_join" + // TiDBHashJoin is hint enforce hash join. + TiDBHashJoin = "tidb_hj" + // HintHJ is hint enforce hash join. + HintHJ = "hash_join" + // HintUseIndex is hint enforce using some indexes. + HintUseIndex = "use_index" + // HintIgnoreIndex is hint enforce ignoring some indexes. + HintIgnoreIndex = "ignore_index" +) + +func (la *LogicalAggregation) collectGroupByColumns() { + la.groupByCols = la.groupByCols[:0] + for _, item := range la.GroupByItems { + if col, ok := item.(*expression.Column); ok { + la.groupByCols = append(la.groupByCols, col) + } + } +} + +func (b *PlanBuilder) buildAggregation(ctx context.Context, p LogicalPlan, aggFuncList []*ast.AggregateFuncExpr, gbyItems []expression.Expression) (LogicalPlan, map[int]int, error) { + b.optFlag = b.optFlag | flagBuildKeyInfo + b.optFlag = b.optFlag | flagPushDownAgg + // We may apply aggregation eliminate optimization. + // So we add the flagMaxMinEliminate to try to convert max/min to topn and flagPushDownTopN to handle the newly added topn operator. + b.optFlag = b.optFlag | flagMaxMinEliminate + b.optFlag = b.optFlag | flagPushDownTopN + // when we eliminate the max and min we may add `is not null` filter. + b.optFlag = b.optFlag | flagPredicatePushDown + b.optFlag = b.optFlag | flagEliminateAgg + b.optFlag = b.optFlag | flagEliminateProjection + + plan4Agg := LogicalAggregation{AggFuncs: make([]*aggregation.AggFuncDesc, 0, len(aggFuncList))}.Init(b.ctx) + schema4Agg := expression.NewSchema(make([]*expression.Column, 0, len(aggFuncList)+p.Schema().Len())...) + names := make(types.NameSlice, 0, len(aggFuncList)+p.Schema().Len()) + // aggIdxMap maps the old index to new index after applying common aggregation functions elimination. 
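+	// For example, in SELECT sum(a), count(b), sum(a) FROM t the third item is a
+	// duplicate of the first, so only two aggregate functions are kept and
+	// aggIndexMap becomes {0: 0, 1: 1, 2: 0}.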
+ aggIndexMap := make(map[int]int) + + for i, aggFunc := range aggFuncList { + newArgList := make([]expression.Expression, 0, len(aggFunc.Args)) + for _, arg := range aggFunc.Args { + newArg, np, err := b.rewrite(ctx, arg, p, nil, true) + if err != nil { + return nil, nil, err + } + p = np + newArgList = append(newArgList, newArg) + } + newFunc, err := aggregation.NewAggFuncDesc(b.ctx, aggFunc.F, newArgList) + if err != nil { + return nil, nil, err + } + combined := false + for j, oldFunc := range plan4Agg.AggFuncs { + if oldFunc.Equal(b.ctx, newFunc) { + aggIndexMap[i] = j + combined = true + break + } + } + if !combined { + position := len(plan4Agg.AggFuncs) + aggIndexMap[i] = position + plan4Agg.AggFuncs = append(plan4Agg.AggFuncs, newFunc) + schema4Agg.Append(&expression.Column{ + UniqueID: b.ctx.GetSessionVars().AllocPlanColumnID(), + RetType: newFunc.RetTp, + }) + names = append(names, types.EmptyName) + } + } + for i, col := range p.Schema().Columns { + newFunc, err := aggregation.NewAggFuncDesc(b.ctx, ast.AggFuncFirstRow, []expression.Expression{col}) + if err != nil { + return nil, nil, err + } + plan4Agg.AggFuncs = append(plan4Agg.AggFuncs, newFunc) + newCol, _ := col.Clone().(*expression.Column) + newCol.RetType = newFunc.RetTp + schema4Agg.Append(newCol) + names = append(names, p.OutputNames()[i]) + } + plan4Agg.names = names + plan4Agg.SetChildren(p) + plan4Agg.GroupByItems = gbyItems + plan4Agg.SetSchema(schema4Agg) + plan4Agg.collectGroupByColumns() + return plan4Agg, aggIndexMap, nil +} + +func (b *PlanBuilder) buildResultSetNode(ctx context.Context, node ast.ResultSetNode) (p LogicalPlan, err error) { + switch x := node.(type) { + case *ast.Join: + return b.buildJoin(ctx, x) + case *ast.TableSource: + switch v := x.Source.(type) { + case *ast.SelectStmt: + p, err = b.buildSelect(ctx, v) + case *ast.TableName: + p, err = b.buildDataSource(ctx, v, &x.AsName) + default: + err = ErrUnsupportedType.GenWithStackByArgs(v) + } + if err != nil { + return nil, err + } + + for _, name := range p.OutputNames() { + if name.Hidden { + continue + } + if x.AsName.L != "" { + name.TblName = x.AsName + } + } + // Duplicate column name in one table is not allowed. + // "select * from (select 1, 1) as a;" is duplicate + dupNames := make(map[string]struct{}, len(p.Schema().Columns)) + for _, name := range p.OutputNames() { + colName := name.ColName.O + if _, ok := dupNames[colName]; ok { + return nil, ErrDupFieldName.GenWithStackByArgs(colName) + } + dupNames[colName] = struct{}{} + } + return p, nil + case *ast.SelectStmt: + return b.buildSelect(ctx, x) + default: + return nil, ErrUnsupportedType.GenWithStack("Unsupported ast.ResultSetNode(%T) for buildResultSetNode()", x) + } +} + +// pushDownConstExpr checks if the condition is from filter condition, if true, push it down to both +// children of join, whatever the join type is; if false, push it down to inner child of outer join, +// and both children of non-outer-join. +func (p *LogicalJoin) pushDownConstExpr(expr expression.Expression, leftCond []expression.Expression, + rightCond []expression.Expression, filterCond bool) ([]expression.Expression, []expression.Expression) { + switch p.JoinType { + case LeftOuterJoin: + if filterCond { + leftCond = append(leftCond, expr) + // Append the expr to right join condition instead of `rightCond`, to make it able to be + // pushed down to children of join. 
+ p.RightConditions = append(p.RightConditions, expr) + } else { + rightCond = append(rightCond, expr) + } + case RightOuterJoin: + if filterCond { + rightCond = append(rightCond, expr) + p.LeftConditions = append(p.LeftConditions, expr) + } else { + leftCond = append(leftCond, expr) + } + case InnerJoin: + leftCond = append(leftCond, expr) + rightCond = append(rightCond, expr) + } + return leftCond, rightCond +} + +func (p *LogicalJoin) extractOnCondition(conditions []expression.Expression, deriveLeft bool, + deriveRight bool) (eqCond []*expression.ScalarFunction, leftCond []expression.Expression, + rightCond []expression.Expression, otherCond []expression.Expression) { + return p.ExtractOnCondition(conditions, p.children[0].Schema(), p.children[1].Schema(), deriveLeft, deriveRight) +} + +// ExtractOnCondition divide conditions in CNF of join node into 4 groups. +// These conditions can be where conditions, join conditions, or collection of both. +// If deriveLeft/deriveRight is set, we would try to derive more conditions for left/right plan. +func (p *LogicalJoin) ExtractOnCondition( + conditions []expression.Expression, + leftSchema *expression.Schema, + rightSchema *expression.Schema, + deriveLeft bool, + deriveRight bool) (eqCond []*expression.ScalarFunction, leftCond []expression.Expression, + rightCond []expression.Expression, otherCond []expression.Expression) { + for _, expr := range conditions { + binop, ok := expr.(*expression.ScalarFunction) + if ok && len(binop.GetArgs()) == 2 { + ctx := binop.GetCtx() + arg0, lOK := binop.GetArgs()[0].(*expression.Column) + arg1, rOK := binop.GetArgs()[1].(*expression.Column) + if lOK && rOK { + leftCol := leftSchema.RetrieveColumn(arg0) + rightCol := rightSchema.RetrieveColumn(arg1) + if leftCol == nil || rightCol == nil { + leftCol = leftSchema.RetrieveColumn(arg1) + rightCol = rightSchema.RetrieveColumn(arg0) + arg0, arg1 = arg1, arg0 + } + if leftCol != nil && rightCol != nil { + if deriveLeft { + if isNullRejected(ctx, leftSchema, expr) && !mysql.HasNotNullFlag(leftCol.RetType.Flag) { + notNullExpr := expression.BuildNotNullExpr(ctx, leftCol) + leftCond = append(leftCond, notNullExpr) + } + } + if deriveRight { + if isNullRejected(ctx, rightSchema, expr) && !mysql.HasNotNullFlag(rightCol.RetType.Flag) { + notNullExpr := expression.BuildNotNullExpr(ctx, rightCol) + rightCond = append(rightCond, notNullExpr) + } + } + if binop.FuncName.L == ast.EQ { + cond := expression.NewFunctionInternal(ctx, ast.EQ, types.NewFieldType(mysql.TypeTiny), arg0, arg1) + eqCond = append(eqCond, cond.(*expression.ScalarFunction)) + continue + } + } + } + } + columns := expression.ExtractColumns(expr) + // `columns` may be empty, if the condition is like `correlated_column op constant`, or `constant`, + // push this kind of constant condition down according to join type. + if len(columns) == 0 { + leftCond, rightCond = p.pushDownConstExpr(expr, leftCond, rightCond, deriveLeft || deriveRight) + continue + } + allFromLeft, allFromRight := true, true + for _, col := range columns { + if !leftSchema.Contains(col) { + allFromLeft = false + } + if !rightSchema.Contains(col) { + allFromRight = false + } + } + if allFromRight { + rightCond = append(rightCond, expr) + } else if allFromLeft { + leftCond = append(leftCond, expr) + } else { + // Relax expr to two supersets: leftRelaxedCond and rightRelaxedCond, the expression now is + // `expr AND leftRelaxedCond AND rightRelaxedCond`. Motivation is to push filters down to + // children as much as possible. 
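+			// For example, `(t1.a = 1 and t2.b = 1) or (t1.a = 2 and t2.b = 2)` implies the relaxed
+			// filters `t1.a = 1 or t1.a = 2` for the left child and `t2.b = 1 or t2.b = 2` for the right child.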
+ if deriveLeft { + leftRelaxedCond := expression.DeriveRelaxedFiltersFromDNF(expr, leftSchema) + if leftRelaxedCond != nil { + leftCond = append(leftCond, leftRelaxedCond) + } + } + if deriveRight { + rightRelaxedCond := expression.DeriveRelaxedFiltersFromDNF(expr, rightSchema) + if rightRelaxedCond != nil { + rightCond = append(rightCond, rightRelaxedCond) + } + } + otherCond = append(otherCond, expr) + } + } + return +} + +// extractTableAlias returns table alias of the LogicalPlan's columns. +// It will return nil when there are multiple table alias, because the alias is only used to check if +// the logicalPlan match some optimizer hints, and hints are not expected to take effect in this case. +func extractTableAlias(p Plan) *hintTableInfo { + if len(p.OutputNames()) > 0 && p.OutputNames()[0].TblName.L != "" { + firstName := p.OutputNames()[0] + for _, name := range p.OutputNames() { + if name.TblName.L != firstName.TblName.L || name.DBName.L != firstName.DBName.L { + return nil + } + } + return &hintTableInfo{dbName: firstName.DBName, tblName: firstName.TblName} + } + return nil +} + +func (p *LogicalJoin) setPreferredJoinType(hintInfo *tableHintInfo) { + if hintInfo == nil { + return + } + + lhsAlias := extractTableAlias(p.children[0]) + rhsAlias := extractTableAlias(p.children[1]) + if hintInfo.ifPreferMergeJoin(lhsAlias, rhsAlias) { + p.preferJoinType |= preferMergeJoin + } + if hintInfo.ifPreferHashJoin(lhsAlias, rhsAlias) { + p.preferJoinType |= preferHashJoin + } + + // set hintInfo for further usage if this hint info can be used. + if p.preferJoinType != 0 { + p.hintInfo = hintInfo + } + + if containDifferentJoinTypes(p.preferJoinType) { + errMsg := "Join hints are conflict, you can only specify one type of join" + warning := ErrInternal.GenWithStack(errMsg) + p.ctx.GetSessionVars().StmtCtx.AppendWarning(warning) + } +} + +func resetNotNullFlag(schema *expression.Schema, start, end int) { + for i := start; i < end; i++ { + col := *schema.Columns[i] + newFieldType := *col.RetType + newFieldType.Flag &= ^mysql.NotNullFlag + col.RetType = &newFieldType + schema.Columns[i] = &col + } +} + +func (b *PlanBuilder) buildJoin(ctx context.Context, joinNode *ast.Join) (LogicalPlan, error) { + // We will construct a "Join" node for some statements like "INSERT", + // "DELETE", "UPDATE", "REPLACE". For this scenario "joinNode.Right" is nil + // and we only build the left "ResultSetNode". + if joinNode.Right == nil { + return b.buildResultSetNode(ctx, joinNode.Left) + } + + b.optFlag = b.optFlag | flagPredicatePushDown + + leftPlan, err := b.buildResultSetNode(ctx, joinNode.Left) + if err != nil { + return nil, err + } + + rightPlan, err := b.buildResultSetNode(ctx, joinNode.Right) + if err != nil { + return nil, err + } + + handleMap1 := b.handleHelper.popMap() + handleMap2 := b.handleHelper.popMap() + b.handleHelper.mergeAndPush(handleMap1, handleMap2) + + joinPlan := LogicalJoin{StraightJoin: b.inStraightJoin}.Init(b.ctx) + joinPlan.SetChildren(leftPlan, rightPlan) + joinPlan.SetSchema(expression.MergeSchema(leftPlan.Schema(), rightPlan.Schema())) + joinPlan.names = make([]*types.FieldName, leftPlan.Schema().Len()+rightPlan.Schema().Len()) + copy(joinPlan.names, leftPlan.OutputNames()) + copy(joinPlan.names[leftPlan.Schema().Len():], rightPlan.OutputNames()) + + // Set join type. 
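+	// For outer joins, the columns of the NULL-supplying side may produce NULL values even if they are
+	// declared NOT NULL, so the not-null flag is cleared for that side below.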
+ switch joinNode.Tp { + case ast.LeftJoin: + // left outer join need to be checked elimination + b.optFlag = b.optFlag | flagEliminateOuterJoin + joinPlan.JoinType = LeftOuterJoin + resetNotNullFlag(joinPlan.schema, leftPlan.Schema().Len(), joinPlan.schema.Len()) + case ast.RightJoin: + // right outer join need to be checked elimination + b.optFlag = b.optFlag | flagEliminateOuterJoin + joinPlan.JoinType = RightOuterJoin + resetNotNullFlag(joinPlan.schema, 0, leftPlan.Schema().Len()) + default: + b.optFlag = b.optFlag | flagJoinReOrder + joinPlan.JoinType = InnerJoin + } + + // Set preferred join algorithm if some join hints is specified by user. + joinPlan.setPreferredJoinType(b.TableHints()) + + if joinNode.On != nil { + b.curClause = onClause + onExpr, newPlan, err := b.rewrite(ctx, joinNode.On.Expr, joinPlan, nil, false) + if err != nil { + return nil, err + } + if newPlan != joinPlan { + return nil, errors.New("ON condition doesn't support subqueries yet") + } + onCondition := expression.SplitCNFItems(onExpr) + joinPlan.attachOnConds(onCondition) + } else if joinPlan.JoinType == InnerJoin { + // If a inner join without "ON" or "USING" clause, it's a cartesian + // product over the join tables. + joinPlan.cartesianJoin = true + } + + return joinPlan, nil +} + +func (b *PlanBuilder) buildSelection(ctx context.Context, p LogicalPlan, where ast.ExprNode, AggMapper map[*ast.AggregateFuncExpr]int) (LogicalPlan, error) { + b.optFlag = b.optFlag | flagPredicatePushDown + if b.curClause != havingClause { + b.curClause = whereClause + } + + conditions := splitWhere(where) + expressions := make([]expression.Expression, 0, len(conditions)) + selection := LogicalSelection{}.Init(b.ctx) + for _, cond := range conditions { + expr, np, err := b.rewrite(ctx, cond, p, AggMapper, false) + if err != nil { + return nil, err + } + p = np + if expr == nil { + continue + } + cnfItems := expression.SplitCNFItems(expr) + for _, item := range cnfItems { + if con, ok := item.(*expression.Constant); ok { + ret, _, err := expression.EvalBool(b.ctx, expression.CNFExprs{con}, chunk.Row{}) + if err != nil || ret { + continue + } + // If there is condition which is always false, return dual plan directly. + dual := LogicalTableDual{}.Init(b.ctx) + dual.names = p.OutputNames() + dual.SetSchema(p.Schema()) + return dual, nil + } + expressions = append(expressions, item) + } + } + if len(expressions) == 0 { + return p, nil + } + selection.Conditions = expressions + selection.SetChildren(p) + return selection, nil +} + +// buildProjectionFieldNameFromColumns builds the field name, table name and database name when field expression is a column reference. +func (b *PlanBuilder) buildProjectionFieldNameFromColumns(origField *ast.SelectField, colNameField *ast.ColumnNameExpr, name *types.FieldName) (colName, origColName, tblName, origTblName, dbName model.CIStr) { + origTblName, origColName, dbName = name.OrigTblName, name.OrigColName, name.DBName + if origField.AsName.L == "" { + colName = colNameField.Name.Name + } else { + colName = origField.AsName + } + if tblName.L == "" { + tblName = name.TblName + } else { + tblName = colNameField.Name.Table + } + return +} + +// buildProjectionFieldNameFromExpressions builds the field name when field expression is a normal expression. 
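+// Literals use their (trimmed) text as the column name, NULL becomes `NULL`, and boolean literals become
+// `TRUE`/`FALSE`; any other expression uses its original text with comments removed.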
+func (b *PlanBuilder) buildProjectionFieldNameFromExpressions(ctx context.Context, field *ast.SelectField) (model.CIStr, error) { + if agg, ok := field.Expr.(*ast.AggregateFuncExpr); ok && agg.F == ast.AggFuncFirstRow { + // When the query is select t.a from t group by a; The Column Name should be a but not t.a; + return agg.Args[0].(*ast.ColumnNameExpr).Name.Name, nil + } + + innerExpr := getInnerFromParenthesesAndUnaryPlus(field.Expr) + valueExpr, isValueExpr := innerExpr.(*driver.ValueExpr) + + // Non-literal: Output as inputed, except that comments need to be removed. + if !isValueExpr { + return model.NewCIStr(parser.SpecFieldPattern.ReplaceAllStringFunc(field.Text(), parser.TrimComment)), nil + } + + // Literal: Need special processing + switch valueExpr.Kind() { + case types.KindString: + projName := valueExpr.GetString() + projOffset := valueExpr.GetProjectionOffset() + if projOffset >= 0 { + projName = projName[:projOffset] + } + // See #3686, #3994: + // For string literals, string content is used as column name. Non-graph initial characters are trimmed. + fieldName := strings.TrimLeftFunc(projName, func(r rune) bool { + return !unicode.IsOneOf(mysql.RangeGraph, r) + }) + return model.NewCIStr(fieldName), nil + case types.KindNull: + // See #4053, #3685 + return model.NewCIStr("NULL"), nil + case types.KindInt64: + // See #9683 + // TRUE or FALSE can be a int64 + if mysql.HasIsBooleanFlag(valueExpr.Type.Flag) { + if i := valueExpr.GetValue().(int64); i == 0 { + return model.NewCIStr("FALSE"), nil + } + return model.NewCIStr("TRUE"), nil + } + fallthrough + + default: + fieldName := field.Text() + fieldName = strings.TrimLeft(fieldName, "\t\n +(") + fieldName = strings.TrimRight(fieldName, "\t\n )") + return model.NewCIStr(fieldName), nil + } +} + +// buildProjectionField builds the field object according to SelectField in projection. +func (b *PlanBuilder) buildProjectionField(ctx context.Context, p LogicalPlan, field *ast.SelectField, expr expression.Expression) (*expression.Column, *types.FieldName, error) { + var origTblName, tblName, origColName, colName, dbName model.CIStr + innerNode := getInnerFromParenthesesAndUnaryPlus(field.Expr) + col, isCol := expr.(*expression.Column) + if colNameField, ok := innerNode.(*ast.ColumnNameExpr); ok && isCol { + // Field is a column reference. + idx := p.Schema().ColumnIndex(col) + name := p.OutputNames()[idx] + colName, origColName, tblName, origTblName, dbName = b.buildProjectionFieldNameFromColumns(field, colNameField, name) + } else if field.AsName.L != "" { + // Field has alias. + colName = field.AsName + } else { + // Other: field is an expression. + var err error + if colName, err = b.buildProjectionFieldNameFromExpressions(ctx, field); err != nil { + return nil, nil, err + } + } + name := &types.FieldName{ + TblName: tblName, + OrigTblName: origTblName, + ColName: colName, + OrigColName: origColName, + DBName: dbName, + } + if isCol { + return col, name, nil + } + newCol := &expression.Column{ + UniqueID: b.ctx.GetSessionVars().AllocPlanColumnID(), + RetType: expr.GetType(), + } + return newCol, name, nil +} + +// buildProjection returns a Projection plan and non-aux columns length. 
+func (b *PlanBuilder) buildProjection(ctx context.Context, p LogicalPlan, fields []*ast.SelectField, mapper map[*ast.AggregateFuncExpr]int) (LogicalPlan, int, error) { + b.optFlag |= flagEliminateProjection + b.curClause = fieldList + proj := LogicalProjection{Exprs: make([]expression.Expression, 0, len(fields))}.Init(b.ctx) + schema := expression.NewSchema(make([]*expression.Column, 0, len(fields))...) + oldLen := 0 + newNames := make([]*types.FieldName, 0, len(fields)) + for _, field := range fields { + if !field.Auxiliary { + oldLen++ + } + + newExpr, np, err := b.rewriteWithPreprocess(ctx, field.Expr, p, mapper, true, nil) + if err != nil { + return nil, 0, err + } + + p = np + proj.Exprs = append(proj.Exprs, newExpr) + + col, name, err := b.buildProjectionField(ctx, p, field, newExpr) + if err != nil { + return nil, 0, err + } + schema.Append(col) + newNames = append(newNames, name) + } + proj.SetSchema(schema) + proj.SetChildren(p) + proj.names = newNames + return proj, oldLen, nil +} + +func (b *PlanBuilder) buildDistinct(child LogicalPlan, length int) (*LogicalAggregation, error) { + b.optFlag = b.optFlag | flagBuildKeyInfo + b.optFlag = b.optFlag | flagPushDownAgg + plan4Agg := LogicalAggregation{ + AggFuncs: make([]*aggregation.AggFuncDesc, 0, child.Schema().Len()), + GroupByItems: expression.Column2Exprs(child.Schema().Clone().Columns[:length]), + }.Init(b.ctx) + plan4Agg.collectGroupByColumns() + for _, col := range child.Schema().Columns { + aggDesc, err := aggregation.NewAggFuncDesc(b.ctx, ast.AggFuncFirstRow, []expression.Expression{col}) + if err != nil { + return nil, err + } + plan4Agg.AggFuncs = append(plan4Agg.AggFuncs, aggDesc) + } + plan4Agg.SetChildren(child) + plan4Agg.SetSchema(child.Schema().Clone()) + plan4Agg.names = child.OutputNames() + // Distinct will be rewritten as first_row, we reset the type here since the return type + // of first_row is not always the same as the column arg of first_row. + for i, col := range plan4Agg.schema.Columns { + col.RetType = plan4Agg.AggFuncs[i].RetTp + } + return plan4Agg, nil +} + +// ByItems wraps a "by" item. +type ByItems struct { + Expr expression.Expression + Desc bool +} + +// String implements fmt.Stringer interface. +func (by *ByItems) String() string { + if by.Desc { + return fmt.Sprintf("%s true", by.Expr) + } + return by.Expr.String() +} + +// Clone makes a copy of ByItems. +func (by *ByItems) Clone() *ByItems { + return &ByItems{Expr: by.Expr.Clone(), Desc: by.Desc} +} + +func (b *PlanBuilder) buildSort(ctx context.Context, p LogicalPlan, byItems []*ast.ByItem, aggMapper map[*ast.AggregateFuncExpr]int) (*LogicalSort, error) { + b.curClause = orderByClause + sort := LogicalSort{}.Init(b.ctx) + exprs := make([]*ByItems, 0, len(byItems)) + for _, item := range byItems { + it, np, err := b.rewriteWithPreprocess(ctx, item.Expr, p, aggMapper, true, nil) + if err != nil { + return nil, err + } + + p = np + exprs = append(exprs, &ByItems{Expr: it, Desc: item.Desc}) + } + sort.ByItems = exprs + sort.SetChildren(p) + return sort, nil +} + +// getUintFromNode gets uint64 value from ast.Node. +// For ordinary statement, node should be uint64 constant value. 
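+// A negative integer or a string that cannot be parsed as uint64 reports isExpectedType == false,
+// which callers such as extractLimitCountOffset treat as an invalid LIMIT argument.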
+func getUintFromNode(ctx sessionctx.Context, n ast.Node) (uVal uint64, isNull bool, isExpectedType bool) { + var val interface{} + switch v := n.(type) { + case *driver.ValueExpr: + val = v.GetValue() + default: + return 0, false, false + } + switch v := val.(type) { + case uint64: + return v, false, true + case int64: + if v >= 0 { + return uint64(v), false, true + } + case string: + sc := ctx.GetSessionVars().StmtCtx + uVal, err := types.StrToUint(sc, v) + if err != nil { + return 0, false, false + } + return uVal, false, true + } + return 0, false, false +} + +func extractLimitCountOffset(ctx sessionctx.Context, limit *ast.Limit) (count uint64, + offset uint64, err error) { + var isExpectedType bool + if limit.Count != nil { + count, _, isExpectedType = getUintFromNode(ctx, limit.Count) + if !isExpectedType { + return 0, 0, ErrWrongArguments.GenWithStackByArgs("LIMIT") + } + } + if limit.Offset != nil { + offset, _, isExpectedType = getUintFromNode(ctx, limit.Offset) + if !isExpectedType { + return 0, 0, ErrWrongArguments.GenWithStackByArgs("LIMIT") + } + } + return count, offset, nil +} + +func (b *PlanBuilder) buildLimit(src LogicalPlan, limit *ast.Limit) (LogicalPlan, error) { + b.optFlag = b.optFlag | flagPushDownTopN + var ( + offset, count uint64 + err error + ) + if count, offset, err = extractLimitCountOffset(b.ctx, limit); err != nil { + return nil, err + } + + if count > math.MaxUint64-offset { + count = math.MaxUint64 - offset + } + if offset+count == 0 { + tableDual := LogicalTableDual{RowCount: 0}.Init(b.ctx) + tableDual.schema = src.Schema() + tableDual.names = src.OutputNames() + return tableDual, nil + } + li := LogicalLimit{ + Offset: offset, + Count: count, + }.Init(b.ctx) + li.SetChildren(src) + return li, nil +} + +// colMatch means that if a match b, e.g. t.a can match test.t.a but test.t.a can't match t.a. +// Because column a want column from database test exactly. +func colMatch(a *ast.ColumnName, b *ast.ColumnName) bool { + if a.Schema.L == "" || a.Schema.L == b.Schema.L { + if a.Table.L == "" || a.Table.L == b.Table.L { + return a.Name.L == b.Name.L + } + } + return false +} + +func matchField(f *ast.SelectField, col *ast.ColumnNameExpr, ignoreAsName bool) bool { + // if col specify a table name, resolve from table source directly. + if col.Name.Table.L == "" { + if f.AsName.L == "" || ignoreAsName { + if curCol, isCol := f.Expr.(*ast.ColumnNameExpr); isCol { + return curCol.Name.Name.L == col.Name.Name.L + } else if _, isFunc := f.Expr.(*ast.FuncCallExpr); isFunc { + // Fix issue 7331 + // If there are some function calls in SelectField, we check if + // ColumnNameExpr in GroupByClause matches one of these function calls. + // Example: select concat(k1,k2) from t group by `concat(k1,k2)`, + // `concat(k1,k2)` matches with function call concat(k1, k2). + return strings.ToLower(f.Text()) == col.Name.Name.L + } + // a expression without as name can't be matched. 
+ return false + } + return f.AsName.L == col.Name.Name.L + } + return false +} + +func resolveFromSelectFields(v *ast.ColumnNameExpr, fields []*ast.SelectField, ignoreAsName bool) (index int, err error) { + var matchedExpr ast.ExprNode + index = -1 + for i, field := range fields { + if field.Auxiliary { + continue + } + if matchField(field, v, ignoreAsName) { + curCol, isCol := field.Expr.(*ast.ColumnNameExpr) + if !isCol { + return i, nil + } + if matchedExpr == nil { + matchedExpr = curCol + index = i + } else if !colMatch(matchedExpr.(*ast.ColumnNameExpr).Name, curCol.Name) && + !colMatch(curCol.Name, matchedExpr.(*ast.ColumnNameExpr).Name) { + return -1, ErrAmbiguous.GenWithStackByArgs(curCol.Name.Name.L, clauseMsg[fieldList]) + } + } + } + return +} + +// havingAndOrderbyExprResolver visits Expr tree. +// It converts ColunmNameExpr to AggregateFuncExpr and collects AggregateFuncExpr. +type havingAndOrderbyExprResolver struct { + inAggFunc bool + inExpr bool + orderBy bool + err error + p LogicalPlan + selectFields []*ast.SelectField + aggMapper map[*ast.AggregateFuncExpr]int + colMapper map[*ast.ColumnNameExpr]int + gbyItems []*ast.ByItem + curClause clauseCode +} + +// Enter implements Visitor interface. +func (a *havingAndOrderbyExprResolver) Enter(n ast.Node) (node ast.Node, skipChildren bool) { + switch n.(type) { + case *ast.AggregateFuncExpr: + a.inAggFunc = true + case *ast.ColumnNameExpr, *ast.ColumnName: + default: + a.inExpr = true + } + return n, false +} + +func (a *havingAndOrderbyExprResolver) resolveFromPlan(v *ast.ColumnNameExpr, p LogicalPlan) (int, error) { + idx, err := expression.FindFieldName(p.OutputNames(), v.Name) + if err != nil { + return -1, err + } + if idx < 0 { + return -1, nil + } + col := p.Schema().Columns[idx] + name := p.OutputNames()[idx] + newColName := &ast.ColumnName{ + Schema: name.DBName, + Table: name.TblName, + Name: name.ColName, + } + for i, field := range a.selectFields { + if c, ok := field.Expr.(*ast.ColumnNameExpr); ok && colMatch(c.Name, newColName) { + return i, nil + } + } + sf := &ast.SelectField{ + Expr: &ast.ColumnNameExpr{Name: newColName}, + Auxiliary: true, + } + sf.Expr.SetType(col.GetType()) + a.selectFields = append(a.selectFields, sf) + return len(a.selectFields) - 1, nil +} + +// Leave implements Visitor interface. +func (a *havingAndOrderbyExprResolver) Leave(n ast.Node) (node ast.Node, ok bool) { + switch v := n.(type) { + case *ast.AggregateFuncExpr: + a.inAggFunc = false + a.aggMapper[v] = len(a.selectFields) + a.selectFields = append(a.selectFields, &ast.SelectField{ + Auxiliary: true, + Expr: v, + AsName: model.NewCIStr(fmt.Sprintf("sel_agg_%d", len(a.selectFields))), + }) + case *ast.ColumnNameExpr: + resolveFieldsFirst := true + if a.inAggFunc || (a.orderBy && a.inExpr) || a.curClause == fieldList { + resolveFieldsFirst = false + } + if !a.inAggFunc && !a.orderBy { + for _, item := range a.gbyItems { + if col, ok := item.Expr.(*ast.ColumnNameExpr); ok && + (colMatch(v.Name, col.Name) || colMatch(col.Name, v.Name)) { + resolveFieldsFirst = false + break + } + } + } + var index int + if resolveFieldsFirst { + index, a.err = resolveFromSelectFields(v, a.selectFields, false) + if a.err != nil { + return node, false + } + if index == -1 { + if a.orderBy { + index, a.err = a.resolveFromPlan(v, a.p) + } else { + index, a.err = resolveFromSelectFields(v, a.selectFields, true) + } + } + } else { + // We should ignore the err when resolving from schema. 
Because we could resolve successfully + // when considering select fields. + var err error + index, err = a.resolveFromPlan(v, a.p) + _ = err + if index == -1 && a.curClause != fieldList { + index, a.err = resolveFromSelectFields(v, a.selectFields, false) + } + } + if a.err != nil { + return node, false + } + if index == -1 { + a.err = ErrUnknownColumn.GenWithStackByArgs(v.Name.OrigColName(), clauseMsg[a.curClause]) + return node, false + } + if a.inAggFunc { + return a.selectFields[index].Expr, true + } + a.colMapper[v] = index + } + return n, true +} + +// resolveHavingAndOrderBy will process aggregate functions and resolve the columns that don't exist in select fields. +// If we found some columns that are not in select fields, we will append it to select fields and update the colMapper. +// When we rewrite the order by / having expression, we will find column in map at first. +func (b *PlanBuilder) resolveHavingAndOrderBy(sel *ast.SelectStmt, p LogicalPlan) ( + map[*ast.AggregateFuncExpr]int, map[*ast.AggregateFuncExpr]int, error) { + extractor := &havingAndOrderbyExprResolver{ + p: p, + selectFields: sel.Fields.Fields, + aggMapper: make(map[*ast.AggregateFuncExpr]int), + colMapper: b.colMapper, + } + if sel.GroupBy != nil { + extractor.gbyItems = sel.GroupBy.Items + } + // Extract agg funcs from having clause. + if sel.Having != nil { + extractor.curClause = havingClause + n, ok := sel.Having.Expr.Accept(extractor) + if !ok { + return nil, nil, errors.Trace(extractor.err) + } + sel.Having.Expr = n.(ast.ExprNode) + } + havingAggMapper := extractor.aggMapper + extractor.aggMapper = make(map[*ast.AggregateFuncExpr]int) + extractor.orderBy = true + extractor.inExpr = false + // Extract agg funcs from order by clause. + if sel.OrderBy != nil { + extractor.curClause = orderByClause + for _, item := range sel.OrderBy.Items { + n, ok := item.Expr.Accept(extractor) + if !ok { + return nil, nil, errors.Trace(extractor.err) + } + item.Expr = n.(ast.ExprNode) + } + } + sel.Fields.Fields = extractor.selectFields + return havingAggMapper, extractor.aggMapper, nil +} + +func (b *PlanBuilder) extractAggFuncs(fields []*ast.SelectField) ([]*ast.AggregateFuncExpr, map[*ast.AggregateFuncExpr]int) { + extractor := &AggregateFuncExtractor{} + for _, f := range fields { + n, _ := f.Expr.Accept(extractor) + f.Expr = n.(ast.ExprNode) + } + aggList := extractor.AggFuncs + totalAggMapper := make(map[*ast.AggregateFuncExpr]int) + + for i, agg := range aggList { + totalAggMapper[agg] = i + } + return aggList, totalAggMapper +} + +// gbyResolver resolves group by items from select fields. +type gbyResolver struct { + ctx sessionctx.Context + fields []*ast.SelectField + schema *expression.Schema + names []*types.FieldName + err error + inExpr bool + + exprDepth int // exprDepth is the depth of current expression in expression tree. 
+} + +func (g *gbyResolver) Enter(inNode ast.Node) (ast.Node, bool) { + g.exprDepth++ + switch inNode.(type) { + case *driver.ValueExpr, *ast.ColumnNameExpr, *ast.ParenthesesExpr, *ast.ColumnName: + default: + g.inExpr = true + } + return inNode, false +} + +func (g *gbyResolver) Leave(inNode ast.Node) (ast.Node, bool) { + extractor := &AggregateFuncExtractor{} + switch v := inNode.(type) { + case *ast.ColumnNameExpr: + idx, err := expression.FindFieldName(g.names, v.Name) + if idx < 0 || !g.inExpr { + var index int + index, g.err = resolveFromSelectFields(v, g.fields, false) + if g.err != nil { + return inNode, false + } + if idx >= 0 { + return inNode, true + } + if index != -1 { + ret := g.fields[index].Expr + ret.Accept(extractor) + if len(extractor.AggFuncs) != 0 { + err = ErrIllegalReference.GenWithStackByArgs(v.Name.OrigColName(), "reference to group function") + } else { + return ret, true + } + } + g.err = err + return inNode, false + } + case *ast.ValuesExpr: + if v.Column == nil { + g.err = ErrUnknownColumn.GenWithStackByArgs("", "VALUES() function") + } + } + return inNode, true +} + +func (b *PlanBuilder) resolveGbyExprs(ctx context.Context, p LogicalPlan, gby *ast.GroupByClause, fields []*ast.SelectField) (LogicalPlan, []expression.Expression, error) { + b.curClause = groupByClause + exprs := make([]expression.Expression, 0, len(gby.Items)) + resolver := &gbyResolver{ + ctx: b.ctx, + fields: fields, + schema: p.Schema(), + names: p.OutputNames(), + } + for _, item := range gby.Items { + resolver.inExpr = false + retExpr, _ := item.Expr.Accept(resolver) + if resolver.err != nil { + return nil, nil, errors.Trace(resolver.err) + } + item.Expr = retExpr.(ast.ExprNode) + + itemExpr := retExpr.(ast.ExprNode) + expr, np, err := b.rewrite(ctx, itemExpr, p, nil, true) + if err != nil { + return nil, nil, err + } + + exprs = append(exprs, expr) + p = np + } + return p, exprs, nil +} + +func (b *PlanBuilder) unfoldWildStar(p LogicalPlan, selectFields []*ast.SelectField) (resultList []*ast.SelectField, err error) { + for i, field := range selectFields { + if field.WildCard == nil { + resultList = append(resultList, field) + continue + } + if field.WildCard.Table.L == "" && i > 0 { + return nil, ErrInvalidWildCard + } + dbName := field.WildCard.Schema + tblName := field.WildCard.Table + findTblNameInSchema := false + for i, name := range p.OutputNames() { + col := p.Schema().Columns[i] + if (dbName.L == "" || dbName.L == name.DBName.L) && + (tblName.L == "" || tblName.L == name.TblName.L) && + col.ID != model.ExtraHandleID { + findTblNameInSchema = true + colName := &ast.ColumnNameExpr{ + Name: &ast.ColumnName{ + Schema: name.DBName, + Table: name.TblName, + Name: name.ColName, + }} + colName.SetType(col.GetType()) + field := &ast.SelectField{Expr: colName} + field.SetText(name.ColName.O) + resultList = append(resultList, field) + } + } + if !findTblNameInSchema { + return nil, ErrBadTable.GenWithStackByArgs(tblName) + } + } + return resultList, nil +} + +func (b *PlanBuilder) pushTableHints(hints []*ast.TableOptimizerHint) { + var ( + sortMergeTables, hashJoinTables []hintTableInfo + indexHintList []indexHintInfo + ) + for _, hint := range hints { + switch hint.HintName.L { + case TiDBMergeJoin, HintSMJ: + sortMergeTables = append(sortMergeTables, tableNames2HintTableInfo(b.ctx, hint.Tables)...) + case TiDBHashJoin, HintHJ: + hashJoinTables = append(hashJoinTables, tableNames2HintTableInfo(b.ctx, hint.Tables)...) 
+ case HintUseIndex: + if len(hint.Tables) != 0 { + dbName := hint.Tables[0].DBName + if dbName.L == "" { + dbName = model.NewCIStr(b.ctx.GetSessionVars().CurrentDB) + } + indexHintList = append(indexHintList, indexHintInfo{ + dbName: dbName, + tblName: hint.Tables[0].TableName, + indexHint: &ast.IndexHint{ + IndexNames: hint.Indexes, + HintType: ast.HintUse, + HintScope: ast.HintForScan, + }, + }) + } + case HintIgnoreIndex: + if len(hint.Tables) != 0 { + dbName := hint.Tables[0].DBName + if dbName.L == "" { + dbName = model.NewCIStr(b.ctx.GetSessionVars().CurrentDB) + } + indexHintList = append(indexHintList, indexHintInfo{ + dbName: dbName, + tblName: hint.Tables[0].TableName, + indexHint: &ast.IndexHint{ + IndexNames: hint.Indexes, + HintType: ast.HintIgnore, + HintScope: ast.HintForScan, + }, + }) + } + + default: + // ignore hints that not implemented + } + } + b.tableHintInfo = append(b.tableHintInfo, tableHintInfo{ + sortMergeJoinTables: sortMergeTables, + hashJoinTables: hashJoinTables, + indexHintList: indexHintList, + }) +} + +func (b *PlanBuilder) popTableHints() { + hintInfo := b.tableHintInfo[len(b.tableHintInfo)-1] + b.appendUnmatchedJoinHintWarning(HintSMJ, TiDBMergeJoin, hintInfo.sortMergeJoinTables) + b.appendUnmatchedJoinHintWarning(HintHJ, TiDBHashJoin, hintInfo.hashJoinTables) + b.tableHintInfo = b.tableHintInfo[:len(b.tableHintInfo)-1] +} + +func (b *PlanBuilder) appendUnmatchedJoinHintWarning(joinType string, joinTypeAlias string, hintTables []hintTableInfo) { + unMatchedTables := extractUnmatchedTables(hintTables) + if len(unMatchedTables) == 0 { + return + } + if len(joinTypeAlias) != 0 { + joinTypeAlias = fmt.Sprintf(" or %s", restore2JoinHint(joinTypeAlias, hintTables)) + } + + errMsg := fmt.Sprintf("There are no matching table names for (%s) in optimizer hint %s%s. Maybe you can use the table alias name", + strings.Join(unMatchedTables, ", "), restore2JoinHint(joinType, hintTables), joinTypeAlias) + b.ctx.GetSessionVars().StmtCtx.AppendWarning(ErrInternal.GenWithStack(errMsg)) +} + +// TableHints returns the *tableHintInfo of PlanBuilder. +func (b *PlanBuilder) TableHints() *tableHintInfo { + if len(b.tableHintInfo) == 0 { + return nil + } + return &(b.tableHintInfo[len(b.tableHintInfo)-1]) +} + +func (b *PlanBuilder) buildSelect(ctx context.Context, sel *ast.SelectStmt) (p LogicalPlan, err error) { + b.pushTableHints(sel.TableHints) + defer func() { + // table hints are only visible in the current SELECT statement. 
+ b.popTableHints() + }() + + if sel.SelectStmtOpts != nil { + origin := b.inStraightJoin + b.inStraightJoin = sel.SelectStmtOpts.StraightJoin + defer func() { b.inStraightJoin = origin }() + } + + var ( + aggFuncs []*ast.AggregateFuncExpr + havingMap, orderMap, totalMap map[*ast.AggregateFuncExpr]int + gbyCols []expression.Expression + ) + + if sel.From != nil { + p, err = b.buildResultSetNode(ctx, sel.From.TableRefs) + if err != nil { + return nil, err + } + } else { + p = b.buildTableDual() + } + + originalFields := sel.Fields.Fields + sel.Fields.Fields, err = b.unfoldWildStar(p, sel.Fields.Fields) + if err != nil { + return nil, err + } + + if sel.GroupBy != nil { + p, gbyCols, err = b.resolveGbyExprs(ctx, p, sel.GroupBy, sel.Fields.Fields) + if err != nil { + return nil, err + } + } + + // We must resolve having and order by clause before build projection, + // because when the query is "select a+1 as b from t having sum(b) < 0", we must replace sum(b) to sum(a+1), + // which only can be done before building projection and extracting Agg functions. + havingMap, orderMap, err = b.resolveHavingAndOrderBy(sel, p) + if err != nil { + return nil, err + } + + if sel.Where != nil { + p, err = b.buildSelection(ctx, p, sel.Where, nil) + if err != nil { + return nil, err + } + } + + b.handleHelper.popMap() + b.handleHelper.pushMap(nil) + + hasAgg := b.detectSelectAgg(sel) + if hasAgg { + aggFuncs, totalMap = b.extractAggFuncs(sel.Fields.Fields) + var aggIndexMap map[int]int + p, aggIndexMap, err = b.buildAggregation(ctx, p, aggFuncs, gbyCols) + if err != nil { + return nil, err + } + for k, v := range totalMap { + totalMap[k] = aggIndexMap[v] + } + } + + var oldLen int + p, oldLen, err = b.buildProjection(ctx, p, sel.Fields.Fields, totalMap) + if err != nil { + return nil, err + } + + if sel.Having != nil { + b.curClause = havingClause + p, err = b.buildSelection(ctx, p, sel.Having.Expr, havingMap) + if err != nil { + return nil, err + } + } + + if sel.Distinct { + p, err = b.buildDistinct(p, oldLen) + if err != nil { + return nil, err + } + } + + if sel.OrderBy != nil { + p, err = b.buildSort(ctx, p, sel.OrderBy.Items, orderMap) + if err != nil { + return nil, err + } + } + + if sel.Limit != nil { + p, err = b.buildLimit(p, sel.Limit) + if err != nil { + return nil, err + } + } + + sel.Fields.Fields = originalFields + if oldLen != p.Schema().Len() { + proj := LogicalProjection{Exprs: expression.Column2Exprs(p.Schema().Columns[:oldLen])}.Init(b.ctx) + proj.SetChildren(p) + schema := expression.NewSchema(p.Schema().Clone().Columns[:oldLen]...) + for _, col := range schema.Columns { + col.UniqueID = b.ctx.GetSessionVars().AllocPlanColumnID() + } + proj.names = p.OutputNames()[:oldLen] + proj.SetSchema(schema) + return proj, nil + } + + return p, nil +} + +func (b *PlanBuilder) buildTableDual() *LogicalTableDual { + b.handleHelper.pushMap(nil) + return LogicalTableDual{RowCount: 1}.Init(b.ctx) +} + +func (ds *DataSource) newExtraHandleSchemaCol() *expression.Column { + return &expression.Column{ + RetType: types.NewFieldType(mysql.TypeLonglong), + UniqueID: ds.ctx.GetSessionVars().AllocPlanColumnID(), + ID: model.ExtraHandleID, + OrigName: fmt.Sprintf("%v.%v.%v", ds.DBName, ds.tableInfo.Name, model.ExtraHandleName), + } +} + +// getStatsTable gets statistics information for a table specified by "tableID". +// A pseudo statistics table is returned in any of the following scenario: +// 1. tidb-server started and statistics handle has not been initialized. +// 2. 
table row count from statistics is zero. +// 3. statistics is outdated. +func getStatsTable(ctx sessionctx.Context, tblInfo *model.TableInfo, pid int64) *statistics.Table { + statsHandle := domain.GetDomain(ctx).StatsHandle() + + // 1. tidb-server started and statistics handle has not been initialized. + if statsHandle == nil { + return statistics.PseudoTable(tblInfo) + } + + var statsTbl *statistics.Table + if pid != tblInfo.ID { + statsTbl = statsHandle.GetPartitionStats(tblInfo, pid) + } else { + statsTbl = statsHandle.GetTableStats(tblInfo) + } + + // 2. table row count from statistics is zero. + if statsTbl.Count == 0 { + return statistics.PseudoTable(tblInfo) + } + + // 3. statistics is outdated. + if statsTbl.IsOutdated() { + tbl := *statsTbl + tbl.Pseudo = true + statsTbl = &tbl + + } + return statsTbl +} + +func (b *PlanBuilder) buildDataSource(ctx context.Context, tn *ast.TableName, asName *model.CIStr) (LogicalPlan, error) { + dbName := tn.Schema + if dbName.L == "" { + dbName = model.NewCIStr(b.ctx.GetSessionVars().CurrentDB) + } + + tbl, err := b.is.TableByName(dbName, tn.Name) + if err != nil { + return nil, err + } + + tableInfo := tbl.Meta() + + if tbl.Type().IsVirtualTable() { + return b.buildMemTable(ctx, dbName, tableInfo) + } + + tblName := *asName + if tblName.L == "" { + tblName = tn.Name + } + possiblePaths, err := b.getPossibleAccessPaths(tn.IndexHints, tbl, dbName, tblName) + if err != nil { + return nil, err + } + + columns := tbl.Cols() + ds := DataSource{ + DBName: dbName, + TableAsName: asName, + table: tbl, + tableInfo: tableInfo, + statisticTable: getStatsTable(b.ctx, tbl.Meta(), tbl.Meta().ID), + indexHints: tn.IndexHints, + possibleAccessPaths: possiblePaths, + Columns: make([]*model.ColumnInfo, 0, len(columns)), + TblCols: make([]*expression.Column, 0, len(columns)), + }.Init(b.ctx) + + var handleCol *expression.Column + schema := expression.NewSchema(make([]*expression.Column, 0, len(columns))...) + names := make([]*types.FieldName, 0, len(columns)) + for i, col := range columns { + ds.Columns = append(ds.Columns, col.ToInfo()) + names = append(names, &types.FieldName{ + DBName: dbName, + TblName: tableInfo.Name, + ColName: col.Name, + OrigTblName: tableInfo.Name, + OrigColName: col.Name, + }) + newCol := &expression.Column{ + UniqueID: b.ctx.GetSessionVars().AllocPlanColumnID(), + ID: col.ID, + RetType: &col.FieldType, + OrigName: names[i].String(), + } + + if tableInfo.PKIsHandle && mysql.HasPriKeyFlag(col.Flag) { + handleCol = newCol + } + schema.Append(newCol) + ds.TblCols = append(ds.TblCols, newCol) + } + // We append an extra handle column to the schema when the handle + // column is not the primary key of "ds". + if handleCol == nil { + ds.Columns = append(ds.Columns, model.NewExtraHandleColInfo()) + handleCol = ds.newExtraHandleSchemaCol() + schema.Append(handleCol) + names = append(names, &types.FieldName{ + DBName: dbName, + TblName: tableInfo.Name, + ColName: model.ExtraHandleName, + OrigColName: model.ExtraHandleName, + }) + ds.TblCols = append(ds.TblCols, handleCol) + } + if handleCol != nil { + ds.handleCol = handleCol + handleMap := make(map[int64][]*expression.Column) + handleMap[tableInfo.ID] = []*expression.Column{handleCol} + b.handleHelper.pushMap(handleMap) + } else { + b.handleHelper.pushMap(nil) + } + ds.SetSchema(schema) + ds.names = names + + // Init FullIdxCols, FullIdxColLens for accessPaths. 
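+	// Only index paths need this; table paths carry no index metadata.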
+ for _, path := range ds.possibleAccessPaths { + if !path.IsTablePath { + path.FullIdxCols, path.FullIdxColLens = expression.IndexInfo2Cols(ds.Columns, ds.schema.Columns, path.Index) + } + } + + var result LogicalPlan = ds + + // If this SQL is executed in a non-readonly transaction, we need a + // "UnionScan" operator to read the modifications of former SQLs, which is + // buffered in tidb-server memory. + txn, err := b.ctx.Txn(false) + if err != nil { + return nil, err + } + if txn.Valid() && !txn.IsReadOnly() { + us := LogicalUnionScan{handleCol: handleCol}.Init(b.ctx) + us.SetChildren(ds) + result = us + } + return result, nil +} + +func (b *PlanBuilder) buildMemTable(ctx context.Context, dbName model.CIStr, tableInfo *model.TableInfo) (LogicalPlan, error) { + // We can use the `tableInfo.Columns` directly because the memory table has + // a stable schema and there is no online DDL on the memory table. + schema := expression.NewSchema(make([]*expression.Column, 0, len(tableInfo.Columns))...) + names := make([]*types.FieldName, 0, len(tableInfo.Columns)) + var handleCol *expression.Column + for _, col := range tableInfo.Columns { + names = append(names, &types.FieldName{ + DBName: dbName, + TblName: tableInfo.Name, + ColName: col.Name, + OrigTblName: tableInfo.Name, + OrigColName: col.Name, + }) + // NOTE: Rewrite the expression if memory table supports generated columns in the future + newCol := &expression.Column{ + UniqueID: b.ctx.GetSessionVars().AllocPlanColumnID(), + ID: col.ID, + RetType: &col.FieldType, + } + if tableInfo.PKIsHandle && mysql.HasPriKeyFlag(col.Flag) { + handleCol = newCol + } + schema.Append(newCol) + } + + if handleCol != nil { + handleMap := make(map[int64][]*expression.Column) + handleMap[tableInfo.ID] = []*expression.Column{handleCol} + b.handleHelper.pushMap(handleMap) + } else { + b.handleHelper.pushMap(nil) + } + + // NOTE: Add a `LogicalUnionScan` if we support update memory table in the future + p := LogicalMemTable{ + dbName: dbName, + tableInfo: tableInfo, + }.Init(b.ctx) + p.SetSchema(schema) + p.names = names + return p, nil +} + +func getTableOffset(names []*types.FieldName, handleName *types.FieldName) (int, error) { + for i, name := range names { + if name.DBName.L == handleName.DBName.L && name.TblName.L == handleName.TblName.L { + return i, nil + } + } + return -1, errors.Errorf("Couldn't get column information when do update/delete") +} + +// TblColPosInfo represents an mapper from column index to handle index. +type TblColPosInfo struct { + TblID int64 + // Start and End represent the ordinal range [Start, End) of the consecutive columns. + Start, End int + // HandleOrdinal represents the ordinal of the handle column. + HandleOrdinal int +} + +// TblColPosInfoSlice attaches the methods of sort.Interface to []TblColPosInfos sorting in increasing order. +type TblColPosInfoSlice []TblColPosInfo + +// Len implements sort.Interface#Len. +func (c TblColPosInfoSlice) Len() int { + return len(c) +} + +// Swap implements sort.Interface#Swap. +func (c TblColPosInfoSlice) Swap(i, j int) { + c[i], c[j] = c[j], c[i] +} + +// Less implements sort.Interface#Less. +func (c TblColPosInfoSlice) Less(i, j int) bool { + return c[i].Start < c[j].Start +} + +// FindHandle finds the ordinal of the corresponding handle column. +func (c TblColPosInfoSlice) FindHandle(colOrdinal int) (int, bool) { + if len(c) == 0 { + return 0, false + } + // find the smallest index of the range that its start great than colOrdinal. 
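+	// For example, with ranges [0, 3) and [3, 6), colOrdinal 4 falls inside the second range,
+	// so the second range's HandleOrdinal is returned.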
+ // @see https://godoc.org/sort#Search + rangeBehindOrdinal := sort.Search(len(c), func(i int) bool { return c[i].Start > colOrdinal }) + if rangeBehindOrdinal == 0 { + return 0, false + } + return c[rangeBehindOrdinal-1].HandleOrdinal, true +} + +// buildColumns2Handle builds columns to handle mapping. +func buildColumns2Handle( + names []*types.FieldName, + tblID2Handle map[int64][]*expression.Column, + tblID2Table map[int64]table.Table, + onlyWritableCol bool, +) (TblColPosInfoSlice, error) { + var cols2Handles TblColPosInfoSlice + for tblID, handleCols := range tblID2Handle { + tbl := tblID2Table[tblID] + var tblLen int + if onlyWritableCol { + tblLen = len(tbl.WritableCols()) + } else { + tblLen = len(tbl.Cols()) + } + for _, handleCol := range handleCols { + offset, err := getTableOffset(names, names[handleCol.Index]) + if err != nil { + return nil, err + } + end := offset + tblLen + cols2Handles = append(cols2Handles, TblColPosInfo{tblID, offset, end, handleCol.Index}) + } + } + sort.Sort(cols2Handles) + return cols2Handles, nil +} + +// extractDefaultExpr extract a `DefaultExpr` from `ExprNode`, +// If it is a `DEFAULT` function like `DEFAULT(a)`, return nil. +// Only if it is `DEFAULT` keyword, it will return the `DefaultExpr`. +func extractDefaultExpr(node ast.ExprNode) *ast.DefaultExpr { + if expr, ok := node.(*ast.DefaultExpr); ok && expr.Name == nil { + return expr + } + return nil +} + +func (b *PlanBuilder) buildDelete(ctx context.Context, delete *ast.DeleteStmt) (Plan, error) { + p, err := b.buildResultSetNode(ctx, delete.TableRefs.TableRefs) + if err != nil { + return nil, err + } + oldSchema := p.Schema() + oldLen := oldSchema.Len() + + if delete.Where != nil { + p, err = b.buildSelection(ctx, p, delete.Where, nil) + if err != nil { + return nil, err + } + } + + if delete.Order != nil { + p, err = b.buildSort(ctx, p, delete.Order.Items, nil) + if err != nil { + return nil, err + } + } + + if delete.Limit != nil { + p, err = b.buildLimit(p, delete.Limit) + if err != nil { + return nil, err + } + } + + proj := LogicalProjection{Exprs: expression.Column2Exprs(p.Schema().Columns[:oldLen])}.Init(b.ctx) + proj.SetChildren(p) + proj.SetSchema(oldSchema.Clone()) + proj.names = p.OutputNames()[:oldLen] + p = proj + + del := Delete{}.Init(b.ctx) + + del.names = p.OutputNames() + del.SelectPlan, err = DoOptimize(ctx, b.optFlag, p) + if err != nil { + return nil, err + } + + tblID2Handle, err := resolveIndicesForTblID2Handle(b.handleHelper.tailMap(), del.SelectPlan.Schema()) + if err != nil { + return nil, err + } + tblID2table := make(map[int64]table.Table) + for id := range tblID2Handle { + tblID2table[id], _ = b.is.TableByID(id) + } + del.TblColPosInfos, err = buildColumns2Handle(del.names, tblID2Handle, tblID2table, false) + return del, err +} + +func resolveIndicesForTblID2Handle(tblID2Handle map[int64][]*expression.Column, schema *expression.Schema) (map[int64][]*expression.Column, error) { + newMap := make(map[int64][]*expression.Column, len(tblID2Handle)) + for i, cols := range tblID2Handle { + for _, col := range cols { + resolvedCol, err := col.ResolveIndices(schema) + if err != nil { + return nil, err + } + newMap[i] = append(newMap[i], resolvedCol.(*expression.Column)) + } + } + return newMap, nil +} + +func getInnerFromParenthesesAndUnaryPlus(expr ast.ExprNode) ast.ExprNode { + if pexpr, ok := expr.(*ast.ParenthesesExpr); ok { + return getInnerFromParenthesesAndUnaryPlus(pexpr.Expr) + } + if uexpr, ok := expr.(*ast.UnaryOperationExpr); ok && uexpr.Op == opcode.Plus { + 
return getInnerFromParenthesesAndUnaryPlus(uexpr.V) + } + return expr +} + +// containDifferentJoinTypes checks whether `preferJoinType` contains different +// join types. +func containDifferentJoinTypes(preferJoinType uint) bool { + return bits.OnesCount(preferJoinType) > 1 +} diff --git a/planner/core/logical_plan_test.go b/planner/core/logical_plan_test.go new file mode 100644 index 0000000..6a444ee --- /dev/null +++ b/planner/core/logical_plan_test.go @@ -0,0 +1,782 @@ +// Copyright 2015 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package core + +import ( + "context" + "fmt" + "strings" + "testing" + + . "github.com/pingcap/check" + "github.com/pingcap/tidb/expression" + "github.com/pingcap/tidb/infoschema" + "github.com/pingcap/tidb/parser" + "github.com/pingcap/tidb/parser/model" + "github.com/pingcap/tidb/parser/terror" + "github.com/pingcap/tidb/planner/property" + "github.com/pingcap/tidb/sessionctx" + "github.com/pingcap/tidb/util/testleak" + "github.com/pingcap/tidb/util/testutil" +) + +var _ = Suite(&testPlanSuite{}) + +func TestT(t *testing.T) { + CustomVerboseFlag = true + TestingT(t) +} + +type testPlanSuite struct { + *parser.Parser + + is infoschema.InfoSchema + ctx sessionctx.Context + + testData testutil.TestData +} + +func (s *testPlanSuite) SetUpSuite(c *C) { + s.is = infoschema.MockInfoSchema([]*model.TableInfo{MockSignedTable(), MockUnsignedTable()}) + s.ctx = MockContext() + s.Parser = parser.New() + + var err error + s.testData, err = testutil.LoadTestSuiteData("testdata", "plan_suite_unexported") + c.Assert(err, IsNil) +} + +func (s *testPlanSuite) TearDownSuite(c *C) { + c.Assert(s.testData.GenerateOutputIfNeeded(), IsNil) +} + +func (s *testPlanSuite) TestPredicatePushDown(c *C) { + defer testleak.AfterTest(c)() + var input, output []string + s.testData.GetTestCases(c, &input, &output) + ctx := context.Background() + for ith, ca := range input { + comment := Commentf("for %s", ca) + stmt, err := s.ParseOneStmt(ca, "", "") + c.Assert(err, IsNil, comment) + p, _, err := BuildLogicalPlan(ctx, s.ctx, stmt, s.is) + c.Assert(err, IsNil) + p, err = logicalOptimize(context.TODO(), flagPredicatePushDown|flagPrunColumns, p.(LogicalPlan)) + c.Assert(err, IsNil) + s.testData.OnRecord(func() { + output[ith] = ToString(p) + }) + c.Assert(ToString(p), Equals, output[ith], Commentf("for %s %d", ca, ith)) + } +} + +func (s *testPlanSuite) TestJoinPredicatePushDown(c *C) { + defer testleak.AfterTest(c)() + var ( + input []string + output []struct { + Left string + Right string + } + ) + s.testData.GetTestCases(c, &input, &output) + + ctx := context.Background() + for i, ca := range input { + comment := Commentf("for %s", ca) + stmt, err := s.ParseOneStmt(ca, "", "") + c.Assert(err, IsNil, comment) + p, _, err := BuildLogicalPlan(ctx, s.ctx, stmt, s.is) + c.Assert(err, IsNil, comment) + p, err = logicalOptimize(context.TODO(), flagPredicatePushDown|flagPrunColumns, p.(LogicalPlan)) + c.Assert(err, IsNil, comment) + proj, ok := p.(*LogicalProjection) + c.Assert(ok, IsTrue, comment) + join, ok := proj.children[0].(*LogicalJoin) + 
c.Assert(ok, IsTrue, comment) + leftPlan, ok := join.children[0].(*DataSource) + c.Assert(ok, IsTrue, comment) + rightPlan, ok := join.children[1].(*DataSource) + c.Assert(ok, IsTrue, comment) + leftCond := fmt.Sprintf("%s", leftPlan.pushedDownConds) + rightCond := fmt.Sprintf("%s", rightPlan.pushedDownConds) + s.testData.OnRecord(func() { + output[i].Left, output[i].Right = leftCond, rightCond + }) + c.Assert(leftCond, Equals, output[i].Left, comment) + c.Assert(rightCond, Equals, output[i].Right, comment) + } +} + +func (s *testPlanSuite) TestOuterWherePredicatePushDown(c *C) { + defer testleak.AfterTest(c)() + var ( + input []string + output []struct { + Sel string + Left string + Right string + } + ) + s.testData.GetTestCases(c, &input, &output) + + ctx := context.Background() + for i, ca := range input { + comment := Commentf("for %s", ca) + stmt, err := s.ParseOneStmt(ca, "", "") + c.Assert(err, IsNil, comment) + p, _, err := BuildLogicalPlan(ctx, s.ctx, stmt, s.is) + c.Assert(err, IsNil, comment) + p, err = logicalOptimize(context.TODO(), flagPredicatePushDown|flagPrunColumns, p.(LogicalPlan)) + c.Assert(err, IsNil, comment) + proj, ok := p.(*LogicalProjection) + c.Assert(ok, IsTrue, comment) + selection, ok := proj.children[0].(*LogicalSelection) + c.Assert(ok, IsTrue, comment) + selCond := fmt.Sprintf("%s", selection.Conditions) + s.testData.OnRecord(func() { + output[i].Sel = selCond + }) + c.Assert(selCond, Equals, output[i].Sel, comment) + join, ok := selection.children[0].(*LogicalJoin) + c.Assert(ok, IsTrue, comment) + leftPlan, ok := join.children[0].(*DataSource) + c.Assert(ok, IsTrue, comment) + rightPlan, ok := join.children[1].(*DataSource) + c.Assert(ok, IsTrue, comment) + leftCond := fmt.Sprintf("%s", leftPlan.pushedDownConds) + rightCond := fmt.Sprintf("%s", rightPlan.pushedDownConds) + s.testData.OnRecord(func() { + output[i].Left, output[i].Right = leftCond, rightCond + }) + c.Assert(leftCond, Equals, output[i].Left, comment) + c.Assert(rightCond, Equals, output[i].Right, comment) + } +} + +func (s *testPlanSuite) TestSimplifyOuterJoin(c *C) { + defer testleak.AfterTest(c)() + var ( + input []string + output []struct { + Best string + JoinType string + } + ) + s.testData.GetTestCases(c, &input, &output) + + ctx := context.Background() + for i, ca := range input { + comment := Commentf("for %s", ca) + stmt, err := s.ParseOneStmt(ca, "", "") + c.Assert(err, IsNil, comment) + p, _, err := BuildLogicalPlan(ctx, s.ctx, stmt, s.is) + c.Assert(err, IsNil, comment) + p, err = logicalOptimize(context.TODO(), flagPredicatePushDown|flagPrunColumns, p.(LogicalPlan)) + c.Assert(err, IsNil, comment) + planString := ToString(p) + s.testData.OnRecord(func() { + output[i].Best = planString + }) + c.Assert(planString, Equals, output[i].Best, comment) + join, ok := p.(LogicalPlan).Children()[0].(*LogicalJoin) + if !ok { + join, ok = p.(LogicalPlan).Children()[0].Children()[0].(*LogicalJoin) + c.Assert(ok, IsTrue, comment) + } + s.testData.OnRecord(func() { + output[i].JoinType = join.JoinType.String() + }) + c.Assert(join.JoinType.String(), Equals, output[i].JoinType, comment) + } +} + +func (s *testPlanSuite) TestDeriveNotNullConds(c *C) { + defer testleak.AfterTest(c)() + var ( + input []string + output []struct { + Plan string + Left string + Right string + } + ) + s.testData.GetTestCases(c, &input, &output) + + ctx := context.Background() + for i, ca := range input { + comment := Commentf("for %s", ca) + stmt, err := s.ParseOneStmt(ca, "", "") + c.Assert(err, IsNil, comment) + p, 
_, err := BuildLogicalPlan(ctx, s.ctx, stmt, s.is) + c.Assert(err, IsNil, comment) + p, err = logicalOptimize(context.TODO(), flagPredicatePushDown|flagPrunColumns, p.(LogicalPlan)) + c.Assert(err, IsNil, comment) + s.testData.OnRecord(func() { + output[i].Plan = ToString(p) + }) + c.Assert(ToString(p), Equals, output[i].Plan, comment) + join := p.(LogicalPlan).Children()[0].(*LogicalJoin) + left := join.Children()[0].(*DataSource) + right := join.Children()[1].(*DataSource) + leftConds := fmt.Sprintf("%s", left.pushedDownConds) + rightConds := fmt.Sprintf("%s", right.pushedDownConds) + s.testData.OnRecord(func() { + output[i].Left, output[i].Right = leftConds, rightConds + }) + c.Assert(leftConds, Equals, output[i].Left, comment) + c.Assert(rightConds, Equals, output[i].Right, comment) + } +} + +func (s *testPlanSuite) TestPlanBuilder(c *C) { + defer testleak.AfterTest(c)() + var input, output []string + s.testData.GetTestCases(c, &input, &output) + ctx := context.Background() + for i, ca := range input { + comment := Commentf("for %s", ca) + stmt, err := s.ParseOneStmt(ca, "", "") + c.Assert(err, IsNil, comment) + + s.ctx.GetSessionVars().HashJoinConcurrency = 1 + Preprocess(s.ctx, stmt, s.is) + p, _, err := BuildLogicalPlan(ctx, s.ctx, stmt, s.is) + c.Assert(err, IsNil) + if lp, ok := p.(LogicalPlan); ok { + p, err = logicalOptimize(context.TODO(), flagPrunColumns, lp) + c.Assert(err, IsNil) + } + s.testData.OnRecord(func() { + output[i] = ToString(p) + }) + c.Assert(ToString(p), Equals, output[i], Commentf("for %s", ca)) + } +} + +func (s *testPlanSuite) TestJoinReOrder(c *C) { + defer testleak.AfterTest(c)() + var input, output []string + s.testData.GetTestCases(c, &input, &output) + + ctx := context.Background() + for i, tt := range input { + comment := Commentf("for %s", tt) + stmt, err := s.ParseOneStmt(tt, "", "") + c.Assert(err, IsNil, comment) + + p, _, err := BuildLogicalPlan(ctx, s.ctx, stmt, s.is) + c.Assert(err, IsNil) + p, err = logicalOptimize(context.TODO(), flagPredicatePushDown|flagJoinReOrder, p.(LogicalPlan)) + c.Assert(err, IsNil) + planString := ToString(p) + s.testData.OnRecord(func() { + output[i] = planString + }) + c.Assert(planString, Equals, output[i], Commentf("for %s", tt)) + } +} + +func (s *testPlanSuite) TestEagerAggregation(c *C) { + defer testleak.AfterTest(c)() + var input []string + var output []string + s.testData.GetTestCases(c, &input, &output) + ctx := context.Background() + s.ctx.GetSessionVars().AllowAggPushDown = true + for ith, tt := range input { + comment := Commentf("for %s", tt) + stmt, err := s.ParseOneStmt(tt, "", "") + c.Assert(err, IsNil, comment) + + p, _, err := BuildLogicalPlan(ctx, s.ctx, stmt, s.is) + c.Assert(err, IsNil) + p, err = logicalOptimize(context.TODO(), flagBuildKeyInfo|flagPredicatePushDown|flagPrunColumns|flagPushDownAgg, p.(LogicalPlan)) + c.Assert(err, IsNil) + s.testData.OnRecord(func() { + output[ith] = ToString(p) + }) + c.Assert(ToString(p), Equals, output[ith], Commentf("for %s %d", tt, ith)) + } + s.ctx.GetSessionVars().AllowAggPushDown = false +} + +func (s *testPlanSuite) TestColumnPruning(c *C) { + defer testleak.AfterTest(c)() + var ( + input []string + output []map[int][]string + ) + s.testData.GetTestCases(c, &input, &output) + + ctx := context.Background() + for i, tt := range input { + comment := Commentf("for %s", tt) + stmt, err := s.ParseOneStmt(tt, "", "") + c.Assert(err, IsNil, comment) + + p, _, err := BuildLogicalPlan(ctx, s.ctx, stmt, s.is) + c.Assert(err, IsNil) + lp, err := 
logicalOptimize(ctx, flagPredicatePushDown|flagPrunColumns, p.(LogicalPlan)) + c.Assert(err, IsNil) + s.testData.OnRecord(func() { + output[i] = make(map[int][]string) + }) + s.checkDataSourceCols(lp, c, output[i], comment) + } +} + +func (s *testPlanSuite) TestAllocID(c *C) { + ctx := MockContext() + pA := DataSource{}.Init(ctx) + pB := DataSource{}.Init(ctx) + c.Assert(pA.id+1, Equals, pB.id) +} + +func (s *testPlanSuite) checkDataSourceCols(p LogicalPlan, c *C, ans map[int][]string, comment CommentInterface) { + switch p.(type) { + case *DataSource: + s.testData.OnRecord(func() { + ans[p.ID()] = make([]string, p.Schema().Len()) + }) + colList, ok := ans[p.ID()] + c.Assert(ok, IsTrue, Commentf("For %v DataSource ID %d Not found", comment, p.ID())) + c.Assert(len(p.Schema().Columns), Equals, len(colList), comment) + for i, col := range p.Schema().Columns { + s.testData.OnRecord(func() { + colList[i] = col.String() + }) + c.Assert(col.String(), Equals, colList[i], comment) + } + } + for _, child := range p.Children() { + s.checkDataSourceCols(child, c, ans, comment) + } +} + +func (s *testPlanSuite) TestValidate(c *C) { + defer testleak.AfterTest(c)() + tests := []struct { + sql string + err *terror.Error + }{ + { + sql: "select date_format((1,2), '%H');", + err: expression.ErrOperandColumns, + }, + { + sql: "select (1,2) between (3,4) and (5,6)", + err: expression.ErrOperandColumns, + }, + { + sql: "select (1,2) in ((3,4),(5,6))", + err: nil, + }, + { + sql: "select (1,2) in ((3,4),5)", + err: expression.ErrOperandColumns, + }, + { + sql: "select (1,2) is null", + err: expression.ErrOperandColumns, + }, + { + sql: "select (+(1,2))=(1,2)", + err: nil, + }, + { + sql: "select (-(1,2))=(1,2)", + err: expression.ErrOperandColumns, + }, + { + sql: "select (1,2)||(1,2)", + err: expression.ErrOperandColumns, + }, + { + sql: "select (1,2) < (3,4)", + err: nil, + }, + { + sql: "select (1,2) < 3", + err: expression.ErrOperandColumns, + }, + { + sql: "select 1, * from t", + err: ErrInvalidWildCard, + }, + { + sql: "select *, 1 from t", + err: nil, + }, + { + sql: "select 1, t.* from t", + err: nil, + }, + { + sql: "insert into t set a = 1, b = a + 1", + err: nil, + }, + { + sql: "insert into t set a = 1, b = values(a) + 1", + err: nil, + }, + { + sql: "select a as c1, b as c1 from t order by c1", + err: ErrAmbiguous, + }, + { + sql: "select * from t t1 use index(e)", + err: ErrKeyDoesNotExist, + }, + { + sql: "select a from t having c2", + err: ErrUnknownColumn, + }, + { + sql: "select a from t group by c2 + 1 having c2", + err: ErrUnknownColumn, + }, + { + sql: "select a as b, b from t having b", + err: ErrAmbiguous, + }, + { + sql: "select a + 1 from t having a", + err: ErrUnknownColumn, + }, + { + sql: "select a from t having sum(avg(a))", + err: ErrInvalidGroupFuncUse, + }, + } + + ctx := context.Background() + for _, tt := range tests { + sql := tt.sql + comment := Commentf("for %s", sql) + stmt, err := s.ParseOneStmt(sql, "", "") + c.Assert(err, IsNil, comment) + Preprocess(s.ctx, stmt, s.is) + _, _, err = BuildLogicalPlan(ctx, s.ctx, stmt, s.is) + if tt.err == nil { + c.Assert(err, IsNil, comment) + } else { + c.Assert(tt.err.Equal(err), IsTrue, comment) + } + } +} + +func (s *testPlanSuite) checkUniqueKeys(p LogicalPlan, c *C, ans map[int][][]string, sql string) { + s.testData.OnRecord(func() { + ans[p.ID()] = make([][]string, len(p.Schema().Keys)) + }) + keyList, ok := ans[p.ID()] + c.Assert(ok, IsTrue, Commentf("for %s, %v not found", sql, p.ID())) + c.Assert(len(p.Schema().Keys), Equals, 
len(keyList), Commentf("for %s, %v, the number of key doesn't match, the schema is %s", sql, p.ID(), p.Schema())) + for i := range keyList { + s.testData.OnRecord(func() { + keyList[i] = make([]string, len(p.Schema().Keys[i])) + }) + c.Assert(len(p.Schema().Keys[i]), Equals, len(keyList[i]), Commentf("for %s, %v %v, the number of column doesn't match", sql, p.ID(), keyList[i])) + for j := range keyList[i] { + s.testData.OnRecord(func() { + keyList[i][j] = p.Schema().Keys[i][j].String() + }) + c.Assert(p.Schema().Keys[i][j].String(), Equals, keyList[i][j], Commentf("for %s, %v %v, column dosen't match", sql, p.ID(), keyList[i])) + } + } + s.testData.OnRecord(func() { + ans[p.ID()] = keyList + }) + for _, child := range p.Children() { + s.checkUniqueKeys(child, c, ans, sql) + } +} + +func (s *testPlanSuite) TestUniqueKeyInfo(c *C) { + defer testleak.AfterTest(c)() + var input []string + var output []map[int][][]string + s.testData.GetTestCases(c, &input, &output) + s.testData.OnRecord(func() { + output = make([]map[int][][]string, len(input)) + }) + + ctx := context.Background() + for ith, tt := range input { + comment := Commentf("for %s %d", tt, ith) + stmt, err := s.ParseOneStmt(tt, "", "") + c.Assert(err, IsNil, comment) + + p, _, err := BuildLogicalPlan(ctx, s.ctx, stmt, s.is) + c.Assert(err, IsNil) + lp, err := logicalOptimize(context.TODO(), flagPredicatePushDown|flagPrunColumns|flagBuildKeyInfo, p.(LogicalPlan)) + c.Assert(err, IsNil) + s.testData.OnRecord(func() { + output[ith] = make(map[int][][]string) + }) + s.checkUniqueKeys(lp, c, output[ith], tt) + } +} + +func (s *testPlanSuite) TestAggPrune(c *C) { + defer testleak.AfterTest(c)() + var input, output []string + s.testData.GetTestCases(c, &input, &output) + + ctx := context.Background() + for i, tt := range input { + comment := Commentf("for %s", tt) + stmt, err := s.ParseOneStmt(tt, "", "") + c.Assert(err, IsNil, comment) + + p, _, err := BuildLogicalPlan(ctx, s.ctx, stmt, s.is) + c.Assert(err, IsNil) + + p, err = logicalOptimize(context.TODO(), flagPredicatePushDown|flagPrunColumns|flagBuildKeyInfo|flagEliminateAgg|flagEliminateProjection, p.(LogicalPlan)) + c.Assert(err, IsNil) + planString := ToString(p) + s.testData.OnRecord(func() { + output[i] = planString + }) + c.Assert(planString, Equals, output[i], comment) + } +} + +func (s *testPlanSuite) TestTopNPushDown(c *C) { + defer func() { + testleak.AfterTest(c)() + }() + var input, output []string + s.testData.GetTestCases(c, &input, &output) + ctx := context.TODO() + for i, tt := range input { + comment := Commentf("case:%v sql:%s", i, tt) + stmt, err := s.ParseOneStmt(tt, "", "") + c.Assert(err, IsNil, comment) + Preprocess(s.ctx, stmt, s.is) + builder := NewPlanBuilder(MockContext(), s.is) + p, err := builder.Build(ctx, stmt) + c.Assert(err, IsNil) + p, err = logicalOptimize(ctx, builder.optFlag, p.(LogicalPlan)) + c.Assert(err, IsNil) + s.testData.OnRecord(func() { + output[i] = ToString(p) + }) + c.Assert(ToString(p), Equals, output[i], comment) + } +} + +func (s *testPlanSuite) TestNameResolver(c *C) { + defer testleak.AfterTest(c)() + tests := []struct { + sql string + err string + }{ + {"select a from t", ""}, + {"select c3 from t", "[planner:1054]Unknown column 'c3' in 'field list'"}, + {"select c1 from t4", "[schema:1146]Table 'test.t4' doesn't exist"}, + {"select * from t", ""}, + {"select t.* from t", ""}, + {"select t2.* from t", "[planner:1051]Unknown table 't2'"}, + {"select b as a, c as a from t group by a", "[planner:1052]Column 'c' in field list is 
ambiguous"}, + {"select 1 as a, b as a, c as a from t group by a", ""}, + {"select a, b as a from t group by a+1", ""}, + {"select c, a as c from t order by c+1", ""}, + {"select * from t as t1, t as t2 join t as t3 on t2.a = t3.a", ""}, + {"select * from t as t1, t as t2 join t as t3 on t1.c1 = t2.a", "[planner:1054]Unknown column 't1.c1' in 'on clause'"}, + {"select a from t group by a having a = 3", ""}, + {"select a from t group by a having c2 = 3", "[planner:1054]Unknown column 'c2' in 'having clause'"}, + {"select a from t where t11.a < t.a", "[planner:1054]Unknown column 't11.a' in 'where clause'"}, + {"select a from t having t11.c1 < t.a", "[planner:1054]Unknown column 't11.c1' in 'having clause'"}, + {"select a from t where t.a < t.a order by t11.c1", "[planner:1054]Unknown column 't11.c1' in 'order clause'"}, + {"select a from t group by t11.c1", "[planner:1054]Unknown column 't11.c1' in 'group statement'"}, + {"select '' as fakeCol from t group by values(fakeCol)", "[planner:1054]Unknown column '' in 'VALUES() function'"}, + } + + ctx := context.Background() + for _, t := range tests { + comment := Commentf("for %s", t.sql) + stmt, err := s.ParseOneStmt(t.sql, "", "") + c.Assert(err, IsNil, comment) + s.ctx.GetSessionVars().HashJoinConcurrency = 1 + + _, _, err = BuildLogicalPlan(ctx, s.ctx, stmt, s.is) + if t.err == "" { + c.Check(err, IsNil) + } else { + c.Assert(err.Error(), Equals, t.err) + } + } +} + +func (s *testPlanSuite) TestOuterJoinEliminator(c *C) { + defer testleak.AfterTest(c)() + var input, output []string + s.testData.GetTestCases(c, &input, &output) + + ctx := context.TODO() + for i, tt := range input { + comment := Commentf("case:%v sql:%s", i, tt) + stmt, err := s.ParseOneStmt(tt, "", "") + c.Assert(err, IsNil, comment) + Preprocess(s.ctx, stmt, s.is) + builder := NewPlanBuilder(MockContext(), s.is) + p, err := builder.Build(ctx, stmt) + c.Assert(err, IsNil) + p, err = logicalOptimize(ctx, builder.optFlag, p.(LogicalPlan)) + c.Assert(err, IsNil) + planString := ToString(p) + s.testData.OnRecord(func() { + output[i] = planString + }) + c.Assert(planString, Equals, output[i], comment) + } +} + +func byItemsToProperty(byItems []*ByItems) *property.PhysicalProperty { + pp := &property.PhysicalProperty{} + for _, item := range byItems { + pp.Items = append(pp.Items, property.Item{Col: item.Expr.(*expression.Column), Desc: item.Desc}) + } + return pp +} + +func pathsName(paths []*candidatePath) string { + var names []string + for _, path := range paths { + if path.path.IsTablePath { + names = append(names, "PRIMARY_KEY") + } else { + names = append(names, path.path.Index.Name.O) + } + } + return strings.Join(names, ",") +} + +func (s *testPlanSuite) TestSkylinePruning(c *C) { + defer testleak.AfterTest(c)() + tests := []struct { + sql string + result string + }{ + { + sql: "select * from t", + result: "PRIMARY_KEY", + }, + { + sql: "select * from t order by f", + result: "PRIMARY_KEY,f,f_g", + }, + { + sql: "select * from t where a > 1", + result: "PRIMARY_KEY", + }, + { + sql: "select * from t where a > 1 order by f", + result: "PRIMARY_KEY,f,f_g", + }, + { + sql: "select * from t where f > 1", + result: "PRIMARY_KEY,f,f_g", + }, + { + sql: "select f from t where f > 1", + result: "f,f_g", + }, + { + sql: "select f from t where f > 1 order by a", + result: "PRIMARY_KEY,f,f_g", + }, + { + sql: "select * from t where f > 1 and g > 1", + result: "PRIMARY_KEY,f,g,f_g", + }, + { + sql: "select count(1) from t", + result: 
"PRIMARY_KEY,c_d_e,f,g,f_g,c_d_e_str,e_d_c_str_prefix", + }, + } + ctx := context.TODO() + for i, tt := range tests { + comment := Commentf("case:%v sql:%s", i, tt.sql) + stmt, err := s.ParseOneStmt(tt.sql, "", "") + c.Assert(err, IsNil, comment) + Preprocess(s.ctx, stmt, s.is) + builder := NewPlanBuilder(MockContext(), s.is) + p, err := builder.Build(ctx, stmt) + if err != nil { + c.Assert(err.Error(), Equals, tt.result, comment) + continue + } + c.Assert(err, IsNil, comment) + p, err = logicalOptimize(ctx, builder.optFlag, p.(LogicalPlan)) + c.Assert(err, IsNil, comment) + lp := p.(LogicalPlan) + _, err = lp.recursiveDeriveStats() + c.Assert(err, IsNil, comment) + var ds *DataSource + var byItems []*ByItems + for ds == nil { + switch v := lp.(type) { + case *DataSource: + ds = v + case *LogicalSort: + byItems = v.ByItems + lp = lp.Children()[0] + case *LogicalProjection: + newItems := make([]*ByItems, 0, len(byItems)) + for _, col := range byItems { + idx := v.schema.ColumnIndex(col.Expr.(*expression.Column)) + switch expr := v.Exprs[idx].(type) { + case *expression.Column: + newItems = append(newItems, &ByItems{Expr: expr, Desc: col.Desc}) + } + } + byItems = newItems + lp = lp.Children()[0] + default: + lp = lp.Children()[0] + } + } + paths := ds.skylinePruning(byItemsToProperty(byItems)) + c.Assert(pathsName(paths), Equals, tt.result, comment) + } +} + +func (s *testPlanSuite) TestUpdateEQCond(c *C) { + defer testleak.AfterTest(c)() + tests := []struct { + sql string + best string + }{ + { + sql: "select t1.a from t t1, t t2 where t1.a = t2.a+1", + best: "Join{DataScan(t1)->DataScan(t2)->Projection}(test.t.a,Column#25)->Projection", + }, + } + ctx := context.TODO() + for i, tt := range tests { + comment := Commentf("case:%v sql:%s", i, tt.sql) + stmt, err := s.ParseOneStmt(tt.sql, "", "") + c.Assert(err, IsNil, comment) + Preprocess(s.ctx, stmt, s.is) + builder := NewPlanBuilder(MockContext(), s.is) + p, err := builder.Build(ctx, stmt) + c.Assert(err, IsNil) + p, err = logicalOptimize(ctx, builder.optFlag, p.(LogicalPlan)) + c.Assert(err, IsNil) + c.Assert(ToString(p), Equals, tt.best, comment) + } +} diff --git a/planner/core/logical_plans.go b/planner/core/logical_plans.go new file mode 100644 index 0000000..07b20f0 --- /dev/null +++ b/planner/core/logical_plans.go @@ -0,0 +1,601 @@ +// Copyright 2016 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package core + +import ( + "math" + + "github.com/pingcap/tidb/expression" + "github.com/pingcap/tidb/expression/aggregation" + "github.com/pingcap/tidb/parser/ast" + "github.com/pingcap/tidb/parser/model" + "github.com/pingcap/tidb/parser/mysql" + "github.com/pingcap/tidb/planner/property" + "github.com/pingcap/tidb/planner/util" + "github.com/pingcap/tidb/statistics" + "github.com/pingcap/tidb/table" + "github.com/pingcap/tidb/types" + "github.com/pingcap/tidb/util/logutil" + "github.com/pingcap/tidb/util/ranger" + "go.uber.org/zap" +) + +var ( + _ LogicalPlan = &LogicalJoin{} + _ LogicalPlan = &LogicalAggregation{} + _ LogicalPlan = &LogicalProjection{} + _ LogicalPlan = &LogicalSelection{} + _ LogicalPlan = &LogicalTableDual{} + _ LogicalPlan = &DataSource{} + _ LogicalPlan = &TiKVSingleGather{} + _ LogicalPlan = &LogicalTableScan{} + _ LogicalPlan = &LogicalIndexScan{} + _ LogicalPlan = &LogicalSort{} + _ LogicalPlan = &LogicalLimit{} +) + +// JoinType contains CrossJoin, InnerJoin, LeftOuterJoin, RightOuterJoin, FullOuterJoin, SemiJoin. +type JoinType int + +const ( + // InnerJoin means inner join. + InnerJoin JoinType = iota + // LeftOuterJoin means left join. + LeftOuterJoin + // RightOuterJoin means right join. + RightOuterJoin +) + +// IsOuterJoin returns if this joiner is a outer joiner +func (tp JoinType) IsOuterJoin() bool { + return tp == LeftOuterJoin || tp == RightOuterJoin +} + +func (tp JoinType) String() string { + switch tp { + case InnerJoin: + return "inner join" + case LeftOuterJoin: + return "left outer join" + case RightOuterJoin: + return "right outer join" + } + return "unsupported join type" +} + +const ( + preferHashJoin uint = 1 << iota + preferMergeJoin +) + +// LogicalJoin is the logical join plan. +type LogicalJoin struct { + logicalSchemaProducer + + JoinType JoinType + reordered bool + cartesianJoin bool + StraightJoin bool + + // hintInfo stores the join algorithm hint information specified by client. + hintInfo *tableHintInfo + preferJoinType uint + + EqualConditions []*expression.ScalarFunction + LeftConditions expression.CNFExprs + RightConditions expression.CNFExprs + OtherConditions expression.CNFExprs + + LeftJoinKeys []*expression.Column + RightJoinKeys []*expression.Column + leftProperties [][]*expression.Column + rightProperties [][]*expression.Column + + // DefaultValues is only used for left/right outer join, which is values the inner row's should be when the outer table + // doesn't match any inner table's row. + // That it's nil just means the default values is a slice of NULL. + // Currently, only `aggregation push down` phase will set this. + DefaultValues []types.Datum + + // equalCondOutCnt indicates the estimated count of joined rows after evaluating `EqualConditions`. + equalCondOutCnt float64 +} + +func (p *LogicalJoin) attachOnConds(onConds []expression.Expression) { + eq, left, right, other := p.extractOnCondition(onConds, false, false) + p.EqualConditions = append(eq, p.EqualConditions...) + p.LeftConditions = append(left, p.LeftConditions...) + p.RightConditions = append(right, p.RightConditions...) + p.OtherConditions = append(other, p.OtherConditions...) +} + +// LogicalProjection represents a select fields plan. +type LogicalProjection struct { + logicalSchemaProducer + + Exprs []expression.Expression +} + +// LogicalAggregation represents an aggregate plan. 
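// Sketch (not part of the original patch): minimal use of the JoinType helpers defined
// above. describeJoin is a hypothetical function, not something this file declares.
func describeJoin(tp JoinType) string {
	if tp.IsOuterJoin() {
		return "outer: " + tp.String()
	}
	return "non-outer: " + tp.String()
}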
+type LogicalAggregation struct { + logicalSchemaProducer + + AggFuncs []*aggregation.AggFuncDesc + GroupByItems []expression.Expression + // groupByCols stores the columns that are group-by items. + groupByCols []*expression.Column + + possibleProperties [][]*expression.Column + inputCount float64 // inputCount is the input count of this plan. +} + +// IsPartialModeAgg returns if all of the AggFuncs are partialMode. +func (la *LogicalAggregation) IsPartialModeAgg() bool { + // Since all of the AggFunc share the same AggMode, we only need to check the first one. + return la.AggFuncs[0].Mode == aggregation.Partial1Mode +} + +// GetGroupByCols returns the groupByCols. If the groupByCols haven't be collected, +// this method would collect them at first. If the GroupByItems have been changed, +// we should explicitly collect GroupByColumns before this method. +func (la *LogicalAggregation) GetGroupByCols() []*expression.Column { + if la.groupByCols == nil { + la.collectGroupByColumns() + } + return la.groupByCols +} + +// LogicalSelection represents a where or having predicate. +type LogicalSelection struct { + baseLogicalPlan + + // Originally the WHERE or ON condition is parsed into a single expression, + // but after we converted to CNF(Conjunctive normal form), it can be + // split into a list of AND conditions. + Conditions []expression.Expression +} + +// LogicalTableDual represents a dual table plan. +type LogicalTableDual struct { + logicalSchemaProducer + + RowCount int +} + +// LogicalMemTable represents a memory table or virtual table +type LogicalMemTable struct { + logicalSchemaProducer + + dbName model.CIStr + tableInfo *model.TableInfo +} + +// LogicalUnionScan is only used in non read-only txn. +type LogicalUnionScan struct { + baseLogicalPlan + + conditions []expression.Expression + + handleCol *expression.Column +} + +// DataSource represents a tableScan without condition push down. +type DataSource struct { + logicalSchemaProducer + + indexHints []*ast.IndexHint + table table.Table + tableInfo *model.TableInfo + Columns []*model.ColumnInfo + DBName model.CIStr + + TableAsName *model.CIStr + // pushedDownConds are the conditions that will be pushed down to coprocessor. + pushedDownConds []expression.Expression + // allConds contains all the filters on this table. For now it's maintained + // in predicate push down and used only in partition pruning. + allConds []expression.Expression + + statisticTable *statistics.Table + tableStats *property.StatsInfo + + // possibleAccessPaths stores all the possible access path for physical plan, including table scan. + possibleAccessPaths []*util.AccessPath + + // handleCol represents the handle column for the datasource, either the + // int primary key column or extra handle column. + handleCol *expression.Column + // TblCols contains the original columns of table before being pruned, and it + // is used for estimating table scan cost. + TblCols []*expression.Column + // TblColHists contains the Histogram of all original table columns, + // it is converted from statisticTable, and used for IO/network cost estimating. + TblColHists *statistics.HistColl +} + +// TiKVSingleGather is a leaf logical operator of TiDB layer to gather +// tuples from TiKV regions. +type TiKVSingleGather struct { + logicalSchemaProducer + Source *DataSource + // IsIndexGather marks if this TiKVSingleGather gathers tuples from an IndexScan. 
+ // in implementation phase, we need this flag to determine whether to generate + // PhysicalTableReader or PhysicalIndexReader. + IsIndexGather bool + Index *model.IndexInfo +} + +// LogicalTableScan is the logical table scan operator for TiKV. +type LogicalTableScan struct { + logicalSchemaProducer + Source *DataSource + Handle *expression.Column + AccessConds expression.CNFExprs + Ranges []*ranger.Range +} + +// LogicalIndexScan is the logical index scan operator for TiKV. +type LogicalIndexScan struct { + logicalSchemaProducer + // DataSource should be read-only here. + Source *DataSource + IsDoubleRead bool + + EqCondCount int + AccessConds expression.CNFExprs + Ranges []*ranger.Range + + Index *model.IndexInfo + Columns []*model.ColumnInfo + FullIdxCols []*expression.Column + FullIdxColLens []int + IdxCols []*expression.Column + IdxColLens []int +} + +// MatchIndexProp checks if the indexScan can match the required property. +func (p *LogicalIndexScan) MatchIndexProp(prop *property.PhysicalProperty) (match bool) { + if prop.IsEmpty() { + return true + } + if all, _ := prop.AllSameOrder(); !all { + return false + } + for i, col := range p.IdxCols { + if col.Equal(nil, prop.Items[0].Col) { + return matchIndicesProp(p.IdxCols[i:], p.IdxColLens[i:], prop.Items) + } else if i >= p.EqCondCount { + break + } + } + return false +} + +// getTablePath finds the TablePath from a group of accessPaths. +func getTablePath(paths []*util.AccessPath) *util.AccessPath { + for _, path := range paths { + if path.IsTablePath { + return path + } + } + return nil +} + +func (ds *DataSource) buildTableGather() LogicalPlan { + ts := LogicalTableScan{Source: ds, Handle: ds.getHandleCol()}.Init(ds.ctx) + ts.SetSchema(ds.Schema()) + sg := TiKVSingleGather{Source: ds, IsIndexGather: false}.Init(ds.ctx) + sg.SetSchema(ds.Schema()) + sg.SetChildren(ts) + return sg +} + +func (ds *DataSource) buildIndexGather(path *util.AccessPath) LogicalPlan { + is := LogicalIndexScan{ + Source: ds, + IsDoubleRead: false, + Index: path.Index, + FullIdxCols: path.FullIdxCols, + FullIdxColLens: path.FullIdxColLens, + IdxCols: path.IdxCols, + IdxColLens: path.IdxColLens, + }.Init(ds.ctx) + + is.Columns = make([]*model.ColumnInfo, len(ds.Columns)) + copy(is.Columns, ds.Columns) + is.SetSchema(ds.Schema()) + is.IdxCols, is.IdxColLens = expression.IndexInfo2PrefixCols(is.Columns, is.schema.Columns, is.Index) + + sg := TiKVSingleGather{ + Source: ds, + IsIndexGather: true, + Index: path.Index, + }.Init(ds.ctx) + sg.SetSchema(ds.Schema()) + sg.SetChildren(is) + return sg +} + +// Convert2Gathers builds logical TiKVSingleGathers from DataSource. +func (ds *DataSource) Convert2Gathers() (gathers []LogicalPlan) { + tg := ds.buildTableGather() + gathers = append(gathers, tg) + for _, path := range ds.possibleAccessPaths { + if !path.IsTablePath { + path.FullIdxCols, path.FullIdxColLens = expression.IndexInfo2Cols(ds.Columns, ds.schema.Columns, path.Index) + path.IdxCols, path.IdxColLens = expression.IndexInfo2PrefixCols(ds.Columns, ds.schema.Columns, path.Index) + // If index columns can cover all of the needed columns, we can use a IndexGather + IndexScan. + if isCoveringIndex(ds.schema.Columns, path.FullIdxCols, path.FullIdxColLens, ds.tableInfo.PKIsHandle) { + gathers = append(gathers, ds.buildIndexGather(path)) + } + // TODO: If index columns can not cover the schema, use IndexLookUpGather. + } + } + return gathers +} + +// deriveTablePathStats will fulfill the information that the AccessPath need. 
+// And it will check whether the primary key is covered only by point query. +func (ds *DataSource) deriveTablePathStats(path *util.AccessPath, conds []expression.Expression) (bool, error) { + var err error + sc := ds.ctx.GetSessionVars().StmtCtx + path.CountAfterAccess = float64(ds.statisticTable.Count) + path.TableFilters = conds + var pkCol *expression.Column + columnLen := len(ds.schema.Columns) + isUnsigned := false + if ds.tableInfo.PKIsHandle { + if pkColInfo := ds.tableInfo.GetPkColInfo(); pkColInfo != nil { + isUnsigned = mysql.HasUnsignedFlag(pkColInfo.Flag) + pkCol = expression.ColInfo2Col(ds.schema.Columns, pkColInfo) + } + } else if columnLen > 0 && ds.schema.Columns[columnLen-1].ID == model.ExtraHandleID { + pkCol = ds.schema.Columns[columnLen-1] + } + if pkCol == nil { + path.Ranges = ranger.FullIntRange(isUnsigned) + return false, nil + } + + path.Ranges = ranger.FullIntRange(isUnsigned) + if len(conds) == 0 { + return false, nil + } + path.AccessConds, path.TableFilters = ranger.DetachCondsForColumn(ds.ctx, conds, pkCol) + path.Ranges, err = ranger.BuildTableRange(path.AccessConds, sc, pkCol.RetType) + if err != nil { + return false, err + } + path.CountAfterAccess, err = ds.statisticTable.GetRowCountByIntColumnRanges(sc, pkCol.ID, path.Ranges) + // If the `CountAfterAccess` is less than `stats.RowCount`, there must be some inconsistent stats info. + // We prefer the `stats.RowCount` because it could use more stats info to calculate the selectivity. + if path.CountAfterAccess < ds.stats.RowCount { + path.CountAfterAccess = math.Min(ds.stats.RowCount/selectionFactor, float64(ds.statisticTable.Count)) + } + // Check whether the primary key is covered by point query. + noIntervalRange := true + for _, ran := range path.Ranges { + if !ran.IsPoint(sc) { + noIntervalRange = false + break + } + } + return noIntervalRange, err +} + +func (ds *DataSource) fillIndexPath(path *util.AccessPath, conds []expression.Expression) error { + sc := ds.ctx.GetSessionVars().StmtCtx + path.Ranges = ranger.FullRange() + path.CountAfterAccess = float64(ds.statisticTable.Count) + path.IdxCols, path.IdxColLens = expression.IndexInfo2PrefixCols(ds.Columns, ds.schema.Columns, path.Index) + path.FullIdxCols, path.FullIdxColLens = expression.IndexInfo2Cols(ds.Columns, ds.schema.Columns, path.Index) + if !path.Index.Unique && !path.Index.Primary && len(path.Index.Columns) == len(path.IdxCols) { + handleCol := ds.getPKIsHandleCol() + if handleCol != nil && !mysql.HasUnsignedFlag(handleCol.RetType.Flag) { + path.IdxCols = append(path.IdxCols, handleCol) + path.IdxColLens = append(path.IdxColLens, types.UnspecifiedLength) + } + } + if len(path.IdxCols) != 0 { + res, err := ranger.DetachCondAndBuildRangeForIndex(ds.ctx, conds, path.IdxCols, path.IdxColLens) + if err != nil { + return err + } + path.Ranges = res.Ranges + path.AccessConds = res.AccessConds + path.TableFilters = res.RemainedConds + path.EqCondCount = res.EqCondCount + path.EqOrInCondCount = res.EqOrInCount + path.IsDNFCond = res.IsDNFCond + path.CountAfterAccess, err = ds.tableStats.HistColl.GetRowCountByIndexRanges(sc, path.Index.ID, path.Ranges) + if err != nil { + return err + } + } else { + path.TableFilters = conds + } + return nil +} + +// deriveIndexPathStats will fulfill the information that the AccessPath need. +// And it will check whether this index is full matched by point query. We will use this check to +// determine whether we remove other paths or not. +// conds is the conditions used to generate the DetachRangeResult for path. 
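// Sketch (not in the original patch): the stats clamp shared by deriveTablePathStats
// above and deriveIndexPathStats below, pulled out for clarity. selectionFactor is the
// package-level constant those functions reference.
func clampCountAfterAccess(countAfterAccess, statsRowCount, tableCount float64) float64 {
	if countAfterAccess < statsRowCount {
		// Inconsistent stats: prefer stats.RowCount, but never exceed the physical table count.
		return math.Min(statsRowCount/selectionFactor, tableCount)
	}
	return countAfterAccess
}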
+func (ds *DataSource) deriveIndexPathStats(path *util.AccessPath) bool { + sc := ds.ctx.GetSessionVars().StmtCtx + if path.EqOrInCondCount == len(path.AccessConds) { + accesses, remained := path.SplitAccessCondFromFilters(path.EqOrInCondCount) + path.AccessConds = append(path.AccessConds, accesses...) + path.TableFilters = remained + if len(accesses) > 0 && ds.statisticTable.Pseudo { + path.CountAfterAccess = ds.statisticTable.PseudoAvgCountPerValue() + } else { + selectivity := path.CountAfterAccess / float64(ds.statisticTable.Count) + for i := range accesses { + col := path.IdxCols[path.EqOrInCondCount+i] + ndv := ds.getColumnNDV(col.ID) + ndv *= selectivity + if ndv < 1 { + ndv = 1.0 + } + path.CountAfterAccess = path.CountAfterAccess / ndv + } + } + } + path.IndexFilters, path.TableFilters = splitIndexFilterConditions(path.TableFilters, path.FullIdxCols, path.FullIdxColLens, ds.tableInfo) + // If the `CountAfterAccess` is less than `stats.RowCount`, there must be some inconsistent stats info. + // We prefer the `stats.RowCount` because it could use more stats info to calculate the selectivity. + if path.CountAfterAccess < ds.stats.RowCount { + path.CountAfterAccess = math.Min(ds.stats.RowCount/selectionFactor, float64(ds.statisticTable.Count)) + } + if path.IndexFilters != nil { + selectivity, err := ds.tableStats.HistColl.Selectivity(ds.ctx, path.IndexFilters, nil) + if err != nil { + logutil.BgLogger().Debug("calculate selectivity failed, use selection factor", zap.Error(err)) + selectivity = selectionFactor + } + path.CountAfterIndex = math.Max(path.CountAfterAccess*selectivity, ds.stats.RowCount) + } + // Check whether there's only point query. + noIntervalRanges := true + haveNullVal := false + for _, ran := range path.Ranges { + // Not point or the not full matched. + if !ran.IsPoint(sc) || len(ran.HighVal) != len(path.Index.Columns) { + noIntervalRanges = false + break + } + // Check whether there's null value. + for i := 0; i < len(path.Index.Columns); i++ { + if ran.HighVal[i].IsNull() { + haveNullVal = true + break + } + } + if haveNullVal { + break + } + } + return noIntervalRanges && !haveNullVal +} + +func getPKIsHandleColFromSchema(cols []*model.ColumnInfo, schema *expression.Schema, pkIsHandle bool) *expression.Column { + if !pkIsHandle { + // If the PKIsHandle is false, return the ExtraHandleColumn. + for i, col := range cols { + if col.ID == model.ExtraHandleID { + return schema.Columns[i] + } + } + return nil + } + for i, col := range cols { + if mysql.HasPriKeyFlag(col.Flag) { + return schema.Columns[i] + } + } + return nil +} + +func (ds *DataSource) getPKIsHandleCol() *expression.Column { + return getPKIsHandleColFromSchema(ds.Columns, ds.schema, ds.tableInfo.PKIsHandle) +} + +func (p *LogicalIndexScan) getPKIsHandleCol(schema *expression.Schema) *expression.Column { + // We cannot use p.Source.getPKIsHandleCol() here, + // Because we may re-prune p.Columns and p.schema during the transformation. + // That will make p.Columns different from p.Source.Columns. 
+ return getPKIsHandleColFromSchema(p.Columns, schema, p.Source.tableInfo.PKIsHandle) +} + +func (ds *DataSource) getHandleCol() *expression.Column { + if ds.handleCol != nil { + return ds.handleCol + } + + if !ds.tableInfo.PKIsHandle { + ds.handleCol = ds.newExtraHandleSchemaCol() + return ds.handleCol + } + + for i, col := range ds.Columns { + if mysql.HasPriKeyFlag(col.Flag) { + ds.handleCol = ds.schema.Columns[i] + break + } + } + + return ds.handleCol +} + +// TableInfo returns the *TableInfo of data source. +func (ds *DataSource) TableInfo() *model.TableInfo { + return ds.tableInfo +} + +// LogicalSort stands for the order by plan. +type LogicalSort struct { + baseLogicalPlan + + ByItems []*ByItems +} + +// LogicalTopN represents a top-n plan. +type LogicalTopN struct { + baseLogicalPlan + + ByItems []*ByItems + Offset uint64 + Count uint64 +} + +// isLimit checks if TopN is a limit plan. +func (lt *LogicalTopN) isLimit() bool { + return len(lt.ByItems) == 0 +} + +// LogicalLimit represents offset and limit plan. +type LogicalLimit struct { + baseLogicalPlan + + Offset uint64 + Count uint64 +} + +// ShowContents stores the contents for the `SHOW` statement. +type ShowContents struct { + Tp ast.ShowStmtType // Databases/Tables/Columns/.... + DBName string + Table *ast.TableName // Used for showing columns. + Column *ast.ColumnName // Used for `desc table column`. + IndexName model.CIStr + Flag int // Some flag parsed from sql, such as FULL. + Full bool + IfNotExists bool // Used for `show create database if not exists`. + + GlobalScope bool // Used by show variables. +} + +// LogicalShow represents a show plan. +type LogicalShow struct { + logicalSchemaProducer + ShowContents +} + +// LogicalShowDDLJobs is for showing DDL job list. +type LogicalShowDDLJobs struct { + logicalSchemaProducer + + JobNumber int64 +} diff --git a/planner/core/mock.go b/planner/core/mock.go new file mode 100644 index 0000000..82a3eaf --- /dev/null +++ b/planner/core/mock.go @@ -0,0 +1,344 @@ +// Copyright 2018 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package core + +import ( + "github.com/pingcap/tidb/domain" + "github.com/pingcap/tidb/parser/model" + "github.com/pingcap/tidb/parser/mysql" + "github.com/pingcap/tidb/sessionctx" + "github.com/pingcap/tidb/types" + "github.com/pingcap/tidb/util/mock" +) + +func newLongType() types.FieldType { + return *(types.NewFieldType(mysql.TypeLong)) +} + +func newStringType() types.FieldType { + ft := types.NewFieldType(mysql.TypeVarchar) + ft.Charset, ft.Collate = types.DefaultCharsetForType(mysql.TypeVarchar) + return *ft +} + +func newDateType() types.FieldType { + ft := types.NewFieldType(mysql.TypeDate) + return *ft +} + +// MockSignedTable is only used for plan related tests. 
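// Sketch (not in the original patch): how isLimit on LogicalTopN (defined earlier in
// logical_plans.go) separates a bare LIMIT from ORDER BY ... LIMIT. isPureLimit is a
// hypothetical helper, not part of this patch.
func isPureLimit(p LogicalPlan) bool {
	topN, ok := p.(*LogicalTopN)
	return ok && topN.isLimit()
}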
+func MockSignedTable() *model.TableInfo { + // column: a, b, c, d, e, c_str, d_str, e_str, f, g + // PK: a + // indices: c_d_e, e, f, g, f_g, c_d_e_str, c_d_e_str_prefix + indices := []*model.IndexInfo{ + { + Name: model.NewCIStr("c_d_e"), + Columns: []*model.IndexColumn{ + { + Name: model.NewCIStr("c"), + Length: types.UnspecifiedLength, + Offset: 2, + }, + { + Name: model.NewCIStr("d"), + Length: types.UnspecifiedLength, + Offset: 3, + }, + { + Name: model.NewCIStr("e"), + Length: types.UnspecifiedLength, + Offset: 4, + }, + }, + State: model.StatePublic, + Unique: true, + }, + { + Name: model.NewCIStr("e"), + Columns: []*model.IndexColumn{ + { + Name: model.NewCIStr("e"), + Length: types.UnspecifiedLength, + Offset: 4, + }, + }, + State: model.StateWriteOnly, + Unique: true, + }, + { + Name: model.NewCIStr("f"), + Columns: []*model.IndexColumn{ + { + Name: model.NewCIStr("f"), + Length: types.UnspecifiedLength, + Offset: 8, + }, + }, + State: model.StatePublic, + Unique: true, + }, + { + Name: model.NewCIStr("g"), + Columns: []*model.IndexColumn{ + { + Name: model.NewCIStr("g"), + Length: types.UnspecifiedLength, + Offset: 9, + }, + }, + State: model.StatePublic, + }, + { + Name: model.NewCIStr("f_g"), + Columns: []*model.IndexColumn{ + { + Name: model.NewCIStr("f"), + Length: types.UnspecifiedLength, + Offset: 8, + }, + { + Name: model.NewCIStr("g"), + Length: types.UnspecifiedLength, + Offset: 9, + }, + }, + State: model.StatePublic, + Unique: true, + }, + { + Name: model.NewCIStr("c_d_e_str"), + Columns: []*model.IndexColumn{ + { + Name: model.NewCIStr("c_str"), + Length: types.UnspecifiedLength, + Offset: 5, + }, + { + Name: model.NewCIStr("d_str"), + Length: types.UnspecifiedLength, + Offset: 6, + }, + { + Name: model.NewCIStr("e_str"), + Length: types.UnspecifiedLength, + Offset: 7, + }, + }, + State: model.StatePublic, + }, + { + Name: model.NewCIStr("e_d_c_str_prefix"), + Columns: []*model.IndexColumn{ + { + Name: model.NewCIStr("e_str"), + Length: types.UnspecifiedLength, + Offset: 7, + }, + { + Name: model.NewCIStr("d_str"), + Length: types.UnspecifiedLength, + Offset: 6, + }, + { + Name: model.NewCIStr("c_str"), + Length: 10, + Offset: 5, + }, + }, + State: model.StatePublic, + }, + } + pkColumn := &model.ColumnInfo{ + State: model.StatePublic, + Offset: 0, + Name: model.NewCIStr("a"), + FieldType: newLongType(), + ID: 1, + } + col0 := &model.ColumnInfo{ + State: model.StatePublic, + Offset: 1, + Name: model.NewCIStr("b"), + FieldType: newLongType(), + ID: 2, + } + col1 := &model.ColumnInfo{ + State: model.StatePublic, + Offset: 2, + Name: model.NewCIStr("c"), + FieldType: newLongType(), + ID: 3, + } + col2 := &model.ColumnInfo{ + State: model.StatePublic, + Offset: 3, + Name: model.NewCIStr("d"), + FieldType: newLongType(), + ID: 4, + } + col3 := &model.ColumnInfo{ + State: model.StatePublic, + Offset: 4, + Name: model.NewCIStr("e"), + FieldType: newLongType(), + ID: 5, + } + colStr1 := &model.ColumnInfo{ + State: model.StatePublic, + Offset: 5, + Name: model.NewCIStr("c_str"), + FieldType: newStringType(), + ID: 6, + } + colStr2 := &model.ColumnInfo{ + State: model.StatePublic, + Offset: 6, + Name: model.NewCIStr("d_str"), + FieldType: newStringType(), + ID: 7, + } + colStr3 := &model.ColumnInfo{ + State: model.StatePublic, + Offset: 7, + Name: model.NewCIStr("e_str"), + FieldType: newStringType(), + ID: 8, + } + col4 := &model.ColumnInfo{ + State: model.StatePublic, + Offset: 8, + Name: model.NewCIStr("f"), + FieldType: newLongType(), + ID: 9, + } + col5 := 
&model.ColumnInfo{ + State: model.StatePublic, + Offset: 9, + Name: model.NewCIStr("g"), + FieldType: newLongType(), + ID: 10, + } + col6 := &model.ColumnInfo{ + State: model.StatePublic, + Offset: 10, + Name: model.NewCIStr("h"), + FieldType: newLongType(), + ID: 11, + } + col7 := &model.ColumnInfo{ + State: model.StatePublic, + Offset: 11, + Name: model.NewCIStr("i_date"), + FieldType: newDateType(), + ID: 12, + } + pkColumn.Flag = mysql.PriKeyFlag | mysql.NotNullFlag + // Column 'b', 'c', 'd', 'f', 'g' is not null. + col0.Flag = mysql.NotNullFlag + col1.Flag = mysql.NotNullFlag + col2.Flag = mysql.NotNullFlag + col4.Flag = mysql.NotNullFlag + col5.Flag = mysql.NotNullFlag + col6.Flag = mysql.NoDefaultValueFlag + table := &model.TableInfo{ + Columns: []*model.ColumnInfo{pkColumn, col0, col1, col2, col3, colStr1, colStr2, colStr3, col4, col5, col6, col7}, + Indices: indices, + Name: model.NewCIStr("t"), + PKIsHandle: true, + } + return table +} + +// MockUnsignedTable is only used for plan related tests. +func MockUnsignedTable() *model.TableInfo { + // column: a, b + // PK: a + // indeices: b + indices := []*model.IndexInfo{ + { + Name: model.NewCIStr("b"), + Columns: []*model.IndexColumn{ + { + Name: model.NewCIStr("b"), + Length: types.UnspecifiedLength, + Offset: 1, + }, + }, + State: model.StatePublic, + Unique: true, + }, + { + Name: model.NewCIStr("b_c"), + Columns: []*model.IndexColumn{ + { + Name: model.NewCIStr("b"), + Length: types.UnspecifiedLength, + Offset: 1, + }, + { + Name: model.NewCIStr("c"), + Length: types.UnspecifiedLength, + Offset: 2, + }, + }, + State: model.StatePublic, + }, + } + pkColumn := &model.ColumnInfo{ + State: model.StatePublic, + Offset: 0, + Name: model.NewCIStr("a"), + FieldType: newLongType(), + ID: 1, + } + col0 := &model.ColumnInfo{ + State: model.StatePublic, + Offset: 1, + Name: model.NewCIStr("b"), + FieldType: newLongType(), + ID: 2, + } + col1 := &model.ColumnInfo{ + State: model.StatePublic, + Offset: 2, + Name: model.NewCIStr("c"), + FieldType: newLongType(), + ID: 3, + } + pkColumn.Flag = mysql.PriKeyFlag | mysql.NotNullFlag | mysql.UnsignedFlag + // Column 'b', 'c', 'd', 'f', 'g' is not null. + col0.Flag = mysql.NotNullFlag + col1.Flag = mysql.UnsignedFlag + table := &model.TableInfo{ + Columns: []*model.ColumnInfo{pkColumn, col0, col1}, + Indices: indices, + Name: model.NewCIStr("t2"), + PKIsHandle: true, + } + return table +} + +// MockContext is only used for plan related tests. +func MockContext() sessionctx.Context { + ctx := mock.NewContext() + ctx.Store = &mock.Store{ + Client: &mock.Client{}, + } + ctx.GetSessionVars().CurrentDB = "test" + do := &domain.Domain{} + do.CreateStatsHandle(ctx) + domain.BindDomain(ctx, do) + return ctx +} diff --git a/planner/core/optimizer.go b/planner/core/optimizer.go new file mode 100644 index 0000000..a5f109d --- /dev/null +++ b/planner/core/optimizer.go @@ -0,0 +1,138 @@ +// Copyright 2015 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package core + +import ( + "context" + "math" + + "github.com/pingcap/tidb/expression" + "github.com/pingcap/tidb/infoschema" + "github.com/pingcap/tidb/parser/ast" + "github.com/pingcap/tidb/planner/property" + "github.com/pingcap/tidb/sessionctx" + "github.com/pingcap/tidb/types" +) + +// OptimizeAstNode optimizes the query to a physical plan directly. +var OptimizeAstNode func(ctx context.Context, sctx sessionctx.Context, node ast.Node, is infoschema.InfoSchema) (Plan, types.NameSlice, error) + +const ( + flagPrunColumns uint64 = 1 << iota + flagBuildKeyInfo + flagEliminateAgg + flagEliminateProjection + flagMaxMinEliminate + flagPredicatePushDown + flagEliminateOuterJoin + flagPushDownAgg + flagPushDownTopN + flagJoinReOrder +) + +var optRuleList = []logicalOptRule{ + &columnPruner{}, + &buildKeySolver{}, + &aggregationEliminator{}, + &projectionEliminator{}, + &maxMinEliminator{}, + &ppdSolver{}, + &outerJoinEliminator{}, + &aggregationPushDownSolver{}, + &pushDownTopNOptimizer{}, + &joinReOrderSolver{}, +} + +// logicalOptRule means a logical optimizing rule, which contains decorrelate, ppd, column pruning, etc. +type logicalOptRule interface { + optimize(context.Context, LogicalPlan) (LogicalPlan, error) + name() string +} + +// BuildLogicalPlan used to build logical plan from ast.Node. +func BuildLogicalPlan(ctx context.Context, sctx sessionctx.Context, node ast.Node, is infoschema.InfoSchema) (Plan, types.NameSlice, error) { + sctx.GetSessionVars().PlanID = 0 + sctx.GetSessionVars().PlanColumnID = 0 + builder := NewPlanBuilder(sctx, is) + p, err := builder.Build(ctx, node) + if err != nil { + return nil, nil, err + } + return p, p.OutputNames(), err +} + +// DoOptimize optimizes a logical plan to a physical plan. +func DoOptimize(ctx context.Context, flag uint64, logic LogicalPlan) (PhysicalPlan, error) { + logic, err := logicalOptimize(ctx, flag, logic) + if err != nil { + return nil, err + } + physical, err := physicalOptimize(logic) + if err != nil { + return nil, err + } + finalPlan := postOptimize(physical) + return finalPlan, nil +} + +func postOptimize(plan PhysicalPlan) PhysicalPlan { + plan = eliminatePhysicalProjection(plan) + plan = injectExtraProjection(plan) + return plan +} + +func logicalOptimize(ctx context.Context, flag uint64, logic LogicalPlan) (LogicalPlan, error) { + var err error + for i, rule := range optRuleList { + // The order of flags is same as the order of optRule in the list. + // We use a bitmask to record which opt rules should be used. If the i-th bit is 1, it means we should + // apply i-th optimizing rule. + if flag&(1< 0 + }) + c.Assert(core.ToString(p), Equals, output[i].Best, comment) + warnings := se.GetSessionVars().StmtCtx.GetWarnings() + if output[i].HasWarn { + c.Assert(warnings, HasLen, 1, comment) + } else { + c.Assert(warnings, HasLen, 0, comment) + } + } +} diff --git a/planner/core/physical_plans.go b/planner/core/physical_plans.go new file mode 100644 index 0000000..dbe4fb6 --- /dev/null +++ b/planner/core/physical_plans.go @@ -0,0 +1,367 @@ +// Copyright 2016 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package core + +import ( + "github.com/pingcap/tidb/expression" + "github.com/pingcap/tidb/expression/aggregation" + "github.com/pingcap/tidb/parser/model" + "github.com/pingcap/tidb/planner/property" + "github.com/pingcap/tidb/sessionctx/stmtctx" + "github.com/pingcap/tidb/types" + "github.com/pingcap/tidb/util/ranger" +) + +var ( + _ PhysicalPlan = &PhysicalSelection{} + _ PhysicalPlan = &PhysicalProjection{} + _ PhysicalPlan = &PhysicalTopN{} + _ PhysicalPlan = &PhysicalTableDual{} + _ PhysicalPlan = &PhysicalSort{} + _ PhysicalPlan = &NominalSort{} + _ PhysicalPlan = &PhysicalLimit{} + _ PhysicalPlan = &PhysicalIndexScan{} + _ PhysicalPlan = &PhysicalTableScan{} + _ PhysicalPlan = &PhysicalTableReader{} + _ PhysicalPlan = &PhysicalIndexReader{} + _ PhysicalPlan = &PhysicalIndexLookUpReader{} + _ PhysicalPlan = &PhysicalHashAgg{} + _ PhysicalPlan = &PhysicalHashJoin{} + _ PhysicalPlan = &PhysicalMergeJoin{} + _ PhysicalPlan = &PhysicalUnionScan{} +) + +// PhysicalTableReader is the table reader in tidb. +type PhysicalTableReader struct { + physicalSchemaProducer + + // TablePlans flats the tablePlan to construct executor pb. + TablePlans []PhysicalPlan + tablePlan PhysicalPlan +} + +// GetPhysicalTableReader returns PhysicalTableReader for logical TiKVSingleGather. +func (sg *TiKVSingleGather) GetPhysicalTableReader(schema *expression.Schema, stats *property.StatsInfo, props ...*property.PhysicalProperty) *PhysicalTableReader { + reader := PhysicalTableReader{}.Init(sg.ctx) + reader.stats = stats + reader.SetSchema(schema) + reader.childrenReqProps = props + return reader +} + +// GetPhysicalIndexReader returns PhysicalIndexReader for logical TiKVSingleGather. +func (sg *TiKVSingleGather) GetPhysicalIndexReader(schema *expression.Schema, stats *property.StatsInfo, props ...*property.PhysicalProperty) *PhysicalIndexReader { + reader := PhysicalIndexReader{}.Init(sg.ctx) + reader.stats = stats + reader.SetSchema(schema) + reader.childrenReqProps = props + return reader +} + +// SetChildren overrides PhysicalPlan SetChildren interface. +func (p *PhysicalTableReader) SetChildren(children ...PhysicalPlan) { + p.tablePlan = children[0] + p.TablePlans = flattenPushDownPlan(p.tablePlan) +} + +// PhysicalIndexReader is the index reader in tidb. +type PhysicalIndexReader struct { + physicalSchemaProducer + + // IndexPlans flats the indexPlan to construct executor pb. + IndexPlans []PhysicalPlan + indexPlan PhysicalPlan + + // OutputColumns represents the columns that index reader should return. + OutputColumns []*expression.Column +} + +// SetSchema overrides PhysicalPlan SetSchema interface. +func (p *PhysicalIndexReader) SetSchema(_ *expression.Schema) { + if p.indexPlan != nil { + p.IndexPlans = flattenPushDownPlan(p.indexPlan) + switch p.indexPlan.(type) { + case *PhysicalHashAgg: + p.schema = p.indexPlan.Schema() + default: + is := p.IndexPlans[0].(*PhysicalIndexScan) + p.schema = is.dataSourceSchema + } + p.OutputColumns = p.schema.Clone().Columns + } +} + +// SetChildren overrides PhysicalPlan SetChildren interface. 
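// Sketch (not in the original patch): the IsIndexGather flag on TiKVSingleGather decides
// which of the two reader constructors above applies during implementation; a dispatcher
// would look roughly like this (getReader is hypothetical, not a method this patch adds).
func (sg *TiKVSingleGather) getReader(schema *expression.Schema, stats *property.StatsInfo, props ...*property.PhysicalProperty) PhysicalPlan {
	if sg.IsIndexGather {
		return sg.GetPhysicalIndexReader(schema, stats, props...)
	}
	return sg.GetPhysicalTableReader(schema, stats, props...)
}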
+func (p *PhysicalIndexReader) SetChildren(children ...PhysicalPlan) { + p.indexPlan = children[0] + p.SetSchema(nil) +} + +// PhysicalIndexLookUpReader is the index look up reader in tidb. It's used in case of double reading. +type PhysicalIndexLookUpReader struct { + physicalSchemaProducer + + // IndexPlans flats the indexPlan to construct executor pb. + IndexPlans []PhysicalPlan + // TablePlans flats the tablePlan to construct executor pb. + TablePlans []PhysicalPlan + indexPlan PhysicalPlan + tablePlan PhysicalPlan + + ExtraHandleCol *expression.Column +} + +// PhysicalIndexScan represents an index scan plan. +type PhysicalIndexScan struct { + physicalSchemaProducer + + // AccessCondition is used to calculate range. + AccessCondition []expression.Expression + + Table *model.TableInfo + Index *model.IndexInfo + IdxCols []*expression.Column + IdxColLens []int + Ranges []*ranger.Range + Columns []*model.ColumnInfo + DBName model.CIStr + + TableAsName *model.CIStr + + // dataSourceSchema is the original schema of DataSource. The schema of index scan in KV and index reader in TiDB + // will be different. The schema of index scan will decode all columns of index but the TiDB only need some of them. + dataSourceSchema *expression.Schema + + Desc bool + KeepOrder bool + // DoubleRead means if the index executor will read kv two times. + // If the query requires the columns that don't belong to index, DoubleRead will be true. + DoubleRead bool +} + +// PhysicalMemTable reads memory table. +type PhysicalMemTable struct { + physicalSchemaProducer + + DBName model.CIStr + Table *model.TableInfo + Columns []*model.ColumnInfo +} + +// PhysicalTableScan represents a table scan plan. +type PhysicalTableScan struct { + physicalSchemaProducer + + // AccessCondition is used to calculate range. + AccessCondition []expression.Expression + filterCondition []expression.Expression + + Table *model.TableInfo + Columns []*model.ColumnInfo + DBName model.CIStr + Ranges []*ranger.Range + pkCol *expression.Column + + TableAsName *model.CIStr + + HandleIdx int + + // KeepOrder is true, if sort data by scanning pkcol, + KeepOrder bool + Desc bool +} + +// PhysicalProjection is the physical operator of projection. +type PhysicalProjection struct { + physicalSchemaProducer + + Exprs []expression.Expression +} + +// PhysicalTopN is the physical operator of topN. +type PhysicalTopN struct { + basePhysicalPlan + + ByItems []*ByItems + Offset uint64 + Count uint64 +} + +type basePhysicalJoin struct { + physicalSchemaProducer + + JoinType JoinType + + LeftConditions expression.CNFExprs + RightConditions expression.CNFExprs + OtherConditions expression.CNFExprs + + InnerChildIdx int + OuterJoinKeys []*expression.Column + InnerJoinKeys []*expression.Column + LeftJoinKeys []*expression.Column + RightJoinKeys []*expression.Column + DefaultValues []types.Datum +} + +// PhysicalHashJoin represents hash join implementation of LogicalJoin. +type PhysicalHashJoin struct { + basePhysicalJoin + + Concurrency uint + EqualConditions []*expression.ScalarFunction +} + +// NewPhysicalHashJoin creates a new PhysicalHashJoin from LogicalJoin. 
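// Sketch (not in the original patch): InnerChildIdx on basePhysicalJoin above marks the
// inner child of the two-child join shape used throughout this package; the other child
// is the outer side. outerIdx is a hypothetical helper.
func outerIdx(innerChildIdx int) int {
	return 1 - innerChildIdx
}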
+func NewPhysicalHashJoin(p *LogicalJoin, innerIdx int, newStats *property.StatsInfo, prop ...*property.PhysicalProperty) *PhysicalHashJoin { + baseJoin := basePhysicalJoin{ + LeftConditions: p.LeftConditions, + RightConditions: p.RightConditions, + OtherConditions: p.OtherConditions, + LeftJoinKeys: p.LeftJoinKeys, + RightJoinKeys: p.RightJoinKeys, + JoinType: p.JoinType, + DefaultValues: p.DefaultValues, + InnerChildIdx: innerIdx, + } + hashJoin := PhysicalHashJoin{ + basePhysicalJoin: baseJoin, + EqualConditions: p.EqualConditions, + Concurrency: uint(p.ctx.GetSessionVars().HashJoinConcurrency), + }.Init(p.ctx, newStats, prop...) + return hashJoin +} + +// PhysicalMergeJoin represents merge join implementation of LogicalJoin. +type PhysicalMergeJoin struct { + basePhysicalJoin + + CompareFuncs []expression.CompareFunc +} + +// PhysicalLimit is the physical operator of Limit. +type PhysicalLimit struct { + basePhysicalPlan + + Offset uint64 + Count uint64 +} + +type basePhysicalAgg struct { + physicalSchemaProducer + + AggFuncs []*aggregation.AggFuncDesc + GroupByItems []expression.Expression +} + +func (p *basePhysicalAgg) getAggFuncCostFactor() (factor float64) { + factor = 0.0 + for _, agg := range p.AggFuncs { + if fac, ok := aggFuncFactor[agg.Name]; ok { + factor += fac + } else { + factor += aggFuncFactor["default"] + } + } + if factor == 0 { + factor = 1.0 + } + return +} + +// PhysicalHashAgg is hash operator of aggregate. +type PhysicalHashAgg struct { + basePhysicalAgg +} + +// NewPhysicalHashAgg creates a new PhysicalHashAgg from a LogicalAggregation. +func NewPhysicalHashAgg(la *LogicalAggregation, newStats *property.StatsInfo, prop *property.PhysicalProperty) *PhysicalHashAgg { + agg := basePhysicalAgg{ + GroupByItems: la.GroupByItems, + AggFuncs: la.AggFuncs, + }.initForHash(la.ctx, newStats, prop) + return agg +} + +// PhysicalSort is the physical operator of sort, which implements a memory sort. +type PhysicalSort struct { + basePhysicalPlan + + ByItems []*ByItems +} + +// NominalSort asks sort properties for its child. It is a fake operator that will not +// appear in final physical operator tree. +type NominalSort struct { + basePhysicalPlan +} + +// PhysicalUnionScan represents a union scan operator. +type PhysicalUnionScan struct { + basePhysicalPlan + + Conditions []expression.Expression + + HandleCol *expression.Column +} + +// IsPointGetByUniqueKey checks whether is a point get by unique key. +func (p *PhysicalIndexScan) IsPointGetByUniqueKey(sc *stmtctx.StatementContext) bool { + return len(p.Ranges) == 1 && + p.Index.Unique && + len(p.Ranges[0].LowVal) == len(p.Index.Columns) && + p.Ranges[0].IsPoint(sc) +} + +// PhysicalSelection represents a filter. +type PhysicalSelection struct { + basePhysicalPlan + + Conditions []expression.Expression +} + +// PhysicalTableDual is the physical operator of dual. +type PhysicalTableDual struct { + physicalSchemaProducer + + RowCount int + + // names is used for OutputNames() method. Dual may be inited when building point get plan. + // So it needs to hold names for itself. + names []*types.FieldName +} + +// OutputNames returns the outputting names of each column. +func (p *PhysicalTableDual) OutputNames() types.NameSlice { + return p.names +} + +// SetOutputNames sets the outputting name by the given slice. +func (p *PhysicalTableDual) SetOutputNames(names types.NameSlice) { + p.names = names +} + +// PhysicalShow represents a show plan. 
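// Sketch (not in the original patch): getAggFuncCostFactor above sums per-function cost
// factors from the package map aggFuncFactor, falling back to its "default" entry, and
// returns 1.0 when nothing was accumulated. aggFactorFor is a hypothetical helper
// isolating that fallback.
func aggFactorFor(name string) float64 {
	if fac, ok := aggFuncFactor[name]; ok {
		return fac
	}
	return aggFuncFactor["default"]
}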
+type PhysicalShow struct { + physicalSchemaProducer + + ShowContents +} + +// PhysicalShowDDLJobs is for showing DDL job list. +type PhysicalShowDDLJobs struct { + physicalSchemaProducer + + JobNumber int64 +} diff --git a/planner/core/plan.go b/planner/core/plan.go new file mode 100644 index 0000000..7cb6972 --- /dev/null +++ b/planner/core/plan.go @@ -0,0 +1,354 @@ +// Copyright 2015 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package core + +import ( + "fmt" + "math" + "strconv" + + "github.com/pingcap/tidb/expression" + "github.com/pingcap/tidb/planner/property" + "github.com/pingcap/tidb/sessionctx" + "github.com/pingcap/tidb/types" + "github.com/pingcap/tidb/util/stringutil" + "github.com/pingcap/tipb/go-tipb" +) + +// Plan is the description of an execution flow. +// It is created from ast.Node first, then optimized by the optimizer, +// finally used by the executor to create a Cursor which executes the statement. +type Plan interface { + // Get the schema. + Schema() *expression.Schema + + // Get the ID. + ID() int + + // TP get the plan type. + TP() string + + // Get the ID in explain statement + ExplainID() fmt.Stringer + + // ExplainInfo returns operator information to be explained. + ExplainInfo() string + + // replaceExprColumns replace all the column reference in the plan's expression node. + replaceExprColumns(replace map[string]*expression.Column) + + SCtx() sessionctx.Context + + // property.StatsInfo will return the property.StatsInfo for this plan. + statsInfo() *property.StatsInfo + + // OutputNames returns the outputting names of each column. + OutputNames() types.NameSlice + + // SetOutputNames sets the outputting name by the given slice. + SetOutputNames(names types.NameSlice) +} + +func enforceProperty(p *property.PhysicalProperty, tsk task, ctx sessionctx.Context) task { + if p.IsEmpty() || tsk.plan() == nil { + return tsk + } + tsk = finishCopTask(ctx, tsk) + sortReqProp := &property.PhysicalProperty{TaskTp: property.RootTaskType, Items: p.Items, ExpectedCnt: math.MaxFloat64} + sort := PhysicalSort{ByItems: make([]*ByItems, 0, len(p.Items))}.Init(ctx, tsk.plan().statsInfo(), sortReqProp) + for _, col := range p.Items { + sort.ByItems = append(sort.ByItems, &ByItems{col.Col, col.Desc}) + } + return sort.attach2Task(tsk) +} + +// LogicalPlan is a tree of logical operators. +// We can do a lot of logical optimizations to it, like predicate pushdown and column pruning. +type LogicalPlan interface { + Plan + + // PredicatePushDown pushes down the predicates in the where/on/having clauses as deeply as possible. + // It will accept a predicate that is an expression slice, and return the expressions that can't be pushed. + // Because it might change the root if the having clause exists, we need to return a plan that represents a new root. + PredicatePushDown([]expression.Expression) ([]expression.Expression, LogicalPlan) + + // PruneColumns prunes the unused columns. + PruneColumns([]*expression.Column) error + + // findBestTask converts the logical plan to the physical plan. It's a new interface. 
+ // It is called recursively from the parent to the children to create the result physical plan. + // Some logical plans will convert the children to the physical plans in different ways, and return the one + // with the lowest cost. + findBestTask(prop *property.PhysicalProperty) (task, error) + + // BuildKeyInfo will collect the information of unique keys into schema. + // Because this method is also used in cascades planner, we cannot use + // things like `p.schema` or `p.children` inside it. We should use the `selfSchema` + // and `childSchema` instead. + BuildKeyInfo(selfSchema *expression.Schema, childSchema []*expression.Schema) + + // pushDownTopN will push down the topN or limit operator during logical optimization. + pushDownTopN(topN *LogicalTopN) LogicalPlan + + // recursiveDeriveStats derives statistic info between plans. + recursiveDeriveStats() (*property.StatsInfo, error) + + // DeriveStats derives statistic info for current plan node given child stats. + // We need selfSchema, childSchema here because it makes this method can be used in + // cascades planner, where LogicalPlan might not record its children or schema. + DeriveStats(childStats []*property.StatsInfo, selfSchema *expression.Schema, childSchema []*expression.Schema) (*property.StatsInfo, error) + + // PreparePossibleProperties is only used for join and aggregation. Like group by a,b,c, all permutation of (a,b,c) is + // valid, but the ordered indices in leaf plan is limited. So we can get all possible order properties by a pre-walking. + PreparePossibleProperties(schema *expression.Schema, childrenProperties ...[][]*expression.Column) [][]*expression.Column + + // exhaustPhysicalPlans generates all possible plans that can match the required property. + exhaustPhysicalPlans(*property.PhysicalProperty) []PhysicalPlan + + // Get all the children. + Children() []LogicalPlan + + // SetChildren sets the children for the plan. + SetChildren(...LogicalPlan) + + // SetChild sets the ith child for the plan. + SetChild(i int, child LogicalPlan) +} + +// PhysicalPlan is a tree of the physical operators. +type PhysicalPlan interface { + Plan + + // attach2Task makes the current physical plan as the father of task's physicalPlan and updates the cost of + // current task. If the child's task is cop task, some operator may close this task and return a new rootTask. + attach2Task(...task) task + + // ToPB converts physical plan to tipb executor. + ToPB(ctx sessionctx.Context) (*tipb.Executor, error) + + // getChildReqProps gets the required property by child index. + GetChildReqProps(idx int) *property.PhysicalProperty + + // StatsCount returns the count of property.StatsInfo for this plan. + StatsCount() float64 + + // Get all the children. + Children() []PhysicalPlan + + // SetChildren sets the children for the plan. + SetChildren(...PhysicalPlan) + + // SetChild sets the ith child for the plan. + SetChild(i int, child PhysicalPlan) + + // ResolveIndices resolves the indices for columns. After doing this, the columns can evaluate the rows by their indices. + ResolveIndices() error + + // Stats returns the StatsInfo of the plan. + Stats() *property.StatsInfo + + // ExplainNormalizedInfo returns operator normalized information for generating digest. + ExplainNormalizedInfo() string +} + +type baseLogicalPlan struct { + basePlan + + taskMap map[string]task + self LogicalPlan + children []LogicalPlan +} + +// ExplainInfo implements Plan interface. 
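+// The base implementation returns an empty string; concrete operators override
+// it to describe their own conditions, keys and other attributes in EXPLAIN output.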
+func (p *baseLogicalPlan) ExplainInfo() string { + return "" +} + +type basePhysicalPlan struct { + basePlan + + childrenReqProps []*property.PhysicalProperty + self PhysicalPlan + children []PhysicalPlan +} + +// ExplainInfo implements Plan interface. +func (p *basePhysicalPlan) ExplainInfo() string { + return "" +} + +// ExplainInfo implements Plan interface. +func (p *basePhysicalPlan) ExplainNormalizedInfo() string { + return "" +} + +func (p *basePhysicalPlan) GetChildReqProps(idx int) *property.PhysicalProperty { + return p.childrenReqProps[idx] +} + +func (p *baseLogicalPlan) getTask(prop *property.PhysicalProperty) task { + key := prop.HashCode() + return p.taskMap[string(key)] +} + +func (p *baseLogicalPlan) storeTask(prop *property.PhysicalProperty, task task) { + key := prop.HashCode() + p.taskMap[string(key)] = task +} + +// BuildKeyInfo implements LogicalPlan BuildKeyInfo interface. +func (p *baseLogicalPlan) BuildKeyInfo(selfSchema *expression.Schema, childSchema []*expression.Schema) { +} + +// BuildKeyInfo implements LogicalPlan BuildKeyInfo interface. +func (p *logicalSchemaProducer) BuildKeyInfo(selfSchema *expression.Schema, childSchema []*expression.Schema) { + selfSchema.Keys = nil +} + +func newBasePlan(ctx sessionctx.Context, tp string) basePlan { + ctx.GetSessionVars().PlanID++ + id := ctx.GetSessionVars().PlanID + return basePlan{ + tp: tp, + id: id, + ctx: ctx, + } +} + +func newBaseLogicalPlan(ctx sessionctx.Context, tp string, self LogicalPlan) baseLogicalPlan { + return baseLogicalPlan{ + taskMap: make(map[string]task), + basePlan: newBasePlan(ctx, tp), + self: self, + } +} + +func newBasePhysicalPlan(ctx sessionctx.Context, tp string, self PhysicalPlan) basePhysicalPlan { + return basePhysicalPlan{ + basePlan: newBasePlan(ctx, tp), + self: self, + } +} + +// PruneColumns implements LogicalPlan interface. +func (p *baseLogicalPlan) PruneColumns(parentUsedCols []*expression.Column) error { + if len(p.children) == 0 { + return nil + } + return p.children[0].PruneColumns(parentUsedCols) +} + +// basePlan implements base Plan interface. +// Should be used as embedded struct in Plan implementations. +type basePlan struct { + tp string + id int + ctx sessionctx.Context + stats *property.StatsInfo +} + +// OutputNames returns the outputting names of each column. +func (p *basePlan) OutputNames() types.NameSlice { + return nil +} + +func (p *basePlan) SetOutputNames(names types.NameSlice) { +} + +func (p *basePlan) replaceExprColumns(replace map[string]*expression.Column) { +} + +// ID implements Plan ID interface. +func (p *basePlan) ID() int { + return p.id +} + +// property.StatsInfo implements the Plan interface. +func (p *basePlan) statsInfo() *property.StatsInfo { + return p.stats +} + +// ExplainInfo implements Plan interface. +func (p *basePlan) ExplainInfo() string { + return "N/A" +} + +func (p *basePlan) ExplainID() fmt.Stringer { + return stringutil.MemoizeStr(func() string { + return p.tp + "_" + strconv.Itoa(p.id) + }) +} + +// TP implements Plan interface. +func (p *basePlan) TP() string { + return p.tp +} + +// Stats implements Plan Stats interface. +func (p *basePlan) Stats() *property.StatsInfo { + return p.stats +} + +// Schema implements Plan Schema interface. 
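+// Operators that do not produce a schema of their own simply expose the schema
+// of their first child.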
+func (p *baseLogicalPlan) Schema() *expression.Schema { + return p.children[0].Schema() +} + +func (p *baseLogicalPlan) OutputNames() types.NameSlice { + return p.children[0].OutputNames() +} + +func (p *baseLogicalPlan) SetOutputNames(names types.NameSlice) { + p.children[0].SetOutputNames(names) +} + +// Schema implements Plan Schema interface. +func (p *basePhysicalPlan) Schema() *expression.Schema { + return p.children[0].Schema() +} + +// Children implements LogicalPlan Children interface. +func (p *baseLogicalPlan) Children() []LogicalPlan { + return p.children +} + +// Children implements PhysicalPlan Children interface. +func (p *basePhysicalPlan) Children() []PhysicalPlan { + return p.children +} + +// SetChildren implements LogicalPlan SetChildren interface. +func (p *baseLogicalPlan) SetChildren(children ...LogicalPlan) { + p.children = children +} + +// SetChildren implements PhysicalPlan SetChildren interface. +func (p *basePhysicalPlan) SetChildren(children ...PhysicalPlan) { + p.children = children +} + +// SetChild implements LogicalPlan SetChild interface. +func (p *baseLogicalPlan) SetChild(i int, child LogicalPlan) { + p.children[i] = child +} + +// SetChild implements PhysicalPlan SetChild interface. +func (p *basePhysicalPlan) SetChild(i int, child PhysicalPlan) { + p.children[i] = child +} + +// Context implements Plan Context interface. +func (p *basePlan) SCtx() sessionctx.Context { + return p.ctx +} diff --git a/planner/core/plan_to_pb.go b/planner/core/plan_to_pb.go new file mode 100644 index 0000000..ec2c892 --- /dev/null +++ b/planner/core/plan_to_pb.go @@ -0,0 +1,156 @@ +// Copyright 2017 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package core + +import ( + "github.com/pingcap/errors" + "github.com/pingcap/tidb/expression" + "github.com/pingcap/tidb/expression/aggregation" + "github.com/pingcap/tidb/parser/model" + "github.com/pingcap/tidb/sessionctx" + "github.com/pingcap/tidb/table" + "github.com/pingcap/tidb/tablecodec" + "github.com/pingcap/tidb/util/ranger" + "github.com/pingcap/tipb/go-tipb" +) + +// ToPB implements PhysicalPlan ToPB interface. +func (p *basePhysicalPlan) ToPB(_ sessionctx.Context) (*tipb.Executor, error) { + return nil, errors.Errorf("plan %s fails converts to PB", p.basePlan.ExplainID()) +} + +// ToPB implements PhysicalPlan ToPB interface. +func (p *PhysicalHashAgg) ToPB(ctx sessionctx.Context) (*tipb.Executor, error) { + sc := ctx.GetSessionVars().StmtCtx + client := ctx.GetClient() + aggExec := &tipb.Aggregation{ + GroupBy: expression.ExpressionsToPBList(sc, p.GroupByItems, client), + } + for _, aggFunc := range p.AggFuncs { + aggExec.AggFunc = append(aggExec.AggFunc, aggregation.AggFuncToPBExpr(sc, client, aggFunc)) + } + return &tipb.Executor{Tp: tipb.ExecType_TypeAggregation, Aggregation: aggExec}, nil +} + +// ToPB implements PhysicalPlan ToPB interface. 
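+// The filter conditions are converted to protobuf expressions so that the
+// selection can be pushed down and evaluated by the coprocessor.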
+func (p *PhysicalSelection) ToPB(ctx sessionctx.Context) (*tipb.Executor, error) { + sc := ctx.GetSessionVars().StmtCtx + client := ctx.GetClient() + selExec := &tipb.Selection{ + Conditions: expression.ExpressionsToPBList(sc, p.Conditions, client), + } + return &tipb.Executor{Tp: tipb.ExecType_TypeSelection, Selection: selExec}, nil +} + +// ToPB implements PhysicalPlan ToPB interface. +func (p *PhysicalTopN) ToPB(ctx sessionctx.Context) (*tipb.Executor, error) { + sc := ctx.GetSessionVars().StmtCtx + client := ctx.GetClient() + topNExec := &tipb.TopN{ + Limit: p.Count, + } + for _, item := range p.ByItems { + topNExec.OrderBy = append(topNExec.OrderBy, expression.SortByItemToPB(sc, client, item.Expr, item.Desc)) + } + return &tipb.Executor{Tp: tipb.ExecType_TypeTopN, TopN: topNExec}, nil +} + +// ToPB implements PhysicalPlan ToPB interface. +func (p *PhysicalLimit) ToPB(ctx sessionctx.Context) (*tipb.Executor, error) { + limitExec := &tipb.Limit{ + Limit: p.Count, + } + return &tipb.Executor{Tp: tipb.ExecType_TypeLimit, Limit: limitExec}, nil +} + +// ToPB implements PhysicalPlan ToPB interface. +func (p *PhysicalTableScan) ToPB(ctx sessionctx.Context) (*tipb.Executor, error) { + tsExec := &tipb.TableScan{ + TableId: p.Table.ID, + Columns: model.ColumnsToProto(p.Columns, p.Table.PKIsHandle), + Desc: p.Desc, + } + err := SetPBColumnsDefaultValue(ctx, tsExec.Columns, p.Columns) + return &tipb.Executor{Tp: tipb.ExecType_TypeTableScan, TblScan: tsExec}, err +} + +// checkCoverIndex checks whether we can pass unique info to TiKV. We should push it if and only if the length of +// range and index are equal. +func checkCoverIndex(idx *model.IndexInfo, ranges []*ranger.Range) bool { + // If the index is (c1, c2) but the query range only contains c1, it is not a unique get. + if !idx.Unique { + return false + } + for _, rg := range ranges { + if len(rg.LowVal) != len(idx.Columns) { + return false + } + } + return true +} + +func findColumnInfoByID(infos []*model.ColumnInfo, id int64) *model.ColumnInfo { + for _, info := range infos { + if info.ID == id { + return info + } + } + return nil +} + +// ToPB implements PhysicalPlan ToPB interface. +func (p *PhysicalIndexScan) ToPB(ctx sessionctx.Context) (*tipb.Executor, error) { + columns := make([]*model.ColumnInfo, 0, p.schema.Len()) + tableColumns := p.Table.Cols() + for _, col := range p.schema.Columns { + if col.ID == model.ExtraHandleID { + columns = append(columns, model.NewExtraHandleColInfo()) + } else { + columns = append(columns, findColumnInfoByID(tableColumns, col.ID)) + } + } + idxExec := &tipb.IndexScan{ + TableId: p.Table.ID, + IndexId: p.Index.ID, + Columns: model.ColumnsToProto(columns, p.Table.PKIsHandle), + Desc: p.Desc, + } + unique := checkCoverIndex(p.Index, p.Ranges) + idxExec.Unique = &unique + return &tipb.Executor{Tp: tipb.ExecType_TypeIndexScan, IdxScan: idxExec}, nil +} + +// SetPBColumnsDefaultValue sets the default values of tipb.ColumnInfos. 
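+// Strict SQL mode is temporarily disabled while reading each column's origin
+// default value, so defaults that no longer pass strict checks can still be encoded.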
+func SetPBColumnsDefaultValue(ctx sessionctx.Context, pbColumns []*tipb.ColumnInfo, columns []*model.ColumnInfo) error { + for i, c := range columns { + if c.OriginDefaultValue == nil { + continue + } + + sessVars := ctx.GetSessionVars() + originStrict := sessVars.StrictSQLMode + sessVars.StrictSQLMode = false + d, err := table.GetColOriginDefaultValue(ctx, c) + sessVars.StrictSQLMode = originStrict + if err != nil { + return err + } + + pbColumns[i].DefaultVal, err = tablecodec.EncodeValue(sessVars.StmtCtx, nil, d) + if err != nil { + return err + } + } + return nil +} diff --git a/planner/core/plan_to_pb_test.go b/planner/core/plan_to_pb_test.go new file mode 100644 index 0000000..94e493d --- /dev/null +++ b/planner/core/plan_to_pb_test.go @@ -0,0 +1,163 @@ +// Copyright 2018 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package core + +import ( + . "github.com/pingcap/check" + "github.com/pingcap/tidb/expression" + "github.com/pingcap/tidb/parser/model" + "github.com/pingcap/tidb/parser/mysql" + "github.com/pingcap/tidb/types" + "github.com/pingcap/tidb/util/testleak" + "github.com/pingcap/tipb/go-tipb" +) + +var _ = Suite(&testDistsqlSuite{}) + +type testDistsqlSuite struct{} + +func (s *testDistsqlSuite) TestColumnToProto(c *C) { + defer testleak.AfterTest(c)() + // Make sure the Flag is set in tipb.ColumnInfo + tp := types.NewFieldType(mysql.TypeLong) + tp.Flag = 10 + tp.Collate = "utf8_bin" + col := &model.ColumnInfo{ + FieldType: *tp, + } + pc := model.ColumnToProto(col) + expect := &tipb.ColumnInfo{ColumnId: 0, Tp: 3, Collation: mysql.DefaultCollationID, ColumnLen: -1, Decimal: -1, Flag: 10, Elems: []string(nil), DefaultVal: []uint8(nil), PkHandle: false, XXX_unrecognized: []uint8(nil)} + c.Assert(pc, DeepEquals, expect) + + cols := []*model.ColumnInfo{col, col} + pcs := model.ColumnsToProto(cols, false) + for _, v := range pcs { + c.Assert(v.GetFlag(), Equals, int32(10)) + } + pcs = model.ColumnsToProto(cols, true) + for _, v := range pcs { + c.Assert(v.GetFlag(), Equals, int32(10)) + } + + // Make sure we only convert to supported collate. 
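+	// latin1_swedish_ci is not in the supported collation list, so ColumnToProto
+	// is expected to fall back to mysql.DefaultCollationID.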
+ tp = types.NewFieldType(mysql.TypeVarchar) + tp.Flag = 10 + tp.Collate = "latin1_swedish_ci" + col = &model.ColumnInfo{ + FieldType: *tp, + } + pc = model.ColumnToProto(col) + c.Assert(pc.Collation, Equals, int32(mysql.DefaultCollationID)) +} + +func (s *testDistsqlSuite) TestIndexToProto(c *C) { + defer testleak.AfterTest(c)() + cols := []*model.ColumnInfo{ + { + ID: 1, + Name: model.NewCIStr("col1"), + Offset: 1, + }, + { + ID: 2, + Name: model.NewCIStr("col2"), + Offset: 2, + }, + } + cols[0].Flag |= mysql.PriKeyFlag + + idxCols := []*model.IndexColumn{ + { + Name: model.NewCIStr("col1"), + Offset: 1, + Length: 1, + }, + { + Name: model.NewCIStr("col1"), + Offset: 1, + Length: 1, + }, + } + + idxInfos := []*model.IndexInfo{ + { + ID: 1, + Name: model.NewCIStr("idx1"), + Table: model.NewCIStr("test"), + Columns: idxCols, + Unique: true, + Primary: true, + }, + { + ID: 2, + Name: model.NewCIStr("idx2"), + Table: model.NewCIStr("test"), + Columns: idxCols, + Unique: true, + Primary: true, + }, + } + + tbInfo := model.TableInfo{ + ID: 1, + Name: model.NewCIStr("test"), + Columns: cols, + Indices: idxInfos, + PKIsHandle: true, + } + + pIdx := model.IndexToProto(&tbInfo, idxInfos[0]) + c.Assert(pIdx.TableId, Equals, int64(1)) + c.Assert(pIdx.IndexId, Equals, int64(1)) + c.Assert(pIdx.Unique, Equals, true) +} + +func (s *testDistsqlSuite) TestIndexScanToProto(c *C) { + tp := types.NewFieldType(mysql.TypeLong) + tp.Flag = 10 + tp.Collate = "utf8_bin" + + name := model.NewCIStr("a") + col := &model.ColumnInfo{ + ID: 1, + Name: name, + State: model.StatePublic, + FieldType: *tp, + } + idxInfo := &model.IndexInfo{ + ID: 2, + Name: name, + State: model.StatePublic, + Columns: []*model.IndexColumn{ + {Length: types.UnspecifiedLength}, + }, + } + p := new(PhysicalIndexScan) + p.Table = &model.TableInfo{ + ID: 1, + Columns: []*model.ColumnInfo{col}, + Indices: []*model.IndexInfo{idxInfo}, + } + p.Index = idxInfo + p.schema = expression.NewSchema(&expression.Column{ + ID: model.ExtraHandleID, + }) + pbExec, err := p.ToPB(nil) + c.Assert(err, IsNil) + idxScan := pbExec.IdxScan + pbColumn := idxScan.Columns[0] + c.Assert(pbColumn.Tp, Equals, int32(mysql.TypeLonglong)) + c.Assert(pbColumn.ColumnId, Equals, int64(model.ExtraHandleID)) + c.Assert(pbColumn.PkHandle, Equals, true) +} diff --git a/planner/core/planbuilder.go b/planner/core/planbuilder.go new file mode 100644 index 0000000..9ced10c --- /dev/null +++ b/planner/core/planbuilder.go @@ -0,0 +1,984 @@ +// Copyright 2015 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package core + +import ( + "bytes" + "context" + "fmt" + "strings" + + "github.com/pingcap/errors" + "github.com/pingcap/tidb/expression" + "github.com/pingcap/tidb/infoschema" + "github.com/pingcap/tidb/parser/ast" + "github.com/pingcap/tidb/parser/charset" + "github.com/pingcap/tidb/parser/model" + "github.com/pingcap/tidb/parser/mysql" + "github.com/pingcap/tidb/parser/opcode" + "github.com/pingcap/tidb/planner/util" + "github.com/pingcap/tidb/sessionctx" + "github.com/pingcap/tidb/table" + "github.com/pingcap/tidb/types" + driver "github.com/pingcap/tidb/types/parser_driver" +) + +type tableHintInfo struct { + sortMergeJoinTables []hintTableInfo + hashJoinTables []hintTableInfo + indexHintList []indexHintInfo +} + +type hintTableInfo struct { + dbName model.CIStr + tblName model.CIStr + matched bool +} + +type indexHintInfo struct { + dbName model.CIStr + tblName model.CIStr + indexHint *ast.IndexHint +} + +func tableNames2HintTableInfo(ctx sessionctx.Context, hintTables []ast.HintTable) []hintTableInfo { + if len(hintTables) == 0 { + return nil + } + hintTableInfos := make([]hintTableInfo, len(hintTables)) + defaultDBName := model.NewCIStr(ctx.GetSessionVars().CurrentDB) + for i, hintTable := range hintTables { + tableInfo := hintTableInfo{tblName: hintTable.TableName} + if tableInfo.dbName.L == "" { + tableInfo.dbName = defaultDBName + } + hintTableInfos[i] = tableInfo + } + return hintTableInfos +} + +func (info *tableHintInfo) ifPreferMergeJoin(tableNames ...*hintTableInfo) bool { + return info.matchTableName(tableNames, info.sortMergeJoinTables) +} + +func (info *tableHintInfo) ifPreferHashJoin(tableNames ...*hintTableInfo) bool { + return info.matchTableName(tableNames, info.hashJoinTables) +} + +// matchTableName checks whether the hint hit the need. +// Only need either side matches one on the list. +// Even though you can put 2 tables on the list, +// it doesn't mean optimizer will reorder to make them +// join directly. +// Which it joins on with depend on sequence of traverse +// and without reorder, user might adjust themselves. +// This is similar to MySQL hints. +func (info *tableHintInfo) matchTableName(tables []*hintTableInfo, hintTables []hintTableInfo) bool { + hintMatched := false + for _, table := range tables { + for i, curEntry := range hintTables { + if table == nil { + continue + } + if curEntry.dbName.L == table.dbName.L && curEntry.tblName.L == table.tblName.L { + hintTables[i].matched = true + hintMatched = true + break + } + } + } + return hintMatched +} + +func restore2JoinHint(hintType string, hintTables []hintTableInfo) string { + buffer := bytes.NewBufferString("/*+ ") + buffer.WriteString(strings.ToUpper(hintType)) + buffer.WriteString("(") + for i, table := range hintTables { + buffer.WriteString(table.tblName.L) + if i < len(hintTables)-1 { + buffer.WriteString(", ") + } + } + buffer.WriteString(") */") + return buffer.String() +} + +func extractUnmatchedTables(hintTables []hintTableInfo) []string { + var tableNames []string + for _, table := range hintTables { + if !table.matched { + tableNames = append(tableNames, table.tblName.O) + } + } + return tableNames +} + +// clauseCode indicates in which clause the column is currently. 
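+// It is mainly used to build user-facing error messages via clauseMsg below,
+// for example "Unknown column 'c' in 'where clause'".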
+type clauseCode int + +const ( + unknowClause clauseCode = iota + fieldList + havingClause + onClause + orderByClause + whereClause + groupByClause + showStatement + globalOrderByClause +) + +var clauseMsg = map[clauseCode]string{ + unknowClause: "", + fieldList: "field list", + havingClause: "having clause", + onClause: "on clause", + orderByClause: "order clause", + whereClause: "where clause", + groupByClause: "group statement", + showStatement: "show statement", + globalOrderByClause: "global ORDER clause", +} + +// PlanBuilder builds Plan from an ast.Node. +// It just builds the ast node straightforwardly. +type PlanBuilder struct { + ctx sessionctx.Context + is infoschema.InfoSchema + // colMapper stores the column that must be pre-resolved. + colMapper map[*ast.ColumnNameExpr]int + + tableHintInfo []tableHintInfo + // optFlag indicates the flags of the optimizer rules. + optFlag uint64 + + curClause clauseCode + + // rewriterPool stores the expressionRewriter we have created to reuse it if it has been released. + // rewriterCounter counts how many rewriter is being used. + rewriterPool []*expressionRewriter + rewriterCounter int + + // inStraightJoin represents whether the current "SELECT" statement has + // "STRAIGHT_JOIN" option. + inStraightJoin bool + + // handleHelper records the handle column position for tables. Delete/Update/SelectLock/UnionScan may need this information. + // It collects the information by the following procedure: + // Since we build the plan tree from bottom to top, we maintain a stack to record the current handle information. + // If it's a dataSource/tableDual node, we create a new map. + // If it's a aggregation, we pop the map and push a nil map since no handle information left. + // If it's a union, we pop all children's and push a nil map. + // If it's a join, we pop its children's out then merge them and push the new map to stack. + // If we meet a subquery, it's clearly that it's a independent problem so we just pop one map out when we finish building the subquery. + handleHelper *handleColHelper +} + +type handleColHelper struct { + id2HandleMapStack []map[int64][]*expression.Column + stackTail int +} + +func (hch *handleColHelper) popMap() map[int64][]*expression.Column { + ret := hch.id2HandleMapStack[hch.stackTail-1] + hch.stackTail-- + hch.id2HandleMapStack = hch.id2HandleMapStack[:hch.stackTail] + return ret +} + +func (hch *handleColHelper) pushMap(m map[int64][]*expression.Column) { + hch.id2HandleMapStack = append(hch.id2HandleMapStack, m) + hch.stackTail++ +} + +func (hch *handleColHelper) mergeAndPush(m1, m2 map[int64][]*expression.Column) { + newMap := make(map[int64][]*expression.Column) + for k, v := range m1 { + newMap[k] = make([]*expression.Column, len(v)) + copy(newMap[k], v) + } + for k, v := range m2 { + if _, ok := newMap[k]; ok { + newMap[k] = append(newMap[k], v...) + } else { + newMap[k] = make([]*expression.Column, len(v)) + copy(newMap[k], v) + } + } + hch.pushMap(newMap) +} + +func (hch *handleColHelper) tailMap() map[int64][]*expression.Column { + return hch.id2HandleMapStack[hch.stackTail-1] +} + +// GetOptFlag gets the optFlag of the PlanBuilder. +func (b *PlanBuilder) GetOptFlag() uint64 { + return b.optFlag +} + +// NewPlanBuilder creates a new PlanBuilder. 
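+// The builder resolves names against the given InfoSchema and allocates plan
+// and column IDs from the session context.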
+func NewPlanBuilder(sctx sessionctx.Context, is infoschema.InfoSchema) *PlanBuilder { + return &PlanBuilder{ + ctx: sctx, + is: is, + colMapper: make(map[*ast.ColumnNameExpr]int), + handleHelper: &handleColHelper{id2HandleMapStack: make([]map[int64][]*expression.Column, 0)}, + } +} + +// Build builds the ast node to a Plan. +func (b *PlanBuilder) Build(ctx context.Context, node ast.Node) (Plan, error) { + b.optFlag = flagPrunColumns + switch x := node.(type) { + case *ast.AdminStmt: + return b.buildAdmin(ctx, x) + case *ast.DeleteStmt: + return b.buildDelete(ctx, x) + case *ast.ExplainStmt: + return b.buildExplain(ctx, x) + case *ast.InsertStmt: + return b.buildInsert(ctx, x) + case *ast.SelectStmt: + return b.buildSelect(ctx, x) + case *ast.ShowStmt: + return b.buildShow(ctx, x) + case *ast.SetStmt: + return b.buildSet(ctx, x) + case *ast.AnalyzeTableStmt: + return b.buildAnalyze(x) + case *ast.UseStmt, *ast.BeginStmt, *ast.CommitStmt, *ast.RollbackStmt: + return b.buildSimple(node.(ast.StmtNode)) + case ast.DDLNode: + return b.buildDDL(ctx, x) + } + return nil, ErrUnsupportedType.GenWithStack("Unsupported type %T", node) +} + +func (b *PlanBuilder) buildSet(ctx context.Context, v *ast.SetStmt) (Plan, error) { + p := &Set{} + for _, vars := range v.Variables { + assign := &expression.VarAssignment{ + Name: vars.Name, + IsGlobal: vars.IsGlobal, + IsSystem: vars.IsSystem, + } + if _, ok := vars.Value.(*ast.DefaultExpr); !ok { + if cn, ok2 := vars.Value.(*ast.ColumnNameExpr); ok2 && cn.Name.Table.L == "" { + // Convert column name expression to string value expression. + vars.Value = ast.NewValueExpr(cn.Name.Name.O) + } + mockTablePlan := LogicalTableDual{}.Init(b.ctx) + var err error + assign.Expr, _, err = b.rewrite(ctx, vars.Value, mockTablePlan, nil, true) + if err != nil { + return nil, err + } + } else { + assign.IsDefault = true + } + p.VarAssigns = append(p.VarAssigns, assign) + } + return p, nil +} + +// detectSelectAgg detects an aggregate function or GROUP BY clause. 
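+// A SELECT needs an aggregation operator if it has a GROUP BY clause, or an
+// aggregate function in its field list, HAVING clause or ORDER BY items.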
+func (b *PlanBuilder) detectSelectAgg(sel *ast.SelectStmt) bool { + if sel.GroupBy != nil { + return true + } + for _, f := range sel.Fields.Fields { + if ast.HasAggFlag(f.Expr) { + return true + } + } + if sel.Having != nil { + if ast.HasAggFlag(sel.Having.Expr) { + return true + } + } + if sel.OrderBy != nil { + for _, item := range sel.OrderBy.Items { + if ast.HasAggFlag(item.Expr) { + return true + } + } + } + return false +} + +func getPathByIndexName(paths []*util.AccessPath, idxName model.CIStr, tblInfo *model.TableInfo) *util.AccessPath { + var tablePath *util.AccessPath + for _, path := range paths { + if path.IsTablePath { + tablePath = path + continue + } + if path.Index.Name.L == idxName.L { + return path + } + } + if isPrimaryIndex(idxName) && tblInfo.PKIsHandle { + return tablePath + } + return nil +} + +func isPrimaryIndex(indexName model.CIStr) bool { + return indexName.L == "primary" +} + +func (b *PlanBuilder) getPossibleAccessPaths(indexHints []*ast.IndexHint, tbl table.Table, dbName, tblName model.CIStr) ([]*util.AccessPath, error) { + tblInfo := tbl.Meta() + publicPaths := make([]*util.AccessPath, 0, len(tblInfo.Indices)+2) + publicPaths = append(publicPaths, &util.AccessPath{IsTablePath: true}) + for _, index := range tblInfo.Indices { + if index.State == model.StatePublic { + publicPaths = append(publicPaths, &util.AccessPath{Index: index}) + } + } + + hasScanHint, hasUseOrForce := false, false + available := make([]*util.AccessPath, 0, len(publicPaths)) + ignored := make([]*util.AccessPath, 0, len(publicPaths)) + + // Extract comment-style index hint like /*+ INDEX(t, idx1, idx2) */. + indexHintsLen := len(indexHints) + if hints := b.TableHints(); hints != nil { + for _, hint := range hints.indexHintList { + if hint.dbName.L == dbName.L && hint.tblName.L == tblName.L { + indexHints = append(indexHints, hint.indexHint) + } + } + } + + for i, hint := range indexHints { + if hint.HintScope != ast.HintForScan { + continue + } + + hasScanHint = true + + // It is syntactically valid to omit index_list for USE INDEX, which means “use no indexes”. + // Omitting index_list for FORCE INDEX or IGNORE INDEX is a syntax error. + // See https://dev.mysql.com/doc/refman/8.0/en/index-hints.html. + if hint.IndexNames == nil && hint.HintType != ast.HintIgnore { + if path := getTablePath(publicPaths); path != nil { + hasUseOrForce = true + path.Forced = true + available = append(available, path) + } + } + for _, idxName := range hint.IndexNames { + path := getPathByIndexName(publicPaths, idxName, tblInfo) + if path == nil { + err := ErrKeyDoesNotExist.GenWithStackByArgs(idxName, tblInfo.Name) + // if hint is from comment-style sql hints, we should throw a warning instead of error. + if i < indexHintsLen { + return nil, err + } + b.ctx.GetSessionVars().StmtCtx.AppendWarning(err) + continue + } + if hint.HintType == ast.HintIgnore { + // Collect all the ignored index hints. + ignored = append(ignored, path) + continue + } + // Currently we don't distinguish between "FORCE" and "USE" because + // our cost estimation is not reliable. + hasUseOrForce = true + path.Forced = true + available = append(available, path) + } + } + + if !hasScanHint || !hasUseOrForce { + available = publicPaths + } + + available = removeIgnoredPaths(available, ignored, tblInfo) + + // If we have got "FORCE" or "USE" index hint but got no available index, + // we have to use table scan. 
+ if len(available) == 0 { + available = append(available, &util.AccessPath{IsTablePath: true}) + } + return available, nil +} + +func removeIgnoredPaths(paths, ignoredPaths []*util.AccessPath, tblInfo *model.TableInfo) []*util.AccessPath { + if len(ignoredPaths) == 0 { + return paths + } + remainedPaths := make([]*util.AccessPath, 0, len(paths)) + for _, path := range paths { + if path.IsTablePath || getPathByIndexName(ignoredPaths, path.Index.Name, tblInfo) == nil { + remainedPaths = append(remainedPaths, path) + } + } + return remainedPaths +} + +func (b *PlanBuilder) buildAdmin(ctx context.Context, as *ast.AdminStmt) (Plan, error) { + var ret Plan + var err error + switch as.Tp { + case ast.AdminShowDDL: + p := &ShowDDL{} + p.setSchemaAndNames(buildShowDDLFields()) + ret = p + case ast.AdminShowDDLJobs: + p := LogicalShowDDLJobs{JobNumber: as.JobNumber}.Init(b.ctx) + p.setSchemaAndNames(buildShowDDLJobsFields()) + for _, col := range p.schema.Columns { + col.UniqueID = b.ctx.GetSessionVars().AllocPlanColumnID() + } + ret = p + if as.Where != nil { + ret, err = b.buildSelection(ctx, p, as.Where, nil) + if err != nil { + return nil, err + } + } + default: + return nil, ErrUnsupportedType.GenWithStack("Unsupported ast.AdminStmt(%T) for buildAdmin", as) + } + return ret, nil +} + +// getColsInfo returns the info of index columns, normal columns and primary key. +func getColsInfo(tn *ast.TableName) (indicesInfo []*model.IndexInfo, colsInfo []*model.ColumnInfo, pkCol *model.ColumnInfo) { + tbl := tn.TableInfo + for _, col := range tbl.Columns { + if tbl.PKIsHandle && mysql.HasPriKeyFlag(col.Flag) { + pkCol = col + } else { + colsInfo = append(colsInfo, col) + } + } + for _, idx := range tn.TableInfo.Indices { + if idx.State == model.StatePublic { + indicesInfo = append(indicesInfo, idx) + } + } + return +} + +func (b *PlanBuilder) buildAnalyze(as *ast.AnalyzeTableStmt) (Plan, error) { + p := &Analyze{} + for _, tbl := range as.TableNames { + idxInfo, colInfo, pkInfo := getColsInfo(tbl) + for _, idx := range idxInfo { + info := analyzeInfo{DBName: tbl.Schema.O, TableName: tbl.Name.O, PhysicalTableID: tbl.TableInfo.ID} + p.IdxTasks = append(p.IdxTasks, AnalyzeIndexTask{ + IndexInfo: idx, + analyzeInfo: info, + TblInfo: tbl.TableInfo, + }) + } + if len(colInfo) > 0 || pkInfo != nil { + info := analyzeInfo{DBName: tbl.Schema.O, TableName: tbl.Name.O, PhysicalTableID: tbl.TableInfo.ID} + p.ColTasks = append(p.ColTasks, AnalyzeColumnsTask{ + PKInfo: pkInfo, + ColsInfo: colInfo, + analyzeInfo: info, + TblInfo: tbl.TableInfo, + }) + } + } + return p, nil +} + +func buildShowDDLFields() (*expression.Schema, types.NameSlice) { + schema := newColumnsWithNames(6) + schema.Append(buildColumnWithName("", "SCHEMA_VER", mysql.TypeLonglong, 4)) + schema.Append(buildColumnWithName("", "OWNER_ID", mysql.TypeVarchar, 64)) + schema.Append(buildColumnWithName("", "OWNER_ADDRESS", mysql.TypeVarchar, 32)) + schema.Append(buildColumnWithName("", "RUNNING_JOBS", mysql.TypeVarchar, 256)) + schema.Append(buildColumnWithName("", "SELF_ID", mysql.TypeVarchar, 64)) + schema.Append(buildColumnWithName("", "QUERY", mysql.TypeVarchar, 256)) + + return schema.col2Schema(), schema.names +} + +func buildShowDDLJobsFields() (*expression.Schema, types.NameSlice) { + schema := newColumnsWithNames(11) + schema.Append(buildColumnWithName("", "JOB_ID", mysql.TypeLonglong, 4)) + schema.Append(buildColumnWithName("", "DB_NAME", mysql.TypeVarchar, 64)) + schema.Append(buildColumnWithName("", "TABLE_NAME", mysql.TypeVarchar, 64)) + 
schema.Append(buildColumnWithName("", "JOB_TYPE", mysql.TypeVarchar, 64)) + schema.Append(buildColumnWithName("", "SCHEMA_STATE", mysql.TypeVarchar, 64)) + schema.Append(buildColumnWithName("", "SCHEMA_ID", mysql.TypeLonglong, 4)) + schema.Append(buildColumnWithName("", "TABLE_ID", mysql.TypeLonglong, 4)) + schema.Append(buildColumnWithName("", "ROW_COUNT", mysql.TypeLonglong, 4)) + schema.Append(buildColumnWithName("", "START_TIME", mysql.TypeVarchar, 64)) + schema.Append(buildColumnWithName("", "END_TIME", mysql.TypeVarchar, 64)) + schema.Append(buildColumnWithName("", "STATE", mysql.TypeVarchar, 64)) + return schema.col2Schema(), schema.names +} + +func buildColumnWithName(tableName, name string, tp byte, size int) (*expression.Column, *types.FieldName) { + cs, cl := types.DefaultCharsetForType(tp) + flag := mysql.UnsignedFlag + if tp == mysql.TypeVarchar || tp == mysql.TypeBlob { + cs = charset.CharsetUTF8MB4 + cl = charset.CollationUTF8MB4 + flag = 0 + } + + fieldType := &types.FieldType{ + Charset: cs, + Collate: cl, + Tp: tp, + Flen: size, + Flag: flag, + } + return &expression.Column{ + RetType: fieldType, + }, &types.FieldName{DBName: model.NewCIStr(infoschema.Name), TblName: model.NewCIStr(tableName), ColName: model.NewCIStr(name)} +} + +type columnsWithNames struct { + cols []*expression.Column + names types.NameSlice +} + +func newColumnsWithNames(cap int) *columnsWithNames { + return &columnsWithNames{ + cols: make([]*expression.Column, 0, 2), + names: make(types.NameSlice, 0, 2), + } +} + +func (cwn *columnsWithNames) Append(col *expression.Column, name *types.FieldName) { + cwn.cols = append(cwn.cols, col) + cwn.names = append(cwn.names, name) +} + +func (cwn *columnsWithNames) col2Schema() *expression.Schema { + return expression.NewSchema(cwn.cols...) +} + +// splitWhere split a where expression to a list of AND conditions. +func splitWhere(where ast.ExprNode) []ast.ExprNode { + var conditions []ast.ExprNode + switch x := where.(type) { + case nil: + case *ast.BinaryOperationExpr: + if x.Op == opcode.LogicAnd { + conditions = append(conditions, splitWhere(x.L)...) + conditions = append(conditions, splitWhere(x.R)...) + } else { + conditions = append(conditions, x) + } + case *ast.ParenthesesExpr: + conditions = append(conditions, splitWhere(x.Expr)...) + default: + conditions = append(conditions, where) + } + return conditions +} + +func (b *PlanBuilder) buildShow(ctx context.Context, show *ast.ShowStmt) (Plan, error) { + p := LogicalShow{ + ShowContents: ShowContents{ + Tp: show.Tp, + DBName: show.DBName, + Table: show.Table, + Column: show.Column, + IndexName: show.IndexName, + Flag: show.Flag, + Full: show.Full, + IfNotExists: show.IfNotExists, + GlobalScope: show.GlobalScope, + }, + }.Init(b.ctx) + switch show.Tp { + case ast.ShowTables: + if p.DBName == "" { + return nil, ErrNoDB + } + } + schema, names := buildShowSchema(show) + p.SetSchema(schema) + p.names = names + for _, col := range p.schema.Columns { + col.UniqueID = b.ctx.GetSessionVars().AllocPlanColumnID() + } + var err error + var np LogicalPlan + np = p + if show.Where != nil { + np, err = b.buildSelection(ctx, np, show.Where, nil) + if err != nil { + return nil, err + } + } + if np != p { + b.optFlag |= flagEliminateProjection + fieldsLen := len(p.schema.Columns) + proj := LogicalProjection{Exprs: make([]expression.Expression, 0, fieldsLen)}.Init(b.ctx) + schema := expression.NewSchema(make([]*expression.Column, 0, fieldsLen)...) 
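+		// Re-expose every show column through the projection, cloning each column
+		// with a freshly allocated unique ID.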
+ for _, col := range p.schema.Columns { + proj.Exprs = append(proj.Exprs, col) + newCol := col.Clone().(*expression.Column) + newCol.UniqueID = b.ctx.GetSessionVars().AllocPlanColumnID() + schema.Append(newCol) + } + proj.SetSchema(schema) + proj.SetChildren(np) + proj.SetOutputNames(np.OutputNames()) + return proj, nil + } + return p, nil +} + +func (b *PlanBuilder) buildSimple(node ast.StmtNode) (Plan, error) { + p := &Simple{Statement: node} + + switch raw := node.(type) { + case *ast.UseStmt: + if raw.DBName == "" { + return nil, ErrNoDB + } + } + return p, nil +} + +func (b *PlanBuilder) getDefaultValue(col *table.Column) (*expression.Constant, error) { + value, err := table.GetColDefaultValue(b.ctx, col.ToInfo()) + if err != nil { + return nil, err + } + return &expression.Constant{Value: value, RetType: &col.FieldType}, nil +} + +func (b *PlanBuilder) findDefaultValue(cols []*table.Column, name *ast.ColumnName) (*expression.Constant, error) { + for _, col := range cols { + if col.Name.L == name.Name.L { + return b.getDefaultValue(col) + } + } + return nil, ErrUnknownColumn.GenWithStackByArgs(name.Name.O, "field_list") +} + +func (b *PlanBuilder) buildInsert(ctx context.Context, insert *ast.InsertStmt) (Plan, error) { + ts, ok := insert.Table.TableRefs.Left.(*ast.TableSource) + if !ok { + return nil, infoschema.ErrTableNotExists.GenWithStackByArgs() + } + tn, ok := ts.Source.(*ast.TableName) + if !ok { + return nil, infoschema.ErrTableNotExists.GenWithStackByArgs() + } + tableInfo := tn.TableInfo + // Build Schema with DBName otherwise ColumnRef with DBName cannot match any Column in Schema. + schema, names := expression.TableInfo2SchemaAndNames(b.ctx, tn.Schema, tableInfo) + tableInPlan, ok := b.is.TableByID(tableInfo.ID) + if !ok { + return nil, errors.Errorf("Can't get table %s.", tableInfo.Name.O) + } + + insertPlan := Insert{ + Table: tableInPlan, + Columns: insert.Columns, + tableSchema: schema, + tableColNames: names, + IsReplace: insert.IsReplace, + }.Init(b.ctx) + + mockTablePlan := LogicalTableDual{}.Init(b.ctx) + mockTablePlan.SetSchema(insertPlan.tableSchema) + mockTablePlan.names = insertPlan.tableColNames + + checkRefColumn := func(n ast.Node) ast.Node { + if insertPlan.NeedFillDefaultValue { + return n + } + switch n.(type) { + case *ast.ColumnName, *ast.ColumnNameExpr: + insertPlan.NeedFillDefaultValue = true + } + return n + } + + if len(insert.Setlist) > 0 { + // Branch for `INSERT ... SET ...`. + err := b.buildSetValuesOfInsert(ctx, insert, insertPlan, mockTablePlan, checkRefColumn) + if err != nil { + return nil, err + } + } else if len(insert.Lists) > 0 { + // Branch for `INSERT ... VALUES ...`. + err := b.buildValuesListOfInsert(ctx, insert, insertPlan, mockTablePlan, checkRefColumn) + if err != nil { + return nil, err + } + } else { + // Branch for `INSERT ... SELECT ...`. + err := b.buildSelectPlanOfInsert(ctx, insert, insertPlan) + if err != nil { + return nil, err + } + } + + err := insertPlan.ResolveIndices() + return insertPlan, err +} + +func (b *PlanBuilder) getAffectCols(insertStmt *ast.InsertStmt, insertPlan *Insert) (affectedValuesCols []*table.Column, err error) { + if len(insertStmt.Columns) > 0 { + // This branch is for the following scenarios: + // 1. `INSERT INTO tbl_name (col_name [, col_name] ...) {VALUES | VALUE} (value_list) [, (value_list)] ...`, + // 2. `INSERT INTO tbl_name (col_name [, col_name] ...) SELECT ...`. 
+ colName := make([]string, 0, len(insertStmt.Columns)) + for _, col := range insertStmt.Columns { + colName = append(colName, col.Name.O) + } + affectedValuesCols, err = table.FindCols(insertPlan.Table.Cols(), colName, insertPlan.Table.Meta().PKIsHandle) + if err != nil { + return nil, err + } + + } else if len(insertStmt.Setlist) == 0 { + // This branch is for the following scenarios: + // 1. `INSERT INTO tbl_name {VALUES | VALUE} (value_list) [, (value_list)] ...`, + // 2. `INSERT INTO tbl_name SELECT ...`. + affectedValuesCols = insertPlan.Table.Cols() + } + return affectedValuesCols, nil +} + +func (b *PlanBuilder) buildSetValuesOfInsert(ctx context.Context, insert *ast.InsertStmt, insertPlan *Insert, mockTablePlan *LogicalTableDual, checkRefColumn func(n ast.Node) ast.Node) error { + colNames := make([]string, 0, len(insert.Setlist)) + exprCols := make([]*expression.Column, 0, len(insert.Setlist)) + for _, assign := range insert.Setlist { + idx, err := expression.FindFieldName(insertPlan.tableColNames, assign.Column) + if err != nil { + return err + } + if idx < 0 { + return errors.Errorf("Can't find column %s", assign.Column) + } + colNames = append(colNames, assign.Column.Name.L) + exprCols = append(exprCols, insertPlan.tableSchema.Columns[idx]) + } + + insertPlan.AllAssignmentsAreConstant = true + for i, assign := range insert.Setlist { + defaultExpr := extractDefaultExpr(assign.Expr) + if defaultExpr != nil { + defaultExpr.Name = assign.Column + } + expr, _, err := b.rewriteWithPreprocess(ctx, assign.Expr, mockTablePlan, nil, true, checkRefColumn) + if err != nil { + return err + } + if insertPlan.AllAssignmentsAreConstant { + _, isConstant := expr.(*expression.Constant) + insertPlan.AllAssignmentsAreConstant = isConstant + } + + insertPlan.SetList = append(insertPlan.SetList, &expression.Assignment{ + Col: exprCols[i], + ColName: model.NewCIStr(colNames[i]), + Expr: expr, + }) + } + return nil +} + +func (b *PlanBuilder) buildValuesListOfInsert(ctx context.Context, insert *ast.InsertStmt, insertPlan *Insert, mockTablePlan *LogicalTableDual, checkRefColumn func(n ast.Node) ast.Node) error { + affectedValuesCols, err := b.getAffectCols(insert, insertPlan) + if err != nil { + return err + } + + // If value_list and col_list are empty and we have a generated column, we can still write data to this table. + // For example, insert into t values(); can be executed successfully if t has a generated column. + if len(insert.Columns) > 0 || len(insert.Lists[0]) > 0 { + // If value_list or col_list is not empty, the length of value_list should be the same with that of col_list. + if len(insert.Lists[0]) != len(affectedValuesCols) { + fmt.Printf("%v %v\n", len(insert.Lists[0]), affectedValuesCols) + return ErrWrongValueCountOnRow.GenWithStackByArgs(1) + } + } + + insertPlan.AllAssignmentsAreConstant = true + totalTableCols := insertPlan.Table.Cols() + for i, valuesItem := range insert.Lists { + // The length of all the value_list should be the same. + // "insert into t values (), ()" is valid. + // "insert into t values (), (1)" is not valid. + // "insert into t values (1), ()" is not valid. + // "insert into t values (1,2), (1)" is not valid. 
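+		// Comparing each row with the previous one is enough to ensure all rows
+		// have the same length; the error reports the 1-based row number.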
+ if i > 0 && len(insert.Lists[i-1]) != len(insert.Lists[i]) { + return ErrWrongValueCountOnRow.GenWithStackByArgs(i + 1) + } + exprList := make([]expression.Expression, 0, len(valuesItem)) + for j, valueItem := range valuesItem { + var expr expression.Expression + var err error + switch x := valueItem.(type) { + case *ast.DefaultExpr: + if x.Name != nil { + expr, err = b.findDefaultValue(totalTableCols, x.Name) + } else { + expr, err = b.getDefaultValue(affectedValuesCols[j]) + } + case *driver.ValueExpr: + expr = &expression.Constant{ + Value: x.Datum, + RetType: &x.Type, + } + default: + expr, _, err = b.rewriteWithPreprocess(ctx, valueItem, mockTablePlan, nil, true, checkRefColumn) + } + if err != nil { + return err + } + if insertPlan.AllAssignmentsAreConstant { + _, isConstant := expr.(*expression.Constant) + insertPlan.AllAssignmentsAreConstant = isConstant + } + exprList = append(exprList, expr) + } + insertPlan.Lists = append(insertPlan.Lists, exprList) + } + return nil +} + +func (b *PlanBuilder) buildSelectPlanOfInsert(ctx context.Context, insert *ast.InsertStmt, insertPlan *Insert) error { + affectedValuesCols, err := b.getAffectCols(insert, insertPlan) + if err != nil { + return err + } + selectPlan, err := b.Build(ctx, insert.Select) + if err != nil { + return err + } + + // Check to guarantee that the length of the row returned by select is equal to that of affectedValuesCols. + if selectPlan.Schema().Len() != len(affectedValuesCols) { + return ErrWrongValueCountOnRow.GenWithStackByArgs(1) + } + + names := selectPlan.OutputNames() + insertPlan.SelectPlan, err = DoOptimize(ctx, b.optFlag, selectPlan.(LogicalPlan)) + if err != nil { + return err + } + + // schema4NewRow is the schema for the newly created data record based on + // the result of the select statement. + schema4NewRow := expression.NewSchema(make([]*expression.Column, len(insertPlan.Table.Cols()))...) + names4NewRow := make(types.NameSlice, len(insertPlan.Table.Cols())) + // TODO: don't clone it. 
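+	// Copy each selected column into the position of its target column and use
+	// the target column's field type, so the new row matches the table layout.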
+ for i, selCol := range insertPlan.SelectPlan.Schema().Columns { + ordinal := affectedValuesCols[i].Offset + schema4NewRow.Columns[ordinal] = &expression.Column{} + *schema4NewRow.Columns[ordinal] = *selCol + + schema4NewRow.Columns[ordinal].RetType = &types.FieldType{} + *schema4NewRow.Columns[ordinal].RetType = affectedValuesCols[i].FieldType + + names4NewRow[ordinal] = names[i] + } + for i := range schema4NewRow.Columns { + if schema4NewRow.Columns[i] == nil { + schema4NewRow.Columns[i] = &expression.Column{UniqueID: insertPlan.ctx.GetSessionVars().AllocPlanColumnID()} + names4NewRow[i] = types.EmptyName + } + } + return nil +} + +func (b *PlanBuilder) buildDDL(ctx context.Context, node ast.DDLNode) (Plan, error) { + p := &DDL{Statement: node} + return p, nil +} + +func (b *PlanBuilder) buildExplainPlan(targetPlan Plan, format string, execStmt ast.StmtNode) (Plan, error) { + p := &Explain{ + TargetPlan: targetPlan, + Format: format, + ExecStmt: execStmt, + } + p.ctx = b.ctx + return p, p.prepareSchema() +} + +func (b *PlanBuilder) buildExplain(ctx context.Context, explain *ast.ExplainStmt) (Plan, error) { + if show, ok := explain.Stmt.(*ast.ShowStmt); ok { + return b.buildShow(ctx, show) + } + targetPlan, _, err := OptimizeAstNode(ctx, b.ctx, explain.Stmt, b.is) + if err != nil { + return nil, err + } + + return b.buildExplainPlan(targetPlan, explain.Format, explain.Stmt) +} + +func buildShowWarningsSchema() (*expression.Schema, types.NameSlice) { + tblName := "WARNINGS" + schema := newColumnsWithNames(3) + schema.Append(buildColumnWithName(tblName, "Level", mysql.TypeVarchar, 64)) + schema.Append(buildColumnWithName(tblName, "Code", mysql.TypeLong, 19)) + schema.Append(buildColumnWithName(tblName, "Message", mysql.TypeVarchar, 64)) + return schema.col2Schema(), schema.names +} + +// buildShowSchema builds column info for ShowStmt including column name and type. +func buildShowSchema(s *ast.ShowStmt) (schema *expression.Schema, outputNames []*types.FieldName) { + var names []string + var ftypes []byte + switch s.Tp { + case ast.ShowWarnings, ast.ShowErrors: + return buildShowWarningsSchema() + case ast.ShowDatabases: + names = []string{"Database"} + case ast.ShowTables: + names = []string{fmt.Sprintf("Tables_in_%s", s.DBName)} + if s.Full { + names = append(names, "Table_type") + } + case ast.ShowVariables: + names = []string{"Variable_name", "Value"} + case ast.ShowCreateTable: + names = []string{"Table", "Create Table"} + case ast.ShowCreateDatabase: + names = []string{"Database", "Create Database"} + } + + schema = expression.NewSchema(make([]*expression.Column, 0, len(names))...) + outputNames = make([]*types.FieldName, 0, len(names)) + for i := range names { + col := &expression.Column{} + outputNames = append(outputNames, &types.FieldName{ColName: model.NewCIStr(names[i])}) + // User varchar as the default return column type. + tp := mysql.TypeVarchar + if len(ftypes) != 0 && ftypes[i] != mysql.TypeUnspecified { + tp = ftypes[i] + } + fieldType := types.NewFieldType(tp) + fieldType.Flen, fieldType.Decimal = mysql.GetDefaultFieldLengthAndDecimal(tp) + fieldType.Charset, fieldType.Collate = types.DefaultCharsetForType(tp) + col.RetType = fieldType + schema.Append(col) + } + return +} diff --git a/planner/core/planbuilder_test.go b/planner/core/planbuilder_test.go new file mode 100644 index 0000000..afb9ab2 --- /dev/null +++ b/planner/core/planbuilder_test.go @@ -0,0 +1,109 @@ +// Copyright 2017 PingCAP, Inc. 
+// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package core + +import ( + "context" + + . "github.com/pingcap/check" + "github.com/pingcap/tidb/expression" + "github.com/pingcap/tidb/parser/ast" + "github.com/pingcap/tidb/parser/model" + "github.com/pingcap/tidb/planner/util" + "github.com/pingcap/tidb/types" +) + +var _ = Suite(&testPlanBuilderSuite{}) + +func (s *testPlanBuilderSuite) SetUpSuite(c *C) { +} + +type testPlanBuilderSuite struct { +} + +func (s *testPlanBuilderSuite) TestShow(c *C) { + node := &ast.ShowStmt{} + tps := []ast.ShowStmtType{ + ast.ShowDatabases, + ast.ShowTables, + ast.ShowWarnings, + ast.ShowVariables, + ast.ShowCreateTable, + ast.ShowCreateDatabase, + } + for _, tp := range tps { + node.Tp = tp + schema, _ := buildShowSchema(node) + for _, col := range schema.Columns { + c.Assert(col.RetType.Flen, Greater, 0) + } + } +} + +func (s *testPlanBuilderSuite) TestGetPathByIndexName(c *C) { + tblInfo := &model.TableInfo{ + Indices: make([]*model.IndexInfo, 0), + PKIsHandle: true, + } + + accessPath := []*util.AccessPath{ + {IsTablePath: true}, + {Index: &model.IndexInfo{Name: model.NewCIStr("idx")}}, + } + + path := getPathByIndexName(accessPath, model.NewCIStr("idx"), tblInfo) + c.Assert(path, NotNil) + c.Assert(path, Equals, accessPath[1]) + + path = getPathByIndexName(accessPath, model.NewCIStr("primary"), tblInfo) + c.Assert(path, NotNil) + c.Assert(path, Equals, accessPath[0]) + + path = getPathByIndexName(accessPath, model.NewCIStr("not exists"), tblInfo) + c.Assert(path, IsNil) + + tblInfo = &model.TableInfo{ + Indices: make([]*model.IndexInfo, 0), + PKIsHandle: false, + } + + path = getPathByIndexName(accessPath, model.NewCIStr("primary"), tblInfo) + c.Assert(path, IsNil) +} + +func (s *testPlanBuilderSuite) TestRewriterPool(c *C) { + builder := NewPlanBuilder(MockContext(), nil) + + // Make sure PlanBuilder.getExpressionRewriter() provides clean rewriter from pool. + // First, pick one rewriter from the pool and make it dirty. + builder.rewriterCounter++ + dirtyRewriter := builder.getExpressionRewriter(context.TODO(), nil) + dirtyRewriter.asScalar = true + dirtyRewriter.aggrMap = make(map[*ast.AggregateFuncExpr]int) + dirtyRewriter.preprocess = func(ast.Node) ast.Node { return nil } + dirtyRewriter.insertPlan = &Insert{} + dirtyRewriter.ctxStack = make([]expression.Expression, 2) + dirtyRewriter.ctxNameStk = make([]*types.FieldName, 2) + builder.rewriterCounter-- + // Then, pick again and check if it's cleaned up. + builder.rewriterCounter++ + cleanRewriter := builder.getExpressionRewriter(context.TODO(), nil) + c.Assert(cleanRewriter, Equals, dirtyRewriter) // Rewriter should be reused. 
+ c.Assert(cleanRewriter.asScalar, Equals, false) + c.Assert(cleanRewriter.aggrMap, IsNil) + c.Assert(cleanRewriter.preprocess, IsNil) + c.Assert(cleanRewriter.insertPlan, IsNil) + c.Assert(len(cleanRewriter.ctxStack), Equals, 0) + builder.rewriterCounter-- +} diff --git a/planner/core/preprocess.go b/planner/core/preprocess.go new file mode 100644 index 0000000..1a249d2 --- /dev/null +++ b/planner/core/preprocess.go @@ -0,0 +1,539 @@ +// Copyright 2015 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package core + +import ( + "fmt" + "math" + "strings" + + "github.com/pingcap/errors" + "github.com/pingcap/tidb/ddl" + "github.com/pingcap/tidb/infoschema" + "github.com/pingcap/tidb/meta/autoid" + "github.com/pingcap/tidb/parser/ast" + "github.com/pingcap/tidb/parser/model" + "github.com/pingcap/tidb/parser/mysql" + "github.com/pingcap/tidb/sessionctx" + "github.com/pingcap/tidb/types" + "github.com/pingcap/tidb/types/parser_driver" +) + +// PreprocessOpt presents optional parameters to `Preprocess` method. +type PreprocessOpt func(*preprocessor) + +// Preprocess resolves table names of the node, and checks some statements validation. +func Preprocess(ctx sessionctx.Context, node ast.Node, is infoschema.InfoSchema, preprocessOpt ...PreprocessOpt) error { + v := preprocessor{is: is, ctx: ctx, tableAliasInJoin: make([]map[string]interface{}, 0)} + for _, optFn := range preprocessOpt { + optFn(&v) + } + node.Accept(&v) + return errors.Trace(v.err) +} + +type preprocessorFlag uint8 + +const ( + // inCreateOrDropTable is set when visiting create/drop table statement. + inCreateOrDropTable preprocessorFlag = 1 << iota + // parentIsJoin is set when visiting node's parent is join. + parentIsJoin +) + +// preprocessor is an ast.Visitor that preprocess +// ast Nodes parsed from parser. +type preprocessor struct { + is infoschema.InfoSchema + ctx sessionctx.Context + err error + flag preprocessorFlag + + // tableAliasInJoin is a stack that keeps the table alias names for joins. 
+ // len(tableAliasInJoin) may bigger than 1 because the left/right child of join may be subquery that contains `JOIN` + tableAliasInJoin []map[string]interface{} +} + +func (p *preprocessor) Enter(in ast.Node) (out ast.Node, skipChildren bool) { + switch node := in.(type) { + case *ast.CreateTableStmt: + p.flag |= inCreateOrDropTable + p.checkCreateTableGrammar(node) + case *ast.DropTableStmt: + p.flag |= inCreateOrDropTable + p.checkDropTableGrammar(node) + case *ast.CreateIndexStmt: + p.checkCreateIndexGrammar(node) + case *ast.AlterTableStmt: + p.resolveAlterTableStmt(node) + p.checkAlterTableGrammar(node) + case *ast.CreateDatabaseStmt: + p.checkCreateDatabaseGrammar(node) + case *ast.DropDatabaseStmt: + p.checkDropDatabaseGrammar(node) + case *ast.ShowStmt: + p.resolveShowStmt(node) + case *ast.Join: + p.checkNonUniqTableAlias(node) + default: + p.flag &= ^parentIsJoin + } + return in, p.err != nil +} + +func (p *preprocessor) Leave(in ast.Node) (out ast.Node, ok bool) { + switch x := in.(type) { + case *ast.CreateTableStmt: + p.flag &= ^inCreateOrDropTable + p.checkAutoIncrement(x) + p.checkContainDotColumn(x) + case *ast.DropTableStmt, *ast.AlterTableStmt: + p.flag &= ^inCreateOrDropTable + case *ast.ExplainStmt: + if _, ok := x.Stmt.(*ast.ShowStmt); ok { + break + } + valid := false + for i, length := 0, len(ast.ExplainFormats); i < length; i++ { + if strings.ToLower(x.Format) == ast.ExplainFormats[i] { + valid = true + break + } + } + if !valid { + p.err = ErrUnknownExplainFormat.GenWithStackByArgs(x.Format) + } + case *ast.TableName: + p.handleTableName(x) + case *ast.Join: + if len(p.tableAliasInJoin) > 0 { + p.tableAliasInJoin = p.tableAliasInJoin[:len(p.tableAliasInJoin)-1] + } + case *ast.FuncCallExpr: + } + + return in, p.err == nil +} + +func checkAutoIncrementOp(colDef *ast.ColumnDef, num int) (bool, error) { + var hasAutoIncrement bool + + if colDef.Options[num].Tp == ast.ColumnOptionAutoIncrement { + hasAutoIncrement = true + if len(colDef.Options) == num+1 { + return hasAutoIncrement, nil + } + for _, op := range colDef.Options[num+1:] { + if op.Tp == ast.ColumnOptionDefaultValue { + if tmp, ok := op.Expr.(*driver.ValueExpr); ok { + if !tmp.Datum.IsNull() { + return hasAutoIncrement, errors.Errorf("Invalid default value for '%s'", colDef.Name.Name.O) + } + } + } + } + } + if colDef.Options[num].Tp == ast.ColumnOptionDefaultValue && len(colDef.Options) != num+1 { + if tmp, ok := colDef.Options[num].Expr.(*driver.ValueExpr); ok { + if tmp.Datum.IsNull() { + return hasAutoIncrement, nil + } + } + for _, op := range colDef.Options[num+1:] { + if op.Tp == ast.ColumnOptionAutoIncrement { + return hasAutoIncrement, errors.Errorf("Invalid default value for '%s'", colDef.Name.Name.O) + } + } + } + + return hasAutoIncrement, nil +} + +func (p *preprocessor) checkAutoIncrement(stmt *ast.CreateTableStmt) { + var ( + count int + autoIncrementCol *ast.ColumnDef + ) + + for _, colDef := range stmt.Cols { + var hasAutoIncrement bool + for i := range colDef.Options { + ok, err := checkAutoIncrementOp(colDef, i) + if err != nil { + p.err = err + return + } + if ok { + hasAutoIncrement = true + } + } + if hasAutoIncrement { + count++ + autoIncrementCol = colDef + } + } + + if count < 1 { + return + } + if count > 1 { + p.err = autoid.ErrWrongAutoKey.GenWithStackByArgs() + } + + switch autoIncrementCol.Tp.Tp { + case mysql.TypeTiny, mysql.TypeShort, mysql.TypeLong, + mysql.TypeFloat, mysql.TypeDouble, mysql.TypeLonglong, mysql.TypeInt24: + default: + p.err = errors.Errorf("Incorrect 
column specifier for column '%s'", autoIncrementCol.Name.Name.O) + } +} + +func (p *preprocessor) checkCreateDatabaseGrammar(stmt *ast.CreateDatabaseStmt) { + if isIncorrectName(stmt.Name) { + p.err = ddl.ErrWrongDBName.GenWithStackByArgs(stmt.Name) + } +} + +func (p *preprocessor) checkDropDatabaseGrammar(stmt *ast.DropDatabaseStmt) { + if isIncorrectName(stmt.Name) { + p.err = ddl.ErrWrongDBName.GenWithStackByArgs(stmt.Name) + } +} + +func (p *preprocessor) checkCreateTableGrammar(stmt *ast.CreateTableStmt) { + tName := stmt.Table.Name.String() + if isIncorrectName(tName) { + p.err = ddl.ErrWrongTableName.GenWithStackByArgs(tName) + return + } + countPrimaryKey := 0 + for _, colDef := range stmt.Cols { + if err := checkColumn(colDef); err != nil { + p.err = err + return + } + isPrimary, err := checkColumnOptions(colDef.Options) + if err != nil { + p.err = err + return + } + countPrimaryKey += isPrimary + if countPrimaryKey > 1 { + p.err = infoschema.ErrMultiplePriKey + return + } + } + for _, constraint := range stmt.Constraints { + switch tp := constraint.Tp; tp { + case ast.ConstraintKey, ast.ConstraintIndex, ast.ConstraintUniq, ast.ConstraintUniqKey, ast.ConstraintUniqIndex: + err := checkIndexInfo(constraint.Name, constraint.Keys) + if err != nil { + p.err = err + return + } + case ast.ConstraintPrimaryKey: + if countPrimaryKey > 0 { + p.err = infoschema.ErrMultiplePriKey + return + } + countPrimaryKey++ + err := checkIndexInfo(constraint.Name, constraint.Keys) + if err != nil { + p.err = err + return + } + } + } + if len(stmt.Cols) == 0 && stmt.ReferTable == nil { + p.err = ddl.ErrTableMustHaveColumns + return + } +} + +func (p *preprocessor) checkDropTableGrammar(stmt *ast.DropTableStmt) { + for _, t := range stmt.Tables { + if isIncorrectName(t.Name.String()) { + p.err = ddl.ErrWrongTableName.GenWithStackByArgs(t.Name.String()) + return + } + } +} + +func (p *preprocessor) checkNonUniqTableAlias(stmt *ast.Join) { + if p.flag&parentIsJoin == 0 { + p.tableAliasInJoin = append(p.tableAliasInJoin, make(map[string]interface{})) + } + tableAliases := p.tableAliasInJoin[len(p.tableAliasInJoin)-1] + if err := isTableAliasDuplicate(stmt.Left, tableAliases); err != nil { + p.err = err + return + } + if err := isTableAliasDuplicate(stmt.Right, tableAliases); err != nil { + p.err = err + return + } + p.flag |= parentIsJoin +} + +func isTableAliasDuplicate(node ast.ResultSetNode, tableAliases map[string]interface{}) error { + if ts, ok := node.(*ast.TableSource); ok { + tabName := ts.AsName + if tabName.L == "" { + if tableNode, ok := ts.Source.(*ast.TableName); ok { + if tableNode.Schema.L != "" { + tabName = model.NewCIStr(fmt.Sprintf("%s.%s", tableNode.Schema.L, tableNode.Name.L)) + } else { + tabName = tableNode.Name + } + } + } + _, exists := tableAliases[tabName.L] + if len(tabName.L) != 0 && exists { + return ErrNonUniqTable.GenWithStackByArgs(tabName) + } + tableAliases[tabName.L] = nil + } + return nil +} + +func checkColumnOptions(ops []*ast.ColumnOption) (int, error) { + isPrimary, isGenerated, isStored := 0, 0, false + + for _, op := range ops { + switch op.Tp { + case ast.ColumnOptionPrimaryKey: + isPrimary = 1 + case ast.ColumnOptionGenerated: + isGenerated = 1 + isStored = op.Stored + } + } + + if isPrimary > 0 && isGenerated > 0 && !isStored { + return isPrimary, ErrUnsupportedOnGeneratedColumn.GenWithStackByArgs("Defining a virtual generated column as primary key") + } + + return isPrimary, nil +} + +func (p *preprocessor) checkCreateIndexGrammar(stmt *ast.CreateIndexStmt) { + 
tName := stmt.Table.Name.String() + if isIncorrectName(tName) { + p.err = ddl.ErrWrongTableName.GenWithStackByArgs(tName) + return + } + p.err = checkIndexInfo(stmt.IndexName, stmt.IndexPartSpecifications) +} + +func (p *preprocessor) checkAlterTableGrammar(stmt *ast.AlterTableStmt) { + tName := stmt.Table.Name.String() + if isIncorrectName(tName) { + p.err = ddl.ErrWrongTableName.GenWithStackByArgs(tName) + return + } + specs := stmt.Specs + for _, spec := range specs { + if spec.NewTable != nil { + ntName := spec.NewTable.Name.String() + if isIncorrectName(ntName) { + p.err = ddl.ErrWrongTableName.GenWithStackByArgs(ntName) + return + } + } + for _, colDef := range spec.NewColumns { + if p.err = checkColumn(colDef); p.err != nil { + return + } + } + switch spec.Tp { + case ast.AlterTableAddConstraint: + switch spec.Constraint.Tp { + case ast.ConstraintKey, ast.ConstraintIndex, ast.ConstraintUniq, ast.ConstraintUniqIndex, + ast.ConstraintUniqKey, ast.ConstraintPrimaryKey: + p.err = checkIndexInfo(spec.Constraint.Name, spec.Constraint.Keys) + if p.err != nil { + return + } + default: + // Nothing to do now. + } + default: + // Nothing to do now. + } + } +} + +// checkDuplicateColumnName checks if index exists duplicated columns. +func checkDuplicateColumnName(IndexPartSpecifications []*ast.IndexPartSpecification) error { + colNames := make(map[string]struct{}, len(IndexPartSpecifications)) + for _, IndexColNameWithExpr := range IndexPartSpecifications { + name := IndexColNameWithExpr.Column.Name + if _, ok := colNames[name.L]; ok { + return infoschema.ErrColumnExists.GenWithStackByArgs(name) + } + colNames[name.L] = struct{}{} + } + return nil +} + +// checkIndexInfo checks index name and index column names. +func checkIndexInfo(indexName string, IndexPartSpecifications []*ast.IndexPartSpecification) error { + if strings.EqualFold(indexName, mysql.PrimaryKeyName) { + return ddl.ErrWrongNameForIndex.GenWithStackByArgs(indexName) + } + if len(IndexPartSpecifications) > mysql.MaxKeyParts { + return infoschema.ErrTooManyKeyParts.GenWithStackByArgs(mysql.MaxKeyParts) + } + return checkDuplicateColumnName(IndexPartSpecifications) +} + +// checkColumn checks if the column definition is valid. +// See https://dev.mysql.com/doc/refman/5.7/en/storage-requirements.html +func checkColumn(colDef *ast.ColumnDef) error { + // Check column name. + cName := colDef.Name.Name.String() + if isIncorrectName(cName) { + return ddl.ErrWrongColumnName.GenWithStackByArgs(cName) + } + + // Check column type. + tp := colDef.Tp + if tp == nil { + return nil + } + if tp.Flen > math.MaxUint32 { + return types.ErrTooBigDisplayWidth.GenWithStack("Display width out of range for column '%s' (max = %d)", colDef.Name.Name.O, math.MaxUint32) + } + + switch tp.Tp { + case mysql.TypeString: + if tp.Flen != types.UnspecifiedLength && tp.Flen > mysql.MaxFieldCharLength { + return types.ErrTooBigFieldLength.GenWithStack("Column length too big for column '%s' (max = %d); use BLOB or TEXT instead", colDef.Name.Name.O, mysql.MaxFieldCharLength) + } + case mysql.TypeVarchar: + if len(tp.Charset) == 0 { + // It's not easy to get the schema charset and table charset here. + // The charset is determined by the order ColumnDefaultCharset --> TableDefaultCharset-->DatabaseDefaultCharset-->SystemDefaultCharset. + // return nil, to make the check in the ddl.CreateTable. 
+ return nil + } + err := ddl.IsTooBigFieldLength(colDef.Tp.Flen, colDef.Name.Name.O, tp.Charset) + if err != nil { + return err + } + case mysql.TypeFloat, mysql.TypeDouble: + if tp.Decimal > mysql.MaxFloatingTypeScale { + return types.ErrTooBigScale.GenWithStackByArgs(tp.Decimal, colDef.Name.Name.O, mysql.MaxFloatingTypeScale) + } + if tp.Flen > mysql.MaxFloatingTypeWidth { + return types.ErrTooBigPrecision.GenWithStackByArgs(tp.Flen, colDef.Name.Name.O, mysql.MaxFloatingTypeWidth) + } + case mysql.TypeSet: + if len(tp.Elems) > mysql.MaxTypeSetMembers { + return types.ErrTooBigSet.GenWithStack("Too many strings for column %s and SET", colDef.Name.Name.O) + } + // Check set elements. See https://dev.mysql.com/doc/refman/5.7/en/set.html. + for _, str := range colDef.Tp.Elems { + if strings.Contains(str, ",") { + return types.ErrIllegalValueForType.GenWithStackByArgs(types.TypeStr(tp.Tp), str) + } + } + case mysql.TypeNewDecimal: + if tp.Decimal > mysql.MaxDecimalScale { + return types.ErrTooBigScale.GenWithStackByArgs(tp.Decimal, colDef.Name.Name.O, mysql.MaxDecimalScale) + } + + if tp.Flen > mysql.MaxDecimalWidth { + return types.ErrTooBigPrecision.GenWithStackByArgs(tp.Flen, colDef.Name.Name.O, mysql.MaxDecimalWidth) + } + case mysql.TypeBit: + if tp.Flen <= 0 { + return types.ErrInvalidFieldSize.GenWithStackByArgs(colDef.Name.Name.O) + } + if tp.Flen > mysql.MaxBitDisplayWidth { + return types.ErrTooBigDisplayWidth.GenWithStackByArgs(colDef.Name.Name.O, mysql.MaxBitDisplayWidth) + } + default: + // TODO: Add more types. + } + return nil +} + +// isIncorrectName checks if the identifier is incorrect. +// See https://dev.mysql.com/doc/refman/5.7/en/identifiers.html +func isIncorrectName(name string) bool { + if len(name) == 0 { + return true + } + if name[len(name)-1] == ' ' { + return true + } + return false +} + +// checkContainDotColumn checks field contains the table name. +// for example :create table t (c1.c2 int default null). +func (p *preprocessor) checkContainDotColumn(stmt *ast.CreateTableStmt) { + tName := stmt.Table.Name.String() + sName := stmt.Table.Schema.String() + + for _, colDef := range stmt.Cols { + // check schema and table names. 
+ if colDef.Name.Schema.O != sName && len(colDef.Name.Schema.O) != 0 { + p.err = ddl.ErrWrongDBName.GenWithStackByArgs(colDef.Name.Schema.O) + return + } + if colDef.Name.Table.O != tName && len(colDef.Name.Table.O) != 0 { + p.err = ddl.ErrWrongTableName.GenWithStackByArgs(colDef.Name.Table.O) + return + } + } +} + +func (p *preprocessor) handleTableName(tn *ast.TableName) { + if tn.Schema.L == "" { + currentDB := p.ctx.GetSessionVars().CurrentDB + if currentDB == "" { + p.err = errors.Trace(ErrNoDB) + return + } + tn.Schema = model.NewCIStr(currentDB) + } + if p.flag&inCreateOrDropTable > 0 { + return + } + + table, err := p.is.TableByName(tn.Schema, tn.Name) + if err != nil { + p.err = err + return + } + tn.TableInfo = table.Meta() + dbInfo, _ := p.is.SchemaByName(tn.Schema) + tn.DBInfo = dbInfo +} + +func (p *preprocessor) resolveShowStmt(node *ast.ShowStmt) { + if node.DBName == "" { + if node.Table != nil && node.Table.Schema.L != "" { + node.DBName = node.Table.Schema.O + } else { + node.DBName = p.ctx.GetSessionVars().CurrentDB + } + } else if node.Table != nil && node.Table.Schema.L == "" { + node.Table.Schema = model.NewCIStr(node.DBName) + } +} + +func (p *preprocessor) resolveAlterTableStmt(node *ast.AlterTableStmt) {} diff --git a/planner/core/property_cols_prune.go b/planner/core/property_cols_prune.go new file mode 100644 index 0000000..c321aad --- /dev/null +++ b/planner/core/property_cols_prune.go @@ -0,0 +1,194 @@ +// Copyright 2017 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package core + +import ( + "github.com/pingcap/tidb/expression" +) + +// preparePossibleProperties traverses the plan tree by a post-order method, +// recursively calls LogicalPlan PreparePossibleProperties interface. +func preparePossibleProperties(lp LogicalPlan) [][]*expression.Column { + childrenProperties := make([][][]*expression.Column, 0, len(lp.Children())) + for _, child := range lp.Children() { + childrenProperties = append(childrenProperties, preparePossibleProperties(child)) + } + return lp.PreparePossibleProperties(lp.Schema(), childrenProperties...) +} + +// PreparePossibleProperties implements LogicalPlan PreparePossibleProperties interface. +func (ds *DataSource) PreparePossibleProperties(schema *expression.Schema, childrenProperties ...[][]*expression.Column) [][]*expression.Column { + result := make([][]*expression.Column, 0, len(ds.possibleAccessPaths)) + + for _, path := range ds.possibleAccessPaths { + if path.IsTablePath { + col := ds.getPKIsHandleCol() + if col != nil { + result = append(result, []*expression.Column{col}) + } + continue + } + + if len(path.IdxCols) == 0 { + continue + } + result = append(result, make([]*expression.Column, len(path.IdxCols))) + copy(result[len(result)-1], path.IdxCols) + for i := 0; i < path.EqCondCount && i+1 < len(path.IdxCols); i++ { + result = append(result, make([]*expression.Column, len(path.IdxCols)-i-1)) + copy(result[len(result)-1], path.IdxCols[i+1:]) + } + } + return result +} + +// PreparePossibleProperties implements LogicalPlan PreparePossibleProperties interface. 
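The loop in DataSource.PreparePossibleProperties above enumerates one candidate ordering per usable suffix of an index. A minimal standalone sketch of the same enumeration, using plain strings instead of *expression.Column (possibleOrderings is an illustrative name, not a planner function):

package main

import "fmt"

// possibleOrderings mirrors the enumeration in DataSource.PreparePossibleProperties:
// the full index column list is always a candidate ordering, and for every leading
// column fixed by an equal condition, the remaining suffix is a candidate as well.
func possibleOrderings(idxCols []string, eqCondCount int) [][]string {
	if len(idxCols) == 0 {
		return nil
	}
	result := [][]string{append([]string(nil), idxCols...)}
	for i := 0; i < eqCondCount && i+1 < len(idxCols); i++ {
		result = append(result, append([]string(nil), idxCols[i+1:]...))
	}
	return result
}

func main() {
	// Index on (a, b, c) with predicates a = 1 AND b = 2 (eqCondCount = 2):
	// rows can be consumed ordered by (a, b, c), (b, c) or (c).
	fmt.Println(possibleOrderings([]string{"a", "b", "c"}, 2))
}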
+func (ts *LogicalTableScan) PreparePossibleProperties(schema *expression.Schema, childrenProperties ...[][]*expression.Column) [][]*expression.Column { + if ts.Handle != nil { + return [][]*expression.Column{{ts.Handle}} + } + return nil +} + +// PreparePossibleProperties implements LogicalPlan PreparePossibleProperties interface. +func (is *LogicalIndexScan) PreparePossibleProperties(schema *expression.Schema, childrenProperties ...[][]*expression.Column) [][]*expression.Column { + if len(is.IdxCols) == 0 { + return nil + } + result := make([][]*expression.Column, 0, is.EqCondCount+1) + for i := 0; i <= is.EqCondCount; i++ { + result = append(result, make([]*expression.Column, len(is.IdxCols)-i)) + copy(result[i], is.IdxCols[i:]) + } + return result +} + +// PreparePossibleProperties implements LogicalPlan PreparePossibleProperties interface. +func (p *TiKVSingleGather) PreparePossibleProperties(schema *expression.Schema, childrenProperties ...[][]*expression.Column) [][]*expression.Column { + return childrenProperties[0] +} + +// PreparePossibleProperties implements LogicalPlan PreparePossibleProperties interface. +func (p *LogicalSelection) PreparePossibleProperties(schema *expression.Schema, childrenProperties ...[][]*expression.Column) [][]*expression.Column { + return childrenProperties[0] +} + +// PreparePossibleProperties implements LogicalPlan PreparePossibleProperties interface. +func (p *LogicalSort) PreparePossibleProperties(schema *expression.Schema, childrenProperties ...[][]*expression.Column) [][]*expression.Column { + propCols := getPossiblePropertyFromByItems(p.ByItems) + if len(propCols) == 0 { + return nil + } + return [][]*expression.Column{propCols} +} + +// PreparePossibleProperties implements LogicalPlan PreparePossibleProperties interface. +func (p *LogicalTopN) PreparePossibleProperties(schema *expression.Schema, childrenProperties ...[][]*expression.Column) [][]*expression.Column { + propCols := getPossiblePropertyFromByItems(p.ByItems) + if len(propCols) == 0 { + return nil + } + return [][]*expression.Column{propCols} +} + +func getPossiblePropertyFromByItems(items []*ByItems) []*expression.Column { + cols := make([]*expression.Column, 0, len(items)) + for _, item := range items { + if col, ok := item.Expr.(*expression.Column); ok { + cols = append(cols, col) + } else { + break + } + } + return cols +} + +// PreparePossibleProperties implements LogicalPlan PreparePossibleProperties interface. +func (p *baseLogicalPlan) PreparePossibleProperties(schema *expression.Schema, childrenProperties ...[][]*expression.Column) [][]*expression.Column { + return nil +} + +// PreparePossibleProperties implements LogicalPlan PreparePossibleProperties interface. +func (p *LogicalProjection) PreparePossibleProperties(schema *expression.Schema, childrenProperties ...[][]*expression.Column) [][]*expression.Column { + childProperties := childrenProperties[0] + oldCols := make([]*expression.Column, 0, p.schema.Len()) + newCols := make([]*expression.Column, 0, p.schema.Len()) + for i, expr := range p.Exprs { + if col, ok := expr.(*expression.Column); ok { + newCols = append(newCols, p.schema.Columns[i]) + oldCols = append(oldCols, col) + } + } + tmpSchema := expression.NewSchema(oldCols...) 
+ for i := len(childProperties) - 1; i >= 0; i-- { + for j, col := range childProperties[i] { + pos := tmpSchema.ColumnIndex(col) + if pos >= 0 { + childProperties[i][j] = newCols[pos] + } else { + childProperties[i] = childProperties[i][:j] + break + } + } + if len(childProperties[i]) == 0 { + childProperties = append(childProperties[:i], childProperties[i+1:]...) + } + } + return childProperties +} + +// PreparePossibleProperties implements LogicalPlan PreparePossibleProperties interface. +func (p *LogicalJoin) PreparePossibleProperties(schema *expression.Schema, childrenProperties ...[][]*expression.Column) [][]*expression.Column { + leftProperties := childrenProperties[0] + rightProperties := childrenProperties[1] + // TODO: We should consider properties propagation. + p.leftProperties = leftProperties + p.rightProperties = rightProperties + if p.JoinType == LeftOuterJoin { + rightProperties = nil + } else if p.JoinType == RightOuterJoin { + leftProperties = nil + } + resultProperties := make([][]*expression.Column, len(leftProperties)+len(rightProperties)) + for i, cols := range leftProperties { + resultProperties[i] = make([]*expression.Column, len(cols)) + copy(resultProperties[i], cols) + } + leftLen := len(leftProperties) + for i, cols := range rightProperties { + resultProperties[leftLen+i] = make([]*expression.Column, len(cols)) + copy(resultProperties[leftLen+i], cols) + } + return resultProperties +} + +// PreparePossibleProperties implements LogicalPlan PreparePossibleProperties interface. +func (la *LogicalAggregation) PreparePossibleProperties(schema *expression.Schema, childrenProperties ...[][]*expression.Column) [][]*expression.Column { + childProps := childrenProperties[0] + // If there's no group-by item, the stream aggregation could have no order property. So we can add an empty property + // when its group-by item is empty. + if len(la.GroupByItems) == 0 { + la.possibleProperties = [][]*expression.Column{nil} + return nil + } + resultProperties := make([][]*expression.Column, 0, len(childProps)) + for _, possibleChildProperty := range childProps { + sortColOffsets := getMaxSortPrefix(possibleChildProperty, la.groupByCols) + if len(sortColOffsets) == len(la.groupByCols) { + resultProperties = append(resultProperties, possibleChildProperty[:len(la.groupByCols)]) + } + } + la.possibleProperties = resultProperties + return la.possibleProperties +} diff --git a/planner/core/resolve_indices.go b/planner/core/resolve_indices.go new file mode 100644 index 0000000..10e2c45 --- /dev/null +++ b/planner/core/resolve_indices.go @@ -0,0 +1,330 @@ +// Copyright 2016 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package core + +import ( + "github.com/pingcap/tidb/expression" + "github.com/pingcap/tidb/util/disjointset" +) + +// ResolveIndices implements Plan interface. 
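LogicalAggregation.PreparePossibleProperties above keeps a child ordering only when its maximal prefix of group-by columns covers the whole group-by list. A rough sketch of that prefix test, assuming columns can be compared by name (maxSortPrefix here is a simplified stand-in for getMaxSortPrefix, not the real helper):

package main

import "fmt"

// maxSortPrefix counts how many leading columns of a child ordering are group-by
// columns. The ordering only helps stream aggregation when that prefix covers
// every group-by column, which is the check performed above.
func maxSortPrefix(ordering, gbyCols []string) int {
	gby := make(map[string]bool, len(gbyCols))
	for _, c := range gbyCols {
		gby[c] = true
	}
	n := 0
	for _, c := range ordering {
		if !gby[c] {
			break
		}
		n++
	}
	return n
}

func main() {
	gby := []string{"a", "b"}
	fmt.Println(maxSortPrefix([]string{"b", "a", "c"}, gby) == len(gby)) // true: usable ordering
	fmt.Println(maxSortPrefix([]string{"a", "c", "b"}, gby) == len(gby)) // false: c interrupts the prefix
}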
+func (p *PhysicalProjection) ResolveIndices() (err error) { + err = p.physicalSchemaProducer.ResolveIndices() + if err != nil { + return err + } + for i, expr := range p.Exprs { + p.Exprs[i], err = expr.ResolveIndices(p.children[0].Schema()) + if err != nil { + return err + } + } + childProj, isProj := p.children[0].(*PhysicalProjection) + if !isProj { + return + } + refine4NeighbourProj(p, childProj) + return +} + +// refine4NeighbourProj refines the index for p.Exprs whose type is *Column when +// there is two neighbouring Projections. +// This function is introduced because that different childProj.Expr may refer +// to the same index of childProj.Schema, so we need to keep this relation +// between the specified expressions in the parent Projection. +func refine4NeighbourProj(p, childProj *PhysicalProjection) { + inputIdx2OutputIdxes := make(map[int][]int) + for i, expr := range childProj.Exprs { + col, isCol := expr.(*expression.Column) + if !isCol { + continue + } + inputIdx2OutputIdxes[col.Index] = append(inputIdx2OutputIdxes[col.Index], i) + } + childSchemaUnionSet := disjointset.NewIntSet(childProj.schema.Len()) + for _, outputIdxes := range inputIdx2OutputIdxes { + if len(outputIdxes) <= 1 { + continue + } + for i := 1; i < len(outputIdxes); i++ { + childSchemaUnionSet.Union(outputIdxes[0], outputIdxes[i]) + } + } + + for _, expr := range p.Exprs { + col, isCol := expr.(*expression.Column) + if !isCol { + continue + } + col.Index = childSchemaUnionSet.FindRoot(col.Index) + } +} + +// ResolveIndices implements Plan interface. +func (p *PhysicalHashJoin) ResolveIndices() (err error) { + err = p.physicalSchemaProducer.ResolveIndices() + if err != nil { + return err + } + lSchema := p.children[0].Schema() + rSchema := p.children[1].Schema() + for i, fun := range p.EqualConditions { + lArg, err := fun.GetArgs()[0].ResolveIndices(lSchema) + if err != nil { + return err + } + p.LeftJoinKeys[i] = lArg.(*expression.Column) + rArg, err := fun.GetArgs()[1].ResolveIndices(rSchema) + if err != nil { + return err + } + p.RightJoinKeys[i] = rArg.(*expression.Column) + p.EqualConditions[i] = expression.NewFunctionInternal(fun.GetCtx(), fun.FuncName.L, fun.GetType(), lArg, rArg).(*expression.ScalarFunction) + } + for i, expr := range p.LeftConditions { + p.LeftConditions[i], err = expr.ResolveIndices(lSchema) + if err != nil { + return err + } + } + for i, expr := range p.RightConditions { + p.RightConditions[i], err = expr.ResolveIndices(rSchema) + if err != nil { + return err + } + } + for i, expr := range p.OtherConditions { + p.OtherConditions[i], err = expr.ResolveIndices(expression.MergeSchema(lSchema, rSchema)) + if err != nil { + return err + } + } + return +} + +// ResolveIndices implements Plan interface. 
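refine4NeighbourProj above uses a union-find over the child projection's output positions so that parent columns referring to duplicated outputs all end up pointing at one representative index. A self-contained illustration with a hand-rolled union-find standing in for util/disjointset.IntSet (the types and the example projection are assumptions for illustration only):

package main

import "fmt"

// intSet is a minimal union-find, used here only to illustrate how
// refine4NeighbourProj groups output positions of the child projection that
// refer to the same input column.
type intSet struct{ parent []int }

func newIntSet(n int) *intSet {
	p := make([]int, n)
	for i := range p {
		p[i] = i
	}
	return &intSet{parent: p}
}

func (s *intSet) findRoot(x int) int {
	if s.parent[x] != x {
		s.parent[x] = s.findRoot(s.parent[x]) // path compression
	}
	return s.parent[x]
}

func (s *intSet) union(a, b int) { s.parent[s.findRoot(b)] = s.findRoot(a) }

func main() {
	// Child projection: SELECT t.a AS x, t.a AS y, t.b AS z.
	// Outputs 0 and 1 both come from the same input column, so they are unioned.
	s := newIntSet(3)
	s.union(0, 1)
	// A parent column that referenced output index 1 is redirected to the
	// representative index of its set, just as col.Index is rewritten above.
	fmt.Println(s.findRoot(1)) // 0
}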
+func (p *PhysicalMergeJoin) ResolveIndices() (err error) { + err = p.physicalSchemaProducer.ResolveIndices() + if err != nil { + return err + } + lSchema := p.children[0].Schema() + rSchema := p.children[1].Schema() + for i, col := range p.LeftJoinKeys { + newKey, err := col.ResolveIndices(lSchema) + if err != nil { + return err + } + p.LeftJoinKeys[i] = newKey.(*expression.Column) + } + for i, col := range p.RightJoinKeys { + newKey, err := col.ResolveIndices(rSchema) + if err != nil { + return err + } + p.RightJoinKeys[i] = newKey.(*expression.Column) + } + for i, expr := range p.LeftConditions { + p.LeftConditions[i], err = expr.ResolveIndices(lSchema) + if err != nil { + return err + } + } + for i, expr := range p.RightConditions { + p.RightConditions[i], err = expr.ResolveIndices(rSchema) + if err != nil { + return err + } + } + for i, expr := range p.OtherConditions { + p.OtherConditions[i], err = expr.ResolveIndices(expression.MergeSchema(lSchema, rSchema)) + if err != nil { + return err + } + } + return +} + +// ResolveIndices implements Plan interface. +func (p *PhysicalUnionScan) ResolveIndices() (err error) { + err = p.basePhysicalPlan.ResolveIndices() + if err != nil { + return err + } + for i, expr := range p.Conditions { + p.Conditions[i], err = expr.ResolveIndices(p.children[0].Schema()) + if err != nil { + return err + } + } + resolvedHandleCol, err := p.HandleCol.ResolveIndices(p.children[0].Schema()) + if err != nil { + return err + } + p.HandleCol = resolvedHandleCol.(*expression.Column) + return +} + +// ResolveIndices implements Plan interface. +func (p *PhysicalTableReader) ResolveIndices() error { + return p.tablePlan.ResolveIndices() +} + +// ResolveIndices implements Plan interface. +func (p *PhysicalIndexReader) ResolveIndices() (err error) { + err = p.physicalSchemaProducer.ResolveIndices() + if err != nil { + return err + } + err = p.indexPlan.ResolveIndices() + if err != nil { + return err + } + for i, col := range p.OutputColumns { + newCol, err := col.ResolveIndices(p.indexPlan.Schema()) + if err != nil { + return err + } + p.OutputColumns[i] = newCol.(*expression.Column) + } + return +} + +// ResolveIndices implements Plan interface. +func (p *PhysicalIndexLookUpReader) ResolveIndices() (err error) { + err = p.tablePlan.ResolveIndices() + if err != nil { + return err + } + err = p.indexPlan.ResolveIndices() + if err != nil { + return err + } + if p.ExtraHandleCol != nil { + newCol, err := p.ExtraHandleCol.ResolveIndices(p.tablePlan.Schema()) + if err != nil { + return err + } + p.ExtraHandleCol = newCol.(*expression.Column) + } + return +} + +// ResolveIndices implements Plan interface. +func (p *PhysicalSelection) ResolveIndices() (err error) { + err = p.basePhysicalPlan.ResolveIndices() + if err != nil { + return err + } + for i, expr := range p.Conditions { + p.Conditions[i], err = expr.ResolveIndices(p.children[0].Schema()) + if err != nil { + return err + } + } + return +} + +// ResolveIndices implements Plan interface. +func (p *basePhysicalAgg) ResolveIndices() (err error) { + err = p.physicalSchemaProducer.ResolveIndices() + if err != nil { + return err + } + for _, aggFun := range p.AggFuncs { + for i, arg := range aggFun.Args { + aggFun.Args[i], err = arg.ResolveIndices(p.children[0].Schema()) + if err != nil { + return err + } + } + } + for i, item := range p.GroupByItems { + p.GroupByItems[i], err = item.ResolveIndices(p.children[0].Schema()) + if err != nil { + return err + } + } + return +} + +// ResolveIndices implements Plan interface. 
+func (p *PhysicalSort) ResolveIndices() (err error) { + err = p.basePhysicalPlan.ResolveIndices() + if err != nil { + return err + } + for _, item := range p.ByItems { + item.Expr, err = item.Expr.ResolveIndices(p.children[0].Schema()) + if err != nil { + return err + } + } + return err +} + +// ResolveIndices implements Plan interface. +func (p *PhysicalTopN) ResolveIndices() (err error) { + err = p.basePhysicalPlan.ResolveIndices() + if err != nil { + return err + } + for _, item := range p.ByItems { + item.Expr, err = item.Expr.ResolveIndices(p.children[0].Schema()) + if err != nil { + return err + } + } + return +} + +// ResolveIndices implements Plan interface. +func (p *Insert) ResolveIndices() (err error) { + err = p.baseSchemaProducer.ResolveIndices() + if err != nil { + return err + } + for _, set := range p.SetList { + newCol, err := set.Col.ResolveIndices(p.tableSchema) + if err != nil { + return err + } + set.Col = newCol.(*expression.Column) + set.Expr, err = set.Expr.ResolveIndices(p.tableSchema) + if err != nil { + return err + } + } + return +} + +func (p *physicalSchemaProducer) ResolveIndices() (err error) { + err = p.basePhysicalPlan.ResolveIndices() + return err +} + +func (p *baseSchemaProducer) ResolveIndices() (err error) { + return +} + +// ResolveIndices implements Plan interface. +func (p *basePhysicalPlan) ResolveIndices() (err error) { + for _, child := range p.children { + err = child.ResolveIndices() + if err != nil { + return err + } + } + return +} diff --git a/planner/core/rule_aggregation_elimination.go b/planner/core/rule_aggregation_elimination.go new file mode 100644 index 0000000..ff3a739 --- /dev/null +++ b/planner/core/rule_aggregation_elimination.go @@ -0,0 +1,123 @@ +// Copyright 2018 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package core + +import ( + "context" + "github.com/pingcap/tidb/expression" + "github.com/pingcap/tidb/expression/aggregation" + "github.com/pingcap/tidb/parser/ast" + "github.com/pingcap/tidb/parser/mysql" + "github.com/pingcap/tidb/sessionctx" + "github.com/pingcap/tidb/types" +) + +type aggregationEliminator struct { + aggregationEliminateChecker +} + +type aggregationEliminateChecker struct { +} + +// tryToEliminateAggregation will eliminate aggregation grouped by unique key. +// e.g. select min(b) from t group by a. If a is a unique key, then this sql is equal to `select b from t group by a`. +// For count(expr), sum(expr), avg(expr), count(distinct expr, [expr...]) we may need to rewrite the expr. Details are shown below. +// If we can eliminate agg successful, we return a projection. Else we return a nil pointer. +func (a *aggregationEliminateChecker) tryToEliminateAggregation(agg *LogicalAggregation) *LogicalProjection { + schemaByGroupby := expression.NewSchema(agg.groupByCols...) + coveredByUniqueKey := false + for _, key := range agg.children[0].Schema().Keys { + if schemaByGroupby.ColumnsIndices(key) != nil { + coveredByUniqueKey = true + break + } + } + if coveredByUniqueKey { + // GroupByCols has unique key, so this aggregation can be removed. 
+ proj := a.convertAggToProj(agg) + proj.SetChildren(agg.children[0]) + return proj + } + return nil +} + +func (a *aggregationEliminateChecker) convertAggToProj(agg *LogicalAggregation) *LogicalProjection { + proj := LogicalProjection{ + Exprs: make([]expression.Expression, 0, len(agg.AggFuncs)), + }.Init(agg.ctx) + for _, fun := range agg.AggFuncs { + expr := a.rewriteExpr(agg.ctx, fun) + proj.Exprs = append(proj.Exprs, expr) + } + proj.SetSchema(agg.schema.Clone()) + return proj +} + +// rewriteExpr will rewrite the aggregate function to expression doesn't contain aggregate function. +func (a *aggregationEliminateChecker) rewriteExpr(ctx sessionctx.Context, aggFunc *aggregation.AggFuncDesc) expression.Expression { + switch aggFunc.Name { + case ast.AggFuncCount: + if aggFunc.Mode == aggregation.FinalMode { + return aggFunc.Args[0] + } + return a.rewriteCount(ctx, aggFunc.Args, aggFunc.RetTp) + case ast.AggFuncSum, ast.AggFuncAvg, ast.AggFuncFirstRow, ast.AggFuncMax, ast.AggFuncMin: + return aggFunc.Args[0] + default: + panic("Unsupported function") + } +} + +func (a *aggregationEliminateChecker) rewriteCount(ctx sessionctx.Context, exprs []expression.Expression, targetTp *types.FieldType) expression.Expression { + // If is count(expr), we will change it to if(isnull(expr), 0, 1). + // If is count(distinct x, y, z) we will change it to if(isnull(x) or isnull(y) or isnull(z), 0, 1). + // If is count(expr not null), we will change it to constant 1. + isNullExprs := make([]expression.Expression, 0, len(exprs)) + for _, expr := range exprs { + if mysql.HasNotNullFlag(expr.GetType().Flag) { + isNullExprs = append(isNullExprs, expression.Zero) + } else { + isNullExpr := expression.NewFunctionInternal(ctx, ast.IsNull, types.NewFieldType(mysql.TypeTiny), expr) + isNullExprs = append(isNullExprs, isNullExpr) + } + } + + innerExpr := expression.ComposeDNFCondition(ctx, isNullExprs...) + newExpr := expression.NewFunctionInternal(ctx, ast.If, targetTp, innerExpr, expression.Zero, expression.One) + return newExpr +} + +func (a *aggregationEliminator) optimize(ctx context.Context, p LogicalPlan) (LogicalPlan, error) { + newChildren := make([]LogicalPlan, 0, len(p.Children())) + for _, child := range p.Children() { + newChild, err := a.optimize(ctx, child) + if err != nil { + return nil, err + } + newChildren = append(newChildren, newChild) + } + p.SetChildren(newChildren...) + agg, ok := p.(*LogicalAggregation) + if !ok { + return p, nil + } + if proj := a.tryToEliminateAggregation(agg); proj != nil { + return proj, nil + } + return p, nil +} + +func (*aggregationEliminator) name() string { + return "aggregation_eliminate" +} diff --git a/planner/core/rule_aggregation_push_down.go b/planner/core/rule_aggregation_push_down.go new file mode 100644 index 0000000..e2968ea --- /dev/null +++ b/planner/core/rule_aggregation_push_down.go @@ -0,0 +1,349 @@ +// Copyright 2016 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// // Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. 
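To make the rewrite performed by rewriteCount in rule_aggregation_elimination.go above concrete: once the group-by columns form a unique key, each group holds exactly one row, so count(expr) per group degenerates to if(isnull(expr), 0, 1). A tiny sketch of that semantics (countAfterElimination is an illustrative helper, not planner code):

package main

import "fmt"

// countAfterElimination mimics the expression produced by rewriteCount: with a
// unique group-by key, every group contains exactly one row, so count(expr)
// for that group is if(isnull(expr), 0, 1).
func countAfterElimination(expr *int) int {
	if expr == nil { // isnull(expr)
		return 0
	}
	return 1
}

func main() {
	v := 42
	fmt.Println(countAfterElimination(&v))  // 1: the single row has a non-NULL value
	fmt.Println(countAfterElimination(nil)) // 0: the single row is NULL
}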
+ +package core + +import ( + "context" + + "github.com/pingcap/tidb/expression" + "github.com/pingcap/tidb/expression/aggregation" + "github.com/pingcap/tidb/parser/ast" + "github.com/pingcap/tidb/parser/mysql" + "github.com/pingcap/tidb/sessionctx" + "github.com/pingcap/tidb/types" +) + +type aggregationPushDownSolver struct { + aggregationEliminateChecker +} + +// isDecomposable checks if an aggregate function is decomposable. An aggregation function $F$ is decomposable +// if there exist aggregation functions F_1 and F_2 such that F(S_1 union all S_2) = F_2(F_1(S_1),F_1(S_2)), +// where S_1 and S_2 are two sets of values. We call S_1 and S_2 partial groups. +func (a *aggregationPushDownSolver) isDecomposable(fun *aggregation.AggFuncDesc) bool { + switch fun.Name { + case ast.AggFuncAvg: + // TODO: Support avg push down. + return false + case ast.AggFuncMax, ast.AggFuncMin, ast.AggFuncFirstRow, ast.AggFuncSum, ast.AggFuncCount: + return true + default: + return false + } +} + +// getAggFuncChildIdx gets which children it belongs to, 0 stands for left, 1 stands for right, -1 stands for both. +func (a *aggregationPushDownSolver) getAggFuncChildIdx(aggFunc *aggregation.AggFuncDesc, schema *expression.Schema) int { + fromLeft, fromRight := false, false + var cols []*expression.Column + cols = expression.ExtractColumnsFromExpressions(cols, aggFunc.Args, nil) + for _, col := range cols { + if schema.Contains(col) { + fromLeft = true + } else { + fromRight = true + } + } + if fromLeft && fromRight { + return -1 + } else if fromLeft { + return 0 + } + return 1 +} + +// collectAggFuncs collects all aggregate functions and splits them into two parts: "leftAggFuncs" and "rightAggFuncs" whose +// arguments are all from left child or right child separately. If some aggregate functions have the arguments that have +// columns both from left and right children, the whole aggregation is forbidden to push down. +func (a *aggregationPushDownSolver) collectAggFuncs(agg *LogicalAggregation, join *LogicalJoin) (valid bool, leftAggFuncs, rightAggFuncs []*aggregation.AggFuncDesc) { + valid = true + leftChild := join.children[0] + for _, aggFunc := range agg.AggFuncs { + if !a.isDecomposable(aggFunc) { + return false, nil, nil + } + index := a.getAggFuncChildIdx(aggFunc, leftChild.Schema()) + switch index { + case 0: + leftAggFuncs = append(leftAggFuncs, aggFunc) + case 1: + rightAggFuncs = append(rightAggFuncs, aggFunc) + default: + return false, nil, nil + } + } + return +} + +// collectGbyCols collects all columns from gby-items and join-conditions and splits them into two parts: "leftGbyCols" and +// "rightGbyCols". e.g. For query "SELECT SUM(B.id) FROM A, B WHERE A.c1 = B.c1 AND A.c2 != B.c2 GROUP BY B.c3" , the optimized +// query should be "SELECT SUM(B.agg) FROM A, (SELECT SUM(id) as agg, c1, c2, c3 FROM B GROUP BY id, c1, c2, c3) as B +// WHERE A.c1 = B.c1 AND A.c2 != B.c2 GROUP BY B.c3". As you see, all the columns appearing in join-conditions should be +// treated as group by columns in join subquery. 
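The decomposability condition used by isDecomposable above, F(S_1 union S_2) = F_2(F_1(S_1), F_1(S_2)), can be checked by hand for count: counting a whole set equals summing the counts of its partial groups. A small worked example with plain ints and no planner types:

package main

import "fmt"

func main() {
	// Two partial groups of the same input set.
	s1 := []int{1, 2, 3}
	s2 := []int{4, 5}

	// F = count over the whole set ...
	union := append(append([]int(nil), s1...), s2...)
	whole := len(union)
	// ... equals F_2 = sum applied to the partial results F_1 = count.
	partial := len(s1) + len(s2)
	fmt.Println(whole == partial) // true

	// sum decomposes into sum-of-sums and max/min into max-of-maxes the same
	// way; avg is the usual counter-example, which is why isDecomposable still
	// rejects it (see the TODO above).
}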
+func (a *aggregationPushDownSolver) collectGbyCols(agg *LogicalAggregation, join *LogicalJoin) (leftGbyCols, rightGbyCols []*expression.Column) { + leftChild := join.children[0] + ctx := agg.ctx + for _, gbyExpr := range agg.GroupByItems { + cols := expression.ExtractColumns(gbyExpr) + for _, col := range cols { + if leftChild.Schema().Contains(col) { + leftGbyCols = append(leftGbyCols, col) + } else { + rightGbyCols = append(rightGbyCols, col) + } + } + } + // extract equal conditions + for _, eqFunc := range join.EqualConditions { + leftGbyCols = a.addGbyCol(ctx, leftGbyCols, eqFunc.GetArgs()[0].(*expression.Column)) + rightGbyCols = a.addGbyCol(ctx, rightGbyCols, eqFunc.GetArgs()[1].(*expression.Column)) + } + for _, leftCond := range join.LeftConditions { + cols := expression.ExtractColumns(leftCond) + leftGbyCols = a.addGbyCol(ctx, leftGbyCols, cols...) + } + for _, rightCond := range join.RightConditions { + cols := expression.ExtractColumns(rightCond) + rightGbyCols = a.addGbyCol(ctx, rightGbyCols, cols...) + } + for _, otherCond := range join.OtherConditions { + cols := expression.ExtractColumns(otherCond) + for _, col := range cols { + if leftChild.Schema().Contains(col) { + leftGbyCols = a.addGbyCol(ctx, leftGbyCols, col) + } else { + rightGbyCols = a.addGbyCol(ctx, rightGbyCols, col) + } + } + } + return +} + +func (a *aggregationPushDownSolver) splitAggFuncsAndGbyCols(agg *LogicalAggregation, join *LogicalJoin) (valid bool, + leftAggFuncs, rightAggFuncs []*aggregation.AggFuncDesc, + leftGbyCols, rightGbyCols []*expression.Column) { + valid, leftAggFuncs, rightAggFuncs = a.collectAggFuncs(agg, join) + if !valid { + return + } + leftGbyCols, rightGbyCols = a.collectGbyCols(agg, join) + return +} + +// addGbyCol adds a column to gbyCols. If a group by column has existed, it will not be added repeatedly. +func (a *aggregationPushDownSolver) addGbyCol(ctx sessionctx.Context, gbyCols []*expression.Column, cols ...*expression.Column) []*expression.Column { + for _, c := range cols { + duplicate := false + for _, gbyCol := range gbyCols { + if c.Equal(ctx, gbyCol) { + duplicate = true + break + } + } + if !duplicate { + gbyCols = append(gbyCols, c) + } + } + return gbyCols +} + +// checkValidJoin checks if this join should be pushed across. +func (a *aggregationPushDownSolver) checkValidJoin(join *LogicalJoin) bool { + return join.JoinType == InnerJoin || join.JoinType == LeftOuterJoin || join.JoinType == RightOuterJoin +} + +// decompose splits an aggregate function to two parts: a final mode function and a partial mode function. Currently +// there are no differences between partial mode and complete mode, so we can confuse them. +func (a *aggregationPushDownSolver) decompose(ctx sessionctx.Context, aggFunc *aggregation.AggFuncDesc, schema *expression.Schema) ([]*aggregation.AggFuncDesc, *expression.Schema) { + // Result is a slice because avg should be decomposed to sum and count. Currently we don't process this case. + result := []*aggregation.AggFuncDesc{aggFunc.Clone()} + for _, aggFunc := range result { + schema.Append(&expression.Column{ + UniqueID: ctx.GetSessionVars().AllocPlanColumnID(), + RetType: aggFunc.RetTp, + }) + } + aggFunc.Args = expression.Column2Exprs(schema.Columns[schema.Len()-len(result):]) + aggFunc.Mode = aggregation.FinalMode + return result, schema +} + +// tryToPushDownAgg tries to push down an aggregate function into a join path. If all aggFuncs are first row, we won't +// process it temporarily. 
If not, We will add additional group by columns and first row functions. We make a new aggregation operator. +// If the pushed aggregation is grouped by unique key, it's no need to push it down. +func (a *aggregationPushDownSolver) tryToPushDownAgg(aggFuncs []*aggregation.AggFuncDesc, gbyCols []*expression.Column, join *LogicalJoin, childIdx int) (_ LogicalPlan, err error) { + child := join.children[childIdx] + if aggregation.IsAllFirstRow(aggFuncs) { + return child, nil + } + // If the join is multiway-join, we forbid pushing down. + if _, ok := join.children[childIdx].(*LogicalJoin); ok { + return child, nil + } + tmpSchema := expression.NewSchema(gbyCols...) + for _, key := range child.Schema().Keys { + if tmpSchema.ColumnsIndices(key) != nil { + return child, nil + } + } + agg, err := a.makeNewAgg(join.ctx, aggFuncs, gbyCols) + if err != nil { + return nil, err + } + agg.SetChildren(child) + // If agg has no group-by item, it will return a default value, which may cause some bugs. + // So here we add a group-by item forcely. + if len(agg.GroupByItems) == 0 { + agg.GroupByItems = []expression.Expression{&expression.Constant{ + Value: types.NewDatum(0), + RetType: types.NewFieldType(mysql.TypeLong)}} + } + if (childIdx == 0 && join.JoinType == RightOuterJoin) || (childIdx == 1 && join.JoinType == LeftOuterJoin) { + var existsDefaultValues bool + join.DefaultValues, existsDefaultValues = a.getDefaultValues(agg) + if !existsDefaultValues { + return child, nil + } + } + return agg, nil +} + +func (a *aggregationPushDownSolver) getDefaultValues(agg *LogicalAggregation) ([]types.Datum, bool) { + defaultValues := make([]types.Datum, 0, agg.Schema().Len()) + for _, aggFunc := range agg.AggFuncs { + value, existsDefaultValue := aggFunc.EvalNullValueInOuterJoin(agg.ctx, agg.children[0].Schema()) + if !existsDefaultValue { + return nil, false + } + defaultValues = append(defaultValues, value) + } + return defaultValues, true +} + +func (a *aggregationPushDownSolver) checkAnyCountAndSum(aggFuncs []*aggregation.AggFuncDesc) bool { + for _, fun := range aggFuncs { + if fun.Name == ast.AggFuncSum || fun.Name == ast.AggFuncCount { + return true + } + } + return false +} + +func (a *aggregationPushDownSolver) makeNewAgg(ctx sessionctx.Context, aggFuncs []*aggregation.AggFuncDesc, gbyCols []*expression.Column) (*LogicalAggregation, error) { + agg := LogicalAggregation{ + GroupByItems: expression.Column2Exprs(gbyCols), + groupByCols: gbyCols, + }.Init(ctx) + aggLen := len(aggFuncs) + len(gbyCols) + newAggFuncDescs := make([]*aggregation.AggFuncDesc, 0, aggLen) + schema := expression.NewSchema(make([]*expression.Column, 0, aggLen)...) + for _, aggFunc := range aggFuncs { + var newFuncs []*aggregation.AggFuncDesc + newFuncs, schema = a.decompose(ctx, aggFunc, schema) + newAggFuncDescs = append(newAggFuncDescs, newFuncs...) 
+ } + for _, gbyCol := range gbyCols { + firstRow, err := aggregation.NewAggFuncDesc(agg.ctx, ast.AggFuncFirstRow, []expression.Expression{gbyCol}) + if err != nil { + return nil, err + } + newCol, _ := gbyCol.Clone().(*expression.Column) + newCol.RetType = firstRow.RetTp + newAggFuncDescs = append(newAggFuncDescs, firstRow) + schema.Append(newCol) + } + agg.AggFuncs = newAggFuncDescs + agg.SetSchema(schema) + return agg, nil +} + +func (a *aggregationPushDownSolver) optimize(ctx context.Context, p LogicalPlan) (LogicalPlan, error) { + if !p.SCtx().GetSessionVars().AllowAggPushDown { + return p, nil + } + return a.aggPushDown(p) +} + +// aggPushDown tries to push down aggregate functions to join paths. +func (a *aggregationPushDownSolver) aggPushDown(p LogicalPlan) (_ LogicalPlan, err error) { + if agg, ok := p.(*LogicalAggregation); ok { + proj := a.tryToEliminateAggregation(agg) + if proj != nil { + p = proj + } else { + child := agg.children[0] + if join, ok1 := child.(*LogicalJoin); ok1 && a.checkValidJoin(join) { + if valid, leftAggFuncs, rightAggFuncs, leftGbyCols, rightGbyCols := a.splitAggFuncsAndGbyCols(agg, join); valid { + var lChild, rChild LogicalPlan + // If there exist count or sum functions in left join path, we can't push any + // aggregate function into right join path. + rightInvalid := a.checkAnyCountAndSum(leftAggFuncs) + leftInvalid := a.checkAnyCountAndSum(rightAggFuncs) + if rightInvalid { + rChild = join.children[1] + } else { + rChild, err = a.tryToPushDownAgg(rightAggFuncs, rightGbyCols, join, 1) + if err != nil { + return nil, err + } + } + if leftInvalid { + lChild = join.children[0] + } else { + lChild, err = a.tryToPushDownAgg(leftAggFuncs, leftGbyCols, join, 0) + if err != nil { + return nil, err + } + } + join.SetChildren(lChild, rChild) + join.SetSchema(expression.MergeSchema(lChild.Schema(), rChild.Schema())) + buildKeyInfo(join) + proj := a.tryToEliminateAggregation(agg) + if proj != nil { + p = proj + } + } + } else if proj, ok1 := child.(*LogicalProjection); ok1 { + // TODO: This optimization is not always reasonable. We have not supported pushing projection to kv layer yet, + // so we must do this optimization. + for i, gbyItem := range agg.GroupByItems { + agg.GroupByItems[i] = expression.ColumnSubstitute(gbyItem, proj.schema, proj.Exprs) + } + agg.collectGroupByColumns() + for _, aggFunc := range agg.AggFuncs { + newArgs := make([]expression.Expression, 0, len(aggFunc.Args)) + for _, arg := range aggFunc.Args { + newArgs = append(newArgs, expression.ColumnSubstitute(arg, proj.schema, proj.Exprs)) + } + aggFunc.Args = newArgs + } + projChild := proj.children[0] + agg.SetChildren(projChild) + } + } + } + newChildren := make([]LogicalPlan, 0, len(p.Children())) + for _, child := range p.Children() { + newChild, err := a.aggPushDown(child) + if err != nil { + return nil, err + } + newChildren = append(newChildren, newChild) + } + p.SetChildren(newChildren...) + return p, nil +} + +func (*aggregationPushDownSolver) name() string { + return "aggregation_push_down" +} diff --git a/planner/core/rule_build_key_info.go b/planner/core/rule_build_key_info.go new file mode 100644 index 0000000..cc3cee0 --- /dev/null +++ b/planner/core/rule_build_key_info.go @@ -0,0 +1,241 @@ +// Copyright 2017 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package core + +import ( + "context" + "github.com/pingcap/tidb/expression" + "github.com/pingcap/tidb/parser/model" + "github.com/pingcap/tidb/parser/mysql" +) + +type buildKeySolver struct{} + +func (s *buildKeySolver) optimize(ctx context.Context, lp LogicalPlan) (LogicalPlan, error) { + buildKeyInfo(lp) + return lp, nil +} + +// buildKeyInfo recursively calls LogicalPlan's BuildKeyInfo method. +func buildKeyInfo(lp LogicalPlan) { + for _, child := range lp.Children() { + buildKeyInfo(child) + } + childSchema := make([]*expression.Schema, len(lp.Children())) + for i, child := range lp.Children() { + childSchema[i] = child.Schema() + } + lp.BuildKeyInfo(lp.Schema(), childSchema) +} + +// BuildKeyInfo implements LogicalPlan BuildKeyInfo interface. +func (la *LogicalAggregation) BuildKeyInfo(selfSchema *expression.Schema, childSchema []*expression.Schema) { + if la.IsPartialModeAgg() { + return + } + la.logicalSchemaProducer.BuildKeyInfo(selfSchema, childSchema) + for _, key := range childSchema[0].Keys { + indices := selfSchema.ColumnsIndices(key) + if indices == nil { + continue + } + newKey := make([]*expression.Column, 0, len(key)) + for _, i := range indices { + newKey = append(newKey, selfSchema.Columns[i]) + } + selfSchema.Keys = append(selfSchema.Keys, newKey) + } + if len(la.groupByCols) == len(la.GroupByItems) && len(la.GroupByItems) > 0 { + indices := selfSchema.ColumnsIndices(la.groupByCols) + if indices != nil { + newKey := make([]*expression.Column, 0, len(indices)) + for _, i := range indices { + newKey = append(newKey, selfSchema.Columns[i]) + } + selfSchema.Keys = append(selfSchema.Keys, newKey) + } + } +} + +// BuildKeyInfo implements LogicalPlan BuildKeyInfo interface. +func (p *LogicalSelection) BuildKeyInfo(selfSchema *expression.Schema, childSchema []*expression.Schema) { + p.baseLogicalPlan.BuildKeyInfo(selfSchema, childSchema) +} + +// BuildKeyInfo implements LogicalPlan BuildKeyInfo interface. +func (p *LogicalLimit) BuildKeyInfo(selfSchema *expression.Schema, childSchema []*expression.Schema) { + p.baseLogicalPlan.BuildKeyInfo(selfSchema, childSchema) +} + +// A bijection exists between columns of a projection's schema and this projection's Exprs. +// Sometimes we need a schema made by expr of Exprs to convert a column in child's schema to a column in this projection's Schema. +func (p *LogicalProjection) buildSchemaByExprs(selfSchema *expression.Schema) *expression.Schema { + schema := expression.NewSchema(make([]*expression.Column, 0, selfSchema.Len())...) + for _, expr := range p.Exprs { + if col, isCol := expr.(*expression.Column); isCol { + schema.Append(col) + } else { + // If the expression is not a column, we add a column to occupy the position. + schema.Append(&expression.Column{ + UniqueID: p.ctx.GetSessionVars().AllocPlanColumnID(), + RetType: expr.GetType(), + }) + } + } + return schema +} + +// BuildKeyInfo implements LogicalPlan BuildKeyInfo interface. 
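LogicalAggregation.BuildKeyInfo above registers the group-by columns as a key because grouping leaves at most one output row per distinct group-by value. A minimal illustration of that invariant (the row and group shapes here are assumptions for the example only):

package main

import "fmt"

// After GROUP BY a, every distinct value of a appears in at most one output
// row, which is why the group-by columns can be registered as a key of the
// aggregation's own schema.
func main() {
	rows := []struct{ a, b int }{{1, 10}, {1, 20}, {2, 30}}
	groups := make(map[int]int) // a -> count(b)
	for _, r := range rows {
		groups[r.a]++
	}
	fmt.Println(len(groups)) // 2 output rows, one per distinct value of a
}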
+func (p *LogicalProjection) BuildKeyInfo(selfSchema *expression.Schema, childSchema []*expression.Schema) { + p.logicalSchemaProducer.BuildKeyInfo(selfSchema, childSchema) + schema := p.buildSchemaByExprs(selfSchema) + for _, key := range childSchema[0].Keys { + indices := schema.ColumnsIndices(key) + if indices == nil { + continue + } + newKey := make([]*expression.Column, 0, len(key)) + for _, i := range indices { + newKey = append(newKey, selfSchema.Columns[i]) + } + selfSchema.Keys = append(selfSchema.Keys, newKey) + } +} + +// BuildKeyInfo implements LogicalPlan BuildKeyInfo interface. +func (p *LogicalJoin) BuildKeyInfo(selfSchema *expression.Schema, childSchema []*expression.Schema) { + p.logicalSchemaProducer.BuildKeyInfo(selfSchema, childSchema) + switch p.JoinType { + case InnerJoin, LeftOuterJoin, RightOuterJoin: + // If there is no equal conditions, then cartesian product can't be prevented and unique key information will destroy. + if len(p.EqualConditions) == 0 { + return + } + lOk := false + rOk := false + // Such as 'select * from t1 join t2 where t1.a = t2.a and t1.b = t2.b'. + // If one sides (a, b) is a unique key, then the unique key information is remained. + // But we don't consider this situation currently. + // Only key made by one column is considered now. + for _, expr := range p.EqualConditions { + ln := expr.GetArgs()[0].(*expression.Column) + rn := expr.GetArgs()[1].(*expression.Column) + for _, key := range childSchema[0].Keys { + if len(key) == 1 && key[0].Equal(p.ctx, ln) { + lOk = true + break + } + } + for _, key := range childSchema[1].Keys { + if len(key) == 1 && key[0].Equal(p.ctx, rn) { + rOk = true + break + } + } + } + // For inner join, if one side of one equal condition is unique key, + // another side's unique key information will all be reserved. + // If it's an outer join, NULL value will fill some position, which will destroy the unique key information. + if lOk && p.JoinType != LeftOuterJoin { + selfSchema.Keys = append(selfSchema.Keys, childSchema[1].Keys...) + } + if rOk && p.JoinType != RightOuterJoin { + selfSchema.Keys = append(selfSchema.Keys, childSchema[0].Keys...) + } + } +} + +// checkIndexCanBeKey checks whether an Index can be a Key in schema. +func checkIndexCanBeKey(idx *model.IndexInfo, columns []*model.ColumnInfo, schema *expression.Schema) expression.KeyInfo { + if !idx.Unique { + return nil + } + newKey := make([]*expression.Column, 0, len(idx.Columns)) + ok := true + for _, idxCol := range idx.Columns { + // The columns of this index should all occur in column schema. + // Since null value could be duplicate in unique key. So we check NotNull flag of every column. + find := false + for i, col := range columns { + if idxCol.Name.L == col.Name.L { + if !mysql.HasNotNullFlag(col.Flag) { + break + } + newKey = append(newKey, schema.Columns[i]) + find = true + break + } + } + if !find { + ok = false + break + } + } + if ok { + return newKey + } + return nil +} + +// BuildKeyInfo implements LogicalPlan BuildKeyInfo interface. 
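checkIndexCanBeKey above accepts a unique index as a schema key only when every indexed column carries the NOT NULL flag, since NULL values may repeat inside a unique index. A stripped-down restatement of that rule (idxCol and canBeKey are illustrative stand-ins, not the model.IndexInfo types):

package main

import "fmt"

// canBeKey restates the rule in checkIndexCanBeKey: a unique index yields a
// key only if every indexed column is NOT NULL.
type idxCol struct {
	name    string
	notNull bool
}

func canBeKey(unique bool, cols []idxCol) bool {
	if !unique {
		return false
	}
	for _, c := range cols {
		if !c.notNull {
			return false
		}
	}
	return true
}

func main() {
	fmt.Println(canBeKey(true, []idxCol{{"a", true}, {"b", true}}))  // true
	fmt.Println(canBeKey(true, []idxCol{{"a", true}, {"b", false}})) // false: nullable column
}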
+func (ds *DataSource) BuildKeyInfo(selfSchema *expression.Schema, childSchema []*expression.Schema) { + selfSchema.Keys = nil + for _, path := range ds.possibleAccessPaths { + if path.IsTablePath { + continue + } + if newKey := checkIndexCanBeKey(path.Index, ds.Columns, selfSchema); newKey != nil { + selfSchema.Keys = append(selfSchema.Keys, newKey) + } + } + if ds.tableInfo.PKIsHandle { + for i, col := range ds.Columns { + if mysql.HasPriKeyFlag(col.Flag) { + selfSchema.Keys = append(selfSchema.Keys, []*expression.Column{selfSchema.Columns[i]}) + break + } + } + } +} + +// BuildKeyInfo implements LogicalPlan BuildKeyInfo interface. +func (ts *LogicalTableScan) BuildKeyInfo(selfSchema *expression.Schema, childSchema []*expression.Schema) { + ts.Source.BuildKeyInfo(selfSchema, childSchema) +} + +// BuildKeyInfo implements LogicalPlan BuildKeyInfo interface. +func (is *LogicalIndexScan) BuildKeyInfo(selfSchema *expression.Schema, childSchema []*expression.Schema) { + selfSchema.Keys = nil + for _, path := range is.Source.possibleAccessPaths { + if path.IsTablePath { + continue + } + if newKey := checkIndexCanBeKey(path.Index, is.Columns, selfSchema); newKey != nil { + selfSchema.Keys = append(selfSchema.Keys, newKey) + } + } + handle := is.getPKIsHandleCol(selfSchema) + if handle != nil { + selfSchema.Keys = append(selfSchema.Keys, []*expression.Column{handle}) + } +} + +// BuildKeyInfo implements LogicalPlan BuildKeyInfo interface. +func (tg *TiKVSingleGather) BuildKeyInfo(selfSchema *expression.Schema, childSchema []*expression.Schema) { + selfSchema.Keys = childSchema[0].Keys +} + +func (*buildKeySolver) name() string { + return "build_keys" +} diff --git a/planner/core/rule_column_pruning.go b/planner/core/rule_column_pruning.go new file mode 100644 index 0000000..cabbd54 --- /dev/null +++ b/planner/core/rule_column_pruning.go @@ -0,0 +1,266 @@ +// Copyright 2016 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package core + +import ( + "context" + + "github.com/pingcap/tidb/expression" + "github.com/pingcap/tidb/expression/aggregation" + "github.com/pingcap/tidb/parser/ast" + "github.com/pingcap/tidb/parser/model" + "github.com/pingcap/tidb/parser/mysql" + "github.com/pingcap/tidb/types" +) + +type columnPruner struct { +} + +func (s *columnPruner) optimize(ctx context.Context, lp LogicalPlan) (LogicalPlan, error) { + err := lp.PruneColumns(lp.Schema().Columns) + return lp, err +} + +func getUsedList(usedCols []*expression.Column, schema *expression.Schema) []bool { + tmpSchema := expression.NewSchema(usedCols...) + used := make([]bool, schema.Len()) + for i, col := range schema.Columns { + used[i] = tmpSchema.Contains(col) + } + return used +} + +// ExprsHasSideEffects checks if any of the expressions has side effects. +func ExprsHasSideEffects(exprs []expression.Expression) bool { + for _, expr := range exprs { + if exprHasSetVar(expr) { + return true + } + } + return false +} + +// exprHasSetVar checks if the expression has SetVar function. 
+func exprHasSetVar(expr expression.Expression) bool { + scalaFunc, isScalaFunc := expr.(*expression.ScalarFunction) + if !isScalaFunc { + return false + } + if scalaFunc.FuncName.L == ast.SetVar { + return true + } + for _, arg := range scalaFunc.GetArgs() { + if exprHasSetVar(arg) { + return true + } + } + return false +} + +// PruneColumns implements LogicalPlan interface. +// If any expression has SetVar function or Sleep function, we do not prune it. +func (p *LogicalProjection) PruneColumns(parentUsedCols []*expression.Column) error { + child := p.children[0] + used := getUsedList(parentUsedCols, p.schema) + + for i := len(used) - 1; i >= 0; i-- { + if !used[i] && !exprHasSetVar(p.Exprs[i]) { + p.schema.Columns = append(p.schema.Columns[:i], p.schema.Columns[i+1:]...) + p.Exprs = append(p.Exprs[:i], p.Exprs[i+1:]...) + } + } + selfUsedCols := make([]*expression.Column, 0, len(p.Exprs)) + selfUsedCols = expression.ExtractColumnsFromExpressions(selfUsedCols, p.Exprs, nil) + return child.PruneColumns(selfUsedCols) +} + +// PruneColumns implements LogicalPlan interface. +func (p *LogicalSelection) PruneColumns(parentUsedCols []*expression.Column) error { + child := p.children[0] + parentUsedCols = expression.ExtractColumnsFromExpressions(parentUsedCols, p.Conditions, nil) + return child.PruneColumns(parentUsedCols) +} + +// PruneColumns implements LogicalPlan interface. +func (la *LogicalAggregation) PruneColumns(parentUsedCols []*expression.Column) error { + child := la.children[0] + used := getUsedList(parentUsedCols, la.Schema()) + + for i := len(used) - 1; i >= 0; i-- { + if !used[i] { + la.schema.Columns = append(la.schema.Columns[:i], la.schema.Columns[i+1:]...) + la.AggFuncs = append(la.AggFuncs[:i], la.AggFuncs[i+1:]...) + } + } + var selfUsedCols []*expression.Column + for _, aggrFunc := range la.AggFuncs { + selfUsedCols = expression.ExtractColumnsFromExpressions(selfUsedCols, aggrFunc.Args, nil) + } + if len(la.AggFuncs) == 0 { + // If all the aggregate functions are pruned, we should add an aggregate function to keep the correctness. + one, err := aggregation.NewAggFuncDesc(la.ctx, ast.AggFuncFirstRow, []expression.Expression{expression.One}) + if err != nil { + return err + } + la.AggFuncs = []*aggregation.AggFuncDesc{one} + col := &expression.Column{ + UniqueID: la.ctx.GetSessionVars().AllocPlanColumnID(), + RetType: types.NewFieldType(mysql.TypeLonglong), + } + la.schema.Columns = []*expression.Column{col} + } + + if len(la.GroupByItems) > 0 { + for i := len(la.GroupByItems) - 1; i >= 0; i-- { + cols := expression.ExtractColumns(la.GroupByItems[i]) + if len(cols) == 0 { + la.GroupByItems = append(la.GroupByItems[:i], la.GroupByItems[i+1:]...) + } else { + selfUsedCols = append(selfUsedCols, cols...) + } + } + // If all the group by items are pruned, we should add a constant 1 to keep the correctness. + // Because `select count(*) from t` is different from `select count(*) from t group by 1`. + if len(la.GroupByItems) == 0 { + la.GroupByItems = []expression.Expression{expression.One} + } + } + return child.PruneColumns(selfUsedCols) +} + +// PruneColumns implements LogicalPlan interface. +// If any expression can view as a constant in execution stage, such as correlated column, constant, +// we do prune them. Note that we can't prune the expressions contain non-deterministic functions, such as rand(). 
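The pruning loops above (in LogicalProjection.PruneColumns and LogicalAggregation.PruneColumns) walk the used bitmap from the back so that deleting a column never shifts the indexes still to be visited. A standalone sketch of the same pattern, with strings in place of schema columns:

package main

import "fmt"

// prune removes the entries whose used flag is false, iterating from the end
// towards the front so that deletions do not invalidate earlier indexes.
func prune(cols []string, used []bool) []string {
	for i := len(used) - 1; i >= 0; i-- {
		if !used[i] {
			cols = append(cols[:i], cols[i+1:]...)
		}
	}
	return cols
}

func main() {
	cols := []string{"a", "b", "c", "d"}
	used := []bool{true, false, true, false}
	fmt.Println(prune(cols, used)) // [a c]
}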
+func (ls *LogicalSort) PruneColumns(parentUsedCols []*expression.Column) error { + child := ls.children[0] + for i := len(ls.ByItems) - 1; i >= 0; i-- { + cols := expression.ExtractColumns(ls.ByItems[i].Expr) + if len(cols) == 0 { + if expression.IsMutableEffectsExpr(ls.ByItems[i].Expr) { + continue + } + ls.ByItems = append(ls.ByItems[:i], ls.ByItems[i+1:]...) + } else if ls.ByItems[i].Expr.GetType().Tp == mysql.TypeNull { + ls.ByItems = append(ls.ByItems[:i], ls.ByItems[i+1:]...) + } else { + parentUsedCols = append(parentUsedCols, cols...) + } + } + return child.PruneColumns(parentUsedCols) +} + +// PruneColumns implements LogicalPlan interface. +func (p *LogicalUnionScan) PruneColumns(parentUsedCols []*expression.Column) error { + parentUsedCols = append(parentUsedCols, p.handleCol) + return p.children[0].PruneColumns(parentUsedCols) +} + +// PruneColumns implements LogicalPlan interface. +func (ds *DataSource) PruneColumns(parentUsedCols []*expression.Column) error { + used := getUsedList(parentUsedCols, ds.schema) + + var ( + handleCol *expression.Column + handleColInfo *model.ColumnInfo + ) + if ds.handleCol != nil { + handleCol = ds.handleCol + handleColInfo = ds.Columns[ds.schema.ColumnIndex(handleCol)] + } + for i := len(used) - 1; i >= 0; i-- { + if !used[i] { + ds.schema.Columns = append(ds.schema.Columns[:i], ds.schema.Columns[i+1:]...) + ds.Columns = append(ds.Columns[:i], ds.Columns[i+1:]...) + } + } + // For SQL like `select 1 from t`, tikv's response will be empty if no column is in schema. + // So we'll force to push one if schema doesn't have any column. + if ds.schema.Len() == 0 { + if handleCol == nil { + handleCol = ds.newExtraHandleSchemaCol() + handleColInfo = model.NewExtraHandleColInfo() + } + ds.Columns = append(ds.Columns, handleColInfo) + ds.schema.Append(handleCol) + } + if ds.handleCol != nil && ds.schema.ColumnIndex(ds.handleCol) == -1 { + ds.handleCol = nil + } + return nil +} + +// PruneColumns implements LogicalPlan interface. +func (p *LogicalTableDual) PruneColumns(parentUsedCols []*expression.Column) error { + used := getUsedList(parentUsedCols, p.Schema()) + + for i := len(used) - 1; i >= 0; i-- { + if !used[i] { + p.schema.Columns = append(p.schema.Columns[:i], p.schema.Columns[i+1:]...) + } + } + return nil +} + +func (p *LogicalJoin) extractUsedCols(parentUsedCols []*expression.Column) (leftCols []*expression.Column, rightCols []*expression.Column) { + for _, eqCond := range p.EqualConditions { + parentUsedCols = append(parentUsedCols, expression.ExtractColumns(eqCond)...) + } + for _, leftCond := range p.LeftConditions { + parentUsedCols = append(parentUsedCols, expression.ExtractColumns(leftCond)...) + } + for _, rightCond := range p.RightConditions { + parentUsedCols = append(parentUsedCols, expression.ExtractColumns(rightCond)...) + } + for _, otherCond := range p.OtherConditions { + parentUsedCols = append(parentUsedCols, expression.ExtractColumns(otherCond)...) + } + lChild := p.children[0] + rChild := p.children[1] + for _, col := range parentUsedCols { + if lChild.Schema().Contains(col) { + leftCols = append(leftCols, col) + } else if rChild.Schema().Contains(col) { + rightCols = append(rightCols, col) + } + } + return leftCols, rightCols +} + +func (p *LogicalJoin) mergeSchema() { + lChild := p.children[0] + rChild := p.children[1] + p.schema = expression.MergeSchema(lChild.Schema(), rChild.Schema()) +} + +// PruneColumns implements LogicalPlan interface. 
+func (p *LogicalJoin) PruneColumns(parentUsedCols []*expression.Column) error { + leftCols, rightCols := p.extractUsedCols(parentUsedCols) + + err := p.children[0].PruneColumns(leftCols) + if err != nil { + return err + } + + err = p.children[1].PruneColumns(rightCols) + if err != nil { + return err + } + + p.mergeSchema() + return nil +} + +func (*columnPruner) name() string { + return "column_prune" +} diff --git a/planner/core/rule_eliminate_projection.go b/planner/core/rule_eliminate_projection.go new file mode 100644 index 0000000..81722f4 --- /dev/null +++ b/planner/core/rule_eliminate_projection.go @@ -0,0 +1,219 @@ +// Copyright 2016 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package core + +import ( + "context" + + "github.com/pingcap/tidb/expression" +) + +// canProjectionBeEliminatedLoose checks whether a projection can be eliminated, +// returns true if every expression is a single column. +func canProjectionBeEliminatedLoose(p *LogicalProjection) bool { + for _, expr := range p.Exprs { + _, ok := expr.(*expression.Column) + if !ok { + return false + } + } + return true +} + +// canProjectionBeEliminatedStrict checks whether a projection can be +// eliminated, returns true if the projection just copy its child's output. +func canProjectionBeEliminatedStrict(p *PhysicalProjection) bool { + if p.Schema().Len() == 0 { + return true + } + child := p.Children()[0] + if p.Schema().Len() != child.Schema().Len() { + return false + } + for i, expr := range p.Exprs { + col, ok := expr.(*expression.Column) + if !ok || !col.Equal(nil, child.Schema().Columns[i]) { + return false + } + } + return true +} + +func resolveColumnAndReplace(origin *expression.Column, replace map[string]*expression.Column) { + dst := replace[string(origin.HashCode(nil))] + if dst != nil { + retType := origin.RetType + *origin = *dst + origin.RetType = retType + } +} + +// ResolveExprAndReplace replaces columns fields of expressions by children logical plans. +func ResolveExprAndReplace(origin expression.Expression, replace map[string]*expression.Column) { + switch expr := origin.(type) { + case *expression.Column: + resolveColumnAndReplace(expr, replace) + case *expression.ScalarFunction: + for _, arg := range expr.GetArgs() { + ResolveExprAndReplace(arg, replace) + } + } +} + +func doPhysicalProjectionElimination(p PhysicalPlan) PhysicalPlan { + for i, child := range p.Children() { + p.Children()[i] = doPhysicalProjectionElimination(child) + } + + proj, isProj := p.(*PhysicalProjection) + if !isProj || !canProjectionBeEliminatedStrict(proj) { + return p + } + child := p.Children()[0] + return child +} + +// eliminatePhysicalProjection should be called after physical optimization to +// eliminate the redundant projection left after logical projection elimination. 
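+// For example, a PhysicalProjection whose expressions are exactly its child's output
+// columns, in the same order, copies rows unchanged and is removed; the caller-visible
+// schema columns are then rewired to the columns of the new plan root.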
+func eliminatePhysicalProjection(p PhysicalPlan) PhysicalPlan { + oldSchema := p.Schema() + newRoot := doPhysicalProjectionElimination(p) + newCols := newRoot.Schema().Columns + for i, oldCol := range oldSchema.Columns { + oldCol.Index = newCols[i].Index + oldCol.ID = newCols[i].ID + oldCol.UniqueID = newCols[i].UniqueID + newRoot.Schema().Columns[i] = oldCol + } + return newRoot +} + +type projectionEliminator struct { +} + +// optimize implements the logicalOptRule interface. +func (pe *projectionEliminator) optimize(ctx context.Context, lp LogicalPlan) (LogicalPlan, error) { + root := pe.eliminate(lp, make(map[string]*expression.Column), false) + return root, nil +} + +// eliminate eliminates the redundant projection in a logical plan. +func (pe *projectionEliminator) eliminate(p LogicalPlan, replace map[string]*expression.Column, canEliminate bool) LogicalPlan { + proj, isProj := p.(*LogicalProjection) + childFlag := canEliminate + if _, isAgg := p.(*LogicalAggregation); isAgg || isProj { + childFlag = true + } + for i, child := range p.Children() { + p.Children()[i] = pe.eliminate(child, replace, childFlag) + } + + switch x := p.(type) { + case *LogicalJoin: + x.schema = buildLogicalJoinSchema(x.JoinType, x) + default: + for _, dst := range p.Schema().Columns { + resolveColumnAndReplace(dst, replace) + } + } + p.replaceExprColumns(replace) + if isProj { + if child, ok := p.Children()[0].(*LogicalProjection); ok && !ExprsHasSideEffects(child.Exprs) { + for i := range proj.Exprs { + proj.Exprs[i] = ReplaceColumnOfExpr(proj.Exprs[i], child, child.Schema()) + } + p.Children()[0] = child.Children()[0] + } + } + + if !(isProj && canEliminate && canProjectionBeEliminatedLoose(proj)) { + return p + } + exprs := proj.Exprs + for i, col := range proj.Schema().Columns { + replace[string(col.HashCode(nil))] = exprs[i].(*expression.Column) + } + return p.Children()[0] +} + +// ReplaceColumnOfExpr replaces column of expression by another LogicalProjection. 
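+// For example, if the child projection defines column col#3 as `a + 1`, an expression
+// `col#3 * 2` in the parent is rewritten to `(a + 1) * 2`.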
+func ReplaceColumnOfExpr(expr expression.Expression, proj *LogicalProjection, schema *expression.Schema) expression.Expression { + switch v := expr.(type) { + case *expression.Column: + idx := schema.ColumnIndex(v) + if idx != -1 && idx < len(proj.Exprs) { + return proj.Exprs[idx] + } + case *expression.ScalarFunction: + for i := range v.GetArgs() { + v.GetArgs()[i] = ReplaceColumnOfExpr(v.GetArgs()[i], proj, schema) + } + } + return expr +} + +func (p *LogicalJoin) replaceExprColumns(replace map[string]*expression.Column) { + for _, equalExpr := range p.EqualConditions { + ResolveExprAndReplace(equalExpr, replace) + } + for _, leftExpr := range p.LeftConditions { + ResolveExprAndReplace(leftExpr, replace) + } + for _, rightExpr := range p.RightConditions { + ResolveExprAndReplace(rightExpr, replace) + } + for _, otherExpr := range p.OtherConditions { + ResolveExprAndReplace(otherExpr, replace) + } +} + +func (p *LogicalProjection) replaceExprColumns(replace map[string]*expression.Column) { + for _, expr := range p.Exprs { + ResolveExprAndReplace(expr, replace) + } +} + +func (la *LogicalAggregation) replaceExprColumns(replace map[string]*expression.Column) { + for _, agg := range la.AggFuncs { + for _, aggExpr := range agg.Args { + ResolveExprAndReplace(aggExpr, replace) + } + } + for _, gbyItem := range la.GroupByItems { + ResolveExprAndReplace(gbyItem, replace) + } + la.collectGroupByColumns() +} + +func (p *LogicalSelection) replaceExprColumns(replace map[string]*expression.Column) { + for _, expr := range p.Conditions { + ResolveExprAndReplace(expr, replace) + } +} + +func (ls *LogicalSort) replaceExprColumns(replace map[string]*expression.Column) { + for _, byItem := range ls.ByItems { + ResolveExprAndReplace(byItem.Expr, replace) + } +} + +func (lt *LogicalTopN) replaceExprColumns(replace map[string]*expression.Column) { + for _, byItem := range lt.ByItems { + ResolveExprAndReplace(byItem.Expr, replace) + } +} + +func (*projectionEliminator) name() string { + return "projection_eliminate" +} diff --git a/planner/core/rule_inject_extra_projection.go b/planner/core/rule_inject_extra_projection.go new file mode 100644 index 0000000..8108caf --- /dev/null +++ b/planner/core/rule_inject_extra_projection.go @@ -0,0 +1,188 @@ +// Copyright 2018 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package core + +import ( + "github.com/pingcap/tidb/expression" + "github.com/pingcap/tidb/expression/aggregation" +) + +// injectExtraProjection is used to extract the expressions of specific +// operators into a physical Projection operator and inject the Projection below +// the operators. Thus we can accelerate the expression evaluation by eager +// evaluation. +func injectExtraProjection(plan PhysicalPlan) PhysicalPlan { + return NewProjInjector().inject(plan) +} + +type projInjector struct { +} + +// NewProjInjector builds a projInjector. 
+func NewProjInjector() *projInjector { + return &projInjector{} +} + +func (pe *projInjector) inject(plan PhysicalPlan) PhysicalPlan { + for i, child := range plan.Children() { + plan.Children()[i] = pe.inject(child) + } + + switch p := plan.(type) { + case *PhysicalHashAgg: + plan = InjectProjBelowAgg(plan, p.AggFuncs, p.GroupByItems) + case *PhysicalSort: + plan = InjectProjBelowSort(p, p.ByItems) + case *PhysicalTopN: + plan = InjectProjBelowSort(p, p.ByItems) + } + return plan +} + +// InjectProjBelowAgg injects a ProjOperator below AggOperator. If all the args +// of `aggFuncs`, and all the item of `groupByItems` are columns or constants, +// we do not need to build the `proj`. +func InjectProjBelowAgg(aggPlan PhysicalPlan, aggFuncs []*aggregation.AggFuncDesc, groupByItems []expression.Expression) PhysicalPlan { + hasScalarFunc := false + + for i := 0; !hasScalarFunc && i < len(aggFuncs); i++ { + for _, arg := range aggFuncs[i].Args { + _, isScalarFunc := arg.(*expression.ScalarFunction) + hasScalarFunc = hasScalarFunc || isScalarFunc + } + } + for i := 0; !hasScalarFunc && i < len(groupByItems); i++ { + _, isScalarFunc := groupByItems[i].(*expression.ScalarFunction) + hasScalarFunc = hasScalarFunc || isScalarFunc + } + if !hasScalarFunc { + return aggPlan + } + + projSchemaCols := make([]*expression.Column, 0, len(aggFuncs)+len(groupByItems)) + projExprs := make([]expression.Expression, 0, cap(projSchemaCols)) + cursor := 0 + + for _, f := range aggFuncs { + for i, arg := range f.Args { + if _, isCnst := arg.(*expression.Constant); isCnst { + continue + } + projExprs = append(projExprs, arg) + newArg := &expression.Column{ + UniqueID: aggPlan.SCtx().GetSessionVars().AllocPlanColumnID(), + RetType: arg.GetType(), + Index: cursor, + } + projSchemaCols = append(projSchemaCols, newArg) + f.Args[i] = newArg + cursor++ + } + } + + for i, item := range groupByItems { + if _, isCnst := item.(*expression.Constant); isCnst { + continue + } + projExprs = append(projExprs, item) + newArg := &expression.Column{ + UniqueID: aggPlan.SCtx().GetSessionVars().AllocPlanColumnID(), + RetType: item.GetType(), + Index: cursor, + } + projSchemaCols = append(projSchemaCols, newArg) + groupByItems[i] = newArg + cursor++ + } + + child := aggPlan.Children()[0] + prop := aggPlan.GetChildReqProps(0).Clone() + proj := PhysicalProjection{ + Exprs: projExprs, + }.Init(aggPlan.SCtx(), child.statsInfo().ScaleByExpectCnt(prop.ExpectedCnt), prop) + proj.SetSchema(expression.NewSchema(projSchemaCols...)) + proj.SetChildren(child) + + aggPlan.SetChildren(proj) + return aggPlan +} + +// InjectProjBelowSort extracts the ScalarFunctions of `orderByItems` into a +// PhysicalProjection and injects it below PhysicalTopN/PhysicalSort. The schema +// of PhysicalSort and PhysicalTopN are the same as the schema of their +// children. When a projection is injected as the child of PhysicalSort and +// PhysicalTopN, some extra columns will be added into the schema of the +// Projection, thus we need to add another Projection upon them to prune the +// redundant columns. 
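+//
+// For example, sorting by `a + b` roughly becomes
+//   Projection(a, b) -> Sort(by col#aux) -> Projection(a, b, a+b as col#aux) -> child,
+// so `a + b` is evaluated once per row below the sort rather than during the comparisons.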
+func InjectProjBelowSort(p PhysicalPlan, orderByItems []*ByItems) PhysicalPlan { + hasScalarFunc, numOrderByItems := false, len(orderByItems) + for i := 0; !hasScalarFunc && i < numOrderByItems; i++ { + _, isScalarFunc := orderByItems[i].Expr.(*expression.ScalarFunction) + hasScalarFunc = hasScalarFunc || isScalarFunc + } + if !hasScalarFunc { + return p + } + + topProjExprs := make([]expression.Expression, 0, p.Schema().Len()) + for i := range p.Schema().Columns { + col := p.Schema().Columns[i].Clone().(*expression.Column) + col.Index = i + topProjExprs = append(topProjExprs, col) + } + topProj := PhysicalProjection{ + Exprs: topProjExprs, + }.Init(p.SCtx(), p.statsInfo(), nil) + topProj.SetSchema(p.Schema().Clone()) + topProj.SetChildren(p) + + childPlan := p.Children()[0] + bottomProjSchemaCols := make([]*expression.Column, 0, len(childPlan.Schema().Columns)+numOrderByItems) + bottomProjExprs := make([]expression.Expression, 0, len(childPlan.Schema().Columns)+numOrderByItems) + for _, col := range childPlan.Schema().Columns { + newCol := col.Clone().(*expression.Column) + newCol.Index = childPlan.Schema().ColumnIndex(newCol) + bottomProjSchemaCols = append(bottomProjSchemaCols, newCol) + bottomProjExprs = append(bottomProjExprs, newCol) + } + + for _, item := range orderByItems { + itemExpr := item.Expr + if _, isScalarFunc := itemExpr.(*expression.ScalarFunction); !isScalarFunc { + continue + } + bottomProjExprs = append(bottomProjExprs, itemExpr) + newArg := &expression.Column{ + UniqueID: p.SCtx().GetSessionVars().AllocPlanColumnID(), + RetType: itemExpr.GetType(), + Index: len(bottomProjSchemaCols), + } + bottomProjSchemaCols = append(bottomProjSchemaCols, newArg) + item.Expr = newArg + } + + childProp := p.GetChildReqProps(0).Clone() + bottomProj := PhysicalProjection{ + Exprs: bottomProjExprs, + }.Init(p.SCtx(), childPlan.statsInfo().ScaleByExpectCnt(childProp.ExpectedCnt), childProp) + bottomProj.SetSchema(expression.NewSchema(bottomProjSchemaCols...)) + bottomProj.SetChildren(childPlan) + p.SetChildren(bottomProj) + + if origChildProj, isChildProj := childPlan.(*PhysicalProjection); isChildProj { + refine4NeighbourProj(bottomProj, origChildProj) + } + + return topProj +} diff --git a/planner/core/rule_join_elimination.go b/planner/core/rule_join_elimination.go new file mode 100644 index 0000000..54853ba --- /dev/null +++ b/planner/core/rule_join_elimination.go @@ -0,0 +1,233 @@ +// Copyright 2018 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package core + +import ( + "context" + + "github.com/pingcap/tidb/expression" + "github.com/pingcap/tidb/parser/ast" + "github.com/pingcap/tidb/util/set" +) + +type outerJoinEliminator struct { +} + +// tryToEliminateOuterJoin will eliminate outer join plan base on the following rules +// 1. outer join elimination: For example left outer join, if the parent only use the +// columns from left table and the join key of right table(the inner table) is a unique +// key of the right table. the left outer join can be eliminated. +// 2. 
outer join elimination with duplicate agnostic aggregate functions: For example left outer join. +// If the parent only use the columns from left table with 'distinct' label. The left outer join can +// be eliminated. +func (o *outerJoinEliminator) tryToEliminateOuterJoin(p *LogicalJoin, aggCols []*expression.Column, parentCols []*expression.Column) (LogicalPlan, bool, error) { + var innerChildIdx int + switch p.JoinType { + case LeftOuterJoin: + innerChildIdx = 1 + case RightOuterJoin: + innerChildIdx = 0 + default: + return p, false, nil + } + + outerPlan := p.children[1^innerChildIdx] + innerPlan := p.children[innerChildIdx] + outerUniqueIDs := set.NewInt64Set() + for _, outerCol := range outerPlan.Schema().Columns { + outerUniqueIDs.Insert(outerCol.UniqueID) + } + matched := o.isColsAllFromOuterTable(parentCols, outerUniqueIDs) + if !matched { + return p, false, nil + } + // outer join elimination with duplicate agnostic aggregate functions + matched = o.isColsAllFromOuterTable(aggCols, outerUniqueIDs) + if matched { + return outerPlan, true, nil + } + // outer join elimination without duplicate agnostic aggregate functions + innerJoinKeys := o.extractInnerJoinKeys(p, innerChildIdx) + contain, err := o.isInnerJoinKeysContainUniqueKey(innerPlan, innerJoinKeys) + if err != nil { + return p, false, err + } + if contain { + return outerPlan, true, nil + } + contain, err = o.isInnerJoinKeysContainIndex(innerPlan, innerJoinKeys) + if err != nil { + return p, false, err + } + if contain { + return outerPlan, true, nil + } + + return p, false, nil +} + +// extract join keys as a schema for inner child of a outer join +func (o *outerJoinEliminator) extractInnerJoinKeys(join *LogicalJoin, innerChildIdx int) *expression.Schema { + joinKeys := make([]*expression.Column, 0, len(join.EqualConditions)) + for _, eqCond := range join.EqualConditions { + joinKeys = append(joinKeys, eqCond.GetArgs()[innerChildIdx].(*expression.Column)) + } + return expression.NewSchema(joinKeys...) +} + +// check whether the cols all from outer plan +func (o *outerJoinEliminator) isColsAllFromOuterTable(cols []*expression.Column, outerUniqueIDs set.Int64Set) bool { + // There are two cases "return false" here: + // 1. If cols represents aggCols, then "len(cols) == 0" means not all aggregate functions are duplicate agnostic before. + // 2. If cols represents parentCols, then "len(cols) == 0" means no parent logical plan of this join plan. 
+ if len(cols) == 0 { + return false + } + for _, col := range cols { + if !outerUniqueIDs.Exist(col.UniqueID) { + return false + } + } + return true +} + +// check whether one of unique keys sets is contained by inner join keys +func (o *outerJoinEliminator) isInnerJoinKeysContainUniqueKey(innerPlan LogicalPlan, joinKeys *expression.Schema) (bool, error) { + for _, keyInfo := range innerPlan.Schema().Keys { + joinKeysContainKeyInfo := true + for _, col := range keyInfo { + if !joinKeys.Contains(col) { + joinKeysContainKeyInfo = false + break + } + } + if joinKeysContainKeyInfo { + return true, nil + } + } + return false, nil +} + +// check whether one of index sets is contained by inner join index +func (o *outerJoinEliminator) isInnerJoinKeysContainIndex(innerPlan LogicalPlan, joinKeys *expression.Schema) (bool, error) { + ds, ok := innerPlan.(*DataSource) + if !ok { + return false, nil + } + for _, path := range ds.possibleAccessPaths { + if path.IsTablePath { + continue + } + if !path.Index.Unique { + continue + } + joinKeysContainIndex := true + for _, idxCol := range path.IdxCols { + if !joinKeys.Contains(idxCol) { + joinKeysContainIndex = false + break + } + } + if joinKeysContainIndex { + return true, nil + } + } + return false, nil +} + +// getDupAgnosticAggCols checks whether a LogicalPlan is LogicalAggregation. +// It extracts all the columns from the duplicate agnostic aggregate functions. +// The returned column set is nil if not all the aggregate functions are duplicate agnostic. +// Only the following functions are considered to be duplicate agnostic: +// 1. MAX(arg) +// 2. MIN(arg) +// 3. FIRST_ROW(arg) +func (o *outerJoinEliminator) getDupAgnosticAggCols( + p LogicalPlan, + oldAggCols []*expression.Column, // Reuse the original buffer. +) (isAgg bool, newAggCols []*expression.Column) { + agg, ok := p.(*LogicalAggregation) + if !ok { + return false, nil + } + newAggCols = oldAggCols[:0] + for _, aggDesc := range agg.AggFuncs { + if aggDesc.Name != ast.AggFuncFirstRow && + aggDesc.Name != ast.AggFuncMax && + aggDesc.Name != ast.AggFuncMin { + // If not all aggregate functions are duplicate agnostic, + // we should clean the aggCols, so `return true, newAggCols[:0]`. + return true, newAggCols[:0] + } + for _, expr := range aggDesc.Args { + newAggCols = append(newAggCols, expression.ExtractColumns(expr)...) + } + } + return true, newAggCols +} + +func (o *outerJoinEliminator) doOptimize(p LogicalPlan, aggCols []*expression.Column, parentCols []*expression.Column) (LogicalPlan, error) { + var err error + var isEliminated bool + for join, isJoin := p.(*LogicalJoin); isJoin; join, isJoin = p.(*LogicalJoin) { + p, isEliminated, err = o.tryToEliminateOuterJoin(join, aggCols, parentCols) + if err != nil { + return p, err + } + if !isEliminated { + break + } + } + + switch x := p.(type) { + case *LogicalProjection: + parentCols = parentCols[:0] + for _, expr := range x.Exprs { + parentCols = append(parentCols, expression.ExtractColumns(expr)...) + } + case *LogicalAggregation: + parentCols = parentCols[:0] + for _, groupByItem := range x.GroupByItems { + parentCols = append(parentCols, expression.ExtractColumns(groupByItem)...) + } + for _, aggDesc := range x.AggFuncs { + for _, expr := range aggDesc.Args { + parentCols = append(parentCols, expression.ExtractColumns(expr)...) + } + } + default: + parentCols = append(parentCols[:0], p.Schema().Columns...) 
+ } + + if ok, newCols := o.getDupAgnosticAggCols(p, aggCols); ok { + aggCols = newCols + } + + for i, child := range p.Children() { + newChild, err := o.doOptimize(child, aggCols, parentCols) + if err != nil { + return nil, err + } + p.SetChild(i, newChild) + } + return p, nil +} + +func (o *outerJoinEliminator) optimize(ctx context.Context, p LogicalPlan) (LogicalPlan, error) { + return o.doOptimize(p, nil, nil) +} + +func (*outerJoinEliminator) name() string { + return "outer_join_eliminate" +} diff --git a/planner/core/rule_join_reorder.go b/planner/core/rule_join_reorder.go new file mode 100644 index 0000000..32fe320 --- /dev/null +++ b/planner/core/rule_join_reorder.go @@ -0,0 +1,174 @@ +// Copyright 2019 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package core + +import ( + "context" + + "github.com/pingcap/tidb/expression" + "github.com/pingcap/tidb/sessionctx" +) + +// extractJoinGroup extracts all the join nodes connected with continuous +// InnerJoins to construct a join group. This join group is further used to +// construct a new join order based on a reorder algorithm. +// +// For example: "InnerJoin(InnerJoin(a, b), LeftJoin(c, d))" +// results in a join group {a, b, LeftJoin(c, d)}. +func extractJoinGroup(p LogicalPlan) (group []LogicalPlan, eqEdges []*expression.ScalarFunction, otherConds []expression.Expression) { + join, isJoin := p.(*LogicalJoin) + if !isJoin || join.preferJoinType > uint(0) || join.JoinType != InnerJoin || join.StraightJoin { + return []LogicalPlan{p}, nil, nil + } + + lhsGroup, lhsEqualConds, lhsOtherConds := extractJoinGroup(join.children[0]) + rhsGroup, rhsEqualConds, rhsOtherConds := extractJoinGroup(join.children[1]) + + group = append(group, lhsGroup...) + group = append(group, rhsGroup...) + eqEdges = append(eqEdges, join.EqualConditions...) + eqEdges = append(eqEdges, lhsEqualConds...) + eqEdges = append(eqEdges, rhsEqualConds...) + otherConds = append(otherConds, join.OtherConditions...) + otherConds = append(otherConds, lhsOtherConds...) + otherConds = append(otherConds, rhsOtherConds...) + return group, eqEdges, otherConds +} + +type joinReOrderSolver struct { +} + +type jrNode struct { + p LogicalPlan + cumCost float64 +} + +func (s *joinReOrderSolver) optimize(ctx context.Context, p LogicalPlan) (LogicalPlan, error) { + return s.optimizeRecursive(p.SCtx(), p) +} + +// optimizeRecursive recursively collects join groups and applies join reorder algorithm for each group. 
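+// Groups larger than TiDBOptJoinReorderThreshold are reordered by the greedy solver;
+// smaller groups go through the DP solver.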
+func (s *joinReOrderSolver) optimizeRecursive(ctx sessionctx.Context, p LogicalPlan) (LogicalPlan, error) { + var err error + curJoinGroup, eqEdges, otherConds := extractJoinGroup(p) + if len(curJoinGroup) > 1 { + for i := range curJoinGroup { + curJoinGroup[i], err = s.optimizeRecursive(ctx, curJoinGroup[i]) + if err != nil { + return nil, err + } + } + baseGroupSolver := &baseSingleGroupJoinOrderSolver{ + ctx: ctx, + otherConds: otherConds, + } + if len(curJoinGroup) > ctx.GetSessionVars().TiDBOptJoinReorderThreshold { + groupSolver := &joinReorderGreedySolver{ + baseSingleGroupJoinOrderSolver: baseGroupSolver, + eqEdges: eqEdges, + } + p, err = groupSolver.solve(curJoinGroup) + } else { + dpSolver := &joinReorderDPSolver{ + baseSingleGroupJoinOrderSolver: baseGroupSolver, + } + dpSolver.newJoin = dpSolver.newJoinWithEdges + p, err = dpSolver.solve(curJoinGroup, expression.ScalarFuncs2Exprs(eqEdges)) + } + if err != nil { + return nil, err + } + return p, nil + } + newChildren := make([]LogicalPlan, 0, len(p.Children())) + for _, child := range p.Children() { + newChild, err := s.optimizeRecursive(ctx, child) + if err != nil { + return nil, err + } + newChildren = append(newChildren, newChild) + } + p.SetChildren(newChildren...) + return p, nil +} + +type baseSingleGroupJoinOrderSolver struct { + ctx sessionctx.Context + curJoinGroup []*jrNode + otherConds []expression.Expression +} + +// baseNodeCumCost calculate the cumulative cost of the node in the join group. +func (s *baseSingleGroupJoinOrderSolver) baseNodeCumCost(groupNode LogicalPlan) float64 { + cost := groupNode.statsInfo().RowCount + for _, child := range groupNode.Children() { + cost += s.baseNodeCumCost(child) + } + return cost +} + +// makeBushyJoin build bushy tree for the nodes which have no equal condition to connect them. +func (s *baseSingleGroupJoinOrderSolver) makeBushyJoin(cartesianJoinGroup []LogicalPlan) LogicalPlan { + resultJoinGroup := make([]LogicalPlan, 0, (len(cartesianJoinGroup)+1)/2) + for len(cartesianJoinGroup) > 1 { + resultJoinGroup = resultJoinGroup[:0] + for i := 0; i < len(cartesianJoinGroup); i += 2 { + if i+1 == len(cartesianJoinGroup) { + resultJoinGroup = append(resultJoinGroup, cartesianJoinGroup[i]) + break + } + newJoin := s.newCartesianJoin(cartesianJoinGroup[i], cartesianJoinGroup[i+1]) + for i := len(s.otherConds) - 1; i >= 0; i-- { + cols := expression.ExtractColumns(s.otherConds[i]) + if newJoin.schema.ColumnsIndices(cols) != nil { + newJoin.OtherConditions = append(newJoin.OtherConditions, s.otherConds[i]) + s.otherConds = append(s.otherConds[:i], s.otherConds[i+1:]...) 
+ } + } + resultJoinGroup = append(resultJoinGroup, newJoin) + } + cartesianJoinGroup, resultJoinGroup = resultJoinGroup, cartesianJoinGroup + } + return cartesianJoinGroup[0] +} + +func (s *baseSingleGroupJoinOrderSolver) newCartesianJoin(lChild, rChild LogicalPlan) *LogicalJoin { + join := LogicalJoin{ + JoinType: InnerJoin, + reordered: true, + }.Init(s.ctx) + join.SetSchema(expression.MergeSchema(lChild.Schema(), rChild.Schema())) + join.SetChildren(lChild, rChild) + return join +} + +func (s *baseSingleGroupJoinOrderSolver) newJoinWithEdges(lChild, rChild LogicalPlan, eqEdges []*expression.ScalarFunction, otherConds []expression.Expression) LogicalPlan { + newJoin := s.newCartesianJoin(lChild, rChild) + newJoin.EqualConditions = eqEdges + newJoin.OtherConditions = otherConds + for _, eqCond := range newJoin.EqualConditions { + newJoin.LeftJoinKeys = append(newJoin.LeftJoinKeys, eqCond.GetArgs()[0].(*expression.Column)) + newJoin.RightJoinKeys = append(newJoin.RightJoinKeys, eqCond.GetArgs()[1].(*expression.Column)) + } + return newJoin +} + +// calcJoinCumCost calculates the cumulative cost of the join node. +func (s *baseSingleGroupJoinOrderSolver) calcJoinCumCost(join LogicalPlan, lNode, rNode *jrNode) float64 { + return join.statsInfo().RowCount + lNode.cumCost + rNode.cumCost +} + +func (*joinReOrderSolver) name() string { + return "join_reorder" +} diff --git a/planner/core/rule_join_reorder_dp.go b/planner/core/rule_join_reorder_dp.go new file mode 100644 index 0000000..364865a --- /dev/null +++ b/planner/core/rule_join_reorder_dp.go @@ -0,0 +1,98 @@ +// Copyright 2018 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package core + +import ( + "github.com/pingcap/errors" + "github.com/pingcap/tidb/expression" + "github.com/pingcap/tidb/parser/ast" +) + +type joinReorderDPSolver struct { + *baseSingleGroupJoinOrderSolver + newJoin func(lChild, rChild LogicalPlan, eqConds []*expression.ScalarFunction, otherConds []expression.Expression) LogicalPlan +} + +type joinGroupEqEdge struct { + nodeIDs []int + edge *expression.ScalarFunction +} + +type joinGroupNonEqEdge struct { + nodeIDs []int + nodeIDMask uint + expr expression.Expression +} + +func (s *joinReorderDPSolver) solve(joinGroup []LogicalPlan, eqConds []expression.Expression) (LogicalPlan, error) { + // TODO: You need to implement the join reorder algo based on DP. + + // The pseudo code can be found in README. + // And there's some common struct and method like `baseNodeCumCost`, `calcJoinCumCost` you can use in `rule_join_reorder.go`. + // Also, you can take a look at `rule_join_reorder_greedy.go`, this file implement the join reorder algo based on greedy algorithm. + // You'll see some common usages in the greedy version. + + // Note that the join tree may be disconnected. i.e. You need to consider the case `select * from t, t1, t2`. 
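+	//
+	// One possible shape for the DP, sketched here only as a hint (see the README for
+	// the intended pseudo code):
+	//   1. number the nodes in joinGroup 0..n-1 and turn every condition in eqConds into
+	//      an edge between the two node indices it references (findNodeIndexInGroup helps);
+	//   2. split the nodes into connected components over those edges;
+	//   3. for each component, run DP over bitmasks: bestPlan[mask] is the cheapest join
+	//      tree covering exactly the nodes in mask, built by trying every split
+	//      (sub, mask^sub) that is connected by at least one edge, joining the sub-plans
+	//      with newJoinWithEdge and costing the result with calcJoinCumCost;
+	//   4. finally connect the per-component best plans with makeBushyJoin, which also
+	//      attaches the remaining otherConds.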
+ return nil, errors.Errorf("unimplemented") +} + +func (s *joinReorderDPSolver) newJoinWithEdge(leftPlan, rightPlan LogicalPlan, edges []joinGroupEqEdge, otherConds []expression.Expression) (LogicalPlan, error) { + var eqConds []*expression.ScalarFunction + for _, edge := range edges { + lCol := edge.edge.GetArgs()[0].(*expression.Column) + rCol := edge.edge.GetArgs()[1].(*expression.Column) + if leftPlan.Schema().Contains(lCol) { + eqConds = append(eqConds, edge.edge) + } else { + newSf := expression.NewFunctionInternal(s.ctx, ast.EQ, edge.edge.GetType(), rCol, lCol).(*expression.ScalarFunction) + eqConds = append(eqConds, newSf) + } + } + join := s.newJoin(leftPlan, rightPlan, eqConds, otherConds) + _, err := join.recursiveDeriveStats() + return join, err +} + +// Make cartesian join as bushy tree. +func (s *joinReorderDPSolver) makeBushyJoin(cartesianJoinGroup []LogicalPlan, otherConds []expression.Expression) LogicalPlan { + for len(cartesianJoinGroup) > 1 { + resultJoinGroup := make([]LogicalPlan, 0, len(cartesianJoinGroup)) + for i := 0; i < len(cartesianJoinGroup); i += 2 { + if i+1 == len(cartesianJoinGroup) { + resultJoinGroup = append(resultJoinGroup, cartesianJoinGroup[i]) + break + } + // TODO:Since the other condition may involve more than two tables, e.g. t1.a = t2.b+t3.c. + // So We'll need a extra stage to deal with it. + // Currently, we just add it when building cartesianJoinGroup. + mergedSchema := expression.MergeSchema(cartesianJoinGroup[i].Schema(), cartesianJoinGroup[i+1].Schema()) + var usedOtherConds []expression.Expression + otherConds, usedOtherConds = expression.FilterOutInPlace(otherConds, func(expr expression.Expression) bool { + return expression.ExprFromSchema(expr, mergedSchema) + }) + resultJoinGroup = append(resultJoinGroup, s.newJoin(cartesianJoinGroup[i], cartesianJoinGroup[i+1], nil, usedOtherConds)) + } + cartesianJoinGroup = resultJoinGroup + } + return cartesianJoinGroup[0] +} + +func findNodeIndexInGroup(group []LogicalPlan, col *expression.Column) (int, error) { + for i, plan := range group { + if plan.Schema().Contains(col) { + return i, nil + } + } + return -1, ErrUnknownColumn.GenWithStackByArgs(col, "JOIN REORDER RULE") +} diff --git a/planner/core/rule_join_reorder_dp_test.go b/planner/core/rule_join_reorder_dp_test.go new file mode 100644 index 0000000..253d071 --- /dev/null +++ b/planner/core/rule_join_reorder_dp_test.go @@ -0,0 +1,217 @@ +// Copyright 2018 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package core + +import ( + "fmt" + + . 
"github.com/pingcap/check" + "github.com/pingcap/tidb/expression" + "github.com/pingcap/tidb/parser/ast" + "github.com/pingcap/tidb/parser/model" + "github.com/pingcap/tidb/parser/mysql" + "github.com/pingcap/tidb/planner/property" + "github.com/pingcap/tidb/sessionctx" + "github.com/pingcap/tidb/types" +) + +var _ = Suite(&testJoinReorderDPSuite{}) + +type testJoinReorderDPSuite struct { + ctx sessionctx.Context + statsMap map[int]*property.StatsInfo +} + +func (s *testJoinReorderDPSuite) SetUpTest(c *C) { + s.ctx = MockContext() + s.ctx.GetSessionVars().PlanID = -1 +} + +type mockLogicalJoin struct { + logicalSchemaProducer + involvedNodeSet int + statsMap map[int]*property.StatsInfo +} + +func (mj mockLogicalJoin) init(ctx sessionctx.Context) *mockLogicalJoin { + mj.baseLogicalPlan = newBaseLogicalPlan(ctx, "MockLogicalJoin", &mj) + return &mj +} + +func (mj *mockLogicalJoin) recursiveDeriveStats() (*property.StatsInfo, error) { + if mj.stats == nil { + mj.stats = mj.statsMap[mj.involvedNodeSet] + } + return mj.statsMap[mj.involvedNodeSet], nil +} + +func (s *testJoinReorderDPSuite) newMockJoin(lChild, rChild LogicalPlan, eqConds []*expression.ScalarFunction, _ []expression.Expression) LogicalPlan { + retJoin := mockLogicalJoin{}.init(s.ctx) + retJoin.schema = expression.MergeSchema(lChild.Schema(), rChild.Schema()) + retJoin.statsMap = s.statsMap + if mj, ok := lChild.(*mockLogicalJoin); ok { + retJoin.involvedNodeSet = mj.involvedNodeSet + } else { + retJoin.involvedNodeSet = 1 << uint(lChild.ID()) + } + if mj, ok := rChild.(*mockLogicalJoin); ok { + retJoin.involvedNodeSet |= mj.involvedNodeSet + } else { + retJoin.involvedNodeSet |= 1 << uint(rChild.ID()) + } + retJoin.SetChildren(lChild, rChild) + return retJoin +} + +func (s *testJoinReorderDPSuite) mockStatsInfo(state int, count float64) { + s.statsMap[state] = &property.StatsInfo{ + RowCount: count, + } +} + +func (s *testJoinReorderDPSuite) makeStatsMapForTPCHQ5() { + // Labeled as lineitem -> 0, orders -> 1, customer -> 2, supplier 3, nation 4, region 5 + // This graph can be shown as following: + // +---------------+ +---------------+ + // | | | | + // | lineitem +------------+ orders | + // | | | | + // +-------+-------+ +-------+-------+ + // | | + // | | + // | | + // +-------+-------+ +-------+-------+ + // | | | | + // | supplier +------------+ customer | + // | | | | + // +-------+-------+ +-------+-------+ + // | | + // | | + // | | + // | | + // | +---------------+ | + // | | | | + // +------+ nation +-----+ + // | | + // +---------------+ + // | + // +---------------+ + // | | + // | region | + // | | + // +---------------+ + s.statsMap = make(map[int]*property.StatsInfo) + s.mockStatsInfo(3, 9103367) + s.mockStatsInfo(6, 2275919) + s.mockStatsInfo(7, 9103367) + s.mockStatsInfo(9, 59986052) + s.mockStatsInfo(11, 9103367) + s.mockStatsInfo(12, 5999974575) + s.mockStatsInfo(13, 59999974575) + s.mockStatsInfo(14, 9103543072) + s.mockStatsInfo(15, 99103543072) + s.mockStatsInfo(20, 1500000) + s.mockStatsInfo(22, 2275919) + s.mockStatsInfo(23, 7982159) + s.mockStatsInfo(24, 100000) + s.mockStatsInfo(25, 59986052) + s.mockStatsInfo(27, 9103367) + s.mockStatsInfo(28, 5999974575) + s.mockStatsInfo(29, 59999974575) + s.mockStatsInfo(30, 59999974575) + s.mockStatsInfo(31, 59999974575) + s.mockStatsInfo(48, 5) + s.mockStatsInfo(52, 299838) + s.mockStatsInfo(54, 454183) + s.mockStatsInfo(55, 1815222) + s.mockStatsInfo(56, 20042) + s.mockStatsInfo(57, 12022687) + s.mockStatsInfo(59, 1823514) + s.mockStatsInfo(60, 1201884359) + 
s.mockStatsInfo(61, 12001884359) + s.mockStatsInfo(62, 12001884359) + s.mockStatsInfo(63, 72985) + +} + +func (s *testJoinReorderDPSuite) newDataSource(name string, count int) LogicalPlan { + ds := DataSource{}.Init(s.ctx) + tan := model.NewCIStr(name) + ds.TableAsName = &tan + ds.schema = expression.NewSchema() + s.ctx.GetSessionVars().PlanColumnID++ + ds.schema.Append(&expression.Column{ + UniqueID: s.ctx.GetSessionVars().PlanColumnID, + RetType: types.NewFieldType(mysql.TypeLonglong), + }) + ds.stats = &property.StatsInfo{ + RowCount: float64(count), + } + return ds +} + +func (s *testJoinReorderDPSuite) planToString(plan LogicalPlan) string { + switch x := plan.(type) { + case *mockLogicalJoin: + return fmt.Sprintf("MockJoin{%v, %v}", s.planToString(x.children[0]), s.planToString(x.children[1])) + case *DataSource: + return x.TableAsName.L + } + return "" +} + +func (s *testJoinReorderDPSuite) TestDPReorderTPCHQ5(c *C) { + s.makeStatsMapForTPCHQ5() + joinGroups := make([]LogicalPlan, 0, 6) + joinGroups = append(joinGroups, s.newDataSource("lineitem", 59986052)) + joinGroups = append(joinGroups, s.newDataSource("orders", 15000000)) + joinGroups = append(joinGroups, s.newDataSource("customer", 1500000)) + joinGroups = append(joinGroups, s.newDataSource("supplier", 100000)) + joinGroups = append(joinGroups, s.newDataSource("nation", 25)) + joinGroups = append(joinGroups, s.newDataSource("region", 5)) + var eqConds []expression.Expression + eqConds = append(eqConds, expression.NewFunctionInternal(s.ctx, ast.EQ, types.NewFieldType(mysql.TypeTiny), joinGroups[0].Schema().Columns[0], joinGroups[1].Schema().Columns[0])) + eqConds = append(eqConds, expression.NewFunctionInternal(s.ctx, ast.EQ, types.NewFieldType(mysql.TypeTiny), joinGroups[1].Schema().Columns[0], joinGroups[2].Schema().Columns[0])) + eqConds = append(eqConds, expression.NewFunctionInternal(s.ctx, ast.EQ, types.NewFieldType(mysql.TypeTiny), joinGroups[2].Schema().Columns[0], joinGroups[3].Schema().Columns[0])) + eqConds = append(eqConds, expression.NewFunctionInternal(s.ctx, ast.EQ, types.NewFieldType(mysql.TypeTiny), joinGroups[0].Schema().Columns[0], joinGroups[3].Schema().Columns[0])) + eqConds = append(eqConds, expression.NewFunctionInternal(s.ctx, ast.EQ, types.NewFieldType(mysql.TypeTiny), joinGroups[2].Schema().Columns[0], joinGroups[4].Schema().Columns[0])) + eqConds = append(eqConds, expression.NewFunctionInternal(s.ctx, ast.EQ, types.NewFieldType(mysql.TypeTiny), joinGroups[3].Schema().Columns[0], joinGroups[4].Schema().Columns[0])) + eqConds = append(eqConds, expression.NewFunctionInternal(s.ctx, ast.EQ, types.NewFieldType(mysql.TypeTiny), joinGroups[4].Schema().Columns[0], joinGroups[5].Schema().Columns[0])) + solver := &joinReorderDPSolver{ + baseSingleGroupJoinOrderSolver: &baseSingleGroupJoinOrderSolver{ + ctx: s.ctx, + }, + newJoin: s.newMockJoin, + } + result, err := solver.solve(joinGroups, eqConds) + c.Assert(err, IsNil) + c.Assert(s.planToString(result), Equals, "MockJoin{supplier, MockJoin{lineitem, MockJoin{orders, MockJoin{customer, MockJoin{nation, region}}}}}") +} + +func (s *testJoinReorderDPSuite) TestDPReorderAllCartesian(c *C) { + joinGroup := make([]LogicalPlan, 0, 4) + joinGroup = append(joinGroup, s.newDataSource("a", 100)) + joinGroup = append(joinGroup, s.newDataSource("b", 100)) + joinGroup = append(joinGroup, s.newDataSource("c", 100)) + joinGroup = append(joinGroup, s.newDataSource("d", 100)) + solver := &joinReorderDPSolver{ + baseSingleGroupJoinOrderSolver: &baseSingleGroupJoinOrderSolver{ 
+ ctx: s.ctx, + }, + newJoin: s.newMockJoin, + } + result, err := solver.solve(joinGroup, nil) + c.Assert(err, IsNil) + c.Assert(s.planToString(result), Equals, "MockJoin{MockJoin{a, b}, MockJoin{c, d}}") +} diff --git a/planner/core/rule_join_reorder_greedy.go b/planner/core/rule_join_reorder_greedy.go new file mode 100644 index 0000000..bc439ee --- /dev/null +++ b/planner/core/rule_join_reorder_greedy.go @@ -0,0 +1,132 @@ +// Copyright 2018 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package core + +import ( + "math" + "sort" + + "github.com/pingcap/tidb/expression" + "github.com/pingcap/tidb/parser/ast" +) + +type joinReorderGreedySolver struct { + *baseSingleGroupJoinOrderSolver + eqEdges []*expression.ScalarFunction +} + +// solve reorders the join nodes in the group based on a greedy algorithm. +// +// For each node having a join equal condition with the current join tree in +// the group, calculate the cumulative join cost of that node and the join +// tree, choose the node with the smallest cumulative cost to join with the +// current join tree. +// +// cumulative join cost = CumCount(lhs) + CumCount(rhs) + RowCount(join) +// For base node, its CumCount equals to the sum of the count of its subtree. +// See baseNodeCumCost for more details. +// TODO: this formula can be changed to real physical cost in future. +// +// For the nodes and join trees which don't have a join equal condition to +// connect them, we make a bushy join tree to do the cartesian joins finally. +func (s *joinReorderGreedySolver) solve(joinNodePlans []LogicalPlan) (LogicalPlan, error) { + for _, node := range joinNodePlans { + _, err := node.recursiveDeriveStats() + if err != nil { + return nil, err + } + s.curJoinGroup = append(s.curJoinGroup, &jrNode{ + p: node, + cumCost: s.baseNodeCumCost(node), + }) + } + sort.SliceStable(s.curJoinGroup, func(i, j int) bool { + return s.curJoinGroup[i].cumCost < s.curJoinGroup[j].cumCost + }) + + var cartesianGroup []LogicalPlan + for len(s.curJoinGroup) > 0 { + newNode, err := s.constructConnectedJoinTree() + if err != nil { + return nil, err + } + cartesianGroup = append(cartesianGroup, newNode.p) + } + + return s.makeBushyJoin(cartesianGroup), nil +} + +func (s *joinReorderGreedySolver) constructConnectedJoinTree() (*jrNode, error) { + curJoinTree := s.curJoinGroup[0] + s.curJoinGroup = s.curJoinGroup[1:] + for { + bestCost := math.MaxFloat64 + bestIdx := -1 + var finalRemainOthers []expression.Expression + var bestJoin LogicalPlan + for i, node := range s.curJoinGroup { + newJoin, remainOthers := s.checkConnectionAndMakeJoin(curJoinTree.p, node.p) + if newJoin == nil { + continue + } + _, err := newJoin.recursiveDeriveStats() + if err != nil { + return nil, err + } + curCost := s.calcJoinCumCost(newJoin, curJoinTree, node) + if bestCost > curCost { + bestCost = curCost + bestJoin = newJoin + bestIdx = i + finalRemainOthers = remainOthers + } + } + // If we could find more join node, meaning that the sub connected graph have been totally explored. 
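+		// That is, bestJoin stays nil when no remaining candidate shares an equal
+		// condition with the current join tree, so we stop growing this tree here.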
+ if bestJoin == nil { + break + } + curJoinTree = &jrNode{ + p: bestJoin, + cumCost: bestCost, + } + s.curJoinGroup = append(s.curJoinGroup[:bestIdx], s.curJoinGroup[bestIdx+1:]...) + s.otherConds = finalRemainOthers + } + return curJoinTree, nil +} + +func (s *joinReorderGreedySolver) checkConnectionAndMakeJoin(leftNode, rightNode LogicalPlan) (LogicalPlan, []expression.Expression) { + var usedEdges []*expression.ScalarFunction + remainOtherConds := make([]expression.Expression, len(s.otherConds)) + copy(remainOtherConds, s.otherConds) + for _, edge := range s.eqEdges { + lCol := edge.GetArgs()[0].(*expression.Column) + rCol := edge.GetArgs()[1].(*expression.Column) + if leftNode.Schema().Contains(lCol) && rightNode.Schema().Contains(rCol) { + usedEdges = append(usedEdges, edge) + } else if rightNode.Schema().Contains(lCol) && leftNode.Schema().Contains(rCol) { + newSf := expression.NewFunctionInternal(s.ctx, ast.EQ, edge.GetType(), rCol, lCol).(*expression.ScalarFunction) + usedEdges = append(usedEdges, newSf) + } + } + if len(usedEdges) == 0 { + return nil, nil + } + var otherConds []expression.Expression + mergedSchema := expression.MergeSchema(leftNode.Schema(), rightNode.Schema()) + remainOtherConds, otherConds = expression.FilterOutInPlace(remainOtherConds, func(expr expression.Expression) bool { + return expression.ExprFromSchema(expr, mergedSchema) + }) + return s.newJoinWithEdges(leftNode, rightNode, usedEdges, otherConds), remainOtherConds +} diff --git a/planner/core/rule_max_min_eliminate.go b/planner/core/rule_max_min_eliminate.go new file mode 100644 index 0000000..7384b0e --- /dev/null +++ b/planner/core/rule_max_min_eliminate.go @@ -0,0 +1,232 @@ +// Copyright 2017 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// // Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package core + +import ( + "context" + + "github.com/pingcap/tidb/expression" + "github.com/pingcap/tidb/expression/aggregation" + "github.com/pingcap/tidb/parser/ast" + "github.com/pingcap/tidb/parser/model" + "github.com/pingcap/tidb/parser/mysql" + "github.com/pingcap/tidb/planner/util" + "github.com/pingcap/tidb/types" + "github.com/pingcap/tidb/util/ranger" +) + +// maxMinEliminator tries to eliminate max/min aggregate function. +// For SQL like `select max(id) from t;`, we could optimize it to `select max(id) from (select id from t order by id desc limit 1 where id is not null) t;`. +// For SQL like `select min(id) from t;`, we could optimize it to `select max(id) from (select id from t order by id limit 1 where id is not null) t;`. +// For SQL like `select max(id), min(id) from t;`, we could optimize it to the cartesianJoin result of the two queries above if `id` has an index. +type maxMinEliminator struct { +} + +func (a *maxMinEliminator) optimize(ctx context.Context, p LogicalPlan) (LogicalPlan, error) { + return a.eliminateMaxMin(p), nil +} + +// composeAggsByInnerJoin composes the scalar aggregations by cartesianJoin. 
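+// Each input aggregation is a scalar aggregation returning exactly one row, so chaining
+// them with cartesian inner joins reproduces the single row of the original query, e.g.
+// the two aggregations split out of `select max(a), min(b) from t`.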
+func (a *maxMinEliminator) composeAggsByInnerJoin(aggs []*LogicalAggregation) (plan LogicalPlan) { + plan = aggs[0] + sctx := plan.SCtx() + for i := 1; i < len(aggs); i++ { + join := LogicalJoin{JoinType: InnerJoin}.Init(sctx) + join.SetChildren(plan, aggs[i]) + join.schema = buildLogicalJoinSchema(InnerJoin, join) + join.cartesianJoin = true + plan = join + } + return +} + +// checkColCanUseIndex checks whether there is an AccessPath satisfy the conditions: +// 1. all of the selection's condition can be pushed down as AccessConds of the path. +// 2. the path can keep order for `col` after pushing down the conditions. +func (a *maxMinEliminator) checkColCanUseIndex(plan LogicalPlan, col *expression.Column, conditions []expression.Expression) bool { + switch p := plan.(type) { + case *LogicalSelection: + conditions = append(conditions, p.Conditions...) + return a.checkColCanUseIndex(p.children[0], col, conditions) + case *DataSource: + // Check whether there is an AccessPath can use index for col. + for _, path := range p.possibleAccessPaths { + if path.IsTablePath { + // Since table path can contain accessConds of at most one column, + // we only need to check if all of the conditions can be pushed down as accessConds + // and `col` is the handle column. + if p.handleCol != nil && col.Equal(nil, p.handleCol) { + if _, filterConds := ranger.DetachCondsForColumn(p.ctx, conditions, col); len(filterConds) != 0 { + return false + } + return true + } + } else { + // For index paths, we have to check: + // 1. whether all of the conditions can be pushed down as accessConds. + // 2. whether the AccessPath can satisfy the order property of `col` with these accessConds. + result, err := ranger.DetachCondAndBuildRangeForIndex(p.ctx, conditions, path.FullIdxCols, path.FullIdxColLens) + if err != nil || len(result.RemainedConds) != 0 { + continue + } + for i := 0; i <= result.EqCondCount; i++ { + if i < len(path.FullIdxCols) && col.Equal(nil, path.FullIdxCols[i]) { + return true + } + } + } + } + return false + default: + return false + } +} + +// cloneSubPlans shallow clones the subPlan. We only consider `Selection` and `DataSource` here, +// because we have restricted the subPlan in `checkColCanUseIndex`. +func (a *maxMinEliminator) cloneSubPlans(plan LogicalPlan) LogicalPlan { + switch p := plan.(type) { + case *LogicalSelection: + newConditions := make([]expression.Expression, len(p.Conditions)) + copy(newConditions, p.Conditions) + sel := LogicalSelection{Conditions: newConditions}.Init(p.ctx) + sel.SetChildren(a.cloneSubPlans(p.children[0])) + return sel + case *DataSource: + // Quick clone a DataSource. + // ReadOnly fields uses a shallow copy, while the fields which will be overwritten must use a deep copy. + newDs := *p + newDs.baseLogicalPlan = newBaseLogicalPlan(p.ctx, p.tp, &newDs) + newDs.schema = p.schema.Clone() + newDs.Columns = make([]*model.ColumnInfo, len(p.Columns)) + copy(newDs.Columns, p.Columns) + newAccessPaths := make([]*util.AccessPath, 0, len(p.possibleAccessPaths)) + for _, path := range p.possibleAccessPaths { + newPath := *path + newAccessPaths = append(newAccessPaths, &newPath) + } + newDs.possibleAccessPaths = newAccessPaths + return &newDs + } + // This won't happen, because we have checked the subtree. + return nil +} + +// splitAggFuncAndCheckIndices splits the agg to multiple aggs and check whether each agg needs a sort +// after the transformation. 
For example, we firstly split the sql: `select max(a), min(a), max(b) from t` -> +// `select max(a) from t` + `select min(a) from t` + `select max(b) from t`. +// Then we check whether `a` and `b` have indices. If any of the used column has no index, we cannot eliminate +// this aggregation. +func (a *maxMinEliminator) splitAggFuncAndCheckIndices(agg *LogicalAggregation) (aggs []*LogicalAggregation, canEliminate bool) { + for _, f := range agg.AggFuncs { + // We must make sure the args of max/min is a simple single column. + col, ok := f.Args[0].(*expression.Column) + if !ok { + return nil, false + } + if !a.checkColCanUseIndex(agg.children[0], col, make([]expression.Expression, 0)) { + return nil, false + } + } + aggs = make([]*LogicalAggregation, 0, len(agg.AggFuncs)) + // we can split the aggregation only if all of the aggFuncs pass the check. + for i, f := range agg.AggFuncs { + newAgg := LogicalAggregation{AggFuncs: []*aggregation.AggFuncDesc{f}}.Init(agg.ctx) + newAgg.SetChildren(a.cloneSubPlans(agg.children[0])) + newAgg.schema = expression.NewSchema(agg.schema.Columns[i]) + if err := newAgg.PruneColumns([]*expression.Column{newAgg.schema.Columns[0]}); err != nil { + return nil, false + } + aggs = append(aggs, newAgg) + } + return aggs, true +} + +// eliminateSingleMaxMin tries to convert a single max/min to Limit+Sort operators. +func (a *maxMinEliminator) eliminateSingleMaxMin(agg *LogicalAggregation) *LogicalAggregation { + f := agg.AggFuncs[0] + child := agg.Children()[0] + ctx := agg.SCtx() + + // If there's no column in f.GetArgs()[0], we still need limit and read data from real table because the result should be NULL if the input is empty. + if len(expression.ExtractColumns(f.Args[0])) > 0 { + // If it can be NULL, we need to filter NULL out first. + if !mysql.HasNotNullFlag(f.Args[0].GetType().Flag) { + sel := LogicalSelection{}.Init(ctx) + isNullFunc := expression.NewFunctionInternal(ctx, ast.IsNull, types.NewFieldType(mysql.TypeTiny), f.Args[0]) + notNullFunc := expression.NewFunctionInternal(ctx, ast.UnaryNot, types.NewFieldType(mysql.TypeTiny), isNullFunc) + sel.Conditions = []expression.Expression{notNullFunc} + sel.SetChildren(agg.Children()[0]) + child = sel + } + + // Add Sort and Limit operators. + // For max function, the sort order should be desc. + desc := f.Name == ast.AggFuncMax + // Compose Sort operator. + sort := LogicalSort{}.Init(ctx) + sort.ByItems = append(sort.ByItems, &ByItems{f.Args[0], desc}) + sort.SetChildren(child) + child = sort + } + + // Compose Limit operator. + li := LogicalLimit{Count: 1}.Init(ctx) + li.SetChildren(child) + + // If no data in the child, we need to return NULL instead of empty. This cannot be done by sort and limit themselves. + // Since now there would be at most one row returned, the remained agg operator is not expensive anymore. + agg.SetChildren(li) + return agg +} + +// eliminateMaxMin tries to convert max/min to Limit+Sort operators. +func (a *maxMinEliminator) eliminateMaxMin(p LogicalPlan) LogicalPlan { + if agg, ok := p.(*LogicalAggregation); ok { + if len(agg.GroupByItems) != 0 { + return agg + } + // Make sure that all of the aggFuncs are Max or Min. + for _, aggFunc := range agg.AggFuncs { + if aggFunc.Name != ast.AggFuncMax && aggFunc.Name != ast.AggFuncMin { + return agg + } + } + if len(agg.AggFuncs) == 1 { + // If there is only one aggFunc, we don't need to guarantee that the child of it is a data + // source, or whether the sort can be eliminated. This transformation won't be worse than previous. 
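+			// For a nullable column, `select max(id) from t` roughly yields:
+			//   Aggregation(max(id)) -> Limit 1 -> Sort(id desc) -> Selection(not(isnull(id))) -> DataSource(t)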
+ return a.eliminateSingleMaxMin(agg) + } + // If we have more than one aggFunc, we can eliminate this agg only if all of the aggFuncs can benefit from + // their column's index. + aggs, canEliminate := a.splitAggFuncAndCheckIndices(agg) + if !canEliminate { + return agg + } + for i := range aggs { + aggs[i] = a.eliminateSingleMaxMin(aggs[i]) + } + return a.composeAggsByInnerJoin(aggs) + } + + newChildren := make([]LogicalPlan, 0, len(p.Children())) + for _, child := range p.Children() { + newChildren = append(newChildren, a.eliminateMaxMin(child)) + } + p.SetChildren(newChildren...) + return p +} + +func (*maxMinEliminator) name() string { + return "max_min_eliminate" +} diff --git a/planner/core/rule_predicate_push_down.go b/planner/core/rule_predicate_push_down.go new file mode 100644 index 0000000..7ddad56 --- /dev/null +++ b/planner/core/rule_predicate_push_down.go @@ -0,0 +1,472 @@ +// Copyright 2016 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// // Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package core + +import ( + "context" + + "github.com/pingcap/tidb/expression" + "github.com/pingcap/tidb/parser/ast" + "github.com/pingcap/tidb/parser/mysql" + "github.com/pingcap/tidb/sessionctx" + "github.com/pingcap/tidb/types" +) + +type ppdSolver struct{} + +func (s *ppdSolver) optimize(ctx context.Context, lp LogicalPlan) (LogicalPlan, error) { + _, p := lp.PredicatePushDown(nil) + return p, nil +} + +func addSelection(p LogicalPlan, child LogicalPlan, conditions []expression.Expression, chIdx int) { + if len(conditions) == 0 { + p.Children()[chIdx] = child + return + } + conditions = expression.PropagateConstant(p.SCtx(), conditions) + // Return table dual when filter is constant false or null. + dual := Conds2TableDual(child, conditions) + if dual != nil { + p.Children()[chIdx] = dual + return + } + selection := LogicalSelection{Conditions: conditions}.Init(p.SCtx()) + selection.SetChildren(child) + p.Children()[chIdx] = selection +} + +// PredicatePushDown implements LogicalPlan interface. +func (p *baseLogicalPlan) PredicatePushDown(predicates []expression.Expression) ([]expression.Expression, LogicalPlan) { + if len(p.children) == 0 { + return predicates, p.self + } + child := p.children[0] + rest, newChild := child.PredicatePushDown(predicates) + addSelection(p.self, newChild, rest, 0) + return nil, p.self +} + +func splitSetGetVarFunc(filters []expression.Expression) ([]expression.Expression, []expression.Expression) { + canBePushDown := make([]expression.Expression, 0, len(filters)) + canNotBePushDown := make([]expression.Expression, 0, len(filters)) + for _, expr := range filters { + if expression.HasGetSetVarFunc(expr) { + canNotBePushDown = append(canNotBePushDown, expr) + } else { + canBePushDown = append(canBePushDown, expr) + } + } + return canBePushDown, canNotBePushDown +} + +// PredicatePushDown implements LogicalPlan PredicatePushDown interface. 
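+// For LogicalSelection, its own conditions are pushed down together with the predicates received
+// from the parent; whatever comes back is kept in this Selection, and if nothing comes back the
+// Selection is removed. For example, in `select * from t where a > 1 and b < 10` both conditions
+// typically become pushedDownConds of the DataSource and the Selection disappears from the plan.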
+func (p *LogicalSelection) PredicatePushDown(predicates []expression.Expression) ([]expression.Expression, LogicalPlan) { + canBePushDown, canNotBePushDown := splitSetGetVarFunc(p.Conditions) + retConditions, child := p.children[0].PredicatePushDown(append(canBePushDown, predicates...)) + retConditions = append(retConditions, canNotBePushDown...) + if len(retConditions) > 0 { + p.Conditions = expression.PropagateConstant(p.ctx, retConditions) + // Return table dual when filter is constant false or null. + dual := Conds2TableDual(p, p.Conditions) + if dual != nil { + return nil, dual + } + return nil, p + } + return nil, child +} + +// PredicatePushDown implements LogicalPlan PredicatePushDown interface. +func (p *LogicalUnionScan) PredicatePushDown(predicates []expression.Expression) ([]expression.Expression, LogicalPlan) { + retainedPredicates, _ := p.children[0].PredicatePushDown(predicates) + p.conditions = make([]expression.Expression, 0, len(predicates)) + p.conditions = append(p.conditions, predicates...) + // The conditions in UnionScan is only used for added rows, so parent Selection should not be removed. + return retainedPredicates, p +} + +// PredicatePushDown implements LogicalPlan PredicatePushDown interface. +func (ds *DataSource) PredicatePushDown(predicates []expression.Expression) ([]expression.Expression, LogicalPlan) { + ds.allConds = predicates + _, ds.pushedDownConds, predicates = expression.ExpressionsToPB(ds.ctx.GetSessionVars().StmtCtx, predicates, ds.ctx.GetClient()) + return predicates, ds +} + +// PredicatePushDown implements LogicalPlan PredicatePushDown interface. +func (p *LogicalTableDual) PredicatePushDown(predicates []expression.Expression) ([]expression.Expression, LogicalPlan) { + return predicates, p +} + +// PredicatePushDown implements LogicalPlan PredicatePushDown interface. +func (p *LogicalJoin) PredicatePushDown(predicates []expression.Expression) (ret []expression.Expression, retPlan LogicalPlan) { + simplifyOuterJoin(p, predicates) + var equalCond []*expression.ScalarFunction + var leftPushCond, rightPushCond, otherCond, leftCond, rightCond []expression.Expression + switch p.JoinType { + case LeftOuterJoin: + predicates = p.outerJoinPropConst(predicates) + dual := Conds2TableDual(p, predicates) + if dual != nil { + return ret, dual + } + // Handle where conditions + predicates = expression.ExtractFiltersFromDNFs(p.ctx, predicates) + // Only derive left where condition, because right where condition cannot be pushed down + equalCond, leftPushCond, rightPushCond, otherCond = p.extractOnCondition(predicates, true, false) + leftCond = leftPushCond + // Handle join conditions, only derive right join condition, because left join condition cannot be pushed down + _, derivedRightJoinCond := deriveOtherConditions(p, false, true) + rightCond = append(p.RightConditions, derivedRightJoinCond...) + p.RightConditions = nil + ret = append(expression.ScalarFuncs2Exprs(equalCond), otherCond...) + ret = append(ret, rightPushCond...) 
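+		// For example, in `select * from t1 left join t2 on t1.a = t2.a where t1.b > 1 and t2.c is null`,
+		// `t1.b > 1` goes into leftCond and is pushed to the outer child t1, while `t2.c is null`
+		// (which is not null-rejected, so the join is not simplified to an inner join) stays in
+		// `ret` and is evaluated above the join, because filtering t2 before the join would change
+		// which rows get NULL-extended.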
+ case RightOuterJoin: + predicates = p.outerJoinPropConst(predicates) + dual := Conds2TableDual(p, predicates) + if dual != nil { + return ret, dual + } + // Handle where conditions + predicates = expression.ExtractFiltersFromDNFs(p.ctx, predicates) + // Only derive right where condition, because left where condition cannot be pushed down + equalCond, leftPushCond, rightPushCond, otherCond = p.extractOnCondition(predicates, false, true) + rightCond = rightPushCond + // Handle join conditions, only derive left join condition, because right join condition cannot be pushed down + derivedLeftJoinCond, _ := deriveOtherConditions(p, true, false) + leftCond = append(p.LeftConditions, derivedLeftJoinCond...) + p.LeftConditions = nil + ret = append(expression.ScalarFuncs2Exprs(equalCond), otherCond...) + ret = append(ret, leftPushCond...) + case InnerJoin: + tempCond := make([]expression.Expression, 0, len(p.LeftConditions)+len(p.RightConditions)+len(p.EqualConditions)+len(p.OtherConditions)+len(predicates)) + tempCond = append(tempCond, p.LeftConditions...) + tempCond = append(tempCond, p.RightConditions...) + tempCond = append(tempCond, expression.ScalarFuncs2Exprs(p.EqualConditions)...) + tempCond = append(tempCond, p.OtherConditions...) + tempCond = append(tempCond, predicates...) + tempCond = expression.ExtractFiltersFromDNFs(p.ctx, tempCond) + tempCond = expression.PropagateConstant(p.ctx, tempCond) + // Return table dual when filter is constant false or null. + dual := Conds2TableDual(p, tempCond) + if dual != nil { + return ret, dual + } + equalCond, leftPushCond, rightPushCond, otherCond = p.extractOnCondition(tempCond, true, true) + p.LeftConditions = nil + p.RightConditions = nil + p.EqualConditions = equalCond + p.OtherConditions = otherCond + leftCond = leftPushCond + rightCond = rightPushCond + } + leftCond = expression.RemoveDupExprs(p.ctx, leftCond) + rightCond = expression.RemoveDupExprs(p.ctx, rightCond) + leftRet, lCh := p.children[0].PredicatePushDown(leftCond) + rightRet, rCh := p.children[1].PredicatePushDown(rightCond) + addSelection(p, lCh, leftRet, 0) + addSelection(p, rCh, rightRet, 1) + p.updateEQCond() + for _, eqCond := range p.EqualConditions { + p.LeftJoinKeys = append(p.LeftJoinKeys, eqCond.GetArgs()[0].(*expression.Column)) + p.RightJoinKeys = append(p.RightJoinKeys, eqCond.GetArgs()[1].(*expression.Column)) + } + p.mergeSchema() + buildKeyInfo(p) + return ret, p.self +} + +// updateEQCond will extract the arguments of a equal condition that connect two expressions. +func (p *LogicalJoin) updateEQCond() { + lChild, rChild := p.children[0], p.children[1] + var lKeys, rKeys []expression.Expression + for i := len(p.OtherConditions) - 1; i >= 0; i-- { + need2Remove := false + if eqCond, ok := p.OtherConditions[i].(*expression.ScalarFunction); ok && eqCond.FuncName.L == ast.EQ { + lExpr, rExpr := eqCond.GetArgs()[0], eqCond.GetArgs()[1] + if expression.ExprFromSchema(lExpr, lChild.Schema()) && expression.ExprFromSchema(rExpr, rChild.Schema()) { + lKeys = append(lKeys, lExpr) + rKeys = append(rKeys, rExpr) + need2Remove = true + } else if expression.ExprFromSchema(lExpr, rChild.Schema()) && expression.ExprFromSchema(rExpr, lChild.Schema()) { + lKeys = append(lKeys, rExpr) + rKeys = append(rKeys, lExpr) + need2Remove = true + } + } + if need2Remove { + p.OtherConditions = append(p.OtherConditions[:i], p.OtherConditions[i+1:]...) 
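+			// For example, given `t1.a + 1 = t2.b` in OtherConditions, the pair (t1.a + 1, t2.b)
+			// is recorded in lKeys/rKeys and the condition is removed here; below, a Projection is
+			// added on the left child so that `t1.a + 1` becomes a real column, and the pair is
+			// turned into a new EqualCondition that can serve as a join key.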
+ } + } + if len(lKeys) > 0 { + needLProj, needRProj := false, false + for i := range lKeys { + _, lOk := lKeys[i].(*expression.Column) + _, rOk := rKeys[i].(*expression.Column) + needLProj = needLProj || !lOk + needRProj = needRProj || !rOk + } + + var lProj, rProj *LogicalProjection + if needLProj { + lProj = p.getProj(0) + } + if needRProj { + rProj = p.getProj(1) + } + for i := range lKeys { + lKey, rKey := lKeys[i], rKeys[i] + if lProj != nil { + lKey = lProj.appendExpr(lKey) + } + if rProj != nil { + rKey = rProj.appendExpr(rKey) + } + eqCond := expression.NewFunctionInternal(p.ctx, ast.EQ, types.NewFieldType(mysql.TypeTiny), lKey, rKey) + p.EqualConditions = append(p.EqualConditions, eqCond.(*expression.ScalarFunction)) + } + } +} + +func (p *LogicalProjection) appendExpr(expr expression.Expression) *expression.Column { + if col, ok := expr.(*expression.Column); ok { + return col + } + expr = expression.ColumnSubstitute(expr, p.schema, p.Exprs) + p.Exprs = append(p.Exprs, expr) + + col := &expression.Column{ + UniqueID: p.ctx.GetSessionVars().AllocPlanColumnID(), + RetType: expr.GetType(), + } + p.schema.Append(col) + return col +} + +func (p *LogicalJoin) getProj(idx int) *LogicalProjection { + child := p.children[idx] + proj, ok := child.(*LogicalProjection) + if ok { + return proj + } + proj = LogicalProjection{Exprs: make([]expression.Expression, 0, child.Schema().Len())}.Init(p.ctx) + for _, col := range child.Schema().Columns { + proj.Exprs = append(proj.Exprs, col) + } + proj.SetSchema(child.Schema().Clone()) + proj.SetChildren(child) + p.children[idx] = proj + return proj +} + +// simplifyOuterJoin transforms "LeftOuterJoin/RightOuterJoin" to "InnerJoin" if possible. +func simplifyOuterJoin(p *LogicalJoin, predicates []expression.Expression) { + if p.JoinType != LeftOuterJoin && p.JoinType != RightOuterJoin && p.JoinType != InnerJoin { + return + } + + innerTable := p.children[0] + outerTable := p.children[1] + if p.JoinType == LeftOuterJoin { + innerTable, outerTable = outerTable, innerTable + } + + // first simplify embedded outer join. + if innerPlan, ok := innerTable.(*LogicalJoin); ok { + simplifyOuterJoin(innerPlan, predicates) + } + if outerPlan, ok := outerTable.(*LogicalJoin); ok { + simplifyOuterJoin(outerPlan, predicates) + } + + if p.JoinType == InnerJoin { + return + } + // then simplify embedding outer join. + canBeSimplified := false + for _, expr := range predicates { + isOk := isNullRejected(p.ctx, innerTable.Schema(), expr) + if isOk { + canBeSimplified = true + break + } + } + if canBeSimplified { + p.JoinType = InnerJoin + } +} + +// isNullRejected check whether a condition is null-rejected +// A condition would be null-rejected in one of following cases: +// If it is a predicate containing a reference to an inner table that evaluates to UNKNOWN or FALSE when one of its arguments is NULL. +// If it is a conjunction containing a null-rejected condition as a conjunct. +// If it is a disjunction of null-rejected conditions. 
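+// For example, `t2.c > 1` is null-rejected for t2 (it evaluates to UNKNOWN when t2.c is NULL),
+// which lets simplifyOuterJoin turn the outer join into an inner join, while `t2.c IS NULL`
+// evaluates to TRUE on NULL and is therefore not null-rejected.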
+func isNullRejected(ctx sessionctx.Context, schema *expression.Schema, expr expression.Expression) bool { + expr = expression.PushDownNot(nil, expr) + sc := ctx.GetSessionVars().StmtCtx + sc.InNullRejectCheck = true + result := expression.EvaluateExprWithNull(ctx, schema, expr) + sc.InNullRejectCheck = false + x, ok := result.(*expression.Constant) + if !ok { + return false + } + if x.Value.IsNull() { + return true + } else if isTrue, err := x.Value.ToBool(sc); err == nil && isTrue == 0 { + return true + } + return false +} + +// PredicatePushDown implements LogicalPlan PredicatePushDown interface. +func (p *LogicalProjection) PredicatePushDown(predicates []expression.Expression) (ret []expression.Expression, retPlan LogicalPlan) { + canBePushed := make([]expression.Expression, 0, len(predicates)) + canNotBePushed := make([]expression.Expression, 0, len(predicates)) + for _, expr := range p.Exprs { + if expression.HasAssignSetVarFunc(expr) { + _, child := p.baseLogicalPlan.PredicatePushDown(nil) + return predicates, child + } + } + for _, cond := range predicates { + newFilter := expression.ColumnSubstitute(cond, p.Schema(), p.Exprs) + if !expression.HasGetSetVarFunc(newFilter) { + canBePushed = append(canBePushed, expression.ColumnSubstitute(cond, p.Schema(), p.Exprs)) + } else { + canNotBePushed = append(canNotBePushed, cond) + } + } + remained, child := p.baseLogicalPlan.PredicatePushDown(canBePushed) + return append(remained, canNotBePushed...), child +} + +// PredicatePushDown implements LogicalPlan PredicatePushDown interface. +func (la *LogicalAggregation) PredicatePushDown(predicates []expression.Expression) (ret []expression.Expression, retPlan LogicalPlan) { + // TODO: Here you need to push the predicates across the aggregation. + // A simple example is that `select * from (select count(*) from t group by b) tmp_t where b > 1` is the same with + // `select * from (select count(*) from t where b > 1 group by b) tmp_t. + return predicates, la +} + +// PredicatePushDown implements LogicalPlan PredicatePushDown interface. +func (p *LogicalLimit) PredicatePushDown(predicates []expression.Expression) ([]expression.Expression, LogicalPlan) { + // Limit forbids any condition to push down. + p.baseLogicalPlan.PredicatePushDown(nil) + return predicates, p +} + +// deriveOtherConditions given a LogicalJoin, check the OtherConditions to see if we can derive more +// conditions for left/right child pushdown. +func deriveOtherConditions(p *LogicalJoin, deriveLeft bool, deriveRight bool) (leftCond []expression.Expression, + rightCond []expression.Expression) { + leftPlan, rightPlan := p.children[0], p.children[1] + for _, expr := range p.OtherConditions { + if deriveLeft { + leftRelaxedCond := expression.DeriveRelaxedFiltersFromDNF(expr, leftPlan.Schema()) + if leftRelaxedCond != nil { + leftCond = append(leftCond, leftRelaxedCond) + } + notNullExpr := deriveNotNullExpr(expr, leftPlan.Schema()) + if notNullExpr != nil { + leftCond = append(leftCond, notNullExpr) + } + } + if deriveRight { + rightRelaxedCond := expression.DeriveRelaxedFiltersFromDNF(expr, rightPlan.Schema()) + if rightRelaxedCond != nil { + rightCond = append(rightCond, rightRelaxedCond) + } + notNullExpr := deriveNotNullExpr(expr, rightPlan.Schema()) + if notNullExpr != nil { + rightCond = append(rightCond, notNullExpr) + } + } + } + return +} + +// deriveNotNullExpr generates a new expression `not(isnull(col))` given `col1 op col2`, +// in which `col` is in specified schema. 
Caller guarantees that only one of `col1` or +// `col2` is in schema. +func deriveNotNullExpr(expr expression.Expression, schema *expression.Schema) expression.Expression { + binop, ok := expr.(*expression.ScalarFunction) + if !ok || len(binop.GetArgs()) != 2 { + return nil + } + ctx := binop.GetCtx() + arg0, lOK := binop.GetArgs()[0].(*expression.Column) + arg1, rOK := binop.GetArgs()[1].(*expression.Column) + if !lOK || !rOK { + return nil + } + childCol := schema.RetrieveColumn(arg0) + if childCol == nil { + childCol = schema.RetrieveColumn(arg1) + } + if isNullRejected(ctx, schema, expr) && !mysql.HasNotNullFlag(childCol.RetType.Flag) { + return expression.BuildNotNullExpr(ctx, childCol) + } + return nil +} + +// Conds2TableDual builds a LogicalTableDual if cond is constant false or null. +func Conds2TableDual(p LogicalPlan, conds []expression.Expression) LogicalPlan { + if len(conds) != 1 { + return nil + } + con, ok := conds[0].(*expression.Constant) + if !ok { + return nil + } + sc := p.SCtx().GetSessionVars().StmtCtx + if isTrue, err := con.Value.ToBool(sc); (err == nil && isTrue == 0) || con.Value.IsNull() { + dual := LogicalTableDual{}.Init(p.SCtx()) + dual.SetSchema(p.Schema()) + return dual + } + return nil +} + +// outerJoinPropConst propagates constant equal and column equal conditions over outer join. +func (p *LogicalJoin) outerJoinPropConst(predicates []expression.Expression) []expression.Expression { + outerTable := p.children[0] + innerTable := p.children[1] + if p.JoinType == RightOuterJoin { + innerTable, outerTable = outerTable, innerTable + } + lenJoinConds := len(p.EqualConditions) + len(p.LeftConditions) + len(p.RightConditions) + len(p.OtherConditions) + joinConds := make([]expression.Expression, 0, lenJoinConds) + for _, equalCond := range p.EqualConditions { + joinConds = append(joinConds, equalCond) + } + joinConds = append(joinConds, p.LeftConditions...) + joinConds = append(joinConds, p.RightConditions...) + joinConds = append(joinConds, p.OtherConditions...) + p.EqualConditions = nil + p.LeftConditions = nil + p.RightConditions = nil + p.OtherConditions = nil + joinConds, predicates = expression.PropConstOverOuterJoin(p.ctx, joinConds, predicates, outerTable.Schema(), innerTable.Schema()) + p.attachOnConds(joinConds) + return predicates +} + +// PredicatePushDown implements LogicalPlan PredicatePushDown interface. +func (p *LogicalMemTable) PredicatePushDown(predicates []expression.Expression) ([]expression.Expression, LogicalPlan) { + return predicates, p.self +} + +func (*ppdSolver) name() string { + return "predicate_push_down" +} diff --git a/planner/core/rule_topn_push_down.go b/planner/core/rule_topn_push_down.go new file mode 100644 index 0000000..3caa753 --- /dev/null +++ b/planner/core/rule_topn_push_down.go @@ -0,0 +1,161 @@ +// Copyright 2017 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package core + +import ( + "context" + + "github.com/cznic/mathutil" + "github.com/pingcap/tidb/expression" +) + +// pushDownTopNOptimizer pushes down the topN or limit. 
In the future we will remove the limit from `requiredProperty` in CBO phase. +type pushDownTopNOptimizer struct { +} + +func (s *pushDownTopNOptimizer) optimize(ctx context.Context, p LogicalPlan) (LogicalPlan, error) { + return p.pushDownTopN(nil), nil +} + +func (s *baseLogicalPlan) pushDownTopN(topN *LogicalTopN) LogicalPlan { + p := s.self + for i, child := range p.Children() { + p.Children()[i] = child.pushDownTopN(nil) + } + if topN != nil { + return topN.setChild(p) + } + return p +} + +// setChild set p as topn's child. +func (lt *LogicalTopN) setChild(p LogicalPlan) LogicalPlan { + // Remove this TopN if its child is a TableDual. + dual, isDual := p.(*LogicalTableDual) + if isDual { + numDualRows := uint64(dual.RowCount) + if numDualRows < lt.Offset { + dual.RowCount = 0 + return dual + } + dual.RowCount = int(mathutil.MinUint64(numDualRows-lt.Offset, lt.Count)) + return dual + } + + if lt.isLimit() { + limit := LogicalLimit{ + Count: lt.Count, + Offset: lt.Offset, + }.Init(lt.ctx) + limit.SetChildren(p) + return limit + } + // Then lt must be topN. + lt.SetChildren(p) + return lt +} + +func (ls *LogicalSort) pushDownTopN(topN *LogicalTopN) LogicalPlan { + if topN == nil { + return ls.baseLogicalPlan.pushDownTopN(nil) + } else if topN.isLimit() { + topN.ByItems = ls.ByItems + return ls.children[0].pushDownTopN(topN) + } + // If a TopN is pushed down, this sort is useless. + return ls.children[0].pushDownTopN(topN) +} + +func (p *LogicalLimit) convertToTopN() *LogicalTopN { + return LogicalTopN{Offset: p.Offset, Count: p.Count}.Init(p.ctx) +} + +func (p *LogicalLimit) pushDownTopN(topN *LogicalTopN) LogicalPlan { + child := p.children[0].pushDownTopN(p.convertToTopN()) + if topN != nil { + return topN.setChild(child) + } + return child +} + +func (p *LogicalProjection) pushDownTopN(topN *LogicalTopN) LogicalPlan { + for _, expr := range p.Exprs { + if expression.HasAssignSetVarFunc(expr) { + return p.baseLogicalPlan.pushDownTopN(topN) + } + } + if topN != nil { + for _, by := range topN.ByItems { + by.Expr = expression.ColumnSubstitute(by.Expr, p.schema, p.Exprs) + } + + // remove meaningless constant sort items. + for i := len(topN.ByItems) - 1; i >= 0; i-- { + switch topN.ByItems[i].Expr.(type) { + case *expression.Constant: + topN.ByItems = append(topN.ByItems[:i], topN.ByItems[i+1:]...) + } + } + } + p.children[0] = p.children[0].pushDownTopN(topN) + return p +} + +// pushDownTopNToChild will push a topN to one child of join. The idx stands for join child index. 0 is for left child. 
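+// For example, in `select * from t1 left join t2 on t1.a = t2.a order by t1.b limit 10`, every
+// ORDER BY column comes from t1, so a copy of the TopN with count = offset + count is pushed to
+// the t1 child while the original TopN stays above the join; this is safe because a left outer
+// join emits at least one row for every t1 row. If any sort column came from t2, nothing would
+// be pushed.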
+func (p *LogicalJoin) pushDownTopNToChild(topN *LogicalTopN, idx int) LogicalPlan { + if topN == nil { + return p.children[idx].pushDownTopN(nil) + } + + for _, by := range topN.ByItems { + cols := expression.ExtractColumns(by.Expr) + for _, col := range cols { + if p.children[1-idx].Schema().Contains(col) { + return p.children[idx].pushDownTopN(nil) + } + } + } + + newTopN := LogicalTopN{ + Count: topN.Count + topN.Offset, + ByItems: make([]*ByItems, len(topN.ByItems)), + }.Init(topN.ctx) + for i := range topN.ByItems { + newTopN.ByItems[i] = topN.ByItems[i].Clone() + } + return p.children[idx].pushDownTopN(newTopN) +} + +func (p *LogicalJoin) pushDownTopN(topN *LogicalTopN) LogicalPlan { + switch p.JoinType { + case LeftOuterJoin: + p.children[0] = p.pushDownTopNToChild(topN, 0) + p.children[1] = p.children[1].pushDownTopN(nil) + case RightOuterJoin: + p.children[1] = p.pushDownTopNToChild(topN, 1) + p.children[0] = p.children[0].pushDownTopN(nil) + default: + return p.baseLogicalPlan.pushDownTopN(topN) + } + + // The LogicalJoin may be also a LogicalApply. So we must use self to set parents. + if topN != nil { + return topN.setChild(p.self) + } + return p.self +} + +func (*pushDownTopNOptimizer) name() string { + return "topn_push_down" +} diff --git a/planner/core/stats.go b/planner/core/stats.go new file mode 100644 index 0000000..a8b3a59 --- /dev/null +++ b/planner/core/stats.go @@ -0,0 +1,397 @@ +// Copyright 2017 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package core + +import ( + "math" + + "github.com/pingcap/tidb/expression" + "github.com/pingcap/tidb/parser/mysql" + "github.com/pingcap/tidb/planner/property" + "github.com/pingcap/tidb/planner/util" + "github.com/pingcap/tidb/statistics" + "github.com/pingcap/tidb/types" + "github.com/pingcap/tidb/util/logutil" + "github.com/pingcap/tidb/util/ranger" + "go.uber.org/zap" +) + +func (p *basePhysicalPlan) StatsCount() float64 { + return p.stats.RowCount +} + +// DeriveStats implement LogicalPlan DeriveStats interface. +func (p *LogicalTableDual) DeriveStats(childStats []*property.StatsInfo, selfSchema *expression.Schema, childSchema []*expression.Schema) (*property.StatsInfo, error) { + profile := &property.StatsInfo{ + RowCount: float64(p.RowCount), + Cardinality: make([]float64, selfSchema.Len()), + } + for i := range profile.Cardinality { + profile.Cardinality[i] = float64(p.RowCount) + } + p.stats = profile + return p.stats, nil +} + +// DeriveStats implement LogicalPlan DeriveStats interface. 
+func (p *LogicalMemTable) DeriveStats(childStats []*property.StatsInfo, selfSchema *expression.Schema, childSchema []*expression.Schema) (*property.StatsInfo, error) { + statsTable := statistics.PseudoTable(p.tableInfo) + stats := &property.StatsInfo{ + RowCount: float64(statsTable.Count), + Cardinality: make([]float64, len(p.tableInfo.Columns)), + HistColl: statsTable.GenerateHistCollFromColumnInfo(p.tableInfo.Columns, p.schema.Columns), + StatsVersion: statistics.PseudoVersion, + } + for i := range p.tableInfo.Columns { + stats.Cardinality[i] = float64(statsTable.Count) + } + p.stats = stats + return p.stats, nil +} + +// DeriveStats implement LogicalPlan DeriveStats interface. +func (p *LogicalShow) DeriveStats(childStats []*property.StatsInfo, selfSchema *expression.Schema, childSchema []*expression.Schema) (*property.StatsInfo, error) { + // A fake count, just to avoid panic now. + p.stats = getFakeStats(selfSchema.Len()) + return p.stats, nil +} + +func getFakeStats(length int) *property.StatsInfo { + profile := &property.StatsInfo{ + RowCount: 1, + Cardinality: make([]float64, length), + } + for i := range profile.Cardinality { + profile.Cardinality[i] = 1 + } + return profile +} + +// DeriveStats implement LogicalPlan DeriveStats interface. +func (p *LogicalShowDDLJobs) DeriveStats(childStats []*property.StatsInfo, selfSchema *expression.Schema, childSchema []*expression.Schema) (*property.StatsInfo, error) { + // A fake count, just to avoid panic now. + p.stats = getFakeStats(selfSchema.Len()) + return p.stats, nil +} + +func (p *baseLogicalPlan) recursiveDeriveStats() (*property.StatsInfo, error) { + if p.stats != nil { + return p.stats, nil + } + childStats := make([]*property.StatsInfo, len(p.children)) + childSchema := make([]*expression.Schema, len(p.children)) + for i, child := range p.children { + childProfile, err := child.recursiveDeriveStats() + if err != nil { + return nil, err + } + childStats[i] = childProfile + childSchema[i] = child.Schema() + } + return p.self.DeriveStats(childStats, p.self.Schema(), childSchema) +} + +// DeriveStats implement LogicalPlan DeriveStats interface. +func (p *baseLogicalPlan) DeriveStats(childStats []*property.StatsInfo, selfSchema *expression.Schema, childSchema []*expression.Schema) (*property.StatsInfo, error) { + if len(childStats) == 1 { + p.stats = childStats[0] + return p.stats, nil + } + if len(childStats) > 1 { + err := ErrInternal.GenWithStack("LogicalPlans with more than one child should implement their own DeriveStats().") + return nil, err + } + profile := &property.StatsInfo{ + RowCount: float64(1), + Cardinality: make([]float64, selfSchema.Len()), + } + for i := range profile.Cardinality { + profile.Cardinality[i] = float64(1) + } + p.stats = profile + return profile, nil +} + +// getColumnNDV computes estimated NDV of specified column using the original +// histogram of `DataSource` which is retrieved from storage(not the derived one). 
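+// For example (illustrative numbers): if the stored histogram covers 80000 rows with NDV 100 and
+// the table now has 100000 rows, the estimate is scaled to 100 * 100000/80000 = 125; when the
+// column has no histogram, it falls back to rowCount * distinctFactor.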
+func (ds *DataSource) getColumnNDV(colID int64) (ndv float64) { + hist, ok := ds.statisticTable.Columns[colID] + if ok && hist.Count > 0 { + factor := float64(ds.statisticTable.Count) / float64(hist.Count) + ndv = float64(hist.NDV) * factor + } else { + ndv = float64(ds.statisticTable.Count) * distinctFactor + } + return ndv +} + +func (ds *DataSource) initStats() { + if ds.tableStats != nil { + return + } + tableStats := &property.StatsInfo{ + RowCount: float64(ds.statisticTable.Count), + Cardinality: make([]float64, len(ds.Columns)), + HistColl: ds.statisticTable.GenerateHistCollFromColumnInfo(ds.Columns, ds.schema.Columns), + StatsVersion: ds.statisticTable.Version, + } + if ds.statisticTable.Pseudo { + tableStats.StatsVersion = statistics.PseudoVersion + } + for i, col := range ds.Columns { + tableStats.Cardinality[i] = ds.getColumnNDV(col.ID) + } + ds.tableStats = tableStats + ds.TblColHists = ds.statisticTable.ID2UniqueID(ds.TblCols) +} + +func (ds *DataSource) deriveStatsByFilter(conds expression.CNFExprs, filledPaths []*util.AccessPath) *property.StatsInfo { + ds.initStats() + selectivity, err := ds.tableStats.HistColl.Selectivity(ds.ctx, conds, filledPaths) + if err != nil { + logutil.BgLogger().Debug("something wrong happened, use the default selectivity", zap.Error(err)) + selectivity = selectionFactor + } + stats := ds.tableStats.Scale(selectivity) + return stats +} + +// DeriveStats implement LogicalPlan DeriveStats interface. +func (ds *DataSource) DeriveStats(childStats []*property.StatsInfo, selfSchema *expression.Schema, childSchema []*expression.Schema) (*property.StatsInfo, error) { + ds.initStats() + // PushDownNot here can convert query 'not (a != 1)' to 'a = 1'. + for i, expr := range ds.pushedDownConds { + ds.pushedDownConds[i] = expression.PushDownNot(ds.ctx, expr) + } + for _, path := range ds.possibleAccessPaths { + if path.IsTablePath { + continue + } + err := ds.fillIndexPath(path, ds.pushedDownConds) + if err != nil { + return nil, err + } + } + ds.stats = ds.deriveStatsByFilter(ds.pushedDownConds, ds.possibleAccessPaths) + for _, path := range ds.possibleAccessPaths { + if path.IsTablePath { + noIntervalRanges, err := ds.deriveTablePathStats(path, ds.pushedDownConds) + if err != nil { + return nil, err + } + // If we have point or empty range, just remove other possible paths. + if noIntervalRanges || len(path.Ranges) == 0 { + ds.possibleAccessPaths[0] = path + ds.possibleAccessPaths = ds.possibleAccessPaths[:1] + break + } + continue + } + noIntervalRanges := ds.deriveIndexPathStats(path) + // If we have empty range, or point range on unique index, just remove other possible paths. + if (noIntervalRanges && path.Index.Unique) || len(path.Ranges) == 0 { + ds.possibleAccessPaths[0] = path + ds.possibleAccessPaths = ds.possibleAccessPaths[:1] + break + } + } + return ds.stats, nil +} + +// DeriveStats implements LogicalPlan DeriveStats interface. +func (ts *LogicalTableScan) DeriveStats(childStats []*property.StatsInfo, selfSchema *expression.Schema, childSchema []*expression.Schema) (_ *property.StatsInfo, err error) { + // PushDownNot here can convert query 'not (a != 1)' to 'a = 1'. + for i, expr := range ts.AccessConds { + // TODO The expressions may be shared by TableScan and several IndexScans, there would be redundant + // `PushDownNot` function call in multiple `DeriveStats` then. 
+ ts.AccessConds[i] = expression.PushDownNot(ts.ctx, expr) + } + ts.stats = ts.Source.deriveStatsByFilter(ts.AccessConds, nil) + sc := ts.SCtx().GetSessionVars().StmtCtx + // ts.Handle could be nil if PK is Handle, and PK column has been pruned. + if ts.Handle != nil { + ts.Ranges, err = ranger.BuildTableRange(ts.AccessConds, sc, ts.Handle.RetType) + } else { + isUnsigned := false + if ts.Source.tableInfo.PKIsHandle { + if pkColInfo := ts.Source.tableInfo.GetPkColInfo(); pkColInfo != nil { + isUnsigned = mysql.HasUnsignedFlag(pkColInfo.Flag) + } + } + ts.Ranges = ranger.FullIntRange(isUnsigned) + } + if err != nil { + return nil, err + } + return ts.stats, nil +} + +// DeriveStats implements LogicalPlan DeriveStats interface. +func (is *LogicalIndexScan) DeriveStats(childStats []*property.StatsInfo, selfSchema *expression.Schema, childSchema []*expression.Schema) (*property.StatsInfo, error) { + for i, expr := range is.AccessConds { + is.AccessConds[i] = expression.PushDownNot(is.ctx, expr) + } + is.stats = is.Source.deriveStatsByFilter(is.AccessConds, nil) + if len(is.AccessConds) == 0 { + is.Ranges = ranger.FullRange() + } + is.IdxCols, is.IdxColLens = expression.IndexInfo2PrefixCols(is.Columns, selfSchema.Columns, is.Index) + is.FullIdxCols, is.FullIdxColLens = expression.IndexInfo2Cols(is.Columns, selfSchema.Columns, is.Index) + if !is.Index.Unique && !is.Index.Primary && len(is.Index.Columns) == len(is.IdxCols) { + handleCol := is.getPKIsHandleCol(selfSchema) + if handleCol != nil && !mysql.HasUnsignedFlag(handleCol.RetType.Flag) { + is.IdxCols = append(is.IdxCols, handleCol) + is.IdxColLens = append(is.IdxColLens, types.UnspecifiedLength) + } + } + return is.stats, nil +} + +// DeriveStats implement LogicalPlan DeriveStats interface. +func (p *LogicalSelection) DeriveStats(childStats []*property.StatsInfo, selfSchema *expression.Schema, childSchema []*expression.Schema) (*property.StatsInfo, error) { + p.stats = childStats[0].Scale(selectionFactor) + return p.stats, nil +} + +func deriveLimitStats(childProfile *property.StatsInfo, limitCount float64) *property.StatsInfo { + stats := &property.StatsInfo{ + RowCount: math.Min(limitCount, childProfile.RowCount), + Cardinality: make([]float64, len(childProfile.Cardinality)), + } + for i := range stats.Cardinality { + stats.Cardinality[i] = math.Min(childProfile.Cardinality[i], stats.RowCount) + } + return stats +} + +// DeriveStats implement LogicalPlan DeriveStats interface. +func (p *LogicalLimit) DeriveStats(childStats []*property.StatsInfo, selfSchema *expression.Schema, childSchema []*expression.Schema) (*property.StatsInfo, error) { + p.stats = deriveLimitStats(childStats[0], float64(p.Count)) + return p.stats, nil +} + +// DeriveStats implement LogicalPlan DeriveStats interface. +func (lt *LogicalTopN) DeriveStats(childStats []*property.StatsInfo, selfSchema *expression.Schema, childSchema []*expression.Schema) (*property.StatsInfo, error) { + lt.stats = deriveLimitStats(childStats[0], float64(lt.Count)) + return lt.stats, nil +} + +// getCardinality will return the Cardinality of a couple of columns. We simply return the max one, because we cannot know +// the Cardinality for multi-dimension attributes properly. This is a simple and naive scheme of Cardinality estimation. 
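+// For example, for group-by columns (a, b) whose per-column Cardinality values are 50 and 200 in
+// the child stats, the returned estimate is max(50, 200) = 200; if any column cannot be found in
+// the schema, the function logs an error and conservatively returns 1.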
+func getCardinality(cols []*expression.Column, schema *expression.Schema, profile *property.StatsInfo) float64 { + cardinality := 1.0 + indices := schema.ColumnsIndices(cols) + if indices == nil { + logutil.BgLogger().Error("column not found in schema", zap.Any("columns", cols), zap.String("schema", schema.String())) + return cardinality + } + for _, idx := range indices { + // It is a very elementary estimation. + cardinality = math.Max(cardinality, profile.Cardinality[idx]) + } + return cardinality +} + +// DeriveStats implement LogicalPlan DeriveStats interface. +func (p *LogicalProjection) DeriveStats(childStats []*property.StatsInfo, selfSchema *expression.Schema, childSchema []*expression.Schema) (*property.StatsInfo, error) { + childProfile := childStats[0] + p.stats = &property.StatsInfo{ + RowCount: childProfile.RowCount, + Cardinality: make([]float64, len(p.Exprs)), + } + for i, expr := range p.Exprs { + cols := expression.ExtractColumns(expr) + p.stats.Cardinality[i] = getCardinality(cols, childSchema[0], childProfile) + } + return p.stats, nil +} + +// DeriveStats implement LogicalPlan DeriveStats interface. +func (la *LogicalAggregation) DeriveStats(childStats []*property.StatsInfo, selfSchema *expression.Schema, childSchema []*expression.Schema) (*property.StatsInfo, error) { + childProfile := childStats[0] + gbyCols := make([]*expression.Column, 0, len(la.GroupByItems)) + for _, gbyExpr := range la.GroupByItems { + cols := expression.ExtractColumns(gbyExpr) + gbyCols = append(gbyCols, cols...) + } + cardinality := getCardinality(gbyCols, childSchema[0], childProfile) + la.stats = &property.StatsInfo{ + RowCount: cardinality, + Cardinality: make([]float64, selfSchema.Len()), + } + // We cannot estimate the Cardinality for every output, so we use a conservative strategy. + for i := range la.stats.Cardinality { + la.stats.Cardinality[i] = cardinality + } + la.inputCount = childProfile.RowCount + return la.stats, nil +} + +// DeriveStats implement LogicalPlan DeriveStats interface. +// If the type of join is SemiJoin, the selectivity of it will be same as selection's. +// If the type of join is LeftOuterSemiJoin, it will not add or remove any row. The last column is a boolean value, whose Cardinality should be two. +// If the type of join is inner/outer join, the output of join(s, t) should be N(s) * N(t) / (V(s.key) * V(t.key)) * Min(s.key, t.key). +// N(s) stands for the number of rows in relation s. V(s.key) means the Cardinality of join key in s. +// This is a quite simple strategy: We assume every bucket of relation which will participate join has the same number of rows, and apply cross join for +// every matched bucket. +func (p *LogicalJoin) DeriveStats(childStats []*property.StatsInfo, selfSchema *expression.Schema, childSchema []*expression.Schema) (*property.StatsInfo, error) { + leftProfile, rightProfile := childStats[0], childStats[1] + helper := &fullJoinRowCountHelper{ + cartesian: 0 == len(p.EqualConditions), + leftProfile: leftProfile, + rightProfile: rightProfile, + leftJoinKeys: p.LeftJoinKeys, + rightJoinKeys: p.RightJoinKeys, + leftSchema: childSchema[0], + rightSchema: childSchema[1], + } + p.equalCondOutCnt = helper.estimate() + count := p.equalCondOutCnt + if p.JoinType == LeftOuterJoin { + count = math.Max(count, leftProfile.RowCount) + } else if p.JoinType == RightOuterJoin { + count = math.Max(count, rightProfile.RowCount) + } + cardinality := make([]float64, 0, selfSchema.Len()) + cardinality = append(cardinality, leftProfile.Cardinality...) 
+ cardinality = append(cardinality, rightProfile.Cardinality...) + for i := range cardinality { + cardinality[i] = math.Min(cardinality[i], count) + } + p.stats = &property.StatsInfo{ + RowCount: count, + Cardinality: cardinality, + } + return p.stats, nil +} + +type fullJoinRowCountHelper struct { + cartesian bool + leftProfile *property.StatsInfo + rightProfile *property.StatsInfo + leftJoinKeys []*expression.Column + rightJoinKeys []*expression.Column + leftSchema *expression.Schema + rightSchema *expression.Schema +} + +func (h *fullJoinRowCountHelper) estimate() float64 { + if h.cartesian { + return h.leftProfile.RowCount * h.rightProfile.RowCount + } + leftKeyCardinality := getCardinality(h.leftJoinKeys, h.leftSchema, h.leftProfile) + rightKeyCardinality := getCardinality(h.rightJoinKeys, h.rightSchema, h.rightProfile) + count := h.leftProfile.RowCount * h.rightProfile.RowCount / math.Max(leftKeyCardinality, rightKeyCardinality) + return count +} diff --git a/planner/core/stringer.go b/planner/core/stringer.go new file mode 100644 index 0000000..e6caa6f --- /dev/null +++ b/planner/core/stringer.go @@ -0,0 +1,178 @@ +// Copyright 2015 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package core + +import ( + "fmt" + "strings" +) + +// ToString explains a Plan, returns description string. +func ToString(p Plan) string { + strs, _ := toString(p, []string{}, []int{}) + return strings.Join(strs, "->") +} + +func toString(in Plan, strs []string, idxs []int) ([]string, []int) { + switch x := in.(type) { + case LogicalPlan: + if len(x.Children()) > 1 { + idxs = append(idxs, len(strs)) + } + + for _, c := range x.Children() { + strs, idxs = toString(c, strs, idxs) + } + case PhysicalPlan: + if len(x.Children()) > 1 { + idxs = append(idxs, len(strs)) + } + + for _, c := range x.Children() { + strs, idxs = toString(c, strs, idxs) + } + } + + var str string + switch x := in.(type) { + case *PhysicalIndexScan: + str = fmt.Sprintf("Index(%s.%s)%v", x.Table.Name.L, x.Index.Name.L, x.Ranges) + case *PhysicalTableScan: + str = fmt.Sprintf("Table(%s)", x.Table.Name.L) + case *PhysicalHashJoin: + last := len(idxs) - 1 + idx := idxs[last] + children := strs[idx:] + strs = strs[:idx] + idxs = idxs[:last] + if x.InnerChildIdx == 0 { + str = "RightHashJoin{" + strings.Join(children, "->") + "}" + } else { + str = "LeftHashJoin{" + strings.Join(children, "->") + "}" + } + for _, eq := range x.EqualConditions { + l := eq.GetArgs()[0].String() + r := eq.GetArgs()[1].String() + str += fmt.Sprintf("(%s,%s)", l, r) + } + case *PhysicalMergeJoin: + last := len(idxs) - 1 + idx := idxs[last] + children := strs[idx:] + strs = strs[:idx] + idxs = idxs[:last] + id := "MergeJoin" + switch x.JoinType { + case LeftOuterJoin: + id = "MergeLeftOuterJoin" + case RightOuterJoin: + id = "MergeRightOuterJoin" + case InnerJoin: + id = "MergeInnerJoin" + } + str = id + "{" + strings.Join(children, "->") + "}" + for i := range x.LeftJoinKeys { + l := x.LeftJoinKeys[i].String() + r := x.RightJoinKeys[i].String() + str += fmt.Sprintf("(%s,%s)", l, r) + } + case *LogicalLimit, 
*PhysicalLimit: + str = "Limit" + case *ShowDDL: + str = "ShowDDL" + case *LogicalShow, *PhysicalShow: + str = "Show" + case *LogicalShowDDLJobs, *PhysicalShowDDLJobs: + str = "ShowDDLJobs" + case *LogicalSort, *PhysicalSort: + str = "Sort" + case *LogicalJoin: + last := len(idxs) - 1 + idx := idxs[last] + children := strs[idx:] + strs = strs[:idx] + str = "Join{" + strings.Join(children, "->") + "}" + idxs = idxs[:last] + for _, eq := range x.EqualConditions { + l := eq.GetArgs()[0].String() + r := eq.GetArgs()[1].String() + str += fmt.Sprintf("(%s,%s)", l, r) + } + case *DataSource: + if x.TableAsName != nil && x.TableAsName.L != "" { + str = fmt.Sprintf("DataScan(%s)", x.TableAsName) + } else { + str = fmt.Sprintf("DataScan(%s)", x.tableInfo.Name) + } + case *LogicalSelection: + str = fmt.Sprintf("Sel(%s)", x.Conditions) + case *PhysicalSelection: + str = fmt.Sprintf("Sel(%s)", x.Conditions) + case *LogicalProjection, *PhysicalProjection: + str = "Projection" + case *LogicalTopN: + str = fmt.Sprintf("TopN(%v,%d,%d)", x.ByItems, x.Offset, x.Count) + case *PhysicalTopN: + str = fmt.Sprintf("TopN(%v,%d,%d)", x.ByItems, x.Offset, x.Count) + case *LogicalTableDual, *PhysicalTableDual: + str = "Dual" + case *PhysicalHashAgg: + str = "HashAgg" + case *LogicalAggregation: + str = "Aggr(" + for i, aggFunc := range x.AggFuncs { + str += aggFunc.String() + if i != len(x.AggFuncs)-1 { + str += "," + } + } + str += ")" + case *PhysicalTableReader: + str = fmt.Sprintf("TableReader(%s)", ToString(x.tablePlan)) + case *PhysicalIndexReader: + str = fmt.Sprintf("IndexReader(%s)", ToString(x.indexPlan)) + case *PhysicalIndexLookUpReader: + str = fmt.Sprintf("IndexLookUp(%s, %s)", ToString(x.indexPlan), ToString(x.tablePlan)) + case *PhysicalUnionScan: + str = fmt.Sprintf("UnionScan(%s)", x.Conditions) + case *Analyze: + str = "Analyze{" + var children []string + for _, idx := range x.IdxTasks { + children = append(children, fmt.Sprintf("Index(%s)", idx.IndexInfo.Name.O)) + } + for _, col := range x.ColTasks { + var colNames []string + if col.PKInfo != nil { + colNames = append(colNames, col.PKInfo.Name.O) + } + for _, c := range col.ColsInfo { + colNames = append(colNames, c.Name.O) + } + children = append(children, fmt.Sprintf("Table(%s)", strings.Join(colNames, ", "))) + } + str = str + strings.Join(children, ",") + "}" + case *Delete: + str = fmt.Sprintf("%s->Delete", ToString(x.SelectPlan)) + case *Insert: + str = "Insert" + if x.SelectPlan != nil { + str = fmt.Sprintf("%s->Insert", ToString(x.SelectPlan)) + } + default: + str = fmt.Sprintf("%T", in) + } + strs = append(strs, str) + return strs, idxs +} diff --git a/planner/core/task.go b/planner/core/task.go new file mode 100644 index 0000000..1d3f73e --- /dev/null +++ b/planner/core/task.go @@ -0,0 +1,760 @@ +// Copyright 2017 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package core + +import ( + "math" + + "github.com/pingcap/tidb/expression" + "github.com/pingcap/tidb/expression/aggregation" + "github.com/pingcap/tidb/parser/ast" + "github.com/pingcap/tidb/parser/charset" + "github.com/pingcap/tidb/parser/mysql" + "github.com/pingcap/tidb/sessionctx" + "github.com/pingcap/tidb/statistics" + "github.com/pingcap/tidb/types" +) + +// task is a new version of `PhysicalPlanInfo`. It stores cost information for a task. +// A task may be CopTask, RootTask, MPPTask or a ParallelTask. +type task interface { + count() float64 + addCost(cost float64) + cost() float64 + copy() task + plan() PhysicalPlan + invalid() bool +} + +// copTask is a task that runs in a distributed kv store. +// TODO: In future, we should split copTask to indexTask and tableTask. +type copTask struct { + indexPlan PhysicalPlan + tablePlan PhysicalPlan + cst float64 + // indexPlanFinished means we have finished index plan. + indexPlanFinished bool + // keepOrder indicates if the plan scans data by order. + keepOrder bool + // In double read case, it may output one more column for handle(row id). + // We need to prune it, so we add a project do this. + doubleReadNeedProj bool + + extraHandleCol *expression.Column + // tblColHists stores the original stats of DataSource, it is used to get + // average row width when computing network cost. + tblColHists *statistics.HistColl + // tblCols stores the original columns of DataSource before being pruned, it + // is used to compute average row width when computing scan cost. + tblCols []*expression.Column + // rootTaskConds stores select conditions containing virtual columns. + // These conditions can't push to TiKV, so we have to add a selection for rootTask + rootTaskConds []expression.Expression +} + +func (t *copTask) invalid() bool { + return t.tablePlan == nil && t.indexPlan == nil +} + +func (t *rootTask) invalid() bool { + return t.p == nil +} + +func (t *copTask) count() float64 { + if t.indexPlanFinished { + return t.tablePlan.statsInfo().RowCount + } + return t.indexPlan.statsInfo().RowCount +} + +func (t *copTask) addCost(cst float64) { + t.cst += cst +} + +func (t *copTask) cost() float64 { + return t.cst +} + +func (t *copTask) copy() task { + nt := *t + return &nt +} + +func (t *copTask) plan() PhysicalPlan { + if t.indexPlanFinished { + return t.tablePlan + } + return t.indexPlan +} + +func attachPlan2Task(p PhysicalPlan, t task) task { + switch v := t.(type) { + case *copTask: + if v.indexPlanFinished { + p.SetChildren(v.tablePlan) + v.tablePlan = p + } else { + p.SetChildren(v.indexPlan) + v.indexPlan = p + } + case *rootTask: + p.SetChildren(v.p) + v.p = p + } + return t +} + +// finishIndexPlan means we no longer add plan to index plan, and compute the network cost for it. +func (t *copTask) finishIndexPlan() { + if t.indexPlanFinished { + return + } + cnt := t.count() + t.indexPlanFinished = true + sessVars := t.indexPlan.SCtx().GetSessionVars() + // Network cost of transferring rows of index scan to TiDB. + t.cst += cnt * sessVars.NetworkFactor * t.tblColHists.GetAvgRowSize(t.indexPlan.Schema().Columns, true) + if t.tablePlan == nil { + return + } + // Calculate the IO cost of table scan here because we cannot know its stats until we finish index plan. 
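+	// Both terms are linear in the index row count cnt: the network term above charges
+	// cnt * NetworkFactor * avgRowSize for shipping index rows to TiDB, and the scan term added
+	// below charges cnt * ScanFactor * avgRowSize for the corresponding table-side read.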
+ t.tablePlan.(*PhysicalTableScan).stats = t.indexPlan.statsInfo() + var p PhysicalPlan + for p = t.indexPlan; len(p.Children()) > 0; p = p.Children()[0] { + } + rowSize := t.tblColHists.GetIndexAvgRowSize(t.tblCols, p.(*PhysicalIndexScan).Index.Unique) + t.cst += cnt * rowSize * sessVars.ScanFactor +} + +func (p *basePhysicalPlan) attach2Task(tasks ...task) task { + t := finishCopTask(p.ctx, tasks[0].copy()) + return attachPlan2Task(p.self, t) +} + +// GetCost computes cost of hash join operator itself. +func (p *PhysicalHashJoin) GetCost(lCnt, rCnt float64) float64 { + buildCnt, probeCnt := lCnt, rCnt + // Taking the right as the inner for right join or using the outer to build a hash table. + if p.InnerChildIdx == 1 { + buildCnt, probeCnt = rCnt, lCnt + } + sessVars := p.ctx.GetSessionVars() + // Cost of building hash table. + cpuCost := buildCnt * sessVars.CPUFactor + memoryCost := buildCnt * sessVars.MemoryFactor + // Number of matched row pairs regarding the equal join conditions. + helper := &fullJoinRowCountHelper{ + cartesian: false, + leftProfile: p.children[0].statsInfo(), + rightProfile: p.children[1].statsInfo(), + leftJoinKeys: p.LeftJoinKeys, + rightJoinKeys: p.RightJoinKeys, + leftSchema: p.children[0].Schema(), + rightSchema: p.children[1].Schema(), + } + numPairs := helper.estimate() + // Cost of querying hash table is cheap actually, so we just compute the cost of + // evaluating `OtherConditions` and joining row pairs. + probeCost := numPairs * sessVars.CPUFactor + // Cost of evaluating outer filter. + if len(p.LeftConditions)+len(p.RightConditions) > 0 { + // Input outer count for the above compution should be adjusted by selectionFactor. + probeCost *= selectionFactor + probeCost += probeCnt * sessVars.CPUFactor + } + probeCost /= float64(p.Concurrency) + // Cost of additional concurrent goroutines. + cpuCost += probeCost + float64(p.Concurrency+1)*sessVars.ConcurrencyFactor + + return cpuCost + memoryCost +} + +func (p *PhysicalHashJoin) attach2Task(tasks ...task) task { + lTask := finishCopTask(p.ctx, tasks[0].copy()) + rTask := finishCopTask(p.ctx, tasks[1].copy()) + p.SetChildren(lTask.plan(), rTask.plan()) + p.schema = BuildPhysicalJoinSchema(p.JoinType, p) + return &rootTask{ + p: p, + cst: lTask.cost() + rTask.cost() + p.GetCost(lTask.count(), rTask.count()), + } +} + +// GetCost computes cost of merge join operator itself. +func (p *PhysicalMergeJoin) GetCost(lCnt, rCnt float64) float64 { + outerCnt := lCnt + innerKeys := p.RightJoinKeys + innerSchema := p.children[1].Schema() + innerStats := p.children[1].statsInfo() + if p.JoinType == RightOuterJoin { + outerCnt = rCnt + innerKeys = p.LeftJoinKeys + innerSchema = p.children[0].Schema() + innerStats = p.children[0].statsInfo() + } + helper := &fullJoinRowCountHelper{ + cartesian: false, + leftProfile: p.children[0].statsInfo(), + rightProfile: p.children[1].statsInfo(), + leftJoinKeys: p.LeftJoinKeys, + rightJoinKeys: p.RightJoinKeys, + leftSchema: p.children[0].Schema(), + rightSchema: p.children[1].Schema(), + } + numPairs := helper.estimate() + sessVars := p.ctx.GetSessionVars() + probeCost := numPairs * sessVars.CPUFactor + // Cost of evaluating outer filters. + var cpuCost float64 + if len(p.LeftConditions)+len(p.RightConditions) > 0 { + probeCost *= selectionFactor + cpuCost += outerCnt * sessVars.CPUFactor + } + cpuCost += probeCost + // For merge join, only one group of rows with same join key(not null) are cached, + // we compute averge memory cost using estimated group size. 
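+	// Illustrative numbers: with 1000000 inner rows and a join key NDV of 10000, the average
+	// group size is 100 rows, so the memory term is 100 * MemoryFactor rather than growing with
+	// the whole inner input.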
+ NDV := getCardinality(innerKeys, innerSchema, innerStats) + memoryCost := (innerStats.RowCount / NDV) * sessVars.MemoryFactor + return cpuCost + memoryCost +} + +func (p *PhysicalMergeJoin) attach2Task(tasks ...task) task { + lTask := finishCopTask(p.ctx, tasks[0].copy()) + rTask := finishCopTask(p.ctx, tasks[1].copy()) + p.SetChildren(lTask.plan(), rTask.plan()) + p.schema = BuildPhysicalJoinSchema(p.JoinType, p) + return &rootTask{ + p: p, + cst: lTask.cost() + rTask.cost() + p.GetCost(lTask.count(), rTask.count()), + } +} + +// splitCopAvg2CountAndSum splits the cop avg function to count and sum. +// Now it's only used for TableReader. +func splitCopAvg2CountAndSum(p PhysicalPlan) { + var baseAgg *basePhysicalAgg + if agg, ok := p.(*PhysicalHashAgg); ok { + baseAgg = &agg.basePhysicalAgg + } + if baseAgg == nil { + return + } + + schemaCursor := len(baseAgg.Schema().Columns) - len(baseAgg.GroupByItems) + for i := len(baseAgg.AggFuncs) - 1; i >= 0; i-- { + f := baseAgg.AggFuncs[i] + schemaCursor-- + if f.Name == ast.AggFuncAvg { + schemaCursor-- + sumAgg := *f + sumAgg.Name = ast.AggFuncSum + sumAgg.RetTp = baseAgg.Schema().Columns[schemaCursor+1].RetType + cntAgg := *f + cntAgg.Name = ast.AggFuncCount + cntAgg.RetTp = baseAgg.Schema().Columns[schemaCursor].RetType + cntAgg.RetTp.Flag = f.RetTp.Flag + baseAgg.AggFuncs = append(baseAgg.AggFuncs[:i], append([]*aggregation.AggFuncDesc{&cntAgg, &sumAgg}, baseAgg.AggFuncs[i+1:]...)...) + } + } +} + +// finishCopTask means we close the coprocessor task and create a root task. +func finishCopTask(ctx sessionctx.Context, task task) task { + t, ok := task.(*copTask) + if !ok { + return task + } + sessVars := ctx.GetSessionVars() + // copTasks are run in parallel, to make the estimated cost closer to execution time, we amortize + // the cost to cop iterator workers. According to `CopClient::Send`, the concurrency + // is Min(DistSQLScanConcurrency, numRegionsInvolvedInScan), since we cannot infer + // the number of regions involved, we simply use DistSQLScanConcurrency. + copIterWorkers := float64(t.plan().SCtx().GetSessionVars().DistSQLScanConcurrency) + t.finishIndexPlan() + // Network cost of transferring rows of table scan to TiDB. + if t.tablePlan != nil { + t.cst += t.count() * sessVars.NetworkFactor * t.tblColHists.GetAvgRowSize(t.tablePlan.Schema().Columns, false) + } + t.cst /= copIterWorkers + newTask := &rootTask{ + cst: t.cst, + } + if t.indexPlan != nil && t.tablePlan != nil { + p := PhysicalIndexLookUpReader{ + tablePlan: t.tablePlan, + indexPlan: t.indexPlan, + ExtraHandleCol: t.extraHandleCol, + }.Init(ctx) + p.stats = t.tablePlan.statsInfo() + // Add cost of building table reader executors. Handles are extracted in batch style, + // each handle is a range, the CPU cost of building copTasks should be: + // (indexRows / batchSize) * batchSize * CPUFactor + // Since we don't know the number of copTasks built, ignore these network cost now. + indexRows := t.indexPlan.statsInfo().RowCount + newTask.cst += indexRows * sessVars.CPUFactor + // Add cost of worker goroutines in index lookup. + numTblWorkers := float64(sessVars.IndexLookupConcurrency) + newTask.cst += (numTblWorkers + 1) * sessVars.ConcurrencyFactor + // When building table reader executor for each batch, we would sort the handles. 
CPU + // cost of sort is: + // CPUFactor * batchSize * Log2(batchSize) * (indexRows / batchSize) + indexLookupSize := float64(sessVars.IndexLookupSize) + batchSize := math.Min(indexLookupSize, indexRows) + if batchSize > 2 { + sortCPUCost := (indexRows * math.Log2(batchSize) * sessVars.CPUFactor) / numTblWorkers + newTask.cst += sortCPUCost + } + // Also, we need to sort the retrieved rows if index lookup reader is expected to return + // ordered results. Note that row count of these two sorts can be different, if there are + // operators above table scan. + tableRows := t.tablePlan.statsInfo().RowCount + selectivity := tableRows / indexRows + batchSize = math.Min(indexLookupSize*selectivity, tableRows) + if t.keepOrder && batchSize > 2 { + sortCPUCost := (tableRows * math.Log2(batchSize) * sessVars.CPUFactor) / numTblWorkers + newTask.cst += sortCPUCost + } + if t.doubleReadNeedProj { + schema := p.IndexPlans[0].(*PhysicalIndexScan).dataSourceSchema + proj := PhysicalProjection{Exprs: expression.Column2Exprs(schema.Columns)}.Init(ctx, p.stats, nil) + proj.SetSchema(schema) + proj.SetChildren(p) + newTask.p = proj + } else { + newTask.p = p + } + } else if t.indexPlan != nil { + p := PhysicalIndexReader{indexPlan: t.indexPlan}.Init(ctx) + p.stats = t.indexPlan.statsInfo() + newTask.p = p + } else { + tp := t.tablePlan + splitCopAvg2CountAndSum(tp) + for len(tp.Children()) > 0 { + tp = tp.Children()[0] + } + p := PhysicalTableReader{ + tablePlan: t.tablePlan, + }.Init(ctx) + p.stats = t.tablePlan.statsInfo() + newTask.p = p + } + + if len(t.rootTaskConds) > 0 { + sel := PhysicalSelection{Conditions: t.rootTaskConds}.Init(ctx, newTask.p.statsInfo()) + sel.SetChildren(newTask.p) + newTask.p = sel + } + + return newTask +} + +// rootTask is the final sink node of a plan graph. It should be a single goroutine on tidb. +type rootTask struct { + p PhysicalPlan + cst float64 +} + +func (t *rootTask) copy() task { + return &rootTask{ + p: t.p, + cst: t.cst, + } +} + +func (t *rootTask) count() float64 { + return t.p.statsInfo().RowCount +} + +func (t *rootTask) addCost(cst float64) { + t.cst += cst +} + +func (t *rootTask) cost() float64 { + return t.cst +} + +func (t *rootTask) plan() PhysicalPlan { + return t.p +} + +func (p *PhysicalLimit) attach2Task(tasks ...task) task { + t := tasks[0].copy() + if cop, ok := t.(*copTask); ok { + // For double read which requires order being kept, the limit cannot be pushed down to the table side, + // because handles would be reordered before being sent to table scan. + if !cop.keepOrder || !cop.indexPlanFinished || cop.indexPlan == nil { + // When limit is pushed down, we should remove its offset. + newCount := p.Offset + p.Count + childProfile := cop.plan().statsInfo() + // Strictly speaking, for the row count of stats, we should multiply newCount with "regionNum", + // but "regionNum" is unknown since the copTask can be a double read, so we ignore it now. + stats := deriveLimitStats(childProfile, float64(newCount)) + pushedDownLimit := PhysicalLimit{Count: newCount}.Init(p.ctx, stats) + cop = attachPlan2Task(pushedDownLimit, cop).(*copTask) + } + t = finishCopTask(p.ctx, cop) + } + return attachPlan2Task(p, t) +} + +// GetCost computes cost of TopN operator itself. 
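+// The CPU term is count * log2(max(offset+count, 2)) * CPUFactor (CopCPUFactor when the TopN runs
+// inside a cop task) and the memory term is heapSize * MemoryFactor; e.g. 1000000 input rows with
+// LIMIT 100 cost about 1000000 * log2(100) ≈ 6.6e6 units before the factor is applied
+// (illustrative numbers).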
+func (p *PhysicalTopN) GetCost(count float64, isRoot bool) float64 {
+	heapSize := float64(p.Offset + p.Count)
+	if heapSize < 2.0 {
+		heapSize = 2.0
+	}
+	sessVars := p.ctx.GetSessionVars()
+	// Ignore the cost of `doCompaction` in the current implementation of `TopNExec`, since it is a
+	// special side effect of our Chunk format in the TiDB layer, which may not exist in the coprocessor's
+	// implementation, or may be removed in the future if we change the data format.
+	// Note that we use the worst-case complexity to compute CPU cost, because it is simpler than
+	// considering probabilities of the average complexity, i.e., we may not need to adjust the heap for each input
+	// row.
+	var cpuCost float64
+	if isRoot {
+		cpuCost = count * math.Log2(heapSize) * sessVars.CPUFactor
+	} else {
+		cpuCost = count * math.Log2(heapSize) * sessVars.CopCPUFactor
+	}
+	memoryCost := heapSize * sessVars.MemoryFactor
+	return cpuCost + memoryCost
+}
+
+// canPushDown checks if this TopN can be pushed down. If every expression can be converted to pb, it can be pushed down.
+func (p *PhysicalTopN) canPushDown() bool {
+	exprs := make([]expression.Expression, 0, len(p.ByItems))
+	for _, item := range p.ByItems {
+		exprs = append(exprs, item.Expr)
+	}
+	_, _, remained := expression.ExpressionsToPB(p.ctx.GetSessionVars().StmtCtx, exprs, p.ctx.GetClient())
+	return len(remained) == 0
+}
+
+func (p *PhysicalTopN) allColsFromSchema(schema *expression.Schema) bool {
+	cols := make([]*expression.Column, 0, len(p.ByItems))
+	for _, item := range p.ByItems {
+		cols = append(cols, expression.ExtractColumns(item.Expr)...)
+	}
+	return len(schema.ColumnsIndices(cols)) > 0
+}
+
+// GetCost computes the cost of the in-memory sort.
+func (p *PhysicalSort) GetCost(count float64) float64 {
+	if count < 2.0 {
+		count = 2.0
+	}
+	sessVars := p.ctx.GetSessionVars()
+	return count*math.Log2(count)*sessVars.CPUFactor + count*sessVars.MemoryFactor
+}
+
+func (p *PhysicalSort) attach2Task(tasks ...task) task {
+	t := tasks[0].copy()
+	t = attachPlan2Task(p, t)
+	t.addCost(p.GetCost(t.count()))
+	return t
+}
+
+func (p *NominalSort) attach2Task(tasks ...task) task {
+	return tasks[0]
+}
+
+func (p *PhysicalTopN) getPushedDownTopN(childPlan PhysicalPlan) *PhysicalTopN {
+	newByItems := make([]*ByItems, 0, len(p.ByItems))
+	for _, expr := range p.ByItems {
+		newByItems = append(newByItems, expr.Clone())
+	}
+	newCount := p.Offset + p.Count
+	childProfile := childPlan.statsInfo()
+	// Strictly speaking, for the row count of the pushed-down TopN, we should multiply newCount by "regionNum",
+	// but "regionNum" is unknown since the copTask can be a double read, so we ignore it for now.
+	stats := deriveLimitStats(childProfile, float64(newCount))
+	topN := PhysicalTopN{
+		ByItems: newByItems,
+		Count:   newCount,
+	}.Init(p.ctx, stats)
+	topN.SetChildren(childPlan)
+	return topN
+}
+
+func (p *PhysicalTopN) attach2Task(tasks ...task) task {
+	t := tasks[0].copy()
+	inputCount := t.count()
+	if copTask, ok := t.(*copTask); ok && p.canPushDown() {
+		// If all columns in the TopN come from the index plan, we push it to the index plan; otherwise we
+		// finish the index plan and push it to the table plan.
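+		// Keeping the TopN on the index side, when its columns allow it, also bounds the number of
+		// handles sent to the table side, so the table lookup only fetches rows for the handles that
+		// survive the TopN.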
+ var pushedDownTopN *PhysicalTopN + if !copTask.indexPlanFinished && p.allColsFromSchema(copTask.indexPlan.Schema()) { + pushedDownTopN = p.getPushedDownTopN(copTask.indexPlan) + copTask.indexPlan = pushedDownTopN + } else { + copTask.finishIndexPlan() + pushedDownTopN = p.getPushedDownTopN(copTask.tablePlan) + copTask.tablePlan = pushedDownTopN + } + copTask.addCost(pushedDownTopN.GetCost(inputCount, false)) + } + rootTask := finishCopTask(p.ctx, t) + rootTask.addCost(p.GetCost(rootTask.count(), true)) + rootTask = attachPlan2Task(p, rootTask) + return rootTask +} + +// GetCost computes the cost of projection operator itself. +func (p *PhysicalProjection) GetCost(count float64) float64 { + sessVars := p.ctx.GetSessionVars() + cpuCost := count * sessVars.CPUFactor + concurrency := float64(sessVars.ProjectionConcurrency) + if concurrency <= 0 { + return cpuCost + } + cpuCost /= concurrency + concurrencyCost := (1 + concurrency) * sessVars.ConcurrencyFactor + return cpuCost + concurrencyCost +} + +func (p *PhysicalProjection) attach2Task(tasks ...task) task { + t := tasks[0].copy() + if copTask, ok := t.(*copTask); ok { + // TODO: support projection push down. + t = finishCopTask(p.ctx, copTask) + } + t = attachPlan2Task(p, t) + t.addCost(p.GetCost(t.count())) + return t +} + +func (sel *PhysicalSelection) attach2Task(tasks ...task) task { + sessVars := sel.ctx.GetSessionVars() + t := finishCopTask(sel.ctx, tasks[0].copy()) + t.addCost(t.count() * sessVars.CPUFactor) + t = attachPlan2Task(sel, t) + return t +} + +// CheckAggCanPushCop checks whether the aggFuncs with groupByItems can +// be pushed down to coprocessor. +func CheckAggCanPushCop(sctx sessionctx.Context, aggFuncs []*aggregation.AggFuncDesc, groupByItems []expression.Expression) bool { + sc := sctx.GetSessionVars().StmtCtx + client := sctx.GetClient() + for _, aggFunc := range aggFuncs { + pb := aggregation.AggFuncToPBExpr(sc, client, aggFunc) + if pb == nil { + return false + } + } + _, _, remained := expression.ExpressionsToPB(sc, groupByItems, client) + if len(remained) > 0 { + return false + } + return true +} + +// BuildFinalModeAggregation splits either LogicalAggregation or PhysicalAggregation to finalAgg and partial1Agg, +// returns the body of finalAgg and the schema of partialAgg. +func BuildFinalModeAggregation( + sctx sessionctx.Context, + aggFuncs []*aggregation.AggFuncDesc, + groupByItems []expression.Expression, + finalSchema *expression.Schema) (finalAggFuncs []*aggregation.AggFuncDesc, finalGbyItems []expression.Expression, partialSchema *expression.Schema) { + // TODO: Refactor the way of constructing aggregation functions. 
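+	// For example (an informal sketch of the loop below): avg() needs both a count part and a value
+	// (sum) part, so the partial schema gains two columns and the final avg takes them as its two
+	// arguments, while sum() or count() each need only a single partial column.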
+	partialSchema = expression.NewSchema()
+	partialCursor := 0
+	finalAggFuncs = make([]*aggregation.AggFuncDesc, len(aggFuncs))
+	for i, aggFunc := range aggFuncs {
+		finalAggFunc := &aggregation.AggFuncDesc{}
+		finalAggFunc.Name = aggFunc.Name
+		args := make([]expression.Expression, 0, len(aggFunc.Args))
+		if aggregation.NeedCount(finalAggFunc.Name) {
+			ft := types.NewFieldType(mysql.TypeLonglong)
+			ft.Flen, ft.Charset, ft.Collate = 21, charset.CharsetBin, charset.CollationBin
+			partialSchema.Append(&expression.Column{
+				UniqueID: sctx.GetSessionVars().AllocPlanColumnID(),
+				RetType:  ft,
+			})
+			args = append(args, partialSchema.Columns[partialCursor])
+			partialCursor++
+		}
+		if aggregation.NeedValue(finalAggFunc.Name) {
+			partialSchema.Append(&expression.Column{
+				UniqueID: sctx.GetSessionVars().AllocPlanColumnID(),
+				RetType:  finalSchema.Columns[i].GetType(),
+			})
+			args = append(args, partialSchema.Columns[partialCursor])
+			partialCursor++
+		}
+		finalAggFunc.Args = args
+		finalAggFunc.Mode = aggregation.FinalMode
+		finalAggFunc.RetTp = aggFunc.RetTp
+		finalAggFuncs[i] = finalAggFunc
+	}
+	// Add the group-by columns.
+	finalGbyItems = make([]expression.Expression, 0, len(groupByItems))
+	for _, gbyExpr := range groupByItems {
+		var gbyCol *expression.Column
+		if col, ok := gbyExpr.(*expression.Column); ok {
+			gbyCol = col
+		} else {
+			gbyCol = &expression.Column{
+				UniqueID: sctx.GetSessionVars().AllocPlanColumnID(),
+				RetType:  gbyExpr.GetType(),
+			}
+		}
+		partialSchema.Append(gbyCol)
+		finalGbyItems = append(finalGbyItems, gbyCol)
+	}
+	return
+}
+
+func (p *basePhysicalAgg) newPartialAggregate() (partial, final PhysicalPlan) {
+	// Check if this aggregation can be pushed down.
+	if !CheckAggCanPushCop(p.ctx, p.AggFuncs, p.GroupByItems) {
+		return nil, p.self
+	}
+	finalAggFuncs, finalGbyItems, partialSchema := BuildFinalModeAggregation(p.ctx, p.AggFuncs, p.GroupByItems, p.schema)
+	// Remove unnecessary FirstRow.
+	p.AggFuncs = RemoveUnnecessaryFirstRow(p.ctx, finalAggFuncs, finalGbyItems, p.AggFuncs, p.GroupByItems, partialSchema)
+	finalSchema := p.schema
+	p.schema = partialSchema
+	partialAgg := p.self
+	// Create physical "final" aggregation.
+	finalAgg := basePhysicalAgg{
+		AggFuncs:     finalAggFuncs,
+		GroupByItems: finalGbyItems,
+	}.initForHash(p.ctx, p.stats)
+	finalAgg.schema = finalSchema
+	return partialAgg, finalAgg
+}
+
+// RemoveUnnecessaryFirstRow removes unnecessary FirstRow functions from the aggregation. This function can be
+// used for both LogicalAggregation and PhysicalAggregation.
+// When a selected column is the same as a group-by key, the firstrow function on it can be removed
+// and its value read from the group-by key instead.
+// e.g.
+// select a, count(b) from t group by a;
+// The schema is [firstrow(a), count(b), a]. The column firstrow(a) is unnecessary.
+// We can optimize the schema to [count(b), a] and adjust the index used to fetch the value.
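+// Note that partialCursor below tracks how many partial-schema columns each surviving aggregate
+// consumes (one for its count part, one for its value part), so removing a firstrow column keeps
+// the partial schema aligned with the remaining functions.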
+func RemoveUnnecessaryFirstRow(
+	sctx sessionctx.Context,
+	finalAggFuncs []*aggregation.AggFuncDesc,
+	finalGbyItems []expression.Expression,
+	partialAggFuncs []*aggregation.AggFuncDesc,
+	partialGbyItems []expression.Expression,
+	partialSchema *expression.Schema) []*aggregation.AggFuncDesc {
+	partialCursor := 0
+	newAggFuncs := make([]*aggregation.AggFuncDesc, 0, len(partialAggFuncs))
+	for i, aggFunc := range partialAggFuncs {
+		if aggFunc.Name == ast.AggFuncFirstRow {
+			canOptimize := false
+			for j, gbyExpr := range partialGbyItems {
+				if gbyExpr.Equal(sctx, aggFunc.Args[0]) {
+					canOptimize = true
+					finalAggFuncs[i].Args[0] = finalGbyItems[j]
+					break
+				}
+			}
+			if canOptimize {
+				partialSchema.Columns = append(partialSchema.Columns[:partialCursor], partialSchema.Columns[partialCursor+1:]...)
+				continue
+			}
+		}
+		if aggregation.NeedCount(aggFunc.Name) {
+			partialCursor++
+		}
+		if aggregation.NeedValue(aggFunc.Name) {
+			partialCursor++
+		}
+		newAggFuncs = append(newAggFuncs, aggFunc)
+	}
+	return newAggFuncs
+}
+
+// cpuCostDivisor computes the concurrency to which we would amortize CPU cost
+// for hash aggregation.
+func (p *PhysicalHashAgg) cpuCostDivisor() (float64, float64) {
+	sessionVars := p.ctx.GetSessionVars()
+	finalCon, partialCon := sessionVars.HashAggFinalConcurrency, sessionVars.HashAggPartialConcurrency
+	// According to `ValidateSetSystemVar`, `finalCon` and `partialCon` cannot be less than or equal to 0.
+	if finalCon == 1 && partialCon == 1 {
+		return 0, 0
+	}
+	// It is tricky to decide which concurrency we should use to amortize CPU cost. Since the cost of hash
+	// aggregation tends to be under-estimated, as explained in `attach2Task`, we choose the smaller
+	// concurrency to make some compensation.
+	return math.Min(float64(finalCon), float64(partialCon)), float64(finalCon + partialCon)
+}
+
+func (p *PhysicalHashAgg) attach2Task(tasks ...task) task {
+	t := tasks[0].copy()
+	inputRows := t.count()
+	if cop, ok := t.(*copTask); ok {
+		partialAgg, finalAgg := p.newPartialAggregate()
+		if partialAgg != nil {
+			if cop.tablePlan != nil {
+				cop.finishIndexPlan()
+				partialAgg.SetChildren(cop.tablePlan)
+				cop.tablePlan = partialAgg
+			} else {
+				partialAgg.SetChildren(cop.indexPlan)
+				cop.indexPlan = partialAgg
+			}
+			cop.addCost(p.GetCost(inputRows, false))
+		}
+		// In `newPartialAggregate`, we use the stats of the final aggregation as the stats
+		// of `partialAgg`, so the network cost of transferring result rows of `partialAgg`
+		// to TiDB is normally under-estimated for hash aggregation, since the group-by
+		// column may be independent of the column used for region distribution. A closer
+		// estimation of the network cost for hash aggregation would multiply in the number of
+		// regions involved in `partialAgg`, which is, however, unknown.
+		t = finishCopTask(p.ctx, cop)
+		inputRows = t.count()
+		attachPlan2Task(finalAgg, t)
+	} else {
+		attachPlan2Task(p, t)
+	}
+	// We may actually have 3-phase hash aggregation; strictly speaking, we'd better
+	// calculate the cost of each phase and sum the results up. However, we don't have
+	// region-level table stats, and the concurrency of the `partialAgg`,
+	// i.e., max(number_of_regions, DistSQLScanConcurrency), is unknown either, so it is hard
+	// to compute costs separately. 
We ignore region level parallelism for both hash + // aggregation and stream aggregation when calculating cost, though this would lead to inaccuracy, + // hopefully this inaccuracy would be imposed on both aggregation implementations, + // so they are still comparable horizontally. + // Also, we use the stats of `partialAgg` as the input of cost computing for TiDB layer + // hash aggregation, it would cause under-estimation as the reason mentioned in comment above. + // To make it simple, we also treat 2-phase parallel hash aggregation in TiDB layer as + // 1-phase when computing cost. + t.addCost(p.GetCost(inputRows, true)) + return t +} + +// GetCost computes the cost of hash aggregation considering CPU/memory. +func (p *PhysicalHashAgg) GetCost(inputRows float64, isRoot bool) float64 { + cardinality := p.statsInfo().RowCount + aggFuncFactor := p.getAggFuncCostFactor() + var cpuCost float64 + sessVars := p.ctx.GetSessionVars() + if isRoot { + cpuCost = inputRows * sessVars.CPUFactor * aggFuncFactor + divisor, con := p.cpuCostDivisor() + if divisor > 0 { + cpuCost /= divisor + // Cost of additional goroutines. + cpuCost += (con + 1) * sessVars.ConcurrencyFactor + } + } else { + cpuCost = inputRows * sessVars.CopCPUFactor * aggFuncFactor + } + memoryCost := cardinality * sessVars.MemoryFactor * float64(len(p.AggFuncs)) + return cpuCost + memoryCost +} diff --git a/planner/core/testdata/analyze_suite_in.json b/planner/core/testdata/analyze_suite_in.json new file mode 100644 index 0000000..6aadfc2 --- /dev/null +++ b/planner/core/testdata/analyze_suite_in.json @@ -0,0 +1,57 @@ +[ + { + "name": "TestIssue9562", + "cases": [ + [ + "create table t(a int, b int, index idx_ab(a, b))", + "explain select * from t t1 join t t2 where t1.b = t2.b and t2.b is null" + ] + ] + }, + { + "name": "TestNullCount", + "cases": [ + "explain select * from t where a is null", + "explain select * from t use index(idx) where a is null", + "explain select * from t where b = 1", + "explain select * from t where b < 1" + ] + }, + { + "name": "TestEmptyTable", + "cases": [ + "select * from t where t.c1 <= 50", + "select * from t, t1 where t.c1 = t1.c1", + "select * from t limit 0" + ] + }, + { + "name": "TestIndexRead", + "cases": [ + "select count(*) from t group by e", + "select count(*) from t where e <= 10 group by e", + "select count(*) from t where e <= 50", + "select count(*) from t where c > '1' group by b", + "select count(*) from t where e = 1 group by b", + "select count(*) from t where e > 1 group by b", + "select count(e) from t where t.b <= 20", + "select count(e) from t where t.b <= 30", + "select count(e) from t where t.b <= 40", + "select count(e) from t where t.b <= 50", + "select count(e) from t where t.b <= 100000000000", + "select * from t where t.b <= 40", + "select * from t where t.b <= 50", + "select * from t where t.b <= 10000000000", + // test panic + "select * from t where 1 and t.b <= 50", + "select * from t where t.b <= 100 order by t.a limit 1", + "select * from t where t.b <= 1 order by t.a limit 10", + "select * from t use index(b) where b = 1 order by a", + // test datetime + "select * from t where d < cast('1991-09-05' as datetime)", + // test timestamp + "select * from t where ts < '1991-09-05'", + "select sum(a) from t1 use index(idx) where a = 3 and b = 100000 group by a limit 1" + ] + } +] diff --git a/planner/core/testdata/analyze_suite_out.json b/planner/core/testdata/analyze_suite_out.json new file mode 100644 index 0000000..7daa80b --- /dev/null +++ 
b/planner/core/testdata/analyze_suite_out.json @@ -0,0 +1,60 @@ +[ + { + "Name": "TestIssue9562", + "Cases": [ + { + "SQL": [ + "create table t(a int, b int, index idx_ab(a, b))", + "explain select * from t t1 join t t2 where t1.b = t2.b and t2.b is null" + ], + "Plan": [ + "Projection_7 0.00 root test.t.a, test.t.b, test.t.a, test.t.b", + "└─HashRightJoin_9 0.00 root inner join, equal:[eq(test.t.b, test.t.b)]", + " ├─TableReader_12 0.00 root data:Selection_11", + " │ └─Selection_11 0.00 cop isnull(test.t.b), not(isnull(test.t.b))", + " │ └─TableScan_10 10000.00 cop table:t2, range:[-inf,+inf], keep order:false, stats:pseudo", + " └─TableReader_18 9990.00 root data:Selection_17", + " └─Selection_17 9990.00 cop not(isnull(test.t.b))", + " └─TableScan_16 10000.00 cop table:t1, range:[-inf,+inf], keep order:false, stats:pseudo" + ] + } + ] + }, + { + "Name": "TestNullCount", + "Cases": [ + [ + "TableReader_7 2.00 root data:Selection_6", + "└─Selection_6 2.00 cop isnull(test.t.a)", + " └─TableScan_5 2.00 cop table:t, range:[-inf,+inf], keep order:false" + ], + [ + "IndexLookUp_7 2.00 root ", + "├─IndexScan_5 2.00 cop table:t, index:a, range:[NULL,NULL], keep order:false", + "└─TableScan_6 2.00 cop table:t, keep order:false" + ], + [ + "TableReader_7 0.00 root data:Selection_6", + "└─Selection_6 0.00 cop eq(test.t.b, 1)", + " └─TableScan_5 2.00 cop table:t, range:[-inf,+inf], keep order:false" + ], + [ + "TableReader_7 0.00 root data:Selection_6", + "└─Selection_6 0.00 cop lt(test.t.b, 1)", + " └─TableScan_5 2.00 cop table:t, range:[-inf,+inf], keep order:false" + ] + ] + }, + { + "Name": "TestEmptyTable", + "Cases": [ + "TableReader(Table(t)->Sel([le(test.t.c1, 50)]))", + "LeftHashJoin{TableReader(Table(t)->Sel([not(isnull(test.t.c1))]))->TableReader(Table(t1)->Sel([not(isnull(test.t1.c1))]))}(test.t.c1,test.t1.c1)", + "Dual" + ] + }, + { + "Name": "TestIndexRead", + "Cases": null + } +] diff --git a/planner/core/testdata/integration_suite_in.json b/planner/core/testdata/integration_suite_in.json new file mode 100644 index 0000000..03b803b --- /dev/null +++ b/planner/core/testdata/integration_suite_in.json @@ -0,0 +1,15 @@ +[ + { + "name": "TestPushLimitDownIndexLookUpReader", + "cases": [ + // Limit should be pushed down into IndexLookUpReader, row count of IndexLookUpReader and TableScan should be 1.00. + "explain select * from tbl use index(idx_b_c) where b > 1 limit 2,1", + // Projection atop IndexLookUpReader, Limit should be pushed down into IndexLookUpReader, and Projection should have row count 1.00 as well. + "explain select * from tbl use index(idx_b_c) where b > 1 order by b desc limit 2,1", + // Limit should be pushed down into IndexLookUpReader when Selection on top of IndexScan. + "explain select * from tbl use index(idx_b_c) where b > 1 and c > 1 limit 2,1", + // Limit should NOT be pushed down into IndexLookUpReader when Selection on top of TableScan. 
+ "explain select * from tbl use index(idx_b_c) where b > 1 and a > 1 limit 2,1" + ] + } +] diff --git a/planner/core/testdata/integration_suite_out.json b/planner/core/testdata/integration_suite_out.json new file mode 100644 index 0000000..60e1941 --- /dev/null +++ b/planner/core/testdata/integration_suite_out.json @@ -0,0 +1,50 @@ +[ + { + "Name": "TestPushLimitDownIndexLookUpReader", + "Cases": [ + { + "SQL": "explain select * from tbl use index(idx_b_c) where b > 1 limit 2,1", + "Plan": [ + "Limit_9 1.00 root offset:2, count:1", + "└─IndexLookUp_14 3.00 root ", + " ├─Limit_13 3.00 cop offset:0, count:3", + " │ └─IndexScan_11 3.00 cop table:tbl, index:b, c, range:(1,+inf], keep order:false", + " └─TableScan_12 3.00 cop table:tbl, keep order:false, stats:pseudo" + ] + }, + { + "SQL": "explain select * from tbl use index(idx_b_c) where b > 1 order by b desc limit 2,1", + "Plan": [ + "Limit_12 1.00 root offset:2, count:1", + "└─Projection_25 3.00 root test.tbl.a, test.tbl.b, test.tbl.c", + " └─IndexLookUp_24 3.00 root ", + " ├─Limit_23 3.00 cop offset:0, count:3", + " │ └─IndexScan_21 3.00 cop table:tbl, index:b, c, range:(1,+inf], keep order:true, desc", + " └─TableScan_22 3.00 cop table:tbl, keep order:false, stats:pseudo" + ] + }, + { + "SQL": "explain select * from tbl use index(idx_b_c) where b > 1 and c > 1 limit 2,1", + "Plan": [ + "Limit_9 1.00 root offset:2, count:1", + "└─IndexLookUp_15 3.00 root ", + " ├─Limit_14 3.00 cop offset:0, count:3", + " │ └─Selection_13 3.00 cop gt(test.tbl.c, 1)", + " │ └─IndexScan_11 3.75 cop table:tbl, index:b, c, range:(1,+inf], keep order:false", + " └─TableScan_12 3.00 cop table:tbl, keep order:false, stats:pseudo" + ] + }, + { + "SQL": "explain select * from tbl use index(idx_b_c) where b > 1 and a > 1 limit 2,1", + "Plan": [ + "Limit_9 1.00 root offset:2, count:1", + "└─IndexLookUp_15 3.00 root ", + " ├─IndexScan_11 3.75 cop table:tbl, index:b, c, range:(1,+inf], keep order:false", + " └─Limit_14 3.00 cop offset:0, count:3", + " └─Selection_13 3.00 cop gt(test.tbl.a, 1)", + " └─TableScan_12 3.75 cop table:tbl, keep order:false" + ] + } + ] + } +] diff --git a/planner/core/testdata/plan_suite_in.json b/planner/core/testdata/plan_suite_in.json new file mode 100644 index 0000000..3d27c58 --- /dev/null +++ b/planner/core/testdata/plan_suite_in.json @@ -0,0 +1,247 @@ +[ + { + "name": "TestIndexHint", + "cases": [ + // simple case + "select /*+ USE_INDEX(t, c_d_e) */ * from t", + "select /*+ USE_INDEX(test.t, c_d_e) */ * from t", + "select /*+ IGNORE_INDEX(t, c_d_e) */ c from t order by c", + "select /*+ IGNORE_INDEX(test.t, c_d_e) */ c from t order by c", + "select /*+ USE_INDEX(t, c_d_e) */ * from t t1", + "select /*+ IGNORE_INDEX(t, c_d_e) */ t1.c from t t1 order by t1.c", + "select /*+ USE_INDEX(t1, c_d_e) */ * from t t1", + "select /*+ IGNORE_INDEX(t1, c_d_e) */ t1.c from t t1 order by t1.c", + "select /*+ USE_INDEX(t1, c_d_e), USE_INDEX(t2, f) */ * from t t1, t t2 where t1.a = t2.b", + "select /*+ IGNORE_INDEX(t1, c_d_e), IGNORE_INDEX(t2, f), HASH_JOIN(t1) */ * from t t1, t t2 where t1.a = t2.b", + // test multiple indexes + "select /*+ USE_INDEX(t, c_d_e, f, g) */ * from t order by f", + // use TablePath when the hint only contains table. + "select /*+ USE_INDEX(t) */ f from t where f > 10", + // there will be a warning instead of error when index not exist + "select /*+ USE_INDEX(t, no_such_index) */ * from t", + "select /*+ IGNORE_INDEX(t, no_such_index) */ * from t", + // use both use_index and ignore_index, same as index hints in sql. 
+ "select /*+ USE_INDEX(t, c_d_e), IGNORE_INDEX(t, f) */ c from t order by c", + "select /*+ USE_INDEX(t, f), IGNORE_INDEX(t, f) */ c from t order by c", + "select /*+ USE_INDEX(t, c_d_e), IGNORE_INDEX(t, c_d_e) */ c from t order by c", + "select /*+ USE_INDEX(t, c_d_e, f), IGNORE_INDEX(t, c_d_e) */ c from t order by c" + ] + }, + { + "name": "TestDAGPlanBuilderSimpleCase", + "cases":[ + // Test index hint. + "select * from t t1 use index(c_d_e)", + "select f from t use index() where f = 1", + // Test ts + Sort vs. DoubleRead + filter. + "select a from t where a between 1 and 2 order by c", + // Test DNF condition + Double Read. + "select * from t where (t.c > 0 and t.c < 2) or (t.c > 4 and t.c < 6) or (t.c > 8 and t.c < 10) or (t.c > 12 and t.c < 14) or (t.c > 16 and t.c < 18)", + "select * from t where (t.c > 0 and t.c < 1) or (t.c > 2 and t.c < 3) or (t.c > 4 and t.c < 5) or (t.c > 6 and t.c < 7) or (t.c > 9 and t.c < 10)", + // Test TopN to table branch in double read. + "select * from t where t.c = 1 and t.e = 1 order by t.b limit 1", + // Test Null Range + "select * from t where t.e_str is null", + // Test Null Range but the column has not null flag. + "select * from t where t.c is null", + // Test TopN to index branch in double read. + "select * from t where t.c = 1 and t.e = 1 order by t.e limit 1", + // Test TopN to Limit in double read. + "select * from t where t.c = 1 and t.e = 1 order by t.d limit 1", + // Test TopN to Limit in index single read. + "select c from t where t.c = 1 and t.e = 1 order by t.d limit 1", + // Test TopN to Limit in table single read. + "select c from t order by t.a limit 1", + // Test TopN push down in table single read. + "select c from t order by t.a + t.b limit 1", + // Test Limit push down in table single read. + "select c from t limit 1", + // Test Limit push down in index single read. + "select c from t where c = 1 limit 1", + // Test index single read and Selection. + "select c from t where c = 1", + // Test index single read and Sort. + "select c from t order by c", + // Test index single read and Sort. + "select c from t where c = 1 order by e", + // Test Limit push down in double single read. + "select c, b from t where c = 1 limit 1", + // Test Selection + Limit push down in double single read. + "select c, b from t where c = 1 and e = 1 and b = 1 limit 1", + // Test Order by multi columns. + "select c from t where c = 1 order by d, c", + // Test for index with length. + "select c_str from t where e_str = '1' order by d_str, c_str", + // Test PK in index single read. + "select c from t where t.c = 1 and t.a > 1 order by t.d limit 1", + // Test composed index. + // FIXME: The TopN didn't be pushed. + "select c from t where t.c = 1 and t.d = 1 order by t.a limit 1", + // Test PK in index double read. + "select * from t where t.c = 1 and t.a > 1 order by t.d limit 1", + // Test index filter condition push down. 
+ "select * from t use index(e_d_c_str_prefix) where t.c_str = 'abcdefghijk' and t.d_str = 'd' and t.e_str = 'e'", + "select * from (select * from t use index() order by b) t left join t t1 on t.a=t1.a limit 10", + "select * from (select *, NULL as xxx from t) t order by xxx", + "select * from t use index(f) where f = 1 and a = 1", + "select * from t2 use index(b) where b = 1 and a = 1" + ] + }, + { + "name": "TestDAGPlanBuilderJoin", + "cases": [ + "select * from t t1 join t t2 on t1.a = t2.c_str", + "select * from t t1 join t t2 on t1.b = t2.a", + "select * from t t1 join t t2 on t1.a = t2.a join t t3 on t1.a = t3.a", + "select * from t t1 join t t2 on t1.a = t2.a join t t3 on t1.b = t3.a", + "select * from t t1 join t t2 on t1.b = t2.a order by t1.a", + "select * from t t1 join t t2 on t1.b = t2.a order by t1.a limit 1", + // Test hash join's hint. + "select /*+ TIDB_HJ(t1, t2) */ * from t t1 join t t2 on t1.b = t2.a order by t1.a limit 1", + "select * from t t1 left join t t2 on t1.b = t2.a where 1 = 1 limit 1", + "select * from t t1 join t t2 on t1.b = t2.a and t1.c = 1 and t1.d = 1 and t1.e = 1 order by t1.a limit 1", + "select * from t t1 join t t2 on t1.b = t2.b join t t3 on t1.b = t3.b", + "select * from t t1 join t t2 on t1.a = t2.a order by t1.a", + "select * from t t1 left outer join t t2 on t1.a = t2.a right outer join t t3 on t1.a = t3.a", + "select * from t t1 join t t2 on t1.a = t2.a join t t3 on t1.a = t3.a and t1.b = 1 and t3.c = 1", + // Test Single Merge Join. + // Merge Join now enforce a sort. + "select /*+ TIDB_SMJ(t1,t2)*/ * from t t1, t t2 where t1.a = t2.b", + "select /*+ TIDB_SMJ(t1,t2)*/ * from t t1, t t2 where t1.a = t2.a", + // Test Single Merge Join + Sort. + "select /*+ TIDB_SMJ(t1,t2)*/ * from t t1, t t2 where t1.a = t2.a order by t2.a", + "select /*+ TIDB_SMJ(t1,t2)*/ * from t t1, t t2 where t1.b = t2.b order by t2.a", + // Test Single Merge Join + Sort + desc. + "select /*+ TIDB_SMJ(t1,t2)*/ * from t t1, t t2 where t1.a = t2.a order by t2.a desc", + "select /*+ TIDB_SMJ(t1,t2)*/ * from t t1, t t2 where t1.b = t2.b order by t2.b desc", + // Test Multi Merge Join. + "select /*+ TIDB_SMJ(t1,t2,t3)*/ * from t t1, t t2, t t3 where t1.a = t2.a and t2.a = t3.a", + "select /*+ TIDB_SMJ(t1,t2,t3)*/ * from t t1, t t2, t t3 where t1.a = t2.b and t2.a = t3.b", + // Test Multi Merge Join with multi keys. + // TODO: More tests should be added. + "select /*+ TIDB_SMJ(t1,t2,t3)*/ * from t t1, t t2, t t3 where t1.c = t2.c and t1.d = t2.d and t3.c = t1.c and t3.d = t1.d", + "select /*+ TIDB_SMJ(t1,t2,t3)*/ * from t t1, t t2, t t3 where t1.c = t2.c and t1.d = t2.d and t3.c = t1.c and t3.d = t1.d order by t1.c", + // Test Multi Merge Join + Outer Join. 
+ "select /*+ TIDB_SMJ(t1,t2,t3)*/ * from t t1 left outer join t t2 on t1.a = t2.a left outer join t t3 on t2.a = t3.a", + "select /*+ TIDB_SMJ(t1,t2,t3)*/ * from t t1 left outer join t t2 on t1.a = t2.a left outer join t t3 on t1.a = t3.a" + ] + }, + { + "name": "TestDAGPlanTopN", + "cases": [ + "select * from t t1 left join t t2 on t1.b = t2.b left join t t3 on t2.b = t3.b order by t1.a limit 1", + "select * from t t1 left join t t2 on t1.b = t2.b left join t t3 on t2.b = t3.b order by t1.b limit 1", + "select * from t t1 left join t t2 on t1.b = t2.b left join t t3 on t2.b = t3.b limit 1", + "select * from t where b = 1 and c = 1 order by c limit 1", + "select * from t where c = 1 order by c limit 1", + "select * from t order by a limit 1", + "select c from t order by c limit 1" + ] + }, + { + "name": "TestDAGPlanBuilderBasePhysicalPlan", + "cases": [ + // Test complex delete. + "delete from t where b < 1 order by d limit 1", + // Test simple delete. + "delete from t", + // Test "USE INDEX" hint in delete statement from single table + "delete from t use index(c_d_e) where b = 1", + // Test complex insert. + "insert into t select * from t where b < 1 order by d limit 1", + // Test simple insert. + "insert into t (a, b, c, e, f, g) values(0,0,0,0,0,0)", + // Test dual. + "select 1", + "select * from t where false", + // Test show. + "show tables" + ] + }, + { + "name": "TestDAGPlanBuilderUnionScan", + "cases": [ + // Read table. + "select * from t", + "select * from t where b = 1", + "select * from t where a = 1", + "select * from t where a = 1 order by a", + "select * from t where a = 1 order by b", + "select * from t where a = 1 limit 1", + "select * from t where c = 1", + "select c from t where c = 1" + ] + }, + { + "name": "TestDAGPlanBuilderAgg", + "cases": [ + // Test agg + table. + "select sum(a), avg(b + c) from t group by d", + // Test group by (c + d) + "select sum(e), avg(e + c) from t where c = 1 group by (c + d)", + // Test stream agg + index single. + "select sum(e), avg(e + c) from t where c = 1 group by c", + // Test hash agg + index single. + "select sum(e), avg(e + c) from t where c = 1 group by e", + // Test hash agg + index double. + "select sum(e), avg(b + c) from t where c = 1 and e = 1 group by d", + // Test stream agg + index double. + "select sum(e), avg(b + c) from t where c = 1 and b = 1", + // Test hash agg + order. + "select sum(e) as k, avg(b + c) from t where c = 1 and b = 1 and e = 1 group by d order by k", + // Test stream agg + order. + "select sum(e) as k, avg(b + c) from t where c = 1 and b = 1 and e = 1 group by c order by k", + // Test stream agg with multi group by columns. 
+ "select sum(e+1) from t group by e,d,c order by c", + "select sum(e+1) from t group by e,d,c order by c,e", + // Test stream agg + limit or sort + "select count(*) from t group by g order by g limit 10", + "select count(*) from t group by g limit 10", + "select count(*) from t group by g order by g", + "select count(*) from t group by g order by g desc limit 1", + // Test hash agg + limit or sort + "select count(*) from t group by b order by b limit 10", + "select count(*) from t group by b order by b", + "select count(*) from t group by b limit 10", + // Test merge join + stream agg + "select sum(a.g), sum(b.g) from t a join t b on a.g = b.g group by a.g", + // Test index join + stream agg + "select /*+ tidb_inlj(a,b) */ sum(a.g), sum(b.g) from t a join t b on a.g = b.g and a.g > 60 group by a.g order by a.g limit 1", + "select sum(a.g), sum(b.g) from t a join t b on a.g = b.g and a.a>5 group by a.g order by a.g limit 1", + "select sum(d) from t" + ] + }, + { + "name": "TestAggEliminator", + "cases": [ + // Max to Limit + Sort-Desc. + "select max(a) from t;", + // Min to Limit + Sort. + "select min(a) from t;", + // Min to Limit + Sort, and isnull() should be added. + "select min(c_str) from t;", + // Do nothing to max + firstrow. + "select max(a), b from t;", + // If max/min contains scalar function, we can still do transformation. + "select max(a+1) from t;", + // Min + Max to Limit + Sort + Join. + "select max(a), min(a) from t;", + // Min + Max with range condition. + "select max(a), min(a) from t where a > 10", + // Min + Max with unified index range condition. + "select max(d), min(d) from t where c = 1 and d > 10", + // Min + Max with multiple columns + "select max(a), max(c), min(f) from t", + // Do nothing if any column has no index. + "select max(a), max(b) from t", + // Do nothing if any column has a non-range condition. + "select max(a), max(c) from t where c > 10", + // Do nothing if the condition cannot be pushed down to range. + "select max(a), min(a) from t where a * 3 + 10 < 100", + // Do nothing to max with groupby. + "select max(a) from t group by b;", + // If inner is not a data source, we can still do transformation. 
+ "select max(a) from (select t1.a from t t1 join t t2 on t1.a=t2.a) t" + ] + } +] diff --git a/planner/core/testdata/plan_suite_out.json b/planner/core/testdata/plan_suite_out.json new file mode 100644 index 0000000..f4f9972 --- /dev/null +++ b/planner/core/testdata/plan_suite_out.json @@ -0,0 +1,551 @@ +[ + { + "Name": "TestIndexHint", + "Cases": [ + { + "SQL": "select /*+ USE_INDEX(t, c_d_e) */ * from t", + "Best": "IndexLookUp(Index(t.c_d_e)[[NULL,+inf]], Table(t))", + "HasWarn": false + }, + { + "SQL": "select /*+ USE_INDEX(test.t, c_d_e) */ * from t", + "Best": "IndexLookUp(Index(t.c_d_e)[[NULL,+inf]], Table(t))", + "HasWarn": false + }, + { + "SQL": "select /*+ IGNORE_INDEX(t, c_d_e) */ c from t order by c", + "Best": "TableReader(Table(t))->Sort", + "HasWarn": false + }, + { + "SQL": "select /*+ IGNORE_INDEX(test.t, c_d_e) */ c from t order by c", + "Best": "TableReader(Table(t))->Sort", + "HasWarn": false + }, + { + "SQL": "select /*+ USE_INDEX(t, c_d_e) */ * from t t1", + "Best": "TableReader(Table(t))", + "HasWarn": false + }, + { + "SQL": "select /*+ IGNORE_INDEX(t, c_d_e) */ t1.c from t t1 order by t1.c", + "Best": "IndexReader(Index(t.c_d_e)[[NULL,+inf]])", + "HasWarn": false + }, + { + "SQL": "select /*+ USE_INDEX(t1, c_d_e) */ * from t t1", + "Best": "IndexLookUp(Index(t.c_d_e)[[NULL,+inf]], Table(t))", + "HasWarn": false + }, + { + "SQL": "select /*+ IGNORE_INDEX(t1, c_d_e) */ t1.c from t t1 order by t1.c", + "Best": "TableReader(Table(t))->Sort", + "HasWarn": false + }, + { + "SQL": "select /*+ USE_INDEX(t1, c_d_e), USE_INDEX(t2, f) */ * from t t1, t t2 where t1.a = t2.b", + "Best": "LeftHashJoin{IndexLookUp(Index(t.c_d_e)[[NULL,+inf]], Table(t))->IndexLookUp(Index(t.f)[[NULL,+inf]], Table(t))}(test.t.a,test.t.b)", + "HasWarn": false + }, + { + "SQL": "select /*+ IGNORE_INDEX(t1, c_d_e), IGNORE_INDEX(t2, f), HASH_JOIN(t1) */ * from t t1, t t2 where t1.a = t2.b", + "Best": "LeftHashJoin{TableReader(Table(t))->TableReader(Table(t))}(test.t.a,test.t.b)", + "HasWarn": false + }, + { + "SQL": "select /*+ USE_INDEX(t, c_d_e, f, g) */ * from t order by f", + "Best": "IndexLookUp(Index(t.f)[[NULL,+inf]], Table(t))", + "HasWarn": false + }, + { + "SQL": "select /*+ USE_INDEX(t) */ f from t where f > 10", + "Best": "TableReader(Table(t)->Sel([gt(test.t.f, 10)]))", + "HasWarn": false + }, + { + "SQL": "select /*+ USE_INDEX(t, no_such_index) */ * from t", + "Best": "TableReader(Table(t))", + "HasWarn": true + }, + { + "SQL": "select /*+ IGNORE_INDEX(t, no_such_index) */ * from t", + "Best": "TableReader(Table(t))", + "HasWarn": true + }, + { + "SQL": "select /*+ USE_INDEX(t, c_d_e), IGNORE_INDEX(t, f) */ c from t order by c", + "Best": "IndexReader(Index(t.c_d_e)[[NULL,+inf]])", + "HasWarn": false + }, + { + "SQL": "select /*+ USE_INDEX(t, f), IGNORE_INDEX(t, f) */ c from t order by c", + "Best": "TableReader(Table(t))->Sort", + "HasWarn": false + }, + { + "SQL": "select /*+ USE_INDEX(t, c_d_e), IGNORE_INDEX(t, c_d_e) */ c from t order by c", + "Best": "TableReader(Table(t))->Sort", + "HasWarn": false + }, + { + "SQL": "select /*+ USE_INDEX(t, c_d_e, f), IGNORE_INDEX(t, c_d_e) */ c from t order by c", + "Best": "IndexLookUp(Index(t.f)[[NULL,+inf]], Table(t))->Sort", + "HasWarn": false + } + ] + }, + { + "Name": "TestDAGPlanBuilderSimpleCase", + "Cases": [ + { + "SQL": "select * from t t1 use index(c_d_e)", + "Best": "IndexLookUp(Index(t.c_d_e)[[NULL,+inf]], Table(t))" + }, + { + "SQL": "select f from t use index() where f = 1", + "Best": "TableReader(Table(t)->Sel([eq(test.t.f, 
1)]))" + }, + { + "SQL": "select a from t where a between 1 and 2 order by c", + "Best": "TableReader(Table(t))->Sort->Projection" + }, + { + "SQL": "select * from t where (t.c > 0 and t.c < 2) or (t.c > 4 and t.c < 6) or (t.c > 8 and t.c < 10) or (t.c > 12 and t.c < 14) or (t.c > 16 and t.c < 18)", + "Best": "IndexLookUp(Index(t.c_d_e)[(0,2) (4,6) (8,10) (12,14) (16,18)], Table(t))" + }, + { + "SQL": "select * from t where (t.c > 0 and t.c < 1) or (t.c > 2 and t.c < 3) or (t.c > 4 and t.c < 5) or (t.c > 6 and t.c < 7) or (t.c > 9 and t.c < 10)", + "Best": "Dual" + }, + { + "SQL": "select * from t where t.c = 1 and t.e = 1 order by t.b limit 1", + "Best": "IndexLookUp(Index(t.c_d_e)[[1,1]]->Sel([eq(test.t.e, 1)]), Table(t))->TopN([test.t.b],0,1)" + }, + { + "SQL": "select * from t where t.e_str is null", + "Best": "IndexLookUp(Index(t.e_d_c_str_prefix)[[NULL,NULL]], Table(t))" + }, + { + "SQL": "select * from t where t.c is null", + "Best": "Dual" + }, + { + "SQL": "select * from t where t.c = 1 and t.e = 1 order by t.e limit 1", + "Best": "IndexLookUp(Index(t.c_d_e)[[1,1]]->Sel([eq(test.t.e, 1)]), Table(t))->TopN([test.t.e],0,1)" + }, + { + "SQL": "select * from t where t.c = 1 and t.e = 1 order by t.d limit 1", + "Best": "IndexLookUp(Index(t.c_d_e)[[1,1]]->Sel([eq(test.t.e, 1)])->Limit, Table(t))->Limit" + }, + { + "SQL": "select c from t where t.c = 1 and t.e = 1 order by t.d limit 1", + "Best": "IndexReader(Index(t.c_d_e)[[1,1]]->Sel([eq(test.t.e, 1)])->Limit)->Limit->Projection" + }, + { + "SQL": "select c from t order by t.a limit 1", + "Best": "TableReader(Table(t)->Limit)->Limit->Projection" + }, + { + "SQL": "select c from t order by t.a + t.b limit 1", + "Best": "TableReader(Table(t)->TopN([plus(test.t.a, test.t.b)],0,1))->Projection->TopN([Column#14],0,1)->Projection->Projection" + }, + { + "SQL": "select c from t limit 1", + "Best": "IndexReader(Index(t.c_d_e)[[NULL,+inf]]->Limit)->Limit" + }, + { + "SQL": "select c from t where c = 1 limit 1", + "Best": "IndexReader(Index(t.c_d_e)[[1,1]]->Limit)->Limit" + }, + { + "SQL": "select c from t where c = 1", + "Best": "IndexReader(Index(t.c_d_e)[[1,1]])" + }, + { + "SQL": "select c from t order by c", + "Best": "IndexReader(Index(t.c_d_e)[[NULL,+inf]])" + }, + { + "SQL": "select c from t where c = 1 order by e", + "Best": "IndexReader(Index(t.c_d_e)[[1,1]])->Sort->Projection" + }, + { + "SQL": "select c, b from t where c = 1 limit 1", + "Best": "IndexLookUp(Index(t.c_d_e)[[1,1]]->Limit, Table(t))->Limit->Projection" + }, + { + "SQL": "select c, b from t where c = 1 and e = 1 and b = 1 limit 1", + "Best": "IndexLookUp(Index(t.c_d_e)[[1,1]]->Sel([eq(test.t.e, 1)]), Table(t)->Sel([eq(test.t.b, 1)])->Limit)->Limit->Projection" + }, + { + "SQL": "select c from t where c = 1 order by d, c", + "Best": "IndexReader(Index(t.c_d_e)[[1,1]])->Sort->Projection" + }, + { + "SQL": "select c_str from t where e_str = '1' order by d_str, c_str", + "Best": "IndexLookUp(Index(t.e_d_c_str_prefix)[[\"1\",\"1\"]], Table(t))->Sort->Projection" + }, + { + "SQL": "select c from t where t.c = 1 and t.a > 1 order by t.d limit 1", + "Best": "IndexReader(Index(t.c_d_e)[[1,1]]->Sel([gt(test.t.a, 1)])->Limit)->Limit->Projection" + }, + { + "SQL": "select c from t where t.c = 1 and t.d = 1 order by t.a limit 1", + "Best": "IndexReader(Index(t.c_d_e)[[1 1,1 1]])->TopN([test.t.a],0,1)->Projection" + }, + { + "SQL": "select * from t where t.c = 1 and t.a > 1 order by t.d limit 1", + "Best": "IndexLookUp(Index(t.c_d_e)[[1,1]]->Sel([gt(test.t.a, 1)])->Limit, 
Table(t))->Limit" + }, + { + "SQL": "select * from t use index(e_d_c_str_prefix) where t.c_str = 'abcdefghijk' and t.d_str = 'd' and t.e_str = 'e'", + "Best": "IndexLookUp(Index(t.e_d_c_str_prefix)[[\"e\" \"d\" \"abcdefghij\",\"e\" \"d\" \"abcdefghij\"]], Table(t)->Sel([eq(test.t.c_str, abcdefghijk)]))" + }, + { + "SQL": "select * from (select * from t use index() order by b) t left join t t1 on t.a=t1.a limit 10", + "Best": "LeftHashJoin{TableReader(Table(t)->TopN([test.t.b],0,10))->TopN([test.t.b],0,10)->TableReader(Table(t))}(test.t.a,test.t.a)->Limit" + }, + { + "SQL": "select * from (select *, NULL as xxx from t) t order by xxx", + "Best": "TableReader(Table(t))->Projection" + }, + { + "SQL": "select * from t use index(f) where f = 1 and a = 1", + "Best": "IndexLookUp(Index(t.f)[[1,1]]->Sel([eq(test.t.a, 1)]), Table(t))" + }, + { + "SQL": "select * from t2 use index(b) where b = 1 and a = 1", + "Best": "IndexLookUp(Index(t2.b)[[1,1]]->Sel([eq(test.t2.a, 1)]), Table(t2))" + } + ] + }, + { + "Name": "TestDAGPlanBuilderJoin", + "Cases": [ + { + "SQL": "select * from t t1 join t t2 on t1.a = t2.c_str", + "Best": "RightHashJoin{TableReader(Table(t)->Sel([not(isnull(test.t.c_str))]))->TableReader(Table(t))}(test.t.c_str,test.t.a)->Projection" + }, + { + "SQL": "select * from t t1 join t t2 on t1.b = t2.a", + "Best": "LeftHashJoin{TableReader(Table(t))->TableReader(Table(t))}(test.t.b,test.t.a)" + }, + { + "SQL": "select * from t t1 join t t2 on t1.a = t2.a join t t3 on t1.a = t3.a", + "Best": "LeftHashJoin{MergeInnerJoin{TableReader(Table(t))->TableReader(Table(t))}(test.t.a,test.t.a)->TableReader(Table(t))}(test.t.a,test.t.a)" + }, + { + "SQL": "select * from t t1 join t t2 on t1.a = t2.a join t t3 on t1.b = t3.a", + "Best": "LeftHashJoin{MergeInnerJoin{TableReader(Table(t))->TableReader(Table(t))}(test.t.a,test.t.a)->TableReader(Table(t))}(test.t.b,test.t.a)" + }, + { + "SQL": "select * from t t1 join t t2 on t1.b = t2.a order by t1.a", + "Best": "LeftHashJoin{TableReader(Table(t))->TableReader(Table(t))}(test.t.b,test.t.a)->Sort" + }, + { + "SQL": "select * from t t1 join t t2 on t1.b = t2.a order by t1.a limit 1", + "Best": "LeftHashJoin{TableReader(Table(t))->TableReader(Table(t))}(test.t.b,test.t.a)->TopN([test.t.a],0,1)" + }, + { + "SQL": "select /*+ TIDB_HJ(t1, t2) */ * from t t1 join t t2 on t1.b = t2.a order by t1.a limit 1", + "Best": "LeftHashJoin{TableReader(Table(t))->TableReader(Table(t))}(test.t.b,test.t.a)->TopN([test.t.a],0,1)" + }, + { + "SQL": "select * from t t1 left join t t2 on t1.b = t2.a where 1 = 1 limit 1", + "Best": "LeftHashJoin{TableReader(Table(t)->Limit)->Limit->TableReader(Table(t))}(test.t.b,test.t.a)->Limit" + }, + { + "SQL": "select * from t t1 join t t2 on t1.b = t2.a and t1.c = 1 and t1.d = 1 and t1.e = 1 order by t1.a limit 1", + "Best": "RightHashJoin{IndexLookUp(Index(t.c_d_e)[[1 1 1,1 1 1]], Table(t))->TableReader(Table(t))}(test.t.b,test.t.a)->TopN([test.t.a],0,1)" + }, + { + "SQL": "select * from t t1 join t t2 on t1.b = t2.b join t t3 on t1.b = t3.b", + "Best": "LeftHashJoin{LeftHashJoin{TableReader(Table(t))->TableReader(Table(t))}(test.t.b,test.t.b)->TableReader(Table(t))}(test.t.b,test.t.b)" + }, + { + "SQL": "select * from t t1 join t t2 on t1.a = t2.a order by t1.a", + "Best": "MergeInnerJoin{TableReader(Table(t))->TableReader(Table(t))}(test.t.a,test.t.a)" + }, + { + "SQL": "select * from t t1 left outer join t t2 on t1.a = t2.a right outer join t t3 on t1.a = t3.a", + "Best": 
"MergeRightOuterJoin{MergeLeftOuterJoin{TableReader(Table(t))->TableReader(Table(t))}(test.t.a,test.t.a)->TableReader(Table(t))}(test.t.a,test.t.a)" + }, + { + "SQL": "select * from t t1 join t t2 on t1.a = t2.a join t t3 on t1.a = t3.a and t1.b = 1 and t3.c = 1", + "Best": "RightHashJoin{LeftHashJoin{TableReader(Table(t)->Sel([eq(test.t.b, 1)]))->IndexLookUp(Index(t.c_d_e)[[1,1]], Table(t))}(test.t.a,test.t.a)->TableReader(Table(t))}(test.t.a,test.t.a)->Projection" + }, + { + "SQL": "select /*+ TIDB_SMJ(t1,t2)*/ * from t t1, t t2 where t1.a = t2.b", + "Best": "MergeInnerJoin{TableReader(Table(t))->TableReader(Table(t))->Sort}(test.t.a,test.t.b)" + }, + { + "SQL": "select /*+ TIDB_SMJ(t1,t2)*/ * from t t1, t t2 where t1.a = t2.a", + "Best": "MergeInnerJoin{TableReader(Table(t))->TableReader(Table(t))}(test.t.a,test.t.a)" + }, + { + "SQL": "select /*+ TIDB_SMJ(t1,t2)*/ * from t t1, t t2 where t1.a = t2.a order by t2.a", + "Best": "MergeInnerJoin{TableReader(Table(t))->TableReader(Table(t))}(test.t.a,test.t.a)" + }, + { + "SQL": "select /*+ TIDB_SMJ(t1,t2)*/ * from t t1, t t2 where t1.b = t2.b order by t2.a", + "Best": "MergeInnerJoin{TableReader(Table(t))->Sort->TableReader(Table(t))->Sort}(test.t.b,test.t.b)->Sort" + }, + { + "SQL": "select /*+ TIDB_SMJ(t1,t2)*/ * from t t1, t t2 where t1.a = t2.a order by t2.a desc", + "Best": "MergeInnerJoin{TableReader(Table(t))->TableReader(Table(t))}(test.t.a,test.t.a)" + }, + { + "SQL": "select /*+ TIDB_SMJ(t1,t2)*/ * from t t1, t t2 where t1.b = t2.b order by t2.b desc", + "Best": "MergeInnerJoin{TableReader(Table(t))->Sort->TableReader(Table(t))->Sort}(test.t.b,test.t.b)" + }, + { + "SQL": "select /*+ TIDB_SMJ(t1,t2,t3)*/ * from t t1, t t2, t t3 where t1.a = t2.a and t2.a = t3.a", + "Best": "MergeInnerJoin{MergeInnerJoin{TableReader(Table(t))->TableReader(Table(t))}(test.t.a,test.t.a)->TableReader(Table(t))}(test.t.a,test.t.a)" + }, + { + "SQL": "select /*+ TIDB_SMJ(t1,t2,t3)*/ * from t t1, t t2, t t3 where t1.a = t2.b and t2.a = t3.b", + "Best": "MergeInnerJoin{MergeInnerJoin{TableReader(Table(t))->TableReader(Table(t))->Sort}(test.t.a,test.t.b)->Sort->TableReader(Table(t))->Sort}(test.t.a,test.t.b)" + }, + { + "SQL": "select /*+ TIDB_SMJ(t1,t2,t3)*/ * from t t1, t t2, t t3 where t1.c = t2.c and t1.d = t2.d and t3.c = t1.c and t3.d = t1.d", + "Best": "MergeInnerJoin{MergeInnerJoin{IndexLookUp(Index(t.c_d_e)[[NULL,+inf]], Table(t))->IndexLookUp(Index(t.c_d_e)[[NULL,+inf]], Table(t))}(test.t.c,test.t.c)(test.t.d,test.t.d)->IndexLookUp(Index(t.c_d_e)[[NULL,+inf]], Table(t))}(test.t.c,test.t.c)(test.t.d,test.t.d)" + }, + { + "SQL": "select /*+ TIDB_SMJ(t1,t2,t3)*/ * from t t1, t t2, t t3 where t1.c = t2.c and t1.d = t2.d and t3.c = t1.c and t3.d = t1.d order by t1.c", + "Best": "MergeInnerJoin{MergeInnerJoin{IndexLookUp(Index(t.c_d_e)[[NULL,+inf]], Table(t))->IndexLookUp(Index(t.c_d_e)[[NULL,+inf]], Table(t))}(test.t.c,test.t.c)(test.t.d,test.t.d)->IndexLookUp(Index(t.c_d_e)[[NULL,+inf]], Table(t))}(test.t.c,test.t.c)(test.t.d,test.t.d)" + }, + { + "SQL": "select /*+ TIDB_SMJ(t1,t2,t3)*/ * from t t1 left outer join t t2 on t1.a = t2.a left outer join t t3 on t2.a = t3.a", + "Best": "MergeLeftOuterJoin{MergeLeftOuterJoin{TableReader(Table(t))->TableReader(Table(t))}(test.t.a,test.t.a)->TableReader(Table(t))}(test.t.a,test.t.a)" + }, + { + "SQL": "select /*+ TIDB_SMJ(t1,t2,t3)*/ * from t t1 left outer join t t2 on t1.a = t2.a left outer join t t3 on t1.a = t3.a", + "Best": 
"MergeLeftOuterJoin{MergeLeftOuterJoin{TableReader(Table(t))->TableReader(Table(t))}(test.t.a,test.t.a)->TableReader(Table(t))}(test.t.a,test.t.a)" + } + ] + }, + { + "Name": "TestDAGPlanTopN", + "Cases": [ + { + "SQL": "select * from t t1 left join t t2 on t1.b = t2.b left join t t3 on t2.b = t3.b order by t1.a limit 1", + "Best": "LeftHashJoin{LeftHashJoin{TableReader(Table(t)->Limit)->Limit->TableReader(Table(t))}(test.t.b,test.t.b)->TopN([test.t.a],0,1)->TableReader(Table(t))}(test.t.b,test.t.b)->TopN([test.t.a],0,1)" + }, + { + "SQL": "select * from t t1 left join t t2 on t1.b = t2.b left join t t3 on t2.b = t3.b order by t1.b limit 1", + "Best": "LeftHashJoin{LeftHashJoin{TableReader(Table(t)->TopN([test.t.b],0,1))->TopN([test.t.b],0,1)->TableReader(Table(t))}(test.t.b,test.t.b)->TopN([test.t.b],0,1)->TableReader(Table(t))}(test.t.b,test.t.b)->TopN([test.t.b],0,1)" + }, + { + "SQL": "select * from t t1 left join t t2 on t1.b = t2.b left join t t3 on t2.b = t3.b limit 1", + "Best": "LeftHashJoin{LeftHashJoin{TableReader(Table(t)->Limit)->Limit->TableReader(Table(t))}(test.t.b,test.t.b)->Limit->TableReader(Table(t))}(test.t.b,test.t.b)->Limit" + }, + { + "SQL": "select * from t where b = 1 and c = 1 order by c limit 1", + "Best": "IndexLookUp(Index(t.c_d_e)[[1,1]], Table(t)->Sel([eq(test.t.b, 1)]))->Limit" + }, + { + "SQL": "select * from t where c = 1 order by c limit 1", + "Best": "IndexLookUp(Index(t.c_d_e)[[1,1]]->Limit, Table(t))->Limit" + }, + { + "SQL": "select * from t order by a limit 1", + "Best": "TableReader(Table(t)->Limit)->Limit" + }, + { + "SQL": "select c from t order by c limit 1", + "Best": "IndexReader(Index(t.c_d_e)[[NULL,+inf]]->Limit)->Limit" + } + ] + }, + { + "Name": "TestDAGPlanBuilderBasePhysicalPlan", + "Cases": [ + { + "SQL": "delete from t where b < 1 order by d limit 1", + "Best": "TableReader(Table(t)->Sel([lt(test.t.b, 1)])->TopN([test.t.d],0,1))->TopN([test.t.d],0,1)->Delete" + }, + { + "SQL": "delete from t", + "Best": "TableReader(Table(t))->Delete" + }, + { + "SQL": "delete from t use index(c_d_e) where b = 1", + "Best": "IndexLookUp(Index(t.c_d_e)[[NULL,+inf]], Table(t)->Sel([eq(test.t.b, 1)]))->Delete" + }, + { + "SQL": "insert into t select * from t where b < 1 order by d limit 1", + "Best": "TableReader(Table(t)->Sel([lt(test.t.b, 1)])->TopN([test.t.d],0,1))->TopN([test.t.d],0,1)->Insert" + }, + { + "SQL": "insert into t (a, b, c, e, f, g) values(0,0,0,0,0,0)", + "Best": "Insert" + }, + { + "SQL": "select 1", + "Best": "Dual->Projection" + }, + { + "SQL": "select * from t where false", + "Best": "Dual" + }, + { + "SQL": "show tables", + "Best": "Show" + } + ] + }, + { + "Name": "TestDAGPlanBuilderUnionScan", + "Cases": null + }, + { + "Name": "TestDAGPlanBuilderAgg", + "Cases": [ + { + "SQL": "select sum(a), avg(b + c) from t group by d", + "Best": "TableReader(Table(t)->HashAgg)->HashAgg" + }, + { + "SQL": "select sum(e), avg(e + c) from t where c = 1 group by (c + d)", + "Best": "IndexReader(Index(t.c_d_e)[[1,1]]->HashAgg)->HashAgg" + }, + { + "SQL": "select sum(e), avg(e + c) from t where c = 1 group by c", + "Best": "IndexReader(Index(t.c_d_e)[[1,1]]->HashAgg)->HashAgg" + }, + { + "SQL": "select sum(e), avg(e + c) from t where c = 1 group by e", + "Best": "IndexReader(Index(t.c_d_e)[[1,1]]->HashAgg)->HashAgg" + }, + { + "SQL": "select sum(e), avg(b + c) from t where c = 1 and e = 1 group by d", + "Best": "IndexLookUp(Index(t.c_d_e)[[1,1]]->Sel([eq(test.t.e, 1)]), Table(t))->Projection->HashAgg" + }, + { + "SQL": "select sum(e), avg(b + c) 
from t where c = 1 and b = 1", + "Best": "IndexLookUp(Index(t.c_d_e)[[1,1]], Table(t)->Sel([eq(test.t.b, 1)]))->Projection->HashAgg" + }, + { + "SQL": "select sum(e) as k, avg(b + c) from t where c = 1 and b = 1 and e = 1 group by d order by k", + "Best": "IndexLookUp(Index(t.c_d_e)[[1,1]]->Sel([eq(test.t.e, 1)]), Table(t)->Sel([eq(test.t.b, 1)]))->Projection->HashAgg->Sort" + }, + { + "SQL": "select sum(e) as k, avg(b + c) from t where c = 1 and b = 1 and e = 1 group by c order by k", + "Best": "IndexLookUp(Index(t.c_d_e)[[1,1]]->Sel([eq(test.t.e, 1)]), Table(t)->Sel([eq(test.t.b, 1)]))->Projection->HashAgg->Sort" + }, + { + "SQL": "select sum(e+1) from t group by e,d,c order by c", + "Best": "IndexReader(Index(t.c_d_e)[[NULL,+inf]]->HashAgg)->HashAgg->Sort->Projection" + }, + { + "SQL": "select sum(e+1) from t group by e,d,c order by c,e", + "Best": "IndexReader(Index(t.c_d_e)[[NULL,+inf]]->HashAgg)->HashAgg->Sort->Projection" + }, + { + "SQL": "select count(*) from t group by g order by g limit 10", + "Best": "IndexReader(Index(t.g)[[NULL,+inf]]->HashAgg)->HashAgg->TopN([test.t.g],0,10)->Projection" + }, + { + "SQL": "select count(*) from t group by g limit 10", + "Best": "IndexReader(Index(t.g)[[NULL,+inf]]->HashAgg)->HashAgg->Limit" + }, + { + "SQL": "select count(*) from t group by g order by g", + "Best": "IndexReader(Index(t.g)[[NULL,+inf]]->HashAgg)->HashAgg->Sort->Projection" + }, + { + "SQL": "select count(*) from t group by g order by g desc limit 1", + "Best": "IndexReader(Index(t.g)[[NULL,+inf]]->HashAgg)->HashAgg->TopN([test.t.g true],0,1)->Projection" + }, + { + "SQL": "select count(*) from t group by b order by b limit 10", + "Best": "TableReader(Table(t)->HashAgg)->HashAgg->TopN([test.t.b],0,10)->Projection" + }, + { + "SQL": "select count(*) from t group by b order by b", + "Best": "TableReader(Table(t)->HashAgg)->HashAgg->Sort->Projection" + }, + { + "SQL": "select count(*) from t group by b limit 10", + "Best": "TableReader(Table(t)->HashAgg)->HashAgg->Limit" + }, + { + "SQL": "select sum(a.g), sum(b.g) from t a join t b on a.g = b.g group by a.g", + "Best": "MergeInnerJoin{IndexReader(Index(t.g)[[NULL,+inf]])->IndexReader(Index(t.g)[[NULL,+inf]])}(test.t.g,test.t.g)->HashAgg" + }, + { + "SQL": "select /*+ tidb_inlj(a,b) */ sum(a.g), sum(b.g) from t a join t b on a.g = b.g and a.g > 60 group by a.g order by a.g limit 1", + "Best": "MergeInnerJoin{IndexReader(Index(t.g)[(60,+inf]])->IndexReader(Index(t.g)[(60,+inf]])}(test.t.g,test.t.g)->HashAgg->TopN([test.t.g],0,1)->Projection" + }, + { + "SQL": "select sum(a.g), sum(b.g) from t a join t b on a.g = b.g and a.a>5 group by a.g order by a.g limit 1", + "Best": "RightHashJoin{TableReader(Table(t))->IndexReader(Index(t.g)[[NULL,+inf]])}(test.t.g,test.t.g)->HashAgg->TopN([test.t.g],0,1)->Projection" + }, + { + "SQL": "select sum(d) from t", + "Best": "IndexReader(Index(t.c_d_e)[[NULL,+inf]]->HashAgg)->HashAgg" + } + ] + }, + { + "Name": "TestAggEliminator", + "Cases": [ + { + "SQL": "select max(a) from t;", + "Best": "TableReader(Table(t)->Limit)->Limit->HashAgg" + }, + { + "SQL": "select min(a) from t;", + "Best": "TableReader(Table(t)->Limit)->Limit->HashAgg" + }, + { + "SQL": "select min(c_str) from t;", + "Best": "IndexReader(Index(t.c_d_e_str)[[-inf,+inf]]->Limit)->Limit->HashAgg" + }, + { + "SQL": "select max(a), b from t;", + "Best": "TableReader(Table(t)->HashAgg)->HashAgg" + }, + { + "SQL": "select max(a+1) from t;", + "Best": "IndexReader(Index(t.f)[[NULL,+inf]]->Sel([not(isnull(plus(test.t.a, 
1)))])->TopN([plus(test.t.a, 1) true],0,1))->Projection->TopN([Column#40 true],0,1)->Projection->Projection->HashAgg" + }, + { + "SQL": "select max(a), min(a) from t;", + "Best": "LeftHashJoin{TableReader(Table(t)->Limit)->Limit->HashAgg->TableReader(Table(t)->Limit)->Limit->HashAgg}" + }, + { + "SQL": "select max(a), min(a) from t where a > 10", + "Best": "LeftHashJoin{TableReader(Table(t)->Limit)->Limit->HashAgg->TableReader(Table(t)->Limit)->Limit->HashAgg}" + }, + { + "SQL": "select max(d), min(d) from t where c = 1 and d > 10", + "Best": "LeftHashJoin{IndexReader(Index(t.c_d_e)[(1 10,1 +inf]]->Limit)->Limit->HashAgg->IndexReader(Index(t.c_d_e)[(1 10,1 +inf]]->Limit)->Limit->HashAgg}" + }, + { + "SQL": "select max(a), max(c), min(f) from t", + "Best": "LeftHashJoin{LeftHashJoin{TableReader(Table(t)->Limit)->Limit->HashAgg->IndexReader(Index(t.c_d_e)[[NULL,+inf]]->Limit)->Limit->HashAgg}->IndexReader(Index(t.f)[[NULL,+inf]]->Limit)->Limit->HashAgg}" + }, + { + "SQL": "select max(a), max(b) from t", + "Best": "TableReader(Table(t)->HashAgg)->HashAgg" + }, + { + "SQL": "select max(a), max(c) from t where c > 10", + "Best": "IndexReader(Index(t.c_d_e)[(10,+inf]]->HashAgg)->HashAgg" + }, + { + "SQL": "select max(a), min(a) from t where a * 3 + 10 < 100", + "Best": "IndexReader(Index(t.f)[[NULL,+inf]]->Sel([lt(plus(mul(test.t.a, 3), 10), 100)])->HashAgg)->HashAgg" + }, + { + "SQL": "select max(a) from t group by b;", + "Best": "TableReader(Table(t)->HashAgg)->HashAgg" + }, + { + "SQL": "select max(a) from (select t1.a from t t1 join t t2 on t1.a=t2.a) t", + "Best": "LeftHashJoin{IndexReader(Index(t.f)[[NULL,+inf]])->IndexReader(Index(t.f)[[NULL,+inf]])}(test.t.a,test.t.a)->TopN([test.t.a true],0,1)->HashAgg" + } + ] + } +] diff --git a/planner/core/testdata/plan_suite_unexported_in.json b/planner/core/testdata/plan_suite_unexported_in.json new file mode 100644 index 0000000..130c9cd --- /dev/null +++ b/planner/core/testdata/plan_suite_unexported_in.json @@ -0,0 +1,207 @@ +[ + { + "name": "TestEagerAggregation", + "cases": [ + "select sum(t.a), sum(t.a+1), sum(t.a), count(t.a), sum(t.a) + count(t.a) from t", + "select sum(t.a + t.b), sum(t.a + t.c), sum(t.a + t.b), count(t.a) from t having sum(t.a + t.b) > 0 order by sum(t.a + t.c)", + "select sum(a.a) from t a, t b where a.c = b.c", + "select sum(b.a) from t a, t b where a.c = b.c", + "select sum(b.a), a.a from t a, t b where a.c = b.c", + "select sum(a.a), b.a from t a, t b where a.c = b.c", + "select sum(a.a), sum(b.a) from t a, t b where a.c = b.c", + "select sum(a.a), max(b.a) from t a, t b where a.c = b.c", + "select max(a.a), sum(b.a) from t a, t b where a.c = b.c", + "select sum(a.a) from t a, t b, t c where a.c = b.c and b.c = c.c", + "select sum(b.a) from t a left join t b on a.c = b.c", + "select sum(a.a) from t a left join t b on a.c = b.c", + "select sum(a.a) from t a right join t b on a.c = b.c", + "select sum(a) from (select * from t) x", + "select max(a.b), max(b.b) from t a join t b on a.c = b.c group by a.a", + "select max(a.b), max(b.b) from t a join t b on a.a = b.a group by a.c", + "select max(a.c) from t a join t b on a.a=b.a and a.b=b.b group by a.b", + "select t1.a, count(t2.b) from t t1, t t2 where t1.a = t2.a group by t1.a" + ] + }, + { + "name": "TestPlanBuilder", + "cases": [ + "delete from t where t.a >= 1000 order by t.a desc limit 10", + // The correctness of explain result is checked at integration test. There is to improve coverage. 
+ "explain select /*+ TIDB_INLJ(t1, t2) */ * from t t1 left join t t2 on t1.a=t2.a where t1.b=1 and t2.b=1 and (t1.c=1 or t2.c=1)", + "explain select /*+ TIDB_HJ(t1, t2) */ * from t t1 left join t t2 on t1.a=t2.a where t1.b=1 and t2.b=1 and (t1.c=1 or t2.c=1)", + "explain select /*+ TIDB_SMJ(t1, t2) */ * from t t1 right join t t2 on t1.a=t2.a where t1.b=1 and t2.b=1 and (t1.c=1 or t2.c=1)", + "explain format=\"dot\" select /*+ TIDB_SMJ(t1, t2) */ * from t t1, t t2 where t1.a=t2.a", + "explain select * from t order by b", + "explain select * from t order by b limit 1", + "explain format=\"dot\" select * from t order by a", + "insert into t select * from t", + "select * from t t1, t t2 where 1 = 0" + ] + }, + { + "name": "TestPredicatePushDown", + "cases": [ + "select count(*) from t a, t b where a.a = b.a", + "select * from t ta join t tb on ta.d = tb.d and ta.d > 1 where tb.a = 0", + "select * from t ta join t tb on ta.d = tb.d where ta.d > 1 and tb.a = 0", + "select * from t ta left outer join t tb on ta.d = tb.d and ta.d > 1 where tb.a = 0", + "select * from t ta right outer join t tb on ta.d = tb.d and ta.a > 1 where tb.a = 0", + "select * from t ta left outer join t tb on ta.d = tb.d and ta.a > 1 where ta.d = 0", + "select * from t ta left outer join t tb on ta.d = tb.d and ta.a > 1 where tb.d = 0", + "select * from t ta left outer join t tb on ta.d = tb.d and ta.a > 1 where tb.c is not null and tb.c = 0 and ifnull(tb.d, 1)", + "select * from t ta left outer join t tb on ta.a = tb.a left outer join t tc on tb.b = tc.b where tc.c > 0", + "select * from t ta left outer join t tb on ta.a = tb.a left outer join t tc on tc.b = ta.b where tb.c > 0", + "select * from t ta left outer join t tb on ta.d = tb.d and ta.a > 1 where ifnull(tb.d, 1) or tb.d is null", + "select a, count(a) cnt from t group by a having cnt < 1", + // issue #3873 + "select t1.a, t2.a from t as t1 left join t as t2 on t1.a = t2.a where t1.a < 1.0", + // issue #7728 + "select * from t t1 join t t2 on t1.a = t2.a where t2.a = null" + ] + }, + { + "name": "TestTopNPushDown", + "cases": [ + // Test TopN + Selection. + "select * from t where a < 1 order by b limit 5", + // Test Limit + Selection. + "select * from t where a < 1 limit 5", + // Test Limit + Agg + Proj . + "select a, count(b) from t group by b limit 5", + // Test TopN + Agg + Proj . + "select a, count(b) from t group by b order by c limit 5", + // Test TopN + Join + Proj. + "select * from t, t s order by t.a limit 5", + // Test Limit + Join + Proj. + "select * from t, t s limit 5", + // Test TopN + Left Join + Proj. + "select * from t left outer join t s on t.a = s.a order by t.a limit 5", + // Test TopN + Left Join + Proj. + "select * from t left outer join t s on t.a = s.a order by t.a limit 5, 5", + // Test Limit + Left Join + Proj. + "select * from t left outer join t s on t.a = s.a limit 5", + // Test TopN + Right Join + Proj. + "select * from t right outer join t s on t.a = s.a order by s.a limit 5", + // Test Limit + Right Join + Proj. + "select * from t right outer join t s on t.a = s.a order by s.a,t.b limit 5", + // Test `ByItem` containing column from both sides. 
+ "select ifnull(t1.b, t2.a) from t t1 left join t t2 on t1.e=t2.e order by ifnull(t1.b, t2.a) limit 5", + // Test ifnull cannot be eliminated + "select ifnull(t1.h, t2.b) from t t1 left join t t2 on t1.e=t2.e order by ifnull(t1.h, t2.b) limit 5" + ] + }, + { + "name": "TestUniqueKeyInfo", + "cases": [ + "select a, sum(e) from t group by b", + "select a, b, sum(f) from t group by b", + "select c, d, e, sum(a) from t group by c, d, e", + "select f, g, sum(a) from t", + "select * from t t1 join t t2 on t1.a = t2.e", + "select f from t having sum(a) > 0", + "select * from t t1 left join t t2 on t1.a = t2.a" + ] + }, + { + "name": "TestAggPrune", + "cases": [ + "select a, count(b) from t group by a", + "select sum(b) from t group by c, d, e", + "select tt.a, sum(tt.b) from (select a, b from t) tt group by tt.a", + "select count(1) from (select count(1), a as b from t group by a) tt group by b", + "select a, count(b) from t group by a" + ] + }, + { + "name": "TestColumnPruning", + "cases": [ + "select count(*) from t group by a", + "select count(*) from t", + "select count(*) from t a join t b where a.a < 1", + "select count(*) from t a join t b on a.a = b.d", + "select count(*) from t a join t b on a.a = b.d order by sum(a.d)", + "select count(b.a) from t a join t b on a.a = b.d group by b.b order by sum(a.d)", + "select a as c1, b as c2 from t order by 1, c1 + c2 + c" + ] + }, + { + "name": "TestDeriveNotNullConds", + "cases": [ + "select * from t t1 inner join t t2 on t1.e = t2.e", + "select * from t t1 inner join t t2 on t1.e > t2.e", + "select * from t t1 inner join t t2 on t1.e = t2.e and t1.e is not null", + "select * from t t1 left join t t2 on t1.e = t2.e", + "select * from t t1 left join t t2 on t1.e > t2.e", + "select * from t t1 left join t t2 on t1.e = t2.e and t2.e is not null", + "select * from t t1 right join t t2 on t1.e = t2.e and t1.e is not null", + // Not deriving if column has NotNull flag already. 
+ "select * from t t1 inner join t t2 on t1.b = t2.b", + "select * from t t1 left join t t2 on t1.b = t2.b", + "select * from t t1 left join t t2 on t1.b > t2.b" + ] + }, + { + "name": "TestJoinPredicatePushDown", + "cases": [ + // issue #7628, inner join + "select * from t as t1 join t as t2 on t1.b = t2.b where t1.a > t2.a", + "select * from t as t1 join t as t2 on t1.b = t2.b where t1.a=1 or t2.a=1", + "select * from t as t1 join t as t2 on t1.b = t2.b where (t1.a=1 and t2.a=1) or (t1.a=2 and t2.a=2)", + "select * from t as t1 join t as t2 on t1.b = t2.b where (t1.c=1 and (t1.a=3 or t2.a=3)) or (t1.a=2 and t2.a=2)", + "select * from t as t1 join t as t2 on t1.b = t2.b where (t1.c=1 and ((t1.a=3 and t2.a=3) or (t1.a=4 and t2.a=4)))", + "select * from t as t1 join t as t2 on t1.b = t2.b where (t1.a>1 and t1.a < 3 and t2.a=1) or (t1.a=2 and t2.a=2)", + "select * from t as t1 join t as t2 on t1.b = t2.b and ((t1.a=1 and t2.a=1) or (t1.a=2 and t2.a=2))", + // issue #7628, left join + "select * from t as t1 left join t as t2 on t1.b = t2.b and ((t1.a=1 and t2.a=1) or (t1.a=2 and t2.a=2))", + "select * from t as t1 left join t as t2 on t1.b = t2.b and t1.a > t2.a", + "select * from t as t1 left join t as t2 on t1.b = t2.b and (t1.a=1 or t2.a=1)", + "select * from t as t1 left join t as t2 on t1.b = t2.b and ((t1.c=1 and (t1.a=3 or t2.a=3)) or (t1.a=2 and t2.a=2))", + "select * from t as t1 left join t as t2 on t1.b = t2.b and ((t2.c=1 and (t1.a=3 or t2.a=3)) or (t1.a=2 and t2.a=2))", + "select * from t as t1 left join t as t2 on t1.b = t2.b and ((t1.c=1 and ((t1.a=3 and t2.a=3) or (t1.a=4 and t2.a=4))) or (t1.a=2 and t2.a=2))", + // Duplicate condition would be removed. + "select * from t t1 join t t2 on t1.a > 1 and t1.a > 1" + ] + }, + { + "name": "TestJoinReOrder", + "cases": [ + "select * from t t1, t t2, t t3, t t4, t t5, t t6 where t1.a = t2.b and t2.a = t3.b and t3.c = t4.a and t4.d = t2.c and t5.d = t6.d", + "select * from t t1, t t2, t t3, t t4, t t5, t t6, t t7, t t8 where t1.a = t8.a", + "select * from t t1, t t2, t t3, t t4, t t5 where t1.a = t5.a and t5.a = t4.a and t4.a = t3.a and t3.a = t2.a and t2.a = t1.a and t1.a = t3.a and t2.a = t4.a and t5.b < 8", + "select * from t t1, t t2, t t3, t t4, t t5 where t1.a = t5.a and t5.a = t4.a and t4.a = t3.a and t3.a = t2.a and t2.a = t1.a and t1.a = t3.a and t2.a = t4.a and t3.b = 1 and t4.a = 1" + ] + }, + { + "name": "TestOuterJoinEliminator", + "cases": [ + // Test duplicate agnostic agg functions on join + "select max(t1.a), min(test.t1.b) from t t1 left join t t2 on t1.b = t2.b", + // Test left outer join + "select t1.b from t t1 left outer join t t2 on t1.a = t2.a", + // Test right outer join + "select t2.b from t t1 right outer join t t2 on t1.a = t2.a", + // For complex join query + "select max(t3.b) from (t t1 left join t t2 on t1.a = t2.a) right join t t3 on t1.b = t3.b", + "select t1.a ta, t1.b tb from t t1 left join t t2 on t1.a = t2.a", + // Because the `order by` uses t2.a, the `join` can't be eliminated. 
+ "select t1.a, t1.b from t t1 left join t t2 on t1.a = t2.a order by t2.a" + ] + }, + { + "name": "TestSimplifyOuterJoin", + "cases": [ + "select * from t t1 left join t t2 on t1.b = t2.b where t1.c > 1 or t2.c > 1;", + "select * from t t1 left join t t2 on t1.b = t2.b where t1.c > 1 and t2.c > 1;", + "select * from t t1 left join t t2 on t1.b > 1 where t1.c = t2.c;" + ] + }, + { + "name": "TestOuterWherePredicatePushDown", + "cases": [ + // issue #7628, left join with where condition + "select * from t as t1 left join t as t2 on t1.b = t2.b where (t1.a=1 and t2.a is null) or (t1.a=2 and t2.a=2)", + "select * from t as t1 left join t as t2 on t1.b = t2.b where (t1.c=1 and (t1.a=3 or t2.a=3)) or (t1.a=2 and t2.a=2)", + "select * from t as t1 left join t as t2 on t1.b = t2.b where (t1.c=1 and ((t1.a=3 and t2.a=3) or (t1.a=4 and t2.a=4))) or (t1.a=2 and t2.a is null)" + ] + } +] diff --git a/planner/core/testdata/plan_suite_unexported_out.json b/planner/core/testdata/plan_suite_unexported_out.json new file mode 100644 index 0000000..b33441f --- /dev/null +++ b/planner/core/testdata/plan_suite_unexported_out.json @@ -0,0 +1,549 @@ +[ + { + "Name": "TestEagerAggregation", + "Cases": [ + "DataScan(t)->Aggr(sum(test.t.a),sum(plus(test.t.a, 1)),count(test.t.a))->Projection", + "DataScan(t)->Aggr(sum(plus(test.t.a, test.t.b)),sum(plus(test.t.a, test.t.c)),count(test.t.a))->Sel([gt(Column#13, 0)])->Projection->Sort->Projection", + "Join{DataScan(a)->Aggr(sum(test.t.a),firstrow(test.t.c))->DataScan(b)}(test.t.c,test.t.c)->Aggr(sum(Column#26))->Projection", + "Join{DataScan(a)->DataScan(b)->Aggr(sum(test.t.a),firstrow(test.t.c))}(test.t.c,test.t.c)->Aggr(sum(Column#26))->Projection", + "Join{DataScan(a)->DataScan(b)->Aggr(sum(test.t.a),firstrow(test.t.c))}(test.t.c,test.t.c)->Aggr(sum(Column#26),firstrow(test.t.a))->Projection", + "Join{DataScan(a)->Aggr(sum(test.t.a),firstrow(test.t.c))->DataScan(b)}(test.t.c,test.t.c)->Aggr(sum(Column#26),firstrow(test.t.a))->Projection", + "Join{DataScan(a)->DataScan(b)}(test.t.c,test.t.c)->Aggr(sum(test.t.a),sum(test.t.a))->Projection", + "Join{DataScan(a)->Aggr(sum(test.t.a),firstrow(test.t.c))->DataScan(b)}(test.t.c,test.t.c)->Aggr(sum(Column#27),max(test.t.a))->Projection", + "Join{DataScan(a)->DataScan(b)->Aggr(sum(test.t.a),firstrow(test.t.c))}(test.t.c,test.t.c)->Aggr(max(test.t.a),sum(Column#27))->Projection", + "Join{Join{DataScan(a)->DataScan(b)}(test.t.c,test.t.c)->DataScan(c)}(test.t.c,test.t.c)->Aggr(sum(test.t.a))->Projection", + "Join{DataScan(a)->DataScan(b)->Aggr(sum(test.t.a),firstrow(test.t.c))}(test.t.c,test.t.c)->Aggr(sum(Column#26))->Projection", + "Join{DataScan(a)->Aggr(sum(test.t.a),firstrow(test.t.c))->DataScan(b)}(test.t.c,test.t.c)->Aggr(sum(Column#26))->Projection", + "Join{DataScan(a)->Aggr(sum(test.t.a),firstrow(test.t.c))->DataScan(b)}(test.t.c,test.t.c)->Aggr(sum(Column#26))->Projection", + "DataScan(t)->Aggr(sum(test.t.a))->Projection", + "Join{DataScan(a)->DataScan(b)->Aggr(max(test.t.b),firstrow(test.t.c))}(test.t.c,test.t.c)->Projection->Projection", + "Join{DataScan(a)->DataScan(b)}(test.t.a,test.t.a)->Aggr(max(test.t.b),max(test.t.b))->Projection", + "Join{DataScan(a)->DataScan(b)}(test.t.a,test.t.a)(test.t.b,test.t.b)->Aggr(max(test.t.c))->Projection", + "Join{DataScan(t1)->DataScan(t2)}(test.t.a,test.t.a)->Projection->Projection" + ] + }, + { + "Name": "TestPlanBuilder", + "Cases": [ + "TableReader(Table(t)->Limit)->Limit->Delete", + "*core.Explain", + "*core.Explain", + "*core.Explain", + "*core.Explain", + 
"*core.Explain", + "*core.Explain", + "*core.Explain", + "TableReader(Table(t))->Insert", + "Dual->Projection" + ] + }, + { + "Name": "TestPredicatePushDown", + "Cases": [ + "Join{DataScan(a)->DataScan(b)}(test.t.a,test.t.a)->Aggr(count(1))->Projection", + "Join{DataScan(ta)->DataScan(tb)}(test.t.d,test.t.d)->Projection", + "Join{DataScan(ta)->DataScan(tb)}(test.t.d,test.t.d)->Projection", + "Join{DataScan(ta)->DataScan(tb)}(test.t.d,test.t.d)->Projection", + "Join{DataScan(ta)->DataScan(tb)}(test.t.d,test.t.d)->Projection", + "Join{DataScan(ta)->DataScan(tb)}->Projection", + "Join{DataScan(ta)->DataScan(tb)}->Projection", + "Join{DataScan(ta)->DataScan(tb)}(test.t.d,test.t.d)->Projection", + "Join{Join{DataScan(ta)->DataScan(tb)}(test.t.a,test.t.a)->DataScan(tc)}(test.t.b,test.t.b)->Projection", + "Join{Join{DataScan(ta)->DataScan(tb)}(test.t.a,test.t.a)->DataScan(tc)}(test.t.b,test.t.b)->Projection", + "Join{DataScan(ta)->DataScan(tb)}(test.t.d,test.t.d)->Sel([or(ifnull(test.t.d, 1), isnull(test.t.d))])->Projection", + "DataScan(t)->Aggr(count(test.t.a),firstrow(test.t.a))->Sel([lt(Column#13, 1)])->Projection", + "Join{DataScan(t1)->DataScan(t2)}(test.t.a,test.t.a)->Projection", + "Dual->Projection" + ] + }, + { + "Name": "TestTopNPushDown", + "Cases": [ + "DataScan(t)->TopN([test.t.b],0,5)->Projection", + "DataScan(t)->Limit->Projection", + "DataScan(t)->Aggr(count(test.t.b),firstrow(test.t.a))->Limit->Projection", + "DataScan(t)->Aggr(count(test.t.b),firstrow(test.t.a),firstrow(test.t.c))->TopN([test.t.c],0,5)->Projection", + "Join{DataScan(t)->DataScan(s)}->TopN([test.t.a],0,5)->Projection", + "Join{DataScan(t)->DataScan(s)}->Limit->Projection", + "Join{DataScan(t)->TopN([test.t.a],0,5)->DataScan(s)}(test.t.a,test.t.a)->TopN([test.t.a],0,5)->Projection", + "Join{DataScan(t)->TopN([test.t.a],0,10)->DataScan(s)}(test.t.a,test.t.a)->TopN([test.t.a],5,5)->Projection", + "Join{DataScan(t)->Limit->DataScan(s)}(test.t.a,test.t.a)->Limit->Projection", + "Join{DataScan(t)->DataScan(s)->TopN([test.t.a],0,5)}(test.t.a,test.t.a)->TopN([test.t.a],0,5)->Projection", + "Join{DataScan(t)->DataScan(s)}(test.t.a,test.t.a)->TopN([test.t.a test.t.b],0,5)->Projection", + "Join{DataScan(t1)->TopN([test.t.b],0,5)->DataScan(t2)}(test.t.e,test.t.e)->TopN([test.t.b],0,5)->Projection", + "Join{DataScan(t1)->DataScan(t2)}(test.t.e,test.t.e)->TopN([ifnull(test.t.h, test.t.b)],0,5)->Projection->Projection" + ] + }, + { + "Name": "TestUniqueKeyInfo", + "Cases": [ + { + "1": [ + [ + "test.t.a" + ] + ], + "2": [ + [ + "test.t.a" + ] + ], + "3": [ + [ + "test.t.a" + ] + ] + }, + { + "1": [ + [ + "test.t.f" + ], + [ + "test.t.a" + ] + ], + "2": [ + [ + "test.t.a" + ], + [ + "test.t.b" + ] + ], + "3": [ + [ + "test.t.a" + ], + [ + "test.t.b" + ] + ] + }, + { + "1": [ + [ + "test.t.a" + ] + ], + "2": [ + [ + "test.t.c", + "test.t.d", + "test.t.e" + ] + ], + "3": [ + [ + "test.t.c", + "test.t.d", + "test.t.e" + ] + ] + }, + { + "1": [ + [ + "test.t.f" + ], + [ + "test.t.f", + "test.t.g" + ], + [ + "test.t.a" + ] + ], + "2": [ + [ + "test.t.f" + ], + [ + "test.t.f", + "test.t.g" + ] + ], + "3": [ + [ + "test.t.f" + ], + [ + "test.t.f", + "test.t.g" + ] + ] + }, + { + "1": [ + [ + "test.t.f" + ], + [ + "test.t.f", + "test.t.g" + ], + [ + "test.t.a" + ] + ], + "2": [ + [ + "test.t.f" + ], + [ + "test.t.f", + "test.t.g" + ], + [ + "test.t.a" + ] + ], + "3": [ + [ + "test.t.f" + ], + [ + "test.t.f", + "test.t.g" + ], + [ + "test.t.a" + ] + ], + "4": [ + [ + "test.t.f" + ], + [ + "test.t.f", + "test.t.g" + ], + [ + "test.t.a" + 
] + ] + }, + { + "1": [ + [ + "test.t.f" + ], + [ + "test.t.a" + ] + ], + "2": [ + [ + "test.t.f" + ] + ], + "3": [ + [ + "test.t.f" + ] + ], + "5": [ + [ + "test.t.f" + ] + ], + "6": [ + [ + "test.t.f" + ] + ] + }, + { + "1": [ + [ + "test.t.f" + ], + [ + "test.t.f", + "test.t.g" + ], + [ + "test.t.a" + ] + ], + "2": [ + [ + "test.t.f" + ], + [ + "test.t.f", + "test.t.g" + ], + [ + "test.t.a" + ] + ], + "3": [ + [ + "test.t.f" + ], + [ + "test.t.f", + "test.t.g" + ], + [ + "test.t.a" + ] + ], + "4": [ + [ + "test.t.f" + ], + [ + "test.t.f", + "test.t.g" + ], + [ + "test.t.a" + ] + ] + } + ] + }, + { + "Name": "TestAggPrune", + "Cases": [ + "DataScan(t)->Projection", + "DataScan(t)->Aggr(sum(test.t.b))->Projection", + "DataScan(t)->Projection", + "DataScan(t)->Projection", + "DataScan(t)->Projection" + ] + }, + { + "Name": "TestColumnPruning", + "Cases": [ + { + "1": [ + "test.t.a" + ] + }, + { + "1": [ + "test.t.a" + ] + }, + { + "1": [ + "test.t.a" + ], + "2": [ + "test.t.a" + ] + }, + { + "1": [ + "test.t.a" + ], + "2": [ + "test.t.d" + ] + }, + { + "1": [ + "test.t.a", + "test.t.d" + ], + "2": [ + "test.t.d" + ] + }, + { + "1": [ + "test.t.a", + "test.t.d" + ], + "2": [ + "test.t.a", + "test.t.b", + "test.t.d" + ] + }, + { + "1": [ + "test.t.a", + "test.t.b", + "test.t.c" + ] + } + ] + }, + { + "Name": "TestDeriveNotNullConds", + "Cases": [ + { + "Plan": "Join{DataScan(t1)->DataScan(t2)}(test.t.e,test.t.e)->Projection", + "Left": "[not(isnull(test.t.e))]", + "Right": "[not(isnull(test.t.e))]" + }, + { + "Plan": "Join{DataScan(t1)->DataScan(t2)}->Projection", + "Left": "[not(isnull(test.t.e))]", + "Right": "[not(isnull(test.t.e))]" + }, + { + "Plan": "Join{DataScan(t1)->DataScan(t2)}(test.t.e,test.t.e)->Projection", + "Left": "[not(isnull(test.t.e))]", + "Right": "[not(isnull(test.t.e))]" + }, + { + "Plan": "Join{DataScan(t1)->DataScan(t2)}(test.t.e,test.t.e)->Projection", + "Left": "[]", + "Right": "[not(isnull(test.t.e))]" + }, + { + "Plan": "Join{DataScan(t1)->DataScan(t2)}->Projection", + "Left": "[]", + "Right": "[not(isnull(test.t.e))]" + }, + { + "Plan": "Join{DataScan(t1)->DataScan(t2)}(test.t.e,test.t.e)->Projection", + "Left": "[]", + "Right": "[not(isnull(test.t.e))]" + }, + { + "Plan": "Join{DataScan(t1)->DataScan(t2)}(test.t.e,test.t.e)->Projection", + "Left": "[not(isnull(test.t.e))]", + "Right": "[]" + }, + { + "Plan": "Join{DataScan(t1)->DataScan(t2)}(test.t.b,test.t.b)->Projection", + "Left": "[]", + "Right": "[]" + }, + { + "Plan": "Join{DataScan(t1)->DataScan(t2)}(test.t.b,test.t.b)->Projection", + "Left": "[]", + "Right": "[]" + }, + { + "Plan": "Join{DataScan(t1)->DataScan(t2)}->Projection", + "Left": "[]", + "Right": "[]" + } + ] + }, + { + "Name": "TestJoinPredicatePushDown", + "Cases": [ + { + "Left": "[]", + "Right": "[]" + }, + { + "Left": "[]", + "Right": "[]" + }, + { + "Left": "[or(eq(test.t.a, 1), eq(test.t.a, 2))]", + "Right": "[or(eq(test.t.a, 1), eq(test.t.a, 2))]" + }, + { + "Left": "[or(eq(test.t.c, 1), eq(test.t.a, 2))]", + "Right": "[]" + }, + { + "Left": "[eq(test.t.c, 1) or(eq(test.t.a, 3), eq(test.t.a, 4))]", + "Right": "[or(eq(test.t.a, 3), eq(test.t.a, 4))]" + }, + { + "Left": "[or(and(gt(test.t.a, 1), lt(test.t.a, 3)), eq(test.t.a, 2))]", + "Right": "[or(eq(test.t.a, 1), eq(test.t.a, 2))]" + }, + { + "Left": "[or(eq(test.t.a, 1), eq(test.t.a, 2))]", + "Right": "[or(eq(test.t.a, 1), eq(test.t.a, 2))]" + }, + { + "Left": "[]", + "Right": "[or(eq(test.t.a, 1), eq(test.t.a, 2))]" + }, + { + "Left": "[]", + "Right": "[]" + }, + { + "Left": "[]", + 
"Right": "[]" + }, + { + "Left": "[]", + "Right": "[]" + }, + { + "Left": "[]", + "Right": "[or(eq(test.t.c, 1), eq(test.t.a, 2))]" + }, + { + "Left": "[]", + "Right": "[or(or(eq(test.t.a, 3), eq(test.t.a, 4)), eq(test.t.a, 2))]" + }, + { + "Left": "[gt(test.t.a, 1)]", + "Right": "[]" + } + ] + }, + { + "Name": "TestJoinReOrder", + "Cases": [ + "Join{Join{Join{Join{DataScan(t1)->DataScan(t2)}(test.t.a,test.t.b)->DataScan(t3)}(test.t.a,test.t.b)->DataScan(t4)}(test.t.c,test.t.a)(test.t.c,test.t.d)->Join{DataScan(t5)->DataScan(t6)}(test.t.d,test.t.d)}->Projection", + "Join{Join{Join{Join{DataScan(t1)->DataScan(t8)}(test.t.a,test.t.a)->DataScan(t2)}->Join{DataScan(t3)->DataScan(t4)}}->Join{Join{DataScan(t5)->DataScan(t6)}->DataScan(t7)}}->Projection", + "Join{Join{Join{Join{DataScan(t5)->DataScan(t1)}(test.t.a,test.t.a)->DataScan(t2)}(test.t.a,test.t.a)->DataScan(t3)}(test.t.a,test.t.a)(test.t.a,test.t.a)->DataScan(t4)}(test.t.a,test.t.a)(test.t.a,test.t.a)(test.t.a,test.t.a)->Projection", + "Join{Join{Join{DataScan(t3)->DataScan(t1)}->Join{DataScan(t2)->DataScan(t4)}}->DataScan(t5)}->Projection" + ] + }, + { + "Name": "TestOuterJoinEliminator", + "Cases": [ + "DataScan(t1)->Aggr(max(test.t.a),min(test.t.b))->Projection", + "DataScan(t1)->Projection", + "DataScan(t2)->Projection", + "Join{Join{DataScan(t1)->DataScan(t2)}(test.t.a,test.t.a)->DataScan(t3)->TopN([test.t.b true],0,1)}(test.t.b,test.t.b)->TopN([test.t.b true],0,1)->Aggr(max(test.t.b))->Projection", + "DataScan(t1)->Projection", + "Join{DataScan(t1)->DataScan(t2)}(test.t.a,test.t.a)->Sort->Projection" + ] + }, + { + "Name": "TestSimplifyOuterJoin", + "Cases": [ + { + "Best": "Join{DataScan(t1)->DataScan(t2)}(test.t.b,test.t.b)->Sel([or(gt(test.t.c, 1), gt(test.t.c, 1))])->Projection", + "JoinType": "left outer join" + }, + { + "Best": "Join{DataScan(t1)->DataScan(t2)}(test.t.b,test.t.b)->Projection", + "JoinType": "inner join" + }, + { + "Best": "Join{DataScan(t1)->DataScan(t2)}(test.t.c,test.t.c)->Projection", + "JoinType": "inner join" + } + ] + }, + { + "Name": "TestOuterWherePredicatePushDown", + "Cases": [ + { + "Sel": "[or(and(eq(test.t.a, 1), isnull(test.t.a)), and(eq(test.t.a, 2), eq(test.t.a, 2)))]", + "Left": "[or(eq(test.t.a, 1), eq(test.t.a, 2))]", + "Right": "[]" + }, + { + "Sel": "[or(and(eq(test.t.c, 1), or(eq(test.t.a, 3), eq(test.t.a, 3))), and(eq(test.t.a, 2), eq(test.t.a, 2)))]", + "Left": "[or(eq(test.t.c, 1), eq(test.t.a, 2))]", + "Right": "[]" + }, + { + "Sel": "[or(and(eq(test.t.c, 1), or(and(eq(test.t.a, 3), eq(test.t.a, 3)), and(eq(test.t.a, 4), eq(test.t.a, 4)))), and(eq(test.t.a, 2), isnull(test.t.a)))]", + "Left": "[or(and(eq(test.t.c, 1), or(eq(test.t.a, 3), eq(test.t.a, 4))), eq(test.t.a, 2))]", + "Right": "[]" + } + ] + } +] diff --git a/planner/core/util.go b/planner/core/util.go new file mode 100644 index 0000000..e73bf73 --- /dev/null +++ b/planner/core/util.go @@ -0,0 +1,151 @@ +// Copyright 2017 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. 
+
+package core
+
+import (
+	"github.com/pingcap/tidb/expression"
+	"github.com/pingcap/tidb/parser/ast"
+	"github.com/pingcap/tidb/types"
+)
+
+// AggregateFuncExtractor visits Expr tree.
+// It converts ColumnNameExpr to AggregateFuncExpr and collects AggregateFuncExpr.
+type AggregateFuncExtractor struct {
+	inAggregateFuncExpr bool
+	// AggFuncs is the collected AggregateFuncExprs.
+	AggFuncs []*ast.AggregateFuncExpr
+}
+
+// Enter implements Visitor interface.
+func (a *AggregateFuncExtractor) Enter(n ast.Node) (ast.Node, bool) {
+	switch n.(type) {
+	case *ast.AggregateFuncExpr:
+		a.inAggregateFuncExpr = true
+	case *ast.SelectStmt:
+		return n, true
+	}
+	return n, false
+}
+
+// Leave implements Visitor interface.
+func (a *AggregateFuncExtractor) Leave(n ast.Node) (ast.Node, bool) {
+	switch v := n.(type) {
+	case *ast.AggregateFuncExpr:
+		a.inAggregateFuncExpr = false
+		a.AggFuncs = append(a.AggFuncs, v)
+	}
+	return n, true
+}
+
+// logicalSchemaProducer stores the schema for the logical plans that can produce schema directly.
+type logicalSchemaProducer struct {
+	schema *expression.Schema
+	names  types.NameSlice
+	baseLogicalPlan
+}
+
+// Schema implements the Plan.Schema interface.
+func (s *logicalSchemaProducer) Schema() *expression.Schema {
+	if s.schema == nil {
+		s.schema = expression.NewSchema()
+	}
+	return s.schema
+}
+
+func (s *logicalSchemaProducer) OutputNames() types.NameSlice {
+	return s.names
+}
+
+func (s *logicalSchemaProducer) SetOutputNames(names types.NameSlice) {
+	s.names = names
+}
+
+// SetSchema implements the Plan.SetSchema interface.
+func (s *logicalSchemaProducer) SetSchema(schema *expression.Schema) {
+	s.schema = schema
+}
+
+func (s *logicalSchemaProducer) setSchemaAndNames(schema *expression.Schema, names types.NameSlice) {
+	s.schema = schema
+	s.names = names
+}
+
+// physicalSchemaProducer stores the schema for the physical plans that can produce schema directly.
+type physicalSchemaProducer struct {
+	schema *expression.Schema
+	basePhysicalPlan
+}
+
+// Schema implements the Plan.Schema interface.
+func (s *physicalSchemaProducer) Schema() *expression.Schema {
+	if s.schema == nil {
+		s.schema = expression.NewSchema()
+	}
+	return s.schema
+}
+
+// SetSchema implements the Plan.SetSchema interface.
+func (s *physicalSchemaProducer) SetSchema(schema *expression.Schema) {
+	s.schema = schema
+}
+
+// baseSchemaProducer stores the schema for the base plans that can produce schema directly.
+type baseSchemaProducer struct {
+	schema *expression.Schema
+	names  types.NameSlice
+	basePlan
+}
+
+// OutputNames returns the outputting names of each column.
+func (s *baseSchemaProducer) OutputNames() types.NameSlice {
+	return s.names
+}
+
+func (s *baseSchemaProducer) SetOutputNames(names types.NameSlice) {
+	s.names = names
+}
+
+// Schema implements the Plan.Schema interface.
+func (s *baseSchemaProducer) Schema() *expression.Schema {
+	if s.schema == nil {
+		s.schema = expression.NewSchema()
+	}
+	return s.schema
+}
+
+// SetSchema implements the Plan.SetSchema interface.
+func (s *baseSchemaProducer) SetSchema(schema *expression.Schema) { + s.schema = schema +} + +func (s *baseSchemaProducer) setSchemaAndNames(schema *expression.Schema, names types.NameSlice) { + s.schema = schema + s.names = names +} + +func buildLogicalJoinSchema(joinType JoinType, join LogicalPlan) *expression.Schema { + leftSchema := join.Children()[0].Schema() + newSchema := expression.MergeSchema(leftSchema, join.Children()[1].Schema()) + if joinType == LeftOuterJoin { + resetNotNullFlag(newSchema, leftSchema.Len(), newSchema.Len()) + } else if joinType == RightOuterJoin { + resetNotNullFlag(newSchema, 0, leftSchema.Len()) + } + return newSchema +} + +// BuildPhysicalJoinSchema builds the schema of PhysicalJoin from it's children's schema. +func BuildPhysicalJoinSchema(joinType JoinType, join PhysicalPlan) *expression.Schema { + return expression.MergeSchema(join.Children()[0].Schema(), join.Children()[1].Schema()) +} diff --git a/planner/implementation/base.go b/planner/implementation/base.go new file mode 100644 index 0000000..9f03346 --- /dev/null +++ b/planner/implementation/base.go @@ -0,0 +1,57 @@ +// Copyright 2018 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package implementation + +import ( + plannercore "github.com/pingcap/tidb/planner/core" + "github.com/pingcap/tidb/planner/memo" +) + +type baseImpl struct { + cost float64 + plan plannercore.PhysicalPlan +} + +func (impl *baseImpl) CalcCost(outCount float64, children ...memo.Implementation) float64 { + impl.cost = 0 + for _, child := range children { + impl.cost += child.GetCost() + } + return impl.cost +} + +func (impl *baseImpl) SetCost(cost float64) { + impl.cost = cost +} + +func (impl *baseImpl) GetCost() float64 { + return impl.cost +} + +func (impl *baseImpl) GetPlan() plannercore.PhysicalPlan { + return impl.plan +} + +func (impl *baseImpl) AttachChildren(children ...memo.Implementation) memo.Implementation { + childrenPlan := make([]plannercore.PhysicalPlan, len(children)) + for i, child := range children { + childrenPlan[i] = child.GetPlan() + } + impl.plan.SetChildren(childrenPlan...) + return impl +} + +func (impl *baseImpl) ScaleCostLimit(costLimit float64) float64 { + return costLimit +} diff --git a/planner/implementation/base_test.go b/planner/implementation/base_test.go new file mode 100644 index 0000000..8d55a38 --- /dev/null +++ b/planner/implementation/base_test.go @@ -0,0 +1,64 @@ +// Copyright 2018 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package implementation + +import ( + "testing" + + . 
"github.com/pingcap/check" + "github.com/pingcap/tidb/infoschema" + "github.com/pingcap/tidb/parser" + "github.com/pingcap/tidb/parser/model" + plannercore "github.com/pingcap/tidb/planner/core" + "github.com/pingcap/tidb/planner/memo" + "github.com/pingcap/tidb/sessionctx" + "github.com/pingcap/tidb/util/testleak" +) + +func TestT(t *testing.T) { + CustomVerboseFlag = true + TestingT(t) +} + +var _ = Suite(&testImplSuite{}) + +type testImplSuite struct { + *parser.Parser + is infoschema.InfoSchema + sctx sessionctx.Context +} + +func (s *testImplSuite) SetUpSuite(c *C) { + testleak.BeforeTest() + s.is = infoschema.MockInfoSchema([]*model.TableInfo{plannercore.MockSignedTable()}) + s.sctx = plannercore.MockContext() + s.Parser = parser.New() +} + +func (s *testImplSuite) TearDownSuite(c *C) { + testleak.AfterTest(c)() +} + +func (s *testImplSuite) TestBaseImplementation(c *C) { + p := plannercore.PhysicalLimit{}.Init(s.sctx, nil, nil) + impl := &baseImpl{plan: p} + c.Assert(impl.GetPlan(), Equals, p) + + cost := impl.CalcCost(10, []memo.Implementation{}...) + c.Assert(cost, Equals, 0.0) + c.Assert(impl.GetCost(), Equals, 0.0) + + impl.SetCost(6.0) + c.Assert(impl.GetCost(), Equals, 6.0) +} diff --git a/planner/implementation/datasource.go b/planner/implementation/datasource.go new file mode 100644 index 0000000..0a6134d --- /dev/null +++ b/planner/implementation/datasource.go @@ -0,0 +1,172 @@ +// Copyright 2019 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package implementation + +import ( + "github.com/pingcap/tidb/expression" + plannercore "github.com/pingcap/tidb/planner/core" + "github.com/pingcap/tidb/planner/memo" + "github.com/pingcap/tidb/statistics" + "math" +) + +// TableDualImpl implementation of PhysicalTableDual. +type TableDualImpl struct { + baseImpl +} + +// NewTableDualImpl creates a new table dual Implementation. +func NewTableDualImpl(dual *plannercore.PhysicalTableDual) *TableDualImpl { + return &TableDualImpl{baseImpl{plan: dual}} +} + +// CalcCost calculates the cost of the table dual Implementation. +func (impl *TableDualImpl) CalcCost(outCount float64, children ...memo.Implementation) float64 { + return 0 +} + +// TableReaderImpl implementation of PhysicalTableReader. +type TableReaderImpl struct { + baseImpl + tblColHists *statistics.HistColl +} + +// NewTableReaderImpl creates a new table reader Implementation. +func NewTableReaderImpl(reader *plannercore.PhysicalTableReader, hists *statistics.HistColl) *TableReaderImpl { + base := baseImpl{plan: reader} + impl := &TableReaderImpl{ + baseImpl: base, + tblColHists: hists, + } + return impl +} + +// CalcCost calculates the cost of the table reader Implementation. 
+func (impl *TableReaderImpl) CalcCost(outCount float64, children ...memo.Implementation) float64 { + reader := impl.plan.(*plannercore.PhysicalTableReader) + width := impl.tblColHists.GetAvgRowSize(reader.Schema().Columns, false) + sessVars := reader.SCtx().GetSessionVars() + networkCost := outCount * sessVars.NetworkFactor * width + // copTasks are run in parallel, to make the estimated cost closer to execution time, we amortize + // the cost to cop iterator workers. According to `CopClient::Send`, the concurrency + // is Min(DistSQLScanConcurrency, numRegionsInvolvedInScan), since we cannot infer + // the number of regions involved, we simply use DistSQLScanConcurrency. + copIterWorkers := float64(sessVars.DistSQLScanConcurrency) + impl.cost = (networkCost + children[0].GetCost()) / copIterWorkers + return impl.cost +} + +// ScaleCostLimit implements Implementation interface. +func (impl *TableReaderImpl) ScaleCostLimit(costLimit float64) float64 { + reader := impl.plan.(*plannercore.PhysicalTableReader) + sessVars := reader.SCtx().GetSessionVars() + copIterWorkers := float64(sessVars.DistSQLScanConcurrency) + if math.MaxFloat64/copIterWorkers < costLimit { + return math.MaxFloat64 + } + return costLimit * copIterWorkers +} + +// TableScanImpl implementation of PhysicalTableScan. +type TableScanImpl struct { + baseImpl + tblColHists *statistics.HistColl + tblCols []*expression.Column +} + +// NewTableScanImpl creates a new table scan Implementation. +func NewTableScanImpl(ts *plannercore.PhysicalTableScan, cols []*expression.Column, hists *statistics.HistColl) *TableScanImpl { + base := baseImpl{plan: ts} + impl := &TableScanImpl{ + baseImpl: base, + tblColHists: hists, + tblCols: cols, + } + return impl +} + +// CalcCost calculates the cost of the table scan Implementation. +func (impl *TableScanImpl) CalcCost(outCount float64, children ...memo.Implementation) float64 { + ts := impl.plan.(*plannercore.PhysicalTableScan) + width := impl.tblColHists.GetTableAvgRowSize(impl.tblCols) + sessVars := ts.SCtx().GetSessionVars() + impl.cost = outCount * sessVars.ScanFactor * width + if ts.Desc { + impl.cost = outCount * sessVars.DescScanFactor * width + } + return impl.cost +} + +// IndexReaderImpl is the implementation of PhysicalIndexReader. +type IndexReaderImpl struct { + baseImpl + tblColHists *statistics.HistColl +} + +// ScaleCostLimit implements Implementation interface. +func (impl *IndexReaderImpl) ScaleCostLimit(costLimit float64) float64 { + reader := impl.plan.(*plannercore.PhysicalIndexReader) + sessVars := reader.SCtx().GetSessionVars() + copIterWorkers := float64(sessVars.DistSQLScanConcurrency) + if math.MaxFloat64/copIterWorkers < costLimit { + return math.MaxFloat64 + } + return costLimit * copIterWorkers +} + +// CalcCost implements Implementation interface. +func (impl *IndexReaderImpl) CalcCost(outCount float64, children ...memo.Implementation) float64 { + reader := impl.plan.(*plannercore.PhysicalIndexReader) + sessVars := reader.SCtx().GetSessionVars() + networkCost := outCount * sessVars.NetworkFactor * impl.tblColHists.GetAvgRowSize(children[0].GetPlan().Schema().Columns, true) + copIterWorkers := float64(sessVars.DistSQLScanConcurrency) + impl.cost = (networkCost + children[0].GetCost()) / copIterWorkers + return impl.cost +} + +// NewIndexReaderImpl creates a new IndexReader Implementation. 
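+// The histogram collection passed in is used by CalcCost to estimate the average size
+// of the rows sent back over the network.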
+func NewIndexReaderImpl(reader *plannercore.PhysicalIndexReader, tblColHists *statistics.HistColl) *IndexReaderImpl { + return &IndexReaderImpl{ + baseImpl: baseImpl{plan: reader}, + tblColHists: tblColHists, + } +} + +// IndexScanImpl is the Implementation of PhysicalIndexScan. +type IndexScanImpl struct { + baseImpl + tblColHists *statistics.HistColl +} + +// CalcCost implements Implementation interface. +func (impl *IndexScanImpl) CalcCost(outCount float64, children ...memo.Implementation) float64 { + is := impl.plan.(*plannercore.PhysicalIndexScan) + sessVars := is.SCtx().GetSessionVars() + rowSize := impl.tblColHists.GetIndexAvgRowSize(is.Schema().Columns, is.Index.Unique) + cost := outCount * rowSize * sessVars.ScanFactor + if is.Desc { + cost = outCount * rowSize * sessVars.DescScanFactor + } + cost += float64(len(is.Ranges)) * sessVars.SeekFactor + impl.cost = cost + return impl.cost +} + +// NewIndexScanImpl creates a new IndexScan Implementation. +func NewIndexScanImpl(scan *plannercore.PhysicalIndexScan, tblColHists *statistics.HistColl) *IndexScanImpl { + return &IndexScanImpl{ + baseImpl: baseImpl{plan: scan}, + tblColHists: tblColHists, + } +} diff --git a/planner/implementation/join.go b/planner/implementation/join.go new file mode 100644 index 0000000..9ce8d2f --- /dev/null +++ b/planner/implementation/join.go @@ -0,0 +1,47 @@ +// Copyright 2019 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package implementation + +import ( + plannercore "github.com/pingcap/tidb/planner/core" + "github.com/pingcap/tidb/planner/memo" +) + +// HashJoinImpl is the implementation for PhysicalHashJoin. +type HashJoinImpl struct { + baseImpl +} + +// CalcCost implements Implementation CalcCost interface. +func (impl *HashJoinImpl) CalcCost(outCount float64, children ...memo.Implementation) float64 { + hashJoin := impl.plan.(*plannercore.PhysicalHashJoin) + // The children here are only used to calculate the cost. + hashJoin.SetChildren(children[0].GetPlan(), children[1].GetPlan()) + selfCost := hashJoin.GetCost(children[0].GetPlan().StatsCount(), children[1].GetPlan().StatsCount()) + impl.cost = selfCost + children[0].GetCost() + children[1].GetCost() + return impl.cost +} + +// AttachChildren implements Implementation AttachChildren interface. +func (impl *HashJoinImpl) AttachChildren(children ...memo.Implementation) memo.Implementation { + hashJoin := impl.plan.(*plannercore.PhysicalHashJoin) + hashJoin.SetChildren(children[0].GetPlan(), children[1].GetPlan()) + hashJoin.SetSchema(plannercore.BuildPhysicalJoinSchema(hashJoin.JoinType, hashJoin)) + return impl +} + +// NewHashJoinImpl creates a new HashJoinImpl. +func NewHashJoinImpl(hashJoin *plannercore.PhysicalHashJoin) *HashJoinImpl { + return &HashJoinImpl{baseImpl{plan: hashJoin}} +} diff --git a/planner/implementation/simple_plans.go b/planner/implementation/simple_plans.go new file mode 100644 index 0000000..2fd2d7f --- /dev/null +++ b/planner/implementation/simple_plans.go @@ -0,0 +1,146 @@ +// Copyright 2019 PingCAP, Inc. 
+// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package implementation + +import ( + plannercore "github.com/pingcap/tidb/planner/core" + "github.com/pingcap/tidb/planner/memo" +) + +// ProjectionImpl is the implementation of PhysicalProjection. +type ProjectionImpl struct { + baseImpl +} + +// NewProjectionImpl creates a new projection Implementation. +func NewProjectionImpl(proj *plannercore.PhysicalProjection) *ProjectionImpl { + return &ProjectionImpl{baseImpl{plan: proj}} +} + +// ShowImpl is the Implementation of PhysicalShow. +type ShowImpl struct { + baseImpl +} + +// NewShowImpl creates a new ShowImpl. +func NewShowImpl(show *plannercore.PhysicalShow) *ShowImpl { + return &ShowImpl{baseImpl: baseImpl{plan: show}} +} + +// TiDBSelectionImpl is the implementation of PhysicalSelection in TiDB layer. +type TiDBSelectionImpl struct { + baseImpl +} + +// CalcCost implements Implementation CalcCost interface. +func (sel *TiDBSelectionImpl) CalcCost(outCount float64, children ...memo.Implementation) float64 { + sel.cost = children[0].GetPlan().Stats().RowCount*sel.plan.SCtx().GetSessionVars().CPUFactor + children[0].GetCost() + return sel.cost +} + +// NewTiDBSelectionImpl creates a new TiDBSelectionImpl. +func NewTiDBSelectionImpl(sel *plannercore.PhysicalSelection) *TiDBSelectionImpl { + return &TiDBSelectionImpl{baseImpl{plan: sel}} +} + +// TiKVSelectionImpl is the implementation of PhysicalSelection in TiKV layer. +type TiKVSelectionImpl struct { + baseImpl +} + +// CalcCost implements Implementation CalcCost interface. +func (sel *TiKVSelectionImpl) CalcCost(outCount float64, children ...memo.Implementation) float64 { + sel.cost = children[0].GetPlan().Stats().RowCount*sel.plan.SCtx().GetSessionVars().CopCPUFactor + children[0].GetCost() + return sel.cost +} + +// NewTiKVSelectionImpl creates a new TiKVSelectionImpl. +func NewTiKVSelectionImpl(sel *plannercore.PhysicalSelection) *TiKVSelectionImpl { + return &TiKVSelectionImpl{baseImpl{plan: sel}} +} + +// TiDBHashAggImpl is the implementation of PhysicalHashAgg in TiDB layer. +type TiDBHashAggImpl struct { + baseImpl +} + +// CalcCost implements Implementation CalcCost interface. +func (agg *TiDBHashAggImpl) CalcCost(outCount float64, children ...memo.Implementation) float64 { + hashAgg := agg.plan.(*plannercore.PhysicalHashAgg) + selfCost := hashAgg.GetCost(children[0].GetPlan().Stats().RowCount, true) + agg.cost = selfCost + children[0].GetCost() + return agg.cost +} + +// AttachChildren implements Implementation AttachChildren interface. +func (agg *TiDBHashAggImpl) AttachChildren(children ...memo.Implementation) memo.Implementation { + hashAgg := agg.plan.(*plannercore.PhysicalHashAgg) + hashAgg.SetChildren(children[0].GetPlan()) + // Inject extraProjection if the AggFuncs or GroupByItems contain ScalarFunction. + plannercore.InjectProjBelowAgg(hashAgg, hashAgg.AggFuncs, hashAgg.GroupByItems) + return agg +} + +// NewTiDBHashAggImpl creates a new TiDBHashAggImpl. 
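+// TiDBHashAggImpl and TiKVHashAggImpl share the same structure; their CalcCost methods
+// differ only in the boolean passed to PhysicalHashAgg.GetCost (true for the TiDB layer,
+// false for the TiKV layer).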
+func NewTiDBHashAggImpl(agg *plannercore.PhysicalHashAgg) *TiDBHashAggImpl { + return &TiDBHashAggImpl{baseImpl{plan: agg}} +} + +// TiKVHashAggImpl is the implementation of PhysicalHashAgg in TiKV layer. +type TiKVHashAggImpl struct { + baseImpl +} + +// CalcCost implements Implementation CalcCost interface. +func (agg *TiKVHashAggImpl) CalcCost(outCount float64, children ...memo.Implementation) float64 { + hashAgg := agg.plan.(*plannercore.PhysicalHashAgg) + selfCost := hashAgg.GetCost(children[0].GetPlan().Stats().RowCount, false) + agg.cost = selfCost + children[0].GetCost() + return agg.cost +} + +// NewTiKVHashAggImpl creates a new TiKVHashAggImpl. +func NewTiKVHashAggImpl(agg *plannercore.PhysicalHashAgg) *TiKVHashAggImpl { + return &TiKVHashAggImpl{baseImpl{plan: agg}} +} + +// LimitImpl is the implementation of PhysicalLimit. Since PhysicalLimit on different +// engines have the same behavior, and we don't calculate the cost of `Limit`, we only +// have one Implementation for it. +type LimitImpl struct { + baseImpl +} + +// NewLimitImpl creates a new LimitImpl. +func NewLimitImpl(limit *plannercore.PhysicalLimit) *LimitImpl { + return &LimitImpl{baseImpl{plan: limit}} +} + +// TiDBTopNImpl is the implementation of PhysicalTopN in TiDB layer. +type TiDBTopNImpl struct { + baseImpl +} + +// CalcCost implements Implementation CalcCost interface. +func (impl *TiDBTopNImpl) CalcCost(outCount float64, children ...memo.Implementation) float64 { + topN := impl.plan.(*plannercore.PhysicalTopN) + childCount := children[0].GetPlan().Stats().RowCount + impl.cost = topN.GetCost(childCount, true) + children[0].GetCost() + return impl.cost +} + +// NewTiDBTopNImpl creates a new TiDBTopNImpl. +func NewTiDBTopNImpl(topN *plannercore.PhysicalTopN) *TiDBTopNImpl { + return &TiDBTopNImpl{baseImpl{plan: topN}} +} diff --git a/planner/implementation/sort.go b/planner/implementation/sort.go new file mode 100644 index 0000000..9e2e8a5 --- /dev/null +++ b/planner/implementation/sort.go @@ -0,0 +1,64 @@ +// Copyright 2018 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package implementation + +import ( + "math" + + plannercore "github.com/pingcap/tidb/planner/core" + "github.com/pingcap/tidb/planner/memo" +) + +// SortImpl implementation of PhysicalSort. +type SortImpl struct { + baseImpl +} + +// NewSortImpl creates a new sort Implementation. +func NewSortImpl(sort *plannercore.PhysicalSort) *SortImpl { + return &SortImpl{baseImpl{plan: sort}} +} + +// CalcCost calculates the cost of the sort Implementation. +func (impl *SortImpl) CalcCost(outCount float64, children ...memo.Implementation) float64 { + cnt := math.Min(children[0].GetPlan().Stats().RowCount, impl.plan.GetChildReqProps(0).ExpectedCnt) + sort := impl.plan.(*plannercore.PhysicalSort) + impl.cost = sort.GetCost(cnt) + children[0].GetCost() + return impl.cost +} + +// AttachChildren implements Implementation AttachChildren interface. 
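+// Note that impl.plan may be replaced here: when the by-items contain ScalarFunctions,
+// InjectProjBelowSort injects extra Projections around the Sort and returns the new root.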
+func (impl *SortImpl) AttachChildren(children ...memo.Implementation) memo.Implementation { + sort := impl.plan.(*plannercore.PhysicalSort) + sort.SetChildren(children[0].GetPlan()) + // When the Sort orderByItems contain ScalarFunction, we need + // to inject two Projections below and above the Sort. + impl.plan = plannercore.InjectProjBelowSort(sort, sort.ByItems) + return impl +} + +// NominalSortImpl is the implementation of NominalSort. +type NominalSortImpl struct { + baseImpl +} + +// AttachChildren implements Implementation AttachChildren interface. +func (impl *NominalSortImpl) AttachChildren(children ...memo.Implementation) memo.Implementation { + return children[0] +} + +// NewNominalSortImpl creates a new NominalSort Implementation. +func NewNominalSortImpl(sort *plannercore.NominalSort) *NominalSortImpl { + return &NominalSortImpl{baseImpl{plan: sort}} +} diff --git a/planner/memo/expr_iterator.go b/planner/memo/expr_iterator.go new file mode 100644 index 0000000..e479c2c --- /dev/null +++ b/planner/memo/expr_iterator.go @@ -0,0 +1,208 @@ +// Copyright 2018 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package memo + +import ( + "container/list" +) + +// ExprIter enumerates all the equivalent expressions in the Group according to +// the expression pattern. +type ExprIter struct { + // Group and Element solely identify a Group expression. + *Group + *list.Element + + // matched indicates whether the current Group expression bound by the + // iterator matches the pattern after the creation or iteration. + matched bool + + // Pattern describes the node of pattern tree. + // The Operand type of the Group expression and the EngineType of the Group + // must be matched with it. + *Pattern + + // Children is used to iterate the child expressions. + Children []*ExprIter +} + +// Next returns the next Group expression matches the pattern. +func (iter *ExprIter) Next() (found bool) { + defer func() { + iter.matched = found + }() + + // Iterate child firstly. + for i := len(iter.Children) - 1; i >= 0; i-- { + if !iter.Children[i].Next() { + continue + } + + for j := i + 1; j < len(iter.Children); j++ { + iter.Children[j].Reset() + } + return true + } + + // It's root node or leaf ANY node. + if iter.Group == nil || iter.Operand == OperandAny { + return false + } + + // Otherwise, iterate itself to find more matched equivalent expressions. + for elem := iter.Element.Next(); elem != nil; elem = elem.Next() { + expr := elem.Value.(*GroupExpr) + + if !iter.Operand.Match(GetOperand(expr.ExprNode)) { + // All the Equivalents which have the same Operand are continuously + // stored in the list. Once the current equivalent can not Match + // the Operand, the rest can not, either. 
+ return false + } + + if len(iter.Children) == 0 { + iter.Element = elem + return true + } + if len(iter.Children) != len(expr.Children) { + continue + } + + allMatched := true + for i := range iter.Children { + iter.Children[i].Group = expr.Children[i] + if !iter.Children[i].Reset() { + allMatched = false + break + } + } + + if allMatched { + iter.Element = elem + return true + } + } + return false +} + +// Matched returns whether the iterator founds a Group expression matches the +// pattern. +func (iter *ExprIter) Matched() bool { + return iter.matched +} + +// Reset resets the iterator to the first matched Group expression. +func (iter *ExprIter) Reset() (findMatch bool) { + defer func() { iter.matched = findMatch }() + + if iter.Pattern.MatchOperandAny(iter.Group.EngineType) { + return true + } + + for elem := iter.Group.GetFirstElem(iter.Operand); elem != nil; elem = elem.Next() { + expr := elem.Value.(*GroupExpr) + + if !iter.Pattern.Match(GetOperand(expr.ExprNode), expr.Group.EngineType) { + break + } + + // The leaf node of the pattern tree might not be an OperandAny or a XXXScan. + // We allow the patterns like: Selection -> Projection. + // For example, we have such a memo: + // Group#1 + // Selection_0 input:[Group#2] + // Group#2 + // Projection_1 input:[Group#3] + // Projection_2 input:[Group#4] + // Group#3 + // ..... + // For the pattern above, we will match it twice: `Selection_0->Projection_1` + // and `Selection_0->Projection_2`. So if the iterator has no children, we can safely return + // the element here. + if len(iter.Children) == 0 { + iter.Element = elem + return true + } + if len(expr.Children) != len(iter.Children) { + continue + } + + allMatched := true + for i := range iter.Children { + iter.Children[i].Group = expr.Children[i] + if !iter.Children[i].Reset() { + allMatched = false + break + } + } + if allMatched { + iter.Element = elem + return true + } + } + return false +} + +// GetExpr returns the root GroupExpr of the iterator. +func (iter *ExprIter) GetExpr() *GroupExpr { + return iter.Element.Value.(*GroupExpr) +} + +// NewExprIterFromGroupElem creates the iterator on the Group Element. +func NewExprIterFromGroupElem(elem *list.Element, p *Pattern) *ExprIter { + expr := elem.Value.(*GroupExpr) + if !p.Match(GetOperand(expr.ExprNode), expr.Group.EngineType) { + return nil + } + iter := newExprIterFromGroupExpr(expr, p) + if iter != nil { + iter.Element = elem + } + return iter +} + +// newExprIterFromGroupExpr creates the iterator on the Group expression. +func newExprIterFromGroupExpr(expr *GroupExpr, p *Pattern) *ExprIter { + if len(p.Children) != 0 && len(p.Children) != len(expr.Children) { + return nil + } + iter := &ExprIter{Pattern: p, matched: true} + for i := range p.Children { + childIter := newExprIterFromGroup(expr.Children[i], p.Children[i]) + if childIter == nil { + return nil + } + iter.Children = append(iter.Children, childIter) + } + return iter +} + +// newExprIterFromGroup creates the iterator on the Group. 
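+// It returns nil if none of the Group's equivalent expressions matches the pattern node.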
+func newExprIterFromGroup(g *Group, p *Pattern) *ExprIter { + if p.MatchOperandAny(g.EngineType) { + return &ExprIter{Group: g, Pattern: p, matched: true} + } + for elem := g.GetFirstElem(p.Operand); elem != nil; elem = elem.Next() { + expr := elem.Value.(*GroupExpr) + if !p.Match(GetOperand(expr.ExprNode), g.EngineType) { + return nil + } + iter := newExprIterFromGroupExpr(expr, p) + if iter != nil { + iter.Group, iter.Element = g, elem + return iter + } + } + return nil +} diff --git a/planner/memo/expr_iterator_test.go b/planner/memo/expr_iterator_test.go new file mode 100644 index 0000000..3a087f0 --- /dev/null +++ b/planner/memo/expr_iterator_test.go @@ -0,0 +1,171 @@ +// Copyright 2018 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package memo + +import ( + . "github.com/pingcap/check" + plannercore "github.com/pingcap/tidb/planner/core" +) + +func (s *testMemoSuite) TestNewExprIterFromGroupElem(c *C) { + g0 := NewGroupWithSchema(NewGroupExpr(plannercore.LogicalSelection{}.Init(s.sctx)), s.schema) + g0.Insert(NewGroupExpr(plannercore.LogicalLimit{}.Init(s.sctx))) + g0.Insert(NewGroupExpr(plannercore.LogicalProjection{}.Init(s.sctx))) + g0.Insert(NewGroupExpr(plannercore.LogicalLimit{}.Init(s.sctx))) + + g1 := NewGroupWithSchema(NewGroupExpr(plannercore.LogicalSelection{}.Init(s.sctx)), s.schema) + g1.Insert(NewGroupExpr(plannercore.LogicalLimit{}.Init(s.sctx))) + g1.Insert(NewGroupExpr(plannercore.LogicalProjection{}.Init(s.sctx))) + g1.Insert(NewGroupExpr(plannercore.LogicalLimit{}.Init(s.sctx))) + + expr := NewGroupExpr(plannercore.LogicalJoin{}.Init(s.sctx)) + expr.Children = append(expr.Children, g0) + expr.Children = append(expr.Children, g1) + g2 := NewGroupWithSchema(expr, s.schema) + + pattern := BuildPattern(OperandJoin, EngineAll, BuildPattern(OperandProjection, EngineAll), BuildPattern(OperandSelection, EngineAll)) + iter := NewExprIterFromGroupElem(g2.Equivalents.Front(), pattern) + + c.Assert(iter, NotNil) + c.Assert(iter.Group, IsNil) + c.Assert(iter.Element, Equals, g2.Equivalents.Front()) + c.Assert(iter.matched, Equals, true) + c.Assert(iter.Operand, Equals, OperandJoin) + c.Assert(len(iter.Children), Equals, 2) + + c.Assert(iter.Children[0].Group, Equals, g0) + c.Assert(iter.Children[0].Element, Equals, g0.GetFirstElem(OperandProjection)) + c.Assert(iter.Children[0].matched, Equals, true) + c.Assert(iter.Children[0].Operand, Equals, OperandProjection) + c.Assert(len(iter.Children[0].Children), Equals, 0) + + c.Assert(iter.Children[1].Group, Equals, g1) + c.Assert(iter.Children[1].Element, Equals, g1.GetFirstElem(OperandSelection)) + c.Assert(iter.Children[1].matched, Equals, true) + c.Assert(iter.Children[1].Operand, Equals, OperandSelection) + c.Assert(len(iter.Children[0].Children), Equals, 0) +} + +func (s *testMemoSuite) TestExprIterNext(c *C) { + g0 := NewGroupWithSchema(NewGroupExpr(plannercore.LogicalProjection{}.Init(s.sctx)), s.schema) + g0.Insert(NewGroupExpr(plannercore.LogicalLimit{}.Init(s.sctx))) + g0.Insert(NewGroupExpr(plannercore.LogicalProjection{}.Init(s.sctx))) + 
g0.Insert(NewGroupExpr(plannercore.LogicalLimit{}.Init(s.sctx))) + g0.Insert(NewGroupExpr(plannercore.LogicalProjection{}.Init(s.sctx))) + + g1 := NewGroupWithSchema(NewGroupExpr(plannercore.LogicalSelection{}.Init(s.sctx)), s.schema) + g1.Insert(NewGroupExpr(plannercore.LogicalLimit{}.Init(s.sctx))) + g1.Insert(NewGroupExpr(plannercore.LogicalSelection{}.Init(s.sctx))) + g1.Insert(NewGroupExpr(plannercore.LogicalLimit{}.Init(s.sctx))) + g1.Insert(NewGroupExpr(plannercore.LogicalSelection{}.Init(s.sctx))) + + expr := NewGroupExpr(plannercore.LogicalJoin{}.Init(s.sctx)) + expr.Children = append(expr.Children, g0) + expr.Children = append(expr.Children, g1) + g2 := NewGroupWithSchema(expr, s.schema) + + pattern := BuildPattern(OperandJoin, EngineAll, BuildPattern(OperandProjection, EngineAll), BuildPattern(OperandSelection, EngineAll)) + iter := NewExprIterFromGroupElem(g2.Equivalents.Front(), pattern) + c.Assert(iter, NotNil) + + count := 0 + for ; iter.Matched(); iter.Next() { + count++ + c.Assert(iter.Group, IsNil) + c.Assert(iter.matched, Equals, true) + c.Assert(iter.Operand, Equals, OperandJoin) + c.Assert(len(iter.Children), Equals, 2) + + c.Assert(iter.Children[0].Group, Equals, g0) + c.Assert(iter.Children[0].matched, Equals, true) + c.Assert(iter.Children[0].Operand, Equals, OperandProjection) + c.Assert(len(iter.Children[0].Children), Equals, 0) + + c.Assert(iter.Children[1].Group, Equals, g1) + c.Assert(iter.Children[1].matched, Equals, true) + c.Assert(iter.Children[1].Operand, Equals, OperandSelection) + c.Assert(len(iter.Children[1].Children), Equals, 0) + } + + c.Assert(count, Equals, 9) +} + +func (s *testMemoSuite) TestExprIterReset(c *C) { + g0 := NewGroupWithSchema(NewGroupExpr(plannercore.LogicalProjection{}.Init(s.sctx)), s.schema) + g0.Insert(NewGroupExpr(plannercore.LogicalLimit{}.Init(s.sctx))) + g0.Insert(NewGroupExpr(plannercore.LogicalProjection{}.Init(s.sctx))) + g0.Insert(NewGroupExpr(plannercore.LogicalLimit{}.Init(s.sctx))) + g0.Insert(NewGroupExpr(plannercore.LogicalProjection{}.Init(s.sctx))) + + sel1 := NewGroupExpr(plannercore.LogicalSelection{}.Init(s.sctx)) + sel2 := NewGroupExpr(plannercore.LogicalSelection{}.Init(s.sctx)) + sel3 := NewGroupExpr(plannercore.LogicalSelection{}.Init(s.sctx)) + g1 := NewGroupWithSchema(sel1, s.schema) + g1.Insert(NewGroupExpr(plannercore.LogicalLimit{}.Init(s.sctx))) + g1.Insert(sel2) + g1.Insert(NewGroupExpr(plannercore.LogicalLimit{}.Init(s.sctx))) + g1.Insert(sel3) + + g2 := NewGroupWithSchema(NewGroupExpr(plannercore.LogicalSelection{}.Init(s.sctx)), s.schema) + g2.Insert(NewGroupExpr(plannercore.LogicalLimit{}.Init(s.sctx))) + g2.Insert(NewGroupExpr(plannercore.LogicalSelection{}.Init(s.sctx))) + g2.Insert(NewGroupExpr(plannercore.LogicalLimit{}.Init(s.sctx))) + g2.Insert(NewGroupExpr(plannercore.LogicalSelection{}.Init(s.sctx))) + + // link join with Group 0 and 1 + expr := NewGroupExpr(plannercore.LogicalJoin{}.Init(s.sctx)) + expr.Children = append(expr.Children, g0) + expr.Children = append(expr.Children, g1) + g3 := NewGroupWithSchema(expr, s.schema) + + // link sel 1~3 with Group 2 + sel1.Children = append(sel1.Children, g2) + sel2.Children = append(sel2.Children, g2) + sel3.Children = append(sel3.Children, g2) + + // create a pattern: join(proj, sel(limit)) + lhsPattern := BuildPattern(OperandProjection, EngineAll) + rhsPattern := BuildPattern(OperandSelection, EngineAll, BuildPattern(OperandLimit, EngineAll)) + pattern := BuildPattern(OperandJoin, EngineAll, lhsPattern, rhsPattern) + + // create expression 
iterator for the pattern on join + iter := NewExprIterFromGroupElem(g3.Equivalents.Front(), pattern) + c.Assert(iter, NotNil) + + count := 0 + for ; iter.Matched(); iter.Next() { + count++ + c.Assert(iter.Group, IsNil) + c.Assert(iter.matched, Equals, true) + c.Assert(iter.Operand, Equals, OperandJoin) + c.Assert(len(iter.Children), Equals, 2) + + c.Assert(iter.Children[0].Group, Equals, g0) + c.Assert(iter.Children[0].matched, Equals, true) + c.Assert(iter.Children[0].Operand, Equals, OperandProjection) + c.Assert(len(iter.Children[0].Children), Equals, 0) + + c.Assert(iter.Children[1].Group, Equals, g1) + c.Assert(iter.Children[1].matched, Equals, true) + c.Assert(iter.Children[1].Operand, Equals, OperandSelection) + c.Assert(len(iter.Children[1].Children), Equals, 1) + + c.Assert(iter.Children[1].Children[0].Group, Equals, g2) + c.Assert(iter.Children[1].Children[0].matched, Equals, true) + c.Assert(iter.Children[1].Children[0].Operand, Equals, OperandLimit) + c.Assert(len(iter.Children[1].Children[0].Children), Equals, 0) + } + + c.Assert(count, Equals, 18) +} diff --git a/planner/memo/group.go b/planner/memo/group.go new file mode 100644 index 0000000..bf48182 --- /dev/null +++ b/planner/memo/group.go @@ -0,0 +1,238 @@ +// Copyright 2018 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package memo + +import ( + "container/list" + "fmt" + + "github.com/pingcap/tidb/expression" + plannercore "github.com/pingcap/tidb/planner/core" + "github.com/pingcap/tidb/planner/property" +) + +// EngineType is determined by whether it's above or below `Gather`s. +// Plan will choose the different engine to be implemented/executed on according to its EngineType. +// Different engine may support different operators with different cost, so we should design +// different transformation and implementation rules for each engine. +type EngineType uint + +const ( + // EngineTiDB stands for groups which is above `Gather`s and will be executed in TiDB layer. + EngineTiDB EngineType = 1 << iota + // EngineTiKV stands for groups which is below `Gather`s and will be executed in TiKV layer. + EngineTiKV +) + +// EngineTypeSet is the bit set of EngineTypes. +type EngineTypeSet uint + +const ( + // EngineTiDBOnly is the EngineTypeSet for EngineTiDB only. + EngineTiDBOnly = EngineTypeSet(EngineTiDB) + // EngineTiKVOnly is the EngineTypeSet for EngineTiKV only. + EngineTiKVOnly = EngineTypeSet(EngineTiKV) + // EngineAll is the EngineTypeSet for all of the EngineTypes. + EngineAll = EngineTypeSet(EngineTiDB | EngineTiKV) +) + +// Contains checks whether the EngineTypeSet contains the EngineType. +func (e EngineTypeSet) Contains(tp EngineType) bool { + return uint(e)&uint(tp) != 0 +} + +// String implements fmt.Stringer interface. +func (e EngineType) String() string { + switch e { + case EngineTiDB: + return "EngineTiDB" + case EngineTiKV: + return "EngineTiKV" + } + return "UnknownEngineType" +} + +// Group is short for expression Group, which is used to store all the +// logically equivalent expressions. It's a set of GroupExpr. 
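+//
+// For example (sketch only; sctx and schema are placeholders in the spirit of
+// the tests in this package):
+//
+//	g := NewGroupWithSchema(NewGroupExpr(plannercore.LogicalLimit{}.Init(sctx)), schema)
+//	g.Insert(NewGroupExpr(plannercore.LogicalProjection{}.Init(sctx))) // an equivalent expression joins the same Group
+//	elem := g.GetFirstElem(OperandLimit)                               // first GroupExpr whose operand is Limit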
+type Group struct { + Equivalents *list.List + + FirstExpr map[Operand]*list.Element + Fingerprints map[string]*list.Element + + Explored bool + SelfFingerprint string + + ImplMap map[string]Implementation + Prop *property.LogicalProperty + + EngineType EngineType + + //hasBuiltKeyInfo indicates whether this group has called `BuildKeyInfo`. + // BuildKeyInfo is lazily called when a rule needs information of + // unique key or maxOneRow (in LogicalProp). For each Group, we only need + // to collect these information once. + hasBuiltKeyInfo bool +} + +// NewGroupWithSchema creates a new Group with given schema. +func NewGroupWithSchema(e *GroupExpr, s *expression.Schema) *Group { + prop := &property.LogicalProperty{Schema: expression.NewSchema(s.Columns...)} + g := &Group{ + Equivalents: list.New(), + Fingerprints: make(map[string]*list.Element), + FirstExpr: make(map[Operand]*list.Element), + ImplMap: make(map[string]Implementation), + Prop: prop, + EngineType: EngineTiDB, + } + g.Insert(e) + return g +} + +// SetEngineType sets the engine type of the group. +func (g *Group) SetEngineType(e EngineType) *Group { + g.EngineType = e + return g +} + +// FingerPrint returns the unique fingerprint of the Group. +func (g *Group) FingerPrint() string { + if g.SelfFingerprint == "" { + g.SelfFingerprint = fmt.Sprintf("%p", g) + } + return g.SelfFingerprint +} + +// Insert a nonexistent Group expression. +func (g *Group) Insert(e *GroupExpr) bool { + if e == nil || g.Exists(e) { + return false + } + + operand := GetOperand(e.ExprNode) + var newEquiv *list.Element + mark, hasMark := g.FirstExpr[operand] + if hasMark { + newEquiv = g.Equivalents.InsertAfter(e, mark) + } else { + newEquiv = g.Equivalents.PushBack(e) + g.FirstExpr[operand] = newEquiv + } + g.Fingerprints[e.FingerPrint()] = newEquiv + e.Group = g + return true +} + +// Delete an existing Group expression. +func (g *Group) Delete(e *GroupExpr) { + fingerprint := e.FingerPrint() + equiv, ok := g.Fingerprints[fingerprint] + if !ok { + return // Can not find the target GroupExpr. + } + + operand := GetOperand(equiv.Value.(*GroupExpr).ExprNode) + if g.FirstExpr[operand] == equiv { + // The target GroupExpr is the first Element of the same Operand. + // We need to change the FirstExpr to the next Expr, or delete the FirstExpr. + nextElem := equiv.Next() + if nextElem != nil && GetOperand(nextElem.Value.(*GroupExpr).ExprNode) == operand { + g.FirstExpr[operand] = nextElem + } else { + // There is no more GroupExpr of the Operand, so we should + // delete the FirstExpr of this Operand. + delete(g.FirstExpr, operand) + } + } + + g.Equivalents.Remove(equiv) + delete(g.Fingerprints, fingerprint) + e.Group = nil +} + +// DeleteAll deletes all of the GroupExprs in the Group. +func (g *Group) DeleteAll() { + g.Equivalents = list.New() + g.Fingerprints = make(map[string]*list.Element) + g.FirstExpr = make(map[Operand]*list.Element) + g.SelfFingerprint = "" +} + +// Exists checks whether a Group expression existed in a Group. +func (g *Group) Exists(e *GroupExpr) bool { + _, ok := g.Fingerprints[e.FingerPrint()] + return ok +} + +// GetFirstElem returns the first Group expression which matches the Operand. +// Return a nil pointer if there isn't. +func (g *Group) GetFirstElem(operand Operand) *list.Element { + if operand == OperandAny { + return g.Equivalents.Front() + } + return g.FirstExpr[operand] +} + +// GetImpl returns the best Implementation satisfy the physical property. 
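+//
+// Sketch of the intended lookup-or-compute flow (buildImpl and prop are
+// placeholders, not part of this package):
+//
+//	if cached := g.GetImpl(prop); cached != nil {
+//		return cached // already implemented for this physical property
+//	}
+//	impl := buildImpl(...) // placeholder for the actual implementation step
+//	g.InsertImpl(prop, impl)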
+func (g *Group) GetImpl(prop *property.PhysicalProperty) Implementation { + key := prop.HashCode() + return g.ImplMap[string(key)] +} + +// InsertImpl inserts the best Implementation satisfy the physical property. +func (g *Group) InsertImpl(prop *property.PhysicalProperty, impl Implementation) { + key := prop.HashCode() + g.ImplMap[string(key)] = impl +} + +// Convert2GroupExpr converts a logical plan to a GroupExpr. +func Convert2GroupExpr(node plannercore.LogicalPlan) *GroupExpr { + e := NewGroupExpr(node) + e.Children = make([]*Group, 0, len(node.Children())) + for _, child := range node.Children() { + childGroup := Convert2Group(child) + e.Children = append(e.Children, childGroup) + } + return e +} + +// Convert2Group converts a logical plan to a Group. +func Convert2Group(node plannercore.LogicalPlan) *Group { + e := Convert2GroupExpr(node) + g := NewGroupWithSchema(e, node.Schema()) + // Stats property for `Group` would be computed after exploration phase. + return g +} + +// BuildKeyInfo recursively builds UniqueKey and MaxOneRow info in the LogicalProperty. +func (g *Group) BuildKeyInfo() { + if g.hasBuiltKeyInfo { + return + } + g.hasBuiltKeyInfo = true + + e := g.Equivalents.Front().Value.(*GroupExpr) + childSchema := make([]*expression.Schema, len(e.Children)) + for i := range e.Children { + e.Children[i].BuildKeyInfo() + childSchema[i] = e.Children[i].Prop.Schema + } + if len(childSchema) == 1 { + // For UnaryPlan(such as Selection, Limit ...), we can set the child's unique key as its unique key. + // If the GroupExpr is a schemaProducer, schema.Keys will be reset below in `BuildKeyInfo()`. + g.Prop.Schema.Keys = childSchema[0].Keys + } + e.ExprNode.BuildKeyInfo(g.Prop.Schema, childSchema) +} diff --git a/planner/memo/group_expr.go b/planner/memo/group_expr.go new file mode 100644 index 0000000..86d63ff --- /dev/null +++ b/planner/memo/group_expr.go @@ -0,0 +1,65 @@ +// Copyright 2018 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package memo + +import ( + "fmt" + + "github.com/pingcap/tidb/expression" + plannercore "github.com/pingcap/tidb/planner/core" +) + +// GroupExpr is used to store all the logically equivalent expressions which +// have the same root operator. Different from a normal expression, the +// Children of a Group expression are expression Groups, not expressions. +// Another property of Group expression is that the child Group references will +// never be changed once the Group expression is created. +type GroupExpr struct { + ExprNode plannercore.LogicalPlan + Children []*Group + Explored bool + Group *Group + + selfFingerprint string +} + +// NewGroupExpr creates a GroupExpr based on a logical plan node. +func NewGroupExpr(node plannercore.LogicalPlan) *GroupExpr { + return &GroupExpr{ + ExprNode: node, + Children: nil, + Explored: false, + } +} + +// FingerPrint gets the unique fingerprint of the Group expression. 
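+//
+// The fingerprint is the plan ID followed by the fingerprints of the child
+// Groups, so a fresh expression with no children and an unset plan ID yields
+// "0", as exercised in group_expr_test.go:
+//
+//	expr := NewGroupExpr(&plannercore.LogicalLimit{})
+//	fp := expr.FingerPrint() // "0"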
+func (e *GroupExpr) FingerPrint() string { + if e.selfFingerprint == "" { + e.selfFingerprint = fmt.Sprintf("%v", e.ExprNode.ID()) + for i := range e.Children { + e.selfFingerprint += e.Children[i].FingerPrint() + } + } + return e.selfFingerprint +} + +// SetChildren sets Children of the GroupExpr. +func (e *GroupExpr) SetChildren(children ...*Group) { + e.Children = children +} + +// Schema gets GroupExpr's Schema. +func (e *GroupExpr) Schema() *expression.Schema { + return e.Group.Prop.Schema +} diff --git a/planner/memo/group_expr_test.go b/planner/memo/group_expr_test.go new file mode 100644 index 0000000..0a68e1e --- /dev/null +++ b/planner/memo/group_expr_test.go @@ -0,0 +1,35 @@ +// Copyright 2018 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package memo + +import ( + . "github.com/pingcap/check" + plannercore "github.com/pingcap/tidb/planner/core" +) + +func (s *testMemoSuite) TestNewGroupExpr(c *C) { + p := &plannercore.LogicalLimit{} + expr := NewGroupExpr(p) + c.Assert(expr.ExprNode, Equals, p) + c.Assert(expr.Children, IsNil) + c.Assert(expr.Explored, IsFalse) +} + +func (s *testMemoSuite) TestGroupExprFingerprint(c *C) { + p := &plannercore.LogicalLimit{} + expr := NewGroupExpr(p) + + // we haven't set the id of the created LogicalLimit, so the result is 0. + c.Assert(expr.FingerPrint(), Equals, "0") +} diff --git a/planner/memo/group_test.go b/planner/memo/group_test.go new file mode 100644 index 0000000..10276f9 --- /dev/null +++ b/planner/memo/group_test.go @@ -0,0 +1,212 @@ +// Copyright 2018 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package memo + +import ( + "context" + "testing" + + . 
"github.com/pingcap/check" + "github.com/pingcap/tidb/expression" + "github.com/pingcap/tidb/infoschema" + "github.com/pingcap/tidb/parser" + "github.com/pingcap/tidb/parser/model" + plannercore "github.com/pingcap/tidb/planner/core" + "github.com/pingcap/tidb/planner/property" + "github.com/pingcap/tidb/sessionctx" + "github.com/pingcap/tidb/util/testleak" +) + +func TestT(t *testing.T) { + CustomVerboseFlag = true + TestingT(t) +} + +var _ = Suite(&testMemoSuite{}) + +type testMemoSuite struct { + *parser.Parser + is infoschema.InfoSchema + schema *expression.Schema + sctx sessionctx.Context +} + +func (s *testMemoSuite) SetUpSuite(c *C) { + testleak.BeforeTest() + s.is = infoschema.MockInfoSchema([]*model.TableInfo{plannercore.MockSignedTable()}) + s.sctx = plannercore.MockContext() + s.Parser = parser.New() + s.schema = expression.NewSchema() +} + +func (s *testMemoSuite) TearDownSuite(c *C) { + testleak.AfterTest(c)() +} + +func (s *testMemoSuite) TestNewGroup(c *C) { + p := &plannercore.LogicalLimit{} + expr := NewGroupExpr(p) + g := NewGroupWithSchema(expr, s.schema) + + c.Assert(g.Equivalents.Len(), Equals, 1) + c.Assert(g.Equivalents.Front().Value.(*GroupExpr), Equals, expr) + c.Assert(len(g.Fingerprints), Equals, 1) + c.Assert(g.Explored, IsFalse) +} + +func (s *testMemoSuite) TestGroupInsert(c *C) { + p := &plannercore.LogicalLimit{} + expr := NewGroupExpr(p) + g := NewGroupWithSchema(expr, s.schema) + c.Assert(g.Insert(expr), IsFalse) + expr.selfFingerprint = "1" + c.Assert(g.Insert(expr), IsTrue) +} + +func (s *testMemoSuite) TestGroupDelete(c *C) { + p := &plannercore.LogicalLimit{} + expr := NewGroupExpr(p) + g := NewGroupWithSchema(expr, s.schema) + c.Assert(g.Equivalents.Len(), Equals, 1) + + g.Delete(expr) + c.Assert(g.Equivalents.Len(), Equals, 0) + + g.Delete(expr) + c.Assert(g.Equivalents.Len(), Equals, 0) +} + +func (s *testMemoSuite) TestGroupDeleteAll(c *C) { + expr := NewGroupExpr(plannercore.LogicalSelection{}.Init(s.sctx)) + g := NewGroupWithSchema(expr, s.schema) + c.Assert(g.Insert(NewGroupExpr(plannercore.LogicalLimit{}.Init(s.sctx))), IsTrue) + c.Assert(g.Insert(NewGroupExpr(plannercore.LogicalProjection{}.Init(s.sctx))), IsTrue) + c.Assert(g.Equivalents.Len(), Equals, 3) + c.Assert(g.GetFirstElem(OperandProjection), NotNil) + c.Assert(g.Exists(expr), IsTrue) + + g.DeleteAll() + c.Assert(g.Equivalents.Len(), Equals, 0) + c.Assert(g.GetFirstElem(OperandProjection), IsNil) + c.Assert(g.Exists(expr), IsFalse) +} + +func (s *testMemoSuite) TestGroupExists(c *C) { + p := &plannercore.LogicalLimit{} + expr := NewGroupExpr(p) + g := NewGroupWithSchema(expr, s.schema) + c.Assert(g.Exists(expr), IsTrue) + + g.Delete(expr) + c.Assert(g.Exists(expr), IsFalse) +} + +func (s *testMemoSuite) TestGroupGetFirstElem(c *C) { + expr0 := NewGroupExpr(plannercore.LogicalProjection{}.Init(s.sctx)) + expr1 := NewGroupExpr(plannercore.LogicalLimit{}.Init(s.sctx)) + expr2 := NewGroupExpr(plannercore.LogicalProjection{}.Init(s.sctx)) + expr3 := NewGroupExpr(plannercore.LogicalLimit{}.Init(s.sctx)) + expr4 := NewGroupExpr(plannercore.LogicalProjection{}.Init(s.sctx)) + + g := NewGroupWithSchema(expr0, s.schema) + g.Insert(expr1) + g.Insert(expr2) + g.Insert(expr3) + g.Insert(expr4) + + c.Assert(g.GetFirstElem(OperandProjection).Value.(*GroupExpr), Equals, expr0) + c.Assert(g.GetFirstElem(OperandLimit).Value.(*GroupExpr), Equals, expr1) + c.Assert(g.GetFirstElem(OperandAny).Value.(*GroupExpr), Equals, expr0) +} + +type fakeImpl struct { + plan plannercore.PhysicalPlan +} + +func (impl 
*fakeImpl) CalcCost(float64, ...Implementation) float64 { return 0 } +func (impl *fakeImpl) SetCost(float64) {} +func (impl *fakeImpl) GetCost() float64 { return 0 } +func (impl *fakeImpl) GetPlan() plannercore.PhysicalPlan { return impl.plan } +func (impl *fakeImpl) AttachChildren(...Implementation) Implementation { return nil } +func (impl *fakeImpl) ScaleCostLimit(float64) float64 { return 0 } +func (s *testMemoSuite) TestGetInsertGroupImpl(c *C) { + g := NewGroupWithSchema(NewGroupExpr(plannercore.LogicalLimit{}.Init(s.sctx)), s.schema) + emptyProp := &property.PhysicalProperty{} + orderProp := &property.PhysicalProperty{Items: []property.Item{{Col: &expression.Column{}}}} + + impl := g.GetImpl(emptyProp) + c.Assert(impl, IsNil) + + impl = &fakeImpl{plan: &plannercore.PhysicalLimit{}} + g.InsertImpl(emptyProp, impl) + + newImpl := g.GetImpl(emptyProp) + c.Assert(newImpl, Equals, impl) + + newImpl = g.GetImpl(orderProp) + c.Assert(newImpl, IsNil) +} + +func (s *testMemoSuite) TestFirstElemAfterDelete(c *C) { + oldExpr := NewGroupExpr(plannercore.LogicalLimit{}.Init(s.sctx)) + g := NewGroupWithSchema(oldExpr, s.schema) + newExpr := NewGroupExpr(plannercore.LogicalLimit{}.Init(s.sctx)) + g.Insert(newExpr) + c.Assert(g.GetFirstElem(OperandLimit), NotNil) + c.Assert(g.GetFirstElem(OperandLimit).Value, Equals, oldExpr) + g.Delete(oldExpr) + c.Assert(g.GetFirstElem(OperandLimit), NotNil) + c.Assert(g.GetFirstElem(OperandLimit).Value, Equals, newExpr) + g.Delete(newExpr) + c.Assert(g.GetFirstElem(OperandLimit), IsNil) +} + +func (s *testMemoSuite) TestBuildKeyInfo(c *C) { + // case 1: primary key has constant constraint + stmt1, err := s.ParseOneStmt("select a from t where a = 10", "", "") + c.Assert(err, IsNil) + p1, _, err := plannercore.BuildLogicalPlan(context.Background(), s.sctx, stmt1, s.is) + c.Assert(err, IsNil) + logic1, ok := p1.(plannercore.LogicalPlan) + c.Assert(ok, IsTrue) + group1 := Convert2Group(logic1) + group1.BuildKeyInfo() + c.Assert(len(group1.Prop.Schema.Keys), Equals, 1) + + // case 2: group by column is key + stmt2, err := s.ParseOneStmt("select b, sum(a) from t group by b", "", "") + c.Assert(err, IsNil) + p2, _, err := plannercore.BuildLogicalPlan(context.Background(), s.sctx, stmt2, s.is) + c.Assert(err, IsNil) + logic2, ok := p2.(plannercore.LogicalPlan) + c.Assert(ok, IsTrue) + group2 := Convert2Group(logic2) + group2.BuildKeyInfo() + c.Assert(len(group2.Prop.Schema.Keys), Equals, 1) + + // case 3: build key info for new Group + newSel := plannercore.LogicalSelection{}.Init(s.sctx) + newExpr1 := NewGroupExpr(newSel) + newExpr1.SetChildren(group2) + newGroup1 := NewGroupWithSchema(newExpr1, group2.Prop.Schema) + newGroup1.BuildKeyInfo() + c.Assert(len(newGroup1.Prop.Schema.Keys), Equals, 1) + + // case 4: build maxOneRow for new Group + newLimit := plannercore.LogicalLimit{Count: 1}.Init(s.sctx) + newExpr2 := NewGroupExpr(newLimit) + newExpr2.SetChildren(group2) + newGroup2 := NewGroupWithSchema(newExpr2, group2.Prop.Schema) + newGroup2.BuildKeyInfo() +} diff --git a/planner/memo/implementation.go b/planner/memo/implementation.go new file mode 100644 index 0000000..dd5a430 --- /dev/null +++ b/planner/memo/implementation.go @@ -0,0 +1,35 @@ +// Copyright 2018 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package memo + +import ( + plannercore "github.com/pingcap/tidb/planner/core" +) + +// Implementation defines the interface for cost of physical plan. +type Implementation interface { + CalcCost(outCount float64, children ...Implementation) float64 + SetCost(cost float64) + GetCost() float64 + GetPlan() plannercore.PhysicalPlan + + // AttachChildren is used to attach children implementations and returns it self. + AttachChildren(children ...Implementation) Implementation + + // ScaleCostLimit scales costLimit by the Implementation's concurrency factor. + // Implementation like TiKVSingleGather may divide the cost by its scan concurrency, + // so when we pass the costLimit for pruning the search space, we have to scale + // the costLimit by its concurrency factor. + ScaleCostLimit(costLimit float64) float64 +} diff --git a/planner/memo/pattern.go b/planner/memo/pattern.go new file mode 100644 index 0000000..5cb858a --- /dev/null +++ b/planner/memo/pattern.go @@ -0,0 +1,146 @@ +// Copyright 2018 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package memo + +import ( + plannercore "github.com/pingcap/tidb/planner/core" +) + +// Operand is the node of a pattern tree, it represents a logical expression operator. +// Different from logical plan operator which holds the full information about an expression +// operator, Operand only stores the type information. +// An Operand may correspond to a concrete logical plan operator, or it can has special meaning, +// e.g, a placeholder for any logical plan operator. +type Operand int + +const ( + // OperandAny is a placeholder for any Operand. + OperandAny Operand = iota + // OperandJoin is the operand for LogicalJoin. + OperandJoin + // OperandAggregation is the operand for LogicalAggregation. + OperandAggregation + // OperandProjection is the operand for LogicalProjection. + OperandProjection + // OperandSelection is the operand for LogicalSelection. + OperandSelection + // OperandTableDual is the operand for LogicalTableDual. + OperandTableDual + // OperandDataSource is the operand for DataSource. + OperandDataSource + // OperandUnionScan is the operand for LogicalUnionScan. + OperandUnionScan + // OperandSort is the operand for LogicalSort. + OperandSort + // OperandTopN is the operand for LogicalTopN. + OperandTopN + // OperandLimit is the operand for LogicalLimit. + OperandLimit + // OperandTiKVSingleGather is the operand for TiKVSingleGather. + OperandTiKVSingleGather + // OperandTableScan is the operand for TableScan. + OperandTableScan + // OperandIndexScan is the operand for IndexScan. + OperandIndexScan + // OperandShow is the operand for Show. + OperandShow + // OperandUnsupported is the operand for unsupported operators. 
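+	// Any logical plan type without a dedicated case in GetOperand below maps
+	// to OperandUnsupported.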
+ OperandUnsupported +) + +// GetOperand maps logical plan operator to Operand. +func GetOperand(p plannercore.LogicalPlan) Operand { + switch p.(type) { + case *plannercore.LogicalJoin: + return OperandJoin + case *plannercore.LogicalAggregation: + return OperandAggregation + case *plannercore.LogicalProjection: + return OperandProjection + case *plannercore.LogicalSelection: + return OperandSelection + case *plannercore.LogicalTableDual: + return OperandTableDual + case *plannercore.DataSource: + return OperandDataSource + case *plannercore.LogicalUnionScan: + return OperandUnionScan + case *plannercore.LogicalSort: + return OperandSort + case *plannercore.LogicalTopN: + return OperandTopN + case *plannercore.LogicalLimit: + return OperandLimit + case *plannercore.TiKVSingleGather: + return OperandTiKVSingleGather + case *plannercore.LogicalTableScan: + return OperandTableScan + case *plannercore.LogicalIndexScan: + return OperandIndexScan + case *plannercore.LogicalShow: + return OperandShow + default: + return OperandUnsupported + } +} + +// Match checks if current Operand matches specified one. +func (o Operand) Match(t Operand) bool { + if o == OperandAny || t == OperandAny { + return true + } + if o == t { + return true + } + return false +} + +// Pattern defines the match pattern for a rule. It's a tree-like structure +// which is a piece of a logical expression. Each node in the Pattern tree is +// defined by an Operand and EngineType pair. +type Pattern struct { + Operand + EngineTypeSet + Children []*Pattern +} + +// Match checks whether the EngineTypeSet contains the given EngineType +// and whether the two Operands match. +func (p *Pattern) Match(o Operand, e EngineType) bool { + return p.EngineTypeSet.Contains(e) && p.Operand.Match(o) +} + +// MatchOperandAny checks whether the pattern's Operand is OperandAny +// and the EngineTypeSet contains the given EngineType. +func (p *Pattern) MatchOperandAny(e EngineType) bool { + return p.EngineTypeSet.Contains(e) && p.Operand == OperandAny +} + +// NewPattern creates a pattern node according to the Operand and EngineType. +func NewPattern(operand Operand, engineTypeSet EngineTypeSet) *Pattern { + return &Pattern{Operand: operand, EngineTypeSet: engineTypeSet} +} + +// SetChildren sets the Children information for a pattern node. +func (p *Pattern) SetChildren(children ...*Pattern) { + p.Children = children +} + +// BuildPattern builds a Pattern from Operand, EngineType and child Patterns. +// Used in GetPattern() of Transformation interface to generate a Pattern. +func BuildPattern(operand Operand, engineTypeSet EngineTypeSet, children ...*Pattern) *Pattern { + p := &Pattern{Operand: operand, EngineTypeSet: engineTypeSet} + p.Children = children + return p +} diff --git a/planner/memo/pattern_test.go b/planner/memo/pattern_test.go new file mode 100644 index 0000000..fb053d9 --- /dev/null +++ b/planner/memo/pattern_test.go @@ -0,0 +1,78 @@ +// Copyright 2018 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package memo + +import ( + . 
"github.com/pingcap/check" + plannercore "github.com/pingcap/tidb/planner/core" +) + +func (s *testMemoSuite) TestGetOperand(c *C) { + c.Assert(GetOperand(&plannercore.LogicalJoin{}), Equals, OperandJoin) + c.Assert(GetOperand(&plannercore.LogicalAggregation{}), Equals, OperandAggregation) + c.Assert(GetOperand(&plannercore.LogicalProjection{}), Equals, OperandProjection) + c.Assert(GetOperand(&plannercore.LogicalSelection{}), Equals, OperandSelection) + c.Assert(GetOperand(&plannercore.LogicalTableDual{}), Equals, OperandTableDual) + c.Assert(GetOperand(&plannercore.DataSource{}), Equals, OperandDataSource) + c.Assert(GetOperand(&plannercore.LogicalUnionScan{}), Equals, OperandUnionScan) + c.Assert(GetOperand(&plannercore.LogicalSort{}), Equals, OperandSort) + c.Assert(GetOperand(&plannercore.LogicalTopN{}), Equals, OperandTopN) + c.Assert(GetOperand(&plannercore.LogicalLimit{}), Equals, OperandLimit) +} + +func (s *testMemoSuite) TestOperandMatch(c *C) { + c.Assert(OperandAny.Match(OperandLimit), IsTrue) + c.Assert(OperandAny.Match(OperandSelection), IsTrue) + c.Assert(OperandAny.Match(OperandJoin), IsTrue) + c.Assert(OperandAny.Match(OperandAny), IsTrue) + + c.Assert(OperandLimit.Match(OperandAny), IsTrue) + c.Assert(OperandSelection.Match(OperandAny), IsTrue) + c.Assert(OperandJoin.Match(OperandAny), IsTrue) + c.Assert(OperandAny.Match(OperandAny), IsTrue) + + c.Assert(OperandLimit.Match(OperandLimit), IsTrue) + c.Assert(OperandSelection.Match(OperandSelection), IsTrue) + c.Assert(OperandJoin.Match(OperandJoin), IsTrue) + c.Assert(OperandAny.Match(OperandAny), IsTrue) + + c.Assert(OperandLimit.Match(OperandSelection), IsFalse) + c.Assert(OperandLimit.Match(OperandJoin), IsFalse) +} + +func (s *testMemoSuite) TestNewPattern(c *C) { + p := NewPattern(OperandAny, EngineAll) + c.Assert(p.Operand, Equals, OperandAny) + c.Assert(p.Children, IsNil) + + p = NewPattern(OperandJoin, EngineAll) + c.Assert(p.Operand, Equals, OperandJoin) + c.Assert(p.Children, IsNil) +} + +func (s *testMemoSuite) TestPatternSetChildren(c *C) { + p := NewPattern(OperandAny, EngineAll) + p.SetChildren(NewPattern(OperandLimit, EngineAll)) + c.Assert(len(p.Children), Equals, 1) + c.Assert(p.Children[0].Operand, Equals, OperandLimit) + c.Assert(p.Children[0].Children, IsNil) + + p = NewPattern(OperandJoin, EngineAll) + p.SetChildren(NewPattern(OperandProjection, EngineAll), NewPattern(OperandSelection, EngineAll)) + c.Assert(len(p.Children), Equals, 2) + c.Assert(p.Children[0].Operand, Equals, OperandProjection) + c.Assert(p.Children[0].Children, IsNil) + c.Assert(p.Children[1].Operand, Equals, OperandSelection) + c.Assert(p.Children[1].Children, IsNil) +} diff --git a/planner/optimize.go b/planner/optimize.go new file mode 100644 index 0000000..d9908df --- /dev/null +++ b/planner/optimize.go @@ -0,0 +1,60 @@ +// Copyright 2018 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package planner + +import ( + "context" + + "github.com/pingcap/tidb/infoschema" + "github.com/pingcap/tidb/parser/ast" + "github.com/pingcap/tidb/planner/cascades" + plannercore "github.com/pingcap/tidb/planner/core" + "github.com/pingcap/tidb/sessionctx" + "github.com/pingcap/tidb/types" +) + +// Optimize does optimization and creates a Plan. +// The node must be prepared first. +func Optimize(ctx context.Context, sctx sessionctx.Context, node ast.Node, is infoschema.InfoSchema) (plannercore.Plan, types.NameSlice, error) { + sctx.PrepareTxnFuture(ctx) + + // build logical plan + sctx.GetSessionVars().PlanID = 0 + sctx.GetSessionVars().PlanColumnID = 0 + builder := plannercore.NewPlanBuilder(sctx, is) + p, err := builder.Build(ctx, node) + if err != nil { + return nil, nil, err + } + + names := p.OutputNames() + + // Handle the non-logical plan statement. + logic, isLogicalPlan := p.(plannercore.LogicalPlan) + if !isLogicalPlan { + return p, names, nil + } + + // Handle the logical plan statement, use cascades planner if enabled. + if sctx.GetSessionVars().EnableCascadesPlanner { + finalPlan, err := cascades.DefaultOptimizer.FindBestPlan(sctx, logic) + return finalPlan, names, err + } + finalPlan, err := plannercore.DoOptimize(ctx, builder.GetOptFlag(), logic) + return finalPlan, names, err +} + +func init() { + plannercore.OptimizeAstNode = Optimize +} diff --git a/planner/property/logical_property.go b/planner/property/logical_property.go new file mode 100644 index 0000000..a770e83 --- /dev/null +++ b/planner/property/logical_property.go @@ -0,0 +1,26 @@ +// Copyright 2018 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package property + +import ( + "github.com/pingcap/tidb/expression" +) + +// LogicalProperty stands for logical properties such as schema of expression, +// or statistics of columns in schema for output of Group. +// All group expressions in a group share same logical property. +type LogicalProperty struct { + Stats *StatsInfo + Schema *expression.Schema +} diff --git a/planner/property/physical_property.go b/planner/property/physical_property.go new file mode 100644 index 0000000..38da639 --- /dev/null +++ b/planner/property/physical_property.go @@ -0,0 +1,158 @@ +// Copyright 2018 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package property + +import ( + "fmt" + + "github.com/pingcap/tidb/expression" + "github.com/pingcap/tidb/util/codec" +) + +// Item wraps the column and its order. +type Item struct { + Col *expression.Column + Desc bool +} + +// PhysicalProperty stands for the required physical property by parents. +// It contains the orders and the task types. 
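+//
+// A typical required property is built from the ordering columns and the task
+// type, e.g. (cols and the expected count below are placeholders):
+//
+//	prop := NewPhysicalProperty(RootTaskType, cols, false, math.MaxFloat64, false)
+//	key := prop.HashCode() // computed lazily; used as the Group.ImplMap key in the memo package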
+type PhysicalProperty struct { + Items []Item + + // TaskTp means the type of task that an operator requires. + // + // It needs to be specified because two different tasks can't be compared + // with cost directly. e.g. If a copTask takes less cost than a rootTask, + // we can't sure that we must choose the former one. Because the copTask + // must be finished and increase its cost in sometime, but we can't make + // sure the finishing time. So the best way to let the comparison fair is + // to add TaskType to required property. + TaskTp TaskType + + // ExpectedCnt means this operator may be closed after fetching ExpectedCnt + // records. + ExpectedCnt float64 + + // hashcode stores the hash code of a PhysicalProperty, will be lazily + // calculated when function "HashCode()" being called. + hashcode []byte + + // whether need to enforce property. + Enforced bool +} + +// NewPhysicalProperty builds property from columns. +func NewPhysicalProperty(taskTp TaskType, cols []*expression.Column, desc bool, expectCnt float64, enforced bool) *PhysicalProperty { + return &PhysicalProperty{ + Items: ItemsFromCols(cols, desc), + TaskTp: taskTp, + ExpectedCnt: expectCnt, + Enforced: enforced, + } +} + +// ItemsFromCols builds property items from columns. +func ItemsFromCols(cols []*expression.Column, desc bool) []Item { + items := make([]Item, 0, len(cols)) + for _, col := range cols { + items = append(items, Item{Col: col, Desc: desc}) + } + return items +} + +// AllColsFromSchema checks whether all the columns needed by this physical +// property can be found in the given schema. +func (p *PhysicalProperty) AllColsFromSchema(schema *expression.Schema) bool { + for _, col := range p.Items { + if schema.ColumnIndex(col.Col) == -1 { + return false + } + } + return true +} + +// IsPrefix checks whether the order property is the prefix of another. +func (p *PhysicalProperty) IsPrefix(prop *PhysicalProperty) bool { + if len(p.Items) > len(prop.Items) { + return false + } + for i := range p.Items { + if !p.Items[i].Col.Equal(nil, prop.Items[i].Col) || p.Items[i].Desc != prop.Items[i].Desc { + return false + } + } + return true +} + +// IsEmpty checks whether the order property is empty. +func (p *PhysicalProperty) IsEmpty() bool { + return len(p.Items) == 0 +} + +// HashCode calculates hash code for a PhysicalProperty object. +func (p *PhysicalProperty) HashCode() []byte { + if p.hashcode != nil { + return p.hashcode + } + hashcodeSize := 8 + 8 + 8 + (16+8)*len(p.Items) + 8 + p.hashcode = make([]byte, 0, hashcodeSize) + if p.Enforced { + p.hashcode = codec.EncodeInt(p.hashcode, 1) + } else { + p.hashcode = codec.EncodeInt(p.hashcode, 0) + } + p.hashcode = codec.EncodeInt(p.hashcode, int64(p.TaskTp)) + p.hashcode = codec.EncodeFloat(p.hashcode, p.ExpectedCnt) + for _, item := range p.Items { + p.hashcode = append(p.hashcode, item.Col.HashCode(nil)...) + if item.Desc { + p.hashcode = codec.EncodeInt(p.hashcode, 1) + } else { + p.hashcode = codec.EncodeInt(p.hashcode, 0) + } + } + return p.hashcode +} + +// String implements fmt.Stringer interface. Just for test. +func (p *PhysicalProperty) String() string { + return fmt.Sprintf("Prop{cols: %v, TaskTp: %s, expectedCount: %v}", p.Items, p.TaskTp, p.ExpectedCnt) +} + +// Clone returns a copy of PhysicalProperty. 
Currently, this function is only used to build new +// required property for children plan in `exhaustPhysicalPlans`, so we don't copy `Enforced` field +// because if `Enforced` is true, the `Items` must be empty now, this makes `Enforced` meaningless +// for children nodes. +func (p *PhysicalProperty) Clone() *PhysicalProperty { + prop := &PhysicalProperty{ + Items: p.Items, + TaskTp: p.TaskTp, + ExpectedCnt: p.ExpectedCnt, + } + return prop +} + +// AllSameOrder checks if all the items have same order. +func (p *PhysicalProperty) AllSameOrder() (bool, bool) { + if len(p.Items) == 0 { + return true, false + } + for i := 1; i < len(p.Items); i++ { + if p.Items[i].Desc != p.Items[i-1].Desc { + return false, false + } + } + return true, p.Items[0].Desc +} diff --git a/planner/property/stats_info.go b/planner/property/stats_info.go new file mode 100644 index 0000000..372e3c0 --- /dev/null +++ b/planner/property/stats_info.go @@ -0,0 +1,68 @@ +// Copyright 2018 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package property + +import ( + "fmt" + + "github.com/pingcap/tidb/statistics" +) + +// StatsInfo stores the basic information of statistics for the plan's output. It is used for cost estimation. +type StatsInfo struct { + RowCount float64 + Cardinality []float64 + + HistColl *statistics.HistColl + // StatsVersion indicates the statistics version of a table. + // If the StatsInfo is calculated using the pseudo statistics on a table, StatsVersion will be PseudoVersion. + StatsVersion uint64 +} + +// String implements fmt.Stringer interface. +func (s *StatsInfo) String() string { + return fmt.Sprintf("count %v, Cardinality %v", s.RowCount, s.Cardinality) +} + +// Count gets the RowCount in the StatsInfo. +func (s *StatsInfo) Count() int64 { + return int64(s.RowCount) +} + +// Scale receives a selectivity and multiplies it with RowCount and Cardinality. +func (s *StatsInfo) Scale(factor float64) *StatsInfo { + profile := &StatsInfo{ + RowCount: s.RowCount * factor, + Cardinality: make([]float64, len(s.Cardinality)), + HistColl: s.HistColl, + StatsVersion: s.StatsVersion, + } + for i := range profile.Cardinality { + profile.Cardinality[i] = s.Cardinality[i] * factor + } + return profile +} + +// ScaleByExpectCnt tries to Scale StatsInfo to an expectCnt which must be +// smaller than the derived cnt. +// TODO: try to use a better way to do this. +func (s *StatsInfo) ScaleByExpectCnt(expectCnt float64) *StatsInfo { + if expectCnt >= s.RowCount { + return s + } + if s.RowCount > 1.0 { // if s.RowCount is too small, it will cause overflow + return s.Scale(expectCnt / s.RowCount) + } + return s +} diff --git a/planner/property/task_type.go b/planner/property/task_type.go new file mode 100644 index 0000000..93360fc --- /dev/null +++ b/planner/property/task_type.go @@ -0,0 +1,43 @@ +// Copyright 2018 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package property + +// TaskType is the type of execution task. +type TaskType int + +const ( + // RootTaskType stands for the tasks that executed in the TiDB layer. + RootTaskType TaskType = iota + + // CopSingleReadTaskType stands for the a TableScan or IndexScan tasks + // executed in the coprocessor layer. + CopSingleReadTaskType + + // CopDoubleReadTaskType stands for the a IndexLookup tasks executed in the + // coprocessor layer. + CopDoubleReadTaskType +) + +// String implements fmt.Stringer interface. +func (t TaskType) String() string { + switch t { + case RootTaskType: + return "rootTask" + case CopSingleReadTaskType: + return "copSingleReadTask" + case CopDoubleReadTaskType: + return "copDoubleReadTask" + } + return "UnknownTaskType" +} diff --git a/planner/util/path.go b/planner/util/path.go new file mode 100644 index 0000000..46520d4 --- /dev/null +++ b/planner/util/path.go @@ -0,0 +1,102 @@ +// Copyright 2019 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package util + +import ( + "github.com/pingcap/tidb/expression" + "github.com/pingcap/tidb/parser/ast" + "github.com/pingcap/tidb/parser/model" + "github.com/pingcap/tidb/types" + "github.com/pingcap/tidb/util/ranger" +) + +// AccessPath indicates the way we access a table: by using single index, or by using multiple indexes, +// or just by using table scan. +type AccessPath struct { + Index *model.IndexInfo + FullIdxCols []*expression.Column + FullIdxColLens []int + IdxCols []*expression.Column + IdxColLens []int + Ranges []*ranger.Range + // CountAfterAccess is the row count after we apply range seek and before we use other filter to filter data. + CountAfterAccess float64 + // CountAfterIndex is the row count after we apply filters on index and before we apply the table filters. + CountAfterIndex float64 + AccessConds []expression.Expression + EqCondCount int + EqOrInCondCount int + IndexFilters []expression.Expression + TableFilters []expression.Expression + // IsTablePath indicates whether this path is table path. + IsTablePath bool + // Forced means this path is generated by `use/force index()`. + Forced bool + + IsDNFCond bool +} + +// SplitAccessCondFromFilters move the necessary filter in the form of index_col = constant to access conditions. 
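+//
+// For example, given an index on (a, b, c), eqOrInCount = 1 and TableFilters
+// {b = 1, d > 2}, the split returns access = {b = 1} and remained = {d > 2}
+// (assuming b is a full-length index column); matching stops at the first
+// index column that has no eq-to-constant filter.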
+func (path *AccessPath) SplitAccessCondFromFilters(eqOrInCount int) (access, remained []expression.Expression) { + access = make([]expression.Expression, len(path.IdxCols)-eqOrInCount) + used := make([]bool, len(path.TableFilters)) + for i := eqOrInCount; i < len(path.IdxCols); i++ { + matched := false + for j, filter := range path.TableFilters { + if used[j] || !isColEqConstant(filter, path.IdxCols[i]) { + continue + } + matched = true + access[i-eqOrInCount] = filter + if path.IdxColLens[i] == types.UnspecifiedLength { + used[j] = true + } + break + } + if !matched { + access = access[:i-eqOrInCount] + break + } + } + for i, ok := range used { + if !ok { + remained = append(remained, path.TableFilters[i]) + } + } + return access, remained +} + +// isColEqConstant checks if the expression is a eq function that one side is constant or correlated column +// and another is column. +func isColEqConstant(filter expression.Expression, col *expression.Column) bool { + f, ok := filter.(*expression.ScalarFunction) + if !ok || f.FuncName.L != ast.EQ { + return false + } + if c, ok := f.GetArgs()[0].(*expression.Column); ok { + if _, ok := f.GetArgs()[1].(*expression.Constant); ok { + if col.Equal(nil, c) { + return true + } + } + } + if c, ok := f.GetArgs()[1].(*expression.Column); ok { + if _, ok := f.GetArgs()[0].(*expression.Constant); ok { + if col.Equal(nil, c) { + return true + } + } + } + return false +} diff --git a/server/buffered_read_conn.go b/server/buffered_read_conn.go new file mode 100644 index 0000000..3eced76 --- /dev/null +++ b/server/buffered_read_conn.go @@ -0,0 +1,38 @@ +// Copyright 2017 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package server + +import ( + "bufio" + "net" +) + +const defaultReaderSize = 16 * 1024 + +// bufferedReadConn is a net.Conn compatible structure that reads from bufio.Reader. +type bufferedReadConn struct { + net.Conn + rb *bufio.Reader +} + +func (conn bufferedReadConn) Read(b []byte) (n int, err error) { + return conn.rb.Read(b) +} + +func newBufferedReadConn(conn net.Conn) *bufferedReadConn { + return &bufferedReadConn{ + Conn: conn, + rb: bufio.NewReaderSize(conn, defaultReaderSize), + } +} diff --git a/server/column.go b/server/column.go new file mode 100644 index 0000000..bb96212 --- /dev/null +++ b/server/column.go @@ -0,0 +1,83 @@ +// Copyright 2015 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package server + +import ( + "github.com/pingcap/tidb/parser/mysql" +) + +// ColumnInfo contains information of a column +type ColumnInfo struct { + Schema string + Table string + OrgTable string + Name string + OrgName string + ColumnLength uint32 + Charset uint16 + Flag uint16 + Decimal uint8 + Type uint8 + DefaultValueLength uint64 + DefaultValue []byte +} + +// Dump dumps ColumnInfo to bytes. +func (column *ColumnInfo) Dump(buffer []byte) []byte { + buffer = dumpLengthEncodedString(buffer, []byte("def")) + buffer = dumpLengthEncodedString(buffer, []byte(column.Schema)) + buffer = dumpLengthEncodedString(buffer, []byte(column.Table)) + buffer = dumpLengthEncodedString(buffer, []byte(column.OrgTable)) + buffer = dumpLengthEncodedString(buffer, []byte(column.Name)) + buffer = dumpLengthEncodedString(buffer, []byte(column.OrgName)) + + buffer = append(buffer, 0x0c) + + buffer = dumpUint16(buffer, column.Charset) + buffer = dumpUint32(buffer, column.ColumnLength) + buffer = append(buffer, dumpType(column.Type)) + buffer = dumpUint16(buffer, dumpFlag(column.Type, column.Flag)) + buffer = append(buffer, column.Decimal) + buffer = append(buffer, 0, 0) + + if column.DefaultValue != nil { + buffer = dumpUint64(buffer, uint64(len(column.DefaultValue))) + buffer = append(buffer, column.DefaultValue...) + } + + return buffer +} + +func dumpFlag(tp byte, flag uint16) uint16 { + switch tp { + case mysql.TypeSet: + return flag | uint16(mysql.SetFlag) + case mysql.TypeEnum: + return flag | uint16(mysql.EnumFlag) + default: + if mysql.HasBinaryFlag(uint(flag)) { + return flag | uint16(mysql.NotNullFlag) + } + return flag + } +} + +func dumpType(tp byte) byte { + switch tp { + case mysql.TypeSet, mysql.TypeEnum: + return mysql.TypeString + default: + return tp + } +} diff --git a/server/column_test.go b/server/column_test.go new file mode 100644 index 0000000..ebd72a9 --- /dev/null +++ b/server/column_test.go @@ -0,0 +1,52 @@ +// Copyright 2019 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package server + +import ( + . 
"github.com/pingcap/check" + "github.com/pingcap/tidb/parser/mysql" +) + +type ColumnTestSuite struct { +} + +var _ = Suite(new(ColumnTestSuite)) + +func (s ColumnTestSuite) TestDumpColumn(c *C) { + info := ColumnInfo{ + Schema: "testSchema", + Table: "testTable", + OrgTable: "testOrgTable", + Name: "testName", + OrgName: "testOrgName", + ColumnLength: 1, + Charset: 106, + Flag: 0, + Decimal: 1, + Type: 14, + DefaultValueLength: 2, + DefaultValue: []byte{5, 2}, + } + r := info.Dump(nil) + exp := []byte{0x3, 0x64, 0x65, 0x66, 0xa, 0x74, 0x65, 0x73, 0x74, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x9, 0x74, 0x65, 0x73, 0x74, 0x54, 0x61, 0x62, 0x6c, 0x65, 0xc, 0x74, 0x65, 0x73, 0x74, 0x4f, 0x72, 0x67, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x8, 0x74, 0x65, 0x73, 0x74, 0x4e, 0x61, 0x6d, 0x65, 0xb, 0x74, 0x65, 0x73, 0x74, 0x4f, 0x72, 0x67, 0x4e, 0x61, 0x6d, 0x65, 0xc, 0x6a, 0x0, 0x1, 0x0, 0x0, 0x0, 0xe, 0x0, 0x0, 0x1, 0x0, 0x0, 0x2, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x5, 0x2} + c.Assert(r, DeepEquals, exp) + + c.Assert(dumpFlag(mysql.TypeSet, 0), Equals, uint16(mysql.SetFlag)) + c.Assert(dumpFlag(mysql.TypeEnum, 0), Equals, uint16(mysql.EnumFlag)) + c.Assert(dumpFlag(mysql.TypeString, 0), Equals, uint16(0)) + + c.Assert(dumpType(mysql.TypeSet), Equals, mysql.TypeString) + c.Assert(dumpType(mysql.TypeEnum), Equals, mysql.TypeString) + c.Assert(dumpType(mysql.TypeBit), Equals, mysql.TypeBit) +} diff --git a/server/conn.go b/server/conn.go new file mode 100644 index 0000000..8f6c0a5 --- /dev/null +++ b/server/conn.go @@ -0,0 +1,1108 @@ +// Copyright 2013 The Go-MySQL-Driver Authors. All rights reserved. +// +// This Source Code Form is subject to the terms of the Mozilla Public +// License, v. 2.0. If a copy of the MPL was not distributed with this file, +// You can obtain one at http://mozilla.org/MPL/2.0/. + +// The MIT License (MIT) +// +// Copyright (c) 2014 wandoulabs +// Copyright (c) 2014 siddontang +// +// Permission is hereby granted, free of charge, to any person obtaining a copy of +// this software and associated documentation files (the "Software"), to deal in +// the Software without restriction, including without limitation the rights to +// use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of +// the Software, and to permit persons to whom the Software is furnished to do so, +// subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in all +// copies or substantial portions of the Software. + +// Copyright 2015 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package server + +import ( + "bytes" + "context" + "crypto/tls" + "encoding/binary" + "fmt" + "io" + "net" + "runtime" + "strconv" + "strings" + "sync/atomic" + "time" + + "github.com/pingcap/errors" + "github.com/pingcap/failpoint" + "github.com/pingcap/tidb/executor" + "github.com/pingcap/tidb/kv" + "github.com/pingcap/tidb/parser/mysql" + "github.com/pingcap/tidb/parser/terror" + "github.com/pingcap/tidb/sessionctx/variable" + "github.com/pingcap/tidb/util/arena" + "github.com/pingcap/tidb/util/chunk" + "github.com/pingcap/tidb/util/hack" + "github.com/pingcap/tidb/util/logutil" + "github.com/pingcap/tidb/util/sqlexec" + "go.uber.org/zap" +) + +const ( + connStatusDispatching int32 = iota + connStatusReading + connStatusShutdown // Closed by server. + connStatusWaitShutdown // Notified by server to close. +) + +// newClientConn creates a *clientConn object. +func newClientConn(s *Server) *clientConn { + return &clientConn{ + server: s, + connectionID: atomic.AddUint32(&baseConnID, 1), + collation: mysql.DefaultCollationID, + alloc: arena.NewAllocator(32 * 1024), + status: connStatusDispatching, + } +} + +// clientConn represents a connection between server and client, it maintains connection specific state, +// handles client query. +type clientConn struct { + pkt *packetIO // a helper to read and write data in packet format. + bufReadConn *bufferedReadConn // a buffered-read net.Conn or buffered-read tls.Conn. + tlsConn *tls.Conn // TLS connection, nil if not TLS. + server *Server // a reference of server instance. + capability uint32 // client capability affects the way server handles client request. + connectionID uint32 // atomically allocated by a global variable, unique in process scope. + user string // user of the client. + dbname string // default database name. + salt []byte // random bytes used for authentication. + alloc arena.Allocator // an memory allocator for reducing memory allocation. + lastPacket []byte // latest sql query string, currently used for logging error. + ctx QueryCtx // an interface to execute sql statements. + attrs map[string]string // attributes parsed from client handshake response, not used for now. + peerHost string // peer host + peerPort string // peer port + status int32 // dispatching/reading/shutdown/waitshutdown + lastCode uint16 // last error code + collation uint8 // collation used by client, may be different from the collation used by database. +} + +func (cc *clientConn) String() string { + collationStr := mysql.Collations[cc.collation] + return fmt.Sprintf("id:%d, addr:%s status:%b, collation:%s, user:%s", + cc.connectionID, cc.bufReadConn.RemoteAddr(), cc.ctx.Status(), collationStr, cc.user, + ) +} + +// handshake works like TCP handshake, but in a higher level, it first writes initial packet to client, +// during handshake, client and server negotiate compatible features and do authentication. +// After handshake, client can send sql query to server. 
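+//
+// The sequence implemented below is roughly:
+//  1. write the initial handshake packet (version, connection id, salt, capability flags);
+//  2. read the optional SSLRequest and the client's handshake response;
+//  3. reply with an OK packet and reset the packet sequence number.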
+func (cc *clientConn) handshake(ctx context.Context) error { + if err := cc.writeInitialHandshake(); err != nil { + return err + } + if err := cc.readOptionalSSLRequestAndHandshakeResponse(ctx); err != nil { + err1 := cc.writeError(err) + if err1 != nil { + logutil.Logger(ctx).Debug("writeError failed", zap.Error(err1)) + } + return err + } + data := cc.alloc.AllocWithLen(4, 32) + data = append(data, mysql.OKHeader) + data = append(data, 0, 0) + if cc.capability&mysql.ClientProtocol41 > 0 { + data = dumpUint16(data, mysql.ServerStatusAutocommit) + data = append(data, 0, 0) + } + + err := cc.writePacket(data) + cc.pkt.sequence = 0 + if err != nil { + return err + } + + return cc.flush() +} + +func (cc *clientConn) Close() error { + cc.server.rwlock.Lock() + delete(cc.server.clients, cc.connectionID) + connections := len(cc.server.clients) + cc.server.rwlock.Unlock() + return closeConn(cc, connections) +} + +func closeConn(cc *clientConn, connections int) error { + + err := cc.bufReadConn.Close() + terror.Log(err) + if cc.ctx != nil { + return cc.ctx.Close() + } + return nil +} + +func (cc *clientConn) closeWithoutLock() error { + delete(cc.server.clients, cc.connectionID) + return closeConn(cc, len(cc.server.clients)) +} + +// writeInitialHandshake sends server version, connection ID, server capability, collation, server status +// and auth salt to the client. +func (cc *clientConn) writeInitialHandshake() error { + data := make([]byte, 4, 128) + + // min version 10 + data = append(data, 10) + // server version[00] + data = append(data, mysql.ServerVersion...) + data = append(data, 0) + // connection id + data = append(data, byte(cc.connectionID), byte(cc.connectionID>>8), byte(cc.connectionID>>16), byte(cc.connectionID>>24)) + // auth-plugin-data-part-1 + data = append(data, cc.salt[0:8]...) + // filler [00] + data = append(data, 0) + // capability flag lower 2 bytes, using default capability here + data = append(data, byte(cc.server.capability), byte(cc.server.capability>>8)) + // charset + if cc.collation == 0 { + cc.collation = uint8(mysql.DefaultCollationID) + } + data = append(data, cc.collation) + // status + data = dumpUint16(data, mysql.ServerStatusAutocommit) + // below 13 byte may not be used + // capability flag upper 2 bytes, using default capability here + data = append(data, byte(cc.server.capability>>16), byte(cc.server.capability>>24)) + // length of auth-plugin-data + data = append(data, byte(len(cc.salt)+1)) + // reserved 10 [00] + data = append(data, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0) + // auth-plugin-data-part-2 + data = append(data, cc.salt[8:]...) + data = append(data, 0) + // auth-plugin name + data = append(data, []byte("mysql_native_password")...) 
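+	// NUL terminator of the auth-plugin name string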
+ data = append(data, 0) + err := cc.writePacket(data) + if err != nil { + return err + } + return cc.flush() +} + +func (cc *clientConn) readPacket() ([]byte, error) { + return cc.pkt.readPacket() +} + +func (cc *clientConn) writePacket(data []byte) error { + failpoint.Inject("FakeClientConn", func() { + if cc.pkt == nil { + failpoint.Return(nil) + } + }) + return cc.pkt.writePacket(data) +} + +// getSessionVarsWaitTimeout get session variable wait_timeout +func (cc *clientConn) getSessionVarsWaitTimeout(ctx context.Context) uint64 { + valStr, exists := cc.ctx.GetSessionVars().GetSystemVar(variable.WaitTimeout) + if !exists { + return variable.DefWaitTimeout + } + waitTimeout, err := strconv.ParseUint(valStr, 10, 64) + if err != nil { + logutil.Logger(ctx).Warn("get sysval wait_timeout failed, use default value", zap.Error(err)) + // if get waitTimeout error, use default value + return variable.DefWaitTimeout + } + return waitTimeout +} + +type handshakeResponse41 struct { + Capability uint32 + Collation uint8 + User string + DBName string + Auth []byte + Attrs map[string]string +} + +// parseOldHandshakeResponseHeader parses the old version handshake header HandshakeResponse320 +func parseOldHandshakeResponseHeader(ctx context.Context, packet *handshakeResponse41, data []byte) (parsedBytes int, err error) { + // Ensure there are enough data to read: + // https://dev.mysql.com/doc/internals/en/connection-phase-packets.html#packet-Protocol::HandshakeResponse320 + logutil.Logger(ctx).Debug("try to parse hanshake response as Protocol::HandshakeResponse320", zap.ByteString("packetData", data)) + if len(data) < 2+3 { + logutil.Logger(ctx).Error("got malformed handshake response", zap.ByteString("packetData", data)) + return 0, mysql.ErrMalformPacket + } + offset := 0 + // capability + capability := binary.LittleEndian.Uint16(data[:2]) + packet.Capability = uint32(capability) + + // be compatible with Protocol::HandshakeResponse41 + packet.Capability = packet.Capability | mysql.ClientProtocol41 + + offset += 2 + // skip max packet size + offset += 3 + // usa default CharsetID + packet.Collation = mysql.CollationNames["utf8mb4_general_ci"] + + return offset, nil +} + +// parseOldHandshakeResponseBody parse the HandshakeResponse for Protocol::HandshakeResponse320 (except the common header part). +func parseOldHandshakeResponseBody(ctx context.Context, packet *handshakeResponse41, data []byte, offset int) (err error) { + defer func() { + // Check malformat packet cause out of range is disgusting, but don't panic! + if r := recover(); r != nil { + logutil.Logger(ctx).Error("handshake panic", zap.ByteString("packetData", data)) + err = mysql.ErrMalformPacket + } + }() + // user name + packet.User = string(data[offset : offset+bytes.IndexByte(data[offset:], 0)]) + offset += len(packet.User) + 1 + + if packet.Capability&mysql.ClientConnectWithDB > 0 { + if len(data[offset:]) > 0 { + idx := bytes.IndexByte(data[offset:], 0) + packet.DBName = string(data[offset : offset+idx]) + offset = offset + idx + 1 + } + if len(data[offset:]) > 0 { + packet.Auth = data[offset : offset+bytes.IndexByte(data[offset:], 0)] + } + } else { + packet.Auth = data[offset : offset+bytes.IndexByte(data[offset:], 0)] + } + + return nil +} + +// parseHandshakeResponseHeader parses the common header of SSLRequest and HandshakeResponse41. 
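+// The fixed-length prefix read here is: 4 bytes capability flags, 4 bytes max packet size,
+// 1 byte character set and 23 reserved bytes (hence the len(data) < 4+4+1+23 check below).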
+func parseHandshakeResponseHeader(ctx context.Context, packet *handshakeResponse41, data []byte) (parsedBytes int, err error) { + // Ensure there are enough data to read: + // http://dev.mysql.com/doc/internals/en/connection-phase-packets.html#packet-Protocol::SSLRequest + if len(data) < 4+4+1+23 { + logutil.Logger(ctx).Error("got malformed handshake response", zap.ByteString("packetData", data)) + return 0, mysql.ErrMalformPacket + } + + offset := 0 + // capability + capability := binary.LittleEndian.Uint32(data[:4]) + packet.Capability = capability + offset += 4 + // skip max packet size + offset += 4 + // charset, skip, if you want to use another charset, use set names + packet.Collation = data[offset] + offset++ + // skip reserved 23[00] + offset += 23 + + return offset, nil +} + +// parseHandshakeResponseBody parse the HandshakeResponse (except the common header part). +func parseHandshakeResponseBody(ctx context.Context, packet *handshakeResponse41, data []byte, offset int) (err error) { + defer func() { + // Check malformat packet cause out of range is disgusting, but don't panic! + if r := recover(); r != nil { + logutil.Logger(ctx).Error("handshake panic", zap.ByteString("packetData", data)) + err = mysql.ErrMalformPacket + } + }() + // user name + packet.User = string(data[offset : offset+bytes.IndexByte(data[offset:], 0)]) + offset += len(packet.User) + 1 + + if packet.Capability&mysql.ClientPluginAuthLenencClientData > 0 { + // MySQL client sets the wrong capability, it will set this bit even server doesn't + // support ClientPluginAuthLenencClientData. + // https://github.com/mysql/mysql-server/blob/5.7/sql-common/client.c#L3478 + num, null, off := parseLengthEncodedInt(data[offset:]) + offset += off + if !null { + packet.Auth = data[offset : offset+int(num)] + offset += int(num) + } + } else if packet.Capability&mysql.ClientSecureConnection > 0 { + // auth length and auth + authLen := int(data[offset]) + offset++ + packet.Auth = data[offset : offset+authLen] + offset += authLen + } else { + packet.Auth = data[offset : offset+bytes.IndexByte(data[offset:], 0)] + offset += len(packet.Auth) + 1 + } + + if packet.Capability&mysql.ClientConnectWithDB > 0 { + if len(data[offset:]) > 0 { + idx := bytes.IndexByte(data[offset:], 0) + packet.DBName = string(data[offset : offset+idx]) + offset = offset + idx + 1 + } + } + + if packet.Capability&mysql.ClientPluginAuth > 0 { + // TODO: Support mysql.ClientPluginAuth, skip it now + idx := bytes.IndexByte(data[offset:], 0) + offset = offset + idx + 1 + } + + if packet.Capability&mysql.ClientConnectAtts > 0 { + if len(data[offset:]) == 0 { + // Defend some ill-formated packet, connection attribute is not important and can be ignored. 
+ return nil + } + if num, null, off := parseLengthEncodedInt(data[offset:]); !null { + offset += off + row := data[offset : offset+int(num)] + attrs, err := parseAttrs(row) + if err != nil { + logutil.Logger(ctx).Warn("parse attrs failed", zap.Error(err)) + return nil + } + packet.Attrs = attrs + } + } + + return nil +} + +func parseAttrs(data []byte) (map[string]string, error) { + attrs := make(map[string]string) + pos := 0 + for pos < len(data) { + key, _, off, err := parseLengthEncodedBytes(data[pos:]) + if err != nil { + return attrs, err + } + pos += off + value, _, off, err := parseLengthEncodedBytes(data[pos:]) + if err != nil { + return attrs, err + } + pos += off + + attrs[string(key)] = string(value) + } + return attrs, nil +} + +func (cc *clientConn) readOptionalSSLRequestAndHandshakeResponse(ctx context.Context) error { + // Read a packet. It may be a SSLRequest or HandshakeResponse. + data, err := cc.readPacket() + if err != nil { + return err + } + + isOldVersion := false + + var resp handshakeResponse41 + var pos int + + if len(data) < 2 { + logutil.Logger(ctx).Error("got malformed handshake response", zap.ByteString("packetData", data)) + return mysql.ErrMalformPacket + } + + capability := uint32(binary.LittleEndian.Uint16(data[:2])) + if capability&mysql.ClientProtocol41 > 0 { + pos, err = parseHandshakeResponseHeader(ctx, &resp, data) + } else { + pos, err = parseOldHandshakeResponseHeader(ctx, &resp, data) + isOldVersion = true + } + + if err != nil { + return err + } + + if (resp.Capability&mysql.ClientSSL > 0) && cc.server.tlsConfig != nil { + // The packet is a SSLRequest, let's switch to TLS. + if err = cc.upgradeToTLS(cc.server.tlsConfig); err != nil { + return err + } + // Read the following HandshakeResponse packet. + data, err = cc.readPacket() + if err != nil { + return err + } + if isOldVersion { + pos, err = parseOldHandshakeResponseHeader(ctx, &resp, data) + } else { + pos, err = parseHandshakeResponseHeader(ctx, &resp, data) + } + if err != nil { + return err + } + } + + // Read the remaining part of the packet. 
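+	// That is everything after the fixed header: user name, auth response, and optionally
+	// the database name and connection attributes, depending on the capability flags.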
+ if isOldVersion { + err = parseOldHandshakeResponseBody(ctx, &resp, data, pos) + } else { + err = parseHandshakeResponseBody(ctx, &resp, data, pos) + } + if err != nil { + return err + } + + cc.capability = resp.Capability & cc.server.capability + cc.user = resp.User + cc.dbname = resp.DBName + cc.collation = resp.Collation + cc.attrs = resp.Attrs + + err = cc.openSessionAndDoAuth() + return err +} + +func (cc *clientConn) SessionStatusToString() string { + status := cc.ctx.Status() + inTxn, autoCommit := 0, 0 + if status&mysql.ServerStatusInTrans > 0 { + inTxn = 1 + } + if status&mysql.ServerStatusAutocommit > 0 { + autoCommit = 1 + } + return fmt.Sprintf("inTxn:%d, autocommit:%d", + inTxn, autoCommit, + ) +} + +func (cc *clientConn) openSessionAndDoAuth() error { + var tlsStatePtr *tls.ConnectionState + if cc.tlsConn != nil { + tlsState := cc.tlsConn.ConnectionState() + tlsStatePtr = &tlsState + } + var err error + cc.ctx, err = cc.server.driver.OpenCtx(uint64(cc.connectionID), cc.capability, cc.collation, cc.dbname, tlsStatePtr) + if err != nil { + return err + } + if cc.dbname != "" { + err = cc.useDB(context.Background(), cc.dbname) + if err != nil { + return err + } + } + return nil +} + +func (cc *clientConn) PeerHost(hasPassword string) (host string, err error) { + if len(cc.peerHost) > 0 { + return cc.peerHost, nil + } + host = variable.DefHostname + addr := cc.bufReadConn.RemoteAddr().String() + var port string + host, port, err = net.SplitHostPort(addr) + if err != nil { + err = errAccessDenied.GenWithStackByArgs(cc.user, addr, hasPassword) + return + } + cc.peerHost = host + cc.peerPort = port + return +} + +// Run reads client query and writes query result to client in for loop, if there is a panic during query handling, +// it will be recovered and log the panic error. +// This function returns and the connection is closed if there is an IO error or there is a panic. +func (cc *clientConn) Run(ctx context.Context) { + const size = 4096 + defer func() { + r := recover() + if r != nil { + buf := make([]byte, size) + stackSize := runtime.Stack(buf, false) + buf = buf[:stackSize] + logutil.Logger(ctx).Error("connection running loop panic", + zap.Stringer("lastSQL", getLastStmtInConn{cc}), + zap.String("err", fmt.Sprintf("%v", r)), + zap.String("stack", string(buf)), + ) + + } + if atomic.LoadInt32(&cc.status) != connStatusShutdown { + err := cc.Close() + terror.Log(err) + } + }() + // Usually, client connection status changes between [dispatching] <=> [reading]. + // When some event happens, server may notify this client connection by setting + // the status to special values, for example: kill or graceful shutdown. + // The client connection would detect the events when it fails to change status + // by CAS operation, it would then take some actions accordingly. 
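+	// Concretely, the loop below CASes dispatching->reading before blocking on readPacket,
+	// and reading->dispatching before handling the command; a failed CAS means the server
+	// has switched the status to (wait)shutdown, so the loop returns.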
+ for { + if !atomic.CompareAndSwapInt32(&cc.status, connStatusDispatching, connStatusReading) { + return + } + + cc.alloc.Reset() + // close connection when idle time is more than wait_timeout + waitTimeout := cc.getSessionVarsWaitTimeout(ctx) + cc.pkt.setReadTimeout(time.Duration(waitTimeout) * time.Second) + start := time.Now() + data, err := cc.readPacket() + if err != nil { + if terror.ErrorNotEqual(err, io.EOF) { + if netErr, isNetErr := errors.Cause(err).(net.Error); isNetErr && netErr.Timeout() { + idleTime := time.Since(start) + logutil.Logger(ctx).Info("read packet timeout, close this connection", + zap.Duration("idle", idleTime), + zap.Uint64("waitTimeout", waitTimeout), + zap.Error(err), + ) + } else { + errStack := errors.ErrorStack(err) + if !strings.Contains(errStack, "use of closed network connection") { + logutil.Logger(ctx).Warn("read packet failed, close this connection", + zap.Error(errors.SuspendStack(err))) + } + } + } + return + } + + if !atomic.CompareAndSwapInt32(&cc.status, connStatusReading, connStatusDispatching) { + return + } + + if err = cc.dispatch(ctx, data); err != nil { + if terror.ErrorEqual(err, io.EOF) { + + return + } else if terror.ErrResultUndetermined.Equal(err) { + logutil.Logger(ctx).Error("result undetermined, close this connection", zap.Error(err)) + return + } else if terror.ErrCritical.Equal(err) { + logutil.Logger(ctx).Error("critical error, stop the server listener", zap.Error(err)) + + select { + case cc.server.stopListenerCh <- struct{}{}: + default: + } + return + } + logutil.Logger(ctx).Warn("command dispatched failed", + zap.String("connInfo", cc.String()), + zap.String("command", mysql.Command2Str[data[0]]), + zap.String("status", cc.SessionStatusToString()), + zap.Stringer("sql", getLastStmtInConn{cc}), + zap.String("err", errStrForLog(err)), + ) + err1 := cc.writeError(err) + terror.Log(err1) + } + + cc.pkt.sequence = 0 + } +} + +// ShutdownOrNotify will Shutdown this client connection, or do its best to notify. +func (cc *clientConn) ShutdownOrNotify() bool { + if (cc.ctx.Status() & mysql.ServerStatusInTrans) > 0 { + return false + } + // If the client connection status is reading, it's safe to shutdown it. + if atomic.CompareAndSwapInt32(&cc.status, connStatusReading, connStatusShutdown) { + return true + } + // If the client connection status is dispatching, we can't shutdown it immediately, + // so set the status to WaitShutdown as a notification, the client will detect it + // and then exit. + atomic.StoreInt32(&cc.status, connStatusWaitShutdown) + return false +} + +func queryStrForLog(query string) string { + const size = 4096 + if len(query) > size { + return query[:size] + fmt.Sprintf("(len: %d)", len(query)) + } + return query +} + +func errStrForLog(err error) string { + if kv.ErrKeyExists.Equal(err) { + // Do not log stack for duplicated entry error. + return err.Error() + } + return errors.ErrorStack(err) +} + +// dispatch handles client request based on command which is the first byte of the data. +// It also gets a token from server which is used to limit the concurrently handling clients. +// The most frequently used command is ComQuery. 
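+// The commands handled below are ComSleep (ignored), ComQuit, ComQuery, ComPing,
+// ComInitDB and ComFieldList; any other command is rejected with an error.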
+func (cc *clientConn) dispatch(ctx context.Context, data []byte) error { + cc.lastPacket = data + cmd := data[0] + data = data[1:] + vars := cc.ctx.GetSessionVars() + atomic.StoreUint32(&vars.Killed, 0) + if cmd < mysql.ComEnd { + cc.ctx.SetCommandValue(cmd) + } + + dataStr := string(hack.String(data)) + + switch cmd { + case mysql.ComSleep: + // TODO: According to mysql document, this command is supposed to be used only internally. + // So it's just a temp fix, not sure if it's done right. + // Investigate this command and write test case later. + return nil + case mysql.ComQuit: + return io.EOF + case mysql.ComQuery: // Most frequently used command. + // For issue 1989 + // Input payload may end with byte '\0', we didn't find related mysql document about it, but mysql + // implementation accept that case. So trim the last '\0' here as if the payload an EOF string. + // See http://dev.mysql.com/doc/internals/en/com-query.html + if len(data) > 0 && data[len(data)-1] == 0 { + data = data[:len(data)-1] + dataStr = string(hack.String(data)) + } + return cc.handleQuery(ctx, dataStr) + case mysql.ComPing: + return cc.writeOK() + case mysql.ComInitDB: + if err := cc.useDB(ctx, dataStr); err != nil { + return err + } + return cc.writeOK() + case mysql.ComFieldList: + return cc.handleFieldList(dataStr) + default: + return mysql.NewErrf(mysql.ErrUnknown, "command %d not supported now", cmd) + } +} + +func (cc *clientConn) useDB(ctx context.Context, db string) (err error) { + // if input is "use `SELECT`", mysql client just send "SELECT" + // so we add `` around db. + _, err = cc.ctx.Execute(ctx, "use `"+db+"`") + if err != nil { + return err + } + cc.dbname = db + return +} + +func (cc *clientConn) flush() error { + failpoint.Inject("FakeClientConn", func() { + if cc.pkt == nil { + failpoint.Return(nil) + } + }) + return cc.pkt.flush() +} + +func (cc *clientConn) writeOK() error { + return cc.writeOkWith("", cc.ctx.AffectedRows(), cc.ctx.LastInsertID(), cc.ctx.Status(), cc.ctx.WarningCount()) +} + +func (cc *clientConn) writeOkWith(msg string, affectedRows, lastInsertID uint64, status, warnCnt uint16) error { + enclen := 0 + if len(msg) > 0 { + enclen = lengthEncodedIntSize(uint64(len(msg))) + len(msg) + } + + data := cc.alloc.AllocWithLen(4, 32+enclen) + data = append(data, mysql.OKHeader) + data = dumpLengthEncodedInt(data, affectedRows) + data = dumpLengthEncodedInt(data, lastInsertID) + if cc.capability&mysql.ClientProtocol41 > 0 { + data = dumpUint16(data, status) + data = dumpUint16(data, warnCnt) + } + if enclen > 0 { + // although MySQL manual says the info message is string(https://dev.mysql.com/doc/internals/en/packet-OK_Packet.html), + // it is actually string + data = dumpLengthEncodedString(data, []byte(msg)) + } + + err := cc.writePacket(data) + if err != nil { + return err + } + + return cc.flush() +} + +func (cc *clientConn) writeError(e error) error { + var ( + m *mysql.SQLError + te *terror.Error + ok bool + ) + originErr := errors.Cause(e) + if te, ok = originErr.(*terror.Error); ok { + m = te.ToSQLError() + } else { + e := errors.Cause(originErr) + switch y := e.(type) { + case *terror.Error: + m = y.ToSQLError() + default: + m = mysql.NewErrf(mysql.ErrUnknown, "%s", e.Error()) + } + } + + cc.lastCode = m.Code + data := cc.alloc.AllocWithLen(4, 16+len(m.Message)) + data = append(data, mysql.ErrHeader) + data = append(data, byte(m.Code), byte(m.Code>>8)) + if cc.capability&mysql.ClientProtocol41 > 0 { + data = append(data, '#') + data = append(data, m.State...) 
+ } + + data = append(data, m.Message...) + + err := cc.writePacket(data) + if err != nil { + return err + } + return cc.flush() +} + +// writeEOF writes an EOF packet. +// Note this function won't flush the stream because maybe there are more +// packets following it. +// serverStatus, a flag bit represents server information +// in the packet. +func (cc *clientConn) writeEOF(serverStatus uint16) error { + data := cc.alloc.AllocWithLen(4, 9) + + data = append(data, mysql.EOFHeader) + if cc.capability&mysql.ClientProtocol41 > 0 { + data = dumpUint16(data, cc.ctx.WarningCount()) + status := cc.ctx.Status() + status |= serverStatus + data = dumpUint16(data, status) + } + + err := cc.writePacket(data) + return err +} + +// handleQuery executes the sql query string and writes result set or result ok to the client. +func (cc *clientConn) handleQuery(ctx context.Context, sql string) (err error) { + rss, err := cc.ctx.Execute(ctx, sql) + if err != nil { + + return err + } + status := atomic.LoadInt32(&cc.status) + if rss != nil && (status == connStatusShutdown || status == connStatusWaitShutdown) { + for _, rs := range rss { + terror.Call(rs.Close) + } + return executor.ErrQueryInterrupted + } + if rss != nil { + if len(rss) == 1 { + err = cc.writeResultset(ctx, rss[0], false, 0, 0) + } else { + err = cc.writeMultiResultset(ctx, rss, false) + } + } else { + err = cc.writeOK() + } + return err +} + +// handleFieldList returns the field list for a table. +// The sql string is composed of a table name and a terminating character \x00. +func (cc *clientConn) handleFieldList(sql string) (err error) { + parts := strings.Split(sql, "\x00") + columns, err := cc.ctx.FieldList(parts[0]) + if err != nil { + return err + } + data := cc.alloc.AllocWithLen(4, 1024) + for _, column := range columns { + // Current we doesn't output defaultValue but reserve defaultValue length byte to make mariadb client happy. + // https://dev.mysql.com/doc/internals/en/com-query-response.html#column-definition + // TODO: fill the right DefaultValues. + column.DefaultValueLength = 0 + column.DefaultValue = []byte{} + + data = data[0:4] + data = column.Dump(data) + if err := cc.writePacket(data); err != nil { + return err + } + } + if err := cc.writeEOF(0); err != nil { + return err + } + return cc.flush() +} + +// writeResultset writes data into a resultset and uses rs.Next to get row data back. +// If binary is true, the data would be encoded in BINARY format. +// serverStatus, a flag bit represents server information. +// fetchSize, the desired number of rows to be fetched each time when client uses cursor. +// resultsets, it's used to support the MULTI_RESULTS capability in mysql protocol. 
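+// Depending on serverStatus, rows are streamed either through writeChunks (no cursor) or
+// writeChunksWithFetchSize (when the cursor-exists flag is set, i.e. COM_STMT_FETCH).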
+func (cc *clientConn) writeResultset(ctx context.Context, rs ResultSet, binary bool, serverStatus uint16, fetchSize int) (runErr error) { + defer func() { + // close ResultSet when cursor doesn't exist + if !mysql.HasCursorExistsFlag(serverStatus) { + terror.Call(rs.Close) + } + r := recover() + if r == nil { + return + } + if _, ok := r.(string); !ok { + panic(r) + } + + runErr = errors.Errorf("%v", r) + buf := make([]byte, 4096) + stackSize := runtime.Stack(buf, false) + buf = buf[:stackSize] + logutil.Logger(ctx).Error("write query result panic", zap.Stringer("lastSQL", getLastStmtInConn{cc}), zap.String("stack", string(buf))) + }() + var err error + if mysql.HasCursorExistsFlag(serverStatus) { + err = cc.writeChunksWithFetchSize(ctx, rs, serverStatus, fetchSize) + } else { + err = cc.writeChunks(ctx, rs, binary, serverStatus) + } + if err != nil { + return err + } + + return cc.flush() +} + +func (cc *clientConn) writeColumnInfo(columns []*ColumnInfo, serverStatus uint16) error { + data := cc.alloc.AllocWithLen(4, 1024) + data = dumpLengthEncodedInt(data, uint64(len(columns))) + if err := cc.writePacket(data); err != nil { + return err + } + for _, v := range columns { + data = data[0:4] + data = v.Dump(data) + if err := cc.writePacket(data); err != nil { + return err + } + } + return cc.writeEOF(serverStatus) +} + +// writeChunks writes data from a Chunk, which filled data by a ResultSet, into a connection. +// binary specifies the way to dump data. It throws any error while dumping data. +// serverStatus, a flag bit represents server information +func (cc *clientConn) writeChunks(ctx context.Context, rs ResultSet, binary bool, serverStatus uint16) error { + data := cc.alloc.AllocWithLen(4, 1024) + req := rs.NewChunk() + gotColumnInfo := false + for { + // Here server.tidbResultSet implements Next method. + err := rs.Next(ctx, req) + if err != nil { + return err + } + if !gotColumnInfo { + // We need to call Next before we get columns. + // Otherwise, we will get incorrect columns info. + columns := rs.Columns() + err = cc.writeColumnInfo(columns, serverStatus) + if err != nil { + return err + } + gotColumnInfo = true + } + rowCount := req.NumRows() + if rowCount == 0 { + break + } + for i := 0; i < rowCount; i++ { + data = data[0:4] + if binary { + data, err = dumpBinaryRow(data, rs.Columns(), req.GetRow(i)) + } else { + data, err = dumpTextRow(data, rs.Columns(), req.GetRow(i)) + } + if err != nil { + return err + } + if err = cc.writePacket(data); err != nil { + return err + } + } + } + return cc.writeEOF(serverStatus) +} + +// writeChunksWithFetchSize writes data from a Chunk, which filled data by a ResultSet, into a connection. +// binary specifies the way to dump data. It throws any error while dumping data. +// serverStatus, a flag bit represents server information. +// fetchSize, the desired number of rows to be fetched each time when client uses cursor. +func (cc *clientConn) writeChunksWithFetchSize(ctx context.Context, rs ResultSet, serverStatus uint16, fetchSize int) error { + fetchedRows := rs.GetFetchedRows() + + // if fetchedRows is not enough, getting data from recordSet. + req := rs.NewChunk() + for len(fetchedRows) < fetchSize { + // Here server.tidbResultSet implements Next method. 
+ err := rs.Next(ctx, req) + if err != nil { + return err + } + rowCount := req.NumRows() + if rowCount == 0 { + break + } + // filling fetchedRows with chunk + for i := 0; i < rowCount; i++ { + fetchedRows = append(fetchedRows, req.GetRow(i)) + } + req = chunk.Renew(req, cc.ctx.GetSessionVars().MaxChunkSize) + } + + // tell the client COM_STMT_FETCH has finished by setting proper serverStatus, + // and close ResultSet. + if len(fetchedRows) == 0 { + serverStatus |= mysql.ServerStatusLastRowSend + terror.Call(rs.Close) + return cc.writeEOF(serverStatus) + } + + // construct the rows sent to the client according to fetchSize. + var curRows []chunk.Row + if fetchSize < len(fetchedRows) { + curRows = fetchedRows[:fetchSize] + fetchedRows = fetchedRows[fetchSize:] + } else { + curRows = fetchedRows[:] + fetchedRows = fetchedRows[:0] + } + rs.StoreFetchedRows(fetchedRows) + + data := cc.alloc.AllocWithLen(4, 1024) + var err error + for _, row := range curRows { + data = data[0:4] + data, err = dumpBinaryRow(data, rs.Columns(), row) + if err != nil { + return err + } + if err = cc.writePacket(data); err != nil { + return err + } + } + if cl, ok := rs.(fetchNotifier); ok { + cl.OnFetchReturned() + } + return cc.writeEOF(serverStatus) +} + +func (cc *clientConn) writeMultiResultset(ctx context.Context, rss []ResultSet, binary bool) error { + for i, rs := range rss { + lastRs := i == len(rss)-1 + if r, ok := rs.(*tidbResultSet).recordSet.(sqlexec.MultiQueryNoDelayResult); ok { + status := r.Status() + if !lastRs { + status |= mysql.ServerMoreResultsExists + } + if err := cc.writeOkWith("", r.AffectedRows(), r.LastInsertID(), status, r.WarnCount()); err != nil { + return err + } + continue + } + status := uint16(0) + if !lastRs { + status |= mysql.ServerMoreResultsExists + } + if err := cc.writeResultset(ctx, rs, binary, status, 0); err != nil { + return err + } + } + return nil +} + +func (cc *clientConn) setConn(conn net.Conn) { + cc.bufReadConn = newBufferedReadConn(conn) + if cc.pkt == nil { + cc.pkt = newPacketIO(cc.bufReadConn) + } else { + // Preserve current sequence number. + cc.pkt.setBufferedReadConn(cc.bufReadConn) + } +} + +func (cc *clientConn) upgradeToTLS(tlsConfig *tls.Config) error { + // Important: read from buffered reader instead of the original net.Conn because it may contain data we need. + tlsConn := tls.Server(cc.bufReadConn, tlsConfig) + if err := tlsConn.Handshake(); err != nil { + return err + } + cc.setConn(tlsConn) + cc.tlsConn = tlsConn + return nil +} + +var _ fmt.Stringer = getLastStmtInConn{} + +type getLastStmtInConn struct { + *clientConn +} + +func (cc getLastStmtInConn) String() string { + if len(cc.lastPacket) == 0 { + return "" + } + cmd, data := cc.lastPacket[0], cc.lastPacket[1:] + switch cmd { + case mysql.ComInitDB: + return "Use " + string(data) + case mysql.ComFieldList: + return "ListFields " + string(data) + case mysql.ComQuery, mysql.ComStmtPrepare: + return queryStrForLog(string(hack.String(data))) + case mysql.ComStmtClose, mysql.ComStmtReset: + stmtID := binary.LittleEndian.Uint32(data[0:4]) + return mysql.Command2Str[cmd] + " " + strconv.Itoa(int(stmtID)) + default: + if cmdStr, ok := mysql.Command2Str[cmd]; ok { + return cmdStr + } + return string(hack.String(data)) + } +} diff --git a/server/conn_test.go b/server/conn_test.go new file mode 100644 index 0000000..3133dfa --- /dev/null +++ b/server/conn_test.go @@ -0,0 +1,264 @@ +// Copyright 2016 PingCAP, Inc. 
+// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package server + +import ( + "bufio" + "bytes" + "context" + "encoding/binary" + . "github.com/pingcap/check" + "github.com/pingcap/tidb/domain" + "github.com/pingcap/tidb/executor" + "github.com/pingcap/tidb/kv" + "github.com/pingcap/tidb/parser/ast" + "github.com/pingcap/tidb/parser/mysql" + "github.com/pingcap/tidb/session" + "github.com/pingcap/tidb/sessionctx/variable" + "github.com/pingcap/tidb/store/mockstore" + "github.com/pingcap/tidb/util/chunk" + "github.com/pingcap/tidb/util/testleak" +) + +type ConnTestSuite struct { + dom *domain.Domain + store kv.Storage +} + +var _ = Suite(&ConnTestSuite{}) + +func (ts *ConnTestSuite) SetUpSuite(c *C) { + testleak.BeforeTest() + var err error + ts.store, err = mockstore.NewMockTikvStore() + c.Assert(err, IsNil) + ts.dom, err = session.BootstrapSession(ts.store) + c.Assert(err, IsNil) +} + +func (ts *ConnTestSuite) TearDownSuite(c *C) { + ts.dom.Close() + ts.store.Close() + testleak.AfterTest(c)() +} + +func (ts *ConnTestSuite) TestMalformHandshakeHeader(c *C) { + c.Parallel() + data := []byte{0x00} + var p handshakeResponse41 + _, err := parseHandshakeResponseHeader(context.Background(), &p, data) + c.Assert(err, NotNil) +} + +func (ts *ConnTestSuite) TestParseHandshakeResponse(c *C) { + c.Parallel() + // test data from http://dev.mysql.com/doc/internals/en/connection-phase-packets.html#packet-Protocol::HandshakeResponse41 + data := []byte{ + 0x85, 0xa2, 0x1e, 0x00, 0x00, 0x00, 0x00, 0x40, 0x08, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x72, 0x6f, 0x6f, 0x74, 0x00, 0x14, 0x22, 0x50, 0x79, 0xa2, 0x12, 0xd4, + 0xe8, 0x82, 0xe5, 0xb3, 0xf4, 0x1a, 0x97, 0x75, 0x6b, 0xc8, 0xbe, 0xdb, 0x9f, 0x80, 0x6d, 0x79, + 0x73, 0x71, 0x6c, 0x5f, 0x6e, 0x61, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x70, 0x61, 0x73, 0x73, 0x77, + 0x6f, 0x72, 0x64, 0x00, 0x61, 0x03, 0x5f, 0x6f, 0x73, 0x09, 0x64, 0x65, 0x62, 0x69, 0x61, 0x6e, + 0x36, 0x2e, 0x30, 0x0c, 0x5f, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x5f, 0x6e, 0x61, 0x6d, 0x65, + 0x08, 0x6c, 0x69, 0x62, 0x6d, 0x79, 0x73, 0x71, 0x6c, 0x04, 0x5f, 0x70, 0x69, 0x64, 0x05, 0x32, + 0x32, 0x33, 0x34, 0x34, 0x0f, 0x5f, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x5f, 0x76, 0x65, 0x72, + 0x73, 0x69, 0x6f, 0x6e, 0x08, 0x35, 0x2e, 0x36, 0x2e, 0x36, 0x2d, 0x6d, 0x39, 0x09, 0x5f, 0x70, + 0x6c, 0x61, 0x74, 0x66, 0x6f, 0x72, 0x6d, 0x06, 0x78, 0x38, 0x36, 0x5f, 0x36, 0x34, 0x03, 0x66, + 0x6f, 0x6f, 0x03, 0x62, 0x61, 0x72, + } + var p handshakeResponse41 + offset, err := parseHandshakeResponseHeader(context.Background(), &p, data) + c.Assert(err, IsNil) + c.Assert(p.Capability&mysql.ClientConnectAtts, Equals, mysql.ClientConnectAtts) + err = parseHandshakeResponseBody(context.Background(), &p, data, offset) + c.Assert(err, IsNil) + eq := mapIdentical(p.Attrs, map[string]string{ + "_client_version": "5.6.6-m9", + "_platform": "x86_64", + "foo": "bar", + "_os": "debian6.0", + "_client_name": "libmysql", + "_pid": "22344"}) + c.Assert(eq, IsTrue) + + data = []byte{ + 0x8d, 
0xa6, 0x0f, 0x00, 0x00, 0x00, 0x00, 0x01, 0x08, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x70, 0x61, 0x6d, 0x00, 0x14, 0xab, 0x09, 0xee, 0xf6, 0xbc, 0xb1, 0x32, + 0x3e, 0x61, 0x14, 0x38, 0x65, 0xc0, 0x99, 0x1d, 0x95, 0x7d, 0x75, 0xd4, 0x47, 0x74, 0x65, 0x73, + 0x74, 0x00, 0x6d, 0x79, 0x73, 0x71, 0x6c, 0x5f, 0x6e, 0x61, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x70, + 0x61, 0x73, 0x73, 0x77, 0x6f, 0x72, 0x64, 0x00, + } + p = handshakeResponse41{} + offset, err = parseHandshakeResponseHeader(context.Background(), &p, data) + c.Assert(err, IsNil) + capability := mysql.ClientProtocol41 | + mysql.ClientPluginAuth | + mysql.ClientSecureConnection | + mysql.ClientConnectWithDB + c.Assert(p.Capability&capability, Equals, capability) + err = parseHandshakeResponseBody(context.Background(), &p, data, offset) + c.Assert(err, IsNil) + c.Assert(p.User, Equals, "pam") + c.Assert(p.DBName, Equals, "test") + + // Test for compatibility of Protocol::HandshakeResponse320 + data = []byte{ + 0x00, 0x80, 0x00, 0x00, 0x01, 0x72, 0x6f, 0x6f, 0x74, 0x00, 0x00, + } + p = handshakeResponse41{} + offset, err = parseOldHandshakeResponseHeader(context.Background(), &p, data) + c.Assert(err, IsNil) + capability = mysql.ClientProtocol41 | + mysql.ClientSecureConnection + c.Assert(p.Capability&capability, Equals, capability) + err = parseOldHandshakeResponseBody(context.Background(), &p, data, offset) + c.Assert(err, IsNil) + c.Assert(p.User, Equals, "root") +} + +func (ts *ConnTestSuite) TestIssue1768(c *C) { + c.Parallel() + // this data is from captured handshake packet, using mysql client. + // TiDB should handle authorization correctly, even mysql client set + // the ClientPluginAuthLenencClientData capability. 
+ data := []byte{ + 0x85, 0xa6, 0xff, 0x01, 0x00, 0x00, 0x00, 0x01, 0x21, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x74, 0x65, 0x73, 0x74, 0x00, 0x14, 0xe9, 0x7a, 0x2b, 0xec, 0x4a, 0xa8, + 0xea, 0x67, 0x8a, 0xc2, 0x46, 0x4d, 0x32, 0xa4, 0xda, 0x39, 0x77, 0xe5, 0x61, 0x1a, 0x65, 0x03, + 0x5f, 0x6f, 0x73, 0x05, 0x4c, 0x69, 0x6e, 0x75, 0x78, 0x0c, 0x5f, 0x63, 0x6c, 0x69, 0x65, 0x6e, + 0x74, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x08, 0x6c, 0x69, 0x62, 0x6d, 0x79, 0x73, 0x71, 0x6c, 0x04, + 0x5f, 0x70, 0x69, 0x64, 0x04, 0x39, 0x30, 0x33, 0x30, 0x0f, 0x5f, 0x63, 0x6c, 0x69, 0x65, 0x6e, + 0x74, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x06, 0x35, 0x2e, 0x37, 0x2e, 0x31, 0x34, + 0x09, 0x5f, 0x70, 0x6c, 0x61, 0x74, 0x66, 0x6f, 0x72, 0x6d, 0x06, 0x78, 0x38, 0x36, 0x5f, 0x36, + 0x34, 0x0c, 0x70, 0x72, 0x6f, 0x67, 0x72, 0x61, 0x6d, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x05, 0x6d, + 0x79, 0x73, 0x71, 0x6c, + } + p := handshakeResponse41{} + offset, err := parseHandshakeResponseHeader(context.Background(), &p, data) + c.Assert(err, IsNil) + c.Assert(p.Capability&mysql.ClientPluginAuthLenencClientData, Equals, mysql.ClientPluginAuthLenencClientData) + err = parseHandshakeResponseBody(context.Background(), &p, data, offset) + c.Assert(err, IsNil) + c.Assert(len(p.Auth) > 0, IsTrue) +} + +func (ts *ConnTestSuite) TestInitialHandshake(c *C) { + c.Parallel() + var outBuffer bytes.Buffer + cc := &clientConn{ + connectionID: 1, + salt: []byte{0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F, 0x10, 0x11, 0x12, 0x13, 0x14}, + server: &Server{ + capability: defaultCapability, + }, + pkt: &packetIO{ + bufWriter: bufio.NewWriter(&outBuffer), + }, + } + err := cc.writeInitialHandshake() + c.Assert(err, IsNil) + + expected := new(bytes.Buffer) + expected.WriteByte(0x0a) // Protocol + expected.WriteString(mysql.ServerVersion) // Version + expected.WriteByte(0x00) // NULL + binary.Write(expected, binary.LittleEndian, uint32(1)) // Connection ID + expected.Write([]byte{0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x00}) // Salt + binary.Write(expected, binary.LittleEndian, uint16(defaultCapability&0xFFFF)) // Server Capability + expected.WriteByte(uint8(mysql.DefaultCollationID)) // Server Language + binary.Write(expected, binary.LittleEndian, mysql.ServerStatusAutocommit) // Server Status + binary.Write(expected, binary.LittleEndian, uint16((defaultCapability>>16)&0xFFFF)) // Extended Server Capability + expected.WriteByte(0x15) // Authentication Plugin Length + expected.Write([]byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}) // Unused + expected.Write([]byte{0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F, 0x10, 0x11, 0x12, 0x13, 0x14, 0x00}) // Salt + expected.WriteString("mysql_native_password") // Authentication Plugin + expected.WriteByte(0x00) // NULL + c.Assert(outBuffer.Bytes()[4:], DeepEquals, expected.Bytes()) +} + +func mapIdentical(m1, m2 map[string]string) bool { + return mapBelong(m1, m2) && mapBelong(m2, m1) +} + +func mapBelong(m1, m2 map[string]string) bool { + for k1, v1 := range m1 { + v2, ok := m2[k1] + if !ok && v1 != v2 { + return false + } + } + return true +} + +type mockTiDBCtx struct { + TiDBContext + rs []ResultSet + err error +} + +func (c *mockTiDBCtx) Execute(ctx context.Context, sql string) ([]ResultSet, error) { + return c.rs, c.err +} + +func (c *mockTiDBCtx) GetSessionVars() *variable.SessionVars { + return &variable.SessionVars{} +} + +type 
mockRecordSet struct{} + +func (m mockRecordSet) Fields() []*ast.ResultField { return nil } +func (m mockRecordSet) Next(ctx context.Context, req *chunk.Chunk) error { return nil } +func (m mockRecordSet) NewChunk() *chunk.Chunk { return nil } +func (m mockRecordSet) Close() error { return nil } + +func (ts *ConnTestSuite) TestShutDown(c *C) { + cc := &clientConn{} + + rs := &tidbResultSet{recordSet: mockRecordSet{}} + // mock delay response + cc.ctx = &mockTiDBCtx{rs: []ResultSet{rs}, err: nil} + // set killed flag + cc.status = connStatusShutdown + // assert ErrQueryInterrupted + err := cc.handleQuery(context.Background(), "dummy") + c.Assert(err, Equals, executor.ErrQueryInterrupted) + c.Assert(rs.closed, Equals, int32(1)) +} + +func (ts *ConnTestSuite) TestShutdownOrNotify(c *C) { + c.Parallel() + se, err := session.CreateSession4Test(ts.store) + c.Assert(err, IsNil) + tc := &TiDBContext{ + session: se, + } + cc := &clientConn{ + connectionID: 1, + server: &Server{ + capability: defaultCapability, + }, + status: connStatusWaitShutdown, + ctx: tc, + } + c.Assert(cc.ShutdownOrNotify(), IsFalse) + cc.status = connStatusReading + c.Assert(cc.ShutdownOrNotify(), IsTrue) + c.Assert(cc.status, Equals, connStatusShutdown) + cc.status = connStatusDispatching + c.Assert(cc.ShutdownOrNotify(), IsFalse) + c.Assert(cc.status, Equals, connStatusWaitShutdown) +} diff --git a/server/driver.go b/server/driver.go new file mode 100644 index 0000000..bb0a094 --- /dev/null +++ b/server/driver.go @@ -0,0 +1,129 @@ +// Copyright 2015 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package server + +import ( + "context" + "crypto/tls" + "fmt" + "github.com/pingcap/tidb/sessionctx/variable" + "github.com/pingcap/tidb/types" + "github.com/pingcap/tidb/util/chunk" +) + +// IDriver opens IContext. +type IDriver interface { + // OpenCtx opens an IContext with connection id, client capability, collation, dbname and optionally the tls state. + OpenCtx(connID uint64, capability uint32, collation uint8, dbname string, tlsState *tls.ConnectionState) (QueryCtx, error) +} + +// QueryCtx is the interface to execute command. +type QueryCtx interface { + // Status returns server status code. + Status() uint16 + + // LastInsertID returns last inserted ID. + LastInsertID() uint64 + + // AffectedRows returns affected rows of last executed command. + AffectedRows() uint64 + + // Value returns the value associated with this context for key. + Value(key fmt.Stringer) interface{} + + // SetValue saves a value associated with this context for key. + SetValue(key fmt.Stringer, value interface{}) + + // CommitTxn commits the transaction operations. + CommitTxn(ctx context.Context) error + + // RollbackTxn undoes the transaction operations. + RollbackTxn() + + // WarningCount returns warning count of last executed command. + WarningCount() uint16 + + // CurrentDB returns current DB. + CurrentDB() string + + // Execute executes a SQL statement. 
+ Execute(ctx context.Context, sql string) ([]ResultSet, error) + + // SetClientCapability sets client capability flags + SetClientCapability(uint32) + + // FieldList returns columns of a table. + FieldList(tableName string) (columns []*ColumnInfo, err error) + + // Close closes the QueryCtx. + Close() error + + // GetSessionVars return SessionVars. + GetSessionVars() *variable.SessionVars + + SetCommandValue(command byte) +} + +// PreparedStatement is the interface to use a prepared statement. +type PreparedStatement interface { + // ID returns statement ID + ID() int + + // Execute executes the statement. + Execute(context.Context, []types.Datum) (ResultSet, error) + + // AppendParam appends parameter to the statement. + AppendParam(paramID int, data []byte) error + + // NumParams returns number of parameters. + NumParams() int + + // BoundParams returns bound parameters. + BoundParams() [][]byte + + // SetParamsType sets type for parameters. + SetParamsType([]byte) + + // GetParamsType returns the type for parameters. + GetParamsType() []byte + + // StoreResultSet stores ResultSet for subsequent stmt fetching + StoreResultSet(rs ResultSet) + + // GetResultSet gets ResultSet associated this statement + GetResultSet() ResultSet + + // Reset removes all bound parameters. + Reset() + + // Close closes the statement. + Close() error +} + +// ResultSet is the result set of an query. +type ResultSet interface { + Columns() []*ColumnInfo + NewChunk() *chunk.Chunk + Next(context.Context, *chunk.Chunk) error + StoreFetchedRows(rows []chunk.Row) + GetFetchedRows() []chunk.Row + Close() error +} + +// fetchNotifier represents notifier will be called in COM_FETCH. +type fetchNotifier interface { + // OnFetchReturned be called when COM_FETCH returns. + // it will be used in server-side cursor. + OnFetchReturned() +} diff --git a/server/driver_tidb.go b/server/driver_tidb.go new file mode 100644 index 0000000..85e11b4 --- /dev/null +++ b/server/driver_tidb.go @@ -0,0 +1,287 @@ +// Copyright 2015 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package server + +import ( + "context" + "crypto/tls" + "fmt" + "sync/atomic" + + "github.com/pingcap/tidb/kv" + "github.com/pingcap/tidb/parser/ast" + "github.com/pingcap/tidb/parser/charset" + "github.com/pingcap/tidb/parser/mysql" + "github.com/pingcap/tidb/session" + "github.com/pingcap/tidb/sessionctx/variable" + "github.com/pingcap/tidb/types" + "github.com/pingcap/tidb/util/chunk" + "github.com/pingcap/tidb/util/sqlexec" +) + +// TiDBDriver implements IDriver. +type TiDBDriver struct { + store kv.Storage +} + +// NewTiDBDriver creates a new TiDBDriver. +func NewTiDBDriver(store kv.Storage) *TiDBDriver { + driver := &TiDBDriver{ + store: store, + } + return driver +} + +// TiDBContext implements QueryCtx. +type TiDBContext struct { + session session.Session + currentDB string +} + +// OpenCtx implements IDriver. 
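+// It creates a new session on the driver's store and applies the TLS state, collation,
+// client capability flags and connection ID negotiated during the handshake.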
+func (qd *TiDBDriver) OpenCtx(connID uint64, capability uint32, collation uint8, dbname string, tlsState *tls.ConnectionState) (QueryCtx, error) { + se, err := session.CreateSession(qd.store) + if err != nil { + return nil, err + } + se.SetTLSState(tlsState) + err = se.SetCollation(int(collation)) + if err != nil { + return nil, err + } + se.SetClientCapability(capability) + se.SetConnectionID(connID) + tc := &TiDBContext{ + session: se, + currentDB: dbname, + } + return tc, nil +} + +// Status implements QueryCtx Status method. +func (tc *TiDBContext) Status() uint16 { + return tc.session.Status() +} + +// LastInsertID implements QueryCtx LastInsertID method. +func (tc *TiDBContext) LastInsertID() uint64 { + return tc.session.LastInsertID() +} + +// Value implements QueryCtx Value method. +func (tc *TiDBContext) Value(key fmt.Stringer) interface{} { + return tc.session.Value(key) +} + +// SetValue implements QueryCtx SetValue method. +func (tc *TiDBContext) SetValue(key fmt.Stringer, value interface{}) { + tc.session.SetValue(key, value) +} + +// CommitTxn implements QueryCtx CommitTxn method. +func (tc *TiDBContext) CommitTxn(ctx context.Context) error { + return tc.session.CommitTxn(ctx) +} + +// RollbackTxn implements QueryCtx RollbackTxn method. +func (tc *TiDBContext) RollbackTxn() { + tc.session.RollbackTxn(context.TODO()) +} + +// AffectedRows implements QueryCtx AffectedRows method. +func (tc *TiDBContext) AffectedRows() uint64 { + return tc.session.AffectedRows() +} + +// CurrentDB implements QueryCtx CurrentDB method. +func (tc *TiDBContext) CurrentDB() string { + return tc.currentDB +} + +// WarningCount implements QueryCtx WarningCount method. +func (tc *TiDBContext) WarningCount() uint16 { + return tc.session.GetSessionVars().StmtCtx.WarningCount() +} + +// Execute implements QueryCtx Execute method. +func (tc *TiDBContext) Execute(ctx context.Context, sql string) (rs []ResultSet, err error) { + rsList, err := tc.session.Execute(ctx, sql) + if err != nil { + return + } + if len(rsList) == 0 { // result ok + return + } + rs = make([]ResultSet, len(rsList)) + for i := 0; i < len(rsList); i++ { + rs[i] = &tidbResultSet{ + recordSet: rsList[i], + } + } + return +} + +// SetClientCapability implements QueryCtx SetClientCapability method. +func (tc *TiDBContext) SetClientCapability(flags uint32) { + tc.session.SetClientCapability(flags) +} + +// Close implements QueryCtx Close method. +func (tc *TiDBContext) Close() error { + tc.session.Close() + return nil +} + +// FieldList implements QueryCtx FieldList method. +func (tc *TiDBContext) FieldList(table string) (columns []*ColumnInfo, err error) { + fields, err := tc.session.FieldList(table) + if err != nil { + return nil, err + } + columns = make([]*ColumnInfo, 0, len(fields)) + for _, f := range fields { + columns = append(columns, convertColumnInfo(f)) + } + return columns, nil +} + +// SetCommandValue implements QueryCtx SetCommandValue method. +func (tc *TiDBContext) SetCommandValue(command byte) { + tc.session.SetCommandValue(command) +} + +// GetSessionVars return SessionVars. 
+func (tc *TiDBContext) GetSessionVars() *variable.SessionVars { + return tc.session.GetSessionVars() +} + +type tidbResultSet struct { + recordSet sqlexec.RecordSet + columns []*ColumnInfo + rows []chunk.Row + closed int32 +} + +func (trs *tidbResultSet) NewChunk() *chunk.Chunk { + return trs.recordSet.NewChunk() +} + +func (trs *tidbResultSet) Next(ctx context.Context, req *chunk.Chunk) error { + return trs.recordSet.Next(ctx, req) +} + +func (trs *tidbResultSet) StoreFetchedRows(rows []chunk.Row) { + trs.rows = rows +} + +func (trs *tidbResultSet) GetFetchedRows() []chunk.Row { + if trs.rows == nil { + trs.rows = make([]chunk.Row, 0, 1024) + } + return trs.rows +} + +func (trs *tidbResultSet) Close() error { + if !atomic.CompareAndSwapInt32(&trs.closed, 0, 1) { + return nil + } + err := trs.recordSet.Close() + trs.recordSet = nil + return err +} + +// OnFetchReturned implements fetchNotifier#OnFetchReturned +func (trs *tidbResultSet) OnFetchReturned() { + if cl, ok := trs.recordSet.(fetchNotifier); ok { + cl.OnFetchReturned() + } +} + +func (trs *tidbResultSet) Columns() []*ColumnInfo { + if trs.columns != nil { + return trs.columns + } + if trs.columns == nil { + fields := trs.recordSet.Fields() + for _, v := range fields { + trs.columns = append(trs.columns, convertColumnInfo(v)) + } + } + return trs.columns +} + +func convertColumnInfo(fld *ast.ResultField) (ci *ColumnInfo) { + ci = &ColumnInfo{ + Name: fld.ColumnAsName.O, + OrgName: fld.Column.Name.O, + Table: fld.TableAsName.O, + Schema: fld.DBName.O, + Flag: uint16(fld.Column.Flag), + Charset: uint16(mysql.CharsetNameToID(fld.Column.Charset)), + Type: fld.Column.Tp, + } + + if fld.Table != nil { + ci.OrgTable = fld.Table.Name.O + } + if fld.Column.Flen == types.UnspecifiedLength { + ci.ColumnLength = 0 + } else { + ci.ColumnLength = uint32(fld.Column.Flen) + } + if fld.Column.Tp == mysql.TypeNewDecimal { + // Consider the negative sign. + ci.ColumnLength++ + if fld.Column.Decimal > int(types.DefaultFsp) { + // Consider the decimal point. + ci.ColumnLength++ + } + } else if types.IsString(fld.Column.Tp) { + // Fix issue #4540. + // The flen is a hint, not a precise value, so most client will not use the value. + // But we found in rare MySQL client, like Navicat for MySQL(version before 12) will truncate + // the `show create table` result. To fix this case, we must use a large enough flen to prevent + // the truncation, in MySQL, it will multiply bytes length by a multiple based on character set. + // For examples: + // * latin, the multiple is 1 + // * gb2312, the multiple is 2 + // * Utf-8, the multiple is 3 + // * utf8mb4, the multiple is 4 + // We used to check non-string types to avoid the truncation problem in some MySQL + // client such as Navicat. Now we only allow string type enter this branch. + charsetDesc, err := charset.GetCharsetDesc(fld.Column.Charset) + if err != nil { + ci.ColumnLength = ci.ColumnLength * 4 + } else { + ci.ColumnLength = ci.ColumnLength * uint32(charsetDesc.Maxlen) + } + } + + if fld.Column.Decimal == types.UnspecifiedLength { + if fld.Column.Tp == mysql.TypeDuration { + ci.Decimal = uint8(types.DefaultFsp) + } else { + ci.Decimal = mysql.NotFixedDec + } + } else { + ci.Decimal = uint8(fld.Column.Decimal) + } + + // Keep things compatible for old clients. 
+ // Refer to mysql-server/sql/protocol.cc send_result_set_metadata() + if ci.Type == mysql.TypeVarchar { + ci.Type = mysql.TypeVarString + } + return +} diff --git a/server/driver_tidb_test.go b/server/driver_tidb_test.go new file mode 100644 index 0000000..e440c2d --- /dev/null +++ b/server/driver_tidb_test.go @@ -0,0 +1,113 @@ +// Copyright 2017 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package server + +import ( + . "github.com/pingcap/check" + "github.com/pingcap/tidb/parser/ast" + "github.com/pingcap/tidb/parser/charset" + "github.com/pingcap/tidb/parser/model" + "github.com/pingcap/tidb/parser/mysql" + "github.com/pingcap/tidb/types" +) + +type tidbResultSetTestSuite struct{} + +var _ = Suite(tidbResultSetTestSuite{}) + +func createColumnByTypeAndLen(tp byte, len uint32) *ColumnInfo { + return &ColumnInfo{ + Schema: "test", + Table: "dual", + OrgTable: "", + Name: "a", + OrgName: "a", + ColumnLength: len, + Charset: uint16(mysql.CharsetNameToID(charset.CharsetUTF8)), + Flag: uint16(mysql.UnsignedFlag), + Decimal: uint8(0), + Type: tp, + DefaultValueLength: uint64(0), + DefaultValue: nil, + } +} +func (ts tidbResultSetTestSuite) TestConvertColumnInfo(c *C) { + // Test "mysql.TypeBit", for: https://github.com/pingcap/tidb/issues/5405. + resultField := ast.ResultField{ + Column: &model.ColumnInfo{ + Name: model.NewCIStr("a"), + ID: 0, + Offset: 0, + FieldType: types.FieldType{ + Tp: mysql.TypeBit, + Flag: mysql.UnsignedFlag, + Flen: 1, + Decimal: 0, + Charset: charset.CharsetUTF8, + Collate: charset.CollationUTF8, + }, + Comment: "column a is the first column in table dual", + }, + ColumnAsName: model.NewCIStr("a"), + TableAsName: model.NewCIStr("dual"), + DBName: model.NewCIStr("test"), + } + colInfo := convertColumnInfo(&resultField) + c.Assert(colInfo, DeepEquals, createColumnByTypeAndLen(mysql.TypeBit, 1)) + + // Test "mysql.TypeTiny", for: https://github.com/pingcap/tidb/issues/5405. 
+ resultField = ast.ResultField{ + Column: &model.ColumnInfo{ + Name: model.NewCIStr("a"), + ID: 0, + Offset: 0, + FieldType: types.FieldType{ + Tp: mysql.TypeTiny, + Flag: mysql.UnsignedFlag, + Flen: 1, + Decimal: 0, + Charset: charset.CharsetUTF8, + Collate: charset.CollationUTF8, + }, + Comment: "column a is the first column in table dual", + }, + ColumnAsName: model.NewCIStr("a"), + TableAsName: model.NewCIStr("dual"), + DBName: model.NewCIStr("test"), + } + colInfo = convertColumnInfo(&resultField) + c.Assert(colInfo, DeepEquals, createColumnByTypeAndLen(mysql.TypeTiny, 1)) + + resultField = ast.ResultField{ + Column: &model.ColumnInfo{ + Name: model.NewCIStr("a"), + ID: 0, + Offset: 0, + FieldType: types.FieldType{ + Tp: mysql.TypeYear, + Flag: mysql.ZerofillFlag, + Flen: 4, + Decimal: 0, + Charset: charset.CharsetBin, + Collate: charset.CollationBin, + }, + Comment: "column a is the first column in table dual", + }, + ColumnAsName: model.NewCIStr("a"), + TableAsName: model.NewCIStr("dual"), + DBName: model.NewCIStr("test"), + } + colInfo = convertColumnInfo(&resultField) + c.Assert(colInfo.ColumnLength, Equals, uint32(4)) +} diff --git a/server/http_status.go b/server/http_status.go new file mode 100644 index 0000000..f13ff03 --- /dev/null +++ b/server/http_status.go @@ -0,0 +1,107 @@ +// Copyright 2017 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package server + +import ( + "bytes" + "fmt" + "go.uber.org/zap" + "net" + "net/http" + "net/http/pprof" + + "github.com/gorilla/mux" + "github.com/pingcap/tidb/util" + "github.com/pingcap/tidb/util/logutil" + "github.com/soheilhy/cmux" +) + +const defaultStatusPort = 10080 + +func (s *Server) startStatusHTTP() { + go s.startHTTPServer() +} + +func (s *Server) startHTTPServer() { + router := mux.NewRouter() + + addr := fmt.Sprintf("%s:%d", s.cfg.Status.StatusHost, s.cfg.Status.StatusPort) + if s.cfg.Status.StatusPort == 0 { + addr = fmt.Sprintf("%s:%d", s.cfg.Status.StatusHost, defaultStatusPort) + } + + serverMux := http.NewServeMux() + serverMux.Handle("/", router) + + serverMux.HandleFunc("/debug/pprof/", pprof.Index) + serverMux.HandleFunc("/debug/pprof/cmdline", pprof.Cmdline) + serverMux.HandleFunc("/debug/pprof/profile", pprof.Profile) + serverMux.HandleFunc("/debug/pprof/symbol", pprof.Symbol) + serverMux.HandleFunc("/debug/pprof/trace", pprof.Trace) + + var ( + httpRouterPage bytes.Buffer + pathTemplate string + err error + ) + + err = router.Walk(func(route *mux.Route, router *mux.Router, ancestors []*mux.Route) error { + pathTemplate, err = route.GetPathTemplate() + if err != nil { + logutil.BgLogger().Error("get HTTP router path failed", zap.Error(err)) + } + name := route.GetName() + // If the name attribute is not set, GetName returns "". 
+ if name != "" { + httpRouterPage.WriteString("" + name + "") + } + return nil + }) + if err != nil { + logutil.BgLogger().Error("generate root failed", zap.Error(err)) + } + httpRouterPage.WriteString("Debug") + httpRouterPage.WriteString("") + router.HandleFunc("/", func(responseWriter http.ResponseWriter, request *http.Request) { + _, err = responseWriter.Write([]byte(httpRouterPage.String())) + if err != nil { + logutil.BgLogger().Error("write HTTP index page failed", zap.Error(err)) + } + }) + + s.setupStatusServer(addr, serverMux) +} + +func (s *Server) setupStatusServer(addr string, serverMux *http.ServeMux) { + l, err := net.Listen("tcp", addr) + if err != nil { + logutil.BgLogger().Info("listen failed", zap.Error(err)) + return + } + m := cmux.New(l) + // Match connections in order: + // First HTTP, and otherwise grpc. + httpL := m.Match(cmux.HTTP1Fast()) + + s.statusServer = &http.Server{Addr: addr, Handler: CorsHandler{handler: serverMux, cfg: s.cfg}} + + go util.WithRecovery(func() { + err := s.statusServer.Serve(httpL) + logutil.BgLogger().Error("http server error", zap.Error(err)) + }, nil) + err = m.Serve() + if err != nil { + logutil.BgLogger().Error("start status/rpc server error", zap.Error(err)) + } +} diff --git a/server/packetio.go b/server/packetio.go new file mode 100644 index 0000000..e785a50 --- /dev/null +++ b/server/packetio.go @@ -0,0 +1,175 @@ +// Copyright 2013 The Go-MySQL-Driver Authors. All rights reserved. +// +// This Source Code Form is subject to the terms of the Mozilla Public +// License, v. 2.0. If a copy of the MPL was not distributed with this file, +// You can obtain one at http://mozilla.org/MPL/2.0/. + +// The MIT License (MIT) +// +// Copyright (c) 2014 wandoulabs +// Copyright (c) 2014 siddontang +// +// Permission is hereby granted, free of charge, to any person obtaining a copy of +// this software and associated documentation files (the "Software"), to deal in +// the Software without restriction, including without limitation the rights to +// use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of +// the Software, and to permit persons to whom the Software is furnished to do so, +// subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in all +// copies or substantial portions of the Software. + +// Copyright 2015 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package server + +import ( + "bufio" + "io" + "time" + + "github.com/pingcap/errors" + "github.com/pingcap/tidb/parser/mysql" + "github.com/pingcap/tidb/parser/terror" +) + +const defaultWriterSize = 16 * 1024 + +// packetIO is a helper to read and write data in packet format. 
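+// A MySQL packet is a 4-byte header (3-byte little-endian payload length plus a 1-byte sequence
+// number) followed by the payload; e.g. payload {0x01, 0x02, 0x03} with sequence 0 goes on the
+// wire as 0x03 0x00 0x00 0x00 0x01 0x02 0x03. Payloads of mysql.MaxPayloadLen bytes are split
+// across packets, which readPacket and writePacket below reassemble and split respectively.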
+type packetIO struct { + bufReadConn *bufferedReadConn + bufWriter *bufio.Writer + sequence uint8 + readTimeout time.Duration +} + +func newPacketIO(bufReadConn *bufferedReadConn) *packetIO { + p := &packetIO{sequence: 0} + p.setBufferedReadConn(bufReadConn) + return p +} + +func (p *packetIO) setBufferedReadConn(bufReadConn *bufferedReadConn) { + p.bufReadConn = bufReadConn + p.bufWriter = bufio.NewWriterSize(bufReadConn, defaultWriterSize) +} + +func (p *packetIO) setReadTimeout(timeout time.Duration) { + p.readTimeout = timeout +} + +func (p *packetIO) readOnePacket() ([]byte, error) { + var header [4]byte + if p.readTimeout > 0 { + if err := p.bufReadConn.SetReadDeadline(time.Now().Add(p.readTimeout)); err != nil { + return nil, err + } + } + if _, err := io.ReadFull(p.bufReadConn, header[:]); err != nil { + return nil, errors.Trace(err) + } + + sequence := header[3] + if sequence != p.sequence { + return nil, errInvalidSequence.GenWithStack("invalid sequence %d != %d", sequence, p.sequence) + } + + p.sequence++ + + length := int(uint32(header[0]) | uint32(header[1])<<8 | uint32(header[2])<<16) + + data := make([]byte, length) + if p.readTimeout > 0 { + if err := p.bufReadConn.SetReadDeadline(time.Now().Add(p.readTimeout)); err != nil { + return nil, err + } + } + if _, err := io.ReadFull(p.bufReadConn, data); err != nil { + return nil, errors.Trace(err) + } + return data, nil +} + +func (p *packetIO) readPacket() ([]byte, error) { + data, err := p.readOnePacket() + if err != nil { + return nil, errors.Trace(err) + } + + if len(data) < mysql.MaxPayloadLen { + return data, nil + } + + // handle multi-packet + for { + buf, err := p.readOnePacket() + if err != nil { + return nil, errors.Trace(err) + } + + data = append(data, buf...) + + if len(buf) < mysql.MaxPayloadLen { + break + } + } + + return data, nil +} + +// writePacket writes data that already have header +func (p *packetIO) writePacket(data []byte) error { + length := len(data) - 4 + + for length >= mysql.MaxPayloadLen { + data[0] = 0xff + data[1] = 0xff + data[2] = 0xff + + data[3] = p.sequence + + if n, err := p.bufWriter.Write(data[:4+mysql.MaxPayloadLen]); err != nil { + return errors.Trace(mysql.ErrBadConn) + } else if n != (4 + mysql.MaxPayloadLen) { + return errors.Trace(mysql.ErrBadConn) + } else { + p.sequence++ + length -= mysql.MaxPayloadLen + data = data[mysql.MaxPayloadLen:] + } + } + + data[0] = byte(length) + data[1] = byte(length >> 8) + data[2] = byte(length >> 16) + data[3] = p.sequence + + if n, err := p.bufWriter.Write(data); err != nil { + terror.Log(errors.Trace(err)) + return errors.Trace(mysql.ErrBadConn) + } else if n != len(data) { + return errors.Trace(mysql.ErrBadConn) + } else { + p.sequence++ + return nil + } +} + +func (p *packetIO) flush() error { + err := p.bufWriter.Flush() + if err != nil { + return errors.Trace(err) + } + return err +} diff --git a/server/packetio_test.go b/server/packetio_test.go new file mode 100644 index 0000000..002e887 --- /dev/null +++ b/server/packetio_test.go @@ -0,0 +1,126 @@ +// Copyright 2019 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package server + +import ( + "bufio" + "bytes" + "net" + "time" + + . "github.com/pingcap/check" + "github.com/pingcap/tidb/parser/mysql" +) + +type PacketIOTestSuite struct { +} + +var _ = Suite(new(PacketIOTestSuite)) + +func (s *PacketIOTestSuite) TestWrite(c *C) { + // Test write one packet + var outBuffer bytes.Buffer + pkt := &packetIO{bufWriter: bufio.NewWriter(&outBuffer)} + err := pkt.writePacket([]byte{0x00, 0x00, 0x00, 0x00, 0x01, 0x02, 0x03}) + c.Assert(err, IsNil) + err = pkt.flush() + c.Assert(err, IsNil) + c.Assert(outBuffer.Bytes(), DeepEquals, []byte{0x03, 0x00, 0x00, 0x00, 0x01, 0x02, 0x03}) + + // Test write more than one packet + outBuffer.Reset() + largeInput := make([]byte, mysql.MaxPayloadLen+4) + pkt = &packetIO{bufWriter: bufio.NewWriter(&outBuffer)} + err = pkt.writePacket(largeInput) + c.Assert(err, IsNil) + err = pkt.flush() + c.Assert(err, IsNil) + res := outBuffer.Bytes() + c.Assert(res[0], Equals, byte(0xff)) + c.Assert(res[1], Equals, byte(0xff)) + c.Assert(res[2], Equals, byte(0xff)) + c.Assert(res[3], Equals, byte(0)) +} + +func (s *PacketIOTestSuite) TestRead(c *C) { + var inBuffer bytes.Buffer + _, err := inBuffer.Write([]byte{0x01, 0x00, 0x00, 0x00, 0x01}) + c.Assert(err, IsNil) + // Test read one packet + brc := newBufferedReadConn(&bytesConn{inBuffer}) + pkt := newPacketIO(brc) + bytes, err := pkt.readPacket() + c.Assert(err, IsNil) + c.Assert(pkt.sequence, Equals, uint8(1)) + c.Assert(bytes, DeepEquals, []byte{0x01}) + + inBuffer.Reset() + buf := make([]byte, mysql.MaxPayloadLen+9) + buf[0] = 0xff + buf[1] = 0xff + buf[2] = 0xff + buf[3] = 0 + buf[2+mysql.MaxPayloadLen] = 0x00 + buf[3+mysql.MaxPayloadLen] = 0x00 + buf[4+mysql.MaxPayloadLen] = 0x01 + buf[7+mysql.MaxPayloadLen] = 0x01 + buf[8+mysql.MaxPayloadLen] = 0x0a + + _, err = inBuffer.Write(buf) + c.Assert(err, IsNil) + // Test read multiple packets + brc = newBufferedReadConn(&bytesConn{inBuffer}) + pkt = newPacketIO(brc) + bytes, err = pkt.readPacket() + c.Assert(err, IsNil) + c.Assert(pkt.sequence, Equals, uint8(2)) + c.Assert(len(bytes), Equals, mysql.MaxPayloadLen+1) + c.Assert(bytes[mysql.MaxPayloadLen], DeepEquals, byte(0x0a)) +} + +type bytesConn struct { + b bytes.Buffer +} + +func (c *bytesConn) Read(b []byte) (n int, err error) { + return c.b.Read(b) +} + +func (c *bytesConn) Write(b []byte) (n int, err error) { + return 0, nil +} + +func (c *bytesConn) Close() error { + return nil +} + +func (c *bytesConn) LocalAddr() net.Addr { + return nil +} + +func (c *bytesConn) RemoteAddr() net.Addr { + return nil +} + +func (c *bytesConn) SetDeadline(t time.Time) error { + return nil +} + +func (c *bytesConn) SetReadDeadline(t time.Time) error { + return nil +} + +func (c *bytesConn) SetWriteDeadline(t time.Time) error { + return nil +} diff --git a/server/server.go b/server/server.go new file mode 100644 index 0000000..92835bb --- /dev/null +++ b/server/server.go @@ -0,0 +1,336 @@ +// The MIT License (MIT) +// +// Copyright (c) 2014 wandoulabs +// Copyright (c) 2014 siddontang +// +// Permission is hereby granted, free of charge, to any person obtaining a copy of +// this software and associated documentation files (the "Software"), to deal in +// 
the Software without restriction, including without limitation the rights to +// use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of +// the Software, and to permit persons to whom the Software is furnished to do so, +// subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in all +// copies or substantial portions of the Software. + +// Copyright 2015 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package server + +import ( + "context" + "crypto/tls" + "fmt" + "math/rand" + "net" + "net/http" + "sync" + "sync/atomic" + "time" + + // For pprof + _ "net/http/pprof" + + "github.com/pingcap/errors" + "github.com/pingcap/tidb/config" + "github.com/pingcap/tidb/domain" + "github.com/pingcap/tidb/parser/mysql" + "github.com/pingcap/tidb/parser/terror" + + "github.com/pingcap/tidb/util" + "github.com/pingcap/tidb/util/logutil" + "go.uber.org/zap" +) + +var ( + baseConnID uint32 +) + +var ( + errInvalidSequence = terror.ClassServer.New(mysql.ErrInvalidSequence, mysql.MySQLErrName[mysql.ErrInvalidSequence]) + errInvalidType = terror.ClassServer.New(mysql.ErrInvalidType, mysql.MySQLErrName[mysql.ErrInvalidType]) + errAccessDenied = terror.ClassServer.New(mysql.ErrAccessDenied, mysql.MySQLErrName[mysql.ErrAccessDenied]) +) + +// DefaultCapability is the capability of the server when it is created using the default configuration. +// When server is configured with SSL, the server will have extra capabilities compared to DefaultCapability. +const defaultCapability = mysql.ClientLongPassword | mysql.ClientLongFlag | + mysql.ClientConnectWithDB | mysql.ClientProtocol41 | + mysql.ClientTransactions | mysql.ClientSecureConnection | mysql.ClientFoundRows | + mysql.ClientMultiStatements | mysql.ClientMultiResults | mysql.ClientLocalFiles | + mysql.ClientConnectAtts | mysql.ClientPluginAuth | mysql.ClientInteractive + +// Server is the MySQL protocol server +type Server struct { + cfg *config.Config + tlsConfig *tls.Config + driver IDriver + listener net.Listener + socket net.Listener + rwlock sync.RWMutex + clients map[uint32]*clientConn + capability uint32 + dom *domain.Domain + + // stopListenerCh is used when a critical error occurred, we don't want to exit the process, because there may be + // a supervisor automatically restart it, then new client connection will be created, but we can't server it. + // So we just stop the listener and store to force clients to chose other TiDB servers. + stopListenerCh chan struct{} + statusServer *http.Server +} + +// ConnectionCount gets current connection count. +func (s *Server) ConnectionCount() int { + s.rwlock.RLock() + cnt := len(s.clients) + s.rwlock.RUnlock() + return cnt +} + +// SetDomain use to set the server domain. +func (s *Server) SetDomain(dom *domain.Domain) { + s.dom = dom +} + +// newConn creates a new *clientConn from a net.Conn. +// It allocates a connection ID and random salt data for authentication. 
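+// The 20-byte salt is the random auth-plugin data sent to the client in the
+// initial handshake and used to scramble the password (e.g. by mysql_native_password).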
+func (s *Server) newConn(conn net.Conn) *clientConn { + cc := newClientConn(s) + if tcpConn, ok := conn.(*net.TCPConn); ok { + if err := tcpConn.SetKeepAlive(true); err != nil { + logutil.BgLogger().Error("failed to set tcp keep alive option", zap.Error(err)) + } + } + cc.setConn(conn) + cc.salt = util.RandomBuf(20) + return cc +} + +// NewServer creates a new Server. +func NewServer(cfg *config.Config, driver IDriver) (*Server, error) { + s := &Server{ + cfg: cfg, + driver: driver, + clients: make(map[uint32]*clientConn), + stopListenerCh: make(chan struct{}, 1), + } + + s.capability = defaultCapability + if s.tlsConfig != nil { + s.capability |= mysql.ClientSSL + } + + var err error + + if s.cfg.Host != "" && s.cfg.Port != 0 { + addr := fmt.Sprintf("%s:%d", s.cfg.Host, s.cfg.Port) + if s.listener, err = net.Listen("tcp", addr); err == nil { + logutil.BgLogger().Info("server is running MySQL protocol", zap.String("addr", addr)) + } + } else { + err = errors.New("Server not configured to listen on either -socket or -host and -port") + } + + if err != nil { + return nil, errors.Trace(err) + } + + // Init rand seed for randomBuf() + rand.Seed(time.Now().UTC().UnixNano()) + return s, nil +} + +// Run runs the server. +func (s *Server) Run() error { + + // Start HTTP API to report tidb info such as TPS. + if s.cfg.Status.ReportStatus { + s.startStatusHTTP() + } + for { + conn, err := s.listener.Accept() + if err != nil { + if opErr, ok := err.(*net.OpError); ok { + if opErr.Err.Error() == "use of closed network connection" { + return nil + } + } + + logutil.BgLogger().Error("accept failed", zap.Error(err)) + return errors.Trace(err) + } + if s.shouldStopListener() { + err = conn.Close() + terror.Log(errors.Trace(err)) + break + } + + clientConn := s.newConn(conn) + + go s.onConn(clientConn) + } + err := s.listener.Close() + terror.Log(errors.Trace(err)) + s.listener = nil + for { + logutil.BgLogger().Error("listener stopped, waiting for manual kill.") + time.Sleep(time.Minute) + } +} + +func (s *Server) shouldStopListener() bool { + select { + case <-s.stopListenerCh: + return true + default: + return false + } +} + +// Close closes the server. +func (s *Server) Close() { + s.rwlock.Lock() + defer s.rwlock.Unlock() + + if s.listener != nil { + err := s.listener.Close() + terror.Log(errors.Trace(err)) + s.listener = nil + } + if s.socket != nil { + err := s.socket.Close() + terror.Log(errors.Trace(err)) + s.socket = nil + } + if s.statusServer != nil { + err := s.statusServer.Close() + terror.Log(errors.Trace(err)) + s.statusServer = nil + } + +} + +// onConn runs in its own goroutine, handles queries from this connection. +func (s *Server) onConn(conn *clientConn) { + ctx := logutil.WithConnID(context.Background(), conn.connectionID) + if err := conn.handshake(ctx); err != nil { + err = conn.Close() + terror.Log(errors.Trace(err)) + return + } + + logutil.Logger(ctx).Info("new connection", zap.String("remoteAddr", conn.bufReadConn.RemoteAddr().String())) + + defer func() { + logutil.Logger(ctx).Info("connection closed") + }() + s.rwlock.Lock() + s.clients[conn.connectionID] = conn + s.rwlock.Unlock() + conn.Run(ctx) +} + +func killConn(conn *clientConn) { + sessVars := conn.ctx.GetSessionVars() + atomic.CompareAndSwapUint32(&sessVars.Killed, 0, 1) +} + +// KillAllConnections kills all connections when server is not gracefully shutdown. 
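+// It is also used as the fallback by TryGracefulDown below when the
+// graceful-shutdown timeout expires.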
+func (s *Server) KillAllConnections() { + logutil.BgLogger().Info("[server] kill all connections.") + + s.rwlock.RLock() + defer s.rwlock.RUnlock() + for _, conn := range s.clients { + atomic.StoreInt32(&conn.status, connStatusShutdown) + if err := conn.closeWithoutLock(); err != nil { + terror.Log(err) + } + killConn(conn) + } +} + +var gracefulCloseConnectionsTimeout = 15 * time.Second + +// TryGracefulDown will try to gracefully close all connection first with timeout. if timeout, will close all connection directly. +func (s *Server) TryGracefulDown() { + ctx, cancel := context.WithTimeout(context.Background(), gracefulCloseConnectionsTimeout) + defer cancel() + done := make(chan struct{}) + go func() { + s.GracefulDown(ctx, done) + }() + select { + case <-ctx.Done(): + s.KillAllConnections() + case <-done: + return + } +} + +// GracefulDown waits all clients to close. +func (s *Server) GracefulDown(ctx context.Context, done chan struct{}) { + logutil.Logger(ctx).Info("[server] graceful shutdown.") + + count := s.ConnectionCount() + for i := 0; count > 0; i++ { + s.kickIdleConnection() + + count = s.ConnectionCount() + if count == 0 { + break + } + // Print information for every 30s. + if i%30 == 0 { + logutil.Logger(ctx).Info("graceful shutdown...", zap.Int("conn count", count)) + } + ticker := time.After(time.Second) + select { + case <-ctx.Done(): + return + case <-ticker: + } + } + close(done) +} + +func (s *Server) kickIdleConnection() { + var conns []*clientConn + s.rwlock.RLock() + for _, cc := range s.clients { + if cc.ShutdownOrNotify() { + // Shutdowned conn will be closed by us, and notified conn will exist themselves. + conns = append(conns, cc) + } + } + s.rwlock.RUnlock() + + for _, cc := range conns { + err := cc.Close() + if err != nil { + logutil.BgLogger().Error("close connection", zap.Error(err)) + } + } +} + +func init() { + serverMySQLErrCodes := map[terror.ErrCode]uint16{ + mysql.ErrNotAllowedCommand: mysql.ErrNotAllowedCommand, + mysql.ErrAccessDenied: mysql.ErrAccessDenied, + mysql.ErrUnknownFieldType: mysql.ErrUnknownFieldType, + mysql.ErrInvalidSequence: mysql.ErrInvalidSequence, + mysql.ErrInvalidType: mysql.ErrInvalidType, + } + terror.ErrClassToMySQLCodes[terror.ClassServer] = serverMySQLErrCodes +} diff --git a/server/server_test.go b/server/server_test.go new file mode 100644 index 0000000..93b6465 --- /dev/null +++ b/server/server_test.go @@ -0,0 +1,181 @@ +// Copyright 2015 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package server + +import ( + "database/sql" + "fmt" + "io/ioutil" + "net/http" + "os" + "testing" + "time" + + "github.com/go-sql-driver/mysql" + . 
"github.com/pingcap/check" + "github.com/pingcap/log" + "github.com/pingcap/tidb/util/logutil" + "go.uber.org/zap" +) + +func TestT(t *testing.T) { + CustomVerboseFlag = true + logLevel := os.Getenv("log_level") + logutil.InitZapLogger(logutil.NewLogConfig(logLevel, logutil.DefaultLogFormat, logutil.EmptyFileLogConfig, false)) + TestingT(t) +} + +var defaultDSNConfig = mysql.Config{ + User: "root", + Net: "tcp", + Addr: "127.0.0.1:4001", + DBName: "test", + Strict: true, +} + +type configOverrider func(*mysql.Config) + +// getDSN generates a DSN string for MySQL connection. +func getDSN(overriders ...configOverrider) string { + var config = defaultDSNConfig + for _, overrider := range overriders { + if overrider != nil { + overrider(&config) + } + } + return config.FormatDSN() +} + +// runTests runs tests using the default database `test`. +func runTests(c *C, overrider configOverrider, tests ...func(dbt *DBTest)) { + db, err := sql.Open("mysql", getDSN(overrider)) + c.Assert(err, IsNil, Commentf("Error connecting")) + defer db.Close() + + db.Exec("DROP TABLE IF EXISTS test") + + dbt := &DBTest{c, db} + for _, test := range tests { + test(dbt) + dbt.db.Exec("DROP TABLE IF EXISTS test") + } +} + +// runTestsOnNewDB runs tests using a specified database which will be created before the test and destroyed after the test. +func runTestsOnNewDB(c *C, overrider configOverrider, dbName string, tests ...func(dbt *DBTest)) { + dsn := getDSN(overrider, func(config *mysql.Config) { + config.DBName = "" + }) + db, err := sql.Open("mysql", dsn) + c.Assert(err, IsNil, Commentf("Error connecting")) + defer db.Close() + + _, err = db.Exec(fmt.Sprintf("DROP DATABASE IF EXISTS `%s`;", dbName)) + c.Assert(err, IsNil, Commentf("Error drop database %s: %s", dbName, err)) + + _, err = db.Exec(fmt.Sprintf("CREATE DATABASE `%s`;", dbName)) + c.Assert(err, IsNil, Commentf("Error create database %s: %s", dbName, err)) + + defer func() { + _, err = db.Exec(fmt.Sprintf("DROP DATABASE IF EXISTS `%s`;", dbName)) + c.Assert(err, IsNil, Commentf("Error drop database %s: %s", dbName, err)) + }() + + _, err = db.Exec(fmt.Sprintf("USE `%s`;", dbName)) + c.Assert(err, IsNil, Commentf("Error use database %s: %s", dbName, err)) + + dbt := &DBTest{c, db} + for _, test := range tests { + test(dbt) + dbt.db.Exec("DROP TABLE IF EXISTS test") + } +} + +type DBTest struct { + *C + db *sql.DB +} + +func (dbt *DBTest) mustExec(query string) (res sql.Result) { + res, err := dbt.db.Exec(query) + dbt.Assert(err, IsNil, Commentf("Exec %s", query)) + return res +} + +func runTestIssue3662(c *C) { + db, err := sql.Open("mysql", getDSN(func(config *mysql.Config) { + config.DBName = "non_existing_schema" + })) + c.Assert(err, IsNil) + defer db.Close() + + // According to documentation, "Open may just validate its arguments without + // creating a connection to the database. To verify that the data source name + // is valid, call Ping." 
+ err = db.Ping() + c.Assert(err, NotNil) + c.Assert(err.Error(), Equals, "Error 1049: Unknown database 'non_existing_schema'") +} + +func runTestDBNameEscape(c *C) { + runTests(c, nil, func(dbt *DBTest) { + dbt.mustExec("CREATE DATABASE `aa-a`;") + }) + runTests(c, func(config *mysql.Config) { + config.DBName = "aa-a" + }, func(dbt *DBTest) { + dbt.mustExec(`USE mysql;`) + dbt.mustExec("DROP DATABASE `aa-a`") + }) +} + +func runTestResultFieldTableIsNull(c *C) { + runTestsOnNewDB(c, nil, "ResultFieldTableIsNull", func(dbt *DBTest) { + dbt.mustExec("drop table if exists test;") + dbt.mustExec("create table test (c int);") + dbt.mustExec("explain select * from test;") + }) +} + +const retryTime = 100 + +func waitUntilServerOnline(statusPort uint) { + // connect server + retry := 0 + for ; retry < retryTime; retry++ { + time.Sleep(time.Millisecond * 10) + db, err := sql.Open("mysql", getDSN()) + if err == nil { + db.Close() + break + } + } + if retry == retryTime { + log.Fatal("failed to connect DB in every 10 ms", zap.Int("retryTime", retryTime)) + } + // connect http status + statusURL := fmt.Sprintf("http://127.0.0.1:%d/status", statusPort) + for retry = 0; retry < retryTime; retry++ { + resp, err := http.Get(statusURL) + if err == nil { + ioutil.ReadAll(resp.Body) + resp.Body.Close() + break + } + time.Sleep(time.Millisecond * 10) + } + if retry == retryTime { + log.Fatal("failed to connect HTTP status in every 10 ms", zap.Int("retryTime", retryTime)) + } +} diff --git a/server/tidb_test.go b/server/tidb_test.go new file mode 100644 index 0000000..ba06214 --- /dev/null +++ b/server/tidb_test.go @@ -0,0 +1,118 @@ +// Copyright 2015 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. +// +build !race + +package server + +import ( + "context" + . 
"github.com/pingcap/check" + "github.com/pingcap/tidb/config" + "github.com/pingcap/tidb/domain" + "github.com/pingcap/tidb/kv" + tmysql "github.com/pingcap/tidb/parser/mysql" + "github.com/pingcap/tidb/session" + "github.com/pingcap/tidb/store/mockstore" +) + +type TidbTestSuite struct { + tidbdrv *TiDBDriver + server *Server + domain *domain.Domain + store kv.Storage +} + +var suite = new(TidbTestSuite) +var _ = Suite(suite) + +func (ts *TidbTestSuite) SetUpSuite(c *C) { + var err error + ts.store, err = mockstore.NewMockTikvStore() + session.DisableStats4Test() + c.Assert(err, IsNil) + ts.domain, err = session.BootstrapSession(ts.store) + c.Assert(err, IsNil) + ts.tidbdrv = NewTiDBDriver(ts.store) + cfg := config.NewConfig() + cfg.Port = 4001 + cfg.Status.ReportStatus = true + cfg.Status.StatusPort = 10090 + + server, err := NewServer(cfg, ts.tidbdrv) + c.Assert(err, IsNil) + ts.server = server + go ts.server.Run() + waitUntilServerOnline(cfg.Status.StatusPort) +} + +func (ts *TidbTestSuite) TearDownSuite(c *C) { + if ts.store != nil { + ts.store.Close() + } + if ts.domain != nil { + ts.domain.Close() + } + if ts.server != nil { + ts.server.Close() + } +} + +func (ts *TidbTestSuite) TestIssues(c *C) { + c.Parallel() + runTestIssue3662(c) +} + +func (ts *TidbTestSuite) TestDBNameEscape(c *C) { + c.Parallel() + runTestDBNameEscape(c) +} + +func (ts *TidbTestSuite) TestResultFieldTableIsNull(c *C) { + c.Parallel() + runTestResultFieldTableIsNull(c) +} + +func (ts *TidbTestSuite) TestShowTablesFlen(c *C) { + qctx, err := ts.tidbdrv.OpenCtx(uint64(0), 0, uint8(tmysql.DefaultCollationID), "test", nil) + c.Assert(err, IsNil) + _, err = qctx.Execute(context.Background(), "use test;") + c.Assert(err, IsNil) + + ctx := context.Background() + testSQL := "create table abcdefghijklmnopqrstuvwxyz (i int)" + _, err = qctx.Execute(ctx, testSQL) + c.Assert(err, IsNil) + rs, err := qctx.Execute(ctx, "show tables") + c.Assert(err, IsNil) + req := rs[0].NewChunk() + err = rs[0].Next(ctx, req) + c.Assert(err, IsNil) + cols := rs[0].Columns() + c.Assert(err, IsNil) + c.Assert(len(cols), Equals, 1) + c.Assert(int(cols[0].ColumnLength), Equals, 26*tmysql.MaxBytesOfCharacter) +} + +func (ts *TidbTestSuite) TestNullFlag(c *C) { + // issue #9689 + qctx, err := ts.tidbdrv.OpenCtx(uint64(0), 0, uint8(tmysql.DefaultCollationID), "test", nil) + c.Assert(err, IsNil) + + ctx := context.Background() + rs, err := qctx.Execute(ctx, "select 1") + c.Assert(err, IsNil) + cols := rs[0].Columns() + c.Assert(len(cols), Equals, 1) + expectFlag := uint16(tmysql.NotNullFlag | tmysql.BinaryFlag) + c.Assert(dumpFlag(cols[0].Type, cols[0].Flag), Equals, expectFlag) +} diff --git a/server/util.go b/server/util.go new file mode 100644 index 0000000..730bc47 --- /dev/null +++ b/server/util.go @@ -0,0 +1,309 @@ +// Copyright 2013 The Go-MySQL-Driver Authors. All rights reserved. +// +// This Source Code Form is subject to the terms of the Mozilla Public +// License, v. 2.0. If a copy of the MPL was not distributed with this file, +// You can obtain one at http://mozilla.org/MPL/2.0/. 
+ +// The MIT License (MIT) +// +// Copyright (c) 2014 wandoulabs +// Copyright (c) 2014 siddontang +// +// Permission is hereby granted, free of charge, to any person obtaining a copy of +// this software and associated documentation files (the "Software"), to deal in +// the Software without restriction, including without limitation the rights to +// use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of +// the Software, and to permit persons to whom the Software is furnished to do so, +// subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in all +// copies or substantial portions of the Software. + +// Copyright 2015 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package server + +import ( + "bytes" + "github.com/pingcap/tidb/config" + "github.com/pingcap/tidb/parser/mysql" + "github.com/pingcap/tidb/types" + "github.com/pingcap/tidb/util/chunk" + "io" + "math" + "net/http" + "strconv" +) + +func parseNullTermString(b []byte) (str []byte, remain []byte) { + off := bytes.IndexByte(b, 0) + if off == -1 { + return nil, b + } + return b[:off], b[off+1:] +} + +func parseLengthEncodedInt(b []byte) (num uint64, isNull bool, n int) { + switch b[0] { + // 251: NULL + case 0xfb: + n = 1 + isNull = true + return + + // 252: value of following 2 + case 0xfc: + num = uint64(b[1]) | uint64(b[2])<<8 + n = 3 + return + + // 253: value of following 3 + case 0xfd: + num = uint64(b[1]) | uint64(b[2])<<8 | uint64(b[3])<<16 + n = 4 + return + + // 254: value of following 8 + case 0xfe: + num = uint64(b[1]) | uint64(b[2])<<8 | uint64(b[3])<<16 | + uint64(b[4])<<24 | uint64(b[5])<<32 | uint64(b[6])<<40 | + uint64(b[7])<<48 | uint64(b[8])<<56 + n = 9 + return + } + + // https://dev.mysql.com/doc/internals/en/integer.html#length-encoded-integer: If the first byte of a packet is a length-encoded integer and its byte value is 0xfe, you must check the length of the packet to verify that it has enough space for a 8-byte integer. + // TODO: 0xff is undefined + + // 0-250: value of first byte + num = uint64(b[0]) + n = 1 + return +} + +func dumpLengthEncodedInt(buffer []byte, n uint64) []byte { + switch { + case n <= 250: + return append(buffer, tinyIntCache[n]...) 
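+		// Values 0-250 are encoded as the single byte itself; larger values get a
+		// prefix byte that mirrors parseLengthEncodedInt above: 0xfc introduces a
+		// 2-byte, 0xfd a 3-byte and 0xfe an 8-byte little-endian integer
+		// (0xfb is reserved for NULL and is never produced here).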
+ + case n <= 0xffff: + return append(buffer, 0xfc, byte(n), byte(n>>8)) + + case n <= 0xffffff: + return append(buffer, 0xfd, byte(n), byte(n>>8), byte(n>>16)) + + case n <= 0xffffffffffffffff: + return append(buffer, 0xfe, byte(n), byte(n>>8), byte(n>>16), byte(n>>24), + byte(n>>32), byte(n>>40), byte(n>>48), byte(n>>56)) + } + + return buffer +} + +func parseLengthEncodedBytes(b []byte) ([]byte, bool, int, error) { + // Get length + num, isNull, n := parseLengthEncodedInt(b) + if num < 1 { + return nil, isNull, n, nil + } + + n += int(num) + + // Check data length + if len(b) >= n { + return b[n-int(num) : n], false, n, nil + } + + return nil, false, n, io.EOF +} + +func dumpLengthEncodedString(buffer []byte, bytes []byte) []byte { + buffer = dumpLengthEncodedInt(buffer, uint64(len(bytes))) + buffer = append(buffer, bytes...) + return buffer +} + +func dumpUint16(buffer []byte, n uint16) []byte { + buffer = append(buffer, byte(n)) + buffer = append(buffer, byte(n>>8)) + return buffer +} + +func dumpUint32(buffer []byte, n uint32) []byte { + buffer = append(buffer, byte(n)) + buffer = append(buffer, byte(n>>8)) + buffer = append(buffer, byte(n>>16)) + buffer = append(buffer, byte(n>>24)) + return buffer +} + +func dumpUint64(buffer []byte, n uint64) []byte { + buffer = append(buffer, byte(n)) + buffer = append(buffer, byte(n>>8)) + buffer = append(buffer, byte(n>>16)) + buffer = append(buffer, byte(n>>24)) + buffer = append(buffer, byte(n>>32)) + buffer = append(buffer, byte(n>>40)) + buffer = append(buffer, byte(n>>48)) + buffer = append(buffer, byte(n>>56)) + return buffer +} + +var tinyIntCache [251][]byte + +func init() { + for i := 0; i < len(tinyIntCache); i++ { + tinyIntCache[i] = []byte{byte(i)} + } +} + +func dumpBinaryRow(buffer []byte, columns []*ColumnInfo, row chunk.Row) ([]byte, error) { + buffer = append(buffer, mysql.OKHeader) + nullBitmapOff := len(buffer) + numBytes4Null := (len(columns) + 7 + 2) / 8 + for i := 0; i < numBytes4Null; i++ { + buffer = append(buffer, 0) + } + for i := range columns { + if row.IsNull(i) { + bytePos := (i + 2) / 8 + bitPos := byte((i + 2) % 8) + buffer[nullBitmapOff+bytePos] |= 1 << bitPos + continue + } + switch columns[i].Type { + case mysql.TypeTiny: + buffer = append(buffer, byte(row.GetInt64(i))) + case mysql.TypeShort, mysql.TypeYear: + buffer = dumpUint16(buffer, uint16(row.GetInt64(i))) + case mysql.TypeInt24, mysql.TypeLong: + buffer = dumpUint32(buffer, uint32(row.GetInt64(i))) + case mysql.TypeLonglong: + buffer = dumpUint64(buffer, row.GetUint64(i)) + case mysql.TypeFloat: + buffer = dumpUint32(buffer, math.Float32bits(row.GetFloat32(i))) + case mysql.TypeDouble: + buffer = dumpUint64(buffer, math.Float64bits(row.GetFloat64(i))) + case mysql.TypeString, mysql.TypeVarString, mysql.TypeVarchar, mysql.TypeBit, + mysql.TypeTinyBlob, mysql.TypeMediumBlob, mysql.TypeLongBlob, mysql.TypeBlob: + buffer = dumpLengthEncodedString(buffer, row.GetBytes(i)) + default: + return nil, errInvalidType.GenWithStack("invalid type %v", columns[i].Type) + } + } + return buffer, nil +} + +func dumpTextRow(buffer []byte, columns []*ColumnInfo, row chunk.Row) ([]byte, error) { + tmp := make([]byte, 0, 20) + for i, col := range columns { + if row.IsNull(i) { + buffer = append(buffer, 0xfb) + continue + } + switch col.Type { + case mysql.TypeTiny, mysql.TypeShort, mysql.TypeInt24, mysql.TypeLong: + tmp = strconv.AppendInt(tmp[:0], row.GetInt64(i), 10) + buffer = dumpLengthEncodedString(buffer, tmp) + case mysql.TypeYear: + year := row.GetInt64(i) + tmp = 
tmp[:0] + if year == 0 { + tmp = append(tmp, '0', '0', '0', '0') + } else { + tmp = strconv.AppendInt(tmp, year, 10) + } + buffer = dumpLengthEncodedString(buffer, tmp) + case mysql.TypeLonglong: + if mysql.HasUnsignedFlag(uint(columns[i].Flag)) { + tmp = strconv.AppendUint(tmp[:0], row.GetUint64(i), 10) + } else { + tmp = strconv.AppendInt(tmp[:0], row.GetInt64(i), 10) + } + buffer = dumpLengthEncodedString(buffer, tmp) + case mysql.TypeFloat: + prec := -1 + if columns[i].Decimal > 0 && int(col.Decimal) != mysql.NotFixedDec { + prec = int(col.Decimal) + } + tmp = appendFormatFloat(tmp[:0], float64(row.GetFloat32(i)), prec, 32) + buffer = dumpLengthEncodedString(buffer, tmp) + case mysql.TypeDouble: + prec := types.UnspecifiedLength + if col.Decimal > 0 && int(col.Decimal) != mysql.NotFixedDec { + prec = int(col.Decimal) + } + tmp = appendFormatFloat(tmp[:0], row.GetFloat64(i), prec, 64) + buffer = dumpLengthEncodedString(buffer, tmp) + case mysql.TypeString, mysql.TypeVarString, mysql.TypeVarchar, mysql.TypeBit, + mysql.TypeTinyBlob, mysql.TypeMediumBlob, mysql.TypeLongBlob, mysql.TypeBlob: + buffer = dumpLengthEncodedString(buffer, row.GetBytes(i)) + default: + return nil, errInvalidType.GenWithStack("invalid type %v", columns[i].Type) + } + } + return buffer, nil +} + +func lengthEncodedIntSize(n uint64) int { + switch { + case n <= 250: + return 1 + + case n <= 0xffff: + return 3 + + case n <= 0xffffff: + return 4 + } + + return 9 +} + +const ( + expFormatBig = 1e15 + expFormatSmall = 1e-15 +) + +func appendFormatFloat(in []byte, fVal float64, prec, bitSize int) []byte { + absVal := math.Abs(fVal) + var out []byte + if prec == types.UnspecifiedLength && (absVal >= expFormatBig || (absVal != 0 && absVal < expFormatSmall)) { + out = strconv.AppendFloat(in, fVal, 'e', prec, bitSize) + valStr := out[len(in):] + // remove the '+' from the string for compatibility. + plusPos := bytes.IndexByte(valStr, '+') + if plusPos > 0 { + plusPosInOut := len(in) + plusPos + out = append(out[:plusPosInOut], out[plusPosInOut+1:]...) + } + } else { + out = strconv.AppendFloat(in, fVal, 'f', prec, bitSize) + } + return out +} + +// CorsHandler adds Cors Header if `cors` config is set. +type CorsHandler struct { + handler http.Handler + cfg *config.Config +} + +func (h CorsHandler) ServeHTTP(w http.ResponseWriter, req *http.Request) { + if h.cfg.Cors != "" { + w.Header().Set("Access-Control-Allow-Origin", h.cfg.Cors) + w.Header().Set("Access-Control-Allow-Methods", "GET") + } + h.handler.ServeHTTP(w, req) +} diff --git a/server/util_test.go b/server/util_test.go new file mode 100644 index 0000000..4957ed4 --- /dev/null +++ b/server/util_test.go @@ -0,0 +1,387 @@ +// Copyright 2015 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package server + +import ( + . 
"github.com/pingcap/check" + "github.com/pingcap/errors" + "github.com/pingcap/tidb/domain" + "github.com/pingcap/tidb/kv" + "github.com/pingcap/tidb/parser/mysql" + "github.com/pingcap/tidb/session" + "github.com/pingcap/tidb/store/mockstore" + "github.com/pingcap/tidb/types" + "github.com/pingcap/tidb/util/chunk" + "github.com/pingcap/tidb/util/testleak" +) + +var _ = Suite(&testUtilSuite{}) + +func newStoreWithBootstrap() (kv.Storage, *domain.Domain, error) { + store, err := mockstore.NewMockTikvStore() + if err != nil { + return nil, nil, errors.Trace(err) + } + session.SetSchemaLease(0) + dom, err := session.BootstrapSession(store) + return store, dom, errors.Trace(err) +} + +type testUtilSuite struct { + store kv.Storage + dom *domain.Domain +} + +func (s *testUtilSuite) SetUpSuite(c *C) { + testleak.BeforeTest() + + var err error + s.store, s.dom, err = newStoreWithBootstrap() + c.Assert(err, IsNil) +} + +func (s *testUtilSuite) TearDownSuite(c *C) { + s.dom.Close() + s.store.Close() + + testleak.AfterTest(c)() +} + +func (s *testUtilSuite) TestDumpTextValue(c *C) { + columns := []*ColumnInfo{{ + Type: mysql.TypeLonglong, + Decimal: mysql.NotFixedDec, + }} + + null := types.NewIntDatum(0) + null.SetNull() + bs, err := dumpTextRow(nil, columns, chunk.MutRowFromDatums([]types.Datum{null}).ToRow()) + c.Assert(err, IsNil) + _, isNull, _, err := parseLengthEncodedBytes(bs) + c.Assert(err, IsNil) + c.Assert(isNull, IsTrue) + + bs, err = dumpTextRow(nil, columns, chunk.MutRowFromDatums([]types.Datum{types.NewIntDatum(10)}).ToRow()) + c.Assert(err, IsNil) + c.Assert(mustDecodeStr(c, bs), Equals, "10") + + bs, err = dumpTextRow(nil, columns, chunk.MutRowFromDatums([]types.Datum{types.NewUintDatum(11)}).ToRow()) + c.Assert(err, IsNil) + c.Assert(mustDecodeStr(c, bs), Equals, "11") + + columns[0].Flag = columns[0].Flag | uint16(mysql.UnsignedFlag) + bs, err = dumpTextRow(nil, columns, chunk.MutRowFromDatums([]types.Datum{types.NewUintDatum(11)}).ToRow()) + c.Assert(err, IsNil) + c.Assert(mustDecodeStr(c, bs), Equals, "11") + + columns[0].Type = mysql.TypeFloat + columns[0].Decimal = 1 + f32 := types.NewFloat32Datum(1.2) + bs, err = dumpTextRow(nil, columns, chunk.MutRowFromDatums([]types.Datum{f32}).ToRow()) + c.Assert(err, IsNil) + c.Assert(mustDecodeStr(c, bs), Equals, "1.2") + + columns[0].Decimal = 2 + bs, err = dumpTextRow(nil, columns, chunk.MutRowFromDatums([]types.Datum{f32}).ToRow()) + c.Assert(err, IsNil) + c.Assert(mustDecodeStr(c, bs), Equals, "1.20") + + f64 := types.NewFloat64Datum(2.2) + columns[0].Type = mysql.TypeDouble + columns[0].Decimal = 1 + bs, err = dumpTextRow(nil, columns, chunk.MutRowFromDatums([]types.Datum{f64}).ToRow()) + c.Assert(err, IsNil) + c.Assert(mustDecodeStr(c, bs), Equals, "2.2") + + columns[0].Decimal = 2 + bs, err = dumpTextRow(nil, columns, chunk.MutRowFromDatums([]types.Datum{f64}).ToRow()) + c.Assert(err, IsNil) + c.Assert(mustDecodeStr(c, bs), Equals, "2.20") + + columns[0].Type = mysql.TypeBlob + bs, err = dumpTextRow(nil, columns, chunk.MutRowFromDatums([]types.Datum{types.NewBytesDatum([]byte("foo"))}).ToRow()) + c.Assert(err, IsNil) + c.Assert(mustDecodeStr(c, bs), Equals, "foo") + + columns[0].Type = mysql.TypeVarchar + bs, err = dumpTextRow(nil, columns, chunk.MutRowFromDatums([]types.Datum{types.NewStringDatum("bar")}).ToRow()) + c.Assert(err, IsNil) + c.Assert(mustDecodeStr(c, bs), Equals, "bar") + + year := types.NewIntDatum(0) + columns[0].Type = mysql.TypeYear + bs, err = dumpTextRow(nil, columns, 
chunk.MutRowFromDatums([]types.Datum{year}).ToRow()) + c.Assert(err, IsNil) + c.Assert(mustDecodeStr(c, bs), Equals, "0000") + + year.SetInt64(1984) + columns[0].Type = mysql.TypeYear + bs, err = dumpTextRow(nil, columns, chunk.MutRowFromDatums([]types.Datum{year}).ToRow()) + c.Assert(err, IsNil) + c.Assert(mustDecodeStr(c, bs), Equals, "1984") +} + +func mustDecodeStr(c *C, b []byte) string { + str, _, _, err := parseLengthEncodedBytes(b) + c.Assert(err, IsNil) + return string(str) +} + +func (s *testUtilSuite) TestAppendFormatFloat(c *C) { + tests := []struct { + fVal float64 + out string + prec int + bitSize int + }{ + { + 99999999999999999999, + "1e20", + -1, + 64, + }, + { + 1e15, + "1e15", + -1, + 64, + }, + { + 9e14, + "900000000000000", + -1, + 64, + }, + { + -9999999999999999, + "-1e16", + -1, + 64, + }, + { + 999999999999999, + "999999999999999", + -1, + 64, + }, + { + 0.000000000000001, + "0.000000000000001", + -1, + 64, + }, + { + 0.0000000000000009, + "9e-16", + -1, + 64, + }, + { + -0.0000000000000009, + "-9e-16", + -1, + 64, + }, + { + 0.11111, + "0.111", + 3, + 64, + }, + { + 0.11111, + "0.111", + 3, + 64, + }, + { + 0.1111111111111111111, + "0.11111111", + -1, + 32, + }, + { + 0.1111111111111111111, + "0.1111111111111111", + -1, + 64, + }, + { + 0.0000000000000009, + "0.000", + 3, + 64, + }, + { + 0, + "0", + -1, + 64, + }, + } + for _, t := range tests { + c.Assert(string(appendFormatFloat(nil, t.fVal, t.prec, t.bitSize)), Equals, t.out) + } +} + +func (s *testUtilSuite) TestDumpLengthEncodedInt(c *C) { + testCases := []struct { + num uint64 + buffer []byte + }{ + { + uint64(0), + []byte{0x00}, + }, + { + uint64(513), + []byte{'\xfc', '\x01', '\x02'}, + }, + { + uint64(197121), + []byte{'\xfd', '\x01', '\x02', '\x03'}, + }, + { + uint64(578437695752307201), + []byte{'\xfe', '\x01', '\x02', '\x03', '\x04', '\x05', '\x06', '\x07', '\x08'}, + }, + } + for _, tc := range testCases { + b := dumpLengthEncodedInt(nil, tc.num) + c.Assert(b, DeepEquals, tc.buffer) + } +} + +func (s *testUtilSuite) TestParseLengthEncodedInt(c *C) { + testCases := []struct { + buffer []byte + num uint64 + isNull bool + n int + }{ + { + []byte{'\xfb'}, + uint64(0), + true, + 1, + }, + { + []byte{'\x00'}, + uint64(0), + false, + 1, + }, + { + []byte{'\xfc', '\x01', '\x02'}, + uint64(513), + false, + 3, + }, + { + []byte{'\xfd', '\x01', '\x02', '\x03'}, + uint64(197121), + false, + 4, + }, + { + []byte{'\xfe', '\x01', '\x02', '\x03', '\x04', '\x05', '\x06', '\x07', '\x08'}, + uint64(578437695752307201), + false, + 9, + }, + } + + for _, tc := range testCases { + num, isNull, n := parseLengthEncodedInt(tc.buffer) + c.Assert(num, Equals, tc.num) + c.Assert(isNull, Equals, tc.isNull) + c.Assert(n, Equals, tc.n) + + c.Assert(lengthEncodedIntSize(tc.num), Equals, tc.n) + } +} + +func (s *testUtilSuite) TestDumpUint(c *C) { + testCases := []uint64{ + 0, + 1, + 1<<64 - 1, + } + parseUint64 := func(b []byte) uint64 { + return uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | + uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | + uint64(b[6])<<48 | uint64(b[7])<<56 + } + for _, tc := range testCases { + b := dumpUint64(nil, tc) + c.Assert(len(b), Equals, 8) + c.Assert(parseUint64(b), Equals, tc) + } +} + +func (s *testUtilSuite) TestParseLengthEncodedBytes(c *C) { + buffer := []byte{'\xfb'} + b, isNull, n, err := parseLengthEncodedBytes(buffer) + c.Assert(b, IsNil) + c.Assert(isNull, IsTrue) + c.Assert(n, Equals, 1) + c.Assert(err, IsNil) + + buffer = []byte{0} + b, isNull, n, err = 
parseLengthEncodedBytes(buffer) + c.Assert(b, IsNil) + c.Assert(isNull, IsFalse) + c.Assert(n, Equals, 1) + c.Assert(err, IsNil) + + buffer = []byte{'\x01'} + b, isNull, n, err = parseLengthEncodedBytes(buffer) + c.Assert(b, IsNil) + c.Assert(isNull, IsFalse) + c.Assert(n, Equals, 2) + c.Assert(err.Error(), Equals, "EOF") +} + +func (s *testUtilSuite) TestParseNullTermString(c *C) { + for _, t := range []struct { + input string + str string + remain string + }{ + { + "abc\x00def", + "abc", + "def", + }, + { + "\x00def", + "", + "def", + }, + { + "def\x00hig\x00k", + "def", + "hig\x00k", + }, + { + "abcdef", + "", + "abcdef", + }, + } { + str, remain := parseNullTermString([]byte(t.input)) + c.Assert(string(str), Equals, t.str) + c.Assert(string(remain), Equals, t.remain) + } +} diff --git a/session/bench_test.go b/session/bench_test.go new file mode 100644 index 0000000..4e4b2c2 --- /dev/null +++ b/session/bench_test.go @@ -0,0 +1,434 @@ +// Copyright 2016 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package session + +import ( + "context" + "fmt" + "math/rand" + "strconv" + "testing" + "time" + + "github.com/pingcap/log" + "github.com/pingcap/tidb/domain" + "github.com/pingcap/tidb/kv" + "github.com/pingcap/tidb/store/mockstore" + "github.com/pingcap/tidb/util/logutil" + "github.com/pingcap/tidb/util/sqlexec" + "go.uber.org/zap" + "go.uber.org/zap/zapcore" +) + +var smallCount = 100 +var bigCount = 10000 + +func prepareBenchSession() (Session, *domain.Domain, kv.Storage) { + store, err := mockstore.NewMockTikvStore() + if err != nil { + logutil.BgLogger().Fatal(err.Error()) + } + domain, err := BootstrapSession(store) + if err != nil { + logutil.BgLogger().Fatal(err.Error()) + } + log.SetLevel(zapcore.ErrorLevel) + se, err := CreateSession4Test(store) + if err != nil { + logutil.BgLogger().Fatal(err.Error()) + } + mustExecute(se, "use test") + return se, domain, store +} + +func prepareBenchData(se Session, colType string, valueFormat string, valueCount int) { + mustExecute(se, "drop table if exists t") + mustExecute(se, fmt.Sprintf("create table t (pk int primary key auto_increment, col %s, index idx (col))", colType)) + mustExecute(se, "begin") + for i := 0; i < valueCount; i++ { + mustExecute(se, "insert t (col) values ("+fmt.Sprintf(valueFormat, i)+")") + } + mustExecute(se, "commit") +} + +func prepareSortBenchData(se Session, colType string, valueFormat string, valueCount int) { + mustExecute(se, "drop table if exists t") + mustExecute(se, fmt.Sprintf("create table t (pk int primary key auto_increment, col %s)", colType)) + mustExecute(se, "begin") + r := rand.New(rand.NewSource(time.Now().UnixNano())) + for i := 0; i < valueCount; i++ { + if i%1000 == 0 { + mustExecute(se, "commit") + mustExecute(se, "begin") + } + mustExecute(se, "insert t (col) values ("+fmt.Sprintf(valueFormat, r.Intn(valueCount))+")") + } + mustExecute(se, "commit") +} + +func prepareJoinBenchData(se Session, colType string, valueFormat string, valueCount int) { + mustExecute(se, "drop table if exists t") + mustExecute(se, fmt.Sprintf("create table t (pk int 
primary key auto_increment, col %s)", colType)) + mustExecute(se, "begin") + for i := 0; i < valueCount; i++ { + mustExecute(se, "insert t (col) values ("+fmt.Sprintf(valueFormat, i)+")") + } + mustExecute(se, "commit") +} + +func readResult(ctx context.Context, rs sqlexec.RecordSet, count int) { + req := rs.NewChunk() + for count > 0 { + err := rs.Next(ctx, req) + if err != nil { + logutil.Logger(ctx).Fatal("read result failed", zap.Error(err)) + } + if req.NumRows() == 0 { + logutil.Logger(ctx).Fatal(strconv.Itoa(count)) + } + count -= req.NumRows() + } + rs.Close() +} + +func BenchmarkBasic(b *testing.B) { + ctx := context.Background() + se, do, st := prepareBenchSession() + defer func() { + se.Close() + st.Close() + do.Close() + }() + b.ResetTimer() + for i := 0; i < b.N; i++ { + rs, err := se.Execute(ctx, "select 1") + if err != nil { + b.Fatal(err) + } + readResult(ctx, rs[0], 1) + } + b.StopTimer() +} + +func BenchmarkTableScan(b *testing.B) { + ctx := context.Background() + se, do, st := prepareBenchSession() + defer func() { + se.Close() + st.Close() + do.Close() + }() + prepareBenchData(se, "int", "%v", smallCount) + b.ResetTimer() + for i := 0; i < b.N; i++ { + rs, err := se.Execute(ctx, "select * from t") + if err != nil { + b.Fatal(err) + } + readResult(ctx, rs[0], smallCount) + } + b.StopTimer() +} + +func BenchmarkExplainTableScan(b *testing.B) { + ctx := context.Background() + se, do, st := prepareBenchSession() + defer func() { + se.Close() + st.Close() + do.Close() + }() + prepareBenchData(se, "int", "%v", 0) + b.ResetTimer() + for i := 0; i < b.N; i++ { + rs, err := se.Execute(ctx, "explain select * from t") + if err != nil { + b.Fatal(err) + } + readResult(ctx, rs[0], 1) + } + b.StopTimer() +} + +func BenchmarkTableLookup(b *testing.B) { + ctx := context.Background() + se, do, st := prepareBenchSession() + defer func() { + se.Close() + st.Close() + do.Close() + }() + prepareBenchData(se, "int", "%d", smallCount) + b.ResetTimer() + for i := 0; i < b.N; i++ { + rs, err := se.Execute(ctx, "select * from t where pk = 64") + if err != nil { + b.Fatal(err) + } + readResult(ctx, rs[0], 1) + } + b.StopTimer() +} + +func BenchmarkExplainTableLookup(b *testing.B) { + ctx := context.Background() + se, do, st := prepareBenchSession() + defer func() { + se.Close() + st.Close() + do.Close() + }() + prepareBenchData(se, "int", "%d", 0) + b.ResetTimer() + for i := 0; i < b.N; i++ { + rs, err := se.Execute(ctx, "explain select * from t where pk = 64") + if err != nil { + b.Fatal(err) + } + readResult(ctx, rs[0], 1) + } + b.StopTimer() +} + +func BenchmarkStringIndexScan(b *testing.B) { + ctx := context.Background() + se, do, st := prepareBenchSession() + defer func() { + se.Close() + st.Close() + do.Close() + }() + prepareBenchData(se, "varchar(255)", "'hello %d'", smallCount) + b.ResetTimer() + for i := 0; i < b.N; i++ { + rs, err := se.Execute(ctx, "select * from t where col > 'hello'") + if err != nil { + b.Fatal(err) + } + readResult(ctx, rs[0], smallCount) + } + b.StopTimer() +} + +func BenchmarkExplainStringIndexScan(b *testing.B) { + ctx := context.Background() + se, do, st := prepareBenchSession() + defer func() { + se.Close() + st.Close() + do.Close() + }() + prepareBenchData(se, "varchar(255)", "'hello %d'", 0) + b.ResetTimer() + for i := 0; i < b.N; i++ { + rs, err := se.Execute(ctx, "explain select * from t where col > 'hello'") + if err != nil { + b.Fatal(err) + } + readResult(ctx, rs[0], 1) + } + b.StopTimer() +} + +func BenchmarkStringIndexLookup(b *testing.B) { + ctx := 
context.Background() + se, do, st := prepareBenchSession() + defer func() { + se.Close() + st.Close() + do.Close() + }() + prepareBenchData(se, "varchar(255)", "'hello %d'", smallCount) + b.ResetTimer() + for i := 0; i < b.N; i++ { + rs, err := se.Execute(ctx, "select * from t where col = 'hello 64'") + if err != nil { + b.Fatal(err) + } + readResult(ctx, rs[0], 1) + } + b.StopTimer() +} + +func BenchmarkIntegerIndexScan(b *testing.B) { + ctx := context.Background() + se, do, st := prepareBenchSession() + defer func() { + se.Close() + st.Close() + do.Close() + }() + prepareBenchData(se, "int", "%v", smallCount) + b.ResetTimer() + for i := 0; i < b.N; i++ { + rs, err := se.Execute(ctx, "select * from t where col >= 0") + if err != nil { + b.Fatal(err) + } + readResult(ctx, rs[0], smallCount) + } + b.StopTimer() +} + +func BenchmarkIntegerIndexLookup(b *testing.B) { + ctx := context.Background() + se, do, st := prepareBenchSession() + defer func() { + se.Close() + st.Close() + do.Close() + }() + prepareBenchData(se, "int", "%v", smallCount) + b.ResetTimer() + for i := 0; i < b.N; i++ { + rs, err := se.Execute(ctx, "select * from t where col = 64") + if err != nil { + b.Fatal(err) + } + readResult(ctx, rs[0], 1) + } + b.StopTimer() +} + +func BenchmarkDecimalIndexScan(b *testing.B) { + ctx := context.Background() + se, do, st := prepareBenchSession() + defer func() { + se.Close() + st.Close() + do.Close() + }() + prepareBenchData(se, "decimal(32,6)", "%v.1234", smallCount) + b.ResetTimer() + for i := 0; i < b.N; i++ { + rs, err := se.Execute(ctx, "select * from t where col >= 0") + if err != nil { + b.Fatal(err) + } + readResult(ctx, rs[0], smallCount) + } + b.StopTimer() +} + +func BenchmarkDecimalIndexLookup(b *testing.B) { + ctx := context.Background() + se, do, st := prepareBenchSession() + defer func() { + se.Close() + st.Close() + do.Close() + }() + prepareBenchData(se, "decimal(32,6)", "%v.1234", smallCount) + b.ResetTimer() + for i := 0; i < b.N; i++ { + rs, err := se.Execute(ctx, "select * from t where col = 64.1234") + if err != nil { + b.Fatal(err) + } + readResult(ctx, rs[0], 1) + } + b.StopTimer() +} + +func BenchmarkInsertWithIndex(b *testing.B) { + se, do, st := prepareBenchSession() + defer func() { + se.Close() + st.Close() + do.Close() + }() + mustExecute(se, "drop table if exists t") + mustExecute(se, "create table t (pk int primary key, col int, index idx (col))") + b.ResetTimer() + for i := 0; i < b.N; i++ { + mustExecute(se, fmt.Sprintf("insert t values (%d, %d)", i, i)) + } + b.StopTimer() +} + +func BenchmarkInsertNoIndex(b *testing.B) { + se, do, st := prepareBenchSession() + defer func() { + se.Close() + st.Close() + do.Close() + }() + mustExecute(se, "drop table if exists t") + mustExecute(se, "create table t (pk int primary key, col int)") + b.ResetTimer() + for i := 0; i < b.N; i++ { + mustExecute(se, fmt.Sprintf("insert t values (%d, %d)", i, i)) + } + b.StopTimer() +} + +func BenchmarkSort(b *testing.B) { + ctx := context.Background() + se, do, st := prepareBenchSession() + defer func() { + se.Close() + st.Close() + do.Close() + }() + prepareSortBenchData(se, "int", "%v", bigCount) + b.ResetTimer() + for i := 0; i < b.N; i++ { + rs, err := se.Execute(ctx, "select * from t order by col limit 50") + if err != nil { + b.Fatal(err) + } + readResult(ctx, rs[0], 50) + } + b.StopTimer() +} + +func BenchmarkJoin(b *testing.B) { + ctx := context.Background() + se, do, st := prepareBenchSession() + defer func() { + se.Close() + st.Close() + do.Close() + }() + 
prepareJoinBenchData(se, "int", "%v", smallCount) + b.ResetTimer() + for i := 0; i < b.N; i++ { + rs, err := se.Execute(ctx, "select * from t a join t b on a.col = b.col") + if err != nil { + b.Fatal(err) + } + readResult(ctx, rs[0], smallCount) + } + b.StopTimer() +} + +func BenchmarkJoinLimit(b *testing.B) { + ctx := context.Background() + se, do, st := prepareBenchSession() + defer func() { + se.Close() + st.Close() + do.Close() + }() + prepareJoinBenchData(se, "int", "%v", smallCount) + b.ResetTimer() + for i := 0; i < b.N; i++ { + rs, err := se.Execute(ctx, "select * from t a join t b on a.col = b.col limit 1") + if err != nil { + b.Fatal(err) + } + readResult(ctx, rs[0], 1) + } + b.StopTimer() +} diff --git a/session/bootstrap.go b/session/bootstrap.go new file mode 100644 index 0000000..a9a167d --- /dev/null +++ b/session/bootstrap.go @@ -0,0 +1,285 @@ +// Copyright 2013 The ql Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSES/QL-LICENSE file. + +// Copyright 2015 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package session + +import ( + "context" + "fmt" + "runtime/debug" + "strings" + "time" + + "github.com/pingcap/errors" + "github.com/pingcap/tidb/domain" + "github.com/pingcap/tidb/infoschema" + "github.com/pingcap/tidb/parser/mysql" + "github.com/pingcap/tidb/parser/terror" + "github.com/pingcap/tidb/sessionctx/variable" + "github.com/pingcap/tidb/util/logutil" + "go.uber.org/zap" +) + +const ( + // CreateGloablVariablesTable is the SQL statement creates global variable table in system db. + // TODO: MySQL puts GLOBAL_VARIABLES table in INFORMATION_SCHEMA db. + // INFORMATION_SCHEMA is a virtual db in TiDB. So we put this table in system db. + // Maybe we will put it back to INFORMATION_SCHEMA. + CreateGloablVariablesTable = `CREATE TABLE if not exists mysql.GLOBAL_VARIABLES( + VARIABLE_NAME VARCHAR(64) Not Null PRIMARY KEY, + VARIABLE_VALUE VARCHAR(1024) DEFAULT Null);` + // CreateTiDBTable is the SQL statement creates a table in system db. + // This table is a key-value struct contains some information used by TiDB. + // Currently we only put bootstrapped in it which indicates if the system is already bootstrapped. + CreateTiDBTable = `CREATE TABLE if not exists mysql.tidb( + VARIABLE_NAME VARCHAR(64) Not Null PRIMARY KEY, + VARIABLE_VALUE VARCHAR(1024) DEFAULT Null, + COMMENT VARCHAR(1024));` + + // CreateStatsMetaTable stores the meta of table statistics. + CreateStatsMetaTable = `CREATE TABLE if not exists mysql.stats_meta ( + version bigint(64) unsigned NOT NULL, + table_id bigint(64) NOT NULL, + modify_count bigint(64) NOT NULL DEFAULT 0, + count bigint(64) unsigned NOT NULL DEFAULT 0, + index idx_ver(version), + unique index tbl(table_id) + );` + + // CreateStatsColsTable stores the statistics of table columns. 
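+	// The table is mysql.stats_histograms; it holds one row per column or index
+	// histogram, with is_index and hist_id identifying the object.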
+ CreateStatsColsTable = `CREATE TABLE if not exists mysql.stats_histograms ( + table_id bigint(64) NOT NULL, + is_index tinyint(2) NOT NULL, + hist_id bigint(64) NOT NULL, + distinct_count bigint(64) NOT NULL, + null_count bigint(64) NOT NULL DEFAULT 0, + tot_col_size bigint(64) NOT NULL DEFAULT 0, + modify_count bigint(64) NOT NULL DEFAULT 0, + version bigint(64) unsigned NOT NULL DEFAULT 0, + cm_sketch blob, + stats_ver bigint(64) NOT NULL DEFAULT 0, + flag bigint(64) NOT NULL DEFAULT 0, + correlation double NOT NULL DEFAULT 0, + last_analyze_pos blob DEFAULT NULL, + unique index tbl(table_id, is_index, hist_id) + );` + + // CreateStatsBucketsTable stores the histogram info for every table columns. + CreateStatsBucketsTable = `CREATE TABLE if not exists mysql.stats_buckets ( + table_id bigint(64) NOT NULL, + is_index tinyint(2) NOT NULL, + hist_id bigint(64) NOT NULL, + bucket_id bigint(64) NOT NULL, + count bigint(64) NOT NULL, + repeats bigint(64) NOT NULL, + upper_bound blob NOT NULL, + lower_bound blob , + unique index tbl(table_id, is_index, hist_id, bucket_id) + );` + + // CreateGCDeleteRangeTable stores schemas which can be deleted by DeleteRange. + CreateGCDeleteRangeTable = `CREATE TABLE IF NOT EXISTS mysql.gc_delete_range ( + job_id BIGINT NOT NULL COMMENT "the DDL job ID", + element_id BIGINT NOT NULL COMMENT "the schema element ID", + start_key VARCHAR(255) NOT NULL COMMENT "encoded in hex", + end_key VARCHAR(255) NOT NULL COMMENT "encoded in hex", + ts BIGINT NOT NULL COMMENT "timestamp in uint64", + UNIQUE KEY delete_range_index (job_id, element_id) + );` + + // CreateGCDeleteRangeDoneTable stores schemas which are already deleted by DeleteRange. + CreateGCDeleteRangeDoneTable = `CREATE TABLE IF NOT EXISTS mysql.gc_delete_range_done ( + job_id BIGINT NOT NULL COMMENT "the DDL job ID", + element_id BIGINT NOT NULL COMMENT "the schema element ID", + start_key VARCHAR(255) NOT NULL COMMENT "encoded in hex", + end_key VARCHAR(255) NOT NULL COMMENT "encoded in hex", + ts BIGINT NOT NULL COMMENT "timestamp in uint64", + UNIQUE KEY delete_range_done_index (job_id, element_id) + );` + + // CreateStatsTopNTable stores topn data of a cmsketch with top n. + CreateStatsTopNTable = `CREATE TABLE if not exists mysql.stats_top_n ( + table_id bigint(64) NOT NULL, + is_index tinyint(2) NOT NULL, + hist_id bigint(64) NOT NULL, + value longblob, + count bigint(64) UNSIGNED NOT NULL, + index tbl(table_id, is_index, hist_id) + );` +) + +// bootstrap initiates system DB for a store. +func bootstrap(s Session) { + startTime := time.Now() + dom := domain.GetDomain(s) + for { + b, err := checkBootstrapped(s) + if err != nil { + logutil.BgLogger().Fatal("check bootstrap error", + zap.Error(err)) + } + // To reduce conflict when multiple TiDB-server start at the same time. + // Actually only one server need to do the bootstrap. So we chose DDL owner to do this. + if !b && dom.DDL().OwnerManager().IsOwner() { + doDDLWorks(s) + doDMLWorks(s) + logutil.BgLogger().Info("bootstrap successful", + zap.Duration("take time", time.Since(startTime))) + return + } + time.Sleep(200 * time.Millisecond) + } +} + +const ( + // The variable name in mysql.TiDB table. + // It is used for checking if the store is boostrapped by any TiDB server. + bootstrappedVar = "bootstrapped" + // The variable value in mysql.TiDB table for bootstrappedVar. + // If the value true, the store is already boostrapped by a TiDB server. + bootstrappedVarTrue = "True" + // The variable name in mysql.TiDB table. 
+ // It is used for getting the version of the TiDB server which bootstrapped the store. + tidbServerVersionVar = "tidb_server_version" +) + +func checkBootstrapped(s Session) (bool, error) { + // Check if system db exists. + _, err := s.Execute(context.Background(), fmt.Sprintf("USE %s;", mysql.SystemDB)) + if err != nil && infoschema.ErrDatabaseNotExists.NotEqual(err) { + logutil.BgLogger().Fatal("check bootstrap error", + zap.Error(err)) + } + // Check bootstrapped variable value in TiDB table. + sVal, _, err := getTiDBVar(s, bootstrappedVar) + if err != nil { + if infoschema.ErrTableNotExists.Equal(err) { + return false, nil + } + return false, errors.Trace(err) + } + isBootstrapped := sVal == bootstrappedVarTrue + if isBootstrapped { + // Make sure that doesn't affect the following operations. + if err = s.CommitTxn(context.Background()); err != nil { + return false, errors.Trace(err) + } + } + return isBootstrapped, nil +} + +// getTiDBVar gets variable value from mysql.tidb table. +// Those variables are used by TiDB server. +func getTiDBVar(s Session, name string) (sVal string, isNull bool, e error) { + sql := fmt.Sprintf(`SELECT HIGH_PRIORITY VARIABLE_VALUE FROM %s.%s WHERE VARIABLE_NAME="%s"`, + mysql.SystemDB, mysql.TiDBTable, name) + ctx := context.Background() + rs, err := s.Execute(ctx, sql) + if err != nil { + return "", true, errors.Trace(err) + } + if len(rs) != 1 { + return "", true, errors.New("Wrong number of Recordset") + } + r := rs[0] + defer terror.Call(r.Close) + req := r.NewChunk() + err = r.Next(ctx, req) + if err != nil || req.NumRows() == 0 { + return "", true, errors.Trace(err) + } + row := req.GetRow(0) + if row.IsNull(0) { + return "", true, nil + } + return row.GetString(0), false, nil +} + +// doDDLWorks executes DDL statements in bootstrap stage. +func doDDLWorks(s Session) { + // Create a test database. + mustExecute(s, "CREATE DATABASE IF NOT EXISTS test") + // Create system db. + mustExecute(s, fmt.Sprintf("CREATE DATABASE IF NOT EXISTS %s;", mysql.SystemDB)) + // Create global system variable table. + mustExecute(s, CreateGloablVariablesTable) + // Create TiDB table. + mustExecute(s, CreateTiDBTable) + // Create stats_meta table. + mustExecute(s, CreateStatsMetaTable) + // Create stats_columns table. + mustExecute(s, CreateStatsColsTable) + // Create stats_buckets table. + mustExecute(s, CreateStatsBucketsTable) + // Create gc_delete_range table. + mustExecute(s, CreateGCDeleteRangeTable) + // Create gc_delete_range_done table. + mustExecute(s, CreateGCDeleteRangeDoneTable) + // Create stats_topn_store table. + mustExecute(s, CreateStatsTopNTable) +} + +// doDMLWorks executes DML statements in bootstrap stage. +// All the statements run in a single transaction. +func doDMLWorks(s Session) { + mustExecute(s, "BEGIN") + + // Init global system variables table. + values := make([]string, 0, len(variable.SysVars)) + for k, v := range variable.SysVars { + // Session only variable should not be inserted. + if v.Scope != variable.ScopeSession { + value := fmt.Sprintf(`("%s", "%s")`, strings.ToLower(k), v.Value) + values = append(values, value) + } + } + sql := fmt.Sprintf("INSERT HIGH_PRIORITY INTO %s.%s VALUES %s;", mysql.SystemDB, mysql.GlobalVariablesTable, + strings.Join(values, ", ")) + mustExecute(s, sql) + + sql = fmt.Sprintf(`INSERT HIGH_PRIORITY INTO %s.%s VALUES("%s", "%s", "Bootstrap flag. 
Do not delete.")`, + mysql.SystemDB, mysql.TiDBTable, bootstrappedVar, bootstrappedVarTrue) + mustExecute(s, sql) + + sql = fmt.Sprintf(`INSERT HIGH_PRIORITY INTO %s.%s VALUES("%s", "%d", "Bootstrap version. Do not delete.")`, + mysql.SystemDB, mysql.TiDBTable, tidbServerVersionVar, bootstrapped) + mustExecute(s, sql) + + _, err := s.Execute(context.Background(), "COMMIT") + if err != nil { + sleepTime := 1 * time.Second + logutil.BgLogger().Info("doDMLWorks failed", zap.Error(err), zap.Duration("sleeping time", sleepTime)) + time.Sleep(sleepTime) + // Check if TiDB is already bootstrapped. + b, err1 := checkBootstrapped(s) + if err1 != nil { + logutil.BgLogger().Fatal("doDMLWorks failed", zap.Error(err1)) + } + if b { + return + } + logutil.BgLogger().Fatal("doDMLWorks failed", zap.Error(err)) + } +} + +func mustExecute(s Session, sql string) { + _, err := s.Execute(context.Background(), sql) + if err != nil { + debug.PrintStack() + logutil.BgLogger().Fatal("mustExecute error", zap.Error(err)) + } +} diff --git a/session/isolation_test.go b/session/isolation_test.go new file mode 100644 index 0000000..361e0d7 --- /dev/null +++ b/session/isolation_test.go @@ -0,0 +1,74 @@ +// Copyright 2018 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package session_test + +import ( + . "github.com/pingcap/check" + "github.com/pingcap/tidb/domain" + "github.com/pingcap/tidb/kv" + "github.com/pingcap/tidb/session" + "github.com/pingcap/tidb/store/mockstore" + "github.com/pingcap/tidb/store/mockstore/mocktikv" + "github.com/pingcap/tidb/util/testkit" + "github.com/pingcap/tidb/util/testleak" +) + +var _ = Suite(&testIsolationSuite{}) + +type testIsolationSuite struct { + cluster *mocktikv.Cluster + mvccStore mocktikv.MVCCStore + store kv.Storage + dom *domain.Domain +} + +func (s *testIsolationSuite) SetUpSuite(c *C) { + testleak.BeforeTest() + s.cluster = mocktikv.NewCluster() + mocktikv.BootstrapWithSingleStore(s.cluster) + s.mvccStore = mocktikv.MustNewMVCCStore() + store, err := mockstore.NewMockTikvStore( + mockstore.WithCluster(s.cluster), + mockstore.WithMVCCStore(s.mvccStore), + ) + c.Assert(err, IsNil) + s.store = store + session.SetSchemaLease(0) + session.DisableStats4Test() + s.dom, err = session.BootstrapSession(s.store) + c.Assert(err, IsNil) +} + +func (s *testIsolationSuite) TearDownSuite(c *C) { + s.dom.Close() + s.store.Close() + testleak.AfterTest(c)() +} + +func (s *testIsolationSuite) TestA3Phantom(c *C) { + session1 := testkit.NewTestKitWithInit(c, s.store) + session2 := testkit.NewTestKitWithInit(c, s.store) + + session1.MustExec("drop table if exists x;") + session1.MustExec("create table x (id int primary key, c int);") + session1.MustExec("insert into x values(1, 1);") + + session1.MustExec("begin;") + session2.MustExec("begin;") + session2.MustQuery("select c from x where id < 5;").Check(testkit.Rows("1")) + session1.MustExec("insert into x values(2, 1);") + session1.MustExec("commit;") + session2.MustQuery("select c from x where id < 5;").Check(testkit.Rows("1")) + session2.MustExec("commit;") +} diff --git a/session/session.go 
b/session/session.go new file mode 100644 index 0000000..bd807b4 --- /dev/null +++ b/session/session.go @@ -0,0 +1,1103 @@ +// Copyright 2013 The ql Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSES/QL-LICENSE file. + +// Copyright 2015 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package session + +import ( + "context" + "crypto/tls" + "encoding/json" + "fmt" + "strings" + "sync" + "sync/atomic" + "time" + + "github.com/ngaut/pools" + "github.com/pingcap/errors" + "github.com/pingcap/failpoint" + "github.com/pingcap/tidb/domain" + "github.com/pingcap/tidb/executor" + "github.com/pingcap/tidb/infoschema" + "github.com/pingcap/tidb/kv" + "github.com/pingcap/tidb/meta" + "github.com/pingcap/tidb/owner" + "github.com/pingcap/tidb/parser" + "github.com/pingcap/tidb/parser/ast" + "github.com/pingcap/tidb/parser/charset" + "github.com/pingcap/tidb/parser/model" + "github.com/pingcap/tidb/parser/mysql" + "github.com/pingcap/tidb/parser/terror" + "github.com/pingcap/tidb/sessionctx" + "github.com/pingcap/tidb/sessionctx/stmtctx" + "github.com/pingcap/tidb/sessionctx/variable" + "github.com/pingcap/tidb/types" + "github.com/pingcap/tidb/util" + "github.com/pingcap/tidb/util/chunk" + "github.com/pingcap/tidb/util/logutil" + "github.com/pingcap/tidb/util/sqlexec" + "go.uber.org/zap" +) + +// Session context, it is consistent with the lifecycle of a client connection. +type Session interface { + sessionctx.Context + Status() uint16 // Flag of current status, such as autocommit. + LastInsertID() uint64 // LastInsertID is the last inserted auto_increment ID. + AffectedRows() uint64 // Affected rows by latest executed stmt. + Execute(context.Context, string) ([]sqlexec.RecordSet, error) // Execute a sql statement. + String() string // String is used to debug. + CommitTxn(context.Context) error + RollbackTxn(context.Context) + SetClientCapability(uint32) // Set client capability flags. + SetConnectionID(uint64) + SetCommandValue(byte) + SetTLSState(*tls.ConnectionState) + SetCollation(coID int) error + Close() + // PrePareTxnCtx is exported for test. + PrepareTxnCtx(context.Context) + // FieldList returns fields list of a table. + FieldList(tableName string) (fields []*ast.ResultField, err error) +} + +var ( + _ Session = (*session)(nil) +) + +type stmtRecord struct { + st sqlexec.Statement + stmtCtx *stmtctx.StatementContext +} + +// StmtHistory holds all histories of statements in a txn. +type StmtHistory struct { + history []*stmtRecord +} + +// Add appends a stmt to history list. +func (h *StmtHistory) Add(st sqlexec.Statement, stmtCtx *stmtctx.StatementContext) { + s := &stmtRecord{ + st: st, + stmtCtx: stmtCtx, + } + h.history = append(h.history, s) +} + +// Count returns the count of the history. 
+func (h *StmtHistory) Count() int { + return len(h.history) +} + +type session struct { + txn TxnState + + mu struct { + sync.RWMutex + values map[fmt.Stringer]interface{} + } + + store kv.Storage + + parser *parser.Parser + + sessionVars *variable.SessionVars + + // ddlOwnerChecker is used in `select tidb_is_ddl_owner()` statement; + ddlOwnerChecker owner.DDLOwnerChecker + + // shared coprocessor client per session + client kv.Client +} + +// DDLOwnerChecker returns s.ddlOwnerChecker. +func (s *session) DDLOwnerChecker() owner.DDLOwnerChecker { + return s.ddlOwnerChecker +} + +func (s *session) getMembufCap() int { + return kv.DefaultTxnMembufCap +} + +func (s *session) Status() uint16 { + return s.sessionVars.Status +} + +func (s *session) LastInsertID() uint64 { + if s.sessionVars.StmtCtx.LastInsertID > 0 { + return s.sessionVars.StmtCtx.LastInsertID + } + return s.sessionVars.StmtCtx.InsertID +} + +func (s *session) AffectedRows() uint64 { + return s.sessionVars.StmtCtx.AffectedRows() +} + +func (s *session) SetClientCapability(capability uint32) { + s.sessionVars.ClientCapability = capability +} + +func (s *session) SetConnectionID(connectionID uint64) { + s.sessionVars.ConnectionID = connectionID +} + +func (s *session) SetTLSState(tlsState *tls.ConnectionState) { + // If user is not connected via TLS, then tlsState == nil. + if tlsState != nil { + s.sessionVars.TLSConnectionState = tlsState + } +} + +func (s *session) SetCommandValue(command byte) { + atomic.StoreUint32(&s.sessionVars.CommandValue, uint32(command)) +} + +func (s *session) SetCollation(coID int) error { + cs, co, err := charset.GetCharsetInfoByID(coID) + if err != nil { + return err + } + for _, v := range variable.SetNamesVariables { + terror.Log(s.sessionVars.SetSystemVar(v, cs)) + } + terror.Log(s.sessionVars.SetSystemVar(variable.CollationConnection, co)) + return nil +} + +// FieldList returns fields list of a table. +func (s *session) FieldList(tableName string) ([]*ast.ResultField, error) { + is := infoschema.GetInfoSchema(s) + dbName := model.NewCIStr(s.GetSessionVars().CurrentDB) + tName := model.NewCIStr(tableName) + table, err := is.TableByName(dbName, tName) + if err != nil { + return nil, err + } + + cols := table.Cols() + fields := make([]*ast.ResultField, 0, len(cols)) + for _, col := range table.Cols() { + rf := &ast.ResultField{ + ColumnAsName: col.Name, + TableAsName: tName, + DBName: dbName, + Table: table.Meta(), + Column: col.ColumnInfo, + } + fields = append(fields, rf) + } + return fields, nil +} + +func (s *session) doCommit(ctx context.Context) error { + if !s.txn.Valid() { + return nil + } + defer func() { + s.txn.changeToInvalid() + s.sessionVars.SetStatusFlag(mysql.ServerStatusInTrans, false) + }() + if s.txn.IsReadOnly() { + return nil + } + + // mockCommitError and mockGetTSErrorInRetry use to test PR #8743. + failpoint.Inject("mockCommitError", func(val failpoint.Value) { + if val.(bool) && kv.IsMockCommitErrorEnable() { + kv.MockCommitErrorDisable() + failpoint.Return(kv.ErrTxnRetryable) + } + }) + + // Get the related table IDs. + relatedTables := s.GetSessionVars().TxnCtx.TableDeltaMap + tableIDs := make([]int64, 0, len(relatedTables)) + for id := range relatedTables { + tableIDs = append(tableIDs, id) + } + // Set this option for 2 phase commit to validate schema lease. 
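+	// The checker is consulted during the two-phase commit: if the schema version of
+	// any related table has moved since this transaction started (for example, because
+	// a DDL finished in between), the commit is rejected instead of writing rows that
+	// no longer match the table definition. Roughly:
+	//
+	//   checker := domain.NewSchemaChecker(dom, txnSchemaVersion, tableIDs)
+	//   // at commit time the checker reports an error if the version changed for tableIDs
+	//
+	// (Illustrative sketch only; the actual validation lives in the domain's schema validator.)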
+ s.txn.SetOption(kv.SchemaChecker, domain.NewSchemaChecker(domain.GetDomain(s), s.sessionVars.TxnCtx.SchemaVersion, tableIDs)) + + return s.txn.Commit(sessionctx.SetCommitCtx(ctx, s)) +} + +func (s *session) commitTxn(ctx context.Context) error { + defer func() { + s.txn.changeToInvalid() + }() + if !s.txn.Valid() { + // If the transaction is invalid, maybe it has already been rolled back by the client. + return nil + } + err := s.doCommit(ctx) + + if isoLevelOneShot := &s.sessionVars.TxnIsolationLevelOneShot; isoLevelOneShot.State != 0 { + switch isoLevelOneShot.State { + case 1: + isoLevelOneShot.State = 2 + case 2: + isoLevelOneShot.State = 0 + isoLevelOneShot.Value = "" + } + } + + if err != nil { + logutil.Logger(ctx).Warn("commit failed", + zap.String("finished txn", s.txn.GoString()), + zap.Error(err)) + return err + } + return nil +} + +func (s *session) CommitTxn(ctx context.Context) error { + err := s.commitTxn(ctx) + + failpoint.Inject("keepHistory", func(val failpoint.Value) { + if val.(bool) { + failpoint.Return(err) + } + }) + + s.sessionVars.TxnCtx.Cleanup() + return err +} + +func (s *session) RollbackTxn(ctx context.Context) { + if s.txn.Valid() { + terror.Log(s.txn.Rollback()) + } + s.txn.changeToInvalid() + s.sessionVars.TxnCtx.Cleanup() + s.sessionVars.SetStatusFlag(mysql.ServerStatusInTrans, false) +} + +func (s *session) GetClient() kv.Client { + return s.client +} + +func (s *session) String() string { + // TODO: how to print binded context in values appropriately? + sessVars := s.sessionVars + data := map[string]interface{}{ + "id": sessVars.ConnectionID, + "user": "", + "currDBName": sessVars.CurrentDB, + "status": sessVars.Status, + "strictMode": sessVars.StrictSQLMode, + } + if s.txn.Valid() { + // if txn is committed or rolled back, txn is nil. + data["txn"] = s.txn.String() + } + if sessVars.StmtCtx.LastInsertID > 0 { + data["lastInsertID"] = sessVars.StmtCtx.LastInsertID + } + b, err := json.MarshalIndent(data, "", " ") + terror.Log(errors.Trace(err)) + return string(b) +} + +// SchemaChangedWithoutRetry is used for testing. +var SchemaChangedWithoutRetry uint32 + +func (s *session) checkTxnAborted(stmt sqlexec.Statement) error { + if s.txn.doNotCommit == nil { + return nil + } + // If the transaction is aborted, the following statements do not need to execute, except `commit` and `rollback`, + // because they are used to finish the aborted transaction. + if _, ok := stmt.(*executor.ExecStmt).StmtNode.(*ast.CommitStmt); ok { + return nil + } + if _, ok := stmt.(*executor.ExecStmt).StmtNode.(*ast.RollbackStmt); ok { + return nil + } + return errors.New("current transaction is aborted, commands ignored until end of transaction block:" + s.txn.doNotCommit.Error()) +} + +type sessionPool interface { + Get() (pools.Resource, error) + Put(pools.Resource) +} + +func (s *session) sysSessionPool() sessionPool { + return domain.GetDomain(s).SysSessionPool() +} + +// ExecRestrictedSQL implements RestrictedSQLExecutor interface. +// This is used for executing some restricted sql statements, usually executed during a normal statement execution. +// Unlike normal Exec, it doesn't reset statement status, doesn't commit or rollback the current transaction +// and doesn't write binlog. +func (s *session) ExecRestrictedSQL(sql string) ([]chunk.Row, []*ast.ResultField, error) { + ctx := context.TODO() + + // Use special session to execute the sql. 
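+	// The session is borrowed from the internal pool and handed back with Put once we
+	// are done, so the restricted statement never disturbs the caller's own transaction
+	// or statement state.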
+ tmp, err := s.sysSessionPool().Get() + if err != nil { + return nil, nil, err + } + se := tmp.(*session) + defer s.sysSessionPool().Put(tmp) + + return execRestrictedSQL(ctx, se, sql) +} + +func execRestrictedSQL(ctx context.Context, se *session, sql string) ([]chunk.Row, []*ast.ResultField, error) { + recordSets, err := se.Execute(ctx, sql) + if err != nil { + return nil, nil, err + } + + var ( + rows []chunk.Row + fields []*ast.ResultField + ) + // Execute all recordset, take out the first one as result. + for i, rs := range recordSets { + tmp, err := drainRecordSet(ctx, se, rs) + if err != nil { + return nil, nil, err + } + if err = rs.Close(); err != nil { + return nil, nil, err + } + + if i == 0 { + rows = tmp + fields = rs.Fields() + } + } + return rows, fields, nil +} + +func createSessionFunc(store kv.Storage) pools.Factory { + return func() (pools.Resource, error) { + se, err := createSession(store) + if err != nil { + return nil, err + } + err = variable.SetSessionSystemVar(se.sessionVars, variable.AutoCommit, types.NewStringDatum("1")) + if err != nil { + return nil, err + } + err = variable.SetSessionSystemVar(se.sessionVars, variable.MaxExecutionTime, types.NewUintDatum(0)) + if err != nil { + return nil, errors.Trace(err) + } + se.sessionVars.CommonGlobalLoaded = true + se.sessionVars.InRestrictedSQL = true + return se, nil + } +} + +func createSessionWithDomainFunc(store kv.Storage) func(*domain.Domain) (pools.Resource, error) { + return func(dom *domain.Domain) (pools.Resource, error) { + se, err := CreateSessionWithDomain(store, dom) + if err != nil { + return nil, err + } + err = variable.SetSessionSystemVar(se.sessionVars, variable.AutoCommit, types.NewStringDatum("1")) + if err != nil { + return nil, err + } + err = variable.SetSessionSystemVar(se.sessionVars, variable.MaxExecutionTime, types.NewUintDatum(0)) + if err != nil { + return nil, errors.Trace(err) + } + se.sessionVars.CommonGlobalLoaded = true + se.sessionVars.InRestrictedSQL = true + return se, nil + } +} + +func drainRecordSet(ctx context.Context, se *session, rs sqlexec.RecordSet) ([]chunk.Row, error) { + var rows []chunk.Row + req := rs.NewChunk() + for { + err := rs.Next(ctx, req) + if err != nil || req.NumRows() == 0 { + return rows, err + } + iter := chunk.NewIterator4Chunk(req) + for r := iter.Begin(); r != iter.End(); r = iter.Next() { + rows = append(rows, r) + } + req = chunk.Renew(req, se.sessionVars.MaxChunkSize) + } +} + +// getExecRet executes restricted sql and the result is one column. +// It returns a string value. +func (s *session) getExecRet(ctx sessionctx.Context, sql string) (string, error) { + rows, fields, err := s.ExecRestrictedSQL(sql) + if err != nil { + return "", err + } + if len(rows) == 0 { + return "", executor.ErrResultIsEmpty + } + d := rows[0].GetDatum(0, &fields[0].Column.FieldType) + value, err := d.ToString() + if err != nil { + return "", err + } + return value, nil +} + +// GetAllSysVars implements GlobalVarAccessor.GetAllSysVars interface. 
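+// It reads mysql.GLOBAL_VARIABLES and returns a variable_name -> variable_value map.
+// During bootstrap (sessionctx.Initing is set) it returns nil, since the system
+// tables may not exist yet.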
+func (s *session) GetAllSysVars() (map[string]string, error) { + if s.Value(sessionctx.Initing) != nil { + return nil, nil + } + sql := `SELECT VARIABLE_NAME, VARIABLE_VALUE FROM %s.%s;` + sql = fmt.Sprintf(sql, mysql.SystemDB, mysql.GlobalVariablesTable) + rows, _, err := s.ExecRestrictedSQL(sql) + if err != nil { + return nil, err + } + ret := make(map[string]string) + for _, r := range rows { + k, v := r.GetString(0), r.GetString(1) + ret[k] = v + } + return ret, nil +} + +// GetGlobalSysVar implements GlobalVarAccessor.GetGlobalSysVar interface. +func (s *session) GetGlobalSysVar(name string) (string, error) { + if s.Value(sessionctx.Initing) != nil { + // When running bootstrap or upgrade, we should not access global storage. + return "", nil + } + sql := fmt.Sprintf(`SELECT VARIABLE_VALUE FROM %s.%s WHERE VARIABLE_NAME="%s";`, + mysql.SystemDB, mysql.GlobalVariablesTable, name) + sysVar, err := s.getExecRet(s, sql) + if err != nil { + if executor.ErrResultIsEmpty.Equal(err) { + if sv, ok := variable.SysVars[name]; ok { + return sv.Value, nil + } + return "", variable.ErrUnknownSystemVar.GenWithStackByArgs(name) + } + return "", err + } + return sysVar, nil +} + +// SetGlobalSysVar implements GlobalVarAccessor.SetGlobalSysVar interface. +func (s *session) SetGlobalSysVar(name, value string) error { + if name == variable.SQLModeVar { + value = mysql.FormatSQLModeStr(value) + if _, err := mysql.GetSQLMode(value); err != nil { + return err + } + } + var sVal string + var err error + sVal, err = variable.ValidateSetSystemVar(s.sessionVars, name, value) + if err != nil { + return err + } + name = strings.ToLower(name) + sql := fmt.Sprintf(`REPLACE %s.%s VALUES ('%s', '%s');`, + mysql.SystemDB, mysql.GlobalVariablesTable, name, sVal) + _, _, err = s.ExecRestrictedSQL(sql) + return err +} + +func (s *session) ParseSQL(ctx context.Context, sql, charset, collation string) ([]ast.StmtNode, []error, error) { + s.parser.SetSQLMode(s.sessionVars.SQLMode) + return s.parser.Parse(sql, charset, collation) +} + +func (s *session) executeStatement(ctx context.Context, connID uint64, stmtNode ast.StmtNode, stmt sqlexec.Statement, recordSets []sqlexec.RecordSet, inMulitQuery bool) ([]sqlexec.RecordSet, error) { + s.SetValue(sessionctx.QueryString, stmt.OriginText()) + if _, ok := stmtNode.(ast.DDLNode); ok { + s.SetValue(sessionctx.LastExecuteDDL, true) + } else { + s.ClearValue(sessionctx.LastExecuteDDL) + } + recordSet, err := runStmt(ctx, s, stmt) + if err != nil { + if !kv.ErrKeyExists.Equal(err) { + logutil.Logger(ctx).Warn("run statement failed", + zap.Int64("schemaVersion", s.sessionVars.TxnCtx.SchemaVersion), + zap.Error(err), + zap.String("session", s.String())) + } + return nil, err + } + + if inMulitQuery && recordSet == nil { + recordSet = &multiQueryNoDelayRecordSet{ + affectedRows: s.AffectedRows(), + warnCount: s.sessionVars.StmtCtx.WarningCount(), + lastInsertID: s.sessionVars.StmtCtx.LastInsertID, + status: s.sessionVars.Status, + } + } + + if recordSet != nil { + recordSets = append(recordSets, recordSet) + } + return recordSets, nil +} + +func (s *session) Execute(ctx context.Context, sql string) (recordSets []sqlexec.RecordSet, err error) { + if recordSets, err = s.execute(ctx, sql); err != nil { + s.sessionVars.StmtCtx.AppendError(err) + } + return +} + +func (s *session) execute(ctx context.Context, sql string) (recordSets []sqlexec.RecordSet, err error) { + s.PrepareTxnCtx(ctx) + connID := s.sessionVars.ConnectionID + err = s.loadCommonGlobalVariablesIfNeeded() + if err != nil { + 
return nil, err + } + + charsetInfo, collation := s.sessionVars.GetCharsetInfo() + + // Step1: Compile query string to abstract syntax trees(ASTs). + parseStartTime := time.Now() + stmtNodes, warns, err := s.ParseSQL(ctx, sql, charsetInfo, collation) + if err != nil { + s.rollbackOnError(ctx) + logutil.Logger(ctx).Warn("parse SQL failed", + zap.Error(err), + zap.String("SQL", sql)) + return nil, util.SyntaxError(err) + } + durParse := time.Since(parseStartTime) + s.GetSessionVars().DurationParse = durParse + + compiler := executor.Compiler{Ctx: s} + multiQuery := len(stmtNodes) > 1 + for _, stmtNode := range stmtNodes { + s.sessionVars.StartTime = time.Now() + s.PrepareTxnCtx(ctx) + + // Step2: Transform abstract syntax tree to a physical plan(stored in executor.ExecStmt). + // Some executions are done in compile stage, so we reset them before compile. + if err := executor.ResetContextOfStmt(s, stmtNode); err != nil { + return nil, err + } + stmt, err := compiler.Compile(ctx, stmtNode) + if err != nil { + s.rollbackOnError(ctx) + logutil.Logger(ctx).Warn("compile SQL failed", + zap.Error(err), + zap.String("SQL", sql)) + return nil, err + } + durCompile := time.Since(s.sessionVars.StartTime) + s.GetSessionVars().DurationCompile = durCompile + + // Step3: Execute the physical plan. + if recordSets, err = s.executeStatement(ctx, connID, stmtNode, stmt, recordSets, multiQuery); err != nil { + return nil, err + } + } + + if s.sessionVars.ClientCapability&mysql.ClientMultiResults == 0 && len(recordSets) > 1 { + // return the first recordset if client doesn't support ClientMultiResults. + recordSets = recordSets[:1] + } + + for _, warn := range warns { + s.sessionVars.StmtCtx.AppendWarning(util.SyntaxWarn(warn)) + } + return recordSets, nil +} + +// rollbackOnError makes sure the next statement starts a new transaction with the latest InfoSchema. +func (s *session) rollbackOnError(ctx context.Context) { + if !s.sessionVars.InTxn() { + s.RollbackTxn(ctx) + } +} + +func (s *session) Txn(active bool) (kv.Transaction, error) { + if !s.txn.validOrPending() && active { + return &s.txn, kv.ErrInvalidTxn + } + if s.txn.pending() && active { + // Transaction is lazy initialized. + // PrepareTxnCtx is called to get a tso future, makes s.txn a pending txn, + // If Txn() is called later, wait for the future to get a valid txn. 
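+		// In other words, the pending future reserved by PrepareTxnFuture is only turned
+		// into a real transaction here, on first actual use, so statements that never
+		// touch data (e.g. "SELECT 1" or "SET ...") do not start a transaction at all.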
+ txnCap := s.getMembufCap() + if err := s.txn.changePendingToValid(txnCap); err != nil { + logutil.BgLogger().Error("active transaction fail", + zap.Error(err)) + s.txn.cleanup() + s.sessionVars.TxnCtx.StartTS = 0 + return &s.txn, err + } + s.sessionVars.TxnCtx.StartTS = s.txn.StartTS() + if !s.sessionVars.IsAutocommit() { + s.sessionVars.SetStatusFlag(mysql.ServerStatusInTrans, true) + } + if s.sessionVars.GetReplicaRead().IsFollowerRead() { + s.txn.SetOption(kv.ReplicaRead, kv.ReplicaReadFollower) + } + } + return &s.txn, nil +} + +func (s *session) NewTxn(ctx context.Context) error { + if s.txn.Valid() { + txnID := s.txn.StartTS() + err := s.CommitTxn(ctx) + if err != nil { + return err + } + vars := s.GetSessionVars() + logutil.Logger(ctx).Info("NewTxn() inside a transaction auto commit", + zap.Int64("schemaVersion", vars.TxnCtx.SchemaVersion), + zap.Uint64("txnStartTS", txnID)) + } + + txn, err := s.store.Begin() + if err != nil { + return err + } + txn.SetCap(s.getMembufCap()) + txn.SetVars(s.sessionVars.KVVars) + if s.GetSessionVars().GetReplicaRead().IsFollowerRead() { + txn.SetOption(kv.ReplicaRead, kv.ReplicaReadFollower) + } + s.txn.changeInvalidToValid(txn) + is := domain.GetDomain(s).InfoSchema() + s.sessionVars.TxnCtx = &variable.TransactionContext{ + InfoSchema: is, + SchemaVersion: is.SchemaMetaVersion(), + CreateTime: time.Now(), + StartTS: txn.StartTS(), + } + return nil +} + +func (s *session) SetValue(key fmt.Stringer, value interface{}) { + s.mu.Lock() + s.mu.values[key] = value + s.mu.Unlock() +} + +func (s *session) Value(key fmt.Stringer) interface{} { + s.mu.RLock() + value := s.mu.values[key] + s.mu.RUnlock() + return value +} + +func (s *session) ClearValue(key fmt.Stringer) { + s.mu.Lock() + delete(s.mu.values, key) + s.mu.Unlock() +} + +// Close function does some clean work when session end. +func (s *session) Close() { + ctx := context.TODO() + s.RollbackTxn(ctx) +} + +// GetSessionVars implements the context.Context interface. +func (s *session) GetSessionVars() *variable.SessionVars { + return s.sessionVars +} + +// CreateSession4Test creates a new session environment for test. +func CreateSession4Test(store kv.Storage) (Session, error) { + s, err := CreateSession(store) + if err == nil { + // initialize session variables for test. + s.GetSessionVars().InitChunkSize = 2 + s.GetSessionVars().MaxChunkSize = 32 + } + return s, err +} + +// CreateSession creates a new session environment. +func CreateSession(store kv.Storage) (Session, error) { + s, err := createSession(store) + if err != nil { + return nil, err + } + return s, nil +} + +// BootstrapSession runs the first time when the TiDB server start. +func BootstrapSession(store kv.Storage) (*domain.Domain, error) { + initLoadCommonGlobalVarsSQL() + + if !getStoreBootstrap(store) { + runInBootstrapSession(store, bootstrap) + } + + se, err := createSession(store) + if err != nil { + return nil, err + } + + dom := domain.GetDomain(se) + + se1, err := createSession(store) + if err != nil { + return nil, err + } + err = dom.UpdateTableStatsLoop(se1) + if err != nil { + return nil, err + } + + return dom, err +} + +// GetDomain gets the associated domain for store. +func GetDomain(store kv.Storage) (*domain.Domain, error) { + return domap.Get(store) +} + +// runInBootstrapSession create a special session for boostrap to run. +// If no bootstrap and storage is remote, we must use a little lease time to +// bootstrap quickly, after bootstrapped, we will reset the lease time. 
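+// The bootstrap session and its domain are thrown away afterwards (the domain is
+// closed and removed from domap); regular sessions are then created from scratch
+// once bootstrap has finished.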
+// TODO: Using a bootstrap tool for doing this may be better later. +func runInBootstrapSession(store kv.Storage, bootstrap func(Session)) { + s, err := createSession(store) + if err != nil { + // Bootstrap fail will cause program exit. + logutil.BgLogger().Fatal("createSession error", zap.Error(err)) + } + + s.SetValue(sessionctx.Initing, true) + bootstrap(s) + finishBootstrap(store) + s.ClearValue(sessionctx.Initing) + + dom := domain.GetDomain(s) + dom.Close() + domap.Delete(store) +} + +func createSession(store kv.Storage) (*session, error) { + dom, err := domap.Get(store) + if err != nil { + return nil, err + } + s := &session{ + store: store, + parser: parser.New(), + sessionVars: variable.NewSessionVars(), + ddlOwnerChecker: dom.DDL().OwnerManager(), + client: store.GetClient(), + } + s.mu.values = make(map[fmt.Stringer]interface{}) + domain.BindDomain(s, dom) + // session implements variable.GlobalVarAccessor. Bind it to ctx. + s.sessionVars.GlobalVarsAccessor = s + s.txn.init() + return s, nil +} + +// CreateSessionWithDomain creates a new Session and binds it with a Domain. +// We need this because when we start DDL in Domain, the DDL need a session +// to change some system tables. But at that time, we have been already in +// a lock context, which cause we can't call createSesion directly. +func CreateSessionWithDomain(store kv.Storage, dom *domain.Domain) (*session, error) { + s := &session{ + store: store, + parser: parser.New(), + sessionVars: variable.NewSessionVars(), + client: store.GetClient(), + } + s.mu.values = make(map[fmt.Stringer]interface{}) + domain.BindDomain(s, dom) + // session implements variable.GlobalVarAccessor. Bind it to ctx. + s.sessionVars.GlobalVarsAccessor = s + s.txn.init() + return s, nil +} + +const ( + notBootstrapped = 0 + bootstrapped = 1 +) + +func getStoreBootstrap(store kv.Storage) bool { + storeBootstrappedLock.Lock() + defer storeBootstrappedLock.Unlock() + // check in memory + _, ok := storeBootstrapped[store.UUID()] + if ok { + return true + } + + var ver int64 + // check in kv store + err := kv.RunInNewTxn(store, false, func(txn kv.Transaction) error { + var err error + t := meta.NewMeta(txn) + ver, err = t.GetBootstrapVersion() + return err + }) + + if err != nil { + logutil.BgLogger().Fatal("check bootstrapped failed", + zap.Error(err)) + } + + if ver != notBootstrapped { + // here mean memory is not ok, but other server has already finished it + storeBootstrapped[store.UUID()] = true + } + + return ver != notBootstrapped +} + +func finishBootstrap(store kv.Storage) { + setStoreBootstrapped(store.UUID()) + + err := kv.RunInNewTxn(store, true, func(txn kv.Transaction) error { + t := meta.NewMeta(txn) + err := t.FinishBootstrap(bootstrapped) + return err + }) + if err != nil { + logutil.BgLogger().Fatal("finish bootstrap failed", + zap.Error(err)) + } +} + +const quoteCommaQuote = "', '" + +var builtinGlobalVariable = []string{ + variable.AutoCommit, + variable.SQLModeVar, + variable.MaxAllowedPacket, + variable.TimeZone, + variable.BlockEncryptionMode, + variable.WaitTimeout, + variable.InteractiveTimeout, + variable.MaxPreparedStmtCount, + variable.InitConnect, + variable.TxnIsolation, + variable.TxReadOnly, + variable.TransactionIsolation, + variable.TransactionReadOnly, + variable.NetBufferLength, + variable.QueryCacheType, + variable.QueryCacheSize, + variable.CharacterSetServer, + variable.AutoIncrementIncrement, + variable.CollationServer, + variable.NetWriteTimeout, + variable.MaxExecutionTime, + variable.InnodbLockWaitTimeout, 
+ + /* TiDB specific global variables: */ + variable.TiDBSkipUTF8Check, + variable.TiDBIndexLookupSize, + variable.TiDBIndexLookupConcurrency, + variable.TiDBIndexLookupJoinConcurrency, + variable.TiDBIndexSerialScanConcurrency, + variable.TiDBHashJoinConcurrency, + variable.TiDBProjectionConcurrency, + variable.TiDBHashAggPartialConcurrency, + variable.TiDBHashAggFinalConcurrency, + variable.TiDBBackoffLockFast, + variable.TiDBBackOffWeight, + variable.TiDBConstraintCheckInPlace, + variable.TiDBDDLReorgWorkerCount, + variable.TiDBDDLReorgBatchSize, + variable.TiDBDDLErrorCountLimit, + variable.TiDBOptInSubqToJoinAndAgg, + variable.TiDBOptCorrelationThreshold, + variable.TiDBOptCorrelationExpFactor, + variable.TiDBOptCPUFactor, + variable.TiDBOptCopCPUFactor, + variable.TiDBOptNetworkFactor, + variable.TiDBOptScanFactor, + variable.TiDBOptDescScanFactor, + variable.TiDBOptMemoryFactor, + variable.TiDBOptDiskFactor, + variable.TiDBOptConcurrencyFactor, + variable.TiDBDistSQLScanConcurrency, + variable.TiDBInitChunkSize, + variable.TiDBMaxChunkSize, + variable.TiDBEnableCascadesPlanner, + variable.TiDBEnableVectorizedExpression, + variable.TiDBEnableNoopFuncs, + variable.TiDBMaxDeltaSchemaCount, +} + +var ( + loadCommonGlobalVarsSQLOnce sync.Once + loadCommonGlobalVarsSQL string +) + +func initLoadCommonGlobalVarsSQL() { + loadCommonGlobalVarsSQLOnce.Do(func() { + vars := append(make([]string, 0, len(builtinGlobalVariable)), builtinGlobalVariable...) + loadCommonGlobalVarsSQL = "select HIGH_PRIORITY * from mysql.global_variables where variable_name in ('" + strings.Join(vars, quoteCommaQuote) + "')" + }) +} + +// loadCommonGlobalVariablesIfNeeded loads and applies commonly used global variables for the session. +func (s *session) loadCommonGlobalVariablesIfNeeded() error { + initLoadCommonGlobalVarsSQL() + vars := s.sessionVars + if vars.CommonGlobalLoaded { + return nil + } + if s.Value(sessionctx.Initing) != nil { + // When running bootstrap or upgrade, we should not access global storage. + return nil + } + + var err error + // Use GlobalVariableCache if TiDB just loaded global variables within 2 second ago. + // When a lot of connections connect to TiDB simultaneously, it can protect TiKV meta region from overload. + gvc := domain.GetDomain(s).GetGlobalVarsCache() + succ, rows, fields := gvc.Get() + if !succ { + // Set the variable to true to prevent cyclic recursive call. + vars.CommonGlobalLoaded = true + rows, fields, err = s.ExecRestrictedSQL(loadCommonGlobalVarsSQL) + if err != nil { + vars.CommonGlobalLoaded = false + logutil.BgLogger().Error("failed to load common global variables.") + return err + } + gvc.Update(rows, fields) + } + + for _, row := range rows { + varName := row.GetString(0) + varVal := row.GetDatum(1, &fields[1].Column.FieldType) + if _, ok := vars.GetSystemVar(varName); !ok { + err = variable.SetSessionSystemVar(s.sessionVars, varName, varVal) + if err != nil { + return err + } + } + } + + // when client set Capability Flags CLIENT_INTERACTIVE, init wait_timeout with interactive_timeout + if vars.ClientCapability&mysql.ClientInteractive > 0 { + if varVal, ok := vars.GetSystemVar(variable.InteractiveTimeout); ok { + if err := vars.SetSystemVar(variable.WaitTimeout, varVal); err != nil { + return err + } + } + } + + vars.CommonGlobalLoaded = true + return nil +} + +// PrepareTxnCtx starts a goroutine to begin a transaction if needed, and creates a new transaction context. +// It is called before we execute a sql query. 
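+// It only rebuilds the TransactionContext (info schema, schema version, create time);
+// the kv transaction itself is created lazily, see Txn and PrepareTxnFuture.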
+func (s *session) PrepareTxnCtx(ctx context.Context) { + if s.txn.validOrPending() { + return + } + + is := domain.GetDomain(s).InfoSchema() + s.sessionVars.TxnCtx = &variable.TransactionContext{ + InfoSchema: is, + SchemaVersion: is.SchemaMetaVersion(), + CreateTime: time.Now(), + } +} + +// PrepareTxnFuture uses to try to get txn future. +func (s *session) PrepareTxnFuture(ctx context.Context) { + if s.txn.validOrPending() { + return + } + + txnFuture := s.getTxnFuture(ctx) + s.txn.changeInvalidToPending(txnFuture) +} + +// RefreshTxnCtx implements context.RefreshTxnCtx interface. +func (s *session) RefreshTxnCtx(ctx context.Context) error { + if err := s.doCommit(ctx); err != nil { + return err + } + + return s.NewTxn(ctx) +} + +// InitTxnWithStartTS create a transaction with startTS. +func (s *session) InitTxnWithStartTS(startTS uint64) error { + if s.txn.Valid() { + return nil + } + + // no need to get txn from txnFutureCh since txn should init with startTs + txn, err := s.store.BeginWithStartTS(startTS) + if err != nil { + return err + } + s.txn.changeInvalidToValid(txn) + s.txn.SetCap(s.getMembufCap()) + err = s.loadCommonGlobalVariablesIfNeeded() + if err != nil { + return err + } + return nil +} + +// GetStore gets the store of session. +func (s *session) GetStore() kv.Storage { + return s.store +} + +type multiQueryNoDelayRecordSet struct { + sqlexec.RecordSet + + affectedRows uint64 + status uint16 + warnCount uint16 + lastInsertID uint64 +} + +func (c *multiQueryNoDelayRecordSet) Close() error { + return nil +} + +func (c *multiQueryNoDelayRecordSet) AffectedRows() uint64 { + return c.affectedRows +} + +func (c *multiQueryNoDelayRecordSet) WarnCount() uint16 { + return c.warnCount +} + +func (c *multiQueryNoDelayRecordSet) Status() uint16 { + return c.status +} + +func (c *multiQueryNoDelayRecordSet) LastInsertID() uint64 { + return c.lastInsertID +} diff --git a/session/session_fail_test.go b/session/session_fail_test.go new file mode 100644 index 0000000..499e37d --- /dev/null +++ b/session/session_fail_test.go @@ -0,0 +1,115 @@ +// Copyright 2018 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package session_test + +import ( + "context" + + . 
"github.com/pingcap/check" + "github.com/pingcap/failpoint" + "github.com/pingcap/tidb/util/testkit" +) + +func (s *testSessionSuite2) TestFailStatementCommit(c *C) { + + tk := testkit.NewTestKitWithInit(c, s.store) + tk.MustExec("create table t (id int)") + tk.MustExec("begin") + tk.MustExec("insert into t values (1)") + + c.Assert(failpoint.Enable("github.com/pingcap/tidb/session/mockStmtCommitError", `return(true)`), IsNil) + _, err := tk.Exec("insert into t values (2),(3),(4),(5)") + c.Assert(err, NotNil) + + c.Assert(failpoint.Disable("github.com/pingcap/tidb/session/mockStmtCommitError"), IsNil) + + _, err = tk.Exec("select * from t") + c.Assert(err, NotNil) + _, err = tk.Exec("insert into t values (3)") + c.Assert(err, NotNil) + _, err = tk.Exec("insert into t values (4)") + c.Assert(err, NotNil) + _, err = tk.Exec("commit") + c.Assert(err, NotNil) + + tk.MustQuery(`select * from t`).Check(testkit.Rows()) + + tk.MustExec("insert into t values (1)") + + tk.MustExec("begin") + tk.MustExec("insert into t values (2)") + tk.MustExec("commit") + + tk.MustExec("begin") + tk.MustExec("insert into t values (3)") + tk.MustExec("rollback") + + tk.MustQuery(`select * from t`).Check(testkit.Rows("1", "2")) +} + +func (s *testSessionSuite2) TestFailStatementCommitInRetry(c *C) { + tk := testkit.NewTestKitWithInit(c, s.store) + tk.MustExec("create table t (id int)") + + tk.MustExec("begin") + tk.MustExec("insert into t values (1)") + tk.MustExec("insert into t values (2),(3),(4),(5)") + tk.MustExec("insert into t values (6)") + + c.Assert(failpoint.Enable("github.com/pingcap/tidb/session/mockCommitError8942", `return(true)`), IsNil) + c.Assert(failpoint.Enable("github.com/pingcap/tidb/session/mockStmtCommitError", `return(true)`), IsNil) + _, err := tk.Exec("commit") + c.Assert(err, NotNil) + c.Assert(failpoint.Disable("github.com/pingcap/tidb/session/mockCommitError8942"), IsNil) + c.Assert(failpoint.Disable("github.com/pingcap/tidb/session/mockStmtCommitError"), IsNil) + + tk.MustExec("insert into t values (6)") + tk.MustQuery(`select * from t`).Check(testkit.Rows("6")) +} + +func (s *testSessionSuite2) TestGetTSFailDirtyState(c *C) { + tk := testkit.NewTestKitWithInit(c, s.store) + tk.MustExec("create table t (id int)") + + c.Assert(failpoint.Enable("github.com/pingcap/tidb/session/mockGetTSFail", "return"), IsNil) + ctx := failpoint.WithHook(context.Background(), func(ctx context.Context, fpname string) bool { + return fpname == "github.com/pingcap/tidb/session/mockGetTSFail" + }) + _, err := tk.Se.Execute(ctx, "select * from t") + c.Assert(err, NotNil) + + // Fix a bug that active txn fail set TxnState.fail to error, and then the following write + // affected by this fail flag. + tk.MustExec("insert into t values (1)") + tk.MustQuery(`select * from t`).Check(testkit.Rows("1")) + c.Assert(failpoint.Disable("github.com/pingcap/tidb/session/mockGetTSFail"), IsNil) +} + +func (s *testSessionSuite2) TestGetTSFailDirtyStateInretry(c *C) { + defer func() { + c.Assert(failpoint.Disable("github.com/pingcap/tidb/session/mockCommitError"), IsNil) + c.Assert(failpoint.Disable("github.com/pingcap/tidb/store/tikv/mockGetTSErrorInRetry"), IsNil) + }() + + tk := testkit.NewTestKitWithInit(c, s.store) + tk.MustExec("create table t (id int)") + + c.Assert(failpoint.Enable("github.com/pingcap/tidb/session/mockCommitError", `return(true)`), IsNil) + // This test will mock a PD timeout error, and recover then. + // Just make mockGetTSErrorInRetry return true once, and then return false. 
+ c.Assert(failpoint.Enable("github.com/pingcap/tidb/store/tikv/mockGetTSErrorInRetry", + `1*return(true)->return(false)`), IsNil) + tk.MustExec("insert into t values (2)") + tk.MustQuery(`select * from t`).Check(testkit.Rows("2")) +} diff --git a/session/session_test.go b/session/session_test.go new file mode 100644 index 0000000..b13e53a --- /dev/null +++ b/session/session_test.go @@ -0,0 +1,811 @@ +// Copyright 2015 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package session_test + +import ( + "context" + "fmt" + "sync/atomic" + "time" + + . "github.com/pingcap/check" + "github.com/pingcap/failpoint" + "github.com/pingcap/tidb/domain" + "github.com/pingcap/tidb/kv" + "github.com/pingcap/tidb/parser/model" + "github.com/pingcap/tidb/parser/mysql" + "github.com/pingcap/tidb/parser/terror" + "github.com/pingcap/tidb/session" + "github.com/pingcap/tidb/sessionctx" + "github.com/pingcap/tidb/sessionctx/variable" + "github.com/pingcap/tidb/store/mockstore" + "github.com/pingcap/tidb/store/mockstore/mocktikv" + "github.com/pingcap/tidb/table/tables" + "github.com/pingcap/tidb/types" + "github.com/pingcap/tidb/util/sqlexec" + "github.com/pingcap/tidb/util/testkit" + "github.com/pingcap/tidb/util/testleak" +) + +var _ = Suite(&testSessionSuite{}) +var _ = Suite(&testSessionSuite2{}) +var _ = SerialSuites(&testSessionSerialSuite{}) + +type testSessionSuiteBase struct { + cluster *mocktikv.Cluster + mvccStore mocktikv.MVCCStore + store kv.Storage + dom *domain.Domain +} + +type testSessionSuite struct { + testSessionSuiteBase +} + +type testSessionSuite2 struct { + testSessionSuiteBase +} + +type testSessionSerialSuite struct { + testSessionSuiteBase +} + +func (s *testSessionSuiteBase) SetUpSuite(c *C) { + testleak.BeforeTest() + s.cluster = mocktikv.NewCluster() + mocktikv.BootstrapWithSingleStore(s.cluster) + s.mvccStore = mocktikv.MustNewMVCCStore() + store, err := mockstore.NewMockTikvStore( + mockstore.WithCluster(s.cluster), + mockstore.WithMVCCStore(s.mvccStore), + ) + c.Assert(err, IsNil) + s.store = store + session.SetSchemaLease(0) + session.DisableStats4Test() + s.dom, err = session.BootstrapSession(s.store) + c.Assert(err, IsNil) +} + +func (s *testSessionSuiteBase) TearDownSuite(c *C) { + s.dom.Close() + s.store.Close() + testleak.AfterTest(c)() +} + +func (s *testSessionSuiteBase) TearDownTest(c *C) { + tk := testkit.NewTestKitWithInit(c, s.store) + r := tk.MustQuery("show full tables") + for _, tb := range r.Rows() { + tableName := tb[0] + tableType := tb[1] + if tableType == "BASE TABLE" { + tk.MustExec(fmt.Sprintf("drop table %v", tableName)) + } else { + panic(fmt.Sprintf("Unexpected table '%s' with type '%s'.", tableName, tableType)) + } + } +} + +func (s *testSessionSuite) TestQueryString(c *C) { + tk := testkit.NewTestKitWithInit(c, s.store) + + tk.MustExec("create table mutil1 (a int);create table multi2 (a int)") + queryStr := tk.Se.Value(sessionctx.QueryString) + c.Assert(queryStr, Equals, "create table multi2 (a int)") +} + +// TestAutocommit . 
See https://dev.mysql.com/doc/internals/en/status-flags.html +func (s *testSessionSuite) TestAutocommit(c *C) { + tk := testkit.NewTestKitWithInit(c, s.store) + + tk.MustExec("drop table if exists t;") + c.Assert(int(tk.Se.Status()&mysql.ServerStatusAutocommit), Greater, 0) + tk.MustExec("create table t (id BIGINT PRIMARY KEY AUTO_INCREMENT NOT NULL)") + c.Assert(int(tk.Se.Status()&mysql.ServerStatusAutocommit), Greater, 0) + tk.MustExec("insert t values ()") + c.Assert(int(tk.Se.Status()&mysql.ServerStatusAutocommit), Greater, 0) + tk.MustExec("begin") + c.Assert(int(tk.Se.Status()&mysql.ServerStatusAutocommit), Greater, 0) + tk.MustExec("insert t values ()") + c.Assert(int(tk.Se.Status()&mysql.ServerStatusAutocommit), Greater, 0) + tk.MustExec("drop table if exists t") + c.Assert(int(tk.Se.Status()&mysql.ServerStatusAutocommit), Greater, 0) + + tk.MustExec("create table t (id BIGINT PRIMARY KEY AUTO_INCREMENT NOT NULL)") + c.Assert(int(tk.Se.Status()&mysql.ServerStatusAutocommit), Greater, 0) + tk.MustExec("set autocommit=0") + c.Assert(int(tk.Se.Status()&mysql.ServerStatusAutocommit), Equals, 0) + tk.MustExec("insert t values ()") + c.Assert(int(tk.Se.Status()&mysql.ServerStatusAutocommit), Equals, 0) + tk.MustExec("commit") + c.Assert(int(tk.Se.Status()&mysql.ServerStatusAutocommit), Equals, 0) + tk.MustExec("drop table if exists t") + c.Assert(int(tk.Se.Status()&mysql.ServerStatusAutocommit), Equals, 0) + tk.MustExec("set autocommit='On'") + c.Assert(int(tk.Se.Status()&mysql.ServerStatusAutocommit), Greater, 0) + + // When autocommit is 0, transaction start ts should be the first *valid* + // statement, rather than *any* statement. + tk.MustExec("create table t (id int)") + tk.MustExec("set @@autocommit = 0") + tk.MustExec("rollback") + tk.MustExec("set @@autocommit = 0") + tk1 := testkit.NewTestKitWithInit(c, s.store) + tk1.MustExec("insert into t select 1") + tk.MustQuery("select * from t").Check(testkit.Rows("1")) + + // TODO: MySQL compatibility for setting global variable. + // tk.MustExec("begin") + // tk.MustExec("insert into t values (42)") + // tk.MustExec("set @@global.autocommit = 1") + // tk.MustExec("rollback") + // tk.MustQuery("select count(*) from t where id = 42").Check(testkit.Rows("0")) + // Even the transaction is rollbacked, the set statement succeed. + // tk.MustQuery("select @@global.autocommit").Rows("1") +} + +// TestTxnLazyInitialize tests that when autocommit = 0, not all statement starts +// a new transaction. +func (s *testSessionSuite) TestTxnLazyInitialize(c *C) { + tk := testkit.NewTestKitWithInit(c, s.store) + tk.MustExec("drop table if exists t") + tk.MustExec("create table t (id int)") + + tk.MustExec("set @@autocommit = 0") + txn, err := tk.Se.Txn(true) + c.Assert(kv.ErrInvalidTxn.Equal(err), IsTrue) + txn, err = tk.Se.Txn(false) + c.Assert(err, IsNil) + c.Assert(txn.Valid(), IsFalse) + tk.MustQuery("select @@tidb_current_ts").Check(testkit.Rows("0")) + tk.MustQuery("select @@tidb_current_ts").Check(testkit.Rows("0")) + + // Those statement should not start a new transaction automacally. + tk.MustQuery("select 1") + tk.MustQuery("select @@tidb_current_ts").Check(testkit.Rows("0")) + + tk.MustExec("set @@tidb_general_log = 0") + tk.MustQuery("select @@tidb_current_ts").Check(testkit.Rows("0")) + + tk.MustQuery("explain select * from t") + tk.MustQuery("select @@tidb_current_ts").Check(testkit.Rows("0")) + + // Begin statement should start a new transaction. 
+ tk.MustExec("begin") + txn, err = tk.Se.Txn(false) + c.Assert(err, IsNil) + c.Assert(txn.Valid(), IsTrue) + tk.MustExec("rollback") + + tk.MustExec("select * from t") + txn, err = tk.Se.Txn(false) + c.Assert(err, IsNil) + c.Assert(txn.Valid(), IsTrue) + tk.MustExec("rollback") + + tk.MustExec("insert into t values (1)") + txn, err = tk.Se.Txn(false) + c.Assert(err, IsNil) + c.Assert(txn.Valid(), IsTrue) + tk.MustExec("rollback") +} + +func (s *testSessionSuite) TestGetSysVariables(c *C) { + tk := testkit.NewTestKitWithInit(c, s.store) + + // Test ScopeSession + tk.MustExec("select @@warning_count") + tk.MustExec("select @@session.warning_count") + tk.MustExec("select @@local.warning_count") + _, err := tk.Exec("select @@global.warning_count") + c.Assert(terror.ErrorEqual(err, variable.ErrIncorrectScope), IsTrue, Commentf("err %v", err)) + + // Test ScopeGlobal + tk.MustExec("select @@max_connections") + tk.MustExec("select @@global.max_connections") + _, err = tk.Exec("select @@session.max_connections") + c.Assert(terror.ErrorEqual(err, variable.ErrIncorrectScope), IsTrue, Commentf("err %v", err)) + _, err = tk.Exec("select @@local.max_connections") + c.Assert(terror.ErrorEqual(err, variable.ErrIncorrectScope), IsTrue, Commentf("err %v", err)) + + // Test ScopeNone + tk.MustExec("select @@performance_schema_max_mutex_classes") + tk.MustExec("select @@global.performance_schema_max_mutex_classes") + _, err = tk.Exec("select @@session.performance_schema_max_mutex_classes") + c.Assert(terror.ErrorEqual(err, variable.ErrIncorrectScope), IsTrue, Commentf("err %v", err)) + _, err = tk.Exec("select @@local.performance_schema_max_mutex_classes") + c.Assert(terror.ErrorEqual(err, variable.ErrIncorrectScope), IsTrue, Commentf("err %v", err)) +} + +func (s *testSessionSuite) TestString(c *C) { + tk := testkit.NewTestKitWithInit(c, s.store) + tk.MustExec("select 1") + // here to check the panic bug in String() when txn is nil after committed. + c.Log(tk.Se.String()) +} + +func (s *testSessionSuite) TestDatabase(c *C) { + tk := testkit.NewTestKitWithInit(c, s.store) + + // Test database. + tk.MustExec("create database xxx") + tk.MustExec("drop database xxx") + + tk.MustExec("drop database if exists xxx") + tk.MustExec("create database xxx") + tk.MustExec("create database if not exists xxx") + tk.MustExec("drop database if exists xxx") + + // Test schema. + tk.MustExec("create schema xxx") + tk.MustExec("drop schema xxx") + + tk.MustExec("drop schema if exists xxx") + tk.MustExec("create schema xxx") + tk.MustExec("create schema if not exists xxx") + tk.MustExec("drop schema if exists xxx") +} + +func (s *testSessionSuite) TestExecRestrictedSQL(c *C) { + tk := testkit.NewTestKitWithInit(c, s.store) + r, _, err := tk.Se.(sqlexec.RestrictedSQLExecutor).ExecRestrictedSQL("select 1;") + c.Assert(err, IsNil) + c.Assert(len(r), Equals, 1) +} + +// TestInTrans . 
See https://dev.mysql.com/doc/internals/en/status-flags.html +func (s *testSessionSuite) TestInTrans(c *C) { + tk := testkit.NewTestKitWithInit(c, s.store) + tk.MustExec("drop table if exists t;") + tk.MustExec("create table t (id BIGINT PRIMARY KEY AUTO_INCREMENT NOT NULL)") + tk.MustExec("insert t values ()") + tk.MustExec("begin") + txn, err := tk.Se.Txn(true) + c.Assert(err, IsNil) + c.Assert(txn.Valid(), IsTrue) + tk.MustExec("insert t values ()") + c.Assert(txn.Valid(), IsTrue) + tk.MustExec("drop table if exists t;") + c.Assert(txn.Valid(), IsFalse) + tk.MustExec("create table t (id BIGINT PRIMARY KEY AUTO_INCREMENT NOT NULL)") + c.Assert(txn.Valid(), IsFalse) + tk.MustExec("insert t values ()") + c.Assert(txn.Valid(), IsFalse) + tk.MustExec("commit") + tk.MustExec("insert t values ()") + + tk.MustExec("set autocommit=0") + tk.MustExec("begin") + c.Assert(txn.Valid(), IsTrue) + tk.MustExec("insert t values ()") + c.Assert(txn.Valid(), IsTrue) + tk.MustExec("commit") + c.Assert(txn.Valid(), IsFalse) + tk.MustExec("insert t values ()") + c.Assert(txn.Valid(), IsTrue) + tk.MustExec("commit") + c.Assert(txn.Valid(), IsFalse) + + tk.MustExec("set autocommit=1") + tk.MustExec("drop table if exists t") + tk.MustExec("create table t (id BIGINT PRIMARY KEY AUTO_INCREMENT NOT NULL)") + tk.MustExec("begin") + c.Assert(txn.Valid(), IsTrue) + tk.MustExec("insert t values ()") + c.Assert(txn.Valid(), IsTrue) + tk.MustExec("rollback") + c.Assert(txn.Valid(), IsFalse) +} + +func (s *testSessionSuite) TestSession(c *C) { + tk := testkit.NewTestKitWithInit(c, s.store) + tk.MustExec("ROLLBACK;") + tk.Se.Close() +} + +func (s *testSessionSuite) TestResultField(c *C) { + tk := testkit.NewTestKitWithInit(c, s.store) + tk.MustExec("create table t (id int);") + + tk.MustExec(`INSERT INTO t VALUES (1);`) + tk.MustExec(`INSERT INTO t VALUES (2);`) + r, err := tk.Exec(`SELECT count(*) from t;`) + c.Assert(err, IsNil) + fields := r.Fields() + c.Assert(err, IsNil) + c.Assert(len(fields), Equals, 1) + field := fields[0].Column + c.Assert(field.Tp, Equals, mysql.TypeLonglong) + c.Assert(field.Flen, Equals, 21) +} + +func (s *testSessionSuite) TestFieldText(c *C) { + tk := testkit.NewTestKitWithInit(c, s.store) + tk.MustExec("create table t (a int)") + tests := []struct { + sql string + field string + }{ + {"select distinct(a) from t", "a"}, + {"select (1)", "1"}, + {"select (1+1)", "(1+1)"}, + {"select a from t", "a"}, + {"select ((a+1)) from t", "((a+1))"}, + {"select 1 /*!32301 +1 */;", "1 +1 "}, + {"select /*!32301 1 +1 */;", "1 +1 "}, + {"/*!32301 select 1 +1 */;", "1 +1 "}, + {"select 1 + /*!32301 1 +1 */;", "1 + 1 +1 "}, + {"select 1 /*!32301 + 1, 1 */;", "1 + 1"}, + {"select /*!32301 1, 1 +1 */;", "1"}, + {"select /*!32301 1 + 1, */ +1;", "1 + 1"}, + } + for _, tt := range tests { + result, err := tk.Exec(tt.sql) + c.Assert(err, IsNil) + c.Assert(result.Fields()[0].ColumnAsName.O, Equals, tt.field) + } +} + +func (s *testSessionSuite2) TestIndexColumnLength(c *C) { + tk := testkit.NewTestKitWithInit(c, s.store) + tk.MustExec("create table t (c1 int, c2 blob);") + tk.MustExec("create index idx_c1 on t(c1);") + tk.MustExec("create index idx_c2 on t(c2(6));") + + is := s.dom.InfoSchema() + tab, err2 := is.TableByName(model.NewCIStr("test"), model.NewCIStr("t")) + c.Assert(err2, Equals, nil) + + idxC1Cols := tables.FindIndexByColName(tab, "c1").Meta().Columns + c.Assert(idxC1Cols[0].Length, Equals, types.UnspecifiedLength) + + idxC2Cols := tables.FindIndexByColName(tab, "c2").Meta().Columns + 
c.Assert(idxC2Cols[0].Length, Equals, 6) +} + +func (s *testSessionSuite2) TestMultiStmts(c *C) { + tk := testkit.NewTestKitWithInit(c, s.store) + tk.MustExec("drop table if exists t1; create table t1(id int ); insert into t1 values (1);") + tk.MustQuery("select * from t1;").Check(testkit.Rows("1")) +} + +func (s *testSessionSuite2) TestLastExecuteDDLFlag(c *C) { + tk := testkit.NewTestKitWithInit(c, s.store) + tk.MustExec("drop table if exists t1") + tk.MustExec("create table t1(id int)") + c.Assert(tk.Se.Value(sessionctx.LastExecuteDDL), NotNil) + tk.MustExec("insert into t1 values (1)") + c.Assert(tk.Se.Value(sessionctx.LastExecuteDDL), IsNil) +} + +func (s *testSessionSuite2) TestReplace(c *C) { + tk := testkit.NewTestKitWithInit(c, s.store) + + // test for https://github.com/pingcap/tidb/pull/456 + tk.MustExec("drop table if exists t") + tk.MustExec("drop table if exists t1") + tk.MustExec("create table t1 (c1 int, c2 int, c3 int);") + tk.MustExec("replace into t1 set c1=1, c2=2, c3=1;") + tk.MustExec("create table t (c1 int, c2 int, c3 int, primary key (c1));") + tk.MustExec("replace into t set c1=1, c2=4;") + tk.MustExec("replace into t select * from t1 limit 1;") +} + +func (s *testSessionSuite2) TestIssue986(c *C) { + tk := testkit.NewTestKitWithInit(c, s.store) + sqlText := `CREATE TABLE address ( + id bigint(20) NOT NULL AUTO_INCREMENT, + PRIMARY KEY (id));` + tk.MustExec(sqlText) + tk.MustExec(`insert into address values ('10')`) +} + +func (s *testSessionSuite2) TestTableInfoMeta(c *C) { + tk := testkit.NewTestKitWithInit(c, s.store) + + checkResult := func(affectedRows uint64, insertID uint64) { + gotRows := tk.Se.AffectedRows() + c.Assert(gotRows, Equals, affectedRows) + + gotID := tk.Se.LastInsertID() + c.Assert(gotID, Equals, insertID) + } + + // create table + tk.MustExec("CREATE TABLE tbl_test(id INT NOT NULL DEFAULT 1, name varchar(255), PRIMARY KEY(id));") + + // insert data + tk.MustExec(`INSERT INTO tbl_test VALUES (1, "hello");`) + checkResult(1, 0) + + tk.MustExec(`INSERT INTO tbl_test VALUES (2, "hello");`) + checkResult(1, 0) + + tk.MustExec(`DELETE from tbl_test where id = 2;`) + checkResult(1, 0) + + // select data + tk.MustQuery("select * from tbl_test").Check(testkit.Rows("1 hello")) +} + +func (s *testSessionSuite2) TestCaseInsensitive(c *C) { + tk := testkit.NewTestKitWithInit(c, s.store) + + tk.MustExec("create table T (a text, B int)") + tk.MustExec("insert t (A, b) values ('aaa', 1)") + rs, _ := tk.Exec("select * from t") + fields := rs.Fields() + c.Assert(fields[0].ColumnAsName.O, Equals, "a") + c.Assert(fields[1].ColumnAsName.O, Equals, "B") + rs, _ = tk.Exec("select A, b from t") + fields = rs.Fields() + c.Assert(fields[0].ColumnAsName.O, Equals, "A") + c.Assert(fields[1].ColumnAsName.O, Equals, "b") + rs, _ = tk.Exec("select a as A from t where A > 0") + fields = rs.Fields() + c.Assert(fields[0].ColumnAsName.O, Equals, "A") +} + +// TestDeletePanic is for delete panic +func (s *testSessionSuite2) TestDeletePanic(c *C) { + tk := testkit.NewTestKitWithInit(c, s.store) + tk.MustExec("create table t (c int)") + tk.MustExec("insert into t values (1), (2), (3)") + tk.MustExec("delete from `t` where `c` = 1") + tk.MustExec("delete from `t` where `c` = 2") +} + +var _ = Suite(&testSchemaSuite{}) +var _ = SerialSuites(&testSchemaSerialSuite{}) + +type testSchemaSuiteBase struct { + cluster *mocktikv.Cluster + mvccStore mocktikv.MVCCStore + store kv.Storage + lease time.Duration + dom *domain.Domain +} + +type testSchemaSuite struct { + testSchemaSuiteBase +} 
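+// testSchemaSerialSuite (below) is registered with SerialSuites rather than Suite,
+// which suggests its tests are meant to run serially; that matters for cases such as
+// TestLoadSchemaFailed, which toggles global failpoints and domain retry settings.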
+ +type testSchemaSerialSuite struct { + testSchemaSuiteBase +} + +func (s *testSchemaSuiteBase) TearDownTest(c *C) { + tk := testkit.NewTestKitWithInit(c, s.store) + r := tk.MustQuery("show tables") + for _, tb := range r.Rows() { + tableName := tb[0] + tk.MustExec(fmt.Sprintf("drop table %v", tableName)) + } +} + +func (s *testSchemaSuiteBase) SetUpSuite(c *C) { + testleak.BeforeTest() + s.cluster = mocktikv.NewCluster() + mocktikv.BootstrapWithSingleStore(s.cluster) + s.mvccStore = mocktikv.MustNewMVCCStore() + store, err := mockstore.NewMockTikvStore( + mockstore.WithCluster(s.cluster), + mockstore.WithMVCCStore(s.mvccStore), + ) + c.Assert(err, IsNil) + s.store = store + s.lease = 20 * time.Millisecond + session.SetSchemaLease(s.lease) + session.DisableStats4Test() + dom, err := session.BootstrapSession(s.store) + c.Assert(err, IsNil) + s.dom = dom +} + +func (s *testSchemaSuiteBase) TearDownSuite(c *C) { + s.dom.Close() + s.store.Close() + testleak.AfterTest(c)() +} + +func (s *testSchemaSerialSuite) TestLoadSchemaFailed(c *C) { + atomic.StoreInt32(&domain.SchemaOutOfDateRetryTimes, int32(3)) + atomic.StoreInt64(&domain.SchemaOutOfDateRetryInterval, int64(20*time.Millisecond)) + defer func() { + atomic.StoreInt32(&domain.SchemaOutOfDateRetryTimes, 10) + atomic.StoreInt64(&domain.SchemaOutOfDateRetryInterval, int64(500*time.Millisecond)) + }() + + tk := testkit.NewTestKitWithInit(c, s.store) + tk1 := testkit.NewTestKitWithInit(c, s.store) + tk2 := testkit.NewTestKitWithInit(c, s.store) + + tk.MustExec("create table t (a int);") + tk.MustExec("create table t1 (a int);") + tk.MustExec("create table t2 (a int);") + + tk1.MustExec("begin") + tk2.MustExec("begin") + + // Make sure loading information schema is failed and server is invalid. + c.Assert(failpoint.Enable("github.com/pingcap/tidb/domain/ErrorMockReloadFailed", `return(true)`), IsNil) + err := domain.GetDomain(tk.Se).Reload() + c.Assert(err, NotNil) + + lease := domain.GetDomain(tk.Se).DDL().GetLease() + time.Sleep(lease * 2) + + // Make sure executing insert statement is failed when server is invalid. + _, err = tk.Exec("insert t values (100);") + c.Check(err, NotNil) + + tk1.MustExec("insert t1 values (100);") + tk2.MustExec("insert t2 values (100);") + + _, err = tk1.Exec("commit") + c.Check(err, NotNil) + + ver, err := s.store.CurrentVersion() + c.Assert(err, IsNil) + c.Assert(ver, NotNil) + + failpoint.Disable("github.com/pingcap/tidb/domain/ErrorMockReloadFailed") + time.Sleep(lease * 2) + + tk.MustExec("drop table if exists t;") + tk.MustExec("create table t (a int);") + tk.MustExec("insert t values (100);") + // Make sure insert to table t2 transaction executes. + tk2.MustExec("commit") +} + +func (s *testSchemaSuite) TestTableReaderChunk(c *C) { + // Since normally a single region mock tikv only returns one partial result we need to manually split the + // table to test multiple chunks. 
+ tk := testkit.NewTestKitWithInit(c, s.store) + tk.MustExec("create table chk (a int)") + for i := 0; i < 100; i++ { + tk.MustExec(fmt.Sprintf("insert chk values (%d)", i)) + } + tbl, err := domain.GetDomain(tk.Se).InfoSchema().TableByName(model.NewCIStr("test"), model.NewCIStr("chk")) + c.Assert(err, IsNil) + s.cluster.SplitTable(s.mvccStore, tbl.Meta().ID, 10) + + tk.Se.GetSessionVars().DistSQLScanConcurrency = 1 + tk.MustExec("set tidb_init_chunk_size = 2") + defer func() { + tk.MustExec(fmt.Sprintf("set tidb_init_chunk_size = %d", variable.DefInitChunkSize)) + }() + rs, err := tk.Exec("select * from chk") + c.Assert(err, IsNil) + req := rs.NewChunk() + var count int + var numChunks int + for { + err = rs.Next(context.TODO(), req) + c.Assert(err, IsNil) + numRows := req.NumRows() + if numRows == 0 { + break + } + for i := 0; i < numRows; i++ { + c.Assert(req.GetRow(i).GetInt64(0), Equals, int64(count)) + count++ + } + numChunks++ + } + c.Assert(count, Equals, 100) + // FIXME: revert this result to new group value after distsql can handle initChunkSize. + c.Assert(numChunks, Equals, 1) + rs.Close() +} + +func (s *testSchemaSuite) TestInsertExecChunk(c *C) { + tk := testkit.NewTestKitWithInit(c, s.store) + tk.MustExec("create table test1(a int)") + for i := 0; i < 100; i++ { + tk.MustExec(fmt.Sprintf("insert test1 values (%d)", i)) + } + tk.MustExec("create table test2(a int)") + + tk.Se.GetSessionVars().DistSQLScanConcurrency = 1 + tk.MustExec("insert into test2(a) select a from test1;") + + rs, err := tk.Exec("select * from test2") + c.Assert(err, IsNil) + var idx int + for { + req := rs.NewChunk() + err = rs.Next(context.TODO(), req) + c.Assert(err, IsNil) + if req.NumRows() == 0 { + break + } + + for rowIdx := 0; rowIdx < req.NumRows(); rowIdx++ { + row := req.GetRow(rowIdx) + c.Assert(row.GetInt64(0), Equals, int64(idx)) + idx++ + } + } + + c.Assert(idx, Equals, 100) + rs.Close() +} + +func (s *testSchemaSuite) TestDeleteExecChunk(c *C) { + tk := testkit.NewTestKitWithInit(c, s.store) + tk.MustExec("create table chk(a int)") + + for i := 0; i < 100; i++ { + tk.MustExec(fmt.Sprintf("insert chk values (%d)", i)) + } + + tk.Se.GetSessionVars().DistSQLScanConcurrency = 1 + + for i := 0; i < 99; i++ { + tk.MustExec(fmt.Sprintf("delete from chk where a = %d", i)) + } + + rs, err := tk.Exec("select * from chk") + c.Assert(err, IsNil) + + req := rs.NewChunk() + err = rs.Next(context.TODO(), req) + c.Assert(err, IsNil) + c.Assert(req.NumRows(), Equals, 1) + + row := req.GetRow(0) + c.Assert(row.GetInt64(0), Equals, int64(99)) + rs.Close() +} + +func (s *testSchemaSuite) TestIndexLookUpReaderChunk(c *C) { + // Since normally a single region mock tikv only returns one partial result we need to manually split the + // table to test multiple chunks. 
+ tk := testkit.NewTestKitWithInit(c, s.store) + tk.MustExec("drop table if exists chk") + tk.MustExec("create table chk (k int unique, c int)") + for i := 0; i < 100; i++ { + tk.MustExec(fmt.Sprintf("insert chk values (%d, %d)", i, i)) + } + tbl, err := domain.GetDomain(tk.Se).InfoSchema().TableByName(model.NewCIStr("test"), model.NewCIStr("chk")) + c.Assert(err, IsNil) + s.cluster.SplitIndex(s.mvccStore, tbl.Meta().ID, tbl.Indices()[0].Meta().ID, 10) + + tk.Se.GetSessionVars().IndexLookupSize = 10 + rs, err := tk.Exec("select * from chk order by k") + c.Assert(err, IsNil) + req := rs.NewChunk() + var count int + for { + err = rs.Next(context.TODO(), req) + c.Assert(err, IsNil) + numRows := req.NumRows() + if numRows == 0 { + break + } + for i := 0; i < numRows; i++ { + c.Assert(req.GetRow(i).GetInt64(0), Equals, int64(count)) + c.Assert(req.GetRow(i).GetInt64(1), Equals, int64(count)) + count++ + } + } + c.Assert(count, Equals, 100) + rs.Close() + + rs, err = tk.Exec("select k from chk where c < 90 order by k") + c.Assert(err, IsNil) + req = rs.NewChunk() + count = 0 + for { + err = rs.Next(context.TODO(), req) + c.Assert(err, IsNil) + numRows := req.NumRows() + if numRows == 0 { + break + } + for i := 0; i < numRows; i++ { + c.Assert(req.GetRow(i).GetInt64(0), Equals, int64(count)) + count++ + } + } + c.Assert(count, Equals, 90) + rs.Close() +} + +func (s *testSessionSuite2) TestStatementErrorInTransaction(c *C) { + tk := testkit.NewTestKitWithInit(c, s.store) + tk.MustExec("create table statement_side_effect (c int primary key)") + tk.MustExec("begin") + tk.MustExec("insert into statement_side_effect values (1)") + _, err := tk.Exec("insert into statement_side_effect value (2),(3),(4),(1)") + c.Assert(err, NotNil) + tk.MustQuery(`select * from statement_side_effect`).Check(testkit.Rows("1")) + tk.MustExec("commit") + tk.MustQuery(`select * from statement_side_effect`).Check(testkit.Rows("1")) +} + +func (s *testSessionSuite2) TestTxnGoString(c *C) { + tk := testkit.NewTestKitWithInit(c, s.store) + tk.MustExec("drop table if exists gostr;") + tk.MustExec("create table gostr (id int);") + txn, err := tk.Se.Txn(false) + c.Assert(err, IsNil) + str1 := fmt.Sprintf("%#v", txn) + c.Assert(str1, Equals, "Txn{state=invalid}") + tk.MustExec("begin") + txn, err = tk.Se.Txn(false) + c.Assert(err, IsNil) + c.Assert(fmt.Sprintf("%#v", txn), Equals, fmt.Sprintf("Txn{state=valid, txnStartTS=%d}", txn.StartTS())) + + tk.MustExec("insert into gostr values (1)") + c.Assert(fmt.Sprintf("%#v", txn), Equals, fmt.Sprintf("Txn{state=valid, txnStartTS=%d}", txn.StartTS())) + + tk.MustExec("rollback") + c.Assert(fmt.Sprintf("%#v", txn), Equals, "Txn{state=invalid}") +} + +func (s *testSessionSuite2) TestMaxExeucteTime(c *C) { + tk := testkit.NewTestKitWithInit(c, s.store) + + tk.MustExec("create table MaxExecTime( id int,name varchar(128),age int);") + tk.MustExec("begin") + tk.MustExec("insert into MaxExecTime (id,name,age) values (1,'john',18),(2,'lary',19),(3,'lily',18);") + + tk.MustQuery("select @@MAX_EXECUTION_TIME;").Check(testkit.Rows("0")) + tk.MustQuery("select @@global.MAX_EXECUTION_TIME;").Check(testkit.Rows("0")) + tk.MustQuery("select /*+ MAX_EXECUTION_TIME(1000) */ * FROM MaxExecTime;") + + tk.MustExec("set @@global.MAX_EXECUTION_TIME = 300;") + tk.MustQuery("select * FROM MaxExecTime;") + + tk.MustExec("set @@MAX_EXECUTION_TIME = 150;") + tk.MustQuery("select * FROM MaxExecTime;") + + tk.MustQuery("select @@global.MAX_EXECUTION_TIME;").Check(testkit.Rows("300")) + tk.MustQuery("select 
@@MAX_EXECUTION_TIME;").Check(testkit.Rows("150")) + + tk.MustExec("set @@global.MAX_EXECUTION_TIME = 0;") + tk.MustExec("set @@MAX_EXECUTION_TIME = 0;") + tk.MustExec("commit") + tk.MustExec("drop table if exists MaxExecTime;") +} + +func (s *testSessionSuite2) TestLoadClientInteractive(c *C) { + var ( + err error + connectionID uint64 + ) + tk := testkit.NewTestKit(c, s.store) + tk.Se, err = session.CreateSession4Test(s.store) + c.Assert(err, IsNil) + id := atomic.AddUint64(&connectionID, 1) + tk.Se.SetConnectionID(id) + tk.Se.GetSessionVars().ClientCapability = tk.Se.GetSessionVars().ClientCapability | mysql.ClientInteractive + tk.MustQuery("select @@wait_timeout").Check(testkit.Rows("28800")) +} + +func (s *testSessionSuite2) TestReplicaRead(c *C) { + var err error + tk := testkit.NewTestKit(c, s.store) + tk.Se, err = session.CreateSession4Test(s.store) + c.Assert(err, IsNil) + c.Assert(tk.Se.GetSessionVars().GetReplicaRead(), Equals, kv.ReplicaReadLeader) + tk.MustExec("set @@tidb_replica_read = 'follower';") + c.Assert(tk.Se.GetSessionVars().GetReplicaRead(), Equals, kv.ReplicaReadFollower) + tk.MustExec("set @@tidb_replica_read = 'leader';") + c.Assert(tk.Se.GetSessionVars().GetReplicaRead(), Equals, kv.ReplicaReadLeader) +} diff --git a/session/tidb.go b/session/tidb.go new file mode 100644 index 0000000..02b5727 --- /dev/null +++ b/session/tidb.go @@ -0,0 +1,327 @@ +// Copyright 2013 The ql Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSES/QL-LICENSE file. + +// Copyright 2015 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package session + +import ( + "context" + "sync" + "sync/atomic" + "time" + + "github.com/pingcap/errors" + "github.com/pingcap/tidb/domain" + "github.com/pingcap/tidb/executor" + "github.com/pingcap/tidb/kv" + "github.com/pingcap/tidb/parser" + "github.com/pingcap/tidb/parser/ast" + "github.com/pingcap/tidb/parser/mysql" + "github.com/pingcap/tidb/parser/terror" + "github.com/pingcap/tidb/sessionctx" + "github.com/pingcap/tidb/sessionctx/variable" + "github.com/pingcap/tidb/util" + "github.com/pingcap/tidb/util/chunk" + "github.com/pingcap/tidb/util/logutil" + "github.com/pingcap/tidb/util/sqlexec" + "go.uber.org/zap" +) + +type domainMap struct { + domains map[string]*domain.Domain + mu sync.Mutex +} + +func (dm *domainMap) Get(store kv.Storage) (d *domain.Domain, err error) { + dm.mu.Lock() + defer dm.mu.Unlock() + + // If this is the only domain instance, and the caller doesn't provide store. 
+ if len(dm.domains) == 1 && store == nil {
+ for _, r := range dm.domains {
+ return r, nil
+ }
+ }
+
+ key := store.UUID()
+ d = dm.domains[key]
+ if d != nil {
+ return
+ }
+
+ ddlLease := time.Duration(atomic.LoadInt64(&schemaLease))
+ statisticLease := time.Duration(atomic.LoadInt64(&statsLease))
+ err = util.RunWithRetry(util.DefaultMaxRetries, util.RetryInterval, func() (retry bool, err1 error) {
+ logutil.BgLogger().Info("new domain",
+ zap.String("store", store.UUID()),
+ zap.Stringer("ddl lease", ddlLease),
+ zap.Stringer("stats lease", statisticLease))
+ factory := createSessionFunc(store)
+ sysFactory := createSessionWithDomainFunc(store)
+ d = domain.NewDomain(store, ddlLease, statisticLease, factory)
+ err1 = d.Init(ddlLease, sysFactory)
+ if err1 != nil {
+ // If we don't clean it up, dirty data is left behind when Init is retried.
+ d.Close()
+ logutil.BgLogger().Error("[ddl] init domain failed",
+ zap.Error(err1))
+ }
+ return true, err1
+ })
+ if err != nil {
+ return nil, err
+ }
+ dm.domains[key] = d
+
+ return
+}
+
+func (dm *domainMap) Delete(store kv.Storage) {
+ dm.mu.Lock()
+ delete(dm.domains, store.UUID())
+ dm.mu.Unlock()
+}
+
+var (
+ domap = &domainMap{
+ domains: map[string]*domain.Domain{},
+ }
+ // store.UUID() -> IfBootstrapped
+ storeBootstrapped = make(map[string]bool)
+ storeBootstrappedLock sync.Mutex
+
+ // schemaLease is the time for re-updating remote schema.
+ // In online DDL, we must wait 2 * SchemaLease time to guarantee
+ // all servers get the newest schema.
+ // The default schema lease time is 1 second; you can change it to a more suitable value,
+ // but be aware that too small a lease may cause severe performance degradation.
+ // For production, you should set a large schema lease, such as 300s or more.
+ schemaLease = int64(1 * time.Second)
+
+ // statsLease is the interval for reloading the stats table.
+ statsLease = int64(3 * time.Second)
+)
+
+func setStoreBootstrapped(storeUUID string) {
+ storeBootstrappedLock.Lock()
+ defer storeBootstrappedLock.Unlock()
+ storeBootstrapped[storeUUID] = true
+}
+
+// SetSchemaLease changes the default schema lease time for DDL.
+// This function is very dangerous; don't use it unless you really know what you are doing.
+// SetSchemaLease only affects non-local storage after bootstrap.
+func SetSchemaLease(lease time.Duration) {
+ atomic.StoreInt64(&schemaLease, int64(lease))
+}
+
+// SetStatsLease changes the default stats lease time for loading stats info.
+func SetStatsLease(lease time.Duration) {
+ atomic.StoreInt64(&statsLease, int64(lease))
+}
+
+// DisableStats4Test disables the stats for tests.
+func DisableStats4Test() {
+ SetStatsLease(-1)
+}
+
+// Parse parses a query string to raw ast.StmtNode.
+func Parse(ctx sessionctx.Context, src string) ([]ast.StmtNode, error) {
+ logutil.BgLogger().Debug("compiling", zap.String("source", src))
+ charset, collation := ctx.GetSessionVars().GetCharsetInfo()
+ p := parser.New()
+ p.SetSQLMode(ctx.GetSessionVars().SQLMode)
+ stmts, warns, err := p.Parse(src, charset, collation)
+ for _, warn := range warns {
+ ctx.GetSessionVars().StmtCtx.AppendWarning(warn)
+ }
+ if err != nil {
+ logutil.BgLogger().Warn("compiling",
+ zap.String("source", src),
+ zap.Error(err))
+ return nil, err
+ }
+ return stmts, nil
+}
+
+// Compile is safe for concurrent use by multiple goroutines.
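Parse above and Compile below are the two halves of statement preparation; runStmt later in this file adds the transaction handling on top. A hedged sketch of how a caller inside this package might chain them, assuming a single-statement SQL string (runOneSketch is a hypothetical helper, not part of this file):

// runOneSketch parses one statement, compiles it, and executes it.
func runOneSketch(ctx context.Context, sctx sessionctx.Context, sql string) (sqlexec.RecordSet, error) {
	stmts, err := Parse(sctx, sql)
	if err != nil {
		return nil, err
	}
	stmt, err := Compile(ctx, sctx, stmts[0]) // assumes sql holds exactly one statement
	if err != nil {
		return nil, err
	}
	// runStmt additionally commits or rolls back the transaction; this sketch skips that.
	return stmt.Exec(ctx)
}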
+func Compile(ctx context.Context, sctx sessionctx.Context, stmtNode ast.StmtNode) (sqlexec.Statement, error) { + compiler := executor.Compiler{Ctx: sctx} + stmt, err := compiler.Compile(ctx, stmtNode) + return stmt, err +} + +func finishStmt(ctx context.Context, sctx sessionctx.Context, se *session, sessVars *variable.SessionVars, + meetsErr error, sql sqlexec.Statement) error { + if meetsErr != nil { + if !sessVars.InTxn() { + logutil.BgLogger().Info("rollbackTxn for ddl/autocommit failed") + se.RollbackTxn(ctx) + } + return meetsErr + } + + if !sessVars.InTxn() { + if err := se.CommitTxn(ctx); err != nil { + if _, ok := sql.(*executor.ExecStmt).StmtNode.(*ast.CommitStmt); ok { + err = errors.Annotatef(err, "previous statement: %s", se.GetSessionVars().PrevStmt) + } + return err + } + return nil + } + + return checkStmtLimit(ctx, sctx, se) +} + +func checkStmtLimit(ctx context.Context, sctx sessionctx.Context, se *session) error { + // If the user insert, insert, insert ... but never commit, TiDB would OOM. + // So we limit the statement count in a transaction here. + var err error + history := GetHistory(sctx) + if history.Count() > 5000 { + se.RollbackTxn(ctx) + return errors.Errorf("statement count %d exceeds the transaction limitation, autocommit = %t", + history.Count(), sctx.GetSessionVars().IsAutocommit()) + } + return err +} + +// runStmt executes the sqlexec.Statement and commit or rollback the current transaction. +func runStmt(ctx context.Context, sctx sessionctx.Context, s sqlexec.Statement) (rs sqlexec.RecordSet, err error) { + se := sctx.(*session) + sessVars := se.GetSessionVars() + defer func() { + // If it is not a select statement, we record its slow log here, + // then it could include the transaction commit time. + if rs == nil { + sessVars.PrevStmt = executor.FormatSQL(s.OriginText()) + } + }() + + err = se.checkTxnAborted(s) + if err != nil { + return nil, err + } + rs, err = s.Exec(ctx) + sessVars.TxnCtx.StatementCount++ + if !s.IsReadOnly() { + // Handle the stmt commit/rollback. + if txn, err1 := sctx.Txn(false); err1 == nil { + if txn.Valid() { + if err != nil { + sctx.StmtRollback() + } else { + err = sctx.StmtCommit() + } + } + } else { + logutil.BgLogger().Error("get txn failed", zap.Error(err1)) + } + } + err = finishStmt(ctx, sctx, se, sessVars, err, s) + + if se.txn.pending() { + // After run statement finish, txn state is still pending means the + // statement never need a Txn(), such as: + // + // set @@tidb_general_log = 1 + // set @@autocommit = 0 + // select 1 + // + // Reset txn state to invalid to dispose the pending start ts. + se.txn.changeToInvalid() + } + return rs, err +} + +// GetHistory get all stmtHistory in current txn. Exported only for test. +func GetHistory(ctx sessionctx.Context) *StmtHistory { + hist, ok := ctx.GetSessionVars().TxnCtx.History.(*StmtHistory) + if ok { + return hist + } + hist = new(StmtHistory) + ctx.GetSessionVars().TxnCtx.History = hist + return hist +} + +// GetRows4Test gets all the rows from a RecordSet, only used for test. 
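GetRows4Test below, ResultSetToStringSlice, and the chunk tests earlier in this diff all drain a RecordSet with the same loop: allocate one chunk, call Next until it comes back empty, and copy rows out before the chunk is refilled. A minimal sketch of that loop (drainSketch is a hypothetical helper):

func drainSketch(ctx context.Context, rs sqlexec.RecordSet) (int, error) {
	count := 0
	req := rs.NewChunk()
	for {
		if err := rs.Next(ctx, req); err != nil {
			return count, err
		}
		if req.NumRows() == 0 {
			return count, nil // the result set is exhausted
		}
		for i := 0; i < req.NumRows(); i++ {
			_ = req.GetRow(i) // only valid until req is refilled; copy the row if it must outlive the loop
			count++
		}
	}
}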
+func GetRows4Test(ctx context.Context, sctx sessionctx.Context, rs sqlexec.RecordSet) ([]chunk.Row, error) { + if rs == nil { + return nil, nil + } + var rows []chunk.Row + req := rs.NewChunk() + // Must reuse `req` for imitating server.(*clientConn).writeChunks + for { + err := rs.Next(ctx, req) + if err != nil { + return nil, err + } + if req.NumRows() == 0 { + break + } + + iter := chunk.NewIterator4Chunk(req.CopyConstruct()) + for row := iter.Begin(); row != iter.End(); row = iter.Next() { + rows = append(rows, row) + } + } + return rows, nil +} + +// ResultSetToStringSlice changes the RecordSet to [][]string. +func ResultSetToStringSlice(ctx context.Context, s Session, rs sqlexec.RecordSet) ([][]string, error) { + rows, err := GetRows4Test(ctx, s, rs) + if err != nil { + return nil, err + } + err = rs.Close() + if err != nil { + return nil, err + } + sRows := make([][]string, len(rows)) + for i := range rows { + row := rows[i] + iRow := make([]string, row.Len()) + for j := 0; j < row.Len(); j++ { + if row.IsNull(j) { + iRow[j] = "" + } else { + d := row.GetDatum(j, &rs.Fields()[j].Column.FieldType) + iRow[j], err = d.ToString() + if err != nil { + return nil, err + } + } + } + sRows[i] = iRow + } + return sRows, nil +} + +// Session errors. +var ( + ErrForUpdateCantRetry = terror.ClassSession.New(mysql.ErrForUpdateCantRetry, mysql.MySQLErrName[mysql.ErrForUpdateCantRetry]) +) + +func init() { + sessionMySQLErrCodes := map[terror.ErrCode]uint16{ + mysql.ErrForUpdateCantRetry: mysql.ErrForUpdateCantRetry, + } + terror.ErrClassToMySQLCodes[terror.ClassSession] = sessionMySQLErrCodes +} diff --git a/session/tidb_test.go b/session/tidb_test.go new file mode 100644 index 0000000..0ce0bf1 --- /dev/null +++ b/session/tidb_test.go @@ -0,0 +1,109 @@ +// Copyright 2015 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package session + +import ( + "os" + "testing" + + . 
"github.com/pingcap/check" + "github.com/pingcap/tidb/domain" + "github.com/pingcap/tidb/kv" + "github.com/pingcap/tidb/planner/core" + "github.com/pingcap/tidb/store/mockstore" + "github.com/pingcap/tidb/tablecodec" + "github.com/pingcap/tidb/util/logutil" + "github.com/pingcap/tidb/util/testleak" +) + +func TestT(t *testing.T) { + logLevel := os.Getenv("log_level") + logutil.InitLogger(logutil.NewLogConfig(logLevel, logutil.DefaultLogFormat, logutil.EmptyFileLogConfig, false)) + CustomVerboseFlag = true + TestingT(t) +} + +var _ = Suite(&testMainSuite{}) + +type testMainSuite struct { + dbName string + store kv.Storage + dom *domain.Domain +} + +func (s *testMainSuite) SetUpSuite(c *C) { + testleak.BeforeTest() + s.dbName = "test_main_db" + s.store = newStore(c, s.dbName) + dom, err := BootstrapSession(s.store) + c.Assert(err, IsNil) + s.dom = dom +} + +func (s *testMainSuite) TearDownSuite(c *C) { + defer testleak.AfterTest(c)() + s.dom.Close() + err := s.store.Close() + c.Assert(err, IsNil) + removeStore(c, s.dbName) +} + +func (s *testMainSuite) TestParseErrorWarn(c *C) { + ctx := core.MockContext() + + nodes, err := Parse(ctx, "select /*+ adf */ 1") + c.Assert(err, IsNil) + c.Assert(len(nodes), Equals, 1) + c.Assert(len(ctx.GetSessionVars().StmtCtx.GetWarnings()), Equals, 1) + + _, err = Parse(ctx, "select") + c.Assert(err, NotNil) +} + +func newStore(c *C, dbPath string) kv.Storage { + store, err := mockstore.NewMockTikvStore() + c.Assert(err, IsNil) + return store +} + +func removeStore(c *C, dbPath string) { + os.RemoveAll(dbPath) +} + +func (s *testMainSuite) TestKeysNeedLock(c *C) { + rowKey := tablecodec.EncodeRowKeyWithHandle(1, 1) + indexKey := tablecodec.EncodeIndexSeekKey(1, 1, []byte{1}) + uniqueValue := make([]byte, 8) + uniqueUntouched := append(uniqueValue, '1') + nonUniqueVal := []byte{'0'} + nonUniqueUntouched := []byte{'1'} + var deleteVal []byte + rowVal := []byte{'a', 'b', 'c'} + tests := []struct { + key []byte + val []byte + need bool + }{ + {rowKey, rowVal, true}, + {rowKey, deleteVal, true}, + {indexKey, nonUniqueVal, false}, + {indexKey, nonUniqueUntouched, false}, + {indexKey, uniqueValue, true}, + {indexKey, uniqueUntouched, false}, + {indexKey, deleteVal, false}, + } + for _, tt := range tests { + c.Assert(keyNeedToLock(tt.key, tt.val), Equals, tt.need) + } +} diff --git a/session/txn.go b/session/txn.go new file mode 100644 index 0000000..7b2ec26 --- /dev/null +++ b/session/txn.go @@ -0,0 +1,401 @@ +// Copyright 2018 PingCAP, Inc. + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package session + +import ( + "bytes" + "context" + "fmt" + "strings" + "sync/atomic" + + "github.com/pingcap/errors" + "github.com/pingcap/failpoint" + "github.com/pingcap/tidb/executor" + "github.com/pingcap/tidb/kv" + "github.com/pingcap/tidb/store/tikv/oracle" + "github.com/pingcap/tidb/table" + "github.com/pingcap/tidb/tablecodec" + "github.com/pingcap/tidb/util/logutil" + "go.uber.org/zap" +) + +// TxnState wraps kv.Transaction to provide a new kv.Transaction. +// 1. 
It holds all statement-related modifications in the buffer before they are flushed to the txn,
+// so if executing a statement meets an error, the txn won't be made dirty.
+// 2. It's a lazy transaction, which means it is a txnFuture until StartTS() is really needed.
+type TxnState struct {
+ // The state of a TxnState should be one of the following:
+ // Invalid: kv.Transaction == nil && txnFuture == nil
+ // Pending: kv.Transaction == nil && txnFuture != nil
+ // Valid: kv.Transaction != nil && txnFuture == nil
+ kv.Transaction
+ txnFuture *txnFuture
+
+ buf kv.MemBuffer
+ dirtyTableOP []dirtyTableOperation
+
+ // If doNotCommit is not nil, Commit() will not commit the transaction.
+ // The doNotCommit flag may be set when StmtCommit fails.
+ doNotCommit error
+}
+
+func (st *TxnState) init() {
+ st.buf = kv.NewMemDbBuffer(kv.DefaultTxnMembufCap)
+}
+
+// Valid implements the kv.Transaction interface.
+func (st *TxnState) Valid() bool {
+ return st.Transaction != nil && st.Transaction.Valid()
+}
+
+func (st *TxnState) pending() bool {
+ return st.Transaction == nil && st.txnFuture != nil
+}
+
+func (st *TxnState) validOrPending() bool {
+ return st.txnFuture != nil || st.Valid()
+}
+
+func (st *TxnState) String() string {
+ if st.Transaction != nil {
+ return st.Transaction.String()
+ }
+ if st.txnFuture != nil {
+ return "txnFuture"
+ }
+ return "invalid transaction"
+}
+
+// GoString implements the "%#v" format for fmt.Printf.
+func (st *TxnState) GoString() string {
+ var s strings.Builder
+ s.WriteString("Txn{")
+ if st.pending() {
+ s.WriteString("state=pending")
+ } else if st.Valid() {
+ s.WriteString("state=valid")
+ fmt.Fprintf(&s, ", txnStartTS=%d", st.Transaction.StartTS())
+ if len(st.dirtyTableOP) > 0 {
+ fmt.Fprintf(&s, ", len(dirtyTable)=%d, %#v", len(st.dirtyTableOP), st.dirtyTableOP)
+ }
+ if st.buf != nil && st.buf.Len() != 0 {
+ fmt.Fprintf(&s, ", buf.length: %d, buf.size: %d", st.buf.Len(), st.buf.Size())
+ }
+ } else {
+ s.WriteString("state=invalid")
+ }
+
+ s.WriteString("}")
+ return s.String()
+}
+
+func (st *TxnState) changeInvalidToValid(txn kv.Transaction) {
+ st.Transaction = txn
+ st.txnFuture = nil
+}
+
+func (st *TxnState) changeInvalidToPending(future *txnFuture) {
+ st.Transaction = nil
+ st.txnFuture = future
+}
+
+func (st *TxnState) changePendingToValid(txnCap int) error {
+ if st.txnFuture == nil {
+ return errors.New("transaction future is not set")
+ }
+
+ future := st.txnFuture
+ st.txnFuture = nil
+
+ txn, err := future.wait()
+ if err != nil {
+ st.Transaction = nil
+ return err
+ }
+ txn.SetCap(txnCap)
+ st.Transaction = txn
+ return nil
+}
+
+func (st *TxnState) changeToInvalid() {
+ st.Transaction = nil
+ st.txnFuture = nil
+}
+
+// dirtyTableOperation represents an operation on dirtyTable; we log the operation
+// first and apply the operation log when the statement commits.
+type dirtyTableOperation struct {
+ kind int
+ tid int64
+ handle int64
+}
+
+var hasMockAutoIDRetry = int64(0)
+
+func enableMockAutoIDRetry() {
+ atomic.StoreInt64(&hasMockAutoIDRetry, 1)
+}
+
+func mockAutoIDRetry() bool {
+ return atomic.LoadInt64(&hasMockAutoIDRetry) == 1
+}
+
+// Commit overrides the Transaction interface.
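Commit below is only meaningful in the Valid state; the transitions between Invalid, Pending, and Valid happen in the change* helpers above. A hedged sketch of one pass through that life cycle (the helper name and the cap value are illustrative):

func txnLifeCycleSketch(ctx context.Context, st *TxnState, future *txnFuture) error {
	st.changeInvalidToPending(future) // Pending: only a txnFuture is held, no start ts yet
	if err := st.changePendingToValid(kv.DefaultTxnMembufCap); err != nil { // cap value chosen only for illustration
		st.changeToInvalid() // fall back to Invalid on failure
		return err
	}
	// Valid: st.Transaction is set; Commit (or Rollback) resets st back to Invalid.
	return st.Commit(ctx)
}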
+func (st *TxnState) Commit(ctx context.Context) error { + defer st.reset() + if len(st.dirtyTableOP) != 0 || st.buf.Len() != 0 { + logutil.BgLogger().Error("the code should never run here", + zap.String("TxnState", st.GoString()), + zap.Stack("something must be wrong")) + return errors.New("invalid transaction") + } + if st.doNotCommit != nil { + if err1 := st.Transaction.Rollback(); err1 != nil { + logutil.BgLogger().Error("rollback error", zap.Error(err1)) + } + return errors.Trace(st.doNotCommit) + } + + // mockCommitError8942 is used for PR #8942. + failpoint.Inject("mockCommitError8942", func(val failpoint.Value) { + if val.(bool) { + failpoint.Return(kv.ErrTxnRetryable) + } + }) + + // mockCommitRetryForAutoID is used to mock an commit retry for adjustAutoIncrementDatum. + failpoint.Inject("mockCommitRetryForAutoID", func(val failpoint.Value) { + if val.(bool) && !mockAutoIDRetry() { + enableMockAutoIDRetry() + failpoint.Return(kv.ErrTxnRetryable) + } + }) + + return st.Transaction.Commit(ctx) +} + +// Rollback overrides the Transaction interface. +func (st *TxnState) Rollback() error { + defer st.reset() + return st.Transaction.Rollback() +} + +func (st *TxnState) reset() { + st.doNotCommit = nil + st.cleanup() + st.changeToInvalid() +} + +// Get overrides the Transaction interface. +func (st *TxnState) Get(ctx context.Context, k kv.Key) ([]byte, error) { + val, err := st.buf.Get(ctx, k) + if kv.IsErrNotFound(err) { + val, err = st.Transaction.Get(ctx, k) + if kv.IsErrNotFound(err) { + return nil, err + } + } + if err != nil { + return nil, err + } + if len(val) == 0 { + return nil, kv.ErrNotExist + } + return val, nil +} + +// Set overrides the Transaction interface. +func (st *TxnState) Set(k kv.Key, v []byte) error { + return st.buf.Set(k, v) +} + +// Delete overrides the Transaction interface. +func (st *TxnState) Delete(k kv.Key) error { + return st.buf.Delete(k) +} + +// Iter overrides the Transaction interface. +func (st *TxnState) Iter(k kv.Key, upperBound kv.Key) (kv.Iterator, error) { + bufferIt, err := st.buf.Iter(k, upperBound) + if err != nil { + return nil, err + } + retrieverIt, err := st.Transaction.Iter(k, upperBound) + if err != nil { + return nil, err + } + return kv.NewUnionIter(bufferIt, retrieverIt, false) +} + +// IterReverse overrides the Transaction interface. +func (st *TxnState) IterReverse(k kv.Key) (kv.Iterator, error) { + bufferIt, err := st.buf.IterReverse(k) + if err != nil { + return nil, err + } + retrieverIt, err := st.Transaction.IterReverse(k) + if err != nil { + return nil, err + } + return kv.NewUnionIter(bufferIt, retrieverIt, true) +} + +func (st *TxnState) cleanup() { + const sz4M = 4 << 20 + if st.buf.Size() > sz4M { + // The memory footprint for the large transaction could be huge here. + // Each active session has its own buffer, we should free the buffer to + // avoid memory leak. + st.buf = kv.NewMemDbBuffer(kv.DefaultTxnMembufCap) + } else { + st.buf.Reset() + } + if st.dirtyTableOP != nil { + empty := dirtyTableOperation{} + for i := 0; i < len(st.dirtyTableOP); i++ { + st.dirtyTableOP[i] = empty + } + if len(st.dirtyTableOP) > 256 { + // Reduce memory footprint for the large transaction. + st.dirtyTableOP = nil + } else { + st.dirtyTableOP = st.dirtyTableOP[:0] + } + } +} + +// KeysNeedToLock returns the keys need to be locked. 
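The Get, Iter, and IterReverse overrides above always consult the statement buffer before the underlying transaction, and a zero-length buffered value means the key was deleted by the current statement. A hedged sketch of a read through TxnState (readThroughSketch is a hypothetical helper):

func readThroughSketch(ctx context.Context, st *TxnState, key kv.Key) ([]byte, error) {
	val, err := st.Get(ctx, key) // statement buffer first, then the underlying transaction
	if kv.IsErrNotFound(err) {
		// absent everywhere, or buffered as a zero-length (deleted) value by this statement
		return nil, err
	}
	return val, err
}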
+func (st *TxnState) KeysNeedToLock() ([]kv.Key, error) { + keys := make([]kv.Key, 0, st.buf.Len()) + if err := kv.WalkMemBuffer(st.buf, func(k kv.Key, v []byte) error { + if !keyNeedToLock(k, v) { + return nil + } + // If the key is already locked, it will be deduplicated in LockKeys method later. + // The statement MemBuffer will be reused, so we must copy the key here. + keys = append(keys, append([]byte{}, k...)) + return nil + }); err != nil { + return nil, err + } + return keys, nil +} + +func keyNeedToLock(k, v []byte) bool { + isTableKey := bytes.HasPrefix(k, tablecodec.TablePrefix()) + if !isTableKey { + // meta key always need to lock. + return true + } + isDelete := len(v) == 0 + if isDelete { + // only need to delete row key. + return k[10] == 'r' + } + if tablecodec.IsUntouchedIndexKValue(k, v) { + return false + } + isNonUniqueIndex := tablecodec.IsIndexKey(k) && len(v) == 1 + // Put row key and unique index need to lock. + return !isNonUniqueIndex +} + +func mergeToDirtyDB(dirtyDB *executor.DirtyDB, op dirtyTableOperation) { + dt := dirtyDB.GetDirtyTable(op.tid) + switch op.kind { + case table.DirtyTableAddRow: + dt.AddRow(op.handle) + case table.DirtyTableDeleteRow: + dt.DeleteRow(op.handle) + } +} + +type txnFailFuture struct{} + +func (txnFailFuture) Wait() (uint64, error) { + return 0, errors.New("mock get timestamp fail") +} + +// txnFuture is a promise, which promises to return a txn in future. +type txnFuture struct { + future oracle.Future + store kv.Storage +} + +func (tf *txnFuture) wait() (kv.Transaction, error) { + startTS, err := tf.future.Wait() + if err == nil { + return tf.store.BeginWithStartTS(startTS) + } else if _, ok := tf.future.(txnFailFuture); ok { + return nil, err + } + + // It would retry get timestamp. + return tf.store.Begin() +} + +func (s *session) getTxnFuture(ctx context.Context) *txnFuture { + oracleStore := s.store.GetOracle() + tsFuture := oracleStore.GetTimestampAsync(ctx) + ret := &txnFuture{future: tsFuture, store: s.store} + failpoint.InjectContext(ctx, "mockGetTSFail", func() { + ret.future = txnFailFuture{} + }) + return ret +} + +// StmtCommit implements the sessionctx.Context interface. +func (s *session) StmtCommit() error { + defer s.txn.cleanup() + st := &s.txn + var count int + err := kv.WalkMemBuffer(st.buf, func(k kv.Key, v []byte) error { + failpoint.Inject("mockStmtCommitError", func(val failpoint.Value) { + if val.(bool) { + count++ + } + }) + + if count > 3 { + return errors.New("mock stmt commit error") + } + + if len(v) == 0 { + return st.Transaction.Delete(k) + } + return st.Transaction.Set(k, v) + }) + if err != nil { + st.doNotCommit = err + return err + } + + if len(st.dirtyTableOP) > 0 { + dirtyDB := executor.GetDirtyDB(s) + for _, op := range st.dirtyTableOP { + mergeToDirtyDB(dirtyDB, op) + } + } + return nil +} + +// StmtRollback implements the sessionctx.Context interface. +func (s *session) StmtRollback() { + s.txn.cleanup() +} + +func (s *session) StmtAddDirtyTableOP(op int, tid int64, handle int64) { + s.txn.dirtyTableOP = append(s.txn.dirtyTableOP, dirtyTableOperation{op, tid, handle}) +} diff --git a/sessionctx/context.go b/sessionctx/context.go new file mode 100644 index 0000000..95e78da --- /dev/null +++ b/sessionctx/context.go @@ -0,0 +1,108 @@ +// Copyright 2018 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package sessionctx
+
+import (
+ "context"
+ "fmt"
+
+ "github.com/pingcap/tidb/kv"
+ "github.com/pingcap/tidb/owner"
+ "github.com/pingcap/tidb/sessionctx/variable"
+)
+
+// Context is an interface for the transaction and execution args environment.
+type Context interface {
+ // NewTxn creates a new transaction for further execution.
+ // If the old transaction is valid, it is committed first.
+ // It's used in BEGIN statements and DDL statements to commit the old transaction.
+ NewTxn(context.Context) error
+
+ // Txn returns the current transaction which is created before executing a statement.
+ // The returned kv.Transaction is not nil, but it may be pending or invalid.
+ // If the active parameter is true, calling this function will wait for the pending txn
+ // to become valid.
+ Txn(active bool) (kv.Transaction, error)
+
+ // GetClient gets a kv.Client.
+ GetClient() kv.Client
+
+ // SetValue saves a value associated with this context for key.
+ SetValue(key fmt.Stringer, value interface{})
+
+ // Value returns the value associated with this context for key.
+ Value(key fmt.Stringer) interface{}
+
+ // ClearValue clears the value associated with this context for key.
+ ClearValue(key fmt.Stringer)
+
+ GetSessionVars() *variable.SessionVars
+
+ // RefreshTxnCtx commits the old transaction without retry,
+ // and creates a new transaction.
+ // It is currently only used for load data and batch insert.
+ RefreshTxnCtx(context.Context) error
+
+ // InitTxnWithStartTS initializes a transaction with startTS.
+ // It should be called right before we build an executor.
+ InitTxnWithStartTS(startTS uint64) error
+
+ // GetStore returns the store of session.
+ GetStore() kv.Storage
+
+ // StmtCommit flushes all changes made by the statement to the underlying transaction.
+ StmtCommit() error
+ // StmtRollback provides statement level rollback.
+ StmtRollback()
+ // StmtAddDirtyTableOP adds the dirty table operation for the current statement.
+ StmtAddDirtyTableOP(op int, physicalID int64, handle int64)
+ // DDLOwnerChecker returns owner.DDLOwnerChecker.
+ DDLOwnerChecker() owner.DDLOwnerChecker
+ // PrepareTxnFuture is used to prepare the txn by a future.
+ PrepareTxnFuture(ctx context.Context)
+}
+
+type basicCtxType int
+
+func (t basicCtxType) String() string {
+ switch t {
+ case QueryString:
+ return "query_string"
+ case Initing:
+ return "initing"
+ case LastExecuteDDL:
+ return "last_execute_ddl"
+ }
+ return "unknown"
+}
+
+// Context keys.
+const (
+ // QueryString is the key for the original query string.
+ QueryString basicCtxType = 1
+ // Initing is the key for indicating if the server is running a bootstrap or upgrade job.
+ Initing basicCtxType = 2
+ // LastExecuteDDL is the key for whether the last statement the session executed was a DDL command.
+ LastExecuteDDL basicCtxType = 3
+)
+
+type connIDCtxKeyType struct{}
+
+// ConnID is the key in context.
+var ConnID = connIDCtxKeyType{}
+
+// SetCommitCtx sets the variables for the context before committing a transaction.
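The basicCtxType keys above are meant to be used with the SetValue/Value/ClearValue methods of Context; TestLastExecuteDDLFlag earlier in this diff checks the LastExecuteDDL key in exactly this way. A hedged sketch, assuming sctx is any Context implementation (queryStringSketch is a hypothetical helper):

func queryStringSketch(sctx Context) string {
	sctx.SetValue(QueryString, "select 1") // any fmt.Stringer can serve as a key
	defer sctx.ClearValue(QueryString)
	v, _ := sctx.Value(QueryString).(string) // values are plain interface{}, so callers assert the type
	return v
}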
+func SetCommitCtx(ctx context.Context, sessCtx Context) context.Context { + return context.WithValue(ctx, ConnID, sessCtx.GetSessionVars().ConnectionID) +} diff --git a/sessionctx/context_test.go b/sessionctx/context_test.go new file mode 100644 index 0000000..a06ee54 --- /dev/null +++ b/sessionctx/context_test.go @@ -0,0 +1,42 @@ +// Copyright 2019 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package sessionctx + +import ( + "fmt" + "testing" + + . "github.com/pingcap/check" +) + +func TestT(t *testing.T) { + TestingT(t) +} + +func TestBasicCtxTypeToString(t *testing.T) { + tests := []struct { + key fmt.Stringer + v string + }{ + {QueryString, "query_string"}, + {Initing, "initing"}, + {LastExecuteDDL, "last_execute_ddl"}, + {basicCtxType(9), "unknown"}, + } + for _, tt := range tests { + if tt.key.String() != tt.v { + t.Fatalf("want %s but got %s", tt.v, tt.key.String()) + } + } +} diff --git a/sessionctx/stmtctx/stmtctx.go b/sessionctx/stmtctx/stmtctx.go new file mode 100644 index 0000000..6a98bc0 --- /dev/null +++ b/sessionctx/stmtctx/stmtctx.go @@ -0,0 +1,454 @@ +// Copyright 2017 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package stmtctx + +import ( + "math" + "strconv" + "sync" + "time" + + "github.com/pingcap/tidb/parser/model" + "go.uber.org/zap" +) + +const ( + // WarnLevelError represents level "Error" for 'SHOW WARNINGS' syntax. + WarnLevelError = "Error" + // WarnLevelWarning represents level "Warning" for 'SHOW WARNINGS' syntax. + WarnLevelWarning = "Warning" + // WarnLevelNote represents level "Note" for 'SHOW WARNINGS' syntax. + WarnLevelNote = "Note" +) + +// SQLWarn relates a sql warning and it's level. +type SQLWarn struct { + Level string + Err error +} + +// StatementContext contains variables for a statement. +// It should be reset before executing a statement. +type StatementContext struct { + // Set the following variables before execution + StmtHints + + // IsDDLJobInQueue is used to mark whether the DDL job is put into the queue. + // If IsDDLJobInQueue is true, it means the DDL job is in the queue of storage, and it can be handled by the DDL worker. + IsDDLJobInQueue bool + InInsertStmt bool + InDeleteStmt bool + InSelectStmt bool + InExplainStmt bool + IgnoreTruncate bool + IgnoreZeroInDate bool + BadNullAsWarning bool + DividedByZeroAsWarning bool + TruncateAsWarning bool + OverflowAsWarning bool + InShowWarning bool + PadCharToFullLength bool + BatchCheck bool + InNullRejectCheck bool + AllowInvalidDate bool + // CastStrToIntStrict is used to control the way we cast float format string to int. 
+ // If ConvertStrToIntStrict is false, we convert it to a valid float string first, + // then cast the float string to int string. Otherwise, we cast string to integer + // prefix in a strict way, only extract 0-9 and (+ or - in first bit). + CastStrToIntStrict bool + + // mu struct holds variables that change during execution. + mu struct { + sync.Mutex + + affectedRows uint64 + foundRows uint64 + + /* + following variables are ported from 'COPY_INFO' struct of MySQL server source, + they are used to count rows for INSERT/REPLACE/UPDATE queries: + If a row is inserted then the copied variable is incremented. + If a row is updated by the INSERT ... ON DUPLICATE KEY UPDATE and the + new data differs from the old one then the copied and the updated + variables are incremented. + The touched variable is incremented if a row was touched by the update part + of the INSERT ... ON DUPLICATE KEY UPDATE no matter whether the row + was actually changed or not. + + see https://github.com/mysql/mysql-server/blob/d2029238d6d9f648077664e4cdd611e231a6dc14/sql/sql_data_change.h#L60 for more details + */ + records uint64 + updated uint64 + copied uint64 + touched uint64 + + warnings []SQLWarn + errorCount uint16 + } + // PrevAffectedRows is the affected-rows value(DDL is 0, DML is the number of affected rows). + PrevAffectedRows int64 + // PrevLastInsertID is the last insert ID of previous statement. + PrevLastInsertID uint64 + // LastInsertID is the auto-generated ID in the current statement. + LastInsertID uint64 + // InsertID is the given insert ID of an auto_increment column. + InsertID uint64 + + BaseRowID int64 + MaxRowID int64 + + // Copied from SessionVars.TimeZone. + TimeZone *time.Location + NotFillCache bool + TableIDs []int64 + IndexNames []string + nowTs time.Time // use this variable for now/current_timestamp calculation/cache for one stmt + stmtTimeCached bool + StmtType string +} + +// StmtHints are SessionVars related sql hints. +type StmtHints struct { + // Hint flags + HasAllowInSubqToJoinAndAggHint bool + HasMemQuotaHint bool + HasReplicaReadHint bool + + // Hint Information + AllowInSubqToJoinAndAgg bool + MemQuotaQuery int64 + ReplicaRead byte +} + +// GetNowTsCached getter for nowTs, if not set get now time and cache it +func (sc *StatementContext) GetNowTsCached() time.Time { + if !sc.stmtTimeCached { + now := time.Now() + sc.nowTs = now + sc.stmtTimeCached = true + } + return sc.nowTs +} + +// ResetNowTs resetter for nowTs, clear cached time flag +func (sc *StatementContext) ResetNowTs() { + sc.stmtTimeCached = false +} + +// AddAffectedRows adds affected rows. +func (sc *StatementContext) AddAffectedRows(rows uint64) { + sc.mu.Lock() + sc.mu.affectedRows += rows + sc.mu.Unlock() +} + +// AffectedRows gets affected rows. +func (sc *StatementContext) AffectedRows() uint64 { + sc.mu.Lock() + rows := sc.mu.affectedRows + sc.mu.Unlock() + return rows +} + +// FoundRows gets found rows. +func (sc *StatementContext) FoundRows() uint64 { + sc.mu.Lock() + rows := sc.mu.foundRows + sc.mu.Unlock() + return rows +} + +// AddFoundRows adds found rows. +func (sc *StatementContext) AddFoundRows(rows uint64) { + sc.mu.Lock() + sc.mu.foundRows += rows + sc.mu.Unlock() +} + +// RecordRows is used to generate info message +func (sc *StatementContext) RecordRows() uint64 { + sc.mu.Lock() + rows := sc.mu.records + sc.mu.Unlock() + return rows +} + +// AddRecordRows adds record rows. 
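Each counter above lives behind sc.mu and is only touched through these Add*/getter pairs, so it can be bumped safely from the goroutines serving a single statement. A tiny hedged sketch (countersSketch is a hypothetical helper):

func countersSketch(sc *StatementContext) {
	sc.AddAffectedRows(2)
	sc.AddFoundRows(2)
	_ = sc.AffectedRows() // 2
	_ = sc.FoundRows()    // 2
}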
+func (sc *StatementContext) AddRecordRows(rows uint64) { + sc.mu.Lock() + sc.mu.records += rows + sc.mu.Unlock() +} + +// UpdatedRows is used to generate info message +func (sc *StatementContext) UpdatedRows() uint64 { + sc.mu.Lock() + rows := sc.mu.updated + sc.mu.Unlock() + return rows +} + +// AddUpdatedRows adds updated rows. +func (sc *StatementContext) AddUpdatedRows(rows uint64) { + sc.mu.Lock() + sc.mu.updated += rows + sc.mu.Unlock() +} + +// CopiedRows is used to generate info message +func (sc *StatementContext) CopiedRows() uint64 { + sc.mu.Lock() + rows := sc.mu.copied + sc.mu.Unlock() + return rows +} + +// AddCopiedRows adds copied rows. +func (sc *StatementContext) AddCopiedRows(rows uint64) { + sc.mu.Lock() + sc.mu.copied += rows + sc.mu.Unlock() +} + +// TouchedRows is used to generate info message +func (sc *StatementContext) TouchedRows() uint64 { + sc.mu.Lock() + rows := sc.mu.touched + sc.mu.Unlock() + return rows +} + +// AddTouchedRows adds touched rows. +func (sc *StatementContext) AddTouchedRows(rows uint64) { + sc.mu.Lock() + sc.mu.touched += rows + sc.mu.Unlock() +} + +// GetWarnings gets warnings. +func (sc *StatementContext) GetWarnings() []SQLWarn { + sc.mu.Lock() + warns := make([]SQLWarn, len(sc.mu.warnings)) + copy(warns, sc.mu.warnings) + sc.mu.Unlock() + return warns +} + +// WarningCount gets warning count. +func (sc *StatementContext) WarningCount() uint16 { + if sc.InShowWarning { + return 0 + } + sc.mu.Lock() + wc := uint16(len(sc.mu.warnings)) + sc.mu.Unlock() + return wc +} + +// NumErrorWarnings gets warning and error count. +func (sc *StatementContext) NumErrorWarnings() (ec uint16, wc int) { + sc.mu.Lock() + ec = sc.mu.errorCount + wc = len(sc.mu.warnings) + sc.mu.Unlock() + return +} + +// SetWarnings sets warnings. +func (sc *StatementContext) SetWarnings(warns []SQLWarn) { + sc.mu.Lock() + sc.mu.warnings = warns + for _, w := range warns { + if w.Level == WarnLevelError { + sc.mu.errorCount++ + } + } + sc.mu.Unlock() +} + +// AppendWarning appends a warning with level 'Warning'. +func (sc *StatementContext) AppendWarning(warn error) { + sc.mu.Lock() + if len(sc.mu.warnings) < math.MaxUint16 { + sc.mu.warnings = append(sc.mu.warnings, SQLWarn{WarnLevelWarning, warn}) + } + sc.mu.Unlock() +} + +// AppendNote appends a warning with level 'Note'. +func (sc *StatementContext) AppendNote(warn error) { + sc.mu.Lock() + if len(sc.mu.warnings) < math.MaxUint16 { + sc.mu.warnings = append(sc.mu.warnings, SQLWarn{WarnLevelNote, warn}) + } + sc.mu.Unlock() +} + +// AppendError appends a warning with level 'Error'. +func (sc *StatementContext) AppendError(warn error) { + sc.mu.Lock() + if len(sc.mu.warnings) < math.MaxUint16 { + sc.mu.warnings = append(sc.mu.warnings, SQLWarn{WarnLevelError, warn}) + sc.mu.errorCount++ + } + sc.mu.Unlock() +} + +// HandleTruncate ignores or returns the error based on the StatementContext state. +func (sc *StatementContext) HandleTruncate(err error) error { + // TODO: At present we have not checked whether the error can be ignored or treated as warning. + // We will do that later, and then append WarnDataTruncated instead of the error itself. + if err == nil { + return nil + } + if sc.IgnoreTruncate { + return nil + } + if sc.TruncateAsWarning { + sc.AppendWarning(err) + return nil + } + return err +} + +// HandleOverflow treats ErrOverflow as warnings or returns the error based on the StmtCtx.OverflowAsWarning state. 
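HandleTruncate above and HandleOverflow below follow the same pattern: depending on the statement flags, an error is either returned to the caller or demoted to a warning on the context. A hedged sketch (truncateSketch is a hypothetical helper; the error is passed in as a stand-in for a real truncation error):

func truncateSketch(sc *StatementContext, truncateErr error) error {
	sc.TruncateAsWarning = true
	if err := sc.HandleTruncate(truncateErr); err != nil {
		return err // only reached when the error is neither ignored nor demoted
	}
	// With TruncateAsWarning set, the error ends up in the warning list instead.
	_ = sc.WarningCount()
	return nil
}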
+func (sc *StatementContext) HandleOverflow(err error, warnErr error) error { + if err == nil { + return nil + } + + if sc.OverflowAsWarning { + sc.AppendWarning(warnErr) + return nil + } + return err +} + +// ResetForRetry resets the changed states during execution. +func (sc *StatementContext) ResetForRetry() { + sc.mu.Lock() + sc.mu.affectedRows = 0 + sc.mu.foundRows = 0 + sc.mu.records = 0 + sc.mu.updated = 0 + sc.mu.copied = 0 + sc.mu.touched = 0 + sc.mu.errorCount = 0 + sc.mu.warnings = nil + sc.mu.Unlock() + sc.MaxRowID = 0 + sc.BaseRowID = 0 + sc.TableIDs = sc.TableIDs[:0] + sc.IndexNames = sc.IndexNames[:0] +} + +// ShouldClipToZero indicates whether values less than 0 should be clipped to 0 for unsigned integer types. +// This is the case for `insert`, `update`, `alter table` and `load data infile` statements, when not in strict SQL mode. +// see https://dev.mysql.com/doc/refman/5.7/en/out-of-range-and-overflow.html +func (sc *StatementContext) ShouldClipToZero() bool { + // TODO: Currently altering column of integer to unsigned integer is not supported. + // If it is supported one day, that case should be added here. + return sc.InInsertStmt +} + +// ShouldIgnoreOverflowError indicates whether we should ignore the error when type conversion overflows, +// so we can leave it for further processing like clipping values less than 0 to 0 for unsigned integer types. +func (sc *StatementContext) ShouldIgnoreOverflowError() bool { + if sc.InInsertStmt && sc.TruncateAsWarning { + return true + } + return false +} + +// PushDownFlags converts StatementContext to tipb.SelectRequest.Flags. +func (sc *StatementContext) PushDownFlags() uint64 { + var flags uint64 + if sc.InInsertStmt { + flags |= model.FlagInInsertStmt + } else if sc.InDeleteStmt { + flags |= model.FlagInUpdateOrDeleteStmt + } else if sc.InSelectStmt { + flags |= model.FlagInSelectStmt + } + if sc.IgnoreTruncate { + flags |= model.FlagIgnoreTruncate + } else if sc.TruncateAsWarning { + flags |= model.FlagTruncateAsWarning + } + if sc.OverflowAsWarning { + flags |= model.FlagOverflowAsWarning + } + if sc.IgnoreZeroInDate { + flags |= model.FlagIgnoreZeroInDate + } + if sc.DividedByZeroAsWarning { + flags |= model.FlagDividedByZeroAsWarning + } + if sc.PadCharToFullLength { + flags |= model.FlagPadCharToFullLength + } + return flags +} + +// SetFlagsFromPBFlag set the flag of StatementContext from a `tipb.SelectRequest.Flags`. +func (sc *StatementContext) SetFlagsFromPBFlag(flags uint64) { + sc.IgnoreTruncate = (flags & model.FlagIgnoreTruncate) > 0 + sc.TruncateAsWarning = (flags & model.FlagTruncateAsWarning) > 0 + sc.PadCharToFullLength = (flags & model.FlagPadCharToFullLength) > 0 + sc.InInsertStmt = (flags & model.FlagInInsertStmt) > 0 + sc.InSelectStmt = (flags & model.FlagInSelectStmt) > 0 + sc.OverflowAsWarning = (flags & model.FlagOverflowAsWarning) > 0 + sc.IgnoreZeroInDate = (flags & model.FlagIgnoreZeroInDate) > 0 + sc.DividedByZeroAsWarning = (flags & model.FlagDividedByZeroAsWarning) > 0 +} + +//CopTasksDetails collects some useful information of cop-tasks during execution. 
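PushDownFlags and SetFlagsFromPBFlag above are inverses over the flag bits they share, which is what the stmtctx test later in this diff verifies numerically. A small hedged sketch of the round trip (flagsRoundTripSketch is a hypothetical helper):

func flagsRoundTripSketch() {
	src := &StatementContext{InInsertStmt: true, TruncateAsWarning: true}
	flags := src.PushDownFlags() // 8 | 2 = 10 for this combination

	dst := new(StatementContext)
	dst.SetFlagsFromPBFlag(flags) // InInsertStmt and TruncateAsWarning are set again on dst
	_ = dst
}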
+type CopTasksDetails struct { + NumCopTasks int + + AvgProcessTime time.Duration + P90ProcessTime time.Duration + MaxProcessAddress string + MaxProcessTime time.Duration + + AvgWaitTime time.Duration + P90WaitTime time.Duration + MaxWaitAddress string + MaxWaitTime time.Duration + + MaxBackoffTime map[string]time.Duration + MaxBackoffAddress map[string]string + AvgBackoffTime map[string]time.Duration + P90BackoffTime map[string]time.Duration + TotBackoffTime map[string]time.Duration + TotBackoffTimes map[string]int +} + +// ToZapFields wraps the CopTasksDetails as zap.Fileds. +func (d *CopTasksDetails) ToZapFields() (fields []zap.Field) { + if d.NumCopTasks == 0 { + return + } + fields = make([]zap.Field, 0, 10) + fields = append(fields, zap.Int("num_cop_tasks", d.NumCopTasks)) + fields = append(fields, zap.String("process_avg_time", strconv.FormatFloat(d.AvgProcessTime.Seconds(), 'f', -1, 64)+"s")) + fields = append(fields, zap.String("process_p90_time", strconv.FormatFloat(d.P90ProcessTime.Seconds(), 'f', -1, 64)+"s")) + fields = append(fields, zap.String("process_max_time", strconv.FormatFloat(d.MaxProcessTime.Seconds(), 'f', -1, 64)+"s")) + fields = append(fields, zap.String("process_max_addr", d.MaxProcessAddress)) + fields = append(fields, zap.String("wait_avg_time", strconv.FormatFloat(d.AvgWaitTime.Seconds(), 'f', -1, 64)+"s")) + fields = append(fields, zap.String("wait_p90_time", strconv.FormatFloat(d.P90WaitTime.Seconds(), 'f', -1, 64)+"s")) + fields = append(fields, zap.String("wait_max_time", strconv.FormatFloat(d.MaxWaitTime.Seconds(), 'f', -1, 64)+"s")) + fields = append(fields, zap.String("wait_max_addr", d.MaxWaitAddress)) + return fields +} diff --git a/sessionctx/stmtctx/stmtctx_test.go b/sessionctx/stmtctx/stmtctx_test.go new file mode 100644 index 0000000..84229cf --- /dev/null +++ b/sessionctx/stmtctx/stmtctx_test.go @@ -0,0 +1,51 @@ +// Copyright 2019 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package stmtctx_test + +import ( + . 
"github.com/pingcap/check" + "github.com/pingcap/tidb/sessionctx/stmtctx" + "testing" +) + +func TestT(t *testing.T) { + TestingT(t) +} + +type stmtctxSuit struct{} + +var _ = Suite(&stmtctxSuit{}) + +func (s *stmtctxSuit) TestStatementContextPushDownFLags(c *C) { + testCases := []struct { + in *stmtctx.StatementContext + out uint64 + }{ + {&stmtctx.StatementContext{InInsertStmt: true}, 8}, + {&stmtctx.StatementContext{InDeleteStmt: true}, 16}, + {&stmtctx.StatementContext{InSelectStmt: true}, 32}, + {&stmtctx.StatementContext{IgnoreTruncate: true}, 1}, + {&stmtctx.StatementContext{TruncateAsWarning: true}, 2}, + {&stmtctx.StatementContext{OverflowAsWarning: true}, 64}, + {&stmtctx.StatementContext{IgnoreZeroInDate: true}, 128}, + {&stmtctx.StatementContext{DividedByZeroAsWarning: true}, 256}, + {&stmtctx.StatementContext{PadCharToFullLength: true}, 4}, + {&stmtctx.StatementContext{InSelectStmt: true, TruncateAsWarning: true}, 34}, + {&stmtctx.StatementContext{DividedByZeroAsWarning: true, IgnoreTruncate: true}, 257}, + } + for _, tt := range testCases { + got := tt.in.PushDownFlags() + c.Assert(got, Equals, tt.out, Commentf("get %v, want %v", got, tt.out)) + } +} diff --git a/sessionctx/variable/mock_globalaccessor.go b/sessionctx/variable/mock_globalaccessor.go new file mode 100644 index 0000000..24ab573 --- /dev/null +++ b/sessionctx/variable/mock_globalaccessor.go @@ -0,0 +1,42 @@ +// Copyright 2018 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package variable + +// MockGlobalAccessor implements GlobalVarAccessor interface. it's used in tests +type MockGlobalAccessor struct { +} + +// NewMockGlobalAccessor implements GlobalVarAccessor interface. +func NewMockGlobalAccessor() *MockGlobalAccessor { + return new(MockGlobalAccessor) +} + +// GetGlobalSysVar implements GlobalVarAccessor.GetGlobalSysVar interface. +func (m *MockGlobalAccessor) GetGlobalSysVar(name string) (string, error) { + v, ok := SysVars[name] + if ok { + return v.Value, nil + } + return "", nil +} + +// SetGlobalSysVar implements GlobalVarAccessor.SetGlobalSysVar interface. +func (m *MockGlobalAccessor) SetGlobalSysVar(name string, value string) error { + panic("not supported") +} + +// GetAllSysVars implements GlobalVarAccessor.GetAllSysVars interface. +func (m *MockGlobalAccessor) GetAllSysVars() (map[string]string, error) { + panic("not supported") +} diff --git a/sessionctx/variable/session.go b/sessionctx/variable/session.go new file mode 100644 index 0000000..86b12cb --- /dev/null +++ b/sessionctx/variable/session.go @@ -0,0 +1,747 @@ +// Copyright 2015 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package variable + +import ( + "crypto/tls" + "fmt" + "strconv" + "strings" + "sync" + "sync/atomic" + "time" + + "github.com/pingcap/errors" + "github.com/pingcap/tidb/kv" + "github.com/pingcap/tidb/meta/autoid" + "github.com/pingcap/tidb/parser/mysql" + "github.com/pingcap/tidb/parser/terror" + "github.com/pingcap/tidb/sessionctx/stmtctx" + "github.com/pingcap/tidb/types" + "github.com/pingcap/tidb/util/chunk" + "github.com/pingcap/tidb/util/rowcodec" +) + +// Error instances. +var ( + ErrCantSetToNull = terror.ClassVariable.New(mysql.ErrCantSetToNull, mysql.MySQLErrName[mysql.ErrCantSetToNull]) + ErrSnapshotTooOld = terror.ClassVariable.New(mysql.ErrSnapshotTooOld, mysql.MySQLErrName[mysql.ErrSnapshotTooOld]) +) + +// TransactionContext is used to store variables that has transaction scope. +type TransactionContext struct { + forUpdateTS uint64 + DirtyDB interface{} + InfoSchema interface{} + History interface{} + SchemaVersion int64 + StartTS uint64 + Shard *int64 + TableDeltaMap map[int64]TableDelta + + CreateTime time.Time + StatementCount int +} + +// UpdateDeltaForTable updates the delta info for some table. +func (tc *TransactionContext) UpdateDeltaForTable(tableID int64, delta int64, count int64, colSize map[int64]int64) { + if tc.TableDeltaMap == nil { + tc.TableDeltaMap = make(map[int64]TableDelta) + } + item := tc.TableDeltaMap[tableID] + if item.ColSize == nil && colSize != nil { + item.ColSize = make(map[int64]int64) + } + item.Delta += delta + item.Count += count + for key, val := range colSize { + item.ColSize[key] += val + } + tc.TableDeltaMap[tableID] = item +} + +// Cleanup clears up transaction info that no longer use. +func (tc *TransactionContext) Cleanup() { + // tc.InfoSchema = nil; we cannot do it now, because some operation like handleFieldList depend on this. + tc.DirtyDB = nil + tc.History = nil + tc.TableDeltaMap = nil +} + +// ClearDelta clears the delta map. +func (tc *TransactionContext) ClearDelta() { + tc.TableDeltaMap = nil +} + +// GetForUpdateTS returns the ts for update. +func (tc *TransactionContext) GetForUpdateTS() uint64 { + if tc.forUpdateTS > tc.StartTS { + return tc.forUpdateTS + } + return tc.StartTS +} + +// SetForUpdateTS sets the ts for update. +func (tc *TransactionContext) SetForUpdateTS(forUpdateTS uint64) { + if forUpdateTS > tc.forUpdateTS { + tc.forUpdateTS = forUpdateTS + } +} + +// WriteStmtBufs can be used by insert/replace/delete/update statement. +// TODO: use a common memory pool to replace this. +type WriteStmtBufs struct { + // RowValBuf is used by tablecodec.EncodeRow, to reduce runtime.growslice. + RowValBuf []byte + // BufStore stores temp KVs for a row when executing insert statement. + // We could reuse a BufStore for multiple rows of a session to reduce memory allocations. + BufStore *kv.BufferStore + // AddRowValues use to store temp insert rows value, to reduce memory allocations when importing data. + AddRowValues []types.Datum + + // IndexValsBuf is used by index.FetchValues + IndexValsBuf []types.Datum + // IndexKeyBuf is used by index.GenIndexKey + IndexKeyBuf []byte +} + +func (ib *WriteStmtBufs) clean() { + ib.BufStore = nil + ib.RowValBuf = nil + ib.AddRowValues = nil + ib.IndexValsBuf = nil + ib.IndexKeyBuf = nil +} + +// SessionVars is to handle user-defined or global variables in the current session. +type SessionVars struct { + Concurrency + BatchSize + // UsersLock is a lock for user defined variables. + UsersLock sync.RWMutex + // Users are user defined variables. 
+ Users map[string]string + // systems variables, don't modify it directly, use GetSystemVar/SetSystemVar method. + systems map[string]string + // SysWarningCount is the system variable "warning_count", because it is on the hot path, so we extract it from the systems + SysWarningCount int + // SysErrorCount is the system variable "error_count", because it is on the hot path, so we extract it from the systems + SysErrorCount uint16 + + // TxnCtx Should be reset on transaction finished. + TxnCtx *TransactionContext + + // KVVars is the variables for KV storage. + KVVars *kv.Variables + + // TxnIsolationLevelOneShot is used to implements "set transaction isolation level ..." + TxnIsolationLevelOneShot struct { + // State 0 means default + // State 1 means it's set in current transaction. + // State 2 means it should be used in current transaction. + State int + Value string + } + + // Status stands for the session status. e.g. in transaction or not, auto commit is on or off, and so on. + Status uint16 + + // ClientCapability is client's capability. + ClientCapability uint32 + + // TLSConnectionState is the TLS connection state (nil if not using TLS). + TLSConnectionState *tls.ConnectionState + + // ConnectionID is the connection id of the current session. + ConnectionID uint64 + + // PlanID is the unique id of logical and physical plan. + PlanID int + + // PlanColumnID is the unique id for column when building plan. + PlanColumnID int64 + + // CurrentDB is the default database of this session. + CurrentDB string + + // StrictSQLMode indicates if the session is in strict mode. + StrictSQLMode bool + + // CommonGlobalLoaded indicates if common global variable has been loaded for this session. + CommonGlobalLoaded bool + + // InRestrictedSQL indicates if the session is handling restricted SQL execution. + InRestrictedSQL bool + + // GlobalVarsAccessor is used to set and get global variables. + GlobalVarsAccessor GlobalVarAccessor + + // LastFoundRows is the number of found rows of last query statement + LastFoundRows uint64 + + // StmtCtx holds variables for current executing statement. + StmtCtx *stmtctx.StatementContext + + // AllowAggPushDown can be set to false to forbid aggregation push down. + AllowAggPushDown bool + + // AllowWriteRowID can be set to false to forbid write data to _tidb_rowid. + // This variable is currently not recommended to be turned on. + AllowWriteRowID bool + + // CorrelationThreshold is the guard to enable row count estimation using column order correlation. + CorrelationThreshold float64 + + // CorrelationExpFactor is used to control the heuristic approach of row count estimation when CorrelationThreshold is not met. + CorrelationExpFactor int + + // CPUFactor is the CPU cost of processing one expression for one row. + CPUFactor float64 + // CopCPUFactor is the CPU cost of processing one expression for one row in coprocessor. + CopCPUFactor float64 + // NetworkFactor is the network cost of transferring 1 byte data. + NetworkFactor float64 + // ScanFactor is the IO cost of scanning 1 byte data on TiKV. + ScanFactor float64 + // DescScanFactor is the IO cost of scanning 1 byte data on TiKV in desc order. + DescScanFactor float64 + // SeekFactor is the IO cost of seeking the start value of a range in TiKV. + SeekFactor float64 + // MemoryFactor is the memory cost of storing one tuple. + MemoryFactor float64 + // DiskFactor is the IO cost of reading/writing one byte to temporary disk. 
+ DiskFactor float64 + // ConcurrencyFactor is the CPU cost of additional one goroutine. + ConcurrencyFactor float64 + + // CurrInsertValues is used to record current ValuesExpr's values. + // See http://dev.mysql.com/doc/refman/5.7/en/miscellaneous-functions.html#function_values + CurrInsertValues chunk.Row + + // Per-connection time zones. Each client that connects has its own time zone setting, given by the session time_zone variable. + // See https://dev.mysql.com/doc/refman/5.7/en/time-zone-support.html + TimeZone *time.Location + + SQLMode mysql.SQLMode + + /* TiDB system variables */ + + // SkipUTF8Check check on input value. + SkipUTF8Check bool + + // IDAllocator is provided by kvEncoder, if it is provided, we will use it to alloc auto id instead of using + // Table.alloc. + IDAllocator autoid.Allocator + + // EnableCascadesPlanner enables the cascades planner. + EnableCascadesPlanner bool + + // EnableVectorizedExpression enables the vectorized expression evaluation. + EnableVectorizedExpression bool + + // DDLReorgPriority is the operation priority of adding indices. + DDLReorgPriority int + + // WaitSplitRegionFinish defines the split region behaviour is sync or async. + WaitSplitRegionFinish bool + + // WaitSplitRegionTimeout defines the split region timeout. + WaitSplitRegionTimeout uint64 + + writeStmtBufs WriteStmtBufs + + // EnableRadixJoin indicates whether to use radix hash join to execute + // HashJoin. + EnableRadixJoin bool + + // ConstraintCheckInPlace indicates whether to check the constraint when the SQL executing. + ConstraintCheckInPlace bool + + // CommandValue indicates which command current session is doing. + CommandValue uint32 + + // TiDBOptJoinReorderThreshold defines the minimal number of join nodes + // to use the greedy join reorder algorithm. + TiDBOptJoinReorderThreshold int + + // SlowQueryFile indicates which slow query log file for SLOW_QUERY table to parse. + SlowQueryFile string + + // MaxExecutionTime is the timeout for select statement, in milliseconds. + // If the value is 0, timeouts are not enabled. + // See https://dev.mysql.com/doc/refman/5.7/en/server-system-variables.html#sysvar_max_execution_time + MaxExecutionTime uint64 + + // Killed is a flag to indicate that this query is killed. + Killed uint32 + + // ConnectionInfo indicates current connection info used by current session, only be lazy assigned by plugin. + ConnectionInfo *ConnectionInfo + + // use noop funcs or not + EnableNoopFuncs bool + + // StartTime is the start time of the last query. + StartTime time.Time + + // DurationParse is the duration of parsing SQL string to AST of the last query. + DurationParse time.Duration + + // DurationCompile is the duration of compiling AST to execution plan of the last query. + DurationCompile time.Duration + + // PrevStmt is used to store the previous executed statement in the current session. + PrevStmt fmt.Stringer + + // AllowRemoveAutoInc indicates whether a user can drop the auto_increment column attribute or not. + AllowRemoveAutoInc bool + + // Unexported fields should be accessed and set through interfaces like GetReplicaRead() and SetReplicaRead(). + + // allowInSubqToJoinAndAgg can be set to false to forbid rewriting the semi join to inner join with agg. + allowInSubqToJoinAndAgg bool + + // replicaRead is used for reading data from replicas, only follower is supported at this time. + replicaRead kv.ReplicaReadType + + // RowEncoder is reused in session for encode row data. 
+ RowEncoder rowcodec.Encoder +} + +// ConnectionInfo present connection used by audit. +type ConnectionInfo struct { + ConnectionID uint32 + ConnectionType string + Host string + ClientIP string + ClientPort string + ServerID int + ServerPort int + Duration float64 + User string + ServerOSLoginUser string + OSVersion string + ClientVersion string + ServerVersion string + SSLVersion string + PID int + DB string +} + +// NewSessionVars creates a session vars object. +func NewSessionVars() *SessionVars { + vars := &SessionVars{ + Users: make(map[string]string), + systems: make(map[string]string), + TxnCtx: &TransactionContext{}, + KVVars: kv.NewVariables(), + StrictSQLMode: true, + Status: mysql.ServerStatusAutocommit, + StmtCtx: new(stmtctx.StatementContext), + AllowAggPushDown: false, + DDLReorgPriority: kv.PriorityLow, + allowInSubqToJoinAndAgg: DefOptInSubqToJoinAndAgg, + CorrelationThreshold: DefOptCorrelationThreshold, + CorrelationExpFactor: DefOptCorrelationExpFactor, + CPUFactor: DefOptCPUFactor, + CopCPUFactor: DefOptCopCPUFactor, + NetworkFactor: DefOptNetworkFactor, + ScanFactor: DefOptScanFactor, + DescScanFactor: DefOptDescScanFactor, + SeekFactor: DefOptSeekFactor, + MemoryFactor: DefOptMemoryFactor, + DiskFactor: DefOptDiskFactor, + ConcurrencyFactor: DefOptConcurrencyFactor, + EnableRadixJoin: false, + EnableVectorizedExpression: DefEnableVectorizedExpression, + CommandValue: uint32(mysql.ComSleep), + TiDBOptJoinReorderThreshold: DefTiDBOptJoinReorderThreshold, + WaitSplitRegionFinish: DefTiDBWaitSplitRegionFinish, + WaitSplitRegionTimeout: DefWaitSplitRegionTimeout, + EnableNoopFuncs: DefTiDBEnableNoopFuncs, + replicaRead: kv.ReplicaReadLeader, + AllowRemoveAutoInc: DefTiDBAllowRemoveAutoInc, + } + vars.Concurrency = Concurrency{ + IndexLookupConcurrency: DefIndexLookupConcurrency, + IndexSerialScanConcurrency: DefIndexSerialScanConcurrency, + IndexLookupJoinConcurrency: DefIndexLookupJoinConcurrency, + HashJoinConcurrency: DefTiDBHashJoinConcurrency, + ProjectionConcurrency: DefTiDBProjectionConcurrency, + DistSQLScanConcurrency: DefDistSQLScanConcurrency, + HashAggPartialConcurrency: DefTiDBHashAggPartialConcurrency, + HashAggFinalConcurrency: DefTiDBHashAggFinalConcurrency, + } + vars.BatchSize = BatchSize{ + IndexLookupSize: DefIndexLookupSize, + InitChunkSize: DefInitChunkSize, + MaxChunkSize: DefMaxChunkSize, + } + return vars +} + +// GetAllowInSubqToJoinAndAgg get AllowInSubqToJoinAndAgg from sql hints and SessionVars.allowInSubqToJoinAndAgg. +func (s *SessionVars) GetAllowInSubqToJoinAndAgg() bool { + if s.StmtCtx.HasAllowInSubqToJoinAndAggHint { + return s.StmtCtx.AllowInSubqToJoinAndAgg + } + return s.allowInSubqToJoinAndAgg +} + +// SetAllowInSubqToJoinAndAgg set SessionVars.allowInSubqToJoinAndAgg. +func (s *SessionVars) SetAllowInSubqToJoinAndAgg(val bool) { + s.allowInSubqToJoinAndAgg = val +} + +// GetReplicaRead get ReplicaRead from sql hints and SessionVars.replicaRead. +func (s *SessionVars) GetReplicaRead() kv.ReplicaReadType { + if s.StmtCtx.HasReplicaReadHint { + return kv.ReplicaReadType(s.StmtCtx.ReplicaRead) + } + return s.replicaRead +} + +// SetReplicaRead set SessionVars.replicaRead. +func (s *SessionVars) SetReplicaRead(val kv.ReplicaReadType) { + s.replicaRead = val +} + +// GetWriteStmtBufs get pointer of SessionVars.writeStmtBufs. +func (s *SessionVars) GetWriteStmtBufs() *WriteStmtBufs { + return &s.writeStmtBufs +} + +// GetSplitRegionTimeout gets split region timeout. 
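+// Editor's note: the lines below are an illustrative sketch and are not part of
+// the original patch. WaitSplitRegionTimeout is stored as a plain number of
+// seconds, so a value of 300 yields a 5-minute duration:
+//
+//	vars := NewSessionVars()
+//	vars.WaitSplitRegionTimeout = 300
+//	timeout := vars.GetSplitRegionTimeout() // 5m0s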
+func (s *SessionVars) GetSplitRegionTimeout() time.Duration {
+	return time.Duration(s.WaitSplitRegionTimeout) * time.Second
+}
+
+// CleanBuffers cleans the temporary bufs.
+func (s *SessionVars) CleanBuffers() {
+	s.GetWriteStmtBufs().clean()
+}
+
+// AllocPlanColumnID allocates column id for plan.
+func (s *SessionVars) AllocPlanColumnID() int64 {
+	s.PlanColumnID++
+	return s.PlanColumnID
+}
+
+// GetCharsetInfo gets charset and collation for current context.
+// What character set should the server translate a statement to after receiving it?
+// For this, the server uses the character_set_connection and collation_connection system variables.
+// It converts statements sent by the client from character_set_client to character_set_connection
+// (except for string literals that have an introducer such as _latin1 or _utf8).
+// collation_connection is important for comparisons of literal strings.
+// For comparisons of strings with column values, collation_connection does not matter because columns
+// have their own collation, which has a higher collation precedence.
+// See https://dev.mysql.com/doc/refman/5.7/en/charset-connection.html
+func (s *SessionVars) GetCharsetInfo() (charset, collation string) {
+	charset = s.systems[CharacterSetConnection]
+	collation = s.systems[CollationConnection]
+	return
+}
+
+// SetLastInsertID saves the last insert id to the session context.
+// TODO: we may store the result for last_insert_id sys var later.
+func (s *SessionVars) SetLastInsertID(insertID uint64) {
+	s.StmtCtx.LastInsertID = insertID
+}
+
+// SetStatusFlag sets the session server status variable.
+// If on is true, it sets the flag in the session status,
+// otherwise it removes the flag.
+func (s *SessionVars) SetStatusFlag(flag uint16, on bool) {
+	if on {
+		s.Status |= flag
+		return
+	}
+	s.Status &= ^flag
+}
+
+// GetStatusFlag gets the session server status variable, and returns true if it is on.
+func (s *SessionVars) GetStatusFlag(flag uint16) bool {
+	return s.Status&flag > 0
+}
+
+// InTxn returns if the session is in a transaction.
+func (s *SessionVars) InTxn() bool {
+	return s.GetStatusFlag(mysql.ServerStatusInTrans)
+}
+
+// IsAutocommit returns if the session is set to autocommit.
+func (s *SessionVars) IsAutocommit() bool {
+	return s.GetStatusFlag(mysql.ServerStatusAutocommit)
+}
+
+// Location returns the value of the time_zone session variable. If it is nil, time.Local is returned.
+func (s *SessionVars) Location() *time.Location {
+	loc := s.TimeZone
+	if loc == nil {
+		loc = time.Local
+	}
+	return loc
+}
+
+// GetSystemVar gets the string value of a system variable.
+func (s *SessionVars) GetSystemVar(name string) (string, bool) {
+	if name == WarningCount {
+		return strconv.Itoa(s.SysWarningCount), true
+	} else if name == ErrorCount {
+		return strconv.Itoa(int(s.SysErrorCount)), true
+	}
+	val, ok := s.systems[name]
+	return val, ok
+}
+
+func (s *SessionVars) setDDLReorgPriority(val string) {
+	val = strings.ToLower(val)
+	switch val {
+	case "priority_low":
+		s.DDLReorgPriority = kv.PriorityLow
+	case "priority_normal":
+		s.DDLReorgPriority = kv.PriorityNormal
+	case "priority_high":
+		s.DDLReorgPriority = kv.PriorityHigh
+	default:
+		s.DDLReorgPriority = kv.PriorityLow
+	}
+}
+
+// SetSystemVar sets the value of a system variable.
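+// Editor's note: a hedged usage sketch, not part of the original patch; the
+// variable chosen below is only an example:
+//
+//	vars := NewSessionVars()
+//	if err := vars.SetSystemVar(MaxExecutionTime, "1000"); err != nil {
+//		// handle the error
+//	}
+//	v, ok := vars.GetSystemVar(MaxExecutionTime) // "1000", true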
+func (s *SessionVars) SetSystemVar(name string, val string) error { + switch name { + case TxnIsolationOneShot: + switch val { + case "SERIALIZABLE", "READ-UNCOMMITTED": + skipIsolationLevelCheck, err := GetSessionSystemVar(s, TiDBSkipIsolationLevelCheck) + returnErr := ErrUnsupportedIsolationLevel.GenWithStackByArgs(val) + if err != nil { + returnErr = err + } + if !TiDBOptOn(skipIsolationLevelCheck) || err != nil { + return returnErr + } + //SET TRANSACTION ISOLATION LEVEL will affect two internal variables: + // 1. tx_isolation + // 2. transaction_isolation + // The following if condition is used to deduplicate two same warnings. + if name == "transaction_isolation" { + s.StmtCtx.AppendWarning(returnErr) + } + } + s.TxnIsolationLevelOneShot.State = 1 + s.TxnIsolationLevelOneShot.Value = val + case SQLModeVar: + val = mysql.FormatSQLModeStr(val) + // Modes is a list of different modes separated by commas. + sqlMode, err2 := mysql.GetSQLMode(val) + if err2 != nil { + return errors.Trace(err2) + } + s.StrictSQLMode = sqlMode.HasStrictMode() + s.SQLMode = sqlMode + s.SetStatusFlag(mysql.ServerStatusNoBackslashEscaped, sqlMode.HasNoBackslashEscapesMode()) + case AutoCommit: + isAutocommit := TiDBOptOn(val) + s.SetStatusFlag(mysql.ServerStatusAutocommit, isAutocommit) + if isAutocommit { + s.SetStatusFlag(mysql.ServerStatusInTrans, false) + } + case MaxExecutionTime: + timeoutMS := tidbOptPositiveInt32(val, 0) + s.MaxExecutionTime = uint64(timeoutMS) + case TiDBSkipUTF8Check: + s.SkipUTF8Check = TiDBOptOn(val) + case TiDBOptAggPushDown: + s.AllowAggPushDown = TiDBOptOn(val) + case TiDBOptWriteRowID: + s.AllowWriteRowID = TiDBOptOn(val) + case TiDBOptInSubqToJoinAndAgg: + s.SetAllowInSubqToJoinAndAgg(TiDBOptOn(val)) + case TiDBOptCorrelationThreshold: + s.CorrelationThreshold = tidbOptFloat64(val, DefOptCorrelationThreshold) + case TiDBOptCorrelationExpFactor: + s.CorrelationExpFactor = int(tidbOptInt64(val, DefOptCorrelationExpFactor)) + case TiDBOptCPUFactor: + s.CPUFactor = tidbOptFloat64(val, DefOptCPUFactor) + case TiDBOptCopCPUFactor: + s.CopCPUFactor = tidbOptFloat64(val, DefOptCopCPUFactor) + case TiDBOptNetworkFactor: + s.NetworkFactor = tidbOptFloat64(val, DefOptNetworkFactor) + case TiDBOptScanFactor: + s.ScanFactor = tidbOptFloat64(val, DefOptScanFactor) + case TiDBOptDescScanFactor: + s.DescScanFactor = tidbOptFloat64(val, DefOptDescScanFactor) + case TiDBOptSeekFactor: + s.SeekFactor = tidbOptFloat64(val, DefOptSeekFactor) + case TiDBOptMemoryFactor: + s.MemoryFactor = tidbOptFloat64(val, DefOptMemoryFactor) + case TiDBOptDiskFactor: + s.DiskFactor = tidbOptFloat64(val, DefOptDiskFactor) + case TiDBOptConcurrencyFactor: + s.ConcurrencyFactor = tidbOptFloat64(val, DefOptConcurrencyFactor) + case TiDBIndexLookupConcurrency: + s.IndexLookupConcurrency = tidbOptPositiveInt32(val, DefIndexLookupConcurrency) + case TiDBIndexLookupJoinConcurrency: + s.IndexLookupJoinConcurrency = tidbOptPositiveInt32(val, DefIndexLookupJoinConcurrency) + case TiDBIndexLookupSize: + s.IndexLookupSize = tidbOptPositiveInt32(val, DefIndexLookupSize) + case TiDBHashJoinConcurrency: + s.HashJoinConcurrency = tidbOptPositiveInt32(val, DefTiDBHashJoinConcurrency) + case TiDBProjectionConcurrency: + s.ProjectionConcurrency = tidbOptInt64(val, DefTiDBProjectionConcurrency) + case TiDBHashAggPartialConcurrency: + s.HashAggPartialConcurrency = tidbOptPositiveInt32(val, DefTiDBHashAggPartialConcurrency) + case TiDBHashAggFinalConcurrency: + s.HashAggFinalConcurrency = tidbOptPositiveInt32(val, 
DefTiDBHashAggFinalConcurrency) + case TiDBDistSQLScanConcurrency: + s.DistSQLScanConcurrency = tidbOptPositiveInt32(val, DefDistSQLScanConcurrency) + case TiDBIndexSerialScanConcurrency: + s.IndexSerialScanConcurrency = tidbOptPositiveInt32(val, DefIndexSerialScanConcurrency) + case TiDBBackoffLockFast: + s.KVVars.BackoffLockFast = tidbOptPositiveInt32(val, kv.DefBackoffLockFast) + case TiDBBackOffWeight: + s.KVVars.BackOffWeight = tidbOptPositiveInt32(val, kv.DefBackOffWeight) + case TiDBConstraintCheckInPlace: + s.ConstraintCheckInPlace = TiDBOptOn(val) + case TiDBCurrentTS, TiDBConfig: + return ErrReadOnly + case TiDBMaxChunkSize: + s.MaxChunkSize = tidbOptPositiveInt32(val, DefMaxChunkSize) + case TiDBInitChunkSize: + s.InitChunkSize = tidbOptPositiveInt32(val, DefInitChunkSize) + case TiDBGeneralLog: + atomic.StoreUint32(&ProcessGeneralLog, uint32(tidbOptPositiveInt32(val, DefTiDBGeneralLog))) + case TiDBEnableCascadesPlanner: + s.EnableCascadesPlanner = TiDBOptOn(val) + case TiDBDDLReorgPriority: + s.setDDLReorgPriority(val) + case TiDBEnableRadixJoin: + s.EnableRadixJoin = TiDBOptOn(val) + case TiDBEnableVectorizedExpression: + s.EnableVectorizedExpression = TiDBOptOn(val) + case TiDBOptJoinReorderThreshold: + s.TiDBOptJoinReorderThreshold = tidbOptPositiveInt32(val, DefTiDBOptJoinReorderThreshold) + case TiDBSlowQueryFile: + s.SlowQueryFile = val + case TiDBWaitSplitRegionFinish: + s.WaitSplitRegionFinish = TiDBOptOn(val) + case TiDBWaitSplitRegionTimeout: + s.WaitSplitRegionTimeout = uint64(tidbOptPositiveInt32(val, DefWaitSplitRegionTimeout)) + case TiDBEnableNoopFuncs: + s.EnableNoopFuncs = TiDBOptOn(val) + case TiDBReplicaRead: + if strings.EqualFold(val, "follower") { + s.SetReplicaRead(kv.ReplicaReadFollower) + } else if strings.EqualFold(val, "leader") || len(val) == 0 { + s.SetReplicaRead(kv.ReplicaReadLeader) + } + case TiDBAllowRemoveAutoInc: + s.AllowRemoveAutoInc = TiDBOptOn(val) + // It's a global variable, but it also wants to be cached in server. + case TiDBMaxDeltaSchemaCount: + SetMaxDeltaSchemaCount(tidbOptInt64(val, DefTiDBMaxDeltaSchemaCount)) + } + s.systems[name] = val + return nil +} + +// SetLocalSystemVar sets values of the local variables which in "server" scope. +func SetLocalSystemVar(name string, val string) { + switch name { + case TiDBDDLReorgWorkerCount: + SetDDLReorgWorkerCounter(int32(tidbOptPositiveInt32(val, DefTiDBDDLReorgWorkerCount))) + case TiDBDDLReorgBatchSize: + SetDDLReorgBatchSize(int32(tidbOptPositiveInt32(val, DefTiDBDDLReorgBatchSize))) + case TiDBDDLErrorCountLimit: + SetDDLErrorCountLimit(tidbOptInt64(val, DefTiDBDDLErrorCountLimit)) + } +} + +// special session variables. +const ( + SQLModeVar = "sql_mode" + CharacterSetResults = "character_set_results" + MaxAllowedPacket = "max_allowed_packet" + TimeZone = "time_zone" + TxnIsolation = "tx_isolation" + TransactionIsolation = "transaction_isolation" + TxnIsolationOneShot = "tx_isolation_one_shot" + MaxExecutionTime = "max_execution_time" +) + +// these variables are useless for TiDB, but still need to validate their values for some compatible issues. +// TODO: some more variables need to be added here. +const ( + serverReadOnly = "read_only" +) + +var ( + // TxIsolationNames are the valid values of the variable "tx_isolation" or "transaction_isolation". + TxIsolationNames = map[string]struct{}{ + "READ-UNCOMMITTED": {}, + "READ-COMMITTED": {}, + "REPEATABLE-READ": {}, + "SERIALIZABLE": {}, + } +) + +// TableDelta stands for the changed count for one table. 
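+// Editor's note: an illustrative sketch, not part of the original patch. A
+// TableDelta is accumulated per table via TransactionContext.UpdateDeltaForTable:
+//
+//	tc := &TransactionContext{}
+//	tc.UpdateDeltaForTable(1, 2, 2, nil)  // tableID 1: Delta=2, Count=2
+//	tc.UpdateDeltaForTable(1, -1, 1, nil) // tableID 1: Delta=1, Count=3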
+type TableDelta struct { + Delta int64 + Count int64 + ColSize map[int64]int64 + InitTime time.Time // InitTime is the time that this delta is generated. +} + +// Concurrency defines concurrency values. +type Concurrency struct { + // IndexLookupConcurrency is the number of concurrent index lookup worker. + IndexLookupConcurrency int + + // IndexLookupJoinConcurrency is the number of concurrent index lookup join inner worker. + IndexLookupJoinConcurrency int + + // DistSQLScanConcurrency is the number of concurrent dist SQL scan worker. + DistSQLScanConcurrency int + + // HashJoinConcurrency is the number of concurrent hash join outer worker. + HashJoinConcurrency int + + // ProjectionConcurrency is the number of concurrent projection worker. + ProjectionConcurrency int64 + + // HashAggPartialConcurrency is the number of concurrent hash aggregation partial worker. + HashAggPartialConcurrency int + + // HashAggFinalConcurrency is the number of concurrent hash aggregation final worker. + HashAggFinalConcurrency int + + // IndexSerialScanConcurrency is the number of concurrent index serial scan worker. + IndexSerialScanConcurrency int +} + +// BatchSize defines batch size values. +type BatchSize struct { + + // IndexLookupSize is the number of handles for an index lookup task in index double read executor. + IndexLookupSize int + + // InitChunkSize defines init row count of a Chunk during query execution. + InitChunkSize int + + // MaxChunkSize defines max row count of a Chunk during query execution. + MaxChunkSize int +} diff --git a/sessionctx/variable/session_test.go b/sessionctx/variable/session_test.go new file mode 100644 index 0000000..72a045e --- /dev/null +++ b/sessionctx/variable/session_test.go @@ -0,0 +1,80 @@ +// Copyright 2015 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package variable_test + +import ( + . 
"github.com/pingcap/check" + "github.com/pingcap/tidb/util/mock" +) + +var _ = Suite(&testSessionSuite{}) + +type testSessionSuite struct { +} + +func (*testSessionSuite) TestSession(c *C) { + ctx := mock.NewContext() + + ss := ctx.GetSessionVars().StmtCtx + c.Assert(ss, NotNil) + + // For AffectedRows + ss.AddAffectedRows(1) + c.Assert(ss.AffectedRows(), Equals, uint64(1)) + ss.AddAffectedRows(1) + c.Assert(ss.AffectedRows(), Equals, uint64(2)) + + // For RecordRows + ss.AddRecordRows(1) + c.Assert(ss.RecordRows(), Equals, uint64(1)) + ss.AddRecordRows(1) + c.Assert(ss.RecordRows(), Equals, uint64(2)) + + // For FoundRows + ss.AddFoundRows(1) + c.Assert(ss.FoundRows(), Equals, uint64(1)) + ss.AddFoundRows(1) + c.Assert(ss.FoundRows(), Equals, uint64(2)) + + // For UpdatedRows + ss.AddUpdatedRows(1) + c.Assert(ss.UpdatedRows(), Equals, uint64(1)) + ss.AddUpdatedRows(1) + c.Assert(ss.UpdatedRows(), Equals, uint64(2)) + + // For TouchedRows + ss.AddTouchedRows(1) + c.Assert(ss.TouchedRows(), Equals, uint64(1)) + ss.AddTouchedRows(1) + c.Assert(ss.TouchedRows(), Equals, uint64(2)) + + // For CopiedRows + ss.AddCopiedRows(1) + c.Assert(ss.CopiedRows(), Equals, uint64(1)) + ss.AddCopiedRows(1) + c.Assert(ss.CopiedRows(), Equals, uint64(2)) + + // For last insert id + ctx.GetSessionVars().SetLastInsertID(1) + c.Assert(ctx.GetSessionVars().StmtCtx.LastInsertID, Equals, uint64(1)) + + ss.ResetForRetry() + c.Assert(ss.AffectedRows(), Equals, uint64(0)) + c.Assert(ss.FoundRows(), Equals, uint64(0)) + c.Assert(ss.UpdatedRows(), Equals, uint64(0)) + c.Assert(ss.RecordRows(), Equals, uint64(0)) + c.Assert(ss.TouchedRows(), Equals, uint64(0)) + c.Assert(ss.CopiedRows(), Equals, uint64(0)) + c.Assert(ss.WarningCount(), Equals, uint16(0)) +} diff --git a/sessionctx/variable/statusvar.go b/sessionctx/variable/statusvar.go new file mode 100644 index 0000000..3125019 --- /dev/null +++ b/sessionctx/variable/statusvar.go @@ -0,0 +1,176 @@ +// Copyright 2015 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package variable + +import ( + "bytes" + "crypto/tls" + "sync" +) + +var statisticsList []Statistics +var statisticsListLock sync.RWMutex + +// DefaultStatusVarScopeFlag is the default scope of status variables. +var DefaultStatusVarScopeFlag = ScopeGlobal | ScopeSession + +// StatusVal is the value of the corresponding status variable. +type StatusVal struct { + Scope ScopeFlag + Value interface{} +} + +// Statistics is the interface of statistics. +type Statistics interface { + // GetScope gets the status variables scope. + GetScope(status string) ScopeFlag + // Stats returns the statistics status variables. + Stats(*SessionVars) (map[string]interface{}, error) +} + +// RegisterStatistics registers statistics. +func RegisterStatistics(s Statistics) { + statisticsListLock.Lock() + statisticsList = append(statisticsList, s) + statisticsListLock.Unlock() +} + +// GetStatusVars gets registered statistics status variables. 
+// TODO: Refactor this function to avoid repeated memory allocation / dealloc +func GetStatusVars(vars *SessionVars) (map[string]*StatusVal, error) { + statusVars := make(map[string]*StatusVal) + statisticsListLock.RLock() + defer statisticsListLock.RUnlock() + + for _, statistics := range statisticsList { + vals, err := statistics.Stats(vars) + if err != nil { + return nil, err + } + + for name, val := range vals { + scope := statistics.GetScope(name) + statusVars[name] = &StatusVal{Value: val, Scope: scope} + } + } + + return statusVars, nil +} + +// Taken from https://golang.org/pkg/crypto/tls/#pkg-constants . +var tlsCiphers = []uint16{ + tls.TLS_RSA_WITH_RC4_128_SHA, + tls.TLS_RSA_WITH_3DES_EDE_CBC_SHA, + tls.TLS_RSA_WITH_AES_128_CBC_SHA, + tls.TLS_RSA_WITH_AES_256_CBC_SHA, + tls.TLS_RSA_WITH_AES_128_CBC_SHA256, + tls.TLS_RSA_WITH_AES_128_GCM_SHA256, + tls.TLS_RSA_WITH_AES_256_GCM_SHA384, + tls.TLS_ECDHE_ECDSA_WITH_RC4_128_SHA, + tls.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA, + tls.TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA, + tls.TLS_ECDHE_RSA_WITH_RC4_128_SHA, + tls.TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA, + tls.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA, + tls.TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA, + tls.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256, + tls.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256, + tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256, + tls.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256, + tls.TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384, + tls.TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384, + tls.TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305, + tls.TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305, +} + +var tlsSupportedCiphers string + +// Taken from https://github.com/openssl/openssl/blob/c784a838e0947fcca761ee62def7d077dc06d37f/include/openssl/ssl.h#L141 . +var tlsVersionString = map[uint16]string{ + tls.VersionSSL30: "SSLv3", + tls.VersionTLS10: "TLSv1", + tls.VersionTLS11: "TLSv1.1", + tls.VersionTLS12: "TLSv1.2", +} + +// Taken from https://testssl.sh/openssl-rfc.mapping.html . 
+var tlsCipherString = map[uint16]string{ + tls.TLS_RSA_WITH_RC4_128_SHA: "RC4-SHA", + tls.TLS_RSA_WITH_3DES_EDE_CBC_SHA: "DES-CBC3-SHA", + tls.TLS_RSA_WITH_AES_128_CBC_SHA: "AES128-SHA", + tls.TLS_RSA_WITH_AES_256_CBC_SHA: "AES256-SHA", + tls.TLS_RSA_WITH_AES_128_CBC_SHA256: "AES128-SHA256", + tls.TLS_RSA_WITH_AES_128_GCM_SHA256: "AES128-GCM-SHA256", + tls.TLS_RSA_WITH_AES_256_GCM_SHA384: "AES256-GCM-SHA384", + tls.TLS_ECDHE_ECDSA_WITH_RC4_128_SHA: "ECDHE-ECDSA-RC4-SHA", + tls.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA: "ECDHE-ECDSA-AES128-SHA", + tls.TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA: "ECDHE-ECDSA-AES256-SHA", + tls.TLS_ECDHE_RSA_WITH_RC4_128_SHA: "ECDHE-RSA-RC4-SHA", + tls.TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA: "ECDHE-RSA-DES-CBC3-SHA", + tls.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA: "ECDHE-RSA-AES128-SHA", + tls.TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA: "ECDHE-RSA-AES256-SHA", + tls.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256: "ECDHE-ECDSA-AES128-SHA256", + tls.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256: "ECDHE-RSA-AES128-SHA256", + tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256: "ECDHE-RSA-AES128-GCM-SHA256", + tls.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256: "ECDHE-ECDSA-AES128-GCM-SHA256", + tls.TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384: "ECDHE-RSA-AES256-GCM-SHA384", + tls.TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384: "ECDHE-ECDSA-AES256-GCM-SHA384", + tls.TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305: "ECDHE-RSA-CHACHA20-POLY1305", + tls.TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305: "ECDHE-ECDSA-CHACHA20-POLY1305", +} + +var defaultStatus = map[string]*StatusVal{ + "Ssl_cipher": {ScopeGlobal | ScopeSession, ""}, + "Ssl_cipher_list": {ScopeGlobal | ScopeSession, ""}, + "Ssl_verify_mode": {ScopeGlobal | ScopeSession, 0}, + "Ssl_version": {ScopeGlobal | ScopeSession, ""}, +} + +type defaultStatusStat struct { +} + +func (s defaultStatusStat) GetScope(status string) ScopeFlag { + return defaultStatus[status].Scope +} + +func (s defaultStatusStat) Stats(vars *SessionVars) (map[string]interface{}, error) { + statusVars := make(map[string]interface{}) + + for name, v := range defaultStatus { + statusVars[name] = v.Value + } + + // `vars` may be nil in unit tests. + if vars != nil && vars.TLSConnectionState != nil { + statusVars["Ssl_cipher"] = tlsCipherString[vars.TLSConnectionState.CipherSuite] + statusVars["Ssl_cipher_list"] = tlsSupportedCiphers + // tls.VerifyClientCertIfGiven == SSL_VERIFY_PEER | SSL_VERIFY_CLIENT_ONCE + statusVars["Ssl_verify_mode"] = 0x01 | 0x04 + statusVars["Ssl_version"] = tlsVersionString[vars.TLSConnectionState.Version] + } + + return statusVars, nil +} + +func init() { + var ciphersBuffer bytes.Buffer + for _, v := range tlsCiphers { + ciphersBuffer.WriteString(tlsCipherString[v]) + ciphersBuffer.WriteString(":") + } + tlsSupportedCiphers = ciphersBuffer.String() + + var stat defaultStatusStat + RegisterStatistics(stat) +} diff --git a/sessionctx/variable/statusvar_test.go b/sessionctx/variable/statusvar_test.go new file mode 100644 index 0000000..5e00500 --- /dev/null +++ b/sessionctx/variable/statusvar_test.go @@ -0,0 +1,72 @@ +// Copyright 2015 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package variable + +import ( + . "github.com/pingcap/check" + "github.com/pingcap/tidb/util/testleak" +) + +var _ = Suite(&testStatusVarSuite{}) + +type testStatusVarSuite struct { + ms *mockStatistics +} + +func (s *testStatusVarSuite) SetUpSuite(c *C) { + s.ms = &mockStatistics{} + RegisterStatistics(s.ms) +} + +// mockStatistics represents mocked statistics. +type mockStatistics struct{} + +const ( + testStatus = "test_status" + testSessionStatus = "test_session_status" + testStatusVal = "test_status_val" +) + +var specificStatusScopes = map[string]ScopeFlag{ + testSessionStatus: ScopeSession, +} + +func (ms *mockStatistics) GetScope(status string) ScopeFlag { + scope, ok := specificStatusScopes[status] + if !ok { + return DefaultStatusVarScopeFlag + } + + return scope +} + +func (ms *mockStatistics) Stats(vars *SessionVars) (map[string]interface{}, error) { + m := make(map[string]interface{}, len(specificStatusScopes)) + m[testStatus] = testStatusVal + + return m, nil +} + +func (s *testStatusVarSuite) TestStatusVar(c *C) { + defer testleak.AfterTest(c)() + scope := s.ms.GetScope(testStatus) + c.Assert(scope, Equals, DefaultStatusVarScopeFlag) + scope = s.ms.GetScope(testSessionStatus) + c.Assert(scope, Equals, ScopeSession) + + vars, err := GetStatusVars(nil) + c.Assert(err, IsNil) + v := &StatusVal{Scope: DefaultStatusVarScopeFlag, Value: testStatusVal} + c.Assert(v, DeepEquals, vars[testStatus]) +} diff --git a/sessionctx/variable/sysvar.go b/sessionctx/variable/sysvar.go new file mode 100644 index 0000000..70ecb86 --- /dev/null +++ b/sessionctx/variable/sysvar.go @@ -0,0 +1,943 @@ +// Copyright 2015 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package variable + +import ( + "strconv" + "strings" + + "github.com/pingcap/tidb/kv" + "github.com/pingcap/tidb/parser/mysql" + "github.com/pingcap/tidb/parser/terror" +) + +// ScopeFlag is for system variable whether can be changed in global/session dynamically or not. +type ScopeFlag uint8 + +const ( + // ScopeNone means the system variable can not be changed dynamically. + ScopeNone ScopeFlag = 0 + // ScopeGlobal means the system variable can be changed globally. + ScopeGlobal ScopeFlag = 1 << 0 + // ScopeSession means the system variable can only be changed in current session. + ScopeSession ScopeFlag = 1 << 1 +) + +// SysVar is for system variable. +type SysVar struct { + // Scope is for whether can be changed or not + Scope ScopeFlag + + // Name is the variable name. + Name string + + // Value is the variable value. + Value string +} + +// SysVars is global sys vars map. +var SysVars map[string]*SysVar + +// GetSysVar returns sys var info for name as key. 
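+// Editor's note: illustrative only, not part of the original patch. The lookup
+// is case-insensitive because the name is lower-cased before indexing SysVars:
+//
+//	sv := GetSysVar("MAX_EXECUTION_TIME")
+//	// sv.Value == "0", sv.Scope == ScopeGlobal|ScopeSession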
+func GetSysVar(name string) *SysVar { + name = strings.ToLower(name) + return SysVars[name] +} + +// Variable errors +var ( + ErrUnsupportedValueForVar = terror.ClassVariable.New(mysql.ErrUnsupportedValueForVar, mysql.MySQLErrName[mysql.ErrUnsupportedValueForVar]) + ErrUnknownSystemVar = terror.ClassVariable.New(mysql.ErrUnknownSystemVariable, mysql.MySQLErrName[mysql.ErrUnknownSystemVariable]) + ErrIncorrectScope = terror.ClassVariable.New(mysql.ErrIncorrectGlobalLocalVar, mysql.MySQLErrName[mysql.ErrIncorrectGlobalLocalVar]) + ErrUnknownTimeZone = terror.ClassVariable.New(mysql.ErrUnknownTimeZone, mysql.MySQLErrName[mysql.ErrUnknownTimeZone]) + ErrReadOnly = terror.ClassVariable.New(mysql.ErrVariableIsReadonly, mysql.MySQLErrName[mysql.ErrVariableIsReadonly]) + ErrWrongValueForVar = terror.ClassVariable.New(mysql.ErrWrongValueForVar, mysql.MySQLErrName[mysql.ErrWrongValueForVar]) + ErrWrongTypeForVar = terror.ClassVariable.New(mysql.ErrWrongTypeForVar, mysql.MySQLErrName[mysql.ErrWrongTypeForVar]) + ErrTruncatedWrongValue = terror.ClassVariable.New(mysql.ErrTruncatedWrongValue, mysql.MySQLErrName[mysql.ErrTruncatedWrongValue]) + ErrMaxPreparedStmtCountReached = terror.ClassVariable.New(mysql.ErrMaxPreparedStmtCountReached, mysql.MySQLErrName[mysql.ErrMaxPreparedStmtCountReached]) + ErrUnsupportedIsolationLevel = terror.ClassVariable.New(mysql.ErrUnsupportedIsolationLevel, mysql.MySQLErrName[mysql.ErrUnsupportedIsolationLevel]) +) + +func init() { + SysVars = make(map[string]*SysVar) + for _, v := range defaultSysVars { + SysVars[v.Name] = v + } + initSynonymsSysVariables() + + // Register terror to mysql error map. + mySQLErrCodes := map[terror.ErrCode]uint16{ + mysql.ErrCantGetValidID: mysql.ErrCantGetValidID, + mysql.ErrCantSetToNull: mysql.ErrCantSetToNull, + mysql.ErrSnapshotTooOld: mysql.ErrSnapshotTooOld, + mysql.ErrUnsupportedValueForVar: mysql.ErrUnsupportedValueForVar, + mysql.ErrUnknownSystemVariable: mysql.ErrUnknownSystemVariable, + mysql.ErrIncorrectGlobalLocalVar: mysql.ErrIncorrectGlobalLocalVar, + mysql.ErrUnknownTimeZone: mysql.ErrUnknownTimeZone, + mysql.ErrVariableIsReadonly: mysql.ErrVariableIsReadonly, + mysql.ErrWrongValueForVar: mysql.ErrWrongValueForVar, + mysql.ErrWrongTypeForVar: mysql.ErrWrongTypeForVar, + mysql.ErrTruncatedWrongValue: mysql.ErrTruncatedWrongValue, + mysql.ErrMaxPreparedStmtCountReached: mysql.ErrMaxPreparedStmtCountReached, + mysql.ErrUnsupportedIsolationLevel: mysql.ErrUnsupportedIsolationLevel, + } + terror.ErrClassToMySQLCodes[terror.ClassVariable] = mySQLErrCodes +} + +// BoolToIntStr converts bool to int string, for example "0" or "1". 
+func BoolToIntStr(b bool) string { + if b { + return "1" + } + return "0" +} + +// BoolToInt32 converts bool to int32 +func BoolToInt32(b bool) int32 { + if b { + return 1 + } + return 0 +} + +// we only support MySQL now +var defaultSysVars = []*SysVar{ + {ScopeGlobal, "gtid_mode", "OFF"}, + {ScopeGlobal, FlushTime, "0"}, + {ScopeSession, PseudoSlaveMode, ""}, + {ScopeNone, "performance_schema_max_mutex_classes", "200"}, + {ScopeGlobal | ScopeSession, LowPriorityUpdates, "0"}, + {ScopeGlobal | ScopeSession, SessionTrackGtids, "OFF"}, + {ScopeGlobal | ScopeSession, "ndbinfo_max_rows", ""}, + {ScopeGlobal | ScopeSession, "ndb_index_stat_option", ""}, + {ScopeGlobal | ScopeSession, OldPasswords, "0"}, + {ScopeNone, "innodb_version", "5.6.25"}, + {ScopeGlobal, MaxConnections, "151"}, + {ScopeGlobal | ScopeSession, BigTables, "0"}, + {ScopeNone, "skip_external_locking", "1"}, + {ScopeGlobal, "slave_pending_jobs_size_max", "16777216"}, + {ScopeNone, "innodb_sync_array_size", "1"}, + {ScopeSession, "rand_seed2", ""}, + {ScopeGlobal, ValidatePasswordCheckUserName, "0"}, + {ScopeGlobal, "validate_password_number_count", "1"}, + {ScopeSession, "gtid_next", ""}, + {ScopeGlobal | ScopeSession, SQLSelectLimit, "18446744073709551615"}, + {ScopeGlobal, "ndb_show_foreign_key_mock_tables", ""}, + {ScopeNone, "multi_range_count", "256"}, + {ScopeGlobal | ScopeSession, DefaultWeekFormat, "0"}, + {ScopeGlobal | ScopeSession, "binlog_error_action", "IGNORE_ERROR"}, + {ScopeGlobal, "slave_transaction_retries", "10"}, + {ScopeGlobal | ScopeSession, "default_storage_engine", "InnoDB"}, + {ScopeNone, "ft_query_expansion_limit", "20"}, + {ScopeGlobal, MaxConnectErrors, "100"}, + {ScopeGlobal, SyncBinlog, "0"}, + {ScopeNone, "max_digest_length", "1024"}, + {ScopeNone, "innodb_force_load_corrupted", "0"}, + {ScopeNone, "performance_schema_max_table_handles", "4000"}, + {ScopeGlobal, InnodbFastShutdown, "1"}, + {ScopeNone, "ft_max_word_len", "84"}, + {ScopeGlobal, "log_backward_compatible_user_definitions", ""}, + {ScopeNone, "lc_messages_dir", "/usr/local/mysql-5.6.25-osx10.8-x86_64/share/"}, + {ScopeGlobal, "ft_boolean_syntax", "+ -><()~*:\"\"&|"}, + {ScopeGlobal, TableDefinitionCache, "-1"}, + {ScopeNone, SkipNameResolve, "0"}, + {ScopeNone, "performance_schema_max_file_handles", "32768"}, + {ScopeSession, "transaction_allow_batching", ""}, + {ScopeGlobal | ScopeSession, SQLModeVar, mysql.DefaultSQLMode}, + {ScopeNone, "performance_schema_max_statement_classes", "168"}, + {ScopeGlobal, "server_id", "0"}, + {ScopeGlobal, "innodb_flushing_avg_loops", "30"}, + {ScopeGlobal | ScopeSession, TmpTableSize, "16777216"}, + {ScopeGlobal, "innodb_max_purge_lag", "0"}, + {ScopeGlobal | ScopeSession, "preload_buffer_size", "32768"}, + {ScopeGlobal, "slave_checkpoint_period", "300"}, + {ScopeGlobal, CheckProxyUsers, "0"}, + {ScopeNone, "have_query_cache", "YES"}, + {ScopeGlobal, "innodb_flush_log_at_timeout", "1"}, + {ScopeGlobal, "innodb_max_undo_log_size", ""}, + {ScopeGlobal | ScopeSession, "range_alloc_block_size", "4096"}, + {ScopeGlobal, ConnectTimeout, "10"}, + {ScopeGlobal | ScopeSession, MaxExecutionTime, "0"}, + {ScopeGlobal | ScopeSession, CollationServer, mysql.DefaultCollationName}, + {ScopeNone, "have_rtree_keys", "YES"}, + {ScopeGlobal, "innodb_old_blocks_pct", "37"}, + {ScopeGlobal, "innodb_file_format", "Antelope"}, + {ScopeGlobal, "innodb_compression_failure_threshold_pct", "5"}, + {ScopeNone, "performance_schema_events_waits_history_long_size", "10000"}, + {ScopeGlobal, "innodb_checksum_algorithm", 
"innodb"}, + {ScopeNone, "innodb_ft_sort_pll_degree", "2"}, + {ScopeNone, "thread_stack", "262144"}, + {ScopeGlobal, "relay_log_info_repository", "FILE"}, + {ScopeGlobal | ScopeSession, SQLLogBin, "1"}, + {ScopeGlobal, SuperReadOnly, "0"}, + {ScopeGlobal | ScopeSession, "max_delayed_threads", "20"}, + {ScopeNone, "protocol_version", "10"}, + {ScopeGlobal | ScopeSession, "new", "OFF"}, + {ScopeGlobal | ScopeSession, "myisam_sort_buffer_size", "8388608"}, + {ScopeGlobal | ScopeSession, "optimizer_trace_offset", "-1"}, + {ScopeGlobal, InnodbBufferPoolDumpAtShutdown, "0"}, + {ScopeGlobal | ScopeSession, SQLNotes, "1"}, + {ScopeGlobal, InnodbCmpPerIndexEnabled, "0"}, + {ScopeGlobal, "innodb_ft_server_stopword_table", ""}, + {ScopeNone, "performance_schema_max_file_instances", "7693"}, + {ScopeNone, "log_output", "FILE"}, + {ScopeGlobal, "binlog_group_commit_sync_delay", ""}, + {ScopeGlobal, "binlog_group_commit_sync_no_delay_count", ""}, + {ScopeNone, "have_crypt", "YES"}, + {ScopeGlobal, "innodb_log_write_ahead_size", ""}, + {ScopeNone, "innodb_log_group_home_dir", "./"}, + {ScopeNone, "performance_schema_events_statements_history_size", "10"}, + {ScopeGlobal, GeneralLog, "0"}, + {ScopeGlobal, "validate_password_dictionary_file", ""}, + {ScopeGlobal, BinlogOrderCommits, "1"}, + {ScopeGlobal, MasterVerifyChecksum, "0"}, + {ScopeGlobal, "key_cache_division_limit", "100"}, + {ScopeGlobal, "rpl_semi_sync_master_trace_level", ""}, + {ScopeGlobal | ScopeSession, "max_insert_delayed_threads", "20"}, + {ScopeNone, "performance_schema_session_connect_attrs_size", "512"}, + {ScopeGlobal | ScopeSession, "time_zone", "SYSTEM"}, + {ScopeGlobal, "innodb_max_dirty_pages_pct", "75"}, + {ScopeGlobal, InnodbFilePerTable, "1"}, + {ScopeGlobal, InnodbLogCompressedPages, "1"}, + {ScopeGlobal, "master_info_repository", "FILE"}, + {ScopeGlobal, "rpl_stop_slave_timeout", "31536000"}, + {ScopeNone, "skip_networking", "0"}, + {ScopeGlobal, "innodb_monitor_reset", ""}, + {ScopeNone, "have_ssl", "DISABLED"}, + {ScopeNone, "have_openssl", "DISABLED"}, + {ScopeNone, "ssl_ca", ""}, + {ScopeNone, "ssl_cert", ""}, + {ScopeNone, "ssl_key", ""}, + {ScopeNone, "ssl_cipher", ""}, + {ScopeNone, "tls_version", "TLSv1,TLSv1.1,TLSv1.2"}, + {ScopeNone, "system_time_zone", "CST"}, + {ScopeGlobal, InnodbPrintAllDeadlocks, "0"}, + {ScopeNone, "innodb_autoinc_lock_mode", "1"}, + {ScopeGlobal, "slave_net_timeout", "3600"}, + {ScopeGlobal, "key_buffer_size", "8388608"}, + {ScopeGlobal | ScopeSession, ForeignKeyChecks, "OFF"}, + {ScopeGlobal, "host_cache_size", "279"}, + {ScopeGlobal, DelayKeyWrite, "ON"}, + {ScopeNone, "metadata_locks_cache_size", "1024"}, + {ScopeNone, "innodb_force_recovery", "0"}, + {ScopeGlobal, "innodb_file_format_max", "Antelope"}, + {ScopeGlobal | ScopeSession, "debug", ""}, + {ScopeGlobal, "log_warnings", "1"}, + {ScopeGlobal, OfflineMode, "0"}, + {ScopeGlobal | ScopeSession, InnodbStrictMode, "1"}, + {ScopeGlobal, "innodb_rollback_segments", "128"}, + {ScopeGlobal | ScopeSession, "join_buffer_size", "262144"}, + {ScopeNone, "innodb_mirrored_log_groups", "1"}, + {ScopeGlobal, "max_binlog_size", "1073741824"}, + {ScopeGlobal, "sync_master_info", "10000"}, + {ScopeGlobal, "concurrent_insert", "AUTO"}, + {ScopeGlobal, InnodbAdaptiveHashIndex, "1"}, + {ScopeGlobal, InnodbFtEnableStopword, "1"}, + {ScopeGlobal, "general_log_file", "/usr/local/mysql/data/localhost.log"}, + {ScopeGlobal | ScopeSession, InnodbSupportXA, "1"}, + {ScopeGlobal, "innodb_compression_level", "6"}, + {ScopeNone, "innodb_file_format_check", "1"}, + 
{ScopeNone, "myisam_mmap_size", "18446744073709551615"}, + {ScopeGlobal, "init_slave", ""}, + {ScopeNone, "innodb_buffer_pool_instances", "8"}, + {ScopeGlobal | ScopeSession, BlockEncryptionMode, "aes-128-ecb"}, + {ScopeGlobal | ScopeSession, "max_length_for_sort_data", "1024"}, + {ScopeNone, "character_set_system", "utf8"}, + {ScopeGlobal | ScopeSession, InteractiveTimeout, "28800"}, + {ScopeGlobal, InnodbOptimizeFullTextOnly, "0"}, + {ScopeNone, "character_sets_dir", "/usr/local/mysql-5.6.25-osx10.8-x86_64/share/charsets/"}, + {ScopeGlobal | ScopeSession, QueryCacheType, "OFF"}, + {ScopeNone, "innodb_rollback_on_timeout", "0"}, + {ScopeGlobal | ScopeSession, "query_alloc_block_size", "8192"}, + {ScopeGlobal, SlaveCompressedProtocol, "0"}, + {ScopeGlobal | ScopeSession, InitConnect, ""}, + {ScopeGlobal, "rpl_semi_sync_slave_trace_level", ""}, + {ScopeNone, "have_compress", "YES"}, + {ScopeNone, "thread_concurrency", "10"}, + {ScopeGlobal | ScopeSession, "query_prealloc_size", "8192"}, + {ScopeNone, "relay_log_space_limit", "0"}, + {ScopeGlobal | ScopeSession, MaxUserConnections, "0"}, + {ScopeNone, "performance_schema_max_thread_classes", "50"}, + {ScopeGlobal, "innodb_api_trx_level", "0"}, + {ScopeNone, "disconnect_on_expired_password", "1"}, + {ScopeNone, "performance_schema_max_file_classes", "50"}, + {ScopeGlobal, "expire_logs_days", "0"}, + {ScopeGlobal | ScopeSession, BinlogRowQueryLogEvents, "0"}, + {ScopeGlobal, "default_password_lifetime", ""}, + {ScopeNone, "pid_file", "/usr/local/mysql/data/localhost.pid"}, + {ScopeNone, "innodb_undo_tablespaces", "0"}, + {ScopeGlobal, InnodbStatusOutputLocks, "0"}, + {ScopeNone, "performance_schema_accounts_size", "100"}, + {ScopeGlobal | ScopeSession, "max_error_count", "64"}, + {ScopeGlobal, "max_write_lock_count", "18446744073709551615"}, + {ScopeNone, "performance_schema_max_socket_instances", "322"}, + {ScopeNone, "performance_schema_max_table_instances", "12500"}, + {ScopeGlobal, "innodb_stats_persistent_sample_pages", "20"}, + {ScopeGlobal, "show_compatibility_56", ""}, + {ScopeGlobal, LogSlowSlaveStatements, "0"}, + {ScopeNone, "innodb_open_files", "2000"}, + {ScopeGlobal, "innodb_spin_wait_delay", "6"}, + {ScopeGlobal, "thread_cache_size", "9"}, + {ScopeGlobal, LogSlowAdminStatements, "0"}, + {ScopeNone, "innodb_checksums", "ON"}, + {ScopeNone, "hostname", ServerHostname}, + {ScopeGlobal | ScopeSession, "auto_increment_offset", "1"}, + {ScopeNone, "ft_stopword_file", "(built-in)"}, + {ScopeGlobal, "innodb_max_dirty_pages_pct_lwm", "0"}, + {ScopeGlobal, LogQueriesNotUsingIndexes, "0"}, + {ScopeSession, "timestamp", ""}, + {ScopeGlobal | ScopeSession, QueryCacheWlockInvalidate, "0"}, + {ScopeGlobal | ScopeSession, "sql_buffer_result", "OFF"}, + {ScopeGlobal | ScopeSession, "character_set_filesystem", "binary"}, + {ScopeGlobal | ScopeSession, "collation_database", mysql.DefaultCollationName}, + {ScopeGlobal | ScopeSession, AutoIncrementIncrement, "1"}, + {ScopeGlobal | ScopeSession, "max_heap_table_size", "16777216"}, + {ScopeGlobal | ScopeSession, "div_precision_increment", "4"}, + {ScopeGlobal, "innodb_lru_scan_depth", "1024"}, + {ScopeGlobal, "innodb_purge_rseg_truncate_frequency", ""}, + {ScopeGlobal | ScopeSession, SQLAutoIsNull, "0"}, + {ScopeNone, "innodb_api_enable_binlog", "0"}, + {ScopeGlobal | ScopeSession, "innodb_ft_user_stopword_table", ""}, + {ScopeNone, "server_id_bits", "32"}, + {ScopeGlobal, "innodb_log_checksum_algorithm", ""}, + {ScopeNone, "innodb_buffer_pool_load_at_startup", "1"}, + {ScopeGlobal | ScopeSession, 
"sort_buffer_size", "262144"}, + {ScopeGlobal, "innodb_flush_neighbors", "1"}, + {ScopeNone, "innodb_use_sys_malloc", "1"}, + {ScopeNone, "performance_schema_max_socket_classes", "10"}, + {ScopeNone, "performance_schema_max_stage_classes", "150"}, + {ScopeGlobal, "innodb_purge_batch_size", "300"}, + {ScopeNone, "have_profiling", "NO"}, + {ScopeGlobal, "slave_checkpoint_group", "512"}, + {ScopeGlobal | ScopeSession, "character_set_client", mysql.DefaultCharset}, + {ScopeNone, "slave_load_tmpdir", "/var/tmp/"}, + {ScopeGlobal, InnodbBufferPoolDumpNow, "0"}, + {ScopeGlobal, RelayLogPurge, "1"}, + {ScopeGlobal, "ndb_distribution", ""}, + {ScopeGlobal, "myisam_data_pointer_size", "6"}, + {ScopeGlobal, "ndb_optimization_delay", ""}, + {ScopeGlobal, "innodb_ft_num_word_optimize", "2000"}, + {ScopeGlobal | ScopeSession, "max_join_size", "18446744073709551615"}, + {ScopeNone, CoreFile, "0"}, + {ScopeGlobal | ScopeSession, "max_seeks_for_key", "18446744073709551615"}, + {ScopeNone, "innodb_log_buffer_size", "8388608"}, + {ScopeGlobal, "delayed_insert_timeout", "300"}, + {ScopeGlobal, "max_relay_log_size", "0"}, + {ScopeGlobal | ScopeSession, MaxSortLength, "1024"}, + {ScopeNone, "metadata_locks_hash_instances", "8"}, + {ScopeGlobal, "ndb_eventbuffer_free_percent", ""}, + {ScopeNone, "large_files_support", "1"}, + {ScopeGlobal, "binlog_max_flush_queue_time", "0"}, + {ScopeGlobal, "innodb_fill_factor", ""}, + {ScopeGlobal, "log_syslog_facility", ""}, + {ScopeNone, "innodb_ft_min_token_size", "3"}, + {ScopeGlobal | ScopeSession, "transaction_write_set_extraction", ""}, + {ScopeGlobal | ScopeSession, "ndb_blob_write_batch_bytes", ""}, + {ScopeGlobal, "automatic_sp_privileges", "1"}, + {ScopeGlobal, "innodb_flush_sync", ""}, + {ScopeNone, "performance_schema_events_statements_history_long_size", "10000"}, + {ScopeGlobal, "innodb_monitor_disable", ""}, + {ScopeNone, "innodb_doublewrite", "1"}, + {ScopeGlobal, "slave_parallel_type", ""}, + {ScopeNone, "log_bin_use_v1_row_events", "0"}, + {ScopeSession, "innodb_optimize_point_storage", ""}, + {ScopeNone, "innodb_api_disable_rowlock", "0"}, + {ScopeGlobal, "innodb_adaptive_flushing_lwm", "10"}, + {ScopeNone, "innodb_log_files_in_group", "2"}, + {ScopeGlobal, InnodbBufferPoolLoadNow, "0"}, + {ScopeNone, "performance_schema_max_rwlock_classes", "40"}, + {ScopeNone, "binlog_gtid_simple_recovery", "1"}, + {ScopeNone, Port, "4000"}, + {ScopeNone, "performance_schema_digests_size", "10000"}, + {ScopeGlobal | ScopeSession, Profiling, "0"}, + {ScopeNone, "lower_case_table_names", "2"}, + {ScopeSession, "rand_seed1", ""}, + {ScopeGlobal, "sha256_password_proxy_users", ""}, + {ScopeGlobal | ScopeSession, SQLQuoteShowCreate, "1"}, + {ScopeGlobal | ScopeSession, "binlogging_impossible_mode", "IGNORE_ERROR"}, + {ScopeGlobal | ScopeSession, QueryCacheSize, "1048576"}, + {ScopeGlobal, "innodb_stats_transient_sample_pages", "8"}, + {ScopeGlobal, InnodbStatsOnMetadata, "0"}, + {ScopeNone, "server_uuid", "00000000-0000-0000-0000-000000000000"}, + {ScopeNone, "open_files_limit", "5000"}, + {ScopeGlobal | ScopeSession, "ndb_force_send", ""}, + {ScopeNone, "skip_show_database", "0"}, + {ScopeGlobal, "log_timestamps", ""}, + {ScopeNone, "version_compile_machine", "x86_64"}, + {ScopeGlobal, "slave_parallel_workers", "0"}, + {ScopeGlobal, "event_scheduler", "OFF"}, + {ScopeGlobal | ScopeSession, "ndb_deferred_constraints", ""}, + {ScopeGlobal, "log_syslog_include_pid", ""}, + {ScopeSession, "last_insert_id", ""}, + {ScopeNone, "innodb_ft_cache_size", "8000000"}, + {ScopeNone, 
LogBin, "0"}, + {ScopeGlobal, InnodbDisableSortFileCache, "0"}, + {ScopeGlobal, "log_error_verbosity", ""}, + {ScopeNone, "performance_schema_hosts_size", "100"}, + {ScopeGlobal, "innodb_replication_delay", "0"}, + {ScopeGlobal, SlowQueryLog, "0"}, + {ScopeSession, "debug_sync", ""}, + {ScopeGlobal, InnodbStatsAutoRecalc, "1"}, + {ScopeGlobal | ScopeSession, "lc_messages", "en_US"}, + {ScopeGlobal | ScopeSession, "bulk_insert_buffer_size", "8388608"}, + {ScopeGlobal | ScopeSession, BinlogDirectNonTransactionalUpdates, "0"}, + {ScopeGlobal, "innodb_change_buffering", "all"}, + {ScopeGlobal | ScopeSession, SQLBigSelects, "1"}, + {ScopeGlobal | ScopeSession, CharacterSetResults, mysql.DefaultCharset}, + {ScopeGlobal, "innodb_max_purge_lag_delay", "0"}, + {ScopeGlobal | ScopeSession, "session_track_schema", ""}, + {ScopeGlobal, "innodb_io_capacity_max", "2000"}, + {ScopeGlobal, "innodb_autoextend_increment", "64"}, + {ScopeGlobal | ScopeSession, "binlog_format", "STATEMENT"}, + {ScopeGlobal | ScopeSession, "optimizer_trace", "enabled=off,one_line=off"}, + {ScopeGlobal | ScopeSession, "read_rnd_buffer_size", "262144"}, + {ScopeNone, "version_comment", "TiDB Server (Apache License 2.0), MySQL 5.7 compatible"}, + {ScopeGlobal | ScopeSession, NetWriteTimeout, "60"}, + {ScopeGlobal, InnodbBufferPoolLoadAbort, "0"}, + {ScopeGlobal | ScopeSession, TxnIsolation, "REPEATABLE-READ"}, + {ScopeGlobal | ScopeSession, TransactionIsolation, "REPEATABLE-READ"}, + {ScopeGlobal | ScopeSession, "collation_connection", mysql.DefaultCollationName}, + {ScopeGlobal, "rpl_semi_sync_master_timeout", ""}, + {ScopeGlobal | ScopeSession, "transaction_prealloc_size", "4096"}, + {ScopeNone, "slave_skip_errors", "OFF"}, + {ScopeNone, "performance_schema_setup_objects_size", "100"}, + {ScopeGlobal, "sync_relay_log", "10000"}, + {ScopeGlobal, "innodb_ft_result_cache_limit", "2000000000"}, + {ScopeNone, "innodb_sort_buffer_size", "1048576"}, + {ScopeGlobal, "innodb_ft_enable_diag_print", "OFF"}, + {ScopeNone, "thread_handling", "one-thread-per-connection"}, + {ScopeGlobal, "stored_program_cache", "256"}, + {ScopeNone, "performance_schema_max_mutex_instances", "15906"}, + {ScopeGlobal, "innodb_adaptive_max_sleep_delay", "150000"}, + {ScopeNone, "large_pages", "OFF"}, + {ScopeGlobal | ScopeSession, "session_track_system_variables", ""}, + {ScopeGlobal, "innodb_change_buffer_max_size", "25"}, + {ScopeGlobal, LogBinTrustFunctionCreators, "0"}, + {ScopeNone, "innodb_write_io_threads", "4"}, + {ScopeGlobal, "mysql_native_password_proxy_users", ""}, + {ScopeGlobal, serverReadOnly, "0"}, + {ScopeNone, "large_page_size", "0"}, + {ScopeNone, "table_open_cache_instances", "1"}, + {ScopeGlobal, InnodbStatsPersistent, "1"}, + {ScopeGlobal | ScopeSession, "session_track_state_change", ""}, + {ScopeNone, "optimizer_switch", "index_merge=on,index_merge_union=on,index_merge_sort_union=on,index_merge_intersection=on,engine_condition_pushdown=on,index_condition_pushdown=on,mrr=on,mrr_cost_based=on,block_nested_loop=on,batched_key_access=off,materialization=on,semijoin=on,loosescan=on,firstmatch=on,subquery_materialization_cost_based=on,use_index_extensions=on"}, + {ScopeGlobal, "delayed_queue_size", "1000"}, + {ScopeNone, "innodb_read_only", "0"}, + {ScopeNone, "datetime_format", "%Y-%m-%d %H:%i:%s"}, + {ScopeGlobal, "log_syslog", ""}, + {ScopeNone, "version", mysql.ServerVersion}, + {ScopeGlobal | ScopeSession, "transaction_alloc_block_size", "8192"}, + {ScopeGlobal, "sql_slave_skip_counter", "0"}, + {ScopeGlobal, "innodb_large_prefix", "OFF"}, 
+ {ScopeNone, "performance_schema_max_cond_classes", "80"}, + {ScopeGlobal, "innodb_io_capacity", "200"}, + {ScopeGlobal, "max_binlog_cache_size", "18446744073709547520"}, + {ScopeGlobal | ScopeSession, "ndb_index_stat_enable", ""}, + {ScopeGlobal, "executed_gtids_compression_period", ""}, + {ScopeNone, "time_format", "%H:%i:%s"}, + {ScopeGlobal | ScopeSession, OldAlterTable, "0"}, + {ScopeGlobal | ScopeSession, "long_query_time", "10.000000"}, + {ScopeNone, "innodb_use_native_aio", "0"}, + {ScopeGlobal, "log_throttle_queries_not_using_indexes", "0"}, + {ScopeNone, "locked_in_memory", "0"}, + {ScopeNone, "innodb_api_enable_mdl", "0"}, + {ScopeGlobal, "binlog_cache_size", "32768"}, + {ScopeGlobal, "innodb_compression_pad_pct_max", "50"}, + {ScopeGlobal, InnodbCommitConcurrency, "0"}, + {ScopeNone, "ft_min_word_len", "4"}, + {ScopeGlobal, EnforceGtidConsistency, "OFF"}, + {ScopeGlobal, SecureAuth, "1"}, + {ScopeNone, "max_tmp_tables", "32"}, + {ScopeGlobal, InnodbRandomReadAhead, "0"}, + {ScopeGlobal | ScopeSession, UniqueChecks, "1"}, + {ScopeGlobal, "internal_tmp_disk_storage_engine", ""}, + {ScopeGlobal | ScopeSession, "myisam_repair_threads", "1"}, + {ScopeGlobal, "ndb_eventbuffer_max_alloc", ""}, + {ScopeGlobal, "innodb_read_ahead_threshold", "56"}, + {ScopeGlobal, "key_cache_block_size", "1024"}, + {ScopeGlobal, "rpl_semi_sync_slave_enabled", ""}, + {ScopeNone, "ndb_recv_thread_cpu_mask", ""}, + {ScopeGlobal, "gtid_purged", ""}, + {ScopeGlobal, "max_binlog_stmt_cache_size", "18446744073709547520"}, + {ScopeGlobal | ScopeSession, "lock_wait_timeout", "31536000"}, + {ScopeGlobal | ScopeSession, "read_buffer_size", "131072"}, + {ScopeNone, "innodb_read_io_threads", "4"}, + {ScopeGlobal | ScopeSession, MaxSpRecursionDepth, "0"}, + {ScopeNone, "ignore_builtin_innodb", "0"}, + {ScopeGlobal, "rpl_semi_sync_master_enabled", ""}, + {ScopeGlobal, "slow_query_log_file", "/usr/local/mysql/data/localhost-slow.log"}, + {ScopeGlobal, "innodb_thread_sleep_delay", "10000"}, + {ScopeNone, "license", "Apache License 2.0"}, + {ScopeGlobal, "innodb_ft_aux_table", ""}, + {ScopeGlobal | ScopeSession, SQLWarnings, "0"}, + {ScopeGlobal | ScopeSession, KeepFilesOnCreate, "0"}, + {ScopeGlobal, "slave_preserve_commit_order", ""}, + {ScopeNone, "innodb_data_file_path", "ibdata1:12M:autoextend"}, + {ScopeNone, "performance_schema_setup_actors_size", "100"}, + {ScopeNone, "innodb_additional_mem_pool_size", "8388608"}, + {ScopeNone, "log_error", "/usr/local/mysql/data/localhost.err"}, + {ScopeGlobal, "slave_exec_mode", "STRICT"}, + {ScopeGlobal, "binlog_stmt_cache_size", "32768"}, + {ScopeNone, "relay_log_info_file", "relay-log.info"}, + {ScopeNone, "innodb_ft_total_cache_size", "640000000"}, + {ScopeNone, "performance_schema_max_rwlock_instances", "9102"}, + {ScopeGlobal, "table_open_cache", "2000"}, + {ScopeNone, "log_slave_updates", "0"}, + {ScopeNone, "performance_schema_events_stages_history_long_size", "10000"}, + {ScopeGlobal | ScopeSession, AutoCommit, "1"}, + {ScopeSession, "insert_id", ""}, + {ScopeGlobal | ScopeSession, "default_tmp_storage_engine", "InnoDB"}, + {ScopeGlobal | ScopeSession, "optimizer_search_depth", "62"}, + {ScopeGlobal, "max_points_in_geometry", ""}, + {ScopeGlobal, "innodb_stats_sample_pages", "8"}, + {ScopeGlobal | ScopeSession, "profiling_history_size", "15"}, + {ScopeGlobal | ScopeSession, "character_set_database", mysql.DefaultCharset}, + {ScopeNone, "have_symlink", "YES"}, + {ScopeGlobal | ScopeSession, "storage_engine", "InnoDB"}, + {ScopeGlobal | ScopeSession, "sql_log_off", 
"0"}, + // In MySQL, the default value of `explicit_defaults_for_timestamp` is `0`. + // But In TiDB, it's set to `1` to be consistent with TiDB timestamp behavior. + // See: https://github.com/pingcap/tidb/pull/6068 for details + {ScopeNone, "explicit_defaults_for_timestamp", "1"}, + {ScopeNone, "performance_schema_events_waits_history_size", "10"}, + {ScopeGlobal, "log_syslog_tag", ""}, + {ScopeGlobal | ScopeSession, TxReadOnly, "0"}, + {ScopeGlobal | ScopeSession, TransactionReadOnly, "0"}, + {ScopeGlobal, "rpl_semi_sync_master_wait_point", ""}, + {ScopeGlobal, "innodb_undo_log_truncate", ""}, + {ScopeSession, "innodb_create_intrinsic", ""}, + {ScopeGlobal, "gtid_executed_compression_period", ""}, + {ScopeGlobal, "ndb_log_empty_epochs", ""}, + {ScopeGlobal, MaxPreparedStmtCount, strconv.FormatInt(DefMaxPreparedStmtCount, 10)}, + {ScopeNone, "have_geometry", "YES"}, + {ScopeGlobal | ScopeSession, "optimizer_trace_max_mem_size", "16384"}, + {ScopeGlobal | ScopeSession, "net_retry_count", "10"}, + {ScopeSession, "ndb_table_no_logging", ""}, + {ScopeGlobal | ScopeSession, "optimizer_trace_features", "greedy_search=on,range_optimizer=on,dynamic_range=on,repeated_subselect=on"}, + {ScopeGlobal, "innodb_flush_log_at_trx_commit", "1"}, + {ScopeGlobal, "rewriter_enabled", ""}, + {ScopeGlobal, "query_cache_min_res_unit", "4096"}, + {ScopeGlobal | ScopeSession, "updatable_views_with_limit", "YES"}, + {ScopeGlobal | ScopeSession, "optimizer_prune_level", "1"}, + {ScopeGlobal, "slave_sql_verify_checksum", "1"}, + {ScopeGlobal | ScopeSession, "completion_type", "NO_CHAIN"}, + {ScopeGlobal, "binlog_checksum", "CRC32"}, + {ScopeNone, "report_port", "3306"}, + {ScopeGlobal | ScopeSession, ShowOldTemporals, "0"}, + {ScopeGlobal, "query_cache_limit", "1048576"}, + {ScopeGlobal, "innodb_buffer_pool_size", "134217728"}, + {ScopeGlobal, InnodbAdaptiveFlushing, "1"}, + {ScopeNone, "datadir", "/usr/local/mysql/data/"}, + {ScopeGlobal | ScopeSession, WaitTimeout, strconv.FormatInt(DefWaitTimeout, 10)}, + {ScopeGlobal, "innodb_monitor_enable", ""}, + {ScopeNone, "date_format", "%Y-%m-%d"}, + {ScopeGlobal, "innodb_buffer_pool_filename", "ib_buffer_pool"}, + {ScopeGlobal, "slow_launch_time", "2"}, + {ScopeGlobal, "slave_max_allowed_packet", "1073741824"}, + {ScopeGlobal | ScopeSession, "ndb_use_transactions", ""}, + {ScopeNone, "innodb_purge_threads", "1"}, + {ScopeGlobal, "innodb_concurrency_tickets", "5000"}, + {ScopeGlobal, "innodb_monitor_reset_all", ""}, + {ScopeNone, "performance_schema_users_size", "100"}, + {ScopeGlobal, "ndb_log_updated_only", ""}, + {ScopeNone, "basedir", "/usr/local/mysql"}, + {ScopeGlobal, "innodb_old_blocks_time", "1000"}, + {ScopeGlobal, "innodb_stats_method", "nulls_equal"}, + {ScopeGlobal | ScopeSession, InnodbLockWaitTimeout, strconv.FormatInt(DefInnodbLockWaitTimeout, 10)}, + {ScopeGlobal, LocalInFile, "1"}, + {ScopeGlobal | ScopeSession, "myisam_stats_method", "nulls_unequal"}, + {ScopeNone, "version_compile_os", "osx10.8"}, + {ScopeNone, "relay_log_recovery", "0"}, + {ScopeNone, "old", "0"}, + {ScopeGlobal | ScopeSession, InnodbTableLocks, "1"}, + {ScopeNone, PerformanceSchema, "0"}, + {ScopeNone, "myisam_recover_options", "OFF"}, + {ScopeGlobal | ScopeSession, NetBufferLength, "16384"}, + {ScopeGlobal, "rpl_semi_sync_master_wait_for_slave_count", ""}, + {ScopeGlobal | ScopeSession, "binlog_row_image", "FULL"}, + {ScopeNone, "innodb_locks_unsafe_for_binlog", "0"}, + {ScopeSession, "rbr_exec_mode", ""}, + {ScopeGlobal, "myisam_max_sort_file_size", "9223372036853727232"}, + 
{ScopeNone, "back_log", "80"}, + {ScopeNone, "lower_case_file_system", "1"}, + {ScopeGlobal, "rpl_semi_sync_master_wait_no_slave", ""}, + {ScopeGlobal | ScopeSession, GroupConcatMaxLen, "1024"}, + {ScopeSession, "pseudo_thread_id", ""}, + {ScopeNone, "socket", "/tmp/myssock"}, + {ScopeNone, "have_dynamic_loading", "YES"}, + {ScopeGlobal, "rewriter_verbose", ""}, + {ScopeGlobal, "innodb_undo_logs", "128"}, + {ScopeNone, "performance_schema_max_cond_instances", "3504"}, + {ScopeGlobal, "delayed_insert_limit", "100"}, + {ScopeGlobal, Flush, "0"}, + {ScopeGlobal | ScopeSession, "eq_range_index_dive_limit", "10"}, + {ScopeNone, "performance_schema_events_stages_history_size", "10"}, + {ScopeGlobal | ScopeSession, "character_set_connection", mysql.DefaultCharset}, + {ScopeGlobal, MyISAMUseMmap, "0"}, + {ScopeGlobal | ScopeSession, "ndb_join_pushdown", ""}, + {ScopeGlobal | ScopeSession, CharacterSetServer, mysql.DefaultCharset}, + {ScopeGlobal, "validate_password_special_char_count", "1"}, + {ScopeNone, "performance_schema_max_thread_instances", "402"}, + {ScopeGlobal, "slave_rows_search_algorithms", "TABLE_SCAN,INDEX_SCAN"}, + {ScopeGlobal | ScopeSession, "ndbinfo_show_hidden", ""}, + {ScopeGlobal | ScopeSession, "net_read_timeout", "30"}, + {ScopeNone, "innodb_page_size", "16384"}, + {ScopeGlobal | ScopeSession, MaxAllowedPacket, "67108864"}, + {ScopeNone, "innodb_log_file_size", "50331648"}, + {ScopeGlobal, "sync_relay_log_info", "10000"}, + {ScopeGlobal | ScopeSession, "optimizer_trace_limit", "1"}, + {ScopeNone, "innodb_ft_max_token_size", "84"}, + {ScopeGlobal, "validate_password_length", "8"}, + {ScopeGlobal, "ndb_log_binlog_index", ""}, + {ScopeGlobal, "innodb_api_bk_commit_interval", "5"}, + {ScopeNone, "innodb_undo_directory", "."}, + {ScopeNone, "bind_address", "*"}, + {ScopeGlobal, "innodb_sync_spin_loops", "30"}, + {ScopeGlobal | ScopeSession, SQLSafeUpdates, "0"}, + {ScopeNone, "tmpdir", "/var/tmp/"}, + {ScopeGlobal, "innodb_thread_concurrency", "0"}, + {ScopeGlobal, SlaveAllowBatching, "0"}, + {ScopeGlobal, "innodb_buffer_pool_dump_pct", ""}, + {ScopeGlobal | ScopeSession, "lc_time_names", "en_US"}, + {ScopeGlobal | ScopeSession, "max_statement_time", ""}, + {ScopeGlobal | ScopeSession, EndMakersInJSON, "0"}, + {ScopeGlobal, AvoidTemporalUpgrade, "0"}, + {ScopeGlobal, "key_cache_age_threshold", "300"}, + {ScopeGlobal, InnodbStatusOutput, "0"}, + {ScopeSession, "identity", ""}, + {ScopeGlobal | ScopeSession, "min_examined_row_limit", "0"}, + {ScopeGlobal, "sync_frm", "ON"}, + {ScopeGlobal, "innodb_online_alter_log_max_size", "134217728"}, + {ScopeSession, WarningCount, "0"}, + {ScopeSession, ErrorCount, "0"}, + {ScopeGlobal | ScopeSession, "information_schema_stats_expiry", "86400"}, + {ScopeGlobal, "thread_pool_size", "16"}, + /* TiDB specific variables */ + {ScopeSession, TiDBSnapshot, ""}, + {ScopeSession, TiDBOptAggPushDown, BoolToIntStr(DefOptAggPushDown)}, + {ScopeSession, TiDBOptWriteRowID, BoolToIntStr(DefOptWriteRowID)}, + {ScopeGlobal | ScopeSession, TiDBBuildStatsConcurrency, strconv.Itoa(DefBuildStatsConcurrency)}, + {ScopeGlobal | ScopeSession, TiDBDistSQLScanConcurrency, strconv.Itoa(DefDistSQLScanConcurrency)}, + {ScopeGlobal | ScopeSession, TiDBOptInSubqToJoinAndAgg, BoolToIntStr(DefOptInSubqToJoinAndAgg)}, + {ScopeGlobal | ScopeSession, TiDBOptCorrelationThreshold, strconv.FormatFloat(DefOptCorrelationThreshold, 'f', -1, 64)}, + {ScopeGlobal | ScopeSession, TiDBOptCorrelationExpFactor, strconv.Itoa(DefOptCorrelationExpFactor)}, + {ScopeGlobal | ScopeSession, 
TiDBOptCPUFactor, strconv.FormatFloat(DefOptCPUFactor, 'f', -1, 64)}, + {ScopeGlobal | ScopeSession, TiDBOptCopCPUFactor, strconv.FormatFloat(DefOptCopCPUFactor, 'f', -1, 64)}, + {ScopeGlobal | ScopeSession, TiDBOptNetworkFactor, strconv.FormatFloat(DefOptNetworkFactor, 'f', -1, 64)}, + {ScopeGlobal | ScopeSession, TiDBOptScanFactor, strconv.FormatFloat(DefOptScanFactor, 'f', -1, 64)}, + {ScopeGlobal | ScopeSession, TiDBOptDescScanFactor, strconv.FormatFloat(DefOptDescScanFactor, 'f', -1, 64)}, + {ScopeGlobal | ScopeSession, TiDBOptSeekFactor, strconv.FormatFloat(DefOptSeekFactor, 'f', -1, 64)}, + {ScopeGlobal | ScopeSession, TiDBOptMemoryFactor, strconv.FormatFloat(DefOptMemoryFactor, 'f', -1, 64)}, + {ScopeGlobal | ScopeSession, TiDBOptDiskFactor, strconv.FormatFloat(DefOptDiskFactor, 'f', -1, 64)}, + {ScopeGlobal | ScopeSession, TiDBOptConcurrencyFactor, strconv.FormatFloat(DefOptConcurrencyFactor, 'f', -1, 64)}, + {ScopeGlobal | ScopeSession, TiDBIndexLookupSize, strconv.Itoa(DefIndexLookupSize)}, + {ScopeGlobal | ScopeSession, TiDBIndexLookupConcurrency, strconv.Itoa(DefIndexLookupConcurrency)}, + {ScopeGlobal | ScopeSession, TiDBIndexLookupJoinConcurrency, strconv.Itoa(DefIndexLookupJoinConcurrency)}, + {ScopeGlobal | ScopeSession, TiDBIndexSerialScanConcurrency, strconv.Itoa(DefIndexSerialScanConcurrency)}, + {ScopeGlobal | ScopeSession, TiDBSkipUTF8Check, BoolToIntStr(DefSkipUTF8Check)}, + {ScopeSession, TiDBCurrentTS, strconv.Itoa(DefCurretTS)}, + {ScopeGlobal | ScopeSession, TiDBMaxChunkSize, strconv.Itoa(DefMaxChunkSize)}, + {ScopeGlobal | ScopeSession, TiDBInitChunkSize, strconv.Itoa(DefInitChunkSize)}, + {ScopeGlobal | ScopeSession, TiDBEnableCascadesPlanner, "0"}, + {ScopeSession, TxnIsolationOneShot, ""}, + {ScopeGlobal | ScopeSession, TiDBHashJoinConcurrency, strconv.Itoa(DefTiDBHashJoinConcurrency)}, + {ScopeGlobal | ScopeSession, TiDBProjectionConcurrency, strconv.Itoa(DefTiDBProjectionConcurrency)}, + {ScopeGlobal | ScopeSession, TiDBHashAggPartialConcurrency, strconv.Itoa(DefTiDBHashAggPartialConcurrency)}, + {ScopeGlobal | ScopeSession, TiDBHashAggFinalConcurrency, strconv.Itoa(DefTiDBHashAggFinalConcurrency)}, + {ScopeGlobal | ScopeSession, TiDBBackoffLockFast, strconv.Itoa(kv.DefBackoffLockFast)}, + {ScopeGlobal | ScopeSession, TiDBBackOffWeight, strconv.Itoa(kv.DefBackOffWeight)}, + {ScopeGlobal | ScopeSession, TiDBConstraintCheckInPlace, BoolToIntStr(DefTiDBConstraintCheckInPlace)}, + {ScopeGlobal | ScopeSession, TiDBEnableVectorizedExpression, BoolToIntStr(DefEnableVectorizedExpression)}, + {ScopeGlobal | ScopeSession, TiDBSkipIsolationLevelCheck, BoolToIntStr(DefTiDBSkipIsolationLevelCheck)}, + /* The following variable is defined as session scope but is actually server scope. 
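+ For example, TiDBGeneralLog below is registered with session scope, but it is backed by the
+ process-wide ProcessGeneralLog flag (see GetSessionOnlySysVars), so it effectively behaves as a
+ server-level setting.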
*/ + {ScopeSession, TiDBGeneralLog, strconv.Itoa(DefTiDBGeneralLog)}, + {ScopeSession, TiDBConfig, ""}, + {ScopeGlobal, TiDBDDLReorgWorkerCount, strconv.Itoa(DefTiDBDDLReorgWorkerCount)}, + {ScopeGlobal, TiDBDDLReorgBatchSize, strconv.Itoa(DefTiDBDDLReorgBatchSize)}, + {ScopeGlobal, TiDBDDLErrorCountLimit, strconv.Itoa(DefTiDBDDLErrorCountLimit)}, + {ScopeSession, TiDBDDLReorgPriority, "PRIORITY_LOW"}, + {ScopeGlobal, TiDBMaxDeltaSchemaCount, strconv.Itoa(DefTiDBMaxDeltaSchemaCount)}, + {ScopeSession, TiDBEnableRadixJoin, BoolToIntStr(DefTiDBUseRadixJoin)}, + {ScopeGlobal | ScopeSession, TiDBOptJoinReorderThreshold, strconv.Itoa(DefTiDBOptJoinReorderThreshold)}, + {ScopeSession, TiDBSlowQueryFile, ""}, + {ScopeGlobal, TiDBScatterRegion, BoolToIntStr(DefTiDBScatterRegion)}, + {ScopeSession, TiDBWaitSplitRegionFinish, BoolToIntStr(DefTiDBWaitSplitRegionFinish)}, + {ScopeSession, TiDBWaitSplitRegionTimeout, strconv.Itoa(DefWaitSplitRegionTimeout)}, + {ScopeGlobal | ScopeSession, TiDBEnableNoopFuncs, BoolToIntStr(DefTiDBEnableNoopFuncs)}, + {ScopeSession, TiDBReplicaRead, "leader"}, + {ScopeSession, TiDBAllowRemoveAutoInc, BoolToIntStr(DefTiDBAllowRemoveAutoInc)}, +} + +// SynonymsSysVariables is synonyms of system variables. +var SynonymsSysVariables = map[string][]string{} + +func addSynonymsSysVariables(synonyms ...string) { + for _, s := range synonyms { + SynonymsSysVariables[s] = synonyms + } +} + +func initSynonymsSysVariables() { + addSynonymsSysVariables(TxnIsolation, TransactionIsolation) + addSynonymsSysVariables(TxReadOnly, TransactionReadOnly) +} + +// SetNamesVariables is the system variable names related to set names statements. +var SetNamesVariables = []string{ + "character_set_client", + "character_set_connection", + "character_set_results", +} + +const ( + // CharacterSetConnection is the name for character_set_connection system variable. + CharacterSetConnection = "character_set_connection" + // CollationConnection is the name for collation_connection system variable. + CollationConnection = "collation_connection" + // CharsetDatabase is the name for character_set_database system variable. + CharsetDatabase = "character_set_database" + // CollationDatabase is the name for collation_database system variable. + CollationDatabase = "collation_database" + // GeneralLog is the name for 'general_log' system variable. + GeneralLog = "general_log" + // AvoidTemporalUpgrade is the name for 'avoid_temporal_upgrade' system variable. + AvoidTemporalUpgrade = "avoid_temporal_upgrade" + // MaxPreparedStmtCount is the name for 'max_prepared_stmt_count' system variable. + MaxPreparedStmtCount = "max_prepared_stmt_count" + // BigTables is the name for 'big_tables' system variable. + BigTables = "big_tables" + // CheckProxyUsers is the name for 'check_proxy_users' system variable. + CheckProxyUsers = "check_proxy_users" + // CoreFile is the name for 'core_file' system variable. + CoreFile = "core_file" + // DefaultWeekFormat is the name for 'default_week_format' system variable. + DefaultWeekFormat = "default_week_format" + // GroupConcatMaxLen is the name for 'group_concat_max_len' system variable. + GroupConcatMaxLen = "group_concat_max_len" + // DelayKeyWrite is the name for 'delay_key_write' system variable. + DelayKeyWrite = "delay_key_write" + // EndMakersInJSON is the name for 'end_markers_in_json' system variable. + EndMakersInJSON = "end_markers_in_json" + // InnodbCommitConcurrency is the name for 'innodb_commit_concurrency' system variable. 
+ InnodbCommitConcurrency = "innodb_commit_concurrency" + // InnodbFastShutdown is the name for 'innodb_fast_shutdown' system variable. + InnodbFastShutdown = "innodb_fast_shutdown" + // InnodbLockWaitTimeout is the name for 'innodb_lock_wait_timeout' system variable. + InnodbLockWaitTimeout = "innodb_lock_wait_timeout" + // SQLLogBin is the name for 'sql_log_bin' system variable. + SQLLogBin = "sql_log_bin" + // LogBin is the name for 'log_bin' system variable. + LogBin = "log_bin" + // MaxSortLength is the name for 'max_sort_length' system variable. + MaxSortLength = "max_sort_length" + // MaxSpRecursionDepth is the name for 'max_sp_recursion_depth' system variable. + MaxSpRecursionDepth = "max_sp_recursion_depth" + // MaxUserConnections is the name for 'max_user_connections' system variable. + MaxUserConnections = "max_user_connections" + // OfflineMode is the name for 'offline_mode' system variable. + OfflineMode = "offline_mode" + // InteractiveTimeout is the name for 'interactive_timeout' system variable. + InteractiveTimeout = "interactive_timeout" + // FlushTime is the name for 'flush_time' system variable. + FlushTime = "flush_time" + // PseudoSlaveMode is the name for 'pseudo_slave_mode' system variable. + PseudoSlaveMode = "pseudo_slave_mode" + // LowPriorityUpdates is the name for 'low_priority_updates' system variable. + LowPriorityUpdates = "low_priority_updates" + // SessionTrackGtids is the name for 'session_track_gtids' system variable. + SessionTrackGtids = "session_track_gtids" + // OldPasswords is the name for 'old_passwords' system variable. + OldPasswords = "old_passwords" + // MaxConnections is the name for 'max_connections' system variable. + MaxConnections = "max_connections" + // SkipNameResolve is the name for 'skip_name_resolve' system variable. + SkipNameResolve = "skip_name_resolve" + // ForeignKeyChecks is the name for 'foreign_key_checks' system variable. + ForeignKeyChecks = "foreign_key_checks" + // SQLSafeUpdates is the name for 'sql_safe_updates' system variable. + SQLSafeUpdates = "sql_safe_updates" + // WarningCount is the name for 'warning_count' system variable. + WarningCount = "warning_count" + // ErrorCount is the name for 'error_count' system variable. + ErrorCount = "error_count" + // SQLSelectLimit is the name for 'sql_select_limit' system variable. + SQLSelectLimit = "sql_select_limit" + // MaxConnectErrors is the name for 'max_connect_errors' system variable. + MaxConnectErrors = "max_connect_errors" + // TableDefinitionCache is the name for 'table_definition_cache' system variable. + TableDefinitionCache = "table_definition_cache" + // TmpTableSize is the name for 'tmp_table_size' system variable. + TmpTableSize = "tmp_table_size" + // ConnectTimeout is the name for 'connect_timeout' system variable. + ConnectTimeout = "connect_timeout" + // SyncBinlog is the name for 'sync_binlog' system variable. + SyncBinlog = "sync_binlog" + // BlockEncryptionMode is the name for 'block_encryption_mode' system variable. + BlockEncryptionMode = "block_encryption_mode" + // WaitTimeout is the name for 'wait_timeout' system variable. + WaitTimeout = "wait_timeout" + // ValidatePasswordNumberCount is the name of 'validate_password_number_count' system variable. + ValidatePasswordNumberCount = "validate_password_number_count" + // ValidatePasswordLength is the name of 'validate_password_length' system variable. + ValidatePasswordLength = "validate_password_length" + // Port is the name for 'port' system variable. 
+ Port = "port" + // DataDir is the name for 'datadir' system variable. + DataDir = "datadir" + // Profiling is the name for 'Profiling' system variable. + Profiling = "profiling" + // Socket is the name for 'socket' system variable. + Socket = "socket" + // BinlogOrderCommits is the name for 'binlog_order_commits' system variable. + BinlogOrderCommits = "binlog_order_commits" + // MasterVerifyChecksum is the name for 'master_verify_checksum' system variable. + MasterVerifyChecksum = "master_verify_checksum" + // ValidatePasswordCheckUserName is the name for 'validate_password_check_user_name' system variable. + ValidatePasswordCheckUserName = "validate_password_check_user_name" + // SuperReadOnly is the name for 'super_read_only' system variable. + SuperReadOnly = "super_read_only" + // SQLNotes is the name for 'sql_notes' system variable. + SQLNotes = "sql_notes" + // QueryCacheType is the name for 'query_cache_type' system variable. + QueryCacheType = "query_cache_type" + // SlaveCompressedProtocol is the name for 'slave_compressed_protocol' system variable. + SlaveCompressedProtocol = "slave_compressed_protocol" + // BinlogRowQueryLogEvents is the name for 'binlog_rows_query_log_events' system variable. + BinlogRowQueryLogEvents = "binlog_rows_query_log_events" + // LogSlowSlaveStatements is the name for 'log_slow_slave_statements' system variable. + LogSlowSlaveStatements = "log_slow_slave_statements" + // LogSlowAdminStatements is the name for 'log_slow_admin_statements' system variable. + LogSlowAdminStatements = "log_slow_admin_statements" + // LogQueriesNotUsingIndexes is the name for 'log_queries_not_using_indexes' system variable. + LogQueriesNotUsingIndexes = "log_queries_not_using_indexes" + // QueryCacheWlockInvalidate is the name for 'query_cache_wlock_invalidate' system variable. + QueryCacheWlockInvalidate = "query_cache_wlock_invalidate" + // SQLAutoIsNull is the name for 'sql_auto_is_null' system variable. + SQLAutoIsNull = "sql_auto_is_null" + // RelayLogPurge is the name for 'relay_log_purge' system variable. + RelayLogPurge = "relay_log_purge" + // AutomaticSpPrivileges is the name for 'automatic_sp_privileges' system variable. + AutomaticSpPrivileges = "automatic_sp_privileges" + // SQLQuoteShowCreate is the name for 'sql_quote_show_create' system variable. + SQLQuoteShowCreate = "sql_quote_show_create" + // SlowQueryLog is the name for 'slow_query_log' system variable. + SlowQueryLog = "slow_query_log" + // BinlogDirectNonTransactionalUpdates is the name for 'binlog_direct_non_transactional_updates' system variable. + BinlogDirectNonTransactionalUpdates = "binlog_direct_non_transactional_updates" + // SQLBigSelects is the name for 'sql_big_selects' system variable. + SQLBigSelects = "sql_big_selects" + // LogBinTrustFunctionCreators is the name for 'log_bin_trust_function_creators' system variable. + LogBinTrustFunctionCreators = "log_bin_trust_function_creators" + // OldAlterTable is the name for 'old_alter_table' system variable. + OldAlterTable = "old_alter_table" + // EnforceGtidConsistency is the name for 'enforce_gtid_consistency' system variable. + EnforceGtidConsistency = "enforce_gtid_consistency" + // SecureAuth is the name for 'secure_auth' system variable. + SecureAuth = "secure_auth" + // UniqueChecks is the name for 'unique_checks' system variable. + UniqueChecks = "unique_checks" + // SQLWarnings is the name for 'sql_warnings' system variable. + SQLWarnings = "sql_warnings" + // AutoCommit is the name for 'autocommit' system variable. 
+ AutoCommit = "autocommit" + // KeepFilesOnCreate is the name for 'keep_files_on_create' system variable. + KeepFilesOnCreate = "keep_files_on_create" + // ShowOldTemporals is the name for 'show_old_temporals' system variable. + ShowOldTemporals = "show_old_temporals" + // LocalInFile is the name for 'local_infile' system variable. + LocalInFile = "local_infile" + // PerformanceSchema is the name for 'performance_schema' system variable. + PerformanceSchema = "performance_schema" + // Flush is the name for 'flush' system variable. + Flush = "flush" + // SlaveAllowBatching is the name for 'slave_allow_batching' system variable. + SlaveAllowBatching = "slave_allow_batching" + // MyISAMUseMmap is the name for 'myisam_use_mmap' system variable. + MyISAMUseMmap = "myisam_use_mmap" + // InnodbFilePerTable is the name for 'innodb_file_per_table' system variable. + InnodbFilePerTable = "innodb_file_per_table" + // InnodbLogCompressedPages is the name for 'innodb_log_compressed_pages' system variable. + InnodbLogCompressedPages = "innodb_log_compressed_pages" + // InnodbPrintAllDeadlocks is the name for 'innodb_print_all_deadlocks' system variable. + InnodbPrintAllDeadlocks = "innodb_print_all_deadlocks" + // InnodbStrictMode is the name for 'innodb_strict_mode' system variable. + InnodbStrictMode = "innodb_strict_mode" + // InnodbCmpPerIndexEnabled is the name for 'innodb_cmp_per_index_enabled' system variable. + InnodbCmpPerIndexEnabled = "innodb_cmp_per_index_enabled" + // InnodbBufferPoolDumpAtShutdown is the name for 'innodb_buffer_pool_dump_at_shutdown' system variable. + InnodbBufferPoolDumpAtShutdown = "innodb_buffer_pool_dump_at_shutdown" + // InnodbAdaptiveHashIndex is the name for 'innodb_adaptive_hash_index' system variable. + InnodbAdaptiveHashIndex = "innodb_adaptive_hash_index" + // InnodbFtEnableStopword is the name for 'innodb_ft_enable_stopword' system variable. + InnodbFtEnableStopword = "innodb_ft_enable_stopword" + // InnodbSupportXA is the name for 'innodb_support_xa' system variable. + InnodbSupportXA = "innodb_support_xa" + // InnodbOptimizeFullTextOnly is the name for 'innodb_optimize_fulltext_only' system variable. + InnodbOptimizeFullTextOnly = "innodb_optimize_fulltext_only" + // InnodbStatusOutputLocks is the name for 'innodb_status_output_locks' system variable. + InnodbStatusOutputLocks = "innodb_status_output_locks" + // InnodbBufferPoolDumpNow is the name for 'innodb_buffer_pool_dump_now' system variable. + InnodbBufferPoolDumpNow = "innodb_buffer_pool_dump_now" + // InnodbBufferPoolLoadNow is the name for 'innodb_buffer_pool_load_now' system variable. + InnodbBufferPoolLoadNow = "innodb_buffer_pool_load_now" + // InnodbStatsOnMetadata is the name for 'innodb_stats_on_metadata' system variable. + InnodbStatsOnMetadata = "innodb_stats_on_metadata" + // InnodbDisableSortFileCache is the name for 'innodb_disable_sort_file_cache' system variable. + InnodbDisableSortFileCache = "innodb_disable_sort_file_cache" + // InnodbStatsAutoRecalc is the name for 'innodb_stats_auto_recalc' system variable. + InnodbStatsAutoRecalc = "innodb_stats_auto_recalc" + // InnodbBufferPoolLoadAbort is the name for 'innodb_buffer_pool_load_abort' system variable. + InnodbBufferPoolLoadAbort = "innodb_buffer_pool_load_abort" + // InnodbStatsPersistent is the name for 'innodb_stats_persistent' system variable. + InnodbStatsPersistent = "innodb_stats_persistent" + // InnodbRandomReadAhead is the name for 'innodb_random_read_ahead' system variable. 
+ InnodbRandomReadAhead = "innodb_random_read_ahead" + // InnodbAdaptiveFlushing is the name for 'innodb_adaptive_flushing' system variable. + InnodbAdaptiveFlushing = "innodb_adaptive_flushing" + // InnodbTableLocks is the name for 'innodb_table_locks' system variable. + InnodbTableLocks = "innodb_table_locks" + // InnodbStatusOutput is the name for 'innodb_status_output' system variable. + InnodbStatusOutput = "innodb_status_output" + + // NetBufferLength is the name for 'net_buffer_length' system variable. + NetBufferLength = "net_buffer_length" + // QueryCacheSize is the name of 'query_cache_size' system variable. + QueryCacheSize = "query_cache_size" + // TxReadOnly is the name of 'tx_read_only' system variable. + TxReadOnly = "tx_read_only" + // TransactionReadOnly is the name of 'transaction_read_only' system variable. + TransactionReadOnly = "transaction_read_only" + // CharacterSetServer is the name of 'character_set_server' system variable. + CharacterSetServer = "character_set_server" + // AutoIncrementIncrement is the name of 'auto_increment_increment' system variable. + AutoIncrementIncrement = "auto_increment_increment" + // InitConnect is the name of 'init_connect' system variable. + InitConnect = "init_connect" + // CollationServer is the name of 'collation_server' variable. + CollationServer = "collation_server" + // NetWriteTimeout is the name of 'net_write_timeout' variable. + NetWriteTimeout = "net_write_timeout" + // ThreadPoolSize is the name of 'thread_pool_size' variable. + ThreadPoolSize = "thread_pool_size" +) + +// GlobalVarAccessor is the interface for accessing global scope system and status variables. +type GlobalVarAccessor interface { + // GetAllSysVars gets all the global system variable values. + GetAllSysVars() (map[string]string, error) + // GetGlobalSysVar gets the global system variable value for name. + GetGlobalSysVar(name string) (string, error) + // SetGlobalSysVar sets the global system variable name to value. + SetGlobalSysVar(name string, value string) error +} diff --git a/sessionctx/variable/sysvar_test.go b/sessionctx/variable/sysvar_test.go new file mode 100644 index 0000000..85111a7 --- /dev/null +++ b/sessionctx/variable/sysvar_test.go @@ -0,0 +1,55 @@ +// Copyright 2015 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package variable + +import ( + "testing" + + .
"github.com/pingcap/check" + "github.com/pingcap/tidb/parser/mysql" + "github.com/pingcap/tidb/parser/terror" +) + +func TestT(t *testing.T) { + CustomVerboseFlag = true + TestingT(t) +} + +var _ = Suite(&testSysVarSuite{}) + +type testSysVarSuite struct { +} + +func (*testSysVarSuite) TestBoolToInt32(c *C) { + c.Assert(BoolToInt32(true), Equals, int32(1)) + c.Assert(BoolToInt32(false), Equals, int32(0)) +} + +func (*testSysVarSuite) TestError(c *C) { + kvErrs := []*terror.Error{ + ErrUnsupportedValueForVar, + ErrUnknownSystemVar, + ErrIncorrectScope, + ErrUnknownTimeZone, + ErrReadOnly, + ErrWrongValueForVar, + ErrWrongTypeForVar, + ErrTruncatedWrongValue, + ErrMaxPreparedStmtCountReached, + ErrUnsupportedIsolationLevel, + } + for _, err := range kvErrs { + c.Assert(err.ToSQLError().Code != mysql.ErrUnknown, IsTrue) + } +} diff --git a/sessionctx/variable/tidb_vars.go b/sessionctx/variable/tidb_vars.go new file mode 100644 index 0000000..fe6e90e --- /dev/null +++ b/sessionctx/variable/tidb_vars.go @@ -0,0 +1,282 @@ +// Copyright 2017 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package variable + +import ( + "os" +) + +/* + Steps to add a new TiDB specific system variable: + + 1. Add a new variable name with comment in this file. + 2. Add the default value of the new variable in this file. + 3. Add SysVar instance in 'defaultSysVars' slice with the default value. + 4. Add a field in `SessionVars`. + 5. Update the `NewSessionVars` function to set the field to its default value. + 6. Update the `variable.SetSessionSystemVar` function to use the new value when the SET statement is executed. + 7. If it is a global variable, add it in `session.loadCommonGlobalVarsSQL`. + 8. Update ValidateSetSystemVar if the variable's value needs to be validated. + 9. Use this variable to control the behavior in code. +*/ + +// TiDB system variable names that are only in session scope. +const ( + // tidb_snapshot is used for reading history data, the default value is an empty string. + // The value can be a datetime string like '2017-11-11 20:20:20' or a tso string. When this variable is set, the session reads history data of that time. + TiDBSnapshot = "tidb_snapshot" + + // tidb_opt_agg_push_down is used to enable/disable the optimizer rule of aggregation push down. + TiDBOptAggPushDown = "tidb_opt_agg_push_down" + + // tidb_opt_write_row_id is used to enable/disable the operations of insert, replace and update to _tidb_rowid. + TiDBOptWriteRowID = "tidb_opt_write_row_id" + + // TiDBCurrentTS is used to get the current transaction timestamp. + // It is read-only. + TiDBCurrentTS = "tidb_current_ts" + + // tidb_config is a read-only variable that shows the config of the current server. + TiDBConfig = "tidb_config" + + // tidb_general_log is used to log every query in the server at info level. + TiDBGeneralLog = "tidb_general_log" + + // tidb_skip_isolation_level_check is used to control whether to return an error when an unsupported transaction + // isolation level is set.
+ TiDBSkipIsolationLevelCheck = "tidb_skip_isolation_level_check" + + // TiDBReplicaRead is used for reading data from replicas, such as followers. + TiDBReplicaRead = "tidb_replica_read" + + // TiDBAllowRemoveAutoInc indicates whether a user can drop the auto_increment column attribute or not. + TiDBAllowRemoveAutoInc = "tidb_allow_remove_auto_inc" +) + +// TiDB system variable names that are in both session and global scope. +const ( + // tidb_build_stats_concurrency is used to speed up the ANALYZE statement. When a table has multiple indices, + // those indices can be scanned concurrently, at the cost of a higher system performance impact. + TiDBBuildStatsConcurrency = "tidb_build_stats_concurrency" + + // tidb_distsql_scan_concurrency is used to set the concurrency of a distsql scan task. + // A distsql scan task can be a table scan or an index scan, which may be distributed to many TiKV nodes. + // Higher concurrency may reduce latency, but at the cost of higher memory usage and system performance impact. + // If the query has a LIMIT clause, high concurrency makes the system do much more work than needed. + TiDBDistSQLScanConcurrency = "tidb_distsql_scan_concurrency" + + // tidb_opt_insubquery_to_join_and_agg is used to enable/disable the optimizer rule of rewriting IN subqueries. + TiDBOptInSubqToJoinAndAgg = "tidb_opt_insubq_to_join_and_agg" + + // tidb_opt_correlation_threshold is a guard to enable row count estimation using column order correlation. + TiDBOptCorrelationThreshold = "tidb_opt_correlation_threshold" + + // tidb_opt_correlation_exp_factor is an exponential factor to control the heuristic approach when tidb_opt_correlation_threshold is not satisfied. + TiDBOptCorrelationExpFactor = "tidb_opt_correlation_exp_factor" + + // tidb_opt_cpu_factor is the CPU cost of processing one expression for one row. + TiDBOptCPUFactor = "tidb_opt_cpu_factor" + // tidb_opt_copcpu_factor is the CPU cost of processing one expression for one row in coprocessor. + TiDBOptCopCPUFactor = "tidb_opt_copcpu_factor" + // tidb_opt_network_factor is the network cost of transferring 1 byte of data. + TiDBOptNetworkFactor = "tidb_opt_network_factor" + // tidb_opt_scan_factor is the IO cost of scanning 1 byte of data on TiKV. + TiDBOptScanFactor = "tidb_opt_scan_factor" + // tidb_opt_desc_factor is the IO cost of scanning 1 byte of data on TiKV in descending order. + TiDBOptDescScanFactor = "tidb_opt_desc_factor" + // tidb_opt_seek_factor is the IO cost of seeking the start value in a range on TiKV. + TiDBOptSeekFactor = "tidb_opt_seek_factor" + // tidb_opt_memory_factor is the memory cost of storing one tuple. + TiDBOptMemoryFactor = "tidb_opt_memory_factor" + // tidb_opt_disk_factor is the IO cost of reading/writing one byte to temporary disk. + TiDBOptDiskFactor = "tidb_opt_disk_factor" + // tidb_opt_concurrency_factor is the CPU cost of one additional goroutine. + TiDBOptConcurrencyFactor = "tidb_opt_concurrency_factor" + + // tidb_index_lookup_size is used for the index lookup executor. + // The index lookup executor first scans a batch of handles from an index, then uses those handles to look up the table + // rows; this value controls how many handles go into a batch for one lookup task. + // A small value sends more RPCs to TiKV and consumes more system resources. + // A large value may do more work than needed if the query has a limit. + TiDBIndexLookupSize = "tidb_index_lookup_size" + + // tidb_index_lookup_concurrency is used for the index lookup executor.
+ // A lookup task may have at most 'tidb_index_lookup_size' handles, and the handles may be distributed + // across many TiKV nodes; multiple index lookup tasks are executed concurrently to reduce the time + // spent waiting for a task to finish. + // Setting this value higher may reduce latency but consumes more system resources. + TiDBIndexLookupConcurrency = "tidb_index_lookup_concurrency" + + // tidb_index_lookup_join_concurrency is used for the index lookup join executor. + // IndexLookUpJoin starts "tidb_index_lookup_join_concurrency" inner workers + // to fetch inner rows and join the matched (outer, inner) row pairs. + TiDBIndexLookupJoinConcurrency = "tidb_index_lookup_join_concurrency" + + // tidb_index_serial_scan_concurrency is used for controlling the concurrency of the index scan operation + // when we need to keep the data output order the same as the order of the index data. + TiDBIndexSerialScanConcurrency = "tidb_index_serial_scan_concurrency" + + // TiDBMaxChunkSize is used to control the max chunk size during query execution. + TiDBMaxChunkSize = "tidb_max_chunk_size" + + // TiDBInitChunkSize is used to control the init chunk size during query execution. + TiDBInitChunkSize = "tidb_init_chunk_size" + + // tidb_enable_cascades_planner is used to control whether to enable the cascades planner. + TiDBEnableCascadesPlanner = "tidb_enable_cascades_planner" + + // tidb_skip_utf8_check skips the UTF8 validation process. Validating UTF8 has a performance cost; if we can make + // sure the input string values are valid, the check can be skipped. + TiDBSkipUTF8Check = "tidb_skip_utf8_check" + + // tidb_hash_join_concurrency is used for the hash join executor. + // The hash join outer executor starts multiple concurrent join workers to probe the hash table. + TiDBHashJoinConcurrency = "tidb_hash_join_concurrency" + + // tidb_projection_concurrency is used for the projection operator. + // This variable controls the number of workers for the projection operator. + TiDBProjectionConcurrency = "tidb_projection_concurrency" + + // tidb_hashagg_partial_concurrency is used for the hash agg executor. + // The hash agg executor starts multiple concurrent partial workers to do the partial aggregation work. + TiDBHashAggPartialConcurrency = "tidb_hashagg_partial_concurrency" + + // tidb_hashagg_final_concurrency is used for the hash agg executor. + // The hash agg executor starts multiple concurrent final workers to do the final aggregation work. + TiDBHashAggFinalConcurrency = "tidb_hashagg_final_concurrency" + + // tidb_backoff_lock_fast is used for the TiKV backoff base time in milliseconds. + TiDBBackoffLockFast = "tidb_backoff_lock_fast" + + // tidb_backoff_weight is used to control the max back off time in TiDB. + // The default maximum back off time is a small value. + // BackOffWeight multiplies it to let the user adjust the maximum time for retrying. + // Only positive integers can be accepted, which means that the maximum back off time can only grow. + TiDBBackOffWeight = "tidb_backoff_weight" + + // tidb_ddl_reorg_worker_cnt defines the number of DDL reorg workers. + TiDBDDLReorgWorkerCount = "tidb_ddl_reorg_worker_cnt" + + // tidb_ddl_reorg_batch_size defines the transaction batch size of DDL reorg workers. + TiDBDDLReorgBatchSize = "tidb_ddl_reorg_batch_size" + + // tidb_ddl_error_count_limit defines the limit on the DDL error count. + TiDBDDLErrorCountLimit = "tidb_ddl_error_count_limit" + + // tidb_ddl_reorg_priority defines the operation priority of adding indices.
+ // It can be: PRIORITY_LOW, PRIORITY_NORMAL, or PRIORITY_HIGH. + TiDBDDLReorgPriority = "tidb_ddl_reorg_priority" + + // tidb_max_delta_schema_count defines the max length of deltaSchemaInfos. + // deltaSchemaInfos is a queue that maintains the history of schema changes. + TiDBMaxDeltaSchemaCount = "tidb_max_delta_schema_count" + + // tidb_scatter_region will scatter the regions for DDLs when it is ON. + TiDBScatterRegion = "tidb_scatter_region" + + // TiDBWaitSplitRegionFinish defines whether the split region behaviour is sync or async. + TiDBWaitSplitRegionFinish = "tidb_wait_split_region_finish" + + // TiDBWaitSplitRegionTimeout is used to set the back off time of the split and scatter region operations. + TiDBWaitSplitRegionTimeout = "tidb_wait_split_region_timeout" + + // tidb_enable_radix_join indicates whether to use the radix hash join algorithm to execute + // HashJoin. + TiDBEnableRadixJoin = "tidb_enable_radix_join" + + // tidb_constraint_check_in_place indicates whether to check constraints while the SQL statement is executing. + // It could hurt the performance of bulk inserts when it is ON. + TiDBConstraintCheckInPlace = "tidb_constraint_check_in_place" + + // tidb_enable_vectorized_expression is used to control whether to enable vectorized expression evaluation. + TiDBEnableVectorizedExpression = "tidb_enable_vectorized_expression" + + // TiDBOptJoinReorderThreshold defines the threshold below which + // a more time-consuming algorithm is chosen to calculate the join order. + TiDBOptJoinReorderThreshold = "tidb_opt_join_reorder_threshold" + + // TiDBSlowQueryFile indicates which slow query log file the SLOW_QUERY table parses. + TiDBSlowQueryFile = "tidb_slow_query_file" + + // TiDBEnableNoopFuncs, when set to true, enables no-op functions (like get_lock and release_lock). + TiDBEnableNoopFuncs = "tidb_enable_noop_functions" +) + +// Default TiDB system variable values. +const ( + DefHostname = "localhost" + DefIndexLookupConcurrency = 4 + DefIndexLookupJoinConcurrency = 4 + DefIndexSerialScanConcurrency = 1 + DefIndexLookupSize = 20000 + DefDistSQLScanConcurrency = 15 + DefBuildStatsConcurrency = 4 + DefSkipUTF8Check = false + DefOptAggPushDown = false + DefOptWriteRowID = false + DefOptCorrelationThreshold = 0.9 + DefOptCorrelationExpFactor = 1 + DefOptCPUFactor = 3.0 + DefOptCopCPUFactor = 3.0 + DefOptNetworkFactor = 1.0 + DefOptScanFactor = 1.5 + DefOptDescScanFactor = 3.0 + DefOptSeekFactor = 20.0 + DefOptMemoryFactor = 0.001 + DefOptDiskFactor = 1.5 + DefOptConcurrencyFactor = 3.0 + DefOptInSubqToJoinAndAgg = true + DefCurretTS = 0 + DefInitChunkSize = 32 + DefMaxChunkSize = 1024 + DefMaxPreparedStmtCount = -1 + DefWaitTimeout = 0 + DefTiDBGeneralLog = 0 + DefTiDBRetryLimit = 10 + DefTiDBDisableTxnAutoRetry = true + DefTiDBConstraintCheckInPlace = false + DefTiDBHashJoinConcurrency = 5 + DefTiDBProjectionConcurrency = 4 + DefTiDBDDLReorgWorkerCount = 4 + DefTiDBDDLReorgBatchSize = 256 + DefTiDBDDLErrorCountLimit = 512 + DefTiDBMaxDeltaSchemaCount = 1024 + DefTiDBHashAggPartialConcurrency = 4 + DefTiDBHashAggFinalConcurrency = 4 + DefTiDBUseRadixJoin = false + DefEnableVectorizedExpression = true + DefTiDBOptJoinReorderThreshold = 0 + DefTiDBSkipIsolationLevelCheck = false + DefTiDBScatterRegion = false + DefTiDBWaitSplitRegionFinish = true + DefWaitSplitRegionTimeout = 300 // 300s + DefTiDBEnableNoopFuncs = false + DefTiDBAllowRemoveAutoInc = false + DefInnodbLockWaitTimeout = 50 // 50s +) + +// Process global variables.
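+// They are shared by every session in the server process; most of them are read and updated through
+// atomic operations (see the helpers in varsutil.go, such as GetDDLReorgBatchSize) rather than
+// accessed directly.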
+var ( + ProcessGeneralLog uint32 + ddlReorgWorkerCounter int32 = DefTiDBDDLReorgWorkerCount + maxDDLReorgWorkerCount int32 = 128 + ddlReorgBatchSize int32 = DefTiDBDDLReorgBatchSize + ddlErrorCountlimit int64 = DefTiDBDDLErrorCountLimit + maxDeltaSchemaCount int64 = DefTiDBMaxDeltaSchemaCount + // Export for testing. + MaxDDLReorgBatchSize int32 = 10240 + MinDDLReorgBatchSize int32 = 32 + ServerHostname, _ = os.Hostname() + MaxOfMaxAllowedPacket uint64 = 1073741824 +) diff --git a/sessionctx/variable/varsutil.go b/sessionctx/variable/varsutil.go new file mode 100644 index 0000000..dbc81f3 --- /dev/null +++ b/sessionctx/variable/varsutil.go @@ -0,0 +1,589 @@ +// Copyright 2016 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package variable + +import ( + "encoding/json" + "fmt" + "math" + "strconv" + "strings" + "sync/atomic" + "time" + + "github.com/pingcap/errors" + "github.com/pingcap/tidb/config" + "github.com/pingcap/tidb/types" +) + +// secondsPerYear represents seconds in a normal year. Leap year is not considered here. +const secondsPerYear = 60 * 60 * 24 * 365 + +// SetDDLReorgWorkerCounter sets ddlReorgWorkerCounter count. +// Max worker count is maxDDLReorgWorkerCount. +func SetDDLReorgWorkerCounter(cnt int32) { + if cnt > maxDDLReorgWorkerCount { + cnt = maxDDLReorgWorkerCount + } + atomic.StoreInt32(&ddlReorgWorkerCounter, cnt) +} + +// GetDDLReorgWorkerCounter gets ddlReorgWorkerCounter. +func GetDDLReorgWorkerCounter() int32 { + return atomic.LoadInt32(&ddlReorgWorkerCounter) +} + +// SetDDLReorgBatchSize sets ddlReorgBatchSize size. +// Max batch size is MaxDDLReorgBatchSize. +func SetDDLReorgBatchSize(cnt int32) { + if cnt > MaxDDLReorgBatchSize { + cnt = MaxDDLReorgBatchSize + } + if cnt < MinDDLReorgBatchSize { + cnt = MinDDLReorgBatchSize + } + atomic.StoreInt32(&ddlReorgBatchSize, cnt) +} + +// GetDDLReorgBatchSize gets ddlReorgBatchSize. +func GetDDLReorgBatchSize() int32 { + return atomic.LoadInt32(&ddlReorgBatchSize) +} + +// SetDDLErrorCountLimit sets ddlErrorCountlimit size. +func SetDDLErrorCountLimit(cnt int64) { + atomic.StoreInt64(&ddlErrorCountlimit, cnt) +} + +// GetDDLErrorCountLimit gets ddlErrorCountlimit size. +func GetDDLErrorCountLimit() int64 { + return atomic.LoadInt64(&ddlErrorCountlimit) +} + +// SetMaxDeltaSchemaCount sets maxDeltaSchemaCount size. +func SetMaxDeltaSchemaCount(cnt int64) { + atomic.StoreInt64(&maxDeltaSchemaCount, cnt) +} + +// GetMaxDeltaSchemaCount gets maxDeltaSchemaCount size. +func GetMaxDeltaSchemaCount() int64 { + return atomic.LoadInt64(&maxDeltaSchemaCount) +} + +// GetSessionSystemVar gets a system variable. +// If it is a session only variable, use the default value defined in code. +// Returns error if there is no such variable. 
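+// An illustrative call (sessVars here stands for a session's *SessionVars and is hypothetical):
+//
+//	val, err := GetSessionSystemVar(sessVars, "max_allowed_packet")
+//
+// The key is matched case-insensitively; a value that is not session-only is fetched through the
+// session's GlobalVarsAccessor and cached back into the session.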
+func GetSessionSystemVar(s *SessionVars, key string) (string, error) { + key = strings.ToLower(key) + gVal, ok, err := GetSessionOnlySysVars(s, key) + if err != nil || ok { + return gVal, err + } + gVal, err = s.GlobalVarsAccessor.GetGlobalSysVar(key) + if err != nil { + return "", err + } + s.systems[key] = gVal + return gVal, nil +} + +// GetSessionOnlySysVars get the default value defined in code for session only variable. +// The return bool value indicates whether it's a session only variable. +func GetSessionOnlySysVars(s *SessionVars, key string) (string, bool, error) { + sysVar := SysVars[key] + if sysVar == nil { + return "", false, ErrUnknownSystemVar.GenWithStackByArgs(key) + } + // For virtual system variables: + switch sysVar.Name { + case TiDBCurrentTS: + return fmt.Sprintf("%d", s.TxnCtx.StartTS), true, nil + case TiDBGeneralLog: + return fmt.Sprintf("%d", atomic.LoadUint32(&ProcessGeneralLog)), true, nil + case TiDBConfig: + conf := config.GetGlobalConfig() + j, err := json.MarshalIndent(conf, "", "\t") + if err != nil { + return "", false, err + } + return string(j), true, nil + } + sVal, ok := s.GetSystemVar(key) + if ok { + return sVal, true, nil + } + if sysVar.Scope&ScopeGlobal == 0 { + // None-Global variable can use pre-defined default value. + return sysVar.Value, true, nil + } + return "", false, nil +} + +// GetGlobalSystemVar gets a global system variable. +func GetGlobalSystemVar(s *SessionVars, key string) (string, error) { + key = strings.ToLower(key) + gVal, ok, err := GetScopeNoneSystemVar(key) + if err != nil || ok { + return gVal, err + } + gVal, err = s.GlobalVarsAccessor.GetGlobalSysVar(key) + if err != nil { + return "", err + } + return gVal, nil +} + +// GetScopeNoneSystemVar checks the validation of `key`, +// and return the default value if its scope is `ScopeNone`. +func GetScopeNoneSystemVar(key string) (string, bool, error) { + sysVar := SysVars[key] + if sysVar == nil { + return "", false, ErrUnknownSystemVar.GenWithStackByArgs(key) + } + if sysVar.Scope == ScopeNone { + return sysVar.Value, true, nil + } + return "", false, nil +} + +// epochShiftBits is used to reserve logical part of the timestamp. +const epochShiftBits = 18 + +// SetSessionSystemVar sets system variable and updates SessionVars states. +func SetSessionSystemVar(vars *SessionVars, name string, value types.Datum) error { + name = strings.ToLower(name) + sysVar := SysVars[name] + if sysVar == nil { + return ErrUnknownSystemVar + } + sVal := "" + var err error + if !value.IsNull() { + sVal, err = value.ToString() + } + if err != nil { + return err + } + sVal, err = ValidateSetSystemVar(vars, name, sVal) + if err != nil { + return err + } + return vars.SetSystemVar(name, sVal) +} + +// ValidateGetSystemVar checks if system variable exists and validates its scope when get system variable. 
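+// For example, requesting a session-only variable with isGlobal set to true returns ErrIncorrectScope,
+// and requesting a GLOBAL- or NONE-scoped variable with isGlobal set to false fails the same way.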
+func ValidateGetSystemVar(name string, isGlobal bool) error { + sysVar, exists := SysVars[name] + if !exists { + return ErrUnknownSystemVar.GenWithStackByArgs(name) + } + switch sysVar.Scope { + case ScopeGlobal, ScopeNone: + if !isGlobal { + return ErrIncorrectScope.GenWithStackByArgs(name, "GLOBAL") + } + case ScopeSession: + if isGlobal { + return ErrIncorrectScope.GenWithStackByArgs(name, "SESSION") + } + } + return nil +} + +func checkUInt64SystemVar(name, value string, min, max uint64, vars *SessionVars) (string, error) { + if len(value) == 0 { + return value, ErrWrongTypeForVar.GenWithStackByArgs(name) + } + if value[0] == '-' { + _, err := strconv.ParseInt(value, 10, 64) + if err != nil { + return value, ErrWrongTypeForVar.GenWithStackByArgs(name) + } + vars.StmtCtx.AppendWarning(ErrTruncatedWrongValue.GenWithStackByArgs(name, value)) + return fmt.Sprintf("%d", min), nil + } + val, err := strconv.ParseUint(value, 10, 64) + if err != nil { + return value, ErrWrongTypeForVar.GenWithStackByArgs(name) + } + if val < min { + vars.StmtCtx.AppendWarning(ErrTruncatedWrongValue.GenWithStackByArgs(name, value)) + return fmt.Sprintf("%d", min), nil + } + if val > max { + vars.StmtCtx.AppendWarning(ErrTruncatedWrongValue.GenWithStackByArgs(name, value)) + return fmt.Sprintf("%d", max), nil + } + return value, nil +} + +func checkInt64SystemVar(name, value string, min, max int64, vars *SessionVars) (string, error) { + val, err := strconv.ParseInt(value, 10, 64) + if err != nil { + return value, ErrWrongTypeForVar.GenWithStackByArgs(name) + } + if val < min { + vars.StmtCtx.AppendWarning(ErrTruncatedWrongValue.GenWithStackByArgs(name, value)) + return fmt.Sprintf("%d", min), nil + } + if val > max { + vars.StmtCtx.AppendWarning(ErrTruncatedWrongValue.GenWithStackByArgs(name, value)) + return fmt.Sprintf("%d", max), nil + } + return value, nil +} + +const ( + // initChunkSizeUpperBound indicates upper bound value of tidb_init_chunk_size. + initChunkSizeUpperBound = 32 + // maxChunkSizeLowerBound indicates lower bound value of tidb_max_chunk_size. + maxChunkSizeLowerBound = 32 +) + +// ValidateSetSystemVar checks if system variable satisfies specific restriction. +func ValidateSetSystemVar(vars *SessionVars, name string, value string) (string, error) { + if strings.EqualFold(value, "DEFAULT") { + if val := GetSysVar(name); val != nil { + return val.Value, nil + } + return value, ErrUnknownSystemVar.GenWithStackByArgs(name) + } + switch name { + case ConnectTimeout: + return checkUInt64SystemVar(name, value, 2, secondsPerYear, vars) + case DefaultWeekFormat: + return checkUInt64SystemVar(name, value, 0, 7, vars) + case DelayKeyWrite: + if strings.EqualFold(value, "ON") || value == "1" { + return "ON", nil + } else if strings.EqualFold(value, "OFF") || value == "0" { + return "OFF", nil + } else if strings.EqualFold(value, "ALL") || value == "2" { + return "ALL", nil + } + return value, ErrWrongValueForVar.GenWithStackByArgs(name, value) + case FlushTime: + return checkUInt64SystemVar(name, value, 0, secondsPerYear, vars) + case ForeignKeyChecks: + if strings.EqualFold(value, "ON") || value == "1" { + // TiDB does not yet support foreign keys. + // For now, resist the change and show a warning. 
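+ // The rejected value is reported through an ErrUnsupportedValueForVar warning and the variable is
+ // coerced to "OFF" below.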
+ vars.StmtCtx.AppendWarning(ErrUnsupportedValueForVar.GenWithStackByArgs(name, value)) + return "OFF", nil + } else if strings.EqualFold(value, "OFF") || value == "0" { + return "OFF", nil + } + return value, ErrWrongValueForVar.GenWithStackByArgs(name, value) + case GroupConcatMaxLen: + // The reasonable range of 'group_concat_max_len' is 4~18446744073709551615(64-bit platforms) + // See https://dev.mysql.com/doc/refman/8.0/en/server-system-variables.html#sysvar_group_concat_max_len for details + return checkUInt64SystemVar(name, value, 4, math.MaxUint64, vars) + case InteractiveTimeout: + return checkUInt64SystemVar(name, value, 1, secondsPerYear, vars) + case InnodbCommitConcurrency: + return checkUInt64SystemVar(name, value, 0, 1000, vars) + case InnodbFastShutdown: + return checkUInt64SystemVar(name, value, 0, 2, vars) + case InnodbLockWaitTimeout: + return checkUInt64SystemVar(name, value, 1, 1073741824, vars) + // See "https://dev.mysql.com/doc/refman/5.7/en/server-system-variables.html#sysvar_max_allowed_packet" + case MaxAllowedPacket: + return checkUInt64SystemVar(name, value, 1024, MaxOfMaxAllowedPacket, vars) + case MaxConnections: + return checkUInt64SystemVar(name, value, 1, 100000, vars) + case MaxConnectErrors: + return checkUInt64SystemVar(name, value, 1, math.MaxUint64, vars) + case MaxSortLength: + return checkUInt64SystemVar(name, value, 4, 8388608, vars) + case MaxSpRecursionDepth: + return checkUInt64SystemVar(name, value, 0, 255, vars) + case MaxUserConnections: + return checkUInt64SystemVar(name, value, 0, 4294967295, vars) + case OldPasswords: + return checkUInt64SystemVar(name, value, 0, 2, vars) + case TiDBMaxDeltaSchemaCount: + return checkInt64SystemVar(name, value, 100, 16384, vars) + case SessionTrackGtids: + if strings.EqualFold(value, "OFF") || value == "0" { + return "OFF", nil + } else if strings.EqualFold(value, "OWN_GTID") || value == "1" { + return "OWN_GTID", nil + } else if strings.EqualFold(value, "ALL_GTIDS") || value == "2" { + return "ALL_GTIDS", nil + } + return value, ErrWrongValueForVar.GenWithStackByArgs(name, value) + case SQLSelectLimit: + return checkUInt64SystemVar(name, value, 0, math.MaxUint64, vars) + case SyncBinlog: + return checkUInt64SystemVar(name, value, 0, 4294967295, vars) + case TableDefinitionCache: + return checkUInt64SystemVar(name, value, 400, 524288, vars) + case TmpTableSize: + return checkUInt64SystemVar(name, value, 1024, math.MaxUint64, vars) + case WaitTimeout: + return checkUInt64SystemVar(name, value, 0, 31536000, vars) + case MaxPreparedStmtCount: + return checkInt64SystemVar(name, value, -1, 1048576, vars) + case ValidatePasswordLength, ValidatePasswordNumberCount: + return checkUInt64SystemVar(name, value, 0, math.MaxUint64, vars) + case WarningCount, ErrorCount: + return value, ErrReadOnly.GenWithStackByArgs(name) + case EnforceGtidConsistency: + if strings.EqualFold(value, "OFF") || value == "0" { + return "OFF", nil + } else if strings.EqualFold(value, "ON") || value == "1" { + return "ON", nil + } else if strings.EqualFold(value, "WARN") || value == "2" { + return "WARN", nil + } + return value, ErrWrongValueForVar.GenWithStackByArgs(name, value) + case QueryCacheType: + if strings.EqualFold(value, "OFF") || value == "0" { + return "OFF", nil + } else if strings.EqualFold(value, "ON") || value == "1" { + return "ON", nil + } else if strings.EqualFold(value, "DEMAND") || value == "2" { + return "DEMAND", nil + } + return value, ErrWrongValueForVar.GenWithStackByArgs(name, value) + case SecureAuth: + if 
strings.EqualFold(value, "ON") || value == "1" { + return "1", nil + } + return value, ErrWrongValueForVar.GenWithStackByArgs(name, value) + case TiDBSkipUTF8Check, TiDBOptAggPushDown, TiDBOptInSubqToJoinAndAgg, + TiDBEnableCascadesPlanner, TiDBEnableNoopFuncs, + TiDBScatterRegion, TiDBGeneralLog, TiDBConstraintCheckInPlace, TiDBEnableVectorizedExpression: + fallthrough + case GeneralLog, AvoidTemporalUpgrade, BigTables, CheckProxyUsers, LogBin, + CoreFile, EndMakersInJSON, SQLLogBin, OfflineMode, PseudoSlaveMode, LowPriorityUpdates, + SkipNameResolve, SQLSafeUpdates, serverReadOnly, SlaveAllowBatching, + Flush, PerformanceSchema, LocalInFile, ShowOldTemporals, KeepFilesOnCreate, AutoCommit, + SQLWarnings, UniqueChecks, OldAlterTable, LogBinTrustFunctionCreators, SQLBigSelects, + BinlogDirectNonTransactionalUpdates, SQLQuoteShowCreate, AutomaticSpPrivileges, + RelayLogPurge, SQLAutoIsNull, QueryCacheWlockInvalidate, ValidatePasswordCheckUserName, + SuperReadOnly, BinlogOrderCommits, MasterVerifyChecksum, BinlogRowQueryLogEvents, LogSlowSlaveStatements, + LogSlowAdminStatements, LogQueriesNotUsingIndexes, Profiling: + if strings.EqualFold(value, "ON") { + return "1", nil + } else if strings.EqualFold(value, "OFF") { + return "0", nil + } + val, err := strconv.ParseInt(value, 10, 64) + if err == nil { + if val == 0 { + return "0", nil + } else if val == 1 { + return "1", nil + } + } + return value, ErrWrongValueForVar.GenWithStackByArgs(name, value) + case MyISAMUseMmap, InnodbTableLocks, InnodbStatusOutput, InnodbAdaptiveFlushing, InnodbRandomReadAhead, + InnodbStatsPersistent, InnodbBufferPoolLoadAbort, InnodbBufferPoolLoadNow, InnodbBufferPoolDumpNow, + InnodbCmpPerIndexEnabled, InnodbFilePerTable, InnodbPrintAllDeadlocks, + InnodbStrictMode, InnodbAdaptiveHashIndex, InnodbFtEnableStopword, InnodbStatusOutputLocks: + if strings.EqualFold(value, "ON") { + return "1", nil + } else if strings.EqualFold(value, "OFF") { + return "0", nil + } + val, err := strconv.ParseInt(value, 10, 64) + if err == nil { + if val == 1 || val < 0 { + return "1", nil + } else if val == 0 { + return "0", nil + } + } + return value, ErrWrongValueForVar.GenWithStackByArgs(name, value) + case MaxExecutionTime: + return checkUInt64SystemVar(name, value, 0, math.MaxUint64, vars) + case ThreadPoolSize: + return checkUInt64SystemVar(name, value, 1, 64, vars) + case TiDBDDLReorgBatchSize: + return checkUInt64SystemVar(name, value, uint64(MinDDLReorgBatchSize), uint64(MaxDDLReorgBatchSize), vars) + case TiDBDDLErrorCountLimit: + return checkUInt64SystemVar(name, value, uint64(0), math.MaxInt64, vars) + case TiDBIndexLookupConcurrency, TiDBIndexLookupJoinConcurrency, + TiDBIndexLookupSize, + TiDBHashJoinConcurrency, + TiDBHashAggPartialConcurrency, + TiDBHashAggFinalConcurrency, + TiDBDistSQLScanConcurrency, + TiDBIndexSerialScanConcurrency, TiDBDDLReorgWorkerCount, + TiDBBackoffLockFast, TiDBBackOffWeight: + v, err := strconv.Atoi(value) + if err != nil { + return value, ErrWrongTypeForVar.GenWithStackByArgs(name) + } + if v <= 0 { + return value, ErrWrongValueForVar.GenWithStackByArgs(name, value) + } + return value, nil + case TiDBOptCorrelationExpFactor: + v, err := strconv.Atoi(value) + if err != nil { + return value, ErrWrongTypeForVar.GenWithStackByArgs(name) + } + if v < 0 { + return value, ErrWrongValueForVar.GenWithStackByArgs(name, value) + } + return value, nil + case TiDBOptCorrelationThreshold: + v, err := strconv.ParseFloat(value, 64) + if err != nil { + return value, 
ErrWrongTypeForVar.GenWithStackByArgs(name) + } + if v < 0 || v > 1 { + return value, ErrWrongValueForVar.GenWithStackByArgs(name, value) + } + return value, nil + case TiDBOptCPUFactor, + TiDBOptCopCPUFactor, + TiDBOptNetworkFactor, + TiDBOptScanFactor, + TiDBOptDescScanFactor, + TiDBOptSeekFactor, + TiDBOptMemoryFactor, + TiDBOptDiskFactor, + TiDBOptConcurrencyFactor: + v, err := strconv.ParseFloat(value, 64) + if err != nil { + return value, ErrWrongTypeForVar.GenWithStackByArgs(name) + } + if v < 0 { + return value, ErrWrongValueForVar.GenWithStackByArgs(name, value) + } + return value, nil + case TiDBProjectionConcurrency: + _, err := strconv.ParseInt(value, 10, 64) + if err != nil { + return value, ErrWrongValueForVar.GenWithStackByArgs(name) + } + return value, nil + case TxnIsolation, TransactionIsolation: + upVal := strings.ToUpper(value) + _, exists := TxIsolationNames[upVal] + if !exists { + return "", ErrWrongValueForVar.GenWithStackByArgs(name, value) + } + switch upVal { + case "SERIALIZABLE", "READ-UNCOMMITTED": + skipIsolationLevelCheck, err := GetSessionSystemVar(vars, TiDBSkipIsolationLevelCheck) + returnErr := ErrUnsupportedIsolationLevel.GenWithStackByArgs(value) + if err != nil { + returnErr = err + } + if !TiDBOptOn(skipIsolationLevelCheck) || err != nil { + return "", returnErr + } + //SET TRANSACTION ISOLATION LEVEL will affect two internal variables: + // 1. tx_isolation + // 2. transaction_isolation + // The following if condition is used to deduplicate two same warnings. + if name == "transaction_isolation" { + vars.StmtCtx.AppendWarning(returnErr) + } + } + return upVal, nil + case TiDBInitChunkSize: + v, err := strconv.Atoi(value) + if err != nil { + return value, ErrWrongTypeForVar.GenWithStackByArgs(name) + } + if v <= 0 { + return value, ErrWrongValueForVar.GenWithStackByArgs(name, value) + } + if v > initChunkSizeUpperBound { + return value, errors.Errorf("tidb_init_chunk_size(%d) cannot be bigger than %d", v, initChunkSizeUpperBound) + } + return value, nil + case TiDBMaxChunkSize: + v, err := strconv.Atoi(value) + if err != nil { + return value, ErrWrongTypeForVar.GenWithStackByArgs(name) + } + if v < maxChunkSizeLowerBound { + return value, errors.Errorf("tidb_max_chunk_size(%d) cannot be smaller than %d", v, maxChunkSizeLowerBound) + } + return value, nil + case TiDBOptJoinReorderThreshold: + v, err := strconv.Atoi(value) + if err != nil { + return value, ErrWrongTypeForVar.GenWithStackByArgs(name) + } + if v < 0 || v >= 64 { + return value, errors.Errorf("tidb_join_order_algo_threshold(%d) cannot be smaller than 0 or larger than 63", v) + } + case TiDBWaitSplitRegionTimeout: + v, err := strconv.Atoi(value) + if err != nil { + return value, ErrWrongTypeForVar.GenWithStackByArgs(name) + } + if v <= 0 { + return value, errors.Errorf("tidb_wait_split_region_timeout(%d) cannot be smaller than 1", v) + } + case TiDBReplicaRead: + if strings.EqualFold(value, "follower") { + return "follower", nil + } else if strings.EqualFold(value, "leader") || len(value) == 0 { + return "leader", nil + } + return value, ErrWrongValueForVar.GenWithStackByArgs(name, value) + case TiDBAllowRemoveAutoInc: + switch { + case strings.EqualFold(value, "ON") || value == "1": + return "on", nil + case strings.EqualFold(value, "OFF") || value == "0": + return "off", nil + } + return value, ErrWrongValueForVar.GenWithStackByArgs(name, value) + } + return value, nil +} + +// TiDBOptOn could be used for all tidb session variable options, we use "ON"/1 to turn on those options. 
+func TiDBOptOn(opt string) bool { + return strings.EqualFold(opt, "ON") || opt == "1" +} + +func tidbOptPositiveInt32(opt string, defaultVal int) int { + val, err := strconv.Atoi(opt) + if err != nil || val <= 0 { + return defaultVal + } + return val +} + +func tidbOptInt64(opt string, defaultVal int64) int64 { + val, err := strconv.ParseInt(opt, 10, 64) + if err != nil { + return defaultVal + } + return val +} + +func tidbOptFloat64(opt string, defaultVal float64) float64 { + val, err := strconv.ParseFloat(opt, 64) + if err != nil { + return defaultVal + } + return val +} + +// GoTimeToTS converts a Go time to uint64 timestamp. +func GoTimeToTS(t time.Time) uint64 { + ts := (t.UnixNano() / int64(time.Millisecond)) << epochShiftBits + return uint64(ts) +} diff --git a/sessionctx/variable/varsutil_test.go b/sessionctx/variable/varsutil_test.go new file mode 100644 index 0000000..982ca6b --- /dev/null +++ b/sessionctx/variable/varsutil_test.go @@ -0,0 +1,170 @@ +// Copyright 2016 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package variable + +import ( + "reflect" + "time" + + . "github.com/pingcap/check" + "github.com/pingcap/tidb/util/testleak" +) + +var _ = Suite(&testVarsutilSuite{}) + +type testVarsutilSuite struct { +} + +func (s *testVarsutilSuite) TestTiDBOptOn(c *C) { + defer testleak.AfterTest(c)() + tbl := []struct { + val string + on bool + }{ + {"ON", true}, + {"on", true}, + {"On", true}, + {"1", true}, + {"off", false}, + {"No", false}, + {"0", false}, + {"1.1", false}, + {"", false}, + } + for _, t := range tbl { + on := TiDBOptOn(t.val) + c.Assert(on, Equals, t.on) + } +} + +func (s *testVarsutilSuite) TestNewSessionVars(c *C) { + defer testleak.AfterTest(c)() + vars := NewSessionVars() + + c.Assert(vars.IndexLookupSize, Equals, DefIndexLookupSize) + c.Assert(vars.IndexLookupConcurrency, Equals, DefIndexLookupConcurrency) + c.Assert(vars.IndexSerialScanConcurrency, Equals, DefIndexSerialScanConcurrency) + c.Assert(vars.IndexLookupJoinConcurrency, Equals, DefIndexLookupJoinConcurrency) + c.Assert(vars.HashJoinConcurrency, Equals, DefTiDBHashJoinConcurrency) + c.Assert(vars.ProjectionConcurrency, Equals, int64(DefTiDBProjectionConcurrency)) + c.Assert(vars.HashAggPartialConcurrency, Equals, DefTiDBHashAggPartialConcurrency) + c.Assert(vars.HashAggFinalConcurrency, Equals, DefTiDBHashAggFinalConcurrency) + c.Assert(vars.DistSQLScanConcurrency, Equals, DefDistSQLScanConcurrency) + c.Assert(vars.MaxChunkSize, Equals, DefMaxChunkSize) + c.Assert(vars.EnableRadixJoin, Equals, DefTiDBUseRadixJoin) + c.Assert(vars.AllowWriteRowID, Equals, DefOptWriteRowID) + c.Assert(vars.TiDBOptJoinReorderThreshold, Equals, DefTiDBOptJoinReorderThreshold) + + assertFieldsGreaterThanZero(c, reflect.ValueOf(vars.Concurrency)) + assertFieldsGreaterThanZero(c, reflect.ValueOf(vars.BatchSize)) +} + +func assertFieldsGreaterThanZero(c *C, val reflect.Value) { + for i := 0; i < val.NumField(); i++ { + fieldVal := val.Field(i) + c.Assert(fieldVal.Int(), Greater, int64(0)) + } +} + +func (s *testVarsutilSuite) TestSetOverflowBehave(c *C) { + ddRegWorker 
:= maxDDLReorgWorkerCount + 1 + SetDDLReorgWorkerCounter(ddRegWorker) + c.Assert(maxDDLReorgWorkerCount, Equals, GetDDLReorgWorkerCounter()) + + ddlReorgBatchSize := MaxDDLReorgBatchSize + 1 + SetDDLReorgBatchSize(ddlReorgBatchSize) + c.Assert(MaxDDLReorgBatchSize, Equals, GetDDLReorgBatchSize()) + ddlReorgBatchSize = MinDDLReorgBatchSize - 1 + SetDDLReorgBatchSize(ddlReorgBatchSize) + c.Assert(MinDDLReorgBatchSize, Equals, GetDDLReorgBatchSize()) + + val := tidbOptInt64("a", 1) + c.Assert(val, Equals, int64(1)) + val2 := tidbOptFloat64("b", 1.2) + c.Assert(val2, Equals, 1.2) +} + +func (s *testVarsutilSuite) TestValidate(c *C) { + v := NewSessionVars() + v.GlobalVarsAccessor = NewMockGlobalAccessor() + v.TimeZone = time.UTC + + tests := []struct { + key string + value string + error bool + }{ + {DelayKeyWrite, "ON", false}, + {DelayKeyWrite, "OFF", false}, + {DelayKeyWrite, "ALL", false}, + {DelayKeyWrite, "3", true}, + {ForeignKeyChecks, "3", true}, + {MaxSpRecursionDepth, "256", false}, + {SessionTrackGtids, "OFF", false}, + {SessionTrackGtids, "OWN_GTID", false}, + {SessionTrackGtids, "ALL_GTIDS", false}, + {SessionTrackGtids, "ON", true}, + {EnforceGtidConsistency, "OFF", false}, + {EnforceGtidConsistency, "ON", false}, + {EnforceGtidConsistency, "WARN", false}, + {QueryCacheType, "OFF", false}, + {QueryCacheType, "ON", false}, + {QueryCacheType, "DEMAND", false}, + {QueryCacheType, "3", true}, + {SecureAuth, "1", false}, + {SecureAuth, "3", true}, + {MyISAMUseMmap, "ON", false}, + {MyISAMUseMmap, "OFF", false}, + {TiDBOptCorrelationExpFactor, "a", true}, + {TiDBOptCorrelationExpFactor, "-10", true}, + {TiDBOptCorrelationThreshold, "a", true}, + {TiDBOptCorrelationThreshold, "-2", true}, + {TiDBOptCPUFactor, "a", true}, + {TiDBOptCPUFactor, "-2", true}, + {TiDBOptCopCPUFactor, "a", true}, + {TiDBOptCopCPUFactor, "-2", true}, + {TiDBOptNetworkFactor, "a", true}, + {TiDBOptNetworkFactor, "-2", true}, + {TiDBOptScanFactor, "a", true}, + {TiDBOptScanFactor, "-2", true}, + {TiDBOptDescScanFactor, "a", true}, + {TiDBOptDescScanFactor, "-2", true}, + {TiDBOptSeekFactor, "a", true}, + {TiDBOptSeekFactor, "-2", true}, + {TiDBOptMemoryFactor, "a", true}, + {TiDBOptMemoryFactor, "-2", true}, + {TiDBOptDiskFactor, "a", true}, + {TiDBOptDiskFactor, "-2", true}, + {TiDBOptConcurrencyFactor, "a", true}, + {TiDBOptConcurrencyFactor, "-2", true}, + {TxnIsolation, "READ-UNCOMMITTED", true}, + {TiDBInitChunkSize, "a", true}, + {TiDBInitChunkSize, "-1", true}, + {TiDBMaxChunkSize, "a", true}, + {TiDBMaxChunkSize, "-1", true}, + {TiDBOptJoinReorderThreshold, "a", true}, + {TiDBOptJoinReorderThreshold, "-1", true}, + {TiDBReplicaRead, "invalid", true}, + } + + for _, t := range tests { + _, err := ValidateSetSystemVar(v, t.key, t.value) + if t.error { + c.Assert(err, NotNil, Commentf("%v got err=%v", t, err)) + } else { + c.Assert(err, IsNil, Commentf("%v got err=%v", t, err)) + } + } + +} diff --git a/statistics/builder.go b/statistics/builder.go new file mode 100644 index 0000000..ff75383 --- /dev/null +++ b/statistics/builder.go @@ -0,0 +1,167 @@ +// Copyright 2017 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package statistics + +import ( + "github.com/pingcap/errors" + "github.com/pingcap/tidb/sessionctx" + "github.com/pingcap/tidb/sessionctx/stmtctx" + "github.com/pingcap/tidb/types" +) + +// SortedBuilder is used to build histograms for PK and index. +type SortedBuilder struct { + sc *stmtctx.StatementContext + numBuckets int64 + valuesPerBucket int64 + lastNumber int64 + bucketIdx int64 + Count int64 + hist *Histogram +} + +// NewSortedBuilder creates a new SortedBuilder. +func NewSortedBuilder(sc *stmtctx.StatementContext, numBuckets, id int64, tp *types.FieldType) *SortedBuilder { + return &SortedBuilder{ + sc: sc, + numBuckets: numBuckets, + valuesPerBucket: 1, + hist: NewHistogram(id, 0, 0, 0, tp, int(numBuckets), 0), + } +} + +// Hist returns the histogram built by SortedBuilder. +func (b *SortedBuilder) Hist() *Histogram { + return b.hist +} + +// Iterate updates the histogram incrementally. +func (b *SortedBuilder) Iterate(data types.Datum) error { + b.Count++ + if b.Count == 1 { + b.hist.AppendBucket(&data, &data, 1, 1) + b.hist.NDV = 1 + return nil + } + cmp, err := b.hist.GetUpper(int(b.bucketIdx)).CompareDatum(b.sc, &data) + if err != nil { + return errors.Trace(err) + } + if cmp == 0 { + // The new item has the same value as current bucket value, to ensure that + // a same value only stored in a single bucket, we do not increase bucketIdx even if it exceeds + // valuesPerBucket. + b.hist.Buckets[b.bucketIdx].Count++ + b.hist.Buckets[b.bucketIdx].Repeat++ + } else if b.hist.Buckets[b.bucketIdx].Count+1-b.lastNumber <= b.valuesPerBucket { + // The bucket still have room to store a new item, update the bucket. + b.hist.updateLastBucket(&data, b.hist.Buckets[b.bucketIdx].Count+1, 1) + b.hist.NDV++ + } else { + // All buckets are full, we should merge buckets. + if b.bucketIdx+1 == b.numBuckets { + b.hist.mergeBuckets(int(b.bucketIdx)) + b.valuesPerBucket *= 2 + b.bucketIdx = b.bucketIdx / 2 + if b.bucketIdx == 0 { + b.lastNumber = 0 + } else { + b.lastNumber = b.hist.Buckets[b.bucketIdx-1].Count + } + } + // We may merge buckets, so we should check it again. + if b.hist.Buckets[b.bucketIdx].Count+1-b.lastNumber <= b.valuesPerBucket { + b.hist.updateLastBucket(&data, b.hist.Buckets[b.bucketIdx].Count+1, 1) + } else { + b.lastNumber = b.hist.Buckets[b.bucketIdx].Count + b.bucketIdx++ + b.hist.AppendBucket(&data, &data, b.lastNumber+1, 1) + } + b.hist.NDV++ + } + return nil +} + +// BuildColumnHist build a histogram for a column. +// numBuckets: number of buckets for the histogram. +// id: the id of the table. +// collector: the collector of samples. +// tp: the FieldType for the column. +// count: represents the row count for the column. +// ndv: represents the number of distinct values for the column. +// nullCount: represents the number of null values for the column. 
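+//
+// As a rough worked example of the scaling used below (numbers are illustrative only):
+// with count = 10000 rows, 1000 samples and numBuckets = 100, sampleFactor is 10 and
+// valuesPerBucket is 100 + 10 = 110, so each sampled value stands for about 10 real rows.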
+func BuildColumnHist(ctx sessionctx.Context, numBuckets, id int64, collector *SampleCollector, tp *types.FieldType, count int64, ndv int64, nullCount int64) (*Histogram, error) {
+	if ndv > count {
+		ndv = count
+	}
+	if count == 0 || len(collector.Samples) == 0 {
+		return NewHistogram(id, ndv, nullCount, 0, tp, 0, collector.TotalSize), nil
+	}
+	sc := ctx.GetSessionVars().StmtCtx
+	samples := collector.Samples
+	err := SortSampleItems(sc, samples)
+	if err != nil {
+		return nil, err
+	}
+	hg := NewHistogram(id, ndv, nullCount, 0, tp, int(numBuckets), collector.TotalSize)
+
+	sampleNum := int64(len(samples))
+	// Since we use samples to build the histogram, the bucket counts and repeats need to be scaled by a factor.
+	sampleFactor := float64(count) / float64(len(samples))
+	// Since the bucket count is increased by sampleFactor, the actual maximum number of values per bucket is
+	// floor(valuesPerBucket/sampleFactor)*sampleFactor, which may be less than valuesPerBucket,
+	// so we add a sampleFactor to avoid building too many buckets.
+	valuesPerBucket := float64(count)/float64(numBuckets) + sampleFactor
+	ndvFactor := float64(count) / float64(hg.NDV)
+	if ndvFactor > sampleFactor {
+		ndvFactor = sampleFactor
+	}
+	bucketIdx := 0
+	var lastCount int64
+	var corrXYSum float64
+	hg.AppendBucket(&samples[0].Value, &samples[0].Value, int64(sampleFactor), int64(ndvFactor))
+	for i := int64(1); i < sampleNum; i++ {
+		corrXYSum += float64(i) * float64(samples[i].Ordinal)
+		cmp, err := hg.GetUpper(bucketIdx).CompareDatum(sc, &samples[i].Value)
+		if err != nil {
+			return nil, errors.Trace(err)
+		}
+		totalCount := float64(i+1) * sampleFactor
+		if cmp == 0 {
+			// The new item has the same value as the current bucket value. To ensure that
+			// the same value is stored in only a single bucket, we do not increase bucketIdx even if it exceeds
+			// valuesPerBucket.
+			hg.Buckets[bucketIdx].Count = int64(totalCount)
+			if float64(hg.Buckets[bucketIdx].Repeat) == ndvFactor {
+				hg.Buckets[bucketIdx].Repeat = int64(2 * sampleFactor)
+			} else {
+				hg.Buckets[bucketIdx].Repeat += int64(sampleFactor)
+			}
+		} else if totalCount-float64(lastCount) <= valuesPerBucket {
+			// The bucket still has room to store a new item, update the bucket.
+			hg.updateLastBucket(&samples[i].Value, int64(totalCount), int64(ndvFactor))
+		} else {
+			lastCount = hg.Buckets[bucketIdx].Count
+			// The bucket is full, store the item in the next bucket.
+			bucketIdx++
+			hg.AppendBucket(&samples[i].Value, &samples[i].Value, int64(totalCount), int64(ndvFactor))
+		}
+	}
+	return hg, nil
+}
+
+// BuildColumn builds a histogram from samples for a column.
+func BuildColumn(ctx sessionctx.Context, numBuckets, id int64, collector *SampleCollector, tp *types.FieldType) (*Histogram, error) {
+	return BuildColumnHist(ctx, numBuckets, id, collector, tp, collector.Count, collector.FMSketch.NDV(), collector.NullCount)
+}
diff --git a/statistics/cmsketch.go b/statistics/cmsketch.go
new file mode 100644
index 0000000..d1e011b
--- /dev/null
+++ b/statistics/cmsketch.go
@@ -0,0 +1,173 @@
+// Copyright 2017 PingCAP, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package statistics + +import ( + "reflect" + + "github.com/pingcap/errors" + "github.com/pingcap/tidb/sessionctx/stmtctx" + "github.com/pingcap/tidb/tablecodec" + "github.com/pingcap/tidb/types" + "github.com/pingcap/tipb/go-tipb" + "github.com/spaolacci/murmur3" +) + +// CMSketch is used to estimate point queries. +// Refer: https://en.wikipedia.org/wiki/Count-min_sketch +type CMSketch struct { + depth int32 + width int32 + count uint64 + table [][]uint32 +} + +// NewCMSketch returns a new CM sketch. +func NewCMSketch(d, w int32) *CMSketch { + tbl := make([][]uint32, d) + for i := range tbl { + tbl[i] = make([]uint32, w) + } + return &CMSketch{depth: d, width: w, table: tbl} +} + +// InsertBytes inserts the bytes value into the CM Sketch. +func (c *CMSketch) InsertBytes(bytes []byte) { + c.insertBytesByCount(bytes, 1) +} + +// insertBytesByCount adds the bytes value into the TopN (if value already in TopN) or CM Sketch by delta, this does not updates c.defaultValue. +func (c *CMSketch) insertBytesByCount(bytes []byte, count uint64) { + // TODO: implement the insert method. +} + +func (c *CMSketch) queryValue(sc *stmtctx.StatementContext, val types.Datum) (uint64, error) { + bytes, err := tablecodec.EncodeValue(sc, nil, val) + if err != nil { + return 0, errors.Trace(err) + } + return c.QueryBytes(bytes), nil +} + +// QueryBytes is used to query the count of specified bytes. +func (c *CMSketch) QueryBytes(d []byte) uint64 { + h1, h2 := murmur3.Sum128(d) + return c.queryHashValue(h1, h2) +} + +func (c *CMSketch) queryHashValue(h1, h2 uint64) uint64 { + // TODO: implement the query method. + return uint64(0) +} + +// MergeCMSketch merges two CM Sketch. +func (c *CMSketch) MergeCMSketch(rc *CMSketch) error { + if c == nil || rc == nil { + return nil + } + if c.depth != rc.depth || c.width != rc.width { + return errors.New("Dimensions of Count-Min Sketch should be the same") + } + c.count += rc.count + for i := range c.table { + for j := range c.table[i] { + c.table[i][j] += rc.table[i][j] + } + } + return nil +} + +// CMSketchToProto converts CMSketch to its protobuf representation. +func CMSketchToProto(c *CMSketch) *tipb.CMSketch { + protoSketch := &tipb.CMSketch{Rows: make([]*tipb.CMSketchRow, c.depth)} + for i := range c.table { + protoSketch.Rows[i] = &tipb.CMSketchRow{Counters: make([]uint32, c.width)} + for j := range c.table[i] { + protoSketch.Rows[i].Counters[j] = c.table[i][j] + } + } + return protoSketch +} + +// CMSketchFromProto converts CMSketch from its protobuf representation. +func CMSketchFromProto(protoSketch *tipb.CMSketch) *CMSketch { + if protoSketch == nil { + return nil + } + c := NewCMSketch(int32(len(protoSketch.Rows)), int32(len(protoSketch.Rows[0].Counters))) + for i, row := range protoSketch.Rows { + c.count = 0 + for j, counter := range row.Counters { + c.table[i][j] = counter + c.count = c.count + uint64(counter) + } + } + return c +} + +// EncodeCMSketch encodes the given CMSketch to byte slice. +// Note that it does not include the topN. 
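+// A typical round trip, shown for illustration only:
+//   data, _ := EncodeCMSketch(cms)    // a nil sketch encodes to nil
+//   rc, _ := DecodeCMSketch(data)     // nil or empty data decodes to nil
+//   ok := cms.Equal(rc)               // expected to hold for a non-empty sketch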
+func EncodeCMSketch(c *CMSketch) ([]byte, error) { + if c == nil { + return nil, nil + } + p := CMSketchToProto(c) + p.TopN = nil + protoData, err := p.Marshal() + return protoData, err +} + +// DecodeCMSketch decode a CMSketch from the given byte slice. +func DecodeCMSketch(data []byte) (*CMSketch, error) { + if data == nil { + return nil, nil + } + p := &tipb.CMSketch{} + err := p.Unmarshal(data) + if err != nil { + return nil, errors.Trace(err) + } + if len(p.Rows) == 0 { + return nil, nil + } + return CMSketchFromProto(p), nil +} + +// TotalCount returns the total count in the sketch, it is only used for test. +func (c *CMSketch) TotalCount() uint64 { + return c.count +} + +// Equal tests if two CM Sketch equal, it is only used for test. +func (c *CMSketch) Equal(rc *CMSketch) bool { + return reflect.DeepEqual(c, rc) +} + +// Copy makes a copy for current CMSketch. +func (c *CMSketch) Copy() *CMSketch { + if c == nil { + return nil + } + tbl := make([][]uint32, c.depth) + for i := range tbl { + tbl[i] = make([]uint32, c.width) + copy(tbl[i], c.table[i]) + } + return &CMSketch{count: c.count, width: c.width, depth: c.depth, table: tbl} +} + +// GetWidthAndDepth returns the width and depth of CM Sketch. +func (c *CMSketch) GetWidthAndDepth() (int32, int32) { + return c.width, c.depth +} diff --git a/statistics/cmsketch_test.go b/statistics/cmsketch_test.go new file mode 100644 index 0000000..58c688f --- /dev/null +++ b/statistics/cmsketch_test.go @@ -0,0 +1,134 @@ +// Copyright 2017 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package statistics + +import ( + "math" + "math/rand" + "time" + + . "github.com/pingcap/check" + "github.com/pingcap/errors" + "github.com/pingcap/tidb/sessionctx/stmtctx" + "github.com/pingcap/tidb/types" + "github.com/pingcap/tidb/util/codec" +) + +func (c *CMSketch) insert(val *types.Datum) error { + bytes, err := codec.EncodeValue(nil, nil, *val) + if err != nil { + return errors.Trace(err) + } + c.InsertBytes(bytes) + return nil +} + +// buildCMSketchAndMapWithOffset builds cm sketch using zipf and the generated values starts from `offset`. 
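+// The returned map records the exact frequency of every generated value, so the tests
+// below can compare the sketch's estimates against the ground truth.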
+func buildCMSketchAndMapWithOffset(d, w int32, seed int64, total, imax uint64, s float64, offset int64) (*CMSketch, map[int64]uint32, error) { + cms := NewCMSketch(d, w) + mp := make(map[int64]uint32) + zipf := rand.NewZipf(rand.New(rand.NewSource(seed)), s, 1, imax) + for i := uint64(0); i < total; i++ { + val := types.NewIntDatum(int64(zipf.Uint64()) + offset) + err := cms.insert(&val) + if err != nil { + return nil, nil, errors.Trace(err) + } + mp[val.GetInt64()]++ + } + return cms, mp, nil +} + +func buildCMSketchAndMap(d, w int32, seed int64, total, imax uint64, s float64) (*CMSketch, map[int64]uint32, error) { + return buildCMSketchAndMapWithOffset(d, w, seed, total, imax, s, 0) +} + +func averageAbsoluteError(cms *CMSketch, mp map[int64]uint32) (uint64, error) { + sc := &stmtctx.StatementContext{TimeZone: time.Local} + var total uint64 + for num, count := range mp { + estimate, err := cms.queryValue(sc, types.NewIntDatum(num)) + if err != nil { + return 0, errors.Trace(err) + } + var diff uint64 + if uint64(count) > estimate { + diff = uint64(count) - estimate + } else { + diff = estimate - uint64(count) + } + total += uint64(diff) + } + return total / uint64(len(mp)), nil +} + +func (s *testStatisticsSuite) TestCMSketch(c *C) { + tests := []struct { + zipfFactor float64 + avgError uint64 + }{ + { + zipfFactor: 1.1, + avgError: 3, + }, + { + zipfFactor: 2, + avgError: 24, + }, + { + zipfFactor: 3, + avgError: 63, + }, + } + d, w := int32(5), int32(2048) + total, imax := uint64(100000), uint64(1000000) + for _, t := range tests { + lSketch, lMap, err := buildCMSketchAndMap(d, w, 0, total, imax, t.zipfFactor) + c.Check(err, IsNil) + avg, err := averageAbsoluteError(lSketch, lMap) + c.Assert(err, IsNil) + c.Check(avg, LessEqual, t.avgError) + + rSketch, rMap, err := buildCMSketchAndMap(d, w, 1, total, imax, t.zipfFactor) + c.Check(err, IsNil) + avg, err = averageAbsoluteError(rSketch, rMap) + c.Assert(err, IsNil) + c.Check(avg, LessEqual, t.avgError) + + err = lSketch.MergeCMSketch(rSketch) + c.Assert(err, IsNil) + for val, count := range rMap { + lMap[val] += count + } + avg, err = averageAbsoluteError(lSketch, lMap) + c.Assert(err, IsNil) + c.Check(avg, Less, t.avgError*2) + } +} + +func (s *testStatisticsSuite) TestCMSketchCoding(c *C) { + lSketch := NewCMSketch(5, 2048) + lSketch.count = 2048 * math.MaxUint32 + for i := range lSketch.table { + for j := range lSketch.table[i] { + lSketch.table[i][j] = math.MaxUint32 + } + } + bytes, err := EncodeCMSketch(lSketch) + c.Assert(err, IsNil) + c.Assert(len(bytes), Equals, 61457) + rSketch, err := DecodeCMSketch(bytes) + c.Assert(err, IsNil) + c.Assert(lSketch.Equal(rSketch), IsTrue) +} diff --git a/statistics/fmsketch.go b/statistics/fmsketch.go new file mode 100644 index 0000000..50c496b --- /dev/null +++ b/statistics/fmsketch.go @@ -0,0 +1,124 @@ +// Copyright 2017 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package statistics + +import ( + "hash" + + "github.com/pingcap/errors" + "github.com/pingcap/tidb/sessionctx/stmtctx" + "github.com/pingcap/tidb/types" + "github.com/pingcap/tidb/util/codec" + "github.com/pingcap/tipb/go-tipb" + "github.com/spaolacci/murmur3" +) + +// FMSketch is used to count the number of distinct elements in a set. +type FMSketch struct { + hashset map[uint64]bool + mask uint64 + maxSize int + hashFunc hash.Hash64 +} + +// NewFMSketch returns a new FM sketch. +func NewFMSketch(maxSize int) *FMSketch { + return &FMSketch{ + hashset: make(map[uint64]bool), + maxSize: maxSize, + hashFunc: murmur3.New64(), + } +} + +// NDV returns the ndv of the sketch. +func (s *FMSketch) NDV() int64 { + return int64(s.mask+1) * int64(len(s.hashset)) +} + +func (s *FMSketch) insertHashValue(hashVal uint64) { + if (hashVal & s.mask) != 0 { + return + } + s.hashset[hashVal] = true + if len(s.hashset) > s.maxSize { + s.mask = s.mask*2 + 1 + for key := range s.hashset { + if (key & s.mask) != 0 { + delete(s.hashset, key) + } + } + } +} + +// InsertValue inserts a value into the FM sketch. +func (s *FMSketch) InsertValue(sc *stmtctx.StatementContext, value types.Datum) error { + bytes, err := codec.EncodeValue(sc, nil, value) + if err != nil { + return errors.Trace(err) + } + s.hashFunc.Reset() + _, err = s.hashFunc.Write(bytes) + if err != nil { + return errors.Trace(err) + } + s.insertHashValue(s.hashFunc.Sum64()) + return nil +} + +func buildFMSketch(sc *stmtctx.StatementContext, values []types.Datum, maxSize int) (*FMSketch, int64, error) { + s := NewFMSketch(maxSize) + for _, value := range values { + err := s.InsertValue(sc, value) + if err != nil { + return nil, 0, errors.Trace(err) + } + } + return s, s.NDV(), nil +} + +func (s *FMSketch) mergeFMSketch(rs *FMSketch) { + if s.mask < rs.mask { + s.mask = rs.mask + for key := range s.hashset { + if (key & s.mask) != 0 { + delete(s.hashset, key) + } + } + } + for key := range rs.hashset { + s.insertHashValue(key) + } +} + +// FMSketchToProto converts FMSketch to its protobuf representation. +func FMSketchToProto(s *FMSketch) *tipb.FMSketch { + protoSketch := new(tipb.FMSketch) + protoSketch.Mask = s.mask + for val := range s.hashset { + protoSketch.Hashset = append(protoSketch.Hashset, val) + } + return protoSketch +} + +// FMSketchFromProto converts FMSketch from its protobuf representation. +func FMSketchFromProto(protoSketch *tipb.FMSketch) *FMSketch { + sketch := &FMSketch{ + hashset: make(map[uint64]bool), + mask: protoSketch.Mask, + } + for _, val := range protoSketch.Hashset { + sketch.hashset[val] = true + } + return sketch +} diff --git a/statistics/fmsketch_test.go b/statistics/fmsketch_test.go new file mode 100644 index 0000000..fe6a384 --- /dev/null +++ b/statistics/fmsketch_test.go @@ -0,0 +1,76 @@ +// Copyright 2017 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package statistics + +import ( + "time" + + . 
"github.com/pingcap/check" + "github.com/pingcap/tidb/sessionctx/stmtctx" + "github.com/pingcap/tidb/types" +) + +// extractSampleItemsDatums is for test purpose only to extract Datum slice +// from SampleItem slice. +func extractSampleItemsDatums(items []*SampleItem) []types.Datum { + datums := make([]types.Datum, len(items)) + for i, item := range items { + datums[i] = item.Value + } + return datums +} + +func (s *testStatisticsSuite) TestSketch(c *C) { + sc := &stmtctx.StatementContext{TimeZone: time.Local} + maxSize := 1000 + sampleSketch, ndv, err := buildFMSketch(sc, extractSampleItemsDatums(s.samples), maxSize) + c.Check(err, IsNil) + c.Check(ndv, Equals, int64(6232)) + + rcSketch, ndv, err := buildFMSketch(sc, s.rc.(*recordSet).data, maxSize) + c.Check(err, IsNil) + c.Check(ndv, Equals, int64(73344)) + + pkSketch, ndv, err := buildFMSketch(sc, s.pk.(*recordSet).data, maxSize) + c.Check(err, IsNil) + c.Check(ndv, Equals, int64(100480)) + + sampleSketch.mergeFMSketch(pkSketch) + sampleSketch.mergeFMSketch(rcSketch) + c.Check(sampleSketch.NDV(), Equals, int64(100480)) + + maxSize = 2 + sketch := NewFMSketch(maxSize) + sketch.insertHashValue(1) + sketch.insertHashValue(2) + c.Check(len(sketch.hashset), Equals, maxSize) + sketch.insertHashValue(4) + c.Check(len(sketch.hashset), LessEqual, maxSize) +} + +func (s *testStatisticsSuite) TestSketchProtoConversion(c *C) { + sc := &stmtctx.StatementContext{TimeZone: time.Local} + maxSize := 1000 + sampleSketch, ndv, err := buildFMSketch(sc, extractSampleItemsDatums(s.samples), maxSize) + c.Check(err, IsNil) + c.Check(ndv, Equals, int64(6232)) + + p := FMSketchToProto(sampleSketch) + f := FMSketchFromProto(p) + c.Assert(sampleSketch.mask, Equals, f.mask) + c.Assert(len(sampleSketch.hashset), Equals, len(f.hashset)) + for val := range sampleSketch.hashset { + c.Assert(f.hashset[val], IsTrue) + } +} diff --git a/statistics/handle.go b/statistics/handle.go new file mode 100644 index 0000000..73ee0eb --- /dev/null +++ b/statistics/handle.go @@ -0,0 +1,454 @@ +// Copyright 2017 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package statistics + +import ( + "context" + "fmt" + "sync" + "sync/atomic" + "time" + + "github.com/pingcap/errors" + "github.com/pingcap/tidb/infoschema" + "github.com/pingcap/tidb/parser/model" + "github.com/pingcap/tidb/parser/mysql" + "github.com/pingcap/tidb/parser/terror" + "github.com/pingcap/tidb/sessionctx" + "github.com/pingcap/tidb/sessionctx/stmtctx" + "github.com/pingcap/tidb/store/tikv/oracle" + "github.com/pingcap/tidb/types" + "github.com/pingcap/tidb/util/chunk" + "github.com/pingcap/tidb/util/logutil" + "github.com/pingcap/tidb/util/sqlexec" + atomic2 "go.uber.org/atomic" + "go.uber.org/zap" +) + +// statsCache caches the tables in memory for Handle. +type statsCache struct { + tables map[int64]*Table + // version is the latest version of cache. + version uint64 +} + +// Handle can update stats info periodically. +type Handle struct { + mu struct { + sync.Mutex + ctx sessionctx.Context + // pid2tid is the map from partition ID to table ID. 
+		pid2tid map[int64]int64
+		// schemaVersion is the version of information schema when `pid2tid` is built.
+		schemaVersion int64
+	}
+
+	// It can be read by multiple readers at the same time without acquiring the lock, but it can
+	// only be written while holding the lock.
+	statsCache struct {
+		sync.Mutex
+		atomic.Value
+	}
+
+	restrictedExec sqlexec.RestrictedSQLExecutor
+
+	lease atomic2.Duration
+}
+
+// Clear clears the statsCache; it is only used in tests.
+func (h *Handle) Clear() {
+	h.mu.Lock()
+	h.statsCache.Store(statsCache{tables: make(map[int64]*Table)})
+	h.mu.ctx.GetSessionVars().InitChunkSize = 1
+	h.mu.ctx.GetSessionVars().MaxChunkSize = 1
+	h.mu.ctx.GetSessionVars().ProjectionConcurrency = 0
+	h.mu.Unlock()
+}
+
+// NewHandle creates a Handle for updating stats.
+func NewHandle(ctx sessionctx.Context, lease time.Duration) *Handle {
+	handle := &Handle{}
+	handle.lease.Store(lease)
+	// It is safe to use it concurrently because the exec won't touch the ctx.
+	if exec, ok := ctx.(sqlexec.RestrictedSQLExecutor); ok {
+		handle.restrictedExec = exec
+	}
+	handle.mu.ctx = ctx
+	handle.statsCache.Store(statsCache{tables: make(map[int64]*Table)})
+	return handle
+}
+
+// Lease returns the stats lease.
+func (h *Handle) Lease() time.Duration {
+	return h.lease.Load()
+}
+
+// SetLease sets the stats lease.
+func (h *Handle) SetLease(lease time.Duration) {
+	h.lease.Store(lease)
+}
+
+// DurationToTS converts a duration to a timestamp.
+func DurationToTS(d time.Duration) uint64 {
+	return oracle.ComposeTS(d.Nanoseconds()/int64(time.Millisecond), 0)
+}
+
+// Update reads stats meta from store and updates the stats map.
+func (h *Handle) Update(is infoschema.InfoSchema) error {
+	oldCache := h.statsCache.Load().(statsCache)
+	lastVersion := oldCache.version
+	// We need this because, for two tables, the one with the smaller version may be committed later than the one with the larger version.
+	// Consider the case that there are two tables A and B, whose versions and commit times are (A0, A1) and (B0, B1),
+	// and A0 < B0 < B1 < A1. We will first read the stats of B and update lastVersion to B0, but then we could not read
+	// the table stats of A if we only read stats with versions greater than lastVersion, which is B0.
+	// We can read the stats as long as the difference between the commit time and the version is less than three leases.
+	offset := DurationToTS(3 * h.Lease())
+	if oldCache.version >= offset {
+		lastVersion = lastVersion - offset
+	} else {
+		lastVersion = 0
+	}
+	sql := fmt.Sprintf("SELECT version, table_id, modify_count, count from mysql.stats_meta where version > %d order by version", lastVersion)
+	rows, _, err := h.restrictedExec.ExecRestrictedSQL(sql)
+	if err != nil {
+		return errors.Trace(err)
+	}
+
+	tables := make([]*Table, 0, len(rows))
+	deletedTableIDs := make([]int64, 0, len(rows))
+	for _, row := range rows {
+		version := row.GetUint64(0)
+		physicalID := row.GetInt64(1)
+		modifyCount := row.GetInt64(2)
+		count := row.GetInt64(3)
+		lastVersion = version
+		table, ok := is.TableByID(physicalID)
+		if !ok {
+			logutil.BgLogger().Debug("unknown physical ID in stats meta table, maybe it has been dropped", zap.Int64("ID", physicalID))
+			deletedTableIDs = append(deletedTableIDs, physicalID)
+			continue
+		}
+		tableInfo := table.Meta()
+		tbl, err := h.tableStatsFromStorage(tableInfo, physicalID)
+		// A non-nil error may mean that there are DDL changes on this table, so we do not update its stats.
+ if err != nil { + logutil.BgLogger().Debug("error occurred when read table stats", zap.String("table", tableInfo.Name.O), zap.Error(err)) + continue + } + if tbl == nil { + deletedTableIDs = append(deletedTableIDs, physicalID) + continue + } + tbl.Version = version + tbl.Count = count + tbl.ModifyCount = modifyCount + tbl.Name = getFullTableName(is, tableInfo) + tables = append(tables, tbl) + } + h.updateStatsCache(oldCache.update(tables, deletedTableIDs, lastVersion)) + return nil +} + +func getFullTableName(is infoschema.InfoSchema, tblInfo *model.TableInfo) string { + for _, schema := range is.AllSchemas() { + if t, err := is.TableByName(schema.Name, tblInfo.Name); err == nil { + if t.Meta().ID == tblInfo.ID { + return schema.Name.O + "." + tblInfo.Name.O + } + } + } + return fmt.Sprintf("%d", tblInfo.ID) +} + +// GetTableStats retrieves the statistics table from cache, and the cache will be updated by a goroutine. +func (h *Handle) GetTableStats(tblInfo *model.TableInfo) *Table { + return h.GetPartitionStats(tblInfo, tblInfo.ID) +} + +// GetPartitionStats retrieves the partition stats from cache. +func (h *Handle) GetPartitionStats(tblInfo *model.TableInfo, pid int64) *Table { + statsCache := h.statsCache.Load().(statsCache) + tbl, ok := statsCache.tables[pid] + if !ok { + tbl = PseudoTable(tblInfo) + tbl.PhysicalID = pid + h.updateStatsCache(statsCache.update([]*Table{tbl}, nil, statsCache.version)) + return tbl + } + return tbl +} + +func (h *Handle) updateStatsCache(newCache statsCache) { + h.statsCache.Lock() + oldCache := h.statsCache.Load().(statsCache) + if oldCache.version <= newCache.version { + h.statsCache.Store(newCache) + } + h.statsCache.Unlock() +} + +func (sc statsCache) copy() statsCache { + newCache := statsCache{tables: make(map[int64]*Table, len(sc.tables)), version: sc.version} + for k, v := range sc.tables { + newCache.tables[k] = v + } + return newCache +} + +// update updates the statistics table cache using copy on write. 
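+// Readers that already loaded the old statsCache keep seeing a consistent snapshot;
+// the caller is expected to publish the returned cache, for example via
+//   h.updateStatsCache(oldCache.update(tables, deletedIDs, version))
+// as Update does above.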
+func (sc statsCache) update(tables []*Table, deletedIDs []int64, newVersion uint64) statsCache { + newCache := sc.copy() + newCache.version = newVersion + for _, tbl := range tables { + id := tbl.PhysicalID + newCache.tables[id] = tbl + } + for _, id := range deletedIDs { + delete(newCache.tables, id) + } + return newCache +} + +func (h *Handle) cmSketchFromStorage(tblID int64, isIndex, histID int64) (_ *CMSketch, err error) { + selSQL := fmt.Sprintf("select cm_sketch from mysql.stats_histograms where table_id = %d and is_index = %d and hist_id = %d", tblID, isIndex, histID) + rows, _, err := h.restrictedExec.ExecRestrictedSQL(selSQL) + if err != nil || len(rows) == 0 { + return nil, err + } + return DecodeCMSketch(rows[0].GetBytes(0)) +} + +func (h *Handle) indexStatsFromStorage(row chunk.Row, table *Table, tableInfo *model.TableInfo) error { + histID := row.GetInt64(2) + distinct := row.GetInt64(3) + histVer := row.GetUint64(4) + nullCount := row.GetInt64(5) + idx := table.Indices[histID] + for _, idxInfo := range tableInfo.Indices { + if histID != idxInfo.ID { + continue + } + if idx == nil || idx.LastUpdateVersion < histVer { + hg, err := h.histogramFromStorage(table.PhysicalID, histID, types.NewFieldType(mysql.TypeBlob), distinct, 1, histVer, nullCount, 0) + if err != nil { + return errors.Trace(err) + } + cms, err := h.cmSketchFromStorage(table.PhysicalID, 1, idxInfo.ID) + if err != nil { + return errors.Trace(err) + } + idx = &Index{Histogram: *hg, CMSketch: cms, Info: idxInfo} + } + break + } + if idx != nil { + table.Indices[histID] = idx + } else { + logutil.BgLogger().Debug("we cannot find index id in table info. It may be deleted.", zap.Int64("indexID", histID), zap.String("table", tableInfo.Name.O)) + } + return nil +} + +func (h *Handle) columnStatsFromStorage(row chunk.Row, table *Table, tableInfo *model.TableInfo) error { + histID := row.GetInt64(2) + distinct := row.GetInt64(3) + histVer := row.GetUint64(4) + nullCount := row.GetInt64(5) + totColSize := row.GetInt64(6) + col := table.Columns[histID] + for _, colInfo := range tableInfo.Columns { + if histID != colInfo.ID { + continue + } + if col == nil || col.LastUpdateVersion < histVer { + hg, err := h.histogramFromStorage(table.PhysicalID, histID, &colInfo.FieldType, distinct, 0, histVer, nullCount, totColSize) + if err != nil { + return errors.Trace(err) + } + cms, err := h.cmSketchFromStorage(table.PhysicalID, 0, colInfo.ID) + if err != nil { + return errors.Trace(err) + } + col = &Column{ + PhysicalID: table.PhysicalID, + Histogram: *hg, + Info: colInfo, + CMSketch: cms, + Count: int64(hg.TotalRowCount()), + IsHandle: tableInfo.PKIsHandle && mysql.HasPriKeyFlag(colInfo.Flag), + } + break + } + if col.TotColSize != totColSize { + newCol := *col + newCol.TotColSize = totColSize + col = &newCol + } + break + } + if col != nil { + table.Columns[col.ID] = col + } else { + // If we didn't find a Column or Index in tableInfo, we won't load the histogram for it. + // But don't worry, next lease the ddl will be updated, and we will load a same table for two times to + // avoid error. + logutil.BgLogger().Debug("we cannot find column in table info now. It may be deleted", zap.Int64("colID", histID), zap.String("table", tableInfo.Name.O)) + } + return nil +} + +// tableStatsFromStorage loads table stats info from storage. 
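+// It returns a nil table with a nil error when the query fails or no histogram rows
+// exist for the physical ID; Update treats a nil table as a deleted one.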
+func (h *Handle) tableStatsFromStorage(tableInfo *model.TableInfo, physicalID int64) (_ *Table, err error) { + table, ok := h.statsCache.Load().(statsCache).tables[physicalID] + // If table stats is pseudo, we also need to copy it, since we will use the column stats when + // the average error rate of it is small. + if !ok { + histColl := HistColl{ + PhysicalID: physicalID, + HavePhysicalID: true, + Columns: make(map[int64]*Column, len(tableInfo.Columns)), + Indices: make(map[int64]*Index, len(tableInfo.Indices)), + } + table = &Table{ + HistColl: histColl, + } + } else { + // We copy it before writing to avoid race. + table = table.Copy() + } + table.Pseudo = false + selSQL := fmt.Sprintf("select table_id, is_index, hist_id, distinct_count, version, null_count, tot_col_size, stats_ver, flag, correlation, last_analyze_pos from mysql.stats_histograms where table_id = %d", physicalID) + rows, _, err := h.restrictedExec.ExecRestrictedSQL(selSQL) + // Check deleted table. + if err != nil || len(rows) == 0 { + return nil, nil + } + for _, row := range rows { + if row.GetInt64(1) > 0 { + err = h.indexStatsFromStorage(row, table, tableInfo) + } else { + err = h.columnStatsFromStorage(row, table, tableInfo) + } + if err != nil { + return nil, err + } + } + return table, nil +} + +// SaveStatsToStorage saves the stats to storage. +func (h *Handle) SaveStatsToStorage(tableID int64, count int64, isIndex int, hg *Histogram, cms *CMSketch) (err error) { + h.mu.Lock() + defer h.mu.Unlock() + ctx := context.TODO() + exec := h.mu.ctx.(sqlexec.SQLExecutor) + _, err = exec.Execute(ctx, "begin") + if err != nil { + return errors.Trace(err) + } + defer func() { + err = finishTransaction(context.Background(), exec, err) + }() + txn, err := h.mu.ctx.Txn(true) + if err != nil { + return errors.Trace(err) + } + + version := txn.StartTS() + sqls := make([]string, 0, 4) + sqls = append(sqls, fmt.Sprintf("replace into mysql.stats_meta (version, table_id, count) values (%d, %d, %d)", version, tableID, count)) + data, err := EncodeCMSketch(cms) + if err != nil { + return + } + sqls = append(sqls, fmt.Sprintf("replace into mysql.stats_histograms (table_id, is_index, hist_id, distinct_count, version, null_count, cm_sketch, tot_col_size, stats_ver, flag) values (%d, %d, %d, %d, %d, %d, X'%X', %d, %d, %d)", + tableID, isIndex, hg.ID, hg.NDV, version, hg.NullCount, data, hg.TotColSize, 0, 0)) + sqls = append(sqls, fmt.Sprintf("delete from mysql.stats_buckets where table_id = %d and is_index = %d and hist_id = %d", tableID, isIndex, hg.ID)) + sc := h.mu.ctx.GetSessionVars().StmtCtx + for i := range hg.Buckets { + count := hg.Buckets[i].Count + if i > 0 { + count -= hg.Buckets[i-1].Count + } + var upperBound types.Datum + upperBound, err = hg.GetUpper(i).ConvertTo(sc, types.NewFieldType(mysql.TypeBlob)) + if err != nil { + return + } + var lowerBound types.Datum + lowerBound, err = hg.GetLower(i).ConvertTo(sc, types.NewFieldType(mysql.TypeBlob)) + if err != nil { + return + } + sqls = append(sqls, fmt.Sprintf("insert into mysql.stats_buckets(table_id, is_index, hist_id, bucket_id, count, repeats, lower_bound, upper_bound) values(%d, %d, %d, %d, %d, %d, X'%X', X'%X')", tableID, isIndex, hg.ID, i, count, hg.Buckets[i].Repeat, lowerBound.GetBytes(), upperBound.GetBytes())) + } + return execSQLs(context.Background(), exec, sqls) +} + +// finishTransaction will execute `commit` when error is nil, otherwise `rollback`. 
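+// It is intended to be deferred with a named error result, as in SaveStatsToStorage:
+//   defer func() { err = finishTransaction(context.Background(), exec, err) }()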
+func finishTransaction(ctx context.Context, exec sqlexec.SQLExecutor, err error) error { + if err == nil { + _, err = exec.Execute(ctx, "commit") + } else { + _, err1 := exec.Execute(ctx, "rollback") + terror.Log(errors.Trace(err1)) + } + return errors.Trace(err) +} + +func execSQLs(ctx context.Context, exec sqlexec.SQLExecutor, sqls []string) error { + for _, sql := range sqls { + _, err := exec.Execute(ctx, sql) + if err != nil { + return err + } + } + return nil +} + +func (h *Handle) histogramFromStorage(tableID int64, colID int64, tp *types.FieldType, distinct int64, isIndex int, ver uint64, nullCount int64, totColSize int64) (_ *Histogram, err error) { + selSQL := fmt.Sprintf("select count, repeats, lower_bound, upper_bound from mysql.stats_buckets where table_id = %d and is_index = %d and hist_id = %d order by bucket_id", tableID, isIndex, colID) + rows, fields, err := h.restrictedExec.ExecRestrictedSQL(selSQL) + if err != nil { + return nil, errors.Trace(err) + } + bucketSize := len(rows) + hg := NewHistogram(colID, distinct, nullCount, ver, tp, bucketSize, totColSize) + totalCount := int64(0) + for i := 0; i < bucketSize; i++ { + count := rows[i].GetInt64(0) + repeats := rows[i].GetInt64(1) + var upperBound, lowerBound types.Datum + if isIndex == 1 { + lowerBound = rows[i].GetDatum(2, &fields[2].Column.FieldType) + upperBound = rows[i].GetDatum(3, &fields[3].Column.FieldType) + } else { + sc := &stmtctx.StatementContext{TimeZone: time.UTC} + d := rows[i].GetDatum(2, &fields[2].Column.FieldType) + lowerBound, err = d.ConvertTo(sc, tp) + if err != nil { + return nil, errors.Trace(err) + } + d = rows[i].GetDatum(3, &fields[3].Column.FieldType) + upperBound, err = d.ConvertTo(sc, tp) + if err != nil { + return nil, errors.Trace(err) + } + } + totalCount += count + hg.AppendBucket(&lowerBound, &upperBound, totalCount, repeats) + } + hg.PreCalculateScalar() + return hg, nil +} diff --git a/statistics/handle_test.go b/statistics/handle_test.go new file mode 100644 index 0000000..7196d9f --- /dev/null +++ b/statistics/handle_test.go @@ -0,0 +1,212 @@ +// Copyright 2017 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package statistics_test + +import ( + "fmt" + "time" + "unsafe" + + . 
"github.com/pingcap/check" + "github.com/pingcap/tidb/parser/model" + "github.com/pingcap/tidb/sessionctx/stmtctx" + "github.com/pingcap/tidb/statistics" + "github.com/pingcap/tidb/store/tikv/oracle" + "github.com/pingcap/tidb/types" + "github.com/pingcap/tidb/util/testkit" +) + +func (s *testStatsSuite) TestStatsCache(c *C) { + defer cleanEnv(c, s.store, s.do) + testKit := testkit.NewTestKit(c, s.store) + testKit.MustExec("use test") + testKit.MustExec("create table t (c1 int, c2 int)") + testKit.MustExec("insert into t values(1, 2)") + do := s.do + is := do.InfoSchema() + tbl, err := is.TableByName(model.NewCIStr("test"), model.NewCIStr("t")) + c.Assert(err, IsNil) + tableInfo := tbl.Meta() + statsTbl := do.StatsHandle().GetTableStats(tableInfo) + c.Assert(statsTbl.Pseudo, IsTrue) + testKit.MustExec("analyze table t") + statsTbl = do.StatsHandle().GetTableStats(tableInfo) + c.Assert(statsTbl.Pseudo, IsFalse) + testKit.MustExec("create index idx_t on t(c1)") + do.InfoSchema() + statsTbl = do.StatsHandle().GetTableStats(tableInfo) + // If index is build, but stats is not updated. statsTbl can also work. + c.Assert(statsTbl.Pseudo, IsFalse) + // But the added index will not work. + c.Assert(statsTbl.Indices[int64(1)], IsNil) + + testKit.MustExec("analyze table t") + statsTbl = do.StatsHandle().GetTableStats(tableInfo) + c.Assert(statsTbl.Pseudo, IsFalse) + // If the new schema drop a column, the table stats can still work. + testKit.MustExec("alter table t drop column c2") + is = do.InfoSchema() + do.StatsHandle().Clear() + do.StatsHandle().Update(is) + statsTbl = do.StatsHandle().GetTableStats(tableInfo) + c.Assert(statsTbl.Pseudo, IsFalse) + + // If the new schema add a column, the table stats can still work. + testKit.MustExec("alter table t add column c10 int") + is = do.InfoSchema() + + do.StatsHandle().Clear() + do.StatsHandle().Update(is) + statsTbl = do.StatsHandle().GetTableStats(tableInfo) + c.Assert(statsTbl.Pseudo, IsFalse) +} + +func assertTableEqual(c *C, a *statistics.Table, b *statistics.Table) { + c.Assert(a.Count, Equals, b.Count) + c.Assert(a.ModifyCount, Equals, b.ModifyCount) + c.Assert(len(a.Columns), Equals, len(b.Columns)) + for i := range a.Columns { + c.Assert(a.Columns[i].Count, Equals, b.Columns[i].Count) + c.Assert(statistics.HistogramEqual(&a.Columns[i].Histogram, &b.Columns[i].Histogram, false), IsTrue) + if a.Columns[i].CMSketch == nil { + c.Assert(b.Columns[i].CMSketch, IsNil) + } else { + c.Assert(a.Columns[i].CMSketch.Equal(b.Columns[i].CMSketch), IsTrue) + } + } + c.Assert(len(a.Indices), Equals, len(b.Indices)) + for i := range a.Indices { + c.Assert(statistics.HistogramEqual(&a.Indices[i].Histogram, &b.Indices[i].Histogram, false), IsTrue) + if a.Columns[i].CMSketch == nil { + c.Assert(b.Columns[i].CMSketch, IsNil) + } else { + c.Assert(a.Columns[i].CMSketch.Equal(b.Columns[i].CMSketch), IsTrue) + } + } +} + +func (s *testStatsSuite) TestStatsStoreAndLoad(c *C) { + defer cleanEnv(c, s.store, s.do) + testKit := testkit.NewTestKit(c, s.store) + testKit.MustExec("use test") + testKit.MustExec("create table t (c1 int, c2 int)") + recordCount := 1000 + for i := 0; i < recordCount; i++ { + testKit.MustExec(fmt.Sprintf("insert into t values (%d, %d)", i, i+1)) + } + testKit.MustExec("create index idx_t on t(c2)") + do := s.do + is := do.InfoSchema() + tbl, err := is.TableByName(model.NewCIStr("test"), model.NewCIStr("t")) + c.Assert(err, IsNil) + tableInfo := tbl.Meta() + + testKit.MustExec("analyze table t") + statsTbl1 := 
do.StatsHandle().GetTableStats(tableInfo) + + do.StatsHandle().Clear() + do.StatsHandle().Update(is) + statsTbl2 := do.StatsHandle().GetTableStats(tableInfo) + c.Assert(statsTbl2.Pseudo, IsFalse) + c.Assert(statsTbl2.Count, Equals, int64(recordCount)) + assertTableEqual(c, statsTbl1, statsTbl2) +} + +func (s *testStatsSuite) TestEmptyTable(c *C) { + defer cleanEnv(c, s.store, s.do) + testKit := testkit.NewTestKit(c, s.store) + testKit.MustExec("use test") + testKit.MustExec("create table t (c1 int, c2 int, key cc1(c1), key cc2(c2))") + testKit.MustExec("analyze table t") + do := s.do + is := do.InfoSchema() + tbl, err := is.TableByName(model.NewCIStr("test"), model.NewCIStr("t")) + c.Assert(err, IsNil) + tableInfo := tbl.Meta() + statsTbl := do.StatsHandle().GetTableStats(tableInfo) + sc := new(stmtctx.StatementContext) + count := statsTbl.ColumnGreaterRowCount(sc, types.NewDatum(1), tableInfo.Columns[0].ID) + c.Assert(count, Equals, 0.0) +} + +func (s *testStatsSuite) TestColumnIDs(c *C) { + defer cleanEnv(c, s.store, s.do) + testKit := testkit.NewTestKit(c, s.store) + testKit.MustExec("use test") + testKit.MustExec("create table t (c1 int, c2 int)") + testKit.MustExec("insert into t values(1, 2)") + testKit.MustExec("analyze table t") + do := s.do + is := do.InfoSchema() + tbl, err := is.TableByName(model.NewCIStr("test"), model.NewCIStr("t")) + c.Assert(err, IsNil) + tableInfo := tbl.Meta() + statsTbl := do.StatsHandle().GetTableStats(tableInfo) + sc := new(stmtctx.StatementContext) + count := statsTbl.ColumnLessRowCount(sc, types.NewDatum(2), tableInfo.Columns[0].ID) + c.Assert(count, Equals, float64(1)) + + // Drop a column and the offset changed, + testKit.MustExec("alter table t drop column c1") + is = do.InfoSchema() + do.StatsHandle().Clear() + do.StatsHandle().Update(is) + tbl, err = is.TableByName(model.NewCIStr("test"), model.NewCIStr("t")) + c.Assert(err, IsNil) + tableInfo = tbl.Meta() + statsTbl = do.StatsHandle().GetTableStats(tableInfo) + // At that time, we should get c2's stats instead of c1's. 
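+	// The remaining row's c2 value is 2, so the expected row count for values less than 2 is 0.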
+ count = statsTbl.ColumnLessRowCount(sc, types.NewDatum(2), tableInfo.Columns[0].ID) + c.Assert(count, Equals, 0.0) +} + +func (s *testStatsSuite) TestAvgColLen(c *C) { + defer cleanEnv(c, s.store, s.do) + testKit := testkit.NewTestKit(c, s.store) + testKit.MustExec("use test") + testKit.MustExec("create table t (c1 int, c2 varchar(100), c3 float)") + testKit.MustExec("insert into t values(1, '1234567', 12.3)") + testKit.MustExec("analyze table t") + do := s.do + is := do.InfoSchema() + tbl, err := is.TableByName(model.NewCIStr("test"), model.NewCIStr("t")) + c.Assert(err, IsNil) + tableInfo := tbl.Meta() + statsTbl := do.StatsHandle().GetTableStats(tableInfo) + c.Assert(statsTbl.Columns[tableInfo.Columns[0].ID].AvgColSize(statsTbl.Count, false), Equals, 1.0) + c.Assert(statsTbl.Columns[tableInfo.Columns[0].ID].AvgColSizeListInDisk(statsTbl.Count), Equals, 8.0) + + // The size of varchar type is LEN + BYTE, here is 1 + 7 = 8 + c.Assert(statsTbl.Columns[tableInfo.Columns[1].ID].AvgColSize(statsTbl.Count, false), Equals, 8.0) + c.Assert(statsTbl.Columns[tableInfo.Columns[2].ID].AvgColSize(statsTbl.Count, false), Equals, 8.0) + c.Assert(statsTbl.Columns[tableInfo.Columns[1].ID].AvgColSizeListInDisk(statsTbl.Count), Equals, 8.0-1) + c.Assert(statsTbl.Columns[tableInfo.Columns[2].ID].AvgColSizeListInDisk(statsTbl.Count), Equals, float64(unsafe.Sizeof(float32(12.3)))) + testKit.MustExec("insert into t values(132, '123456789112', 1232.3)") + testKit.MustExec("analyze table t") + statsTbl = do.StatsHandle().GetTableStats(tableInfo) + c.Assert(statsTbl.Columns[tableInfo.Columns[0].ID].AvgColSize(statsTbl.Count, false), Equals, 1.5) + c.Assert(statsTbl.Columns[tableInfo.Columns[1].ID].AvgColSize(statsTbl.Count, false), Equals, 10.5) + c.Assert(statsTbl.Columns[tableInfo.Columns[2].ID].AvgColSize(statsTbl.Count, false), Equals, 8.0) + c.Assert(statsTbl.Columns[tableInfo.Columns[0].ID].AvgColSizeListInDisk(statsTbl.Count), Equals, 8.0) + c.Assert(statsTbl.Columns[tableInfo.Columns[1].ID].AvgColSizeListInDisk(statsTbl.Count), Equals, 10.5-1) + c.Assert(statsTbl.Columns[tableInfo.Columns[2].ID].AvgColSizeListInDisk(statsTbl.Count), Equals, float64(unsafe.Sizeof(float32(12.3)))) +} + +func (s *testStatsSuite) TestDurationToTS(c *C) { + tests := []time.Duration{time.Millisecond, time.Second, time.Minute, time.Hour} + for _, t := range tests { + ts := statistics.DurationToTS(t) + c.Assert(oracle.ExtractPhysical(ts)*int64(time.Millisecond), Equals, int64(t)) + } +} diff --git a/statistics/histogram.go b/statistics/histogram.go new file mode 100644 index 0000000..5addf13 --- /dev/null +++ b/statistics/histogram.go @@ -0,0 +1,732 @@ +// Copyright 2017 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package statistics + +import ( + "bytes" + "fmt" + "math" + "strings" + "time" + + "github.com/pingcap/errors" + "github.com/pingcap/tidb/kv" + "github.com/pingcap/tidb/parser/model" + "github.com/pingcap/tidb/parser/mysql" + "github.com/pingcap/tidb/parser/terror" + "github.com/pingcap/tidb/sessionctx/stmtctx" + "github.com/pingcap/tidb/tablecodec" + "github.com/pingcap/tidb/types" + "github.com/pingcap/tidb/util/chunk" + "github.com/pingcap/tidb/util/codec" + "github.com/pingcap/tidb/util/ranger" + "github.com/pingcap/tipb/go-tipb" +) + +// Histogram represents statistics for a column or index. +type Histogram struct { + ID int64 // Column ID. + NDV int64 // Number of distinct values. + NullCount int64 // Number of null values. + // LastUpdateVersion is the version that this histogram updated last time. + LastUpdateVersion uint64 + + Tp *types.FieldType + + // Histogram elements. + // + // A bucket bound is the smallest and greatest values stored in the bucket. The lower and upper bound + // are stored in one column. + // + // A bucket count is the number of items stored in all previous buckets and the current bucket. + // Bucket counts are always in increasing order. + // + // A bucket repeat is the number of repeats of the bucket value, it can be used to find popular values. + Bounds *chunk.Chunk + Buckets []Bucket + + // Used for estimating fraction of the interval [lower, upper] that lies within the [lower, value]. + // For some types like `Int`, we do not build it because we can get them directly from `Bounds`. + scalars []scalar + // TotColSize is the total column size for the histogram. + TotColSize int64 +} + +// Bucket store the bucket count and repeat. +type Bucket struct { + Count int64 + Repeat int64 +} + +type scalar struct { + lower float64 + upper float64 + commonPfxLen int // commonPfxLen is the common prefix length of the lower bound and upper bound when the value type is KindString or KindBytes. +} + +// NewHistogram creates a new histogram. +func NewHistogram(id, ndv, nullCount int64, version uint64, tp *types.FieldType, bucketSize int, totColSize int64) *Histogram { + return &Histogram{ + ID: id, + NDV: ndv, + NullCount: nullCount, + LastUpdateVersion: version, + Tp: tp, + Bounds: chunk.NewChunkWithCapacity([]*types.FieldType{tp}, 2*bucketSize), + Buckets: make([]Bucket, 0, bucketSize), + TotColSize: totColSize, + } +} + +// GetLower gets the lower bound of bucket `idx`. +func (hg *Histogram) GetLower(idx int) *types.Datum { + d := hg.Bounds.GetRow(2*idx).GetDatum(0, hg.Tp) + return &d +} + +// GetUpper gets the upper bound of bucket `idx`. +func (hg *Histogram) GetUpper(idx int) *types.Datum { + d := hg.Bounds.GetRow(2*idx+1).GetDatum(0, hg.Tp) + return &d +} + +// AvgColSize is the average column size of the histogram. These sizes are derived from function `encode` +// and `Datum::ConvertTo`, so we need to update them if those 2 functions are changed. +func (c *Column) AvgColSize(count int64, isKey bool) float64 { + if count == 0 { + return 0 + } + // Note that, if the handle column is encoded as value, instead of key, i.e, + // when the handle column is in a unique index, the real column size may be + // smaller than 8 because it is encoded using `EncodeVarint`. Since we don't + // know the exact value size now, use 8 as approximation. 
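+	// Illustrative numbers for the non-handle paths below (not from the original
+	// patch): with 4 rows, one of them NULL, a datetime column is estimated as
+	// 8 * 0.75 = 6 bytes, while a varchar column with TotColSize = 30 comes out
+	// as round(30/4*100)/100 = 7.5 bytes.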
+ if c.IsHandle { + return 8 + } + histCount := c.TotalRowCount() + notNullRatio := 1.0 + if histCount > 0 { + notNullRatio = 1.0 - float64(c.NullCount)/histCount + } + switch c.Histogram.Tp.Tp { + case mysql.TypeFloat, mysql.TypeDouble, mysql.TypeDuration, mysql.TypeDate, mysql.TypeDatetime, mysql.TypeTimestamp: + return 8 * notNullRatio + case mysql.TypeTiny, mysql.TypeShort, mysql.TypeInt24, mysql.TypeLong, mysql.TypeLonglong, mysql.TypeYear, mysql.TypeEnum, mysql.TypeBit, mysql.TypeSet: + if isKey { + return 8 * notNullRatio + } + } + // Keep two decimal place. + return math.Round(float64(c.TotColSize)/float64(count)*100) / 100 +} + +// AvgColSizeListInDisk is the average column size of the histogram. These sizes are derived +// from `chunk.ListInDisk` so we need to update them if those 2 functions are changed. +func (c *Column) AvgColSizeListInDisk(count int64) float64 { + if count == 0 { + return 0 + } + histCount := c.TotalRowCount() + notNullRatio := 1.0 + if histCount > 0 { + notNullRatio = 1.0 - float64(c.NullCount)/histCount + } + size := chunk.GetFixedLen(c.Histogram.Tp) + if size != -1 { + return float64(size) * notNullRatio + } + // Keep two decimal place. + // size of varchar type is LEN + BYTE, so we minus 1 here. + return math.Round(float64(c.TotColSize)/float64(count)*100)/100 - 1 +} + +// AppendBucket appends a bucket into `hg`. +func (hg *Histogram) AppendBucket(lower *types.Datum, upper *types.Datum, count, repeat int64) { + hg.Buckets = append(hg.Buckets, Bucket{Count: count, Repeat: repeat}) + hg.Bounds.AppendDatum(0, lower) + hg.Bounds.AppendDatum(0, upper) +} + +func (hg *Histogram) updateLastBucket(upper *types.Datum, count, repeat int64) { + len := hg.Len() + hg.Bounds.TruncateTo(2*len - 1) + hg.Bounds.AppendDatum(0, upper) + hg.Buckets[len-1] = Bucket{Count: count, Repeat: repeat} +} + +// DecodeTo decodes the histogram bucket values into `Tp`. +func (hg *Histogram) DecodeTo(tp *types.FieldType, timeZone *time.Location) error { + oldIter := chunk.NewIterator4Chunk(hg.Bounds) + hg.Bounds = chunk.NewChunkWithCapacity([]*types.FieldType{tp}, oldIter.Len()) + hg.Tp = tp + for row := oldIter.Begin(); row != oldIter.End(); row = oldIter.Next() { + datum, err := tablecodec.DecodeColumnValue(row.GetBytes(0), tp, timeZone) + if err != nil { + return errors.Trace(err) + } + hg.Bounds.AppendDatum(0, &datum) + } + return nil +} + +// ConvertTo converts the histogram bucket values into `Tp`. +func (hg *Histogram) ConvertTo(sc *stmtctx.StatementContext, tp *types.FieldType) (*Histogram, error) { + hist := NewHistogram(hg.ID, hg.NDV, hg.NullCount, hg.LastUpdateVersion, tp, hg.Len(), hg.TotColSize) + iter := chunk.NewIterator4Chunk(hg.Bounds) + for row := iter.Begin(); row != iter.End(); row = iter.Next() { + d := row.GetDatum(0, hg.Tp) + d, err := d.ConvertTo(sc, tp) + if err != nil { + return nil, errors.Trace(err) + } + hist.Bounds.AppendDatum(0, &d) + } + hist.Buckets = hg.Buckets + return hist, nil +} + +// Len is the number of buckets in the histogram. +func (hg *Histogram) Len() int { + return len(hg.Buckets) +} + +// HistogramEqual tests if two histograms are equal. +func HistogramEqual(a, b *Histogram, ignoreID bool) bool { + if ignoreID { + old := b.ID + b.ID = a.ID + defer func() { b.ID = old }() + } + return bytes.Equal([]byte(a.ToString(0)), []byte(b.ToString(0))) +} + +// ValueToString converts a possible encoded value to a formatted string. If the value is encoded, then +// idxCols equals to number of origin values, else idxCols is 0. 
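+// For example, for a two-column index the encoded bound decodes back into its two
+// original datums when idxCols is 2; a plain column histogram passes idxCols = 0
+// and the datum is formatted directly.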
+func ValueToString(value *types.Datum, idxCols int) (string, error) { + if idxCols == 0 { + return value.ToString() + } + // Ignore the error and treat remaining part that cannot decode successfully as bytes. + decodedVals, remained, err := codec.DecodeRange(value.GetBytes(), idxCols) + // Ignore err explicit to pass errcheck. + _ = err + if len(remained) > 0 { + decodedVals = append(decodedVals, types.NewBytesDatum(remained)) + } + str, err := types.DatumsToString(decodedVals, true) + return str, err +} + +// BucketToString change the given bucket to string format. +func (hg *Histogram) BucketToString(bktID, idxCols int) string { + upperVal, err := ValueToString(hg.GetUpper(bktID), idxCols) + terror.Log(errors.Trace(err)) + lowerVal, err := ValueToString(hg.GetLower(bktID), idxCols) + terror.Log(errors.Trace(err)) + return fmt.Sprintf("num: %d lower_bound: %s upper_bound: %s repeats: %d", hg.bucketCount(bktID), lowerVal, upperVal, hg.Buckets[bktID].Repeat) +} + +// ToString gets the string representation for the histogram. +func (hg *Histogram) ToString(idxCols int) string { + strs := make([]string, 0, hg.Len()+1) + if idxCols > 0 { + strs = append(strs, fmt.Sprintf("index:%d ndv:%d", hg.ID, hg.NDV)) + } else { + strs = append(strs, fmt.Sprintf("column:%d ndv:%d totColSize:%d", hg.ID, hg.NDV, hg.TotColSize)) + } + for i := 0; i < hg.Len(); i++ { + strs = append(strs, hg.BucketToString(i, idxCols)) + } + return strings.Join(strs, "\n") +} + +// equalRowCount estimates the row count where the column equals to value. +func (hg *Histogram) equalRowCount(value types.Datum) float64 { + index, match := hg.Bounds.LowerBound(0, &value) + // Since we store the lower and upper bound together, if the index is an odd number, then it points to a upper bound. + if index%2 == 1 { + if match { + return float64(hg.Buckets[index/2].Repeat) + } + return hg.notNullCount() / float64(hg.NDV) + } + if match { + cmp := chunk.GetCompareFunc(hg.Tp) + if cmp(hg.Bounds.GetRow(index), 0, hg.Bounds.GetRow(index+1), 0) == 0 { + return float64(hg.Buckets[index/2].Repeat) + } + return hg.notNullCount() / float64(hg.NDV) + } + return 0 +} + +// greaterRowCount estimates the row count where the column greater than value. +func (hg *Histogram) greaterRowCount(value types.Datum) float64 { + gtCount := hg.notNullCount() - hg.lessRowCount(value) - hg.equalRowCount(value) + return math.Max(0, gtCount) +} + +// LessRowCountWithBktIdx estimates the row count where the column less than value. +func (hg *Histogram) LessRowCountWithBktIdx(value types.Datum) (float64, int) { + // All the values are null. + if hg.Bounds.NumRows() == 0 { + return 0, 0 + } + index, match := hg.Bounds.LowerBound(0, &value) + if index == hg.Bounds.NumRows() { + return hg.notNullCount(), hg.Len() - 1 + } + // Since we store the lower and upper bound together, so dividing the index by 2 will get the bucket index. 
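+	// e.g. with two buckets Bounds holds [lower0, upper0, lower1, upper1], so a
+	// LowerBound index of 2 or 3 falls into bucket 1.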
+ bucketIdx := index / 2 + curCount, curRepeat := float64(hg.Buckets[bucketIdx].Count), float64(hg.Buckets[bucketIdx].Repeat) + preCount := float64(0) + if bucketIdx > 0 { + preCount = float64(hg.Buckets[bucketIdx-1].Count) + } + if index%2 == 1 { + if match { + return curCount - curRepeat, bucketIdx + } + return preCount + hg.calcFraction(bucketIdx, &value)*(curCount-curRepeat-preCount), bucketIdx + } + return preCount, bucketIdx +} + +func (hg *Histogram) lessRowCount(value types.Datum) float64 { + result, _ := hg.LessRowCountWithBktIdx(value) + return result +} + +// BetweenRowCount estimates the row count where column greater or equal to a and less than b. +func (hg *Histogram) BetweenRowCount(a, b types.Datum) float64 { + lessCountA := hg.lessRowCount(a) + lessCountB := hg.lessRowCount(b) + // If lessCountA is not less than lessCountB, it may be that they fall to the same bucket and we cannot estimate + // the fraction, so we use `totalCount / NDV` to estimate the row count, but the result should not greater than + // lessCountB or notNullCount-lessCountA. + if lessCountA >= lessCountB && hg.NDV > 0 { + result := math.Min(lessCountB, hg.notNullCount()-lessCountA) + return math.Min(result, hg.notNullCount()/float64(hg.NDV)) + } + return lessCountB - lessCountA +} + +// TotalRowCount returns the total count of this histogram. +func (hg *Histogram) TotalRowCount() float64 { + return hg.notNullCount() + float64(hg.NullCount) +} + +// notNullCount indicates the count of non-null values in column histogram and single-column index histogram, +// for multi-column index histogram, since we cannot define null for the row, we treat all rows as non-null, that means, +// notNullCount would return same value as TotalRowCount for multi-column index histograms. +func (hg *Histogram) notNullCount() float64 { + if hg.Len() == 0 { + return 0 + } + return float64(hg.Buckets[hg.Len()-1].Count) +} + +// mergeBuckets is used to Merge every two neighbor buckets. +func (hg *Histogram) mergeBuckets(bucketIdx int) { + curBuck := 0 + c := chunk.NewChunkWithCapacity([]*types.FieldType{hg.Tp}, bucketIdx) + for i := 0; i+1 <= bucketIdx; i += 2 { + hg.Buckets[curBuck] = hg.Buckets[i+1] + c.AppendDatum(0, hg.GetLower(i)) + c.AppendDatum(0, hg.GetUpper(i+1)) + curBuck++ + } + if bucketIdx%2 == 0 { + hg.Buckets[curBuck] = hg.Buckets[bucketIdx] + c.AppendDatum(0, hg.GetLower(bucketIdx)) + c.AppendDatum(0, hg.GetUpper(bucketIdx)) + curBuck++ + } + hg.Bounds = c + hg.Buckets = hg.Buckets[:curBuck] +} + +// GetIncreaseFactor will return a factor of data increasing after the last analysis. +func (hg *Histogram) GetIncreaseFactor(totalCount int64) float64 { + columnCount := hg.TotalRowCount() + if columnCount == 0 { + // avoid dividing by 0 + return 1.0 + } + return float64(totalCount) / columnCount +} + +func (hg *Histogram) bucketCount(idx int) int64 { + if idx == 0 { + return hg.Buckets[0].Count + } + return hg.Buckets[idx].Count - hg.Buckets[idx-1].Count +} + +// HistogramToProto converts Histogram to its protobuf representation. +// Note that when this is used, the lower/upper bound in the bucket must be BytesDatum. 
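+// An illustrative round trip (not part of the original change), assuming the
+// bounds are already encoded as BytesDatum and originalTp stands in for the
+// column's real field type:
+//
+//	protoHist := HistogramToProto(hist)
+//	restored := HistogramFromProto(protoHist)
+//	err := restored.DecodeTo(originalTp, time.UTC)
+//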
+func HistogramToProto(hg *Histogram) *tipb.Histogram { + protoHg := &tipb.Histogram{ + Ndv: hg.NDV, + } + for i := 0; i < hg.Len(); i++ { + bkt := &tipb.Bucket{ + Count: hg.Buckets[i].Count, + LowerBound: hg.GetLower(i).GetBytes(), + UpperBound: hg.GetUpper(i).GetBytes(), + Repeats: hg.Buckets[i].Repeat, + } + protoHg.Buckets = append(protoHg.Buckets, bkt) + } + return protoHg +} + +// HistogramFromProto converts Histogram from its protobuf representation. +// Note that we will set BytesDatum for the lower/upper bound in the bucket, the decode will +// be after all histograms merged. +func HistogramFromProto(protoHg *tipb.Histogram) *Histogram { + tp := types.NewFieldType(mysql.TypeBlob) + hg := NewHistogram(0, protoHg.Ndv, 0, 0, tp, len(protoHg.Buckets), 0) + for _, bucket := range protoHg.Buckets { + lower, upper := types.NewBytesDatum(bucket.LowerBound), types.NewBytesDatum(bucket.UpperBound) + hg.AppendBucket(&lower, &upper, bucket.Count, bucket.Repeats) + } + return hg +} + +func (hg *Histogram) popFirstBucket() { + hg.Buckets = hg.Buckets[1:] + c := chunk.NewChunkWithCapacity([]*types.FieldType{hg.Tp, hg.Tp}, hg.Bounds.NumRows()-2) + c.Append(hg.Bounds, 2, hg.Bounds.NumRows()) + hg.Bounds = c +} + +// IsIndexHist checks whether current histogram is one for index. +func (hg *Histogram) IsIndexHist() bool { + return hg.Tp.Tp == mysql.TypeBlob +} + +// MergeHistograms merges two histograms. +func MergeHistograms(sc *stmtctx.StatementContext, lh *Histogram, rh *Histogram, bucketSize int) (*Histogram, error) { + if lh.Len() == 0 { + return rh, nil + } + if rh.Len() == 0 { + return lh, nil + } + lh.NDV += rh.NDV + lLen := lh.Len() + cmp, err := lh.GetUpper(lLen-1).CompareDatum(sc, rh.GetLower(0)) + if err != nil { + return nil, errors.Trace(err) + } + offset := int64(0) + if cmp == 0 { + lh.NDV-- + lh.updateLastBucket(rh.GetUpper(0), lh.Buckets[lLen-1].Count+rh.Buckets[0].Count, rh.Buckets[0].Repeat) + offset = rh.Buckets[0].Count + rh.popFirstBucket() + } + for lh.Len() > bucketSize { + lh.mergeBuckets(lh.Len() - 1) + } + if rh.Len() == 0 { + return lh, nil + } + for rh.Len() > bucketSize { + rh.mergeBuckets(rh.Len() - 1) + } + lCount := lh.Buckets[lh.Len()-1].Count + rCount := rh.Buckets[rh.Len()-1].Count - offset + lAvg := float64(lCount) / float64(lh.Len()) + rAvg := float64(rCount) / float64(rh.Len()) + for lh.Len() > 1 && lAvg*2 <= rAvg { + lh.mergeBuckets(lh.Len() - 1) + lAvg *= 2 + } + for rh.Len() > 1 && rAvg*2 <= lAvg { + rh.mergeBuckets(rh.Len() - 1) + rAvg *= 2 + } + for i := 0; i < rh.Len(); i++ { + lh.AppendBucket(rh.GetLower(i), rh.GetUpper(i), rh.Buckets[i].Count+lCount-offset, rh.Buckets[i].Repeat) + } + for lh.Len() > bucketSize { + lh.mergeBuckets(lh.Len() - 1) + } + return lh, nil +} + +// AvgCountPerNotNullValue gets the average row count per value by the data of histogram. +func (hg *Histogram) AvgCountPerNotNullValue(totalCount int64) float64 { + factor := hg.GetIncreaseFactor(totalCount) + totalNotNull := hg.notNullCount() * factor + curNDV := float64(hg.NDV) * factor + curNDV = math.Max(curNDV, 1) + return totalNotNull / curNDV +} + +func (hg *Histogram) outOfRange(val types.Datum) bool { + if hg.Len() == 0 { + return true + } + return chunk.Compare(hg.Bounds.GetRow(0), 0, &val) > 0 || + chunk.Compare(hg.Bounds.GetRow(hg.Bounds.NumRows()-1), 0, &val) < 0 +} + +// Copy deep copies the histogram. 
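+// The Bounds chunk and the Buckets slice are duplicated; the field type and any
+// pre-calculated scalars are still shared with the receiver.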
+func (hg *Histogram) Copy() *Histogram { + newHist := *hg + newHist.Bounds = hg.Bounds.CopyConstruct() + newHist.Buckets = make([]Bucket, 0, len(hg.Buckets)) + newHist.Buckets = append(newHist.Buckets, hg.Buckets...) + return &newHist +} + +// RemoveUpperBound removes the upper bound from histogram. +// It is used when merge stats for incremental analyze. +func (hg *Histogram) RemoveUpperBound() *Histogram { + hg.Buckets[hg.Len()-1].Count -= hg.Buckets[hg.Len()-1].Repeat + hg.Buckets[hg.Len()-1].Repeat = 0 + return hg +} + +// TruncateHistogram truncates the histogram to `numBkt` buckets. +func (hg *Histogram) TruncateHistogram(numBkt int) *Histogram { + hist := hg.Copy() + hist.Buckets = hist.Buckets[:numBkt] + hist.Bounds.TruncateTo(numBkt * 2) + return hist +} + +// Column represents a column histogram. +type Column struct { + Histogram + *CMSketch + PhysicalID int64 + Count int64 + Info *model.ColumnInfo + IsHandle bool +} + +func (c *Column) String() string { + return c.Histogram.ToString(0) +} + +// IsInvalid checks if this column is invalid. +func (c *Column) IsInvalid(sc *stmtctx.StatementContext, collPseudo bool) bool { + if collPseudo { + return true + } + return c.TotalRowCount() == 0 || (c.NDV > 0 && c.Len() == 0) +} + +func (c *Column) equalRowCount(sc *stmtctx.StatementContext, val types.Datum, modifyCount int64) (float64, error) { + if val.IsNull() { + return float64(c.NullCount), nil + } + // All the values are null. + if c.Histogram.Bounds.NumRows() == 0 { + return 0.0, nil + } + if c.NDV > 0 && c.outOfRange(val) { + return float64(modifyCount) / float64(c.NDV), nil + } + if c.CMSketch != nil { + count, err := c.CMSketch.queryValue(sc, val) + return float64(count), errors.Trace(err) + } + return c.Histogram.equalRowCount(val), nil +} + +// GetColumnRowCount estimates the row count by a slice of Range. +func (c *Column) GetColumnRowCount(sc *stmtctx.StatementContext, ranges []*ranger.Range, modifyCount int64, pkIsHandle bool) (float64, error) { + var rowCount float64 + for _, rg := range ranges { + cmp, err := rg.LowVal[0].CompareDatum(sc, &rg.HighVal[0]) + if err != nil { + return 0, errors.Trace(err) + } + if cmp == 0 { + // the point case. + if !rg.LowExclude && !rg.HighExclude { + // In this case, the row count is at most 1. + if pkIsHandle { + rowCount += 1 + continue + } + var cnt float64 + cnt, err = c.equalRowCount(sc, rg.LowVal[0], modifyCount) + if err != nil { + return 0, errors.Trace(err) + } + rowCount += cnt + } + continue + } + rangeVals := enumRangeValues(rg.LowVal[0], rg.HighVal[0], rg.LowExclude, rg.HighExclude) + // The small range case. + if rangeVals != nil { + for _, val := range rangeVals { + cnt, err := c.equalRowCount(sc, val, modifyCount) + if err != nil { + return 0, err + } + rowCount += cnt + } + continue + } + // The interval case. + cnt := c.BetweenRowCount(rg.LowVal[0], rg.HighVal[0]) + if (c.outOfRange(rg.LowVal[0]) && !rg.LowVal[0].IsNull()) || c.outOfRange(rg.HighVal[0]) { + cnt += float64(modifyCount) / outOfRangeBetweenRate + } + // `betweenRowCount` returns count for [l, h) range, we adjust cnt for boudaries here. + // Note that, `cnt` does not include null values, we need specially handle cases + // where null is the lower bound. 
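+		// For example, turning the [l, h) estimate into one for (l, h]: subtract
+		// the rows equal to l and add the rows equal to h.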
+ if rg.LowExclude && !rg.LowVal[0].IsNull() { + lowCnt, err := c.equalRowCount(sc, rg.LowVal[0], modifyCount) + if err != nil { + return 0, errors.Trace(err) + } + cnt -= lowCnt + } + if !rg.LowExclude && rg.LowVal[0].IsNull() { + cnt += float64(c.NullCount) + } + if !rg.HighExclude { + highCnt, err := c.equalRowCount(sc, rg.HighVal[0], modifyCount) + if err != nil { + return 0, errors.Trace(err) + } + cnt += highCnt + } + rowCount += cnt + } + if rowCount > c.TotalRowCount() { + rowCount = c.TotalRowCount() + } else if rowCount < 0 { + rowCount = 0 + } + return rowCount, nil +} + +// Index represents an index histogram. +type Index struct { + Histogram + *CMSketch + Info *model.IndexInfo +} + +func (idx *Index) String() string { + return idx.Histogram.ToString(len(idx.Info.Columns)) +} + +// IsInvalid checks if this index is invalid. +func (idx *Index) IsInvalid(collPseudo bool) bool { + return collPseudo || idx.TotalRowCount() == 0 +} + +var nullKeyBytes, _ = codec.EncodeKey(nil, nil, types.NewDatum(nil)) + +func (idx *Index) equalRowCount(sc *stmtctx.StatementContext, b []byte, modifyCount int64) (float64, error) { + if len(idx.Info.Columns) == 1 { + if bytes.Equal(b, nullKeyBytes) { + return float64(idx.NullCount), nil + } + } + val := types.NewBytesDatum(b) + if idx.NDV > 0 && idx.outOfRange(val) { + return float64(modifyCount) / (float64(idx.NDV)), nil + } + if idx.CMSketch != nil { + return float64(idx.CMSketch.QueryBytes(b)), nil + } + return idx.Histogram.equalRowCount(val), nil +} + +// GetRowCount returns the row count of the given ranges. +// It uses the modifyCount to adjust the influence of modifications on the table. +func (idx *Index) GetRowCount(sc *stmtctx.StatementContext, indexRanges []*ranger.Range, modifyCount int64) (float64, error) { + totalCount := float64(0) + isSingleCol := len(idx.Info.Columns) == 1 + for _, indexRange := range indexRanges { + lb, err := codec.EncodeKey(sc, nil, indexRange.LowVal...) + if err != nil { + return 0, err + } + rb, err := codec.EncodeKey(sc, nil, indexRange.HighVal...) + if err != nil { + return 0, err + } + fullLen := len(indexRange.LowVal) == len(indexRange.HighVal) && len(indexRange.LowVal) == len(idx.Info.Columns) + if bytes.Equal(lb, rb) { + if indexRange.LowExclude || indexRange.HighExclude { + continue + } + if fullLen { + // At most 1 in this case. 
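+				// A full-length point range on a unique index can match at most one
+				// row, so no histogram or CM sketch lookup is needed.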
+ if idx.Info.Unique { + totalCount += 1 + continue + } + count, err := idx.equalRowCount(sc, lb, modifyCount) + if err != nil { + return 0, err + } + totalCount += count + continue + } + } + if indexRange.LowExclude { + lb = kv.Key(lb).PrefixNext() + } + if !indexRange.HighExclude { + rb = kv.Key(rb).PrefixNext() + } + l := types.NewBytesDatum(lb) + r := types.NewBytesDatum(rb) + totalCount += idx.BetweenRowCount(l, r) + lowIsNull := bytes.Equal(lb, nullKeyBytes) + if (idx.outOfRange(l) && !(isSingleCol && lowIsNull)) || idx.outOfRange(r) { + totalCount += float64(modifyCount) / outOfRangeBetweenRate + } + if isSingleCol && lowIsNull { + totalCount += float64(idx.NullCount) + } + } + if totalCount > idx.TotalRowCount() { + totalCount = idx.TotalRowCount() + } + return totalCount, nil +} + +func (idx *Index) outOfRange(val types.Datum) bool { + if idx.Histogram.Len() == 0 { + return true + } + withInLowBoundOrPrefixMatch := chunk.Compare(idx.Bounds.GetRow(0), 0, &val) <= 0 || + matchPrefix(idx.Bounds.GetRow(0), 0, &val) + withInHighBound := chunk.Compare(idx.Bounds.GetRow(idx.Bounds.NumRows()-1), 0, &val) >= 0 + return !withInLowBoundOrPrefixMatch || !withInHighBound +} + +// matchPrefix checks whether ad is the prefix of value +func matchPrefix(row chunk.Row, colIdx int, ad *types.Datum) bool { + switch ad.Kind() { + case types.KindString, types.KindBytes: + return strings.HasPrefix(row.GetString(colIdx), ad.GetString()) + } + return false +} diff --git a/statistics/sample.go b/statistics/sample.go new file mode 100644 index 0000000..19e42d1 --- /dev/null +++ b/statistics/sample.go @@ -0,0 +1,259 @@ +// Copyright 2017 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package statistics + +import ( + "context" + "math/rand" + "sort" + + "github.com/pingcap/errors" + "github.com/pingcap/tidb/parser/ast" + "github.com/pingcap/tidb/parser/mysql" + "github.com/pingcap/tidb/parser/terror" + "github.com/pingcap/tidb/sessionctx/stmtctx" + "github.com/pingcap/tidb/types" + "github.com/pingcap/tidb/util/chunk" + "github.com/pingcap/tidb/util/sqlexec" + "github.com/pingcap/tipb/go-tipb" +) + +// SampleItem is an item of sampled column value. +type SampleItem struct { + // Value is the sampled column value. + Value types.Datum + // Ordinal is original position of this item in SampleCollector before sorting. This + // is used for computing correlation. + Ordinal int + // RowID is the row id of the sample in its key. + // This property is used to calculate Ordinal in fast analyze. + RowID int64 +} + +// SortSampleItems sorts a slice of SampleItem. 
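+// A stable sort keeps equal values in their original order, so the Ordinal-based
+// correlation computation stays deterministic when there are ties.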
+func SortSampleItems(sc *stmtctx.StatementContext, items []*SampleItem) error { + sorter := sampleItemSorter{items: items, sc: sc} + sort.Stable(&sorter) + return sorter.err +} + +type sampleItemSorter struct { + items []*SampleItem + sc *stmtctx.StatementContext + err error +} + +func (s *sampleItemSorter) Len() int { + return len(s.items) +} + +func (s *sampleItemSorter) Less(i, j int) bool { + var cmp int + cmp, s.err = s.items[i].Value.CompareDatum(s.sc, &s.items[j].Value) + if s.err != nil { + return true + } + return cmp < 0 +} + +func (s *sampleItemSorter) Swap(i, j int) { + s.items[i], s.items[j] = s.items[j], s.items[i] +} + +// SampleCollector will collect Samples and calculate the count and ndv of an attribute. +type SampleCollector struct { + Samples []*SampleItem + seenValues int64 // seenValues is the current seen values. + IsMerger bool + NullCount int64 + Count int64 // Count is the number of non-null rows. + MaxSampleSize int64 + FMSketch *FMSketch + CMSketch *CMSketch + TotalSize int64 // TotalSize is the total size of column. +} + +// MergeSampleCollector merges two sample collectors. +func (c *SampleCollector) MergeSampleCollector(sc *stmtctx.StatementContext, rc *SampleCollector) { + c.NullCount += rc.NullCount + c.Count += rc.Count + c.TotalSize += rc.TotalSize + c.FMSketch.mergeFMSketch(rc.FMSketch) + if rc.CMSketch != nil { + err := c.CMSketch.MergeCMSketch(rc.CMSketch) + terror.Log(errors.Trace(err)) + } + for _, item := range rc.Samples { + err := c.collect(sc, item.Value) + terror.Log(errors.Trace(err)) + } +} + +// SampleCollectorToProto converts SampleCollector to its protobuf representation. +func SampleCollectorToProto(c *SampleCollector) *tipb.SampleCollector { + collector := &tipb.SampleCollector{ + NullCount: c.NullCount, + Count: c.Count, + FmSketch: FMSketchToProto(c.FMSketch), + TotalSize: &c.TotalSize, + } + if c.CMSketch != nil { + collector.CmSketch = CMSketchToProto(c.CMSketch) + } + for _, item := range c.Samples { + collector.Samples = append(collector.Samples, item.Value.GetBytes()) + } + return collector +} + +const maxSampleValueLength = mysql.MaxFieldVarCharLength / 2 + +// SampleCollectorFromProto converts SampleCollector from its protobuf representation. +func SampleCollectorFromProto(collector *tipb.SampleCollector) *SampleCollector { + s := &SampleCollector{ + NullCount: collector.NullCount, + Count: collector.Count, + FMSketch: FMSketchFromProto(collector.FmSketch), + } + if collector.TotalSize != nil { + s.TotalSize = *collector.TotalSize + } + s.CMSketch = CMSketchFromProto(collector.CmSketch) + for _, val := range collector.Samples { + // When store the histogram bucket boundaries to kv, we need to limit the length of the value. + if len(val) <= maxSampleValueLength { + item := &SampleItem{Value: types.NewBytesDatum(val)} + s.Samples = append(s.Samples, item) + } + } + return s +} + +func (c *SampleCollector) collect(sc *stmtctx.StatementContext, d types.Datum) error { + if !c.IsMerger { + if d.IsNull() { + c.NullCount++ + return nil + } + c.Count++ + if err := c.FMSketch.InsertValue(sc, d); err != nil { + return errors.Trace(err) + } + if c.CMSketch != nil { + c.CMSketch.InsertBytes(d.GetBytes()) + } + // Minus one is to remove the flag byte. + c.TotalSize += int64(len(d.GetBytes()) - 1) + } + c.seenValues++ + // The following code use types.CloneDatum(d) because d may have a deep reference + // to the underlying slice, GC can't free them which lead to memory leak eventually. + // TODO: Refactor the proto to avoid copying here. 
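+	// Classic reservoir sampling: the first MaxSampleSize values fill the reservoir;
+	// afterwards the i-th seen value replaces a random slot with probability
+	// MaxSampleSize / i.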
+ if len(c.Samples) < int(c.MaxSampleSize) { + newItem := &SampleItem{Value: types.CloneDatum(d)} + c.Samples = append(c.Samples, newItem) + } else { + shouldAdd := rand.Int63n(c.seenValues) < c.MaxSampleSize + if shouldAdd { + idx := rand.Intn(int(c.MaxSampleSize)) + newItem := &SampleItem{Value: types.CloneDatum(d)} + // To keep the order of the elements, we use delete and append, not direct replacement. + c.Samples = append(c.Samples[:idx], c.Samples[idx+1:]...) + c.Samples = append(c.Samples, newItem) + } + } + return nil +} + +// CalcTotalSize is to calculate total size based on samples. +func (c *SampleCollector) CalcTotalSize() { + c.TotalSize = 0 + for _, item := range c.Samples { + c.TotalSize += int64(len(item.Value.GetBytes())) + } +} + +// SampleBuilder is used to build samples for columns. +// Also, if primary key is handle, it will directly build histogram for it. +type SampleBuilder struct { + Sc *stmtctx.StatementContext + RecordSet sqlexec.RecordSet + ColLen int // ColLen is the number of columns need to be sampled. + PkBuilder *SortedBuilder + MaxBucketSize int64 + MaxSampleSize int64 + MaxFMSketchSize int64 + CMSketchDepth int32 + CMSketchWidth int32 +} + +// CollectColumnStats collects sample from the result set using Reservoir Sampling algorithm, +// and estimates NDVs using FM Sketch during the collecting process. +// It returns the sample collectors which contain total count, null count, distinct values count and CM Sketch. +// It also returns the statistic builder for PK which contains the histogram. +// See https://en.wikipedia.org/wiki/Reservoir_sampling +func (s SampleBuilder) CollectColumnStats() ([]*SampleCollector, *SortedBuilder, error) { + collectors := make([]*SampleCollector, s.ColLen) + for i := range collectors { + collectors[i] = &SampleCollector{ + MaxSampleSize: s.MaxSampleSize, + FMSketch: NewFMSketch(int(s.MaxFMSketchSize)), + } + } + if s.CMSketchDepth > 0 && s.CMSketchWidth > 0 { + for i := range collectors { + collectors[i].CMSketch = NewCMSketch(s.CMSketchDepth, s.CMSketchWidth) + } + } + ctx := context.TODO() + req := s.RecordSet.NewChunk() + it := chunk.NewIterator4Chunk(req) + for { + err := s.RecordSet.Next(ctx, req) + if err != nil { + return nil, nil, errors.Trace(err) + } + if req.NumRows() == 0 { + return collectors, s.PkBuilder, nil + } + if len(s.RecordSet.Fields()) == 0 { + return nil, nil, errors.Errorf("collect column stats failed: record set has 0 field") + } + for row := it.Begin(); row != it.End(); row = it.Next() { + datums := RowToDatums(row, s.RecordSet.Fields()) + if s.PkBuilder != nil { + err = s.PkBuilder.Iterate(datums[0]) + if err != nil { + return nil, nil, errors.Trace(err) + } + datums = datums[1:] + } + for i, val := range datums { + err = collectors[i].collect(s.Sc, val) + if err != nil { + return nil, nil, errors.Trace(err) + } + } + } + } +} + +// RowToDatums converts row to datum slice. +func RowToDatums(row chunk.Row, fields []*ast.ResultField) []types.Datum { + datums := make([]types.Datum, len(fields)) + for i, f := range fields { + datums[i] = row.GetDatum(i, &f.Column.FieldType) + } + return datums +} diff --git a/statistics/sample_test.go b/statistics/sample_test.go new file mode 100644 index 0000000..c682093 --- /dev/null +++ b/statistics/sample_test.go @@ -0,0 +1,130 @@ +// Copyright 2017 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package statistics + +import ( + "time" + + . "github.com/pingcap/check" + "github.com/pingcap/tidb/parser/mysql" + "github.com/pingcap/tidb/sessionctx/stmtctx" + "github.com/pingcap/tidb/types" + "github.com/pingcap/tidb/util/mock" + "github.com/pingcap/tidb/util/sqlexec" +) + +var _ = Suite(&testSampleSuite{}) + +type testSampleSuite struct { + count int + rs sqlexec.RecordSet +} + +func (s *testSampleSuite) SetUpSuite(c *C) { + s.count = 10000 + rs := &recordSet{ + data: make([]types.Datum, s.count), + count: s.count, + cursor: 0, + firstIsID: true, + } + rs.setFields(mysql.TypeLonglong, mysql.TypeLonglong) + start := 1000 // 1000 values is null + for i := start; i < rs.count; i++ { + rs.data[i].SetInt64(int64(i)) + } + for i := start; i < rs.count; i += 3 { + rs.data[i].SetInt64(rs.data[i].GetInt64() + 1) + } + for i := start; i < rs.count; i += 5 { + rs.data[i].SetInt64(rs.data[i].GetInt64() + 2) + } + s.rs = rs +} + +func (s *testSampleSuite) TestCollectColumnStats(c *C) { + sc := mock.NewContext().GetSessionVars().StmtCtx + builder := SampleBuilder{ + Sc: sc, + RecordSet: s.rs, + ColLen: 1, + PkBuilder: NewSortedBuilder(sc, 256, 1, types.NewFieldType(mysql.TypeLonglong)), + MaxSampleSize: 10000, + MaxBucketSize: 256, + MaxFMSketchSize: 1000, + CMSketchWidth: 2048, + CMSketchDepth: 8, + } + c.Assert(s.rs.Close(), IsNil) + collectors, pkBuilder, err := builder.CollectColumnStats() + c.Assert(err, IsNil) + c.Assert(collectors[0].NullCount+collectors[0].Count, Equals, int64(s.count)) + c.Assert(collectors[0].FMSketch.NDV(), Equals, int64(6232)) + c.Assert(collectors[0].CMSketch.TotalCount(), Equals, uint64(collectors[0].Count)) + c.Assert(int64(pkBuilder.Count), Equals, int64(s.count)) + c.Assert(pkBuilder.Hist().NDV, Equals, int64(s.count)) +} + +func (s *testSampleSuite) TestMergeSampleCollector(c *C) { + builder := SampleBuilder{ + Sc: mock.NewContext().GetSessionVars().StmtCtx, + RecordSet: s.rs, + ColLen: 2, + MaxSampleSize: 1000, + MaxBucketSize: 256, + MaxFMSketchSize: 1000, + CMSketchWidth: 2048, + CMSketchDepth: 8, + } + c.Assert(s.rs.Close(), IsNil) + sc := &stmtctx.StatementContext{TimeZone: time.Local} + collectors, pkBuilder, err := builder.CollectColumnStats() + c.Assert(err, IsNil) + c.Assert(pkBuilder, IsNil) + c.Assert(len(collectors), Equals, 2) + collectors[0].IsMerger = true + collectors[0].MergeSampleCollector(sc, collectors[1]) + c.Assert(collectors[0].FMSketch.NDV(), Equals, int64(9280)) + c.Assert(len(collectors[0].Samples), Equals, 1000) + c.Assert(collectors[0].NullCount, Equals, int64(1000)) + c.Assert(collectors[0].Count, Equals, int64(19000)) + c.Assert(collectors[0].CMSketch.TotalCount(), Equals, uint64(collectors[0].Count)) +} + +func (s *testSampleSuite) TestCollectorProtoConversion(c *C) { + builder := SampleBuilder{ + Sc: mock.NewContext().GetSessionVars().StmtCtx, + RecordSet: s.rs, + ColLen: 2, + MaxSampleSize: 10000, + MaxBucketSize: 256, + MaxFMSketchSize: 1000, + CMSketchWidth: 2048, + CMSketchDepth: 8, + } + c.Assert(s.rs.Close(), IsNil) + collectors, pkBuilder, err := builder.CollectColumnStats() + c.Assert(err, IsNil) + c.Assert(pkBuilder, IsNil) + for _, collector := range collectors { + p := 
SampleCollectorToProto(collector) + s := SampleCollectorFromProto(p) + c.Assert(collector.Count, Equals, s.Count) + c.Assert(collector.NullCount, Equals, s.NullCount) + c.Assert(collector.CMSketch.TotalCount(), Equals, s.CMSketch.TotalCount()) + c.Assert(collector.FMSketch.NDV(), Equals, s.FMSketch.NDV()) + c.Assert(collector.TotalSize, Equals, s.TotalSize) + c.Assert(len(collector.Samples), Equals, len(s.Samples)) + } +} diff --git a/statistics/scalar.go b/statistics/scalar.go new file mode 100644 index 0000000..446f61b --- /dev/null +++ b/statistics/scalar.go @@ -0,0 +1,175 @@ +// Copyright 2017 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package statistics + +import ( + "encoding/binary" + "github.com/pingcap/tidb/types" + "math" +) + +// calcFraction is used to calculate the fraction of the interval [lower, upper] that lies within the [lower, value] +// using the continuous-value assumption. +func calcFraction(lower, upper, value float64) float64 { + if upper <= lower { + return 0.5 + } + if value <= lower { + return 0 + } + if value >= upper { + return 1 + } + frac := (value - lower) / (upper - lower) + if math.IsNaN(frac) || math.IsInf(frac, 0) || frac < 0 || frac > 1 { + return 0.5 + } + return frac +} + +func convertDatumToScalar(value *types.Datum, commonPfxLen int) float64 { + switch value.Kind() { + case types.KindString, types.KindBytes: + bytes := value.GetBytes() + if len(bytes) <= commonPfxLen { + return 0 + } + return convertBytesToScalar(bytes[commonPfxLen:]) + default: + // do not know how to convert + return 0 + } +} + +// PreCalculateScalar converts the lower and upper to scalar. When the datum type is KindString or KindBytes, we also +// calculate their common prefix length, because when a value falls between lower and upper, the common prefix +// of lower and upper equals to the common prefix of the lower, upper and the value. For some simple types like `Int64`, +// we do not convert it because we can directly infer the scalar value. 
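+// For example, for a string bucket ["abc1", "abc9"] the common prefix "abc" has
+// length 3, so only the varying suffixes "1" and "9" are converted to scalars.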
+func (hg *Histogram) PreCalculateScalar() { + len := hg.Len() + if len == 0 { + return + } + switch hg.GetLower(0).Kind() { + case types.KindBytes, types.KindString: + hg.scalars = make([]scalar, len) + for i := 0; i < len; i++ { + lower, upper := hg.GetLower(i), hg.GetUpper(i) + common := commonPrefixLength(lower.GetBytes(), upper.GetBytes()) + hg.scalars[i] = scalar{ + commonPfxLen: common, + lower: convertDatumToScalar(lower, common), + upper: convertDatumToScalar(upper, common), + } + } + } +} + +func (hg *Histogram) calcFraction(index int, value *types.Datum) float64 { + lower, upper := hg.Bounds.GetRow(2*index), hg.Bounds.GetRow(2*index+1) + switch value.Kind() { + case types.KindFloat32: + return calcFraction(float64(lower.GetFloat32(0)), float64(upper.GetFloat32(0)), float64(value.GetFloat32())) + case types.KindFloat64: + return calcFraction(lower.GetFloat64(0), upper.GetFloat64(0), value.GetFloat64()) + case types.KindInt64: + return calcFraction(float64(lower.GetInt64(0)), float64(upper.GetInt64(0)), float64(value.GetInt64())) + case types.KindUint64: + return calcFraction(float64(lower.GetUint64(0)), float64(upper.GetUint64(0)), float64(value.GetUint64())) + case types.KindBytes, types.KindString: + return calcFraction(hg.scalars[index].lower, hg.scalars[index].upper, convertDatumToScalar(value, hg.scalars[index].commonPfxLen)) + } + return 0.5 +} + +func commonPrefixLength(lower, upper []byte) int { + minLen := len(lower) + if minLen > len(upper) { + minLen = len(upper) + } + for i := 0; i < minLen; i++ { + if lower[i] != upper[i] { + return i + } + } + return minLen +} + +func convertBytesToScalar(value []byte) float64 { + // Bytes type is viewed as a base-256 value, so we only consider at most 8 bytes. + var buf [8]byte + copy(buf[:], value) + return float64(binary.BigEndian.Uint64(buf[:])) +} + +const maxNumStep = 10 + +func enumRangeValues(low, high types.Datum, lowExclude, highExclude bool) []types.Datum { + if low.Kind() != high.Kind() { + return nil + } + exclude := 0 + if lowExclude { + exclude++ + } + if highExclude { + exclude++ + } + switch low.Kind() { + case types.KindInt64: + // Overflow check. + lowVal, highVal := low.GetInt64(), high.GetInt64() + if lowVal <= 0 && highVal >= 0 { + if lowVal < -maxNumStep || highVal > maxNumStep { + return nil + } + } + remaining := highVal - lowVal + if remaining >= maxNumStep+1 { + return nil + } + remaining = remaining + 1 - int64(exclude) + if remaining >= maxNumStep { + return nil + } + values := make([]types.Datum, 0, remaining) + startValue := lowVal + if lowExclude { + startValue++ + } + for i := int64(0); i < remaining; i++ { + values = append(values, types.NewIntDatum(startValue+i)) + } + return values + case types.KindUint64: + remaining := high.GetUint64() - low.GetUint64() + if remaining >= maxNumStep+1 { + return nil + } + remaining = remaining + 1 - uint64(exclude) + if remaining >= maxNumStep { + return nil + } + values := make([]types.Datum, 0, remaining) + startValue := low.GetUint64() + if lowExclude { + startValue++ + } + for i := uint64(0); i < remaining; i++ { + values = append(values, types.NewUintDatum(startValue+i)) + } + return values + } + return nil +} diff --git a/statistics/scalar_test.go b/statistics/scalar_test.go new file mode 100644 index 0000000..c3a2079 --- /dev/null +++ b/statistics/scalar_test.go @@ -0,0 +1,150 @@ +// Copyright 2017 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package statistics + +import ( + "math" + + . "github.com/pingcap/check" + "github.com/pingcap/tidb/parser/mysql" + "github.com/pingcap/tidb/types" +) + +const eps = 1e-9 + +func getUnsignedFieldType() *types.FieldType { + tp := types.NewFieldType(mysql.TypeLonglong) + tp.Flag |= mysql.UnsignedFlag + return tp +} + +func (s *testStatisticsSuite) TestCalcFraction(c *C) { + tests := []struct { + lower types.Datum + upper types.Datum + value types.Datum + fraction float64 + tp *types.FieldType + }{ + { + lower: types.NewIntDatum(0), + upper: types.NewIntDatum(4), + value: types.NewIntDatum(1), + fraction: 0.25, + tp: types.NewFieldType(mysql.TypeLonglong), + }, + { + lower: types.NewIntDatum(0), + upper: types.NewIntDatum(4), + value: types.NewIntDatum(4), + fraction: 1, + tp: types.NewFieldType(mysql.TypeLonglong), + }, + { + lower: types.NewIntDatum(0), + upper: types.NewIntDatum(4), + value: types.NewIntDatum(-1), + fraction: 0, + tp: types.NewFieldType(mysql.TypeLonglong), + }, + { + lower: types.NewUintDatum(0), + upper: types.NewUintDatum(4), + value: types.NewUintDatum(1), + fraction: 0.25, + tp: getUnsignedFieldType(), + }, + { + lower: types.NewFloat64Datum(0), + upper: types.NewFloat64Datum(4), + value: types.NewFloat64Datum(1), + fraction: 0.25, + tp: types.NewFieldType(mysql.TypeDouble), + }, + { + lower: types.NewFloat32Datum(0), + upper: types.NewFloat32Datum(4), + value: types.NewFloat32Datum(1), + fraction: 0.25, + tp: types.NewFieldType(mysql.TypeFloat), + }, + { + lower: types.NewStringDatum("aasad"), + upper: types.NewStringDatum("addad"), + value: types.NewStringDatum("abfsd"), + fraction: 0.32280253984063745, + tp: types.NewFieldType(mysql.TypeString), + }, + { + lower: types.NewBytesDatum([]byte("aasad")), + upper: types.NewBytesDatum([]byte("asdff")), + value: types.NewBytesDatum([]byte("abfsd")), + fraction: 0.0529216802217269, + tp: types.NewFieldType(mysql.TypeBlob), + }, + } + for _, test := range tests { + hg := NewHistogram(0, 0, 0, 0, test.tp, 1, 0) + hg.AppendBucket(&test.lower, &test.upper, 0, 0) + hg.PreCalculateScalar() + fraction := hg.calcFraction(0, &test.value) + c.Check(math.Abs(fraction-test.fraction) < eps, IsTrue) + } +} + +func (s *testStatisticsSuite) TestEnumRangeValues(c *C) { + tests := []struct { + low types.Datum + high types.Datum + lowExclude bool + highExclude bool + res string + }{ + { + low: types.NewIntDatum(0), + high: types.NewIntDatum(5), + lowExclude: false, + highExclude: true, + res: "(0, 1, 2, 3, 4)", + }, + { + low: types.NewIntDatum(math.MinInt64), + high: types.NewIntDatum(math.MaxInt64), + lowExclude: false, + highExclude: false, + res: "", + }, + { + low: types.NewUintDatum(0), + high: types.NewUintDatum(5), + lowExclude: false, + highExclude: true, + res: "(0, 1, 2, 3, 4)", + }, + // fix issue 11610 + { + low: types.NewIntDatum(math.MinInt64), + high: types.NewIntDatum(0), + lowExclude: false, + highExclude: false, + res: "", + }, + } + for _, t := range tests { + vals := enumRangeValues(t.low, t.high, t.lowExclude, t.highExclude) + str, err := types.DatumsToString(vals, true) + c.Assert(err, IsNil) + c.Assert(str, Equals, t.res) + } +} diff --git a/statistics/selectivity.go 
b/statistics/selectivity.go new file mode 100644 index 0000000..e84941b --- /dev/null +++ b/statistics/selectivity.go @@ -0,0 +1,314 @@ +// Copyright 2017 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package statistics + +import ( + "math" + + "github.com/pingcap/errors" + "github.com/pingcap/tidb/expression" + "github.com/pingcap/tidb/parser/ast" + "github.com/pingcap/tidb/parser/mysql" + planutil "github.com/pingcap/tidb/planner/util" + "github.com/pingcap/tidb/sessionctx" + "github.com/pingcap/tidb/types" + "github.com/pingcap/tidb/util/ranger" +) + +// If one condition can't be calculated, we will assume that the selectivity of this condition is 0.8. +const selectionFactor = 0.8 + +// StatsNode is used for calculating selectivity. +type StatsNode struct { + Tp int + ID int64 + // mask is a bit pattern whose ith bit will indicate whether the ith expression is covered by this index/column. + mask int64 + // Selectivity indicates the Selectivity of this column/index. + Selectivity float64 + // numCols is the number of columns contained in the index or column(which is always 1). + numCols int + // partCover indicates whether the bit in the mask is for a full cover or partial cover. It is only true + // when the condition is a DNF expression on index, and the expression is not totally extracted as access condition. + partCover bool +} + +// The type of the StatsNode. +const ( + IndexType = iota + PkType + ColType +) + +const unknownColumnID = math.MinInt64 + +// getConstantColumnID receives two expressions and if one of them is column and another is constant, it returns the +// ID of the column. +func getConstantColumnID(e []expression.Expression) int64 { + if len(e) != 2 { + return unknownColumnID + } + col, ok1 := e[0].(*expression.Column) + _, ok2 := e[1].(*expression.Constant) + if ok1 && ok2 { + return col.ID + } + col, ok1 = e[1].(*expression.Column) + _, ok2 = e[0].(*expression.Constant) + if ok1 && ok2 { + return col.ID + } + return unknownColumnID +} + +func pseudoSelectivity(coll *HistColl, exprs []expression.Expression) float64 { + minFactor := selectionFactor + colExists := make(map[string]bool) + for _, expr := range exprs { + fun, ok := expr.(*expression.ScalarFunction) + if !ok { + continue + } + colID := getConstantColumnID(fun.GetArgs()) + if colID == unknownColumnID { + continue + } + switch fun.FuncName.L { + case ast.EQ, ast.In: + minFactor = math.Min(minFactor, 1.0/pseudoEqualRate) + col, ok := coll.Columns[colID] + if !ok { + continue + } + colExists[col.Info.Name.L] = true + if mysql.HasUniKeyFlag(col.Info.Flag) { + return 1.0 / float64(coll.Count) + } + case ast.GE, ast.GT, ast.LE, ast.LT: + minFactor = math.Min(minFactor, 1.0/pseudoLessRate) + // FIXME: To resolve the between case. 
+ } + } + if len(colExists) == 0 { + return minFactor + } + // use the unique key info + for _, idx := range coll.Indices { + if !idx.Info.Unique { + continue + } + unique := true + for _, col := range idx.Info.Columns { + if !colExists[col.Name.L] { + unique = false + break + } + } + if unique { + return 1.0 / float64(coll.Count) + } + } + return minFactor +} + +// Selectivity is a function calculate the selectivity of the expressions. +// The definition of selectivity is (row count after filter / row count before filter). +// And exprs must be CNF now, in other words, `exprs[0] and exprs[1] and ... and exprs[len - 1]` should be held when you call this. +// Currently the time complexity is o(n^2). +func (coll *HistColl) Selectivity(ctx sessionctx.Context, exprs []expression.Expression, filledPaths []*planutil.AccessPath) (float64, error) { + // If table's count is zero or conditions are empty, we should return 100% selectivity. + if coll.Count == 0 || len(exprs) == 0 { + return 1, nil + } + // TODO: If len(exprs) is bigger than 63, we could use bitset structure to replace the int64. + // This will simplify some code and speed up if we use this rather than a boolean slice. + if len(exprs) > 63 || (len(coll.Columns) == 0 && len(coll.Indices) == 0) { + return pseudoSelectivity(coll, exprs), nil + } + ret := 1.0 + var nodes []*StatsNode + sc := ctx.GetSessionVars().StmtCtx + + remainedExprs := make([]expression.Expression, 0, len(exprs)) + remainedExprs = append(remainedExprs, exprs...) + + extractedCols := make([]*expression.Column, 0, len(coll.Columns)) + extractedCols = expression.ExtractColumnsFromExpressions(extractedCols, remainedExprs, nil) + for id, colInfo := range coll.Columns { + col := expression.ColInfo2Col(extractedCols, colInfo.Info) + if col != nil { + maskCovered, ranges, _, err := getMaskAndRanges(ctx, remainedExprs, ranger.ColumnRangeType, nil, nil, col) + if err != nil { + return 0, errors.Trace(err) + } + nodes = append(nodes, &StatsNode{Tp: ColType, ID: id, mask: maskCovered, numCols: 1}) + if colInfo.IsHandle { + nodes[len(nodes)-1].Tp = PkType + var cnt float64 + cnt, err = coll.GetRowCountByIntColumnRanges(sc, id, ranges) + if err != nil { + return 0, errors.Trace(err) + } + nodes[len(nodes)-1].Selectivity = cnt / float64(coll.Count) + continue + } + cnt, err := coll.GetRowCountByColumnRanges(sc, id, ranges) + if err != nil { + return 0, errors.Trace(err) + } + nodes[len(nodes)-1].Selectivity = cnt / float64(coll.Count) + } + } + id2Paths := make(map[int64]*planutil.AccessPath) + for _, path := range filledPaths { + if path.IsTablePath { + continue + } + id2Paths[path.Index.ID] = path + } + for id, idxInfo := range coll.Indices { + idxCols := expression.FindPrefixOfIndex(extractedCols, coll.Idx2ColumnIDs[id]) + if len(idxCols) > 0 { + lengths := make([]int, 0, len(idxCols)) + for i := 0; i < len(idxCols); i++ { + lengths = append(lengths, idxInfo.Info.Columns[i].Length) + } + maskCovered, ranges, partCover, err := getMaskAndRanges(ctx, remainedExprs, ranger.IndexRangeType, lengths, id2Paths[idxInfo.ID], idxCols...) 
+ if err != nil { + return 0, errors.Trace(err) + } + cnt, err := coll.GetRowCountByIndexRanges(sc, id, ranges) + if err != nil { + return 0, errors.Trace(err) + } + selectivity := cnt / float64(coll.Count) + nodes = append(nodes, &StatsNode{ + Tp: IndexType, + ID: id, + mask: maskCovered, + numCols: len(idxInfo.Info.Columns), + Selectivity: selectivity, + partCover: partCover, + }) + } + } + usedSets := getUsableSetsByGreedy(nodes) + // Initialize the mask with the full set. + mask := (int64(1) << uint(len(remainedExprs))) - 1 + for _, set := range usedSets { + mask &^= set.mask + ret *= set.Selectivity + // If `partCover` is true, it means that the conditions are in DNF form, and only part + // of the DNF expressions are extracted as access conditions, so besides from the selectivity + // of the extracted access conditions, we multiply another selectionFactor for the residual + // conditions. + if set.partCover { + ret *= selectionFactor + } + } + // If there's still conditions which cannot be calculated, we will multiply a selectionFactor. + if mask > 0 { + ret *= selectionFactor + } + return ret, nil +} + +func getMaskAndRanges(ctx sessionctx.Context, exprs []expression.Expression, rangeType ranger.RangeType, lengths []int, cachedPath *planutil.AccessPath, cols ...*expression.Column) (mask int64, ranges []*ranger.Range, partCover bool, err error) { + sc := ctx.GetSessionVars().StmtCtx + isDNF := false + var accessConds, remainedConds []expression.Expression + switch rangeType { + case ranger.ColumnRangeType: + accessConds = ranger.ExtractAccessConditionsForColumn(exprs, cols[0].UniqueID) + ranges, err = ranger.BuildColumnRange(accessConds, sc, cols[0].RetType, types.UnspecifiedLength) + case ranger.IndexRangeType: + if cachedPath != nil { + ranges, accessConds, remainedConds, isDNF = cachedPath.Ranges, cachedPath.AccessConds, cachedPath.TableFilters, cachedPath.IsDNFCond + break + } + var res *ranger.DetachRangeResult + res, err = ranger.DetachCondAndBuildRangeForIndex(ctx, exprs, cols, lengths) + ranges, accessConds, remainedConds, isDNF = res.Ranges, res.AccessConds, res.RemainedConds, res.IsDNFCond + if err != nil { + return 0, nil, false, err + } + default: + panic("should never be here") + } + if err != nil { + return 0, nil, false, err + } + if isDNF && len(accessConds) > 0 { + mask |= 1 + return mask, ranges, len(remainedConds) > 0, nil + } + for i := range exprs { + for j := range accessConds { + if exprs[i].Equal(ctx, accessConds[j]) { + mask |= 1 << uint64(i) + break + } + } + } + return mask, ranges, false, nil +} + +// getUsableSetsByGreedy will select the indices and pk used for calculate selectivity by greedy algorithm. +func getUsableSetsByGreedy(nodes []*StatsNode) (newBlocks []*StatsNode) { + marked := make([]bool, len(nodes)) + mask := int64(math.MaxInt64) + for { + // Choose the index that covers most. + bestID, bestCount, bestTp, bestNumCols, bestMask := -1, 0, ColType, 0, int64(0) + for i, set := range nodes { + if marked[i] { + continue + } + curMask := set.mask & mask + bits := popCount(curMask) + // This set cannot cover any thing, just skip it. + if bits == 0 { + continue + } + // We greedy select the stats info based on: + // (1): The stats type, always prefer the primary key or index. + // (2): The number of expression that it covers, the more the better. + // (3): The number of columns that it contains, the less the better. 
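+			// e.g. an index covering expressions {0, 1} beats a single column covering
+			// only {1}; between two candidates covering the same expressions, the one
+			// with fewer index columns wins.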
+ if (bestTp == ColType && set.Tp != ColType) || bestCount < bits || (bestCount == bits && bestNumCols > set.numCols) { + bestID, bestCount, bestTp, bestNumCols, bestMask = i, bits, set.Tp, set.numCols, curMask + } + } + if bestCount == 0 { + break + } + + // Update the mask, remove the bit that nodes[bestID].mask has. + mask &^= bestMask + + newBlocks = append(newBlocks, nodes[bestID]) + marked[bestID] = true + } + return +} + +// popCount is the digit sum of the binary representation of the number x. +func popCount(x int64) int { + ret := 0 + // x -= x & -x, remove the lowest bit of the x. + // e.g. result will be 2 if x is 3. + for ; x > 0; x -= x & -x { + ret++ + } + return ret +} diff --git a/statistics/selectivity_test.go b/statistics/selectivity_test.go new file mode 100644 index 0000000..26676fe --- /dev/null +++ b/statistics/selectivity_test.go @@ -0,0 +1,468 @@ +// Copyright 2017 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package statistics_test + +import ( + "context" + "fmt" + "math" + "os" + "runtime/pprof" + "strings" + "testing" + "time" + + . "github.com/pingcap/check" + "github.com/pingcap/errors" + "github.com/pingcap/log" + "github.com/pingcap/tidb/domain" + "github.com/pingcap/tidb/kv" + "github.com/pingcap/tidb/parser/model" + "github.com/pingcap/tidb/parser/mysql" + plannercore "github.com/pingcap/tidb/planner/core" + "github.com/pingcap/tidb/session" + "github.com/pingcap/tidb/sessionctx" + "github.com/pingcap/tidb/sessionctx/stmtctx" + "github.com/pingcap/tidb/statistics" + "github.com/pingcap/tidb/store/mockstore" + "github.com/pingcap/tidb/types" + "github.com/pingcap/tidb/util/codec" + "github.com/pingcap/tidb/util/testkit" + "github.com/pingcap/tidb/util/testleak" + "github.com/pingcap/tidb/util/testutil" + "go.uber.org/zap" + "go.uber.org/zap/zapcore" +) + +const eps = 1e-9 + +var _ = Suite(&testStatsSuite{}) + +type testStatsSuite struct { + store kv.Storage + do *domain.Domain + hook *logHook + testData testutil.TestData +} + +func (s *testStatsSuite) SetUpSuite(c *C) { + testleak.BeforeTest() + // Add the hook here to avoid data race. 
+ s.registerHook() + var err error + s.store, s.do, err = newStoreWithBootstrap() + c.Assert(err, IsNil) + s.testData, err = testutil.LoadTestSuiteData("testdata", "stats_suite") + c.Assert(err, IsNil) +} + +func (s *testStatsSuite) TearDownSuite(c *C) { + s.do.Close() + c.Assert(s.store.Close(), IsNil) + testleak.AfterTest(c)() + c.Assert(s.testData.GenerateOutputIfNeeded(), IsNil) +} + +func (s *testStatsSuite) registerHook() { + conf := &log.Config{Level: os.Getenv("log_level"), File: log.FileLogConfig{}} + _, r, _ := log.InitLogger(conf) + s.hook = &logHook{r.Core, ""} + lg := zap.New(s.hook) + log.ReplaceGlobals(lg, r) +} + +type logHook struct { + zapcore.Core + results string +} + +func (h *logHook) Write(entry zapcore.Entry, fields []zapcore.Field) error { + message := entry.Message + if idx := strings.Index(message, "[stats"); idx != -1 { + h.results = h.results + message + for _, f := range fields { + h.results = h.results + ", " + f.Key + "=" + h.field2String(f) + } + } + return nil +} + +func (h *logHook) field2String(field zapcore.Field) string { + switch field.Type { + case zapcore.StringType: + return field.String + case zapcore.Int64Type, zapcore.Int32Type, zapcore.Uint32Type: + return fmt.Sprintf("%v", field.Integer) + case zapcore.Float64Type: + return fmt.Sprintf("%v", math.Float64frombits(uint64(field.Integer))) + case zapcore.StringerType: + return field.Interface.(fmt.Stringer).String() + } + return "not support" +} + +func (h *logHook) Check(e zapcore.Entry, ce *zapcore.CheckedEntry) *zapcore.CheckedEntry { + if h.Enabled(e.Level) { + return ce.AddCore(e, h) + } + return ce +} + +func newStoreWithBootstrap() (kv.Storage, *domain.Domain, error) { + store, err := mockstore.NewMockTikvStore() + if err != nil { + return nil, nil, errors.Trace(err) + } + session.SetSchemaLease(0) + session.DisableStats4Test() + do, err := session.BootstrapSession(store) + return store, do, errors.Trace(err) +} + +func cleanEnv(c *C, store kv.Storage, do *domain.Domain) { + tk := testkit.NewTestKit(c, store) + tk.MustExec("use test") + r := tk.MustQuery("show tables") + for _, tb := range r.Rows() { + tableName := tb[0] + tk.MustExec(fmt.Sprintf("drop table %v", tableName)) + } + tk.MustExec("delete from mysql.stats_meta") + tk.MustExec("delete from mysql.stats_histograms") + tk.MustExec("delete from mysql.stats_buckets") + do.StatsHandle().Clear() +} + +// generateIntDatum will generate a datum slice, every dimension is begin from 0, end with num - 1. +// If dimension is x, num is y, the total number of datum is y^x. And This slice is sorted. +func (s *testStatsSuite) generateIntDatum(dimension, num int) ([]types.Datum, error) { + length := int(math.Pow(float64(num), float64(dimension))) + ret := make([]types.Datum, length) + if dimension == 1 { + for i := 0; i < num; i++ { + ret[i] = types.NewIntDatum(int64(i)) + } + } else { + sc := &stmtctx.StatementContext{TimeZone: time.Local} + // In this way, we can guarantee the datum is in order. + for i := 0; i < length; i++ { + data := make([]types.Datum, dimension) + j := i + for k := 0; k < dimension; k++ { + data[dimension-k-1].SetInt64(int64(j % num)) + j = j / num + } + bytes, err := codec.EncodeKey(sc, nil, data...) + if err != nil { + return nil, err + } + ret[i].SetBytes(bytes) + } + } + return ret, nil +} + +// mockStatsHistogram will create a statistics.Histogram, of which the data is uniform distribution. 
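+// Each distinct value gets its own bucket holding `repeat` rows, so bucket i ends with a
+// cumulative count of repeat*(i+1) and the whole histogram covers len(values)*repeat rows.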
+func mockStatsHistogram(id int64, values []types.Datum, repeat int64, tp *types.FieldType) *statistics.Histogram { + ndv := len(values) + histogram := statistics.NewHistogram(id, int64(ndv), 0, 0, tp, ndv, 0) + for i := 0; i < ndv; i++ { + histogram.AppendBucket(&values[i], &values[i], repeat*int64(i+1), repeat) + } + return histogram +} + +func mockStatsTable(tbl *model.TableInfo, rowCount int64) *statistics.Table { + histColl := statistics.HistColl{ + PhysicalID: tbl.ID, + HavePhysicalID: true, + Count: rowCount, + Columns: make(map[int64]*statistics.Column, len(tbl.Columns)), + Indices: make(map[int64]*statistics.Index, len(tbl.Indices)), + } + statsTbl := &statistics.Table{ + HistColl: histColl, + } + return statsTbl +} + +func (s *testStatsSuite) prepareSelectivity(testKit *testkit.TestKit, c *C) *statistics.Table { + testKit.MustExec("use test") + testKit.MustExec("drop table if exists t") + testKit.MustExec("create table t(a int primary key, b int, c int, d int, e int, index idx_cd(c, d), index idx_de(d, e))") + + is := s.do.InfoSchema() + tb, err := is.TableByName(model.NewCIStr("test"), model.NewCIStr("t")) + c.Assert(err, IsNil) + tbl := tb.Meta() + + // mock the statistic table + statsTbl := mockStatsTable(tbl, 540) + + // Set the value of columns' histogram. + colValues, err := s.generateIntDatum(1, 54) + c.Assert(err, IsNil) + for i := 1; i <= 5; i++ { + statsTbl.Columns[int64(i)] = &statistics.Column{Histogram: *mockStatsHistogram(int64(i), colValues, 10, types.NewFieldType(mysql.TypeLonglong)), Info: tbl.Columns[i-1]} + } + + // Set the value of two indices' histograms. + idxValues, err := s.generateIntDatum(2, 3) + c.Assert(err, IsNil) + tp := types.NewFieldType(mysql.TypeBlob) + statsTbl.Indices[1] = &statistics.Index{Histogram: *mockStatsHistogram(1, idxValues, 60, tp), Info: tbl.Indices[0]} + statsTbl.Indices[2] = &statistics.Index{Histogram: *mockStatsHistogram(2, idxValues, 60, tp), Info: tbl.Indices[1]} + return statsTbl +} + +func (s *testStatsSuite) TestSelectivity(c *C) { + defer cleanEnv(c, s.store, s.do) + testKit := testkit.NewTestKit(c, s.store) + statsTbl := s.prepareSelectivity(testKit, c) + is := s.do.InfoSchema() + + longExpr := "0 < a and a = 1 " + for i := 1; i < 64; i++ { + longExpr += fmt.Sprintf(" and a > %d ", i) + } + tests := []struct { + exprs string + selectivity float64 + }{ + { + exprs: "a > 0 and a < 2", + selectivity: 0.01851851851, + }, + { + exprs: "a >= 1 and a < 2", + selectivity: 0.01851851851, + }, + { + exprs: "a >= 1 and b > 1 and a < 2", + selectivity: 0.01783264746, + }, + { + exprs: "a >= 1 and c > 1 and a < 2", + selectivity: 0.00617283950, + }, + { + exprs: "a >= 1 and c >= 1 and a < 2", + selectivity: 0.01234567901, + }, + { + exprs: "d = 0 and e = 1", + selectivity: 0.11111111111, + }, + { + exprs: "b > 1", + selectivity: 0.96296296296, + }, + { + exprs: "a > 1 and b < 2 and c > 3 and d < 4 and e > 5", + selectivity: 0, + }, + { + exprs: longExpr, + selectivity: 0.001, + }, + } + + ctx := context.Background() + for _, tt := range tests { + sql := "select * from t where " + tt.exprs + comment := Commentf("for %s", tt.exprs) + sctx := testKit.Se.(sessionctx.Context) + stmts, err := session.Parse(sctx, sql) + c.Assert(err, IsNil, Commentf("error %v, for expr %s", err, tt.exprs)) + c.Assert(stmts, HasLen, 1) + + err = plannercore.Preprocess(sctx, stmts[0], is) + c.Assert(err, IsNil, comment) + p, _, err := plannercore.BuildLogicalPlan(ctx, sctx, stmts[0], is) + c.Assert(err, IsNil, Commentf("error %v, for building plan, expr %s", 
err, tt.exprs)) + + sel := p.(plannercore.LogicalPlan).Children()[0].(*plannercore.LogicalSelection) + ds := sel.Children()[0].(*plannercore.DataSource) + + histColl := statsTbl.GenerateHistCollFromColumnInfo(ds.Columns, ds.Schema().Columns) + + ratio, err := histColl.Selectivity(sctx, sel.Conditions, nil) + c.Assert(err, IsNil, comment) + c.Assert(math.Abs(ratio-tt.selectivity) < eps, IsTrue, Commentf("for %s, needed: %v, got: %v", tt.exprs, tt.selectivity, ratio)) + + histColl.Count *= 10 + ratio, err = histColl.Selectivity(sctx, sel.Conditions, nil) + c.Assert(err, IsNil, comment) + c.Assert(math.Abs(ratio-tt.selectivity) < eps, IsTrue, Commentf("for %s, needed: %v, got: %v", tt.exprs, tt.selectivity, ratio)) + } +} + +// TestDiscreteDistribution tests the estimation for discrete data distribution. This is more common when the index +// consists several columns, and the first column has small NDV. +func (s *testStatsSuite) TestDiscreteDistribution(c *C) { + defer cleanEnv(c, s.store, s.do) + testKit := testkit.NewTestKit(c, s.store) + testKit.MustExec("use test") + testKit.MustExec("drop table if exists t") + testKit.MustExec("create table t(a char(10), b int, key idx(a, b))") + for i := 0; i < 499; i++ { + testKit.MustExec(fmt.Sprintf("insert into t values ('cn', %d)", i)) + } + for i := 0; i < 10; i++ { + testKit.MustExec("insert into t values ('tw', 0)") + } + testKit.MustExec("analyze table t") + var ( + input []string + output [][]string + ) + s.testData.GetTestCases(c, &input, &output) + for i, tt := range input { + s.testData.OnRecord(func() { + output[i] = s.testData.ConvertRowsToStrings(testKit.MustQuery(tt).Rows()) + }) + testKit.MustQuery(tt).Check(testkit.Rows(output[i]...)) + } +} + +func (s *testStatsSuite) TestSelectCombinedLowBound(c *C) { + defer cleanEnv(c, s.store, s.do) + testKit := testkit.NewTestKit(c, s.store) + testKit.MustExec("use test") + testKit.MustExec("drop table if exists t") + testKit.MustExec("create table t(id int auto_increment, kid int, pid int, primary key(id), key(kid, pid))") + testKit.MustExec("insert into t (kid, pid) values (1,2), (1,3), (1,4),(1, 11), (1, 12), (1, 13), (1, 14), (2, 2), (2, 3), (2, 4)") + testKit.MustExec("analyze table t") + var ( + input []string + output [][]string + ) + s.testData.GetTestCases(c, &input, &output) + for i, tt := range input { + s.testData.OnRecord(func() { + output[i] = s.testData.ConvertRowsToStrings(testKit.MustQuery(tt).Rows()) + }) + testKit.MustQuery(tt).Check(testkit.Rows(output[i]...)) + } +} + +func (s *testStatsSuite) TestPrimaryKeySelectivity(c *C) { + defer cleanEnv(c, s.store, s.do) + testKit := testkit.NewTestKit(c, s.store) + testKit.MustExec("use test") + testKit.MustExec("drop table if exists t") + testKit.MustExec("create table t(a char(10) primary key, b int)") + var input, output [][]string + s.testData.GetTestCases(c, &input, &output) + for i, ts := range input { + for j, tt := range ts { + if j != len(ts)-1 { + testKit.MustExec(tt) + } + s.testData.OnRecord(func() { + if j == len(ts)-1 { + output[i] = s.testData.ConvertRowsToStrings(testKit.MustQuery(tt).Rows()) + } + }) + if j == len(ts)-1 { + testKit.MustQuery(tt).Check(testkit.Rows(output[i]...)) + } + } + } +} + +func BenchmarkSelectivity(b *testing.B) { + c := &C{} + s := &testStatsSuite{} + s.SetUpSuite(c) + defer s.TearDownSuite(c) + + testKit := testkit.NewTestKit(c, s.store) + statsTbl := s.prepareSelectivity(testKit, c) + is := s.do.InfoSchema() + exprs := "a > 1 and b < 2 and c > 3 and d < 4 and e > 5" + sql := "select * from 
t where " + exprs + comment := Commentf("for %s", exprs) + sctx := testKit.Se.(sessionctx.Context) + stmts, err := session.Parse(sctx, sql) + c.Assert(err, IsNil, Commentf("error %v, for expr %s", err, exprs)) + c.Assert(stmts, HasLen, 1) + err = plannercore.Preprocess(sctx, stmts[0], is) + c.Assert(err, IsNil, comment) + p, _, err := plannercore.BuildLogicalPlan(context.Background(), sctx, stmts[0], is) + c.Assert(err, IsNil, Commentf("error %v, for building plan, expr %s", err, exprs)) + + file, err := os.Create("cpu.profile") + c.Assert(err, IsNil) + defer file.Close() + pprof.StartCPUProfile(file) + + b.Run("Selectivity", func(b *testing.B) { + b.ResetTimer() + for i := 0; i < b.N; i++ { + _, err := statsTbl.Selectivity(sctx, p.(plannercore.LogicalPlan).Children()[0].(*plannercore.LogicalSelection).Conditions, nil) + c.Assert(err, IsNil) + } + b.ReportAllocs() + }) + pprof.StopCPUProfile() +} + +func (s *testStatsSuite) TestColumnIndexNullEstimation(c *C) { + defer cleanEnv(c, s.store, s.do) + testKit := testkit.NewTestKit(c, s.store) + testKit.MustExec("use test") + testKit.MustExec("drop table if exists t") + testKit.MustExec("create table t(a int, b int, c int, index idx_b(b), index idx_c_a(c, a))") + testKit.MustExec("insert into t values(1,null,1),(2,null,2),(3,3,3),(4,null,4),(null,null,null);") + testKit.MustExec("analyze table t") + var ( + input []string + output [][]string + ) + s.testData.GetTestCases(c, &input, &output) + for i := 0; i < 5; i++ { + s.testData.OnRecord(func() { + output[i] = s.testData.ConvertRowsToStrings(testKit.MustQuery(input[i]).Rows()) + }) + testKit.MustQuery(input[i]).Check(testkit.Rows(output[i]...)) + } + for i := 5; i < len(input); i++ { + s.testData.OnRecord(func() { + output[i] = s.testData.ConvertRowsToStrings(testKit.MustQuery(input[i]).Rows()) + }) + testKit.MustQuery(input[i]).Check(testkit.Rows(output[i]...)) + } +} + +func (s *testStatsSuite) TestUniqCompEqualEst(c *C) { + defer cleanEnv(c, s.store, s.do) + testKit := testkit.NewTestKit(c, s.store) + testKit.MustExec("use test") + testKit.MustExec("drop table if exists t") + testKit.MustExec("create table t(a int, b int, primary key(a, b))") + testKit.MustExec("insert into t values(1,1),(1,2),(1,3),(1,4),(1,5),(1,6),(1,7),(1,8),(1,9),(1,10)") + testKit.MustExec("analyze table t") + var ( + input []string + output [][]string + ) + s.testData.GetTestCases(c, &input, &output) + for i := 0; i < 1; i++ { + s.testData.OnRecord(func() { + output[i] = s.testData.ConvertRowsToStrings(testKit.MustQuery(input[i]).Rows()) + }) + testKit.MustQuery(input[i]).Check(testkit.Rows(output[i]...)) + } +} diff --git a/statistics/statistics_test.go b/statistics/statistics_test.go new file mode 100644 index 0000000..0e576c6 --- /dev/null +++ b/statistics/statistics_test.go @@ -0,0 +1,661 @@ +// Copyright 2016 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package statistics + +import ( + "context" + "math" + "testing" + "time" + + . 
"github.com/pingcap/check" + "github.com/pingcap/errors" + "github.com/pingcap/tidb/parser/ast" + "github.com/pingcap/tidb/parser/model" + "github.com/pingcap/tidb/parser/mysql" + "github.com/pingcap/tidb/sessionctx" + "github.com/pingcap/tidb/sessionctx/stmtctx" + "github.com/pingcap/tidb/types" + "github.com/pingcap/tidb/util/chunk" + "github.com/pingcap/tidb/util/codec" + "github.com/pingcap/tidb/util/mock" + "github.com/pingcap/tidb/util/ranger" + "github.com/pingcap/tidb/util/sqlexec" +) + +func TestT(t *testing.T) { + TestingT(t) +} + +var _ = Suite(&testStatisticsSuite{}) + +type testStatisticsSuite struct { + count int + samples []*SampleItem + rc sqlexec.RecordSet + pk sqlexec.RecordSet +} + +type recordSet struct { + firstIsID bool + data []types.Datum + count int + cursor int + fields []*ast.ResultField +} + +func (r *recordSet) Fields() []*ast.ResultField { + return r.fields +} + +func (r *recordSet) setFields(tps ...uint8) { + r.fields = make([]*ast.ResultField, len(tps)) + for i := 0; i < len(tps); i++ { + rf := new(ast.ResultField) + rf.Column = new(model.ColumnInfo) + rf.Column.FieldType = *types.NewFieldType(tps[i]) + r.fields[i] = rf + } +} + +func (r *recordSet) getNext() []types.Datum { + if r.cursor == r.count { + return nil + } + r.cursor++ + row := make([]types.Datum, 0, len(r.fields)) + if r.firstIsID { + row = append(row, types.NewIntDatum(int64(r.cursor))) + } + row = append(row, r.data[r.cursor-1]) + return row +} + +func (r *recordSet) Next(ctx context.Context, req *chunk.Chunk) error { + req.Reset() + row := r.getNext() + if row != nil { + for i := 0; i < len(row); i++ { + req.AppendDatum(i, &row[i]) + } + } + return nil +} + +func (r *recordSet) NewChunk() *chunk.Chunk { + fields := make([]*types.FieldType, 0, len(r.fields)) + for _, field := range r.fields { + fields = append(fields, &field.Column.FieldType) + } + return chunk.NewChunkWithCapacity(fields, 32) +} + +func (r *recordSet) Close() error { + r.cursor = 0 + return nil +} + +func (s *testStatisticsSuite) SetUpSuite(c *C) { + s.count = 100000 + samples := make([]*SampleItem, 10000) + for i := 0; i < len(samples); i++ { + samples[i] = &SampleItem{} + } + start := 1000 + samples[0].Value.SetInt64(0) + for i := 1; i < start; i++ { + samples[i].Value.SetInt64(2) + } + for i := start; i < len(samples); i++ { + samples[i].Value.SetInt64(int64(i)) + } + for i := start; i < len(samples); i += 3 { + samples[i].Value.SetInt64(samples[i].Value.GetInt64() + 1) + } + for i := start; i < len(samples); i += 5 { + samples[i].Value.SetInt64(samples[i].Value.GetInt64() + 2) + } + sc := new(stmtctx.StatementContext) + err := SortSampleItems(sc, samples) + c.Check(err, IsNil) + s.samples = samples + + rc := &recordSet{ + data: make([]types.Datum, s.count), + count: s.count, + cursor: 0, + } + rc.setFields(mysql.TypeLonglong) + rc.data[0].SetInt64(0) + for i := 1; i < start; i++ { + rc.data[i].SetInt64(2) + } + for i := start; i < rc.count; i++ { + rc.data[i].SetInt64(int64(i)) + } + for i := start; i < rc.count; i += 3 { + rc.data[i].SetInt64(rc.data[i].GetInt64() + 1) + } + for i := start; i < rc.count; i += 5 { + rc.data[i].SetInt64(rc.data[i].GetInt64() + 2) + } + err = types.SortDatums(sc, rc.data) + c.Check(err, IsNil) + s.rc = rc + + pk := &recordSet{ + data: make([]types.Datum, s.count), + count: s.count, + cursor: 0, + } + pk.setFields(mysql.TypeLonglong) + for i := 0; i < rc.count; i++ { + pk.data[i].SetInt64(int64(i)) + } + s.pk = pk +} + +func encodeKey(key types.Datum) types.Datum { + sc := 
&stmtctx.StatementContext{TimeZone: time.Local} + buf, _ := codec.EncodeKey(sc, nil, key) + return types.NewBytesDatum(buf) +} + +func buildPK(sctx sessionctx.Context, numBuckets, id int64, records sqlexec.RecordSet) (int64, *Histogram, error) { + b := NewSortedBuilder(sctx.GetSessionVars().StmtCtx, numBuckets, id, types.NewFieldType(mysql.TypeLonglong)) + ctx := context.Background() + for { + req := records.NewChunk() + err := records.Next(ctx, req) + if err != nil { + return 0, nil, errors.Trace(err) + } + if req.NumRows() == 0 { + break + } + it := chunk.NewIterator4Chunk(req) + for row := it.Begin(); row != it.End(); row = it.Next() { + datums := RowToDatums(row, records.Fields()) + err = b.Iterate(datums[0]) + if err != nil { + return 0, nil, errors.Trace(err) + } + } + } + return b.Count, b.hist, nil +} + +func buildIndex(sctx sessionctx.Context, numBuckets, id int64, records sqlexec.RecordSet) (int64, *Histogram, *CMSketch, error) { + b := NewSortedBuilder(sctx.GetSessionVars().StmtCtx, numBuckets, id, types.NewFieldType(mysql.TypeBlob)) + cms := NewCMSketch(8, 2048) + ctx := context.Background() + req := records.NewChunk() + it := chunk.NewIterator4Chunk(req) + for { + err := records.Next(ctx, req) + if err != nil { + return 0, nil, nil, errors.Trace(err) + } + if req.NumRows() == 0 { + break + } + for row := it.Begin(); row != it.End(); row = it.Next() { + datums := RowToDatums(row, records.Fields()) + buf, err := codec.EncodeKey(sctx.GetSessionVars().StmtCtx, nil, datums...) + if err != nil { + return 0, nil, nil, errors.Trace(err) + } + data := types.NewBytesDatum(buf) + err = b.Iterate(data) + if err != nil { + return 0, nil, nil, errors.Trace(err) + } + cms.InsertBytes(buf) + } + } + return b.Count, b.Hist(), cms, nil +} + +func checkRepeats(c *C, hg *Histogram) { + for _, bkt := range hg.Buckets { + c.Assert(bkt.Repeat, Greater, int64(0)) + } +} + +func (s *testStatisticsSuite) TestBuild(c *C) { + bucketCount := int64(256) + ctx := mock.NewContext() + sc := ctx.GetSessionVars().StmtCtx + sketch, _, err := buildFMSketch(sc, s.rc.(*recordSet).data, 1000) + c.Assert(err, IsNil) + + collector := &SampleCollector{ + Count: int64(s.count), + NullCount: 0, + Samples: s.samples, + FMSketch: sketch, + } + col, err := BuildColumn(ctx, bucketCount, 2, collector, types.NewFieldType(mysql.TypeLonglong)) + c.Check(err, IsNil) + checkRepeats(c, col) + col.PreCalculateScalar() + c.Check(col.Len(), Equals, 226) + count := col.equalRowCount(types.NewIntDatum(1000)) + c.Check(int(count), Equals, 0) + count = col.lessRowCount(types.NewIntDatum(1000)) + c.Check(int(count), Equals, 10000) + count = col.lessRowCount(types.NewIntDatum(2000)) + c.Check(int(count), Equals, 19999) + count = col.greaterRowCount(types.NewIntDatum(2000)) + c.Check(int(count), Equals, 80000) + count = col.lessRowCount(types.NewIntDatum(200000000)) + c.Check(int(count), Equals, 100000) + count = col.greaterRowCount(types.NewIntDatum(200000000)) + c.Check(count, Equals, 0.0) + count = col.equalRowCount(types.NewIntDatum(200000000)) + c.Check(count, Equals, 0.0) + count = col.BetweenRowCount(types.NewIntDatum(3000), types.NewIntDatum(3500)) + c.Check(int(count), Equals, 4994) + count = col.lessRowCount(types.NewIntDatum(1)) + c.Check(int(count), Equals, 9) + + builder := SampleBuilder{ + Sc: mock.NewContext().GetSessionVars().StmtCtx, + RecordSet: s.pk, + ColLen: 1, + MaxSampleSize: 1000, + MaxFMSketchSize: 1000, + } + c.Assert(s.pk.Close(), IsNil) + collectors, _, err := builder.CollectColumnStats() + c.Assert(err, IsNil) + 
c.Assert(len(collectors), Equals, 1) + col, err = BuildColumn(mock.NewContext(), 256, 2, collectors[0], types.NewFieldType(mysql.TypeLonglong)) + c.Assert(err, IsNil) + checkRepeats(c, col) + c.Assert(col.Len(), Equals, 250) + + tblCount, col, _, err := buildIndex(ctx, bucketCount, 1, sqlexec.RecordSet(s.rc)) + c.Check(err, IsNil) + checkRepeats(c, col) + col.PreCalculateScalar() + c.Check(int(tblCount), Equals, 100000) + count = col.equalRowCount(encodeKey(types.NewIntDatum(10000))) + c.Check(int(count), Equals, 1) + count = col.lessRowCount(encodeKey(types.NewIntDatum(20000))) + c.Check(int(count), Equals, 19999) + count = col.BetweenRowCount(encodeKey(types.NewIntDatum(30000)), encodeKey(types.NewIntDatum(35000))) + c.Check(int(count), Equals, 4999) + count = col.BetweenRowCount(encodeKey(types.MinNotNullDatum()), encodeKey(types.NewIntDatum(0))) + c.Check(int(count), Equals, 0) + count = col.lessRowCount(encodeKey(types.NewIntDatum(0))) + c.Check(int(count), Equals, 0) + + s.pk.(*recordSet).cursor = 0 + tblCount, col, err = buildPK(ctx, bucketCount, 4, sqlexec.RecordSet(s.pk)) + c.Check(err, IsNil) + checkRepeats(c, col) + col.PreCalculateScalar() + c.Check(int(tblCount), Equals, 100000) + count = col.equalRowCount(types.NewIntDatum(10000)) + c.Check(int(count), Equals, 1) + count = col.lessRowCount(types.NewIntDatum(20000)) + c.Check(int(count), Equals, 20000) + count = col.BetweenRowCount(types.NewIntDatum(30000), types.NewIntDatum(35000)) + c.Check(int(count), Equals, 5000) + count = col.greaterRowCount(types.NewIntDatum(1001)) + c.Check(int(count), Equals, 98998) + count = col.lessRowCount(types.NewIntDatum(99999)) + c.Check(int(count), Equals, 99999) +} + +func (s *testStatisticsSuite) TestHistogramProtoConversion(c *C) { + ctx := mock.NewContext() + c.Assert(s.rc.Close(), IsNil) + tblCount, col, _, err := buildIndex(ctx, 256, 1, sqlexec.RecordSet(s.rc)) + c.Check(err, IsNil) + c.Check(int(tblCount), Equals, 100000) + + p := HistogramToProto(col) + h := HistogramFromProto(p) + c.Assert(HistogramEqual(col, h, true), IsTrue) +} + +func mockHistogram(lower, num int64) *Histogram { + h := NewHistogram(0, num, 0, 0, types.NewFieldType(mysql.TypeLonglong), int(num), 0) + for i := int64(0); i < num; i++ { + lower, upper := types.NewIntDatum(lower+i), types.NewIntDatum(lower+i) + h.AppendBucket(&lower, &upper, i+1, 1) + } + return h +} + +func (s *testStatisticsSuite) TestMergeHistogram(c *C) { + tests := []struct { + leftLower int64 + leftNum int64 + rightLower int64 + rightNum int64 + bucketNum int + ndv int64 + }{ + { + leftLower: 0, + leftNum: 0, + rightLower: 0, + rightNum: 1, + bucketNum: 1, + ndv: 1, + }, + { + leftLower: 0, + leftNum: 200, + rightLower: 200, + rightNum: 200, + bucketNum: 200, + ndv: 400, + }, + { + leftLower: 0, + leftNum: 200, + rightLower: 199, + rightNum: 200, + bucketNum: 200, + ndv: 399, + }, + } + sc := mock.NewContext().GetSessionVars().StmtCtx + bucketCount := 256 + for _, t := range tests { + lh := mockHistogram(t.leftLower, t.leftNum) + rh := mockHistogram(t.rightLower, t.rightNum) + h, err := MergeHistograms(sc, lh, rh, bucketCount) + c.Assert(err, IsNil) + c.Assert(h.NDV, Equals, t.ndv) + c.Assert(h.Len(), Equals, t.bucketNum) + c.Assert(int64(h.TotalRowCount()), Equals, t.leftNum+t.rightNum) + expectLower := types.NewIntDatum(t.leftLower) + cmp, err := h.GetLower(0).CompareDatum(sc, &expectLower) + c.Assert(err, IsNil) + c.Assert(cmp, Equals, 0) + expectUpper := types.NewIntDatum(t.rightLower + t.rightNum - 1) + cmp, err = 
h.GetUpper(h.Len()-1).CompareDatum(sc, &expectUpper) + c.Assert(err, IsNil) + c.Assert(cmp, Equals, 0) + } +} + +func (s *testStatisticsSuite) TestPseudoTable(c *C) { + ti := &model.TableInfo{} + colInfo := &model.ColumnInfo{ + ID: 1, + FieldType: *types.NewFieldType(mysql.TypeLonglong), + } + ti.Columns = append(ti.Columns, colInfo) + tbl := PseudoTable(ti) + c.Assert(tbl.Count, Greater, int64(0)) + sc := new(stmtctx.StatementContext) + count := tbl.ColumnLessRowCount(sc, types.NewIntDatum(100), colInfo.ID) + c.Assert(int(count), Equals, 3333) + count, err := tbl.ColumnEqualRowCount(sc, types.NewIntDatum(1000), colInfo.ID) + c.Assert(err, IsNil) + c.Assert(int(count), Equals, 10) + count = tbl.ColumnBetweenRowCount(sc, types.NewIntDatum(1000), types.NewIntDatum(5000), colInfo.ID) + c.Assert(int(count), Equals, 250) +} + +func buildCMSketch(values []types.Datum) *CMSketch { + cms := NewCMSketch(8, 2048) + for _, val := range values { + cms.insert(&val) + } + return cms +} + +func (s *testStatisticsSuite) TestColumnRange(c *C) { + bucketCount := int64(256) + ctx := mock.NewContext() + sc := ctx.GetSessionVars().StmtCtx + sketch, _, err := buildFMSketch(sc, s.rc.(*recordSet).data, 1000) + c.Assert(err, IsNil) + + collector := &SampleCollector{ + Count: int64(s.count), + NullCount: 0, + Samples: s.samples, + FMSketch: sketch, + } + hg, err := BuildColumn(ctx, bucketCount, 2, collector, types.NewFieldType(mysql.TypeLonglong)) + hg.PreCalculateScalar() + c.Check(err, IsNil) + col := &Column{Histogram: *hg, CMSketch: buildCMSketch(s.rc.(*recordSet).data), Info: &model.ColumnInfo{}} + tbl := &Table{ + HistColl: HistColl{ + Count: int64(col.TotalRowCount()), + Columns: make(map[int64]*Column), + }, + } + ran := []*ranger.Range{{ + LowVal: []types.Datum{{}}, + HighVal: []types.Datum{types.MaxValueDatum()}, + }} + count, err := tbl.GetRowCountByColumnRanges(sc, 0, ran) + c.Assert(err, IsNil) + c.Assert(int(count), Equals, 100000) + ran[0].LowVal[0] = types.MinNotNullDatum() + count, err = tbl.GetRowCountByColumnRanges(sc, 0, ran) + c.Assert(err, IsNil) + c.Assert(int(count), Equals, 99900) + ran[0].LowVal[0] = types.NewIntDatum(1000) + ran[0].LowExclude = true + ran[0].HighVal[0] = types.NewIntDatum(2000) + ran[0].HighExclude = true + count, err = tbl.GetRowCountByColumnRanges(sc, 0, ran) + c.Assert(err, IsNil) + c.Assert(int(count), Equals, 2500) + ran[0].LowExclude = false + ran[0].HighExclude = false + count, err = tbl.GetRowCountByColumnRanges(sc, 0, ran) + c.Assert(err, IsNil) + c.Assert(int(count), Equals, 2500) + ran[0].LowVal[0] = ran[0].HighVal[0] + count, err = tbl.GetRowCountByColumnRanges(sc, 0, ran) + c.Assert(err, IsNil) + c.Assert(int(count), Equals, 100) + + tbl.Columns[0] = col + ran[0].LowVal[0] = types.Datum{} + ran[0].HighVal[0] = types.MaxValueDatum() + count, err = tbl.GetRowCountByColumnRanges(sc, 0, ran) + c.Assert(err, IsNil) + c.Assert(int(count), Equals, 100000) + ran[0].LowVal[0] = types.NewIntDatum(1000) + ran[0].LowExclude = true + ran[0].HighVal[0] = types.NewIntDatum(2000) + ran[0].HighExclude = true + count, err = tbl.GetRowCountByColumnRanges(sc, 0, ran) + c.Assert(err, IsNil) + c.Assert(int(count), Equals, 9998) + ran[0].LowExclude = false + ran[0].HighExclude = false + count, err = tbl.GetRowCountByColumnRanges(sc, 0, ran) + c.Assert(err, IsNil) + c.Assert(int(count), Equals, 10000) + ran[0].LowVal[0] = ran[0].HighVal[0] + count, err = tbl.GetRowCountByColumnRanges(sc, 0, ran) + c.Assert(err, IsNil) + c.Assert(int(count), Equals, 1) +} + +func (s 
*testStatisticsSuite) TestIntColumnRanges(c *C) { + bucketCount := int64(256) + ctx := mock.NewContext() + sc := ctx.GetSessionVars().StmtCtx + + s.pk.(*recordSet).cursor = 0 + rowCount, hg, err := buildPK(ctx, bucketCount, 0, s.pk) + hg.PreCalculateScalar() + c.Check(err, IsNil) + c.Check(rowCount, Equals, int64(100000)) + col := &Column{Histogram: *hg, Info: &model.ColumnInfo{}} + tbl := &Table{ + HistColl: HistColl{ + Count: int64(col.TotalRowCount()), + Columns: make(map[int64]*Column), + }, + } + ran := []*ranger.Range{{ + LowVal: []types.Datum{types.NewIntDatum(math.MinInt64)}, + HighVal: []types.Datum{types.NewIntDatum(math.MaxInt64)}, + }} + count, err := tbl.GetRowCountByIntColumnRanges(sc, 0, ran) + c.Assert(err, IsNil) + c.Assert(int(count), Equals, 100000) + ran[0].LowVal[0].SetInt64(1000) + ran[0].HighVal[0].SetInt64(2000) + count, err = tbl.GetRowCountByIntColumnRanges(sc, 0, ran) + c.Assert(err, IsNil) + c.Assert(int(count), Equals, 1000) + ran[0].LowVal[0].SetInt64(1001) + ran[0].HighVal[0].SetInt64(1999) + count, err = tbl.GetRowCountByIntColumnRanges(sc, 0, ran) + c.Assert(err, IsNil) + c.Assert(int(count), Equals, 998) + ran[0].LowVal[0].SetInt64(1000) + ran[0].HighVal[0].SetInt64(1000) + count, err = tbl.GetRowCountByIntColumnRanges(sc, 0, ran) + c.Assert(err, IsNil) + c.Assert(int(count), Equals, 1) + + ran = []*ranger.Range{{ + LowVal: []types.Datum{types.NewUintDatum(0)}, + HighVal: []types.Datum{types.NewUintDatum(math.MaxUint64)}, + }} + count, err = tbl.GetRowCountByIntColumnRanges(sc, 0, ran) + c.Assert(err, IsNil) + c.Assert(int(count), Equals, 100000) + ran[0].LowVal[0].SetUint64(1000) + ran[0].HighVal[0].SetUint64(2000) + count, err = tbl.GetRowCountByIntColumnRanges(sc, 0, ran) + c.Assert(err, IsNil) + c.Assert(int(count), Equals, 1000) + ran[0].LowVal[0].SetUint64(1001) + ran[0].HighVal[0].SetUint64(1999) + count, err = tbl.GetRowCountByIntColumnRanges(sc, 0, ran) + c.Assert(err, IsNil) + c.Assert(int(count), Equals, 998) + ran[0].LowVal[0].SetUint64(1000) + ran[0].HighVal[0].SetUint64(1000) + count, err = tbl.GetRowCountByIntColumnRanges(sc, 0, ran) + c.Assert(err, IsNil) + c.Assert(int(count), Equals, 1) + + tbl.Columns[0] = col + ran[0].LowVal[0].SetInt64(math.MinInt64) + ran[0].HighVal[0].SetInt64(math.MaxInt64) + count, err = tbl.GetRowCountByIntColumnRanges(sc, 0, ran) + c.Assert(err, IsNil) + c.Assert(int(count), Equals, 100000) + ran[0].LowVal[0].SetInt64(1000) + ran[0].HighVal[0].SetInt64(2000) + count, err = tbl.GetRowCountByIntColumnRanges(sc, 0, ran) + c.Assert(err, IsNil) + c.Assert(int(count), Equals, 1001) + ran[0].LowVal[0].SetInt64(1001) + ran[0].HighVal[0].SetInt64(1999) + count, err = tbl.GetRowCountByIntColumnRanges(sc, 0, ran) + c.Assert(err, IsNil) + c.Assert(int(count), Equals, 999) + ran[0].LowVal[0].SetInt64(1000) + ran[0].HighVal[0].SetInt64(1000) + count, err = tbl.GetRowCountByIntColumnRanges(sc, 0, ran) + c.Assert(err, IsNil) + c.Assert(int(count), Equals, 1) + + tbl.Count *= 10 + count, err = tbl.GetRowCountByIntColumnRanges(sc, 0, ran) + c.Assert(err, IsNil) + c.Assert(int(count), Equals, 10) +} + +func (s *testStatisticsSuite) TestIndexRanges(c *C) { + bucketCount := int64(256) + ctx := mock.NewContext() + sc := ctx.GetSessionVars().StmtCtx + + s.rc.(*recordSet).cursor = 0 + rowCount, hg, cms, err := buildIndex(ctx, bucketCount, 0, s.rc) + hg.PreCalculateScalar() + c.Check(err, IsNil) + c.Check(rowCount, Equals, int64(100000)) + idxInfo := &model.IndexInfo{Columns: []*model.IndexColumn{{Offset: 0}}} + idx := &Index{Histogram: 
*hg, CMSketch: cms, Info: idxInfo} + tbl := &Table{ + HistColl: HistColl{ + Count: int64(idx.TotalRowCount()), + Indices: make(map[int64]*Index), + }, + } + ran := []*ranger.Range{{ + LowVal: []types.Datum{types.MinNotNullDatum()}, + HighVal: []types.Datum{types.MaxValueDatum()}, + }} + count, err := tbl.GetRowCountByIndexRanges(sc, 0, ran) + c.Assert(err, IsNil) + c.Assert(int(count), Equals, 99900) + ran[0].LowVal[0] = types.NewIntDatum(1000) + ran[0].HighVal[0] = types.NewIntDatum(2000) + count, err = tbl.GetRowCountByIndexRanges(sc, 0, ran) + c.Assert(err, IsNil) + c.Assert(int(count), Equals, 2500) + ran[0].LowVal[0] = types.NewIntDatum(1001) + ran[0].HighVal[0] = types.NewIntDatum(1999) + count, err = tbl.GetRowCountByIndexRanges(sc, 0, ran) + c.Assert(err, IsNil) + c.Assert(int(count), Equals, 2500) + ran[0].LowVal[0] = types.NewIntDatum(1000) + ran[0].HighVal[0] = types.NewIntDatum(1000) + count, err = tbl.GetRowCountByIndexRanges(sc, 0, ran) + c.Assert(err, IsNil) + c.Assert(int(count), Equals, 100) + + tbl.Indices[0] = &Index{Info: &model.IndexInfo{Columns: []*model.IndexColumn{{Offset: 0}}, Unique: true}} + ran[0].LowVal[0] = types.NewIntDatum(1000) + ran[0].HighVal[0] = types.NewIntDatum(1000) + count, err = tbl.GetRowCountByIndexRanges(sc, 0, ran) + c.Assert(err, IsNil) + c.Assert(int(count), Equals, 1) + + tbl.Indices[0] = idx + ran[0].LowVal[0] = types.MinNotNullDatum() + ran[0].HighVal[0] = types.MaxValueDatum() + count, err = tbl.GetRowCountByIndexRanges(sc, 0, ran) + c.Assert(err, IsNil) + c.Assert(int(count), Equals, 100000) + ran[0].LowVal[0] = types.NewIntDatum(1000) + ran[0].HighVal[0] = types.NewIntDatum(2000) + count, err = tbl.GetRowCountByIndexRanges(sc, 0, ran) + c.Assert(err, IsNil) + c.Assert(int(count), Equals, 1000) + ran[0].LowVal[0] = types.NewIntDatum(1001) + ran[0].HighVal[0] = types.NewIntDatum(1990) + count, err = tbl.GetRowCountByIndexRanges(sc, 0, ran) + c.Assert(err, IsNil) + c.Assert(int(count), Equals, 989) + ran[0].LowVal[0] = types.NewIntDatum(1000) + ran[0].HighVal[0] = types.NewIntDatum(1000) + count, err = tbl.GetRowCountByIndexRanges(sc, 0, ran) + c.Assert(err, IsNil) + c.Assert(int(count), Equals, 0) +} diff --git a/statistics/table.go b/statistics/table.go new file mode 100644 index 0000000..8f3f4ae --- /dev/null +++ b/statistics/table.go @@ -0,0 +1,704 @@ +// Copyright 2017 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package statistics + +import ( + "fmt" + "github.com/pingcap/errors" + "github.com/pingcap/tidb/expression" + "github.com/pingcap/tidb/parser/model" + "github.com/pingcap/tidb/parser/mysql" + "github.com/pingcap/tidb/sessionctx/stmtctx" + "github.com/pingcap/tidb/tablecodec" + "github.com/pingcap/tidb/types" + "github.com/pingcap/tidb/util/chunk" + "github.com/pingcap/tidb/util/codec" + "github.com/pingcap/tidb/util/ranger" + "math" + "sort" + "strings" +) + +const ( + // When we haven't analyzed a table, we use pseudo statistics to estimate costs. 
+ // It has row count 10000, equal condition selects 1/1000 of total rows, less condition selects 1/3 of total rows, + // between condition selects 1/40 of total rows. + pseudoRowCount = 10000 + pseudoEqualRate = 1000 + pseudoLessRate = 3 + pseudoBetweenRate = 40 + pseudoColSize = 8.0 + + outOfRangeBetweenRate = 100 +) + +// PseudoVersion means the pseudo statistics version is 0. +const PseudoVersion uint64 = 0 + +// Table represents statistics for a table. +type Table struct { + HistColl + Version uint64 + Name string +} + +// HistColl is a collection of histogram. It collects enough information for plan to calculate the selectivity. +type HistColl struct { + PhysicalID int64 + Columns map[int64]*Column + Indices map[int64]*Index + // Idx2ColumnIDs maps the index id to its column ids. It's used to calculate the selectivity in planner. + Idx2ColumnIDs map[int64][]int64 + // ColID2IdxID maps the column id to index id whose first column is it. It's used to calculate the selectivity in planner. + ColID2IdxID map[int64]int64 + Count int64 + ModifyCount int64 // Total modify count in a table. + + // HavePhysicalID is true means this HistColl is from single table and have its ID's information. + // The physical id is used when try to load column stats from storage. + HavePhysicalID bool + Pseudo bool +} + +// Copy copies the current table. +func (t *Table) Copy() *Table { + newHistColl := HistColl{ + PhysicalID: t.PhysicalID, + HavePhysicalID: t.HavePhysicalID, + Count: t.Count, + Columns: make(map[int64]*Column), + Indices: make(map[int64]*Index), + Pseudo: t.Pseudo, + ModifyCount: t.ModifyCount, + } + for id, col := range t.Columns { + newHistColl.Columns[id] = col + } + for id, idx := range t.Indices { + newHistColl.Indices[id] = idx + } + nt := &Table{ + HistColl: newHistColl, + Version: t.Version, + Name: t.Name, + } + return nt +} + +// String implements Stringer interface. +func (t *Table) String() string { + strs := make([]string, 0, len(t.Columns)+1) + strs = append(strs, fmt.Sprintf("Table:%d Count:%d", t.PhysicalID, t.Count)) + cols := make([]*Column, 0, len(t.Columns)) + for _, col := range t.Columns { + cols = append(cols, col) + } + sort.Slice(cols, func(i, j int) bool { return cols[i].ID < cols[j].ID }) + for _, col := range cols { + strs = append(strs, col.String()) + } + idxs := make([]*Index, 0, len(t.Indices)) + for _, idx := range t.Indices { + idxs = append(idxs, idx) + } + sort.Slice(idxs, func(i, j int) bool { return idxs[i].ID < idxs[j].ID }) + for _, idx := range idxs { + strs = append(strs, idx.String()) + } + return strings.Join(strs, "\n") +} + +// IndexStartWithColumn finds the first index whose first column is the given column. +func (t *Table) IndexStartWithColumn(colName string) *Index { + for _, index := range t.Indices { + if index.Info.Columns[0].Name.L == colName { + return index + } + } + return nil +} + +// ColumnByName finds the statistics.Column for the given column. +func (t *Table) ColumnByName(colName string) *Column { + for _, c := range t.Columns { + if c.Info.Name.L == colName { + return c + } + } + return nil +} + +// RatioOfPseudoEstimate means if modifyCount / statsTblCount is greater than this ratio, we think the stats is invalid +// and use pseudo estimation. +const RatioOfPseudoEstimate = 0.7 + +// IsOutdated returns true if the table stats is outdated. 
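+// A table counts as outdated once ModifyCount/Count exceeds RatioOfPseudoEstimate (0.7);
+// e.g. with Count=1000 and ModifyCount=800 the ratio is 0.8, so pseudo estimation is used instead.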
+func (t *Table) IsOutdated() bool { + if t.Count > 0 && float64(t.ModifyCount)/float64(t.Count) > RatioOfPseudoEstimate { + return true + } + return false +} + +// ColumnGreaterRowCount estimates the row count where the column greater than value. +func (t *Table) ColumnGreaterRowCount(sc *stmtctx.StatementContext, value types.Datum, colID int64) float64 { + c, ok := t.Columns[colID] + if !ok || c.IsInvalid(sc, t.Pseudo) { + return float64(t.Count) / pseudoLessRate + } + return c.greaterRowCount(value) * c.GetIncreaseFactor(t.Count) +} + +// ColumnLessRowCount estimates the row count where the column less than value. Note that null values are not counted. +func (t *Table) ColumnLessRowCount(sc *stmtctx.StatementContext, value types.Datum, colID int64) float64 { + c, ok := t.Columns[colID] + if !ok || c.IsInvalid(sc, t.Pseudo) { + return float64(t.Count) / pseudoLessRate + } + return c.lessRowCount(value) * c.GetIncreaseFactor(t.Count) +} + +// ColumnBetweenRowCount estimates the row count where column greater or equal to a and less than b. +func (t *Table) ColumnBetweenRowCount(sc *stmtctx.StatementContext, a, b types.Datum, colID int64) float64 { + c, ok := t.Columns[colID] + if !ok || c.IsInvalid(sc, t.Pseudo) { + return float64(t.Count) / pseudoBetweenRate + } + count := c.BetweenRowCount(a, b) + if a.IsNull() { + count += float64(c.NullCount) + } + return count * c.GetIncreaseFactor(t.Count) +} + +// ColumnEqualRowCount estimates the row count where the column equals to value. +func (t *Table) ColumnEqualRowCount(sc *stmtctx.StatementContext, value types.Datum, colID int64) (float64, error) { + c, ok := t.Columns[colID] + if !ok || c.IsInvalid(sc, t.Pseudo) { + return float64(t.Count) / pseudoEqualRate, nil + } + result, err := c.equalRowCount(sc, value, t.ModifyCount) + result *= c.GetIncreaseFactor(t.Count) + return result, errors.Trace(err) +} + +// GetRowCountByIntColumnRanges estimates the row count by a slice of IntColumnRange. +func (coll *HistColl) GetRowCountByIntColumnRanges(sc *stmtctx.StatementContext, colID int64, intRanges []*ranger.Range) (float64, error) { + c, ok := coll.Columns[colID] + if !ok || c.IsInvalid(sc, coll.Pseudo) { + if len(intRanges) == 0 { + return 0, nil + } + if intRanges[0].LowVal[0].Kind() == types.KindInt64 { + return getPseudoRowCountBySignedIntRanges(intRanges, float64(coll.Count)), nil + } + return getPseudoRowCountByUnsignedIntRanges(intRanges, float64(coll.Count)), nil + } + result, err := c.GetColumnRowCount(sc, intRanges, coll.ModifyCount, true) + result *= c.GetIncreaseFactor(coll.Count) + return result, errors.Trace(err) +} + +// GetRowCountByColumnRanges estimates the row count by a slice of Range. +func (coll *HistColl) GetRowCountByColumnRanges(sc *stmtctx.StatementContext, colID int64, colRanges []*ranger.Range) (float64, error) { + c, ok := coll.Columns[colID] + if !ok || c.IsInvalid(sc, coll.Pseudo) { + return GetPseudoRowCountByColumnRanges(sc, float64(coll.Count), colRanges, 0) + } + result, err := c.GetColumnRowCount(sc, colRanges, coll.ModifyCount, false) + result *= c.GetIncreaseFactor(coll.Count) + return result, errors.Trace(err) +} + +// GetRowCountByIndexRanges estimates the row count by a slice of Range. 
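+// Missing or invalid index stats fall back to pseudo estimation; when a CM Sketch is
+// available the getIndexRowCount path (which queries the sketch for equality prefixes) is
+// taken, otherwise only the index histogram is used, and the result is then scaled by the
+// index's increase factor.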
+func (coll *HistColl) GetRowCountByIndexRanges(sc *stmtctx.StatementContext, idxID int64, indexRanges []*ranger.Range) (float64, error) { + idx := coll.Indices[idxID] + if idx == nil || idx.IsInvalid(coll.Pseudo) { + colsLen := -1 + if idx != nil && idx.Info.Unique { + colsLen = len(idx.Info.Columns) + } + return getPseudoRowCountByIndexRanges(sc, indexRanges, float64(coll.Count), colsLen) + } + var result float64 + var err error + if idx.CMSketch != nil { + result, err = coll.getIndexRowCount(sc, idxID, indexRanges) + } else { + result, err = idx.GetRowCount(sc, indexRanges, coll.ModifyCount) + } + result *= idx.GetIncreaseFactor(coll.Count) + return result, errors.Trace(err) +} + +// PseudoAvgCountPerValue gets a pseudo average count if histogram not exists. +func (t *Table) PseudoAvgCountPerValue() float64 { + return float64(t.Count) / pseudoEqualRate +} + +// GetOrdinalOfRangeCond gets the ordinal of the position range condition, +// if not exist, it returns the end position. +func GetOrdinalOfRangeCond(sc *stmtctx.StatementContext, ran *ranger.Range) int { + for i := range ran.LowVal { + a, b := ran.LowVal[i], ran.HighVal[i] + cmp, err := a.CompareDatum(sc, &b) + if err != nil { + return 0 + } + if cmp != 0 { + return i + } + } + return len(ran.LowVal) +} + +// ID2UniqueID generates a new HistColl whose `Columns` is built from UniqueID of given columns. +func (coll *HistColl) ID2UniqueID(columns []*expression.Column) *HistColl { + cols := make(map[int64]*Column) + for _, col := range columns { + colHist, ok := coll.Columns[col.ID] + if ok { + cols[col.UniqueID] = colHist + } + } + newColl := &HistColl{ + PhysicalID: coll.PhysicalID, + HavePhysicalID: coll.HavePhysicalID, + Pseudo: coll.Pseudo, + Count: coll.Count, + ModifyCount: coll.ModifyCount, + Columns: cols, + } + return newColl +} + +// GenerateHistCollFromColumnInfo generates a new HistColl whose ColID2IdxID and IdxID2ColIDs is built from the given parameter. +func (coll *HistColl) GenerateHistCollFromColumnInfo(infos []*model.ColumnInfo, columns []*expression.Column) *HistColl { + newColHistMap := make(map[int64]*Column) + colInfoID2UniqueID := make(map[int64]int64) + colNames2UniqueID := make(map[string]int64) + for _, col := range columns { + colInfoID2UniqueID[col.ID] = col.UniqueID + } + for _, colInfo := range infos { + uniqueID, ok := colInfoID2UniqueID[colInfo.ID] + if ok { + colNames2UniqueID[colInfo.Name.L] = uniqueID + } + } + for id, colHist := range coll.Columns { + uniqueID, ok := colInfoID2UniqueID[id] + // Collect the statistics by the given columns. + if ok { + newColHistMap[uniqueID] = colHist + } + } + newIdxHistMap := make(map[int64]*Index) + idx2Columns := make(map[int64][]int64) + colID2IdxID := make(map[int64]int64) + for _, idxHist := range coll.Indices { + ids := make([]int64, 0, len(idxHist.Info.Columns)) + for _, idxCol := range idxHist.Info.Columns { + uniqueID, ok := colNames2UniqueID[idxCol.Name.L] + if !ok { + break + } + ids = append(ids, uniqueID) + } + // If the length of the id list is 0, this index won't be used in this query. 
+ if len(ids) == 0 { + continue + } + colID2IdxID[ids[0]] = idxHist.ID + newIdxHistMap[idxHist.ID] = idxHist + idx2Columns[idxHist.ID] = ids + } + newColl := &HistColl{ + PhysicalID: coll.PhysicalID, + HavePhysicalID: coll.HavePhysicalID, + Pseudo: coll.Pseudo, + Count: coll.Count, + ModifyCount: coll.ModifyCount, + Columns: newColHistMap, + Indices: newIdxHistMap, + ColID2IdxID: colID2IdxID, + Idx2ColumnIDs: idx2Columns, + } + return newColl +} + +// isSingleColIdxNullRange checks if a range is [NULL, NULL] on a single-column index. +func isSingleColIdxNullRange(idx *Index, ran *ranger.Range) bool { + if len(idx.Info.Columns) > 1 { + return false + } + l, h := ran.LowVal[0], ran.HighVal[0] + if l.IsNull() && h.IsNull() { + return true + } + return false +} + +// getEqualCondSelectivity gets the selectivity of the equal conditions. `coverAll` means if the conditions +// have covered all the index columns. +func (coll *HistColl) getEqualCondSelectivity(idx *Index, bytes []byte, coverAll bool, unique bool) float64 { + // In this case, the row count is at most 1. + if unique && coverAll { + return 1.0 / float64(idx.TotalRowCount()) + } + val := types.NewBytesDatum(bytes) + if idx.outOfRange(val) { + // When the value is out of range, we could not found this value in the CM Sketch, + // so we use heuristic methods to estimate the selectivity. + if idx.NDV > 0 && coverAll { + // for equality queries + return float64(coll.ModifyCount) / float64(idx.NDV) / idx.TotalRowCount() + } + // for range queries + return float64(coll.ModifyCount) / outOfRangeBetweenRate / idx.TotalRowCount() + } + return float64(idx.CMSketch.QueryBytes(bytes)) / float64(idx.TotalRowCount()) +} + +func (coll *HistColl) getIndexRowCount(sc *stmtctx.StatementContext, idxID int64, indexRanges []*ranger.Range) (float64, error) { + idx := coll.Indices[idxID] + totalCount := float64(0) + for _, ran := range indexRanges { + rangePosition := GetOrdinalOfRangeCond(sc, ran) + var rangeVals []types.Datum + // Try to enum the last range values. + if rangePosition != len(ran.LowVal) { + rangeVals = enumRangeValues(ran.LowVal[rangePosition], ran.HighVal[rangePosition], ran.LowExclude, ran.HighExclude) + if rangeVals != nil { + rangePosition++ + } + } + // If first one is range, just use the previous way to estimate; if it is [NULL, NULL] range + // on single-column index, use previous way as well, because CMSketch does not contain null + // values in this case. + if rangePosition == 0 || isSingleColIdxNullRange(idx, ran) { + count, err := idx.GetRowCount(sc, []*ranger.Range{ran}, coll.ModifyCount) + if err != nil { + return 0, errors.Trace(err) + } + totalCount += count + continue + } + var selectivity float64 + coverAll := len(ran.LowVal) == len(idx.Info.Columns) && rangePosition == len(ran.LowVal) + // use CM Sketch to estimate the equal conditions + if rangeVals == nil { + bytes, err := codec.EncodeKey(sc, nil, ran.LowVal[:rangePosition]...) + if err != nil { + return 0, errors.Trace(err) + } + selectivity = coll.getEqualCondSelectivity(idx, bytes, coverAll, idx.Info.Unique) + } else { + bytes, err := codec.EncodeKey(sc, nil, ran.LowVal[:rangePosition-1]...) 
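+ // The encoded bytes cover only the leading equality columns; each enumerated value of
+ // the next column is appended to this prefix below and its selectivity is accumulated.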
+ if err != nil { + return 0, errors.Trace(err) + } + prefixLen := len(bytes) + for _, val := range rangeVals { + bytes = bytes[:prefixLen] + bytes, err = codec.EncodeKey(sc, bytes, val) + if err != nil { + return 0, err + } + selectivity += coll.getEqualCondSelectivity(idx, bytes, coverAll, idx.Info.Unique) + } + } + // use histogram to estimate the range condition + if rangePosition != len(ran.LowVal) { + rang := ranger.Range{ + LowVal: []types.Datum{ran.LowVal[rangePosition]}, + LowExclude: ran.LowExclude, + HighVal: []types.Datum{ran.HighVal[rangePosition]}, + HighExclude: ran.HighExclude, + } + var count float64 + var err error + colIDs := coll.Idx2ColumnIDs[idxID] + var colID int64 + if rangePosition >= len(colIDs) { + colID = -1 + } else { + colID = colIDs[rangePosition] + } + // prefer index stats over column stats + if idx, ok := coll.ColID2IdxID[colID]; ok { + count, err = coll.GetRowCountByIndexRanges(sc, idx, []*ranger.Range{&rang}) + } else { + count, err = coll.GetRowCountByColumnRanges(sc, colID, []*ranger.Range{&rang}) + } + if err != nil { + return 0, errors.Trace(err) + } + selectivity = selectivity * count / float64(idx.TotalRowCount()) + } + totalCount += selectivity * float64(idx.TotalRowCount()) + } + if totalCount > idx.TotalRowCount() { + totalCount = idx.TotalRowCount() + } + return totalCount, nil +} + +const fakePhysicalID int64 = -1 + +// PseudoTable creates a pseudo table statistics. +func PseudoTable(tblInfo *model.TableInfo) *Table { + pseudoHistColl := HistColl{ + Count: pseudoRowCount, + PhysicalID: tblInfo.ID, + HavePhysicalID: true, + Columns: make(map[int64]*Column, len(tblInfo.Columns)), + Indices: make(map[int64]*Index, len(tblInfo.Indices)), + Pseudo: true, + } + t := &Table{ + HistColl: pseudoHistColl, + } + for _, col := range tblInfo.Columns { + if col.State == model.StatePublic { + t.Columns[col.ID] = &Column{ + PhysicalID: fakePhysicalID, + Info: col, + IsHandle: tblInfo.PKIsHandle && mysql.HasPriKeyFlag(col.Flag), + Histogram: *NewHistogram(col.ID, 0, 0, 0, &col.FieldType, 0, 0), + } + } + } + for _, idx := range tblInfo.Indices { + if idx.State == model.StatePublic { + t.Indices[idx.ID] = &Index{ + Info: idx, + Histogram: *NewHistogram(idx.ID, 0, 0, 0, types.NewFieldType(mysql.TypeBlob), 0, 0)} + } + } + return t +} + +func getPseudoRowCountByIndexRanges(sc *stmtctx.StatementContext, indexRanges []*ranger.Range, + tableRowCount float64, colsLen int) (float64, error) { + if tableRowCount == 0 { + return 0, nil + } + var totalCount float64 + for _, indexRange := range indexRanges { + count := tableRowCount + i, err := indexRange.PrefixEqualLen(sc) + if err != nil { + return 0, errors.Trace(err) + } + if i == colsLen && !indexRange.LowExclude && !indexRange.HighExclude { + totalCount += 1.0 + continue + } + if i >= len(indexRange.LowVal) { + i = len(indexRange.LowVal) - 1 + } + rowCount, err := GetPseudoRowCountByColumnRanges(sc, tableRowCount, []*ranger.Range{indexRange}, i) + if err != nil { + return 0, errors.Trace(err) + } + count = count / tableRowCount * rowCount + // If the condition is a = 1, b = 1, c = 1, d = 1, we think every a=1, b=1, c=1 only filtrate 1/100 data, + // so as to avoid collapsing too fast. + for j := 0; j < i; j++ { + count = count / float64(100) + } + totalCount += count + } + if totalCount > tableRowCount { + totalCount = tableRowCount / 3.0 + } + return totalCount, nil +} + +// GetPseudoRowCountByColumnRanges calculate the row count by the ranges if there's no statistics information for this column. 
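+// It uses the fixed pseudo rates: a range bounded on one side only takes roughly
+// 1/pseudoLessRate (1/3) of the rows, a point range 1/pseudoEqualRate (1/1000), any other
+// finite range 1/pseudoBetweenRate (1/40), and an unbounded range keeps (almost) the whole count.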
+func GetPseudoRowCountByColumnRanges(sc *stmtctx.StatementContext, tableRowCount float64, columnRanges []*ranger.Range, colIdx int) (float64, error) { + var rowCount float64 + var err error + for _, ran := range columnRanges { + if ran.LowVal[colIdx].Kind() == types.KindNull && ran.HighVal[colIdx].Kind() == types.KindMaxValue { + rowCount += tableRowCount + } else if ran.LowVal[colIdx].Kind() == types.KindMinNotNull { + nullCount := tableRowCount / pseudoEqualRate + if ran.HighVal[colIdx].Kind() == types.KindMaxValue { + rowCount += tableRowCount - nullCount + } else if err == nil { + lessCount := tableRowCount / pseudoLessRate + rowCount += lessCount - nullCount + } + } else if ran.HighVal[colIdx].Kind() == types.KindMaxValue { + rowCount += tableRowCount / pseudoLessRate + } else { + compare, err1 := ran.LowVal[colIdx].CompareDatum(sc, &ran.HighVal[colIdx]) + if err1 != nil { + return 0, errors.Trace(err1) + } + if compare == 0 { + rowCount += tableRowCount / pseudoEqualRate + } else { + rowCount += tableRowCount / pseudoBetweenRate + } + } + if err != nil { + return 0, errors.Trace(err) + } + } + if rowCount > tableRowCount { + rowCount = tableRowCount + } + return rowCount, nil +} + +func getPseudoRowCountBySignedIntRanges(intRanges []*ranger.Range, tableRowCount float64) float64 { + var rowCount float64 + for _, rg := range intRanges { + var cnt float64 + low := rg.LowVal[0].GetInt64() + if rg.LowVal[0].Kind() == types.KindNull || rg.LowVal[0].Kind() == types.KindMinNotNull { + low = math.MinInt64 + } + high := rg.HighVal[0].GetInt64() + if rg.HighVal[0].Kind() == types.KindMaxValue { + high = math.MaxInt64 + } + if low == math.MinInt64 && high == math.MaxInt64 { + cnt = tableRowCount + } else if low == math.MinInt64 { + cnt = tableRowCount / pseudoLessRate + } else if high == math.MaxInt64 { + cnt = tableRowCount / pseudoLessRate + } else { + if low == high { + cnt = 1 // When primary key is handle, the equal row count is at most one. + } else { + cnt = tableRowCount / pseudoBetweenRate + } + } + if high-low > 0 && cnt > float64(high-low) { + cnt = float64(high - low) + } + rowCount += cnt + } + if rowCount > tableRowCount { + rowCount = tableRowCount + } + return rowCount +} + +func getPseudoRowCountByUnsignedIntRanges(intRanges []*ranger.Range, tableRowCount float64) float64 { + var rowCount float64 + for _, rg := range intRanges { + var cnt float64 + low := rg.LowVal[0].GetUint64() + if rg.LowVal[0].Kind() == types.KindNull || rg.LowVal[0].Kind() == types.KindMinNotNull { + low = 0 + } + high := rg.HighVal[0].GetUint64() + if rg.HighVal[0].Kind() == types.KindMaxValue { + high = math.MaxUint64 + } + if low == 0 && high == math.MaxUint64 { + cnt = tableRowCount + } else if low == 0 { + cnt = tableRowCount / pseudoLessRate + } else if high == math.MaxUint64 { + cnt = tableRowCount / pseudoLessRate + } else { + if low == high { + cnt = 1 // When primary key is handle, the equal row count is at most one. + } else { + cnt = tableRowCount / pseudoBetweenRate + } + } + if high > low && cnt > float64(high-low) { + cnt = float64(high - low) + } + rowCount += cnt + } + if rowCount > tableRowCount { + rowCount = tableRowCount + } + return rowCount +} + +// GetAvgRowSize computes average row size for given columns. 
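+// With pseudo or empty statistics every column is assumed to take pseudoColSize (8) bytes;
+// otherwise the per-column AvgColSize from the histograms is used. One flag byte per column
+// is added in both cases.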
+func (coll *HistColl) GetAvgRowSize(cols []*expression.Column, isEncodedKey bool) (size float64) { + if coll.Pseudo || len(coll.Columns) == 0 || coll.Count == 0 { + size = pseudoColSize * float64(len(cols)) + } else { + for _, col := range cols { + colHist, ok := coll.Columns[col.UniqueID] + // Normally this would not happen, it is for compatibility with old version stats which + // does not include TotColSize. + if !ok || (!colHist.IsHandle && colHist.TotColSize == 0 && (colHist.NullCount != coll.Count)) { + size += pseudoColSize + continue + } + // We differentiate if the column is encoded as key or value, because the resulted size + // is different. + size += colHist.AvgColSize(coll.Count, isEncodedKey) + } + } + // Add 1 byte for each column's flag byte. See `encode` for details. + return size + float64(len(cols)) +} + +// GetAvgRowSizeListInDisk computes average row size for given columns. +func (coll *HistColl) GetAvgRowSizeListInDisk(cols []*expression.Column, padChar bool) (size float64) { + if coll.Pseudo || len(coll.Columns) == 0 || coll.Count == 0 { + for _, col := range cols { + size += float64(chunk.EstimateTypeWidth(padChar, col.GetType())) + } + } else { + for _, col := range cols { + colHist, ok := coll.Columns[col.UniqueID] + // Normally this would not happen, it is for compatibility with old version stats which + // does not include TotColSize. + if !ok || (!colHist.IsHandle && colHist.TotColSize == 0 && (colHist.NullCount != coll.Count)) { + size += float64(chunk.EstimateTypeWidth(padChar, col.GetType())) + continue + } + size += colHist.AvgColSizeListInDisk(coll.Count) + } + } + // Add 8 byte for each column's size record. See `ListInDisk` for details. + return size + float64(8*len(cols)) +} + +// GetTableAvgRowSize computes average row size for a table scan, exclude the index key-value pairs. +func (coll *HistColl) GetTableAvgRowSize(cols []*expression.Column) (size float64) { + size = coll.GetAvgRowSize(cols, false) + size += tablecodec.RecordRowKeyLen + // The `cols` for TiKV always contain the row_id, so prefix row size subtract its length. + size -= 8 + return +} + +// GetIndexAvgRowSize computes average row size for a index scan. +func (coll *HistColl) GetIndexAvgRowSize(cols []*expression.Column, isUnique bool) (size float64) { + size = coll.GetAvgRowSize(cols, true) + // tablePrefix(1) + tableID(8) + indexPrefix(2) + indexID(8) + // Because the cols for index scan always contain the handle, so we don't add the rowID here. 
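+ // 1 (tablePrefix) + 8 (tableID) + 2 (indexPrefix) + 8 (indexID) = 19 bytes of fixed key prefix.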
+ size += 19 + if !isUnique { + // add the len("_") + size++ + } + return +} diff --git a/statistics/testdata/stats_suite_in.json b/statistics/testdata/stats_suite_in.json new file mode 100644 index 0000000..5bcf1a1 --- /dev/null +++ b/statistics/testdata/stats_suite_in.json @@ -0,0 +1,48 @@ +[ + { + "name": "TestUniqCompEqualEst", + "cases": [ + "explain select * from t where a = 1 and b = 5 and 1 = 1" + ] + }, + { + "name": "TestColumnIndexNullEstimation", + "cases": [ + "explain select b from t where b is null", + "explain select b from t where b is not null", + "explain select b from t where b is null or b > 3", + "explain select b from t use index(idx_b)", + "explain select b from t where b < 4", + "explain select * from t where a is null", + "explain select * from t where a is not null", + "explain select * from t where a is null or a > 3", + "explain select * from t", + "explain select * from t where a < 4" + ] + }, + { + "name": "TestDiscreteDistribution", + "cases": [ + "explain select * from t where a = 'tw' and b < 0" + ] + }, + { + "name": "TestPrimaryKeySelectivity", + "cases": [ + [ + "explain select * from t where a > \"t\"" + ], + [ + "drop table t", + "create table t(a int primary key, b int)", + "explain select * from t where a > 1" + ] + ] + }, + { + "name": "TestSelectCombinedLowBound", + "cases": [ + "explain select * from t where kid = 1" + ] + } +] diff --git a/statistics/testdata/stats_suite_out.json b/statistics/testdata/stats_suite_out.json new file mode 100644 index 0000000..11195db --- /dev/null +++ b/statistics/testdata/stats_suite_out.json @@ -0,0 +1,92 @@ +[ + { + "Name": "TestUniqCompEqualEst", + "Cases": [ + [ + "IndexReader_6 1.00 root index:IndexScan_5", + "└─IndexScan_5 1.00 cop table:t, index:a, b, range:[1 5,1 5], keep order:false" + ] + ] + }, + { + "Name": "TestColumnIndexNullEstimation", + "Cases": [ + [ + "IndexReader_6 4.00 root index:IndexScan_5", + "└─IndexScan_5 4.00 cop table:t, index:b, range:[NULL,NULL], keep order:false" + ], + [ + "IndexReader_6 1.00 root index:IndexScan_5", + "└─IndexScan_5 1.00 cop table:t, index:b, range:[-inf,+inf], keep order:false" + ], + [ + "IndexReader_6 4.00 root index:IndexScan_5", + "└─IndexScan_5 4.00 cop table:t, index:b, range:[NULL,NULL], (3,+inf], keep order:false" + ], + [ + "IndexReader_5 5.00 root index:IndexScan_4", + "└─IndexScan_4 5.00 cop table:t, index:b, range:[NULL,+inf], keep order:false" + ], + [ + "IndexReader_6 1.00 root index:IndexScan_5", + "└─IndexScan_5 1.00 cop table:t, index:b, range:[-inf,4), keep order:false" + ], + [ + "TableReader_7 1.00 root data:Selection_6", + "└─Selection_6 1.00 cop isnull(test.t.a)", + " └─TableScan_5 5.00 cop table:t, range:[-inf,+inf], keep order:false" + ], + [ + "TableReader_7 4.00 root data:Selection_6", + "└─Selection_6 4.00 cop not(isnull(test.t.a))", + " └─TableScan_5 5.00 cop table:t, range:[-inf,+inf], keep order:false" + ], + [ + "TableReader_7 2.00 root data:Selection_6", + "└─Selection_6 2.00 cop or(isnull(test.t.a), gt(test.t.a, 3))", + " └─TableScan_5 5.00 cop table:t, range:[-inf,+inf], keep order:false" + ], + [ + "TableReader_5 5.00 root data:TableScan_4", + "└─TableScan_4 5.00 cop table:t, range:[-inf,+inf], keep order:false" + ], + [ + "TableReader_7 3.00 root data:Selection_6", + "└─Selection_6 3.00 cop lt(test.t.a, 4)", + " └─TableScan_5 5.00 cop table:t, range:[-inf,+inf], keep order:false" + ] + ] + }, + { + "Name": "TestDiscreteDistribution", + "Cases": [ + [ + "IndexReader_6 0.00 root index:IndexScan_5", + "└─IndexScan_5 0.00 cop 
table:t, index:a, b, range:[\"tw\" -inf,\"tw\" 0), keep order:false" + ] + ] + }, + { + "Name": "TestPrimaryKeySelectivity", + "Cases": [ + [ + "TableReader_7 3333.33 root data:Selection_6", + "└─Selection_6 3333.33 cop gt(test.t.a, \"t\")", + " └─TableScan_5 10000.00 cop table:t, range:[-inf,+inf], keep order:false, stats:pseudo" + ], + [ + "TableReader_6 3333.33 root data:TableScan_5", + "└─TableScan_5 3333.33 cop table:t, range:(1,+inf], keep order:false, stats:pseudo" + ] + ] + }, + { + "Name": "TestSelectCombinedLowBound", + "Cases": [ + [ + "IndexReader_6 7.00 root index:IndexScan_5", + "└─IndexScan_5 7.00 cop table:t, index:kid, pid, range:[1,1], keep order:false" + ] + ] + } +] diff --git a/store/mockoracle/oracle.go b/store/mockoracle/oracle.go new file mode 100644 index 0000000..2b661f7 --- /dev/null +++ b/store/mockoracle/oracle.go @@ -0,0 +1,106 @@ +// Copyright 2018 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package mockoracle + +import ( + "context" + "sync" + "time" + + "github.com/pingcap/errors" + "github.com/pingcap/tidb/store/tikv/oracle" +) + +var errStopped = errors.New("stopped") + +// MockOracle is a mock oracle for test. +type MockOracle struct { + sync.RWMutex + stop bool + offset time.Duration + lastTS uint64 +} + +// Enable enables the Oracle +func (o *MockOracle) Enable() { + o.Lock() + defer o.Unlock() + o.stop = false +} + +// Disable disables the Oracle +func (o *MockOracle) Disable() { + o.Lock() + defer o.Unlock() + o.stop = true +} + +// AddOffset adds the offset of the oracle. +func (o *MockOracle) AddOffset(d time.Duration) { + o.Lock() + defer o.Unlock() + + o.offset += d +} + +// GetTimestamp implements oracle.Oracle interface. +func (o *MockOracle) GetTimestamp(context.Context) (uint64, error) { + o.Lock() + defer o.Unlock() + + if o.stop { + return 0, errors.Trace(errStopped) + } + physical := oracle.GetPhysical(time.Now().Add(o.offset)) + ts := oracle.ComposeTS(physical, 0) + if oracle.ExtractPhysical(o.lastTS) == physical { + ts = o.lastTS + 1 + } + o.lastTS = ts + return ts, nil +} + +type mockOracleFuture struct { + o *MockOracle + ctx context.Context +} + +func (m *mockOracleFuture) Wait() (uint64, error) { + return m.o.GetTimestamp(m.ctx) +} + +// GetTimestampAsync implements oracle.Oracle interface. +func (o *MockOracle) GetTimestampAsync(ctx context.Context) oracle.Future { + return &mockOracleFuture{o, ctx} +} + +// IsExpired implements oracle.Oracle interface. +func (o *MockOracle) IsExpired(lockTimestamp uint64, TTL uint64) bool { + o.RLock() + defer o.RUnlock() + + return oracle.GetPhysical(time.Now().Add(o.offset)) >= oracle.ExtractPhysical(lockTimestamp)+int64(TTL) +} + +// UntilExpired implement oracle.Oracle interface. +func (o *MockOracle) UntilExpired(lockTimeStamp uint64, TTL uint64) int64 { + o.RLock() + defer o.RUnlock() + return oracle.ExtractPhysical(lockTimeStamp) + int64(TTL) - oracle.GetPhysical(time.Now().Add(o.offset)) +} + +// Close implements oracle.Oracle interface. 
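+// It is a no-op for the mock oracle.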
+func (o *MockOracle) Close() { + +} diff --git a/store/mockstore/mocktikv/aggregate.go b/store/mockstore/mocktikv/aggregate.go new file mode 100644 index 0000000..e4ed534 --- /dev/null +++ b/store/mockstore/mocktikv/aggregate.go @@ -0,0 +1,182 @@ +// Copyright 2017 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package mocktikv + +import ( + "context" + "github.com/pingcap/errors" + "github.com/pingcap/tidb/expression" + "github.com/pingcap/tidb/expression/aggregation" + "github.com/pingcap/tidb/types" + "github.com/pingcap/tidb/util/chunk" + "github.com/pingcap/tidb/util/codec" +) + +type aggCtxsMapper map[string][]*aggregation.AggEvaluateContext + +var _ executor = &hashAggExec{} + +type hashAggExec struct { + evalCtx *evalContext + aggExprs []aggregation.Aggregation + aggCtxsMap aggCtxsMapper + groupByExprs []expression.Expression + relatedColOffsets []int + row []types.Datum + groups map[string]struct{} + groupKeys [][]byte + groupKeyRows [][][]byte + executed bool + currGroupIdx int + count int64 + + src executor +} + +func (e *hashAggExec) SetSrcExec(exec executor) { + e.src = exec +} + +func (e *hashAggExec) GetSrcExec() executor { + return e.src +} + +func (e *hashAggExec) ResetCounts() { + e.src.ResetCounts() +} + +func (e *hashAggExec) Counts() []int64 { + return e.src.Counts() +} + +func (e *hashAggExec) innerNext(ctx context.Context) (bool, error) { + values, err := e.src.Next(ctx) + if err != nil { + return false, errors.Trace(err) + } + if values == nil { + return false, nil + } + err = e.aggregate(values) + if err != nil { + return false, errors.Trace(err) + } + return true, nil +} + +func (e *hashAggExec) Next(ctx context.Context) (value [][]byte, err error) { + e.count++ + if e.aggCtxsMap == nil { + e.aggCtxsMap = make(aggCtxsMapper) + } + if !e.executed { + for { + hasMore, err := e.innerNext(ctx) + if err != nil { + return nil, errors.Trace(err) + } + if !hasMore { + break + } + } + e.executed = true + } + + if e.currGroupIdx >= len(e.groups) { + return nil, nil + } + gk := e.groupKeys[e.currGroupIdx] + value = make([][]byte, 0, len(e.groupByExprs)+2*len(e.aggExprs)) + aggCtxs := e.getContexts(gk) + for i, agg := range e.aggExprs { + partialResults := agg.GetPartialResult(aggCtxs[i]) + for _, result := range partialResults { + data, err := codec.EncodeValue(e.evalCtx.sc, nil, result) + if err != nil { + return nil, errors.Trace(err) + } + value = append(value, data) + } + } + value = append(value, e.groupKeyRows[e.currGroupIdx]...) 
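+ // Advance to the next group; subsequent Next calls emit the remaining groups one by one.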
+ e.currGroupIdx++ + + return value, nil +} + +func (e *hashAggExec) getGroupKey() ([]byte, [][]byte, error) { + length := len(e.groupByExprs) + if length == 0 { + return nil, nil, nil + } + bufLen := 0 + row := make([][]byte, 0, length) + for _, item := range e.groupByExprs { + v, err := item.Eval(chunk.MutRowFromDatums(e.row).ToRow()) + if err != nil { + return nil, nil, errors.Trace(err) + } + b, err := codec.EncodeValue(e.evalCtx.sc, nil, v) + if err != nil { + return nil, nil, errors.Trace(err) + } + bufLen += len(b) + row = append(row, b) + } + buf := make([]byte, 0, bufLen) + for _, col := range row { + buf = append(buf, col...) + } + return buf, row, nil +} + +// aggregate updates aggregate functions with row. +func (e *hashAggExec) aggregate(value [][]byte) error { + err := e.evalCtx.decodeRelatedColumnVals(e.relatedColOffsets, value, e.row) + if err != nil { + return errors.Trace(err) + } + // Get group key. + gk, gbyKeyRow, err := e.getGroupKey() + if err != nil { + return errors.Trace(err) + } + if _, ok := e.groups[string(gk)]; !ok { + e.groups[string(gk)] = struct{}{} + e.groupKeys = append(e.groupKeys, gk) + e.groupKeyRows = append(e.groupKeyRows, gbyKeyRow) + } + // Update aggregate expressions. + aggCtxs := e.getContexts(gk) + for i, agg := range e.aggExprs { + err = agg.Update(aggCtxs[i], e.evalCtx.sc, chunk.MutRowFromDatums(e.row).ToRow()) + if err != nil { + return errors.Trace(err) + } + } + return nil +} + +func (e *hashAggExec) getContexts(groupKey []byte) []*aggregation.AggEvaluateContext { + groupKeyString := string(groupKey) + aggCtxs, ok := e.aggCtxsMap[groupKeyString] + if !ok { + aggCtxs = make([]*aggregation.AggEvaluateContext, 0, len(e.aggExprs)) + for _, agg := range e.aggExprs { + aggCtxs = append(aggCtxs, agg.CreateContext(e.evalCtx.sc)) + } + e.aggCtxsMap[groupKeyString] = aggCtxs + } + return aggCtxs +} diff --git a/store/mockstore/mocktikv/analyze.go b/store/mockstore/mocktikv/analyze.go new file mode 100644 index 0000000..2f8a612 --- /dev/null +++ b/store/mockstore/mocktikv/analyze.go @@ -0,0 +1,268 @@ +// Copyright 2017 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. 
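+
+// This file implements the mock coprocessor ANALYZE handlers: they scan index or table
+// ranges from the MVCC store and build histograms, CM sketches and sample collectors,
+// which are returned to the client as marshaled tipb analyze responses.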
+ +package mocktikv + +import ( + "context" + + "github.com/golang/protobuf/proto" + "github.com/pingcap-incubator/tinykv/proto/pkg/coprocessor" + "github.com/pingcap/errors" + "github.com/pingcap/tidb/kv" + "github.com/pingcap/tidb/parser/ast" + "github.com/pingcap/tidb/parser/model" + "github.com/pingcap/tidb/parser/mysql" + "github.com/pingcap/tidb/statistics" + "github.com/pingcap/tidb/types" + "github.com/pingcap/tidb/util/chunk" + "github.com/pingcap/tidb/util/codec" + "github.com/pingcap/tidb/util/rowcodec" + "github.com/pingcap/tipb/go-tipb" +) + +func (h *rpcHandler) handleCopAnalyzeRequest(req *coprocessor.Request) *coprocessor.Response { + resp := &coprocessor.Response{} + if len(req.Ranges) == 0 { + return resp + } + if req.GetTp() != kv.ReqTypeAnalyze { + return resp + } + if err := h.checkRequestContext(req.GetContext()); err != nil { + resp.RegionError = err + return resp + } + analyzeReq := new(tipb.AnalyzeReq) + err := proto.Unmarshal(req.Data, analyzeReq) + if err != nil { + resp.OtherError = err.Error() + return resp + } + if analyzeReq.Tp == tipb.AnalyzeType_TypeIndex { + resp, err = h.handleAnalyzeIndexReq(req, analyzeReq) + } else { + resp, err = h.handleAnalyzeColumnsReq(req, analyzeReq) + } + if err != nil { + resp.OtherError = err.Error() + } + return resp +} + +func (h *rpcHandler) handleAnalyzeIndexReq(req *coprocessor.Request, analyzeReq *tipb.AnalyzeReq) (*coprocessor.Response, error) { + ranges, err := h.extractKVRanges(req.Ranges, false) + if err != nil { + return nil, errors.Trace(err) + } + startTS := req.StartTs + if startTS == 0 { + startTS = analyzeReq.GetStartTsFallback() + } + e := &indexScanExec{ + colsLen: int(analyzeReq.IdxReq.NumColumns), + kvRanges: ranges, + startTS: startTS, + mvccStore: h.mvccStore, + IndexScan: &tipb.IndexScan{Desc: false}, + } + statsBuilder := statistics.NewSortedBuilder(flagsToStatementContext(analyzeReq.Flags), analyzeReq.IdxReq.BucketSize, 0, types.NewFieldType(mysql.TypeBlob)) + var cms *statistics.CMSketch + if analyzeReq.IdxReq.CmsketchDepth != nil && analyzeReq.IdxReq.CmsketchWidth != nil { + cms = statistics.NewCMSketch(*analyzeReq.IdxReq.CmsketchDepth, *analyzeReq.IdxReq.CmsketchWidth) + } + ctx := context.TODO() + var values [][]byte + for { + values, err = e.Next(ctx) + if err != nil { + return nil, errors.Trace(err) + } + if values == nil { + break + } + var value []byte + for _, val := range values { + value = append(value, val...) 
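+ // Insert every accumulated column prefix of the index key into the CM sketch, so the
+ // sketch can later answer estimates for prefix lookups as well as full keys.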
+ if cms != nil { + cms.InsertBytes(value) + } + } + err = statsBuilder.Iterate(types.NewBytesDatum(value)) + if err != nil { + return nil, errors.Trace(err) + } + } + hg := statistics.HistogramToProto(statsBuilder.Hist()) + var cm *tipb.CMSketch + if cms != nil { + cm = statistics.CMSketchToProto(cms) + } + data, err := proto.Marshal(&tipb.AnalyzeIndexResp{Hist: hg, Cms: cm}) + if err != nil { + return nil, errors.Trace(err) + } + return &coprocessor.Response{Data: data}, nil +} + +type analyzeColumnsExec struct { + tblExec *tableScanExec + fields []*ast.ResultField +} + +func (h *rpcHandler) handleAnalyzeColumnsReq(req *coprocessor.Request, analyzeReq *tipb.AnalyzeReq) (_ *coprocessor.Response, err error) { + sc := flagsToStatementContext(analyzeReq.Flags) + sc.TimeZone, err = constructTimeZone("", int(analyzeReq.TimeZoneOffset)) + if err != nil { + return nil, errors.Trace(err) + } + + evalCtx := &evalContext{sc: sc} + columns := analyzeReq.ColReq.ColumnsInfo + evalCtx.setColumnInfo(columns) + ranges, err := h.extractKVRanges(req.Ranges, false) + if err != nil { + return nil, errors.Trace(err) + } + startTS := req.StartTs + if startTS == 0 { + startTS = analyzeReq.GetStartTsFallback() + } + colInfos := make([]rowcodec.ColInfo, len(columns)) + for i := range columns { + col := columns[i] + colInfos[i] = rowcodec.ColInfo{ + ID: col.ColumnId, + Tp: col.Tp, + Flag: col.Flag, + IsPKHandle: col.GetPkHandle(), + } + } + defVal := func(i int) ([]byte, error) { + col := columns[i] + if col.DefaultVal == nil { + return nil, nil + } + // col.DefaultVal always be varint `[flag]+[value]`. + if len(col.DefaultVal) < 1 { + panic("invalid default value") + } + return col.DefaultVal, nil + } + rd := rowcodec.NewByteDecoder(colInfos, -1, defVal, nil) + e := &analyzeColumnsExec{ + tblExec: &tableScanExec{ + TableScan: &tipb.TableScan{Columns: columns}, + kvRanges: ranges, + colIDs: evalCtx.colIDs, + startTS: startTS, + mvccStore: h.mvccStore, + rd: rd, + }, + } + e.fields = make([]*ast.ResultField, len(columns)) + for i := range e.fields { + rf := new(ast.ResultField) + rf.Column = new(model.ColumnInfo) + rf.Column.FieldType = types.FieldType{Tp: mysql.TypeBlob, Flen: mysql.MaxBlobWidth, Charset: mysql.DefaultCharset, Collate: mysql.DefaultCollationName} + e.fields[i] = rf + } + + pkID := int64(-1) + numCols := len(columns) + if columns[0].GetPkHandle() { + pkID = columns[0].ColumnId + numCols-- + } + colReq := analyzeReq.ColReq + builder := statistics.SampleBuilder{ + Sc: sc, + RecordSet: e, + ColLen: numCols, + MaxBucketSize: colReq.BucketSize, + MaxFMSketchSize: colReq.SketchSize, + MaxSampleSize: colReq.SampleSize, + } + if pkID != -1 { + builder.PkBuilder = statistics.NewSortedBuilder(sc, builder.MaxBucketSize, pkID, types.NewFieldType(mysql.TypeBlob)) + } + if colReq.CmsketchWidth != nil && colReq.CmsketchDepth != nil { + builder.CMSketchWidth = *colReq.CmsketchWidth + builder.CMSketchDepth = *colReq.CmsketchDepth + } + collectors, pkBuilder, err := builder.CollectColumnStats() + if err != nil { + return nil, errors.Trace(err) + } + colResp := &tipb.AnalyzeColumnsResp{} + if pkID != -1 { + colResp.PkHist = statistics.HistogramToProto(pkBuilder.Hist()) + } + for _, c := range collectors { + colResp.Collectors = append(colResp.Collectors, statistics.SampleCollectorToProto(c)) + } + data, err := proto.Marshal(colResp) + if err != nil { + return nil, errors.Trace(err) + } + return &coprocessor.Response{Data: data}, nil +} + +// Fields implements the sqlexec.RecordSet Fields interface. 
+func (e *analyzeColumnsExec) Fields() []*ast.ResultField { + return e.fields +} + +func (e *analyzeColumnsExec) getNext(ctx context.Context) ([]types.Datum, error) { + values, err := e.tblExec.Next(ctx) + if err != nil { + return nil, errors.Trace(err) + } + if values == nil { + return nil, nil + } + datumRow := make([]types.Datum, 0, len(values)) + for _, val := range values { + d := types.NewBytesDatum(val) + if len(val) == 1 && val[0] == codec.NilFlag { + d.SetNull() + } + datumRow = append(datumRow, d) + } + return datumRow, nil +} + +func (e *analyzeColumnsExec) Next(ctx context.Context, req *chunk.Chunk) error { + req.Reset() + row, err := e.getNext(ctx) + if row == nil || err != nil { + return errors.Trace(err) + } + for i := 0; i < len(row); i++ { + req.AppendDatum(i, &row[i]) + } + return nil +} + +func (e *analyzeColumnsExec) NewChunk() *chunk.Chunk { + fields := make([]*types.FieldType, 0, len(e.fields)) + for _, field := range e.fields { + fields = append(fields, &field.Column.FieldType) + } + return chunk.NewChunkWithCapacity(fields, 1) +} + +// Close implements the sqlexec.RecordSet Close interface. +func (e *analyzeColumnsExec) Close() error { + return nil +} diff --git a/store/mockstore/mocktikv/cluster.go b/store/mockstore/mocktikv/cluster.go new file mode 100644 index 0000000..04dde68 --- /dev/null +++ b/store/mockstore/mocktikv/cluster.go @@ -0,0 +1,639 @@ +// Copyright 2016 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package mocktikv + +import ( + "bytes" + "context" + "math" + "sort" + "sync" + "time" + + "github.com/golang/protobuf/proto" + "github.com/pingcap-incubator/tinykv/proto/pkg/metapb" + "github.com/pingcap/tidb/kv" + "github.com/pingcap/tidb/tablecodec" +) + +// Cluster simulates a TiKV cluster. It focuses on management and the change of +// meta data. A Cluster mainly includes following 3 kinds of meta data: +// 1) Region: A Region is a fragment of TiKV's data whose range is [start, end). +// The data of a Region is duplicated to multiple Peers and distributed in +// multiple Stores. +// 2) Peer: A Peer is a replica of a Region's data. All peers of a Region form +// a group, each group elects a Leader to provide services. +// 3) Store: A Store is a storage/service node. Try to think it as a TiKV server +// process. Only the store with request's Region's leader Peer could respond +// to client's request. +type Cluster struct { + sync.RWMutex + id uint64 + stores map[uint64]*Store + regions map[uint64]*Region + + // delayEvents is used to control the execution sequence of rpc requests for test. + delayEvents map[delayKey]time.Duration + delayMu sync.Mutex +} + +type delayKey struct { + startTS uint64 + regionID uint64 +} + +// NewCluster creates an empty cluster. It needs to be bootstrapped before +// providing service. +func NewCluster() *Cluster { + return &Cluster{ + stores: make(map[uint64]*Store), + regions: make(map[uint64]*Region), + delayEvents: make(map[delayKey]time.Duration), + } +} + +// AllocID creates an unique ID in cluster. The ID could be used as either +// StoreID, RegionID, or PeerID. 
+func (c *Cluster) AllocID() uint64 { + c.Lock() + defer c.Unlock() + + return c.allocID() +} + +// AllocIDs creates multiple IDs. +func (c *Cluster) AllocIDs(n int) []uint64 { + c.Lock() + defer c.Unlock() + + var ids []uint64 + for len(ids) < n { + ids = append(ids, c.allocID()) + } + return ids +} + +func (c *Cluster) allocID() uint64 { + c.id++ + return c.id +} + +// GetAllRegions gets all the regions in the cluster. +func (c *Cluster) GetAllRegions() []*Region { + regions := make([]*Region, 0, len(c.regions)) + for _, region := range c.regions { + regions = append(regions, region) + } + return regions +} + +// GetStore returns a Store's meta. +func (c *Cluster) GetStore(storeID uint64) *metapb.Store { + c.RLock() + defer c.RUnlock() + + if store := c.stores[storeID]; store != nil { + return proto.Clone(store.meta).(*metapb.Store) + } + return nil +} + +// GetAllStores returns all Stores' meta. +func (c *Cluster) GetAllStores() []*metapb.Store { + c.RLock() + defer c.RUnlock() + + stores := make([]*metapb.Store, 0, len(c.stores)) + for _, store := range c.stores { + stores = append(stores, proto.Clone(store.meta).(*metapb.Store)) + } + return stores +} + +// StopStore stops a store with storeID. +func (c *Cluster) StopStore(storeID uint64) { + c.Lock() + defer c.Unlock() + + if store := c.stores[storeID]; store != nil { + store.meta.State = metapb.StoreState_Offline + } +} + +// StartStore starts a store with storeID. +func (c *Cluster) StartStore(storeID uint64) { + c.Lock() + defer c.Unlock() + + if store := c.stores[storeID]; store != nil { + store.meta.State = metapb.StoreState_Up + } +} + +// CancelStore makes the store with cancel state true. +func (c *Cluster) CancelStore(storeID uint64) { + c.Lock() + defer c.Unlock() + + //A store returns context.Cancelled Error when cancel is true. + if store := c.stores[storeID]; store != nil { + store.cancel = true + } +} + +// UnCancelStore makes the store with cancel state false. +func (c *Cluster) UnCancelStore(storeID uint64) { + c.Lock() + defer c.Unlock() + + if store := c.stores[storeID]; store != nil { + store.cancel = false + } +} + +// GetStoreByAddr returns a Store's meta by an addr. +func (c *Cluster) GetStoreByAddr(addr string) *metapb.Store { + c.RLock() + defer c.RUnlock() + + for _, s := range c.stores { + if s.meta.GetAddress() == addr { + return proto.Clone(s.meta).(*metapb.Store) + } + } + return nil +} + +// GetAndCheckStoreByAddr checks and returns a Store's meta by an addr +func (c *Cluster) GetAndCheckStoreByAddr(addr string) (*metapb.Store, error) { + c.RLock() + defer c.RUnlock() + + for _, s := range c.stores { + if s.cancel { + return nil, context.Canceled + } + if s.meta.GetAddress() == addr { + return proto.Clone(s.meta).(*metapb.Store), nil + } + } + return nil, nil +} + +// AddStore add a new Store to the cluster. +func (c *Cluster) AddStore(storeID uint64, addr string) { + c.Lock() + defer c.Unlock() + + c.stores[storeID] = newStore(storeID, addr) +} + +// RemoveStore removes a Store from the cluster. +func (c *Cluster) RemoveStore(storeID uint64) { + c.Lock() + defer c.Unlock() + + delete(c.stores, storeID) +} + +// UpdateStoreAddr updates store address for cluster. +func (c *Cluster) UpdateStoreAddr(storeID uint64, addr string) { + c.Lock() + defer c.Unlock() + c.stores[storeID] = newStore(storeID, addr) +} + +// GetRegion returns a Region's meta and leader ID. 
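+// The returned meta is a deep copy made with proto.Clone, so callers cannot mutate the
+// cluster's state through it.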
+func (c *Cluster) GetRegion(regionID uint64) (*metapb.Region, uint64) { + c.RLock() + defer c.RUnlock() + + r := c.regions[regionID] + if r == nil { + return nil, 0 + } + return proto.Clone(r.Meta).(*metapb.Region), r.leader +} + +// GetRegionByKey returns the Region and its leader whose range contains the key. +func (c *Cluster) GetRegionByKey(key []byte) (*metapb.Region, *metapb.Peer) { + c.RLock() + defer c.RUnlock() + + for _, r := range c.regions { + if regionContains(r.Meta.StartKey, r.Meta.EndKey, key) { + return proto.Clone(r.Meta).(*metapb.Region), proto.Clone(r.leaderPeer()).(*metapb.Peer) + } + } + return nil, nil +} + +// GetPrevRegionByKey returns the previous Region and its leader whose range contains the key. +func (c *Cluster) GetPrevRegionByKey(key []byte) (*metapb.Region, *metapb.Peer) { + c.RLock() + defer c.RUnlock() + + currentRegion, _ := c.GetRegionByKey(key) + if len(currentRegion.StartKey) == 0 { + return nil, nil + } + for _, r := range c.regions { + if bytes.Equal(r.Meta.EndKey, currentRegion.StartKey) { + return proto.Clone(r.Meta).(*metapb.Region), proto.Clone(r.leaderPeer()).(*metapb.Peer) + } + } + return nil, nil +} + +// GetRegionByID returns the Region and its leader whose ID is regionID. +func (c *Cluster) GetRegionByID(regionID uint64) (*metapb.Region, *metapb.Peer) { + c.RLock() + defer c.RUnlock() + + for _, r := range c.regions { + if r.Meta.GetId() == regionID { + return proto.Clone(r.Meta).(*metapb.Region), proto.Clone(r.leaderPeer()).(*metapb.Peer) + } + } + return nil, nil +} + +// ScanRegions returns at most `limit` regions from given `key` and their leaders. +func (c *Cluster) ScanRegions(startKey, endKey []byte, limit int) ([]*metapb.Region, []*metapb.Peer) { + c.RLock() + defer c.RUnlock() + + regions := make([]*Region, 0, len(c.regions)) + for _, region := range c.regions { + regions = append(regions, region) + } + + sort.Slice(regions, func(i, j int) bool { + return bytes.Compare(regions[i].Meta.GetStartKey(), regions[j].Meta.GetStartKey()) < 0 + }) + + startPos := sort.Search(len(regions), func(i int) bool { + if len(regions[i].Meta.GetEndKey()) == 0 { + return true + } + return bytes.Compare(regions[i].Meta.GetEndKey(), startKey) > 0 + }) + regions = regions[startPos:] + if len(endKey) > 0 { + endPos := sort.Search(len(regions), func(i int) bool { + return bytes.Compare(regions[i].Meta.GetStartKey(), endKey) >= 0 + }) + if endPos > 0 { + regions = regions[:endPos] + } + } + if limit > 0 && len(regions) > limit { + regions = regions[:limit] + } + + metas := make([]*metapb.Region, 0, len(regions)) + leaders := make([]*metapb.Peer, 0, len(regions)) + for _, region := range regions { + leader := region.leaderPeer() + if leader == nil { + leader = &metapb.Peer{} + } else { + leader = proto.Clone(leader).(*metapb.Peer) + } + + metas = append(metas, proto.Clone(region.Meta).(*metapb.Region)) + leaders = append(leaders, leader) + } + + return metas, leaders +} + +// Bootstrap creates the first Region. The Stores should be in the Cluster before +// bootstrap. +func (c *Cluster) Bootstrap(regionID uint64, storeIDs, peerIDs []uint64, leaderPeerID uint64) { + c.Lock() + defer c.Unlock() + + if len(storeIDs) != len(peerIDs) { + panic("len(storeIDs) != len(peerIDs)") + } + c.regions[regionID] = newRegion(regionID, storeIDs, peerIDs, leaderPeerID) +} + +// AddPeer adds a new Peer for the Region on the Store. 
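+// Adding a peer bumps the region's conf version (see Region.incConfVer).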
+func (c *Cluster) AddPeer(regionID, storeID, peerID uint64) { + c.Lock() + defer c.Unlock() + + c.regions[regionID].addPeer(peerID, storeID) +} + +// RemovePeer removes the Peer from the Region. Note that if the Peer is leader, +// the Region will have no leader before calling ChangeLeader(). +func (c *Cluster) RemovePeer(regionID, storeID uint64) { + c.Lock() + defer c.Unlock() + + c.regions[regionID].removePeer(storeID) +} + +// ChangeLeader sets the Region's leader Peer. Caller should guarantee the Peer +// exists. +func (c *Cluster) ChangeLeader(regionID, leaderPeerID uint64) { + c.Lock() + defer c.Unlock() + + c.regions[regionID].changeLeader(leaderPeerID) +} + +// GiveUpLeader sets the Region's leader to 0. The Region will have no leader +// before calling ChangeLeader(). +func (c *Cluster) GiveUpLeader(regionID uint64) { + c.ChangeLeader(regionID, 0) +} + +// Split splits a Region at the key (encoded) and creates new Region. +func (c *Cluster) Split(regionID, newRegionID uint64, key []byte, peerIDs []uint64, leaderPeerID uint64) { + c.SplitRaw(regionID, newRegionID, NewMvccKey(key), peerIDs, leaderPeerID) +} + +// SplitRaw splits a Region at the key (not encoded) and creates new Region. +func (c *Cluster) SplitRaw(regionID, newRegionID uint64, rawKey []byte, peerIDs []uint64, leaderPeerID uint64) *metapb.Region { + c.Lock() + defer c.Unlock() + + newRegion := c.regions[regionID].split(newRegionID, rawKey, peerIDs, leaderPeerID) + c.regions[newRegionID] = newRegion + // The mocktikv should return a deep copy of meta info to avoid data race + meta := proto.Clone(newRegion.Meta) + return meta.(*metapb.Region) +} + +// Merge merges 2 regions, their key ranges should be adjacent. +func (c *Cluster) Merge(regionID1, regionID2 uint64) { + c.Lock() + defer c.Unlock() + + c.regions[regionID1].merge(c.regions[regionID2].Meta.GetEndKey()) + delete(c.regions, regionID2) +} + +// SplitTable evenly splits the data in table into count regions. +// Only works for single store. +func (c *Cluster) SplitTable(mvccStore MVCCStore, tableID int64, count int) { + tableStart := tablecodec.GenTableRecordPrefix(tableID) + tableEnd := tableStart.PrefixNext() + c.splitRange(mvccStore, NewMvccKey(tableStart), NewMvccKey(tableEnd), count) +} + +// SplitIndex evenly splits the data in index into count regions. +// Only works for single store. +func (c *Cluster) SplitIndex(mvccStore MVCCStore, tableID, indexID int64, count int) { + indexStart := tablecodec.EncodeTableIndexPrefix(tableID, indexID) + indexEnd := indexStart.PrefixNext() + c.splitRange(mvccStore, NewMvccKey(indexStart), NewMvccKey(indexEnd), count) +} + +// SplitKeys evenly splits the start, end key into "count" regions. +// Only works for single store. +func (c *Cluster) SplitKeys(mvccStore MVCCStore, start, end kv.Key, count int) { + c.splitRange(mvccStore, NewMvccKey(start), NewMvccKey(end), count) +} + +// ScheduleDelay schedules a delay event for a transaction on a region. +func (c *Cluster) ScheduleDelay(startTS, regionID uint64, dur time.Duration) { + c.delayMu.Lock() + c.delayEvents[delayKey{startTS: startTS, regionID: regionID}] = dur + c.delayMu.Unlock() +} + +func (c *Cluster) splitRange(mvccStore MVCCStore, start, end MvccKey, count int) { + c.Lock() + defer c.Unlock() + c.evacuateOldRegionRanges(start, end) + regionPairs := c.getEntriesGroupByRegions(mvccStore, start, end, count) + c.createNewRegions(regionPairs, start, end) +} + +// getEntriesGroupByRegions groups the key value pairs into splitted regions. 
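+// The pairs are distributed as evenly as possible: every group gets len(pairs)/count
+// entries and the first len(pairs)%count groups get one extra entry.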
+func (c *Cluster) getEntriesGroupByRegions(mvccStore MVCCStore, start, end MvccKey, count int) [][]Pair { + startTS := uint64(math.MaxUint64) + limit := int(math.MaxInt32) + pairs := mvccStore.Scan(start.Raw(), end.Raw(), limit, startTS) + regionEntriesSlice := make([][]Pair, 0, count) + quotient := len(pairs) / count + remainder := len(pairs) % count + i := 0 + for i < len(pairs) { + regionEntryCount := quotient + if remainder > 0 { + remainder-- + regionEntryCount++ + } + regionEntries := pairs[i : i+regionEntryCount] + regionEntriesSlice = append(regionEntriesSlice, regionEntries) + i += regionEntryCount + } + return regionEntriesSlice +} + +func (c *Cluster) createNewRegions(regionPairs [][]Pair, start, end MvccKey) { + for i := range regionPairs { + peerID := c.allocID() + newRegion := newRegion(c.allocID(), []uint64{c.firstStoreID()}, []uint64{peerID}, peerID) + var regionStartKey, regionEndKey MvccKey + if i == 0 { + regionStartKey = start + } else { + regionStartKey = NewMvccKey(regionPairs[i][0].Key) + } + if i == len(regionPairs)-1 { + regionEndKey = end + } else { + // Use the next region's first key as region end key. + regionEndKey = NewMvccKey(regionPairs[i+1][0].Key) + } + newRegion.updateKeyRange(regionStartKey, regionEndKey) + c.regions[newRegion.Meta.Id] = newRegion + } +} + +// evacuateOldRegionRanges evacuate the range [start, end]. +// Old regions has intersection with [start, end) will be updated or deleted. +func (c *Cluster) evacuateOldRegionRanges(start, end MvccKey) { + oldRegions := c.getRegionsCoverRange(start, end) + for _, oldRegion := range oldRegions { + startCmp := bytes.Compare(oldRegion.Meta.StartKey, start) + endCmp := bytes.Compare(oldRegion.Meta.EndKey, end) + if len(oldRegion.Meta.EndKey) == 0 { + endCmp = 1 + } + if startCmp >= 0 && endCmp <= 0 { + // The region is within table data, it will be replaced by new regions. + delete(c.regions, oldRegion.Meta.Id) + } else if startCmp < 0 && endCmp > 0 { + // A single Region covers table data, split into two regions that do not overlap table data. + oldEnd := oldRegion.Meta.EndKey + oldRegion.updateKeyRange(oldRegion.Meta.StartKey, start) + peerID := c.allocID() + newRegion := newRegion(c.allocID(), []uint64{c.firstStoreID()}, []uint64{peerID}, peerID) + newRegion.updateKeyRange(end, oldEnd) + c.regions[newRegion.Meta.Id] = newRegion + } else if startCmp < 0 { + oldRegion.updateKeyRange(oldRegion.Meta.StartKey, start) + } else { + oldRegion.updateKeyRange(end, oldRegion.Meta.EndKey) + } + } +} + +func (c *Cluster) firstStoreID() uint64 { + for id := range c.stores { + return id + } + return 0 +} + +// getRegionsCoverRange gets regions in the cluster that has intersection with [start, end). +func (c *Cluster) getRegionsCoverRange(start, end MvccKey) []*Region { + regions := make([]*Region, 0, len(c.regions)) + for _, region := range c.regions { + onRight := bytes.Compare(end, region.Meta.StartKey) <= 0 + onLeft := bytes.Compare(region.Meta.EndKey, start) <= 0 + if len(region.Meta.EndKey) == 0 { + onLeft = false + } + if onLeft || onRight { + continue + } + regions = append(regions, region) + } + return regions +} + +// Region is the Region meta data. 
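+// It pairs the protobuf region meta with the ID of the current leader peer.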
+type Region struct { + Meta *metapb.Region + leader uint64 +} + +func newPeerMeta(peerID, storeID uint64) *metapb.Peer { + return &metapb.Peer{ + Id: peerID, + StoreId: storeID, + } +} + +func newRegion(regionID uint64, storeIDs, peerIDs []uint64, leaderPeerID uint64) *Region { + if len(storeIDs) != len(peerIDs) { + panic("len(storeIDs) != len(peerIds)") + } + peers := make([]*metapb.Peer, 0, len(storeIDs)) + for i := range storeIDs { + peers = append(peers, newPeerMeta(peerIDs[i], storeIDs[i])) + } + meta := &metapb.Region{ + Id: regionID, + Peers: peers, + } + return &Region{ + Meta: meta, + leader: leaderPeerID, + } +} + +func (r *Region) addPeer(peerID, storeID uint64) { + r.Meta.Peers = append(r.Meta.Peers, newPeerMeta(peerID, storeID)) + r.incConfVer() +} + +func (r *Region) removePeer(peerID uint64) { + for i, peer := range r.Meta.Peers { + if peer.GetId() == peerID { + r.Meta.Peers = append(r.Meta.Peers[:i], r.Meta.Peers[i+1:]...) + break + } + } + if r.leader == peerID { + r.leader = 0 + } + r.incConfVer() +} + +func (r *Region) changeLeader(leaderID uint64) { + r.leader = leaderID +} + +func (r *Region) leaderPeer() *metapb.Peer { + for _, p := range r.Meta.Peers { + if p.GetId() == r.leader { + return p + } + } + return nil +} + +func (r *Region) split(newRegionID uint64, key MvccKey, peerIDs []uint64, leaderPeerID uint64) *Region { + if len(r.Meta.Peers) != len(peerIDs) { + panic("len(r.meta.Peers) != len(peerIDs)") + } + storeIDs := make([]uint64, 0, len(r.Meta.Peers)) + for _, peer := range r.Meta.Peers { + storeIDs = append(storeIDs, peer.GetStoreId()) + } + region := newRegion(newRegionID, storeIDs, peerIDs, leaderPeerID) + region.updateKeyRange(key, r.Meta.EndKey) + r.updateKeyRange(r.Meta.StartKey, key) + return region +} + +func (r *Region) merge(endKey MvccKey) { + r.Meta.EndKey = endKey + r.incVersion() +} + +func (r *Region) updateKeyRange(start, end MvccKey) { + r.Meta.StartKey = start + r.Meta.EndKey = end + r.incVersion() +} + +func (r *Region) incConfVer() { + r.Meta.RegionEpoch = &metapb.RegionEpoch{ + ConfVer: r.Meta.GetRegionEpoch().GetConfVer() + 1, + Version: r.Meta.GetRegionEpoch().GetVersion(), + } +} + +func (r *Region) incVersion() { + r.Meta.RegionEpoch = &metapb.RegionEpoch{ + ConfVer: r.Meta.GetRegionEpoch().GetConfVer(), + Version: r.Meta.GetRegionEpoch().GetVersion() + 1, + } +} + +// Store is the Store's meta data. +type Store struct { + meta *metapb.Store + cancel bool // return context.Cancelled error when cancel is true. +} + +func newStore(storeID uint64, addr string) *Store { + return &Store{ + meta: &metapb.Store{ + Id: storeID, + Address: addr, + }, + } +} diff --git a/store/mockstore/mocktikv/cluster_manipulate.go b/store/mockstore/mocktikv/cluster_manipulate.go new file mode 100644 index 0000000..bc7a112 --- /dev/null +++ b/store/mockstore/mocktikv/cluster_manipulate.go @@ -0,0 +1,51 @@ +// Copyright 2016 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package mocktikv + +import "fmt" + +// BootstrapWithSingleStore initializes a Cluster with 1 Region and 1 Store. 
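+// It returns the IDs of the new store, peer and region so tests can manipulate the cluster
+// further (e.g. Split or AddPeer). A minimal setup sketch, mirroring cluster_test.go below:
+//
+//	cluster := mocktikv.NewCluster()
+//	storeID, peerID, regionID := mocktikv.BootstrapWithSingleStore(cluster)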
+func BootstrapWithSingleStore(cluster *Cluster) (storeID, peerID, regionID uint64) { + ids := cluster.AllocIDs(3) + storeID, peerID, regionID = ids[0], ids[1], ids[2] + cluster.AddStore(storeID, fmt.Sprintf("store%d", storeID)) + cluster.Bootstrap(regionID, []uint64{storeID}, []uint64{peerID}, peerID) + return +} + +// BootstrapWithMultiStores initializes a Cluster with 1 Region and n Stores. +func BootstrapWithMultiStores(cluster *Cluster, n int) (storeIDs, peerIDs []uint64, regionID uint64, leaderPeer uint64) { + storeIDs = cluster.AllocIDs(n) + peerIDs = cluster.AllocIDs(n) + leaderPeer = peerIDs[0] + regionID = cluster.AllocID() + for _, storeID := range storeIDs { + cluster.AddStore(storeID, fmt.Sprintf("store%d", storeID)) + } + cluster.Bootstrap(regionID, storeIDs, peerIDs, leaderPeer) + return +} + +// BootstrapWithMultiRegions initializes a Cluster with multiple Regions and 1 +// Store. The number of Regions will be len(splitKeys) + 1. +func BootstrapWithMultiRegions(cluster *Cluster, splitKeys ...[]byte) (storeID uint64, regionIDs, peerIDs []uint64) { + var firstRegionID, firstPeerID uint64 + storeID, firstPeerID, firstRegionID = BootstrapWithSingleStore(cluster) + regionIDs = append([]uint64{firstRegionID}, cluster.AllocIDs(len(splitKeys))...) + peerIDs = append([]uint64{firstPeerID}, cluster.AllocIDs(len(splitKeys))...) + for i, k := range splitKeys { + cluster.Split(regionIDs[i], regionIDs[i+1], k, []uint64{peerIDs[i]}, peerIDs[i]) + } + return +} diff --git a/store/mockstore/mocktikv/cluster_test.go b/store/mockstore/mocktikv/cluster_test.go new file mode 100644 index 0000000..aaa8549 --- /dev/null +++ b/store/mockstore/mocktikv/cluster_test.go @@ -0,0 +1,120 @@ +// Copyright 2016-present, PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package mocktikv_test + +import ( + "bytes" + "context" + "math" + "strconv" + "time" + + . "github.com/pingcap/check" + "github.com/pingcap/tidb/sessionctx/stmtctx" + "github.com/pingcap/tidb/store/mockstore" + "github.com/pingcap/tidb/store/mockstore/mocktikv" + "github.com/pingcap/tidb/tablecodec" + "github.com/pingcap/tidb/types" + "github.com/pingcap/tidb/util/codec" + "github.com/pingcap/tidb/util/rowcodec" +) + +var _ = Suite(&testClusterSuite{}) + +type testClusterSuite struct { +} + +func (s *testClusterSuite) TestClusterSplit(c *C) { + cluster := mocktikv.NewCluster() + mocktikv.BootstrapWithSingleStore(cluster) + mvccStore := mocktikv.MustNewMVCCStore() + store, err := mockstore.NewMockTikvStore( + mockstore.WithCluster(cluster), + mockstore.WithMVCCStore(mvccStore), + ) + c.Assert(err, IsNil) + + txn, err := store.Begin() + c.Assert(err, IsNil) + + // Mock inserting many rows in a table. + tblID := int64(1) + idxID := int64(2) + colID := int64(3) + handle := int64(1) + sc := &stmtctx.StatementContext{TimeZone: time.UTC} + for i := 0; i < 1000; i++ { + rowKey := tablecodec.EncodeRowKeyWithHandle(tblID, handle) + colValue := types.NewStringDatum(strconv.Itoa(int(handle))) + // TODO: Should use session's TimeZone instead of UTC. 
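+ // Each iteration writes one table row and one index entry for the current handle,
+ // giving SplitTable/SplitIndex below 1000 keys of each kind to split into regions.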
+ rd := &rowcodec.Encoder{} + rowValue, err1 := tablecodec.EncodeRow(sc, []types.Datum{colValue}, []int64{colID}, nil, nil, rd) + c.Assert(err1, IsNil) + txn.Set(rowKey, rowValue) + + encodedIndexValue, err1 := codec.EncodeKey(sc, nil, []types.Datum{colValue, types.NewIntDatum(handle)}...) + c.Assert(err1, IsNil) + idxKey := tablecodec.EncodeIndexSeekKey(tblID, idxID, encodedIndexValue) + txn.Set(idxKey, []byte{'0'}) + handle++ + } + err = txn.Commit(context.Background()) + c.Assert(err, IsNil) + + // Split Table into 10 regions. + cluster.SplitTable(mvccStore, tblID, 10) + + // 10 table regions and first region and last region. + regions := cluster.GetAllRegions() + c.Assert(regions, HasLen, 12) + + allKeysMap := make(map[string]bool) + recordPrefix := tablecodec.GenTableRecordPrefix(tblID) + for _, region := range regions { + startKey := mocktikv.MvccKey(region.Meta.StartKey).Raw() + endKey := mocktikv.MvccKey(region.Meta.EndKey).Raw() + if !bytes.HasPrefix(startKey, recordPrefix) { + continue + } + pairs := mvccStore.Scan(startKey, endKey, math.MaxInt64, math.MaxUint64) + if len(pairs) > 0 { + c.Assert(pairs, HasLen, 100) + } + for _, pair := range pairs { + allKeysMap[string(pair.Key)] = true + } + } + c.Assert(allKeysMap, HasLen, 1000) + + cluster.SplitIndex(mvccStore, tblID, idxID, 10) + + allIndexMap := make(map[string]bool) + indexPrefix := tablecodec.EncodeTableIndexPrefix(tblID, idxID) + regions = cluster.GetAllRegions() + for _, region := range regions { + startKey := mocktikv.MvccKey(region.Meta.StartKey).Raw() + endKey := mocktikv.MvccKey(region.Meta.EndKey).Raw() + if !bytes.HasPrefix(startKey, indexPrefix) { + continue + } + pairs := mvccStore.Scan(startKey, endKey, math.MaxInt64, math.MaxUint64) + if len(pairs) > 0 { + c.Assert(pairs, HasLen, 100) + } + for _, pair := range pairs { + allIndexMap[string(pair.Key)] = true + } + } + c.Assert(allIndexMap, HasLen, 1000) +} diff --git a/store/mockstore/mocktikv/cop_handler_dag.go b/store/mockstore/mocktikv/cop_handler_dag.go new file mode 100644 index 0000000..3f7ece9 --- /dev/null +++ b/store/mockstore/mocktikv/cop_handler_dag.go @@ -0,0 +1,578 @@ +// Copyright 2017 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. 
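+
+// This file implements the mock coprocessor DAG handler: it unmarshals a tipb.DAGRequest,
+// builds the executor chain bottom-up (table/index scan, selection, aggregation, topN,
+// limit), drains it row by row, and packs the requested output columns into the chunks of
+// a tipb.SelectResponse.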
+ +package mocktikv + +import ( + "bytes" + "context" + "time" + + "github.com/golang/protobuf/proto" + "github.com/pingcap-incubator/tinykv/proto/pkg/coprocessor" + "github.com/pingcap-incubator/tinykv/proto/pkg/kvrpcpb" + "github.com/pingcap/errors" + "github.com/pingcap/tidb/expression" + "github.com/pingcap/tidb/expression/aggregation" + "github.com/pingcap/tidb/kv" + "github.com/pingcap/tidb/parser/model" + "github.com/pingcap/tidb/parser/mysql" + "github.com/pingcap/tidb/parser/terror" + "github.com/pingcap/tidb/sessionctx/stmtctx" + "github.com/pingcap/tidb/tablecodec" + "github.com/pingcap/tidb/types" + "github.com/pingcap/tidb/util/codec" + "github.com/pingcap/tidb/util/rowcodec" + + "github.com/pingcap/tipb/go-tipb" +) + +var dummySlice = make([]byte, 0) + +type dagContext struct { + dagReq *tipb.DAGRequest + keyRanges []*coprocessor.KeyRange + startTS uint64 + evalCtx *evalContext +} + +func (h *rpcHandler) handleCopDAGRequest(req *coprocessor.Request) *coprocessor.Response { + resp := &coprocessor.Response{} + if err := h.checkRequestContext(req.GetContext()); err != nil { + resp.RegionError = err + return resp + } + dagCtx, e, dagReq, err := h.buildDAGExecutor(req) + if err != nil { + resp.OtherError = err.Error() + return resp + } + + var rows [][][]byte + ctx := context.TODO() + for { + var row [][]byte + row, err = e.Next(ctx) + if err != nil { + break + } + if row == nil { + break + } + rows = append(rows, row) + } + + selResp := h.initSelectResponse(err, dagCtx.evalCtx.sc.GetWarnings(), e.Counts()) + if err == nil { + err = h.fillUpData4SelectResponse(selResp, dagReq, rows) + } + // FIXME: some err such as (overflow) will be include in Response.OtherError with calling this buildResp. + // Such err should only be marshal in the data but not in OtherError. + // However, we can not distinguish such err now. + return buildResp(selResp, err) +} + +func (h *rpcHandler) buildDAGExecutor(req *coprocessor.Request) (*dagContext, executor, *tipb.DAGRequest, error) { + if len(req.Ranges) == 0 { + return nil, nil, nil, errors.New("request range is null") + } + if req.GetTp() != kv.ReqTypeDAG { + return nil, nil, nil, errors.Errorf("unsupported request type %d", req.GetTp()) + } + + dagReq := new(tipb.DAGRequest) + err := proto.Unmarshal(req.Data, dagReq) + if err != nil { + return nil, nil, nil, errors.Trace(err) + } + + sc := flagsToStatementContext(dagReq.Flags) + sc.TimeZone, err = constructTimeZone(dagReq.TimeZoneName, int(dagReq.TimeZoneOffset)) + if err != nil { + return nil, nil, nil, errors.Trace(err) + } + + ctx := &dagContext{ + dagReq: dagReq, + keyRanges: req.Ranges, + startTS: req.StartTs, + evalCtx: &evalContext{sc: sc}, + } + e, err := h.buildDAG(ctx, dagReq.Executors) + if err != nil { + return nil, nil, nil, errors.Trace(err) + } + return ctx, e, dagReq, err +} + +// constructTimeZone constructs timezone by name first. When the timezone name +// is set, the daylight saving problem must be considered. Otherwise the +// timezone offset in seconds east of UTC is used to constructed the timezone. 
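+// NOTE: this mock implementation currently ignores both arguments and always returns time.Local.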
+func constructTimeZone(name string, offset int) (*time.Location, error) { + return time.Local, nil +} + +func (h *rpcHandler) buildExec(ctx *dagContext, curr *tipb.Executor) (executor, error) { + var currExec executor + var err error + switch curr.GetTp() { + case tipb.ExecType_TypeTableScan: + currExec, err = h.buildTableScan(ctx, curr) + case tipb.ExecType_TypeIndexScan: + currExec, err = h.buildIndexScan(ctx, curr) + case tipb.ExecType_TypeSelection: + currExec, err = h.buildSelection(ctx, curr) + case tipb.ExecType_TypeAggregation: + currExec, err = h.buildHashAgg(ctx, curr) + case tipb.ExecType_TypeTopN: + currExec, err = h.buildTopN(ctx, curr) + case tipb.ExecType_TypeLimit: + currExec = &limitExec{limit: curr.Limit.GetLimit()} + default: + // TODO: Support other types. + err = errors.Errorf("this exec type %v doesn't support yet.", curr.GetTp()) + } + + return currExec, errors.Trace(err) +} + +func (h *rpcHandler) buildDAG(ctx *dagContext, executors []*tipb.Executor) (executor, error) { + var src executor + for i := 0; i < len(executors); i++ { + curr, err := h.buildExec(ctx, executors[i]) + if err != nil { + return nil, errors.Trace(err) + } + curr.SetSrcExec(src) + src = curr + } + return src, nil +} + +func (h *rpcHandler) buildTableScan(ctx *dagContext, executor *tipb.Executor) (*tableScanExec, error) { + columns := executor.TblScan.Columns + ctx.evalCtx.setColumnInfo(columns) + ranges, err := h.extractKVRanges(ctx.keyRanges, executor.TblScan.Desc) + if err != nil { + return nil, errors.Trace(err) + } + + startTS := ctx.startTS + if startTS == 0 { + startTS = ctx.dagReq.GetStartTsFallback() + } + colInfos := make([]rowcodec.ColInfo, len(columns)) + for i := range colInfos { + col := columns[i] + colInfos[i] = rowcodec.ColInfo{ + ID: col.ColumnId, + Tp: col.Tp, + Flag: col.Flag, + IsPKHandle: col.GetPkHandle(), + } + } + defVal := func(i int) ([]byte, error) { + col := columns[i] + if col.DefaultVal == nil { + return nil, nil + } + // col.DefaultVal always be varint `[flag]+[value]`. + if len(col.DefaultVal) < 1 { + panic("invalid default value") + } + return col.DefaultVal, nil + } + rd := rowcodec.NewByteDecoder(colInfos, -1, defVal, nil) + e := &tableScanExec{ + TableScan: executor.TblScan, + kvRanges: ranges, + colIDs: ctx.evalCtx.colIDs, + startTS: startTS, + mvccStore: h.mvccStore, + rd: rd, + } + if ctx.dagReq.CollectRangeCounts != nil && *ctx.dagReq.CollectRangeCounts { + e.counts = make([]int64, len(ranges)) + } + return e, nil +} + +func (h *rpcHandler) buildIndexScan(ctx *dagContext, executor *tipb.Executor) (*indexScanExec, error) { + var err error + columns := executor.IdxScan.Columns + ctx.evalCtx.setColumnInfo(columns) + length := len(columns) + pkStatus := tablecodec.PrimaryKeyNotExists + // The PKHandle column info has been collected in ctx. 
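+ // If the last column is the table handle (a PK handle or the extra handle column),
+ // strip it from the scanned columns and record whether the handle is signed or unsigned.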
+ if columns[length-1].GetPkHandle() { + if mysql.HasUnsignedFlag(uint(columns[length-1].GetFlag())) { + pkStatus = tablecodec.PrimaryKeyIsUnsigned + } else { + pkStatus = tablecodec.PrimaryKeyIsSigned + } + columns = columns[:length-1] + } else if columns[length-1].ColumnId == model.ExtraHandleID { + pkStatus = tablecodec.PrimaryKeyIsSigned + columns = columns[:length-1] + } + ranges, err := h.extractKVRanges(ctx.keyRanges, executor.IdxScan.Desc) + if err != nil { + return nil, errors.Trace(err) + } + + startTS := ctx.startTS + if startTS == 0 { + startTS = ctx.dagReq.GetStartTsFallback() + } + e := &indexScanExec{ + IndexScan: executor.IdxScan, + kvRanges: ranges, + colsLen: len(columns), + startTS: startTS, + mvccStore: h.mvccStore, + pkStatus: pkStatus, + } + if ctx.dagReq.CollectRangeCounts != nil && *ctx.dagReq.CollectRangeCounts { + e.counts = make([]int64, len(ranges)) + } + return e, nil +} + +func (h *rpcHandler) buildSelection(ctx *dagContext, executor *tipb.Executor) (*selectionExec, error) { + var err error + var relatedColOffsets []int + pbConds := executor.Selection.Conditions + for _, cond := range pbConds { + relatedColOffsets, err = extractOffsetsInExpr(cond, ctx.evalCtx.columnInfos, relatedColOffsets) + if err != nil { + return nil, errors.Trace(err) + } + } + conds, err := convertToExprs(ctx.evalCtx.sc, ctx.evalCtx.fieldTps, pbConds) + if err != nil { + return nil, errors.Trace(err) + } + + return &selectionExec{ + evalCtx: ctx.evalCtx, + relatedColOffsets: relatedColOffsets, + conditions: conds, + row: make([]types.Datum, len(ctx.evalCtx.columnInfos)), + }, nil +} + +func (h *rpcHandler) getAggInfo(ctx *dagContext, executor *tipb.Executor) ([]aggregation.Aggregation, []expression.Expression, []int, error) { + length := len(executor.Aggregation.AggFunc) + aggs := make([]aggregation.Aggregation, 0, length) + var err error + var relatedColOffsets []int + for _, expr := range executor.Aggregation.AggFunc { + var aggExpr aggregation.Aggregation + aggExpr, err = aggregation.NewDistAggFunc(expr, ctx.evalCtx.fieldTps, ctx.evalCtx.sc) + if err != nil { + return nil, nil, nil, errors.Trace(err) + } + aggs = append(aggs, aggExpr) + relatedColOffsets, err = extractOffsetsInExpr(expr, ctx.evalCtx.columnInfos, relatedColOffsets) + if err != nil { + return nil, nil, nil, errors.Trace(err) + } + } + for _, item := range executor.Aggregation.GroupBy { + relatedColOffsets, err = extractOffsetsInExpr(item, ctx.evalCtx.columnInfos, relatedColOffsets) + if err != nil { + return nil, nil, nil, errors.Trace(err) + } + } + groupBys, err := convertToExprs(ctx.evalCtx.sc, ctx.evalCtx.fieldTps, executor.Aggregation.GetGroupBy()) + if err != nil { + return nil, nil, nil, errors.Trace(err) + } + + return aggs, groupBys, relatedColOffsets, nil +} + +func (h *rpcHandler) buildHashAgg(ctx *dagContext, executor *tipb.Executor) (*hashAggExec, error) { + aggs, groupBys, relatedColOffsets, err := h.getAggInfo(ctx, executor) + if err != nil { + return nil, errors.Trace(err) + } + + return &hashAggExec{ + evalCtx: ctx.evalCtx, + aggExprs: aggs, + groupByExprs: groupBys, + groups: make(map[string]struct{}), + groupKeys: make([][]byte, 0), + relatedColOffsets: relatedColOffsets, + row: make([]types.Datum, len(ctx.evalCtx.columnInfos)), + }, nil +} + +func (h *rpcHandler) buildTopN(ctx *dagContext, executor *tipb.Executor) (*topNExec, error) { + topN := executor.TopN + var err error + var relatedColOffsets []int + pbConds := make([]*tipb.Expr, len(topN.OrderBy)) + for i, item := range topN.OrderBy { + 
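+ // Collect the column offsets referenced by each ORDER BY expression, and keep the
+ // expression itself so it can be evaluated for every input row.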
relatedColOffsets, err = extractOffsetsInExpr(item.Expr, ctx.evalCtx.columnInfos, relatedColOffsets) + if err != nil { + return nil, errors.Trace(err) + } + pbConds[i] = item.Expr + } + heap := &topNHeap{ + totalCount: int(topN.Limit), + topNSorter: topNSorter{ + orderByItems: topN.OrderBy, + sc: ctx.evalCtx.sc, + }, + } + + conds, err := convertToExprs(ctx.evalCtx.sc, ctx.evalCtx.fieldTps, pbConds) + if err != nil { + return nil, errors.Trace(err) + } + + return &topNExec{ + heap: heap, + evalCtx: ctx.evalCtx, + relatedColOffsets: relatedColOffsets, + orderByExprs: conds, + row: make([]types.Datum, len(ctx.evalCtx.columnInfos)), + }, nil +} + +type evalContext struct { + colIDs map[int64]int + columnInfos []*tipb.ColumnInfo + fieldTps []*types.FieldType + sc *stmtctx.StatementContext +} + +func (e *evalContext) setColumnInfo(cols []*tipb.ColumnInfo) { + e.columnInfos = make([]*tipb.ColumnInfo, len(cols)) + copy(e.columnInfos, cols) + + e.colIDs = make(map[int64]int) + e.fieldTps = make([]*types.FieldType, 0, len(e.columnInfos)) + for i, col := range e.columnInfos { + ft := fieldTypeFromPBColumn(col) + e.fieldTps = append(e.fieldTps, ft) + e.colIDs[col.GetColumnId()] = i + } +} + +// decodeRelatedColumnVals decodes data to Datum slice according to the row information. +func (e *evalContext) decodeRelatedColumnVals(relatedColOffsets []int, value [][]byte, row []types.Datum) error { + var err error + for _, offset := range relatedColOffsets { + row[offset], err = tablecodec.DecodeColumnValue(value[offset], e.fieldTps[offset], e.sc.TimeZone) + if err != nil { + return errors.Trace(err) + } + } + return nil +} + +// flagsToStatementContext creates a StatementContext from a `tipb.SelectRequest.Flags`. +func flagsToStatementContext(flags uint64) *stmtctx.StatementContext { + sc := new(stmtctx.StatementContext) + sc.IgnoreTruncate = (flags & model.FlagIgnoreTruncate) > 0 + sc.TruncateAsWarning = (flags & model.FlagTruncateAsWarning) > 0 + sc.PadCharToFullLength = (flags & model.FlagPadCharToFullLength) > 0 + sc.InInsertStmt = (flags & model.FlagInInsertStmt) > 0 + sc.InSelectStmt = (flags & model.FlagInSelectStmt) > 0 + sc.OverflowAsWarning = (flags & model.FlagOverflowAsWarning) > 0 + sc.IgnoreZeroInDate = (flags & model.FlagIgnoreZeroInDate) > 0 + sc.DividedByZeroAsWarning = (flags & model.FlagDividedByZeroAsWarning) > 0 + // TODO set FlagInUpdateOrDeleteStmt, FlagInUnionStmt, + return sc +} + +func (h *rpcHandler) initSelectResponse(err error, warnings []stmtctx.SQLWarn, counts []int64) *tipb.SelectResponse { + selResp := &tipb.SelectResponse{ + Error: toPBError(err), + OutputCounts: counts, + } + for i := range warnings { + selResp.Warnings = append(selResp.Warnings, toPBError(warnings[i].Err)) + } + return selResp +} + +func (h *rpcHandler) fillUpData4SelectResponse(selResp *tipb.SelectResponse, dagReq *tipb.DAGRequest, rows [][][]byte) error { + var chunks []tipb.Chunk + for i := range rows { + requestedRow := dummySlice + for _, ordinal := range dagReq.OutputOffsets { + requestedRow = append(requestedRow, rows[i][ordinal]...) 
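+			// rows[i][ordinal] is already encoded, so building the response row is a
+			// plain byte concatenation in the order given by OutputOffsets.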
+ } + chunks = appendRow(chunks, requestedRow, i) + } + selResp.Chunks = chunks + return nil +} + +func buildResp(selResp *tipb.SelectResponse, err error) *coprocessor.Response { + resp := &coprocessor.Response{} + + if err != nil { + if locked, ok := errors.Cause(err).(*ErrLocked); ok { + resp.Locked = &kvrpcpb.LockInfo{ + Key: locked.Key, + PrimaryLock: locked.Primary, + LockVersion: locked.StartTS, + LockTtl: locked.TTL, + } + } else { + resp.OtherError = err.Error() + } + } + data, err := proto.Marshal(selResp) + if err != nil { + resp.OtherError = err.Error() + return resp + } + resp.Data = data + return resp +} + +func toPBError(err error) *tipb.Error { + if err == nil { + return nil + } + perr := new(tipb.Error) + switch x := err.(type) { + case *terror.Error: + sqlErr := x.ToSQLError() + perr.Code = int32(sqlErr.Code) + perr.Msg = sqlErr.Message + default: + e := errors.Cause(err) + switch y := e.(type) { + case *terror.Error: + tmp := y.ToSQLError() + perr.Code = int32(tmp.Code) + perr.Msg = tmp.Message + default: + perr.Code = int32(1) + perr.Msg = err.Error() + } + } + return perr +} + +// extractKVRanges extracts kv.KeyRanges slice from a SelectRequest. +func (h *rpcHandler) extractKVRanges(keyRanges []*coprocessor.KeyRange, descScan bool) (kvRanges []kv.KeyRange, err error) { + for _, kran := range keyRanges { + if bytes.Compare(kran.GetStart(), kran.GetEnd()) >= 0 { + err = errors.Errorf("invalid range, start should be smaller than end: %v %v", kran.GetStart(), kran.GetEnd()) + return + } + + upperKey := kran.GetEnd() + if bytes.Compare(upperKey, h.rawStartKey) <= 0 { + continue + } + lowerKey := kran.GetStart() + if len(h.rawEndKey) != 0 && bytes.Compare(lowerKey, h.rawEndKey) >= 0 { + break + } + var kvr kv.KeyRange + kvr.StartKey = kv.Key(maxStartKey(lowerKey, h.rawStartKey)) + kvr.EndKey = kv.Key(minEndKey(upperKey, h.rawEndKey)) + kvRanges = append(kvRanges, kvr) + } + if descScan { + reverseKVRanges(kvRanges) + } + return +} + +func reverseKVRanges(kvRanges []kv.KeyRange) { + for i := 0; i < len(kvRanges)/2; i++ { + j := len(kvRanges) - i - 1 + kvRanges[i], kvRanges[j] = kvRanges[j], kvRanges[i] + } +} + +const rowsPerChunk = 64 + +func appendRow(chunks []tipb.Chunk, data []byte, rowCnt int) []tipb.Chunk { + if rowCnt%rowsPerChunk == 0 { + chunks = append(chunks, tipb.Chunk{}) + } + cur := &chunks[len(chunks)-1] + cur.RowsData = append(cur.RowsData, data...) 
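+	// With rowsPerChunk = 64, rows 0..63 share chunks[0], row 64 opens chunks[1],
+	// and so on; RowsData is simply the concatenation of the encoded rows.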
+ return chunks +} + +func maxStartKey(rangeStartKey kv.Key, regionStartKey []byte) []byte { + if bytes.Compare([]byte(rangeStartKey), regionStartKey) > 0 { + return []byte(rangeStartKey) + } + return regionStartKey +} + +func minEndKey(rangeEndKey kv.Key, regionEndKey []byte) []byte { + if len(regionEndKey) == 0 || bytes.Compare([]byte(rangeEndKey), regionEndKey) < 0 { + return []byte(rangeEndKey) + } + return regionEndKey +} + +func isDuplicated(offsets []int, offset int) bool { + for _, idx := range offsets { + if idx == offset { + return true + } + } + return false +} + +func extractOffsetsInExpr(expr *tipb.Expr, columns []*tipb.ColumnInfo, collector []int) ([]int, error) { + if expr == nil { + return nil, nil + } + if expr.GetTp() == tipb.ExprType_ColumnRef { + _, idx, err := codec.DecodeInt(expr.Val) + if err != nil { + return nil, errors.Trace(err) + } + if !isDuplicated(collector, int(idx)) { + collector = append(collector, int(idx)) + } + return collector, nil + } + var err error + for _, child := range expr.Children { + collector, err = extractOffsetsInExpr(child, columns, collector) + if err != nil { + return nil, errors.Trace(err) + } + } + return collector, nil +} + +// fieldTypeFromPBColumn creates a types.FieldType from tipb.ColumnInfo. +func fieldTypeFromPBColumn(col *tipb.ColumnInfo) *types.FieldType { + return &types.FieldType{ + Tp: byte(col.GetTp()), + Flag: uint(col.Flag), + Flen: int(col.GetColumnLen()), + Decimal: int(col.GetDecimal()), + Elems: col.Elems, + Collate: mysql.Collations[uint8(col.GetCollation())], + } +} diff --git a/store/mockstore/mocktikv/errors.go b/store/mockstore/mocktikv/errors.go new file mode 100644 index 0000000..cc43ef7 --- /dev/null +++ b/store/mockstore/mocktikv/errors.go @@ -0,0 +1,81 @@ +// Copyright 2016 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package mocktikv + +import ( + "fmt" + + "github.com/pingcap-incubator/tinykv/proto/pkg/kvrpcpb" +) + +// ErrLocked is returned when trying to Read/Write on a locked key. Client should +// backoff or cleanup the lock then retry. +type ErrLocked struct { + Key MvccKey + Primary []byte + StartTS uint64 + TTL uint64 + TxnSize uint64 + LockType kvrpcpb.Op +} + +// Error formats the lock to a string. +func (e *ErrLocked) Error() string { + return fmt.Sprintf("key is locked, key: %q, primary: %q, txnStartTS: %v, LockType: %v", + e.Key, e.Primary, e.StartTS, e.LockType) +} + +// ErrKeyAlreadyExist is returned when key exists but this key has a constraint that +// it should not exist. Client should return duplicated entry error. +type ErrKeyAlreadyExist struct { + Key []byte +} + +func (e *ErrKeyAlreadyExist) Error() string { + return fmt.Sprintf("key already exist, key: %q", e.Key) +} + +// ErrRetryable suggests that client may restart the txn. +type ErrRetryable string + +func (e ErrRetryable) Error() string { + return fmt.Sprintf("retryable: %s", string(e)) +} + +// ErrAbort means something is wrong and client should abort the txn. 
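+// In contrast to ErrRetryable, the current transaction is expected to be given up.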
+type ErrAbort string + +func (e ErrAbort) Error() string { + return fmt.Sprintf("abort: %s", string(e)) +} + +// ErrAlreadyCommitted is returned specially when client tries to rollback a +// committed lock. +type ErrAlreadyCommitted uint64 + +func (e ErrAlreadyCommitted) Error() string { + return "txn already committed" +} + +// ErrConflict is returned when the commitTS of key in the DB is greater than startTS. +type ErrConflict struct { + StartTS uint64 + ConflictTS uint64 + ConflictCommitTS uint64 + Key []byte +} + +func (e *ErrConflict) Error() string { + return "write conflict" +} diff --git a/store/mockstore/mocktikv/executor.go b/store/mockstore/mocktikv/executor.go new file mode 100644 index 0000000..4d4afb0 --- /dev/null +++ b/store/mockstore/mocktikv/executor.go @@ -0,0 +1,541 @@ +// Copyright 2017 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package mocktikv + +import ( + "bytes" + "context" + "sort" + + "github.com/pingcap/errors" + "github.com/pingcap/tidb/expression" + "github.com/pingcap/tidb/kv" + "github.com/pingcap/tidb/sessionctx/stmtctx" + "github.com/pingcap/tidb/tablecodec" + "github.com/pingcap/tidb/types" + "github.com/pingcap/tidb/util/chunk" + "github.com/pingcap/tidb/util/rowcodec" + "github.com/pingcap/tipb/go-tipb" +) + +var ( + _ executor = &tableScanExec{} + _ executor = &indexScanExec{} + _ executor = &selectionExec{} + _ executor = &limitExec{} + _ executor = &topNExec{} +) + +type executor interface { + SetSrcExec(executor) + GetSrcExec() executor + ResetCounts() + Counts() []int64 + Next(ctx context.Context) ([][]byte, error) +} + +type tableScanExec struct { + *tipb.TableScan + colIDs map[int64]int + kvRanges []kv.KeyRange + startTS uint64 + mvccStore MVCCStore + cursor int + seekKey []byte + start int + counts []int64 + + src executor + + rd *rowcodec.BytesDecoder +} + +func (e *tableScanExec) SetSrcExec(exec executor) { + e.src = exec +} + +func (e *tableScanExec) GetSrcExec() executor { + return e.src +} + +func (e *tableScanExec) ResetCounts() { + if e.counts != nil { + e.start = e.cursor + e.counts[e.start] = 0 + } +} + +func (e *tableScanExec) Counts() []int64 { + if e.counts == nil { + return nil + } + if e.seekKey == nil { + return e.counts[e.start:e.cursor] + } + return e.counts[e.start : e.cursor+1] +} + +func (e *tableScanExec) Next(ctx context.Context) (value [][]byte, err error) { + for e.cursor < len(e.kvRanges) { + ran := e.kvRanges[e.cursor] + if ran.IsPoint() { + value, err = e.getRowFromPoint(ran) + if err != nil { + return nil, errors.Trace(err) + } + e.cursor++ + if value == nil { + continue + } + if e.counts != nil { + e.counts[e.cursor-1]++ + } + return value, nil + } + value, err = e.getRowFromRange(ran) + if err != nil { + return nil, errors.Trace(err) + } + if value == nil { + e.seekKey = nil + e.cursor++ + continue + } + if e.counts != nil { + e.counts[e.cursor]++ + } + return value, nil + } + + return nil, nil +} + +func (e *tableScanExec) getRowFromPoint(ran kv.KeyRange) ([][]byte, error) { + val, err := e.mvccStore.Get(ran.StartKey, e.startTS) + if err != nil { + return 
nil, errors.Trace(err) + } + if len(val) == 0 { + return nil, nil + } + handle, err := tablecodec.DecodeRowKey(ran.StartKey) + if err != nil { + return nil, errors.Trace(err) + } + row, err := getRowData(e.Columns, e.colIDs, handle, val, e.rd) + if err != nil { + return nil, errors.Trace(err) + } + return row, nil +} + +func (e *tableScanExec) getRowFromRange(ran kv.KeyRange) ([][]byte, error) { + if e.seekKey == nil { + if e.Desc { + e.seekKey = ran.EndKey + } else { + e.seekKey = ran.StartKey + } + } + var pairs []Pair + var pair Pair + if e.Desc { + pairs = e.mvccStore.ReverseScan(ran.StartKey, e.seekKey, 1, e.startTS) + } else { + pairs = e.mvccStore.Scan(e.seekKey, ran.EndKey, 1, e.startTS) + } + if len(pairs) > 0 { + pair = pairs[0] + } + if pair.Err != nil { + // TODO: Handle lock error. + return nil, errors.Trace(pair.Err) + } + if pair.Key == nil { + return nil, nil + } + if e.Desc { + if bytes.Compare(pair.Key, ran.StartKey) < 0 { + return nil, nil + } + e.seekKey = []byte(tablecodec.TruncateToRowKeyLen(kv.Key(pair.Key))) + } else { + if bytes.Compare(pair.Key, ran.EndKey) >= 0 { + return nil, nil + } + e.seekKey = []byte(kv.Key(pair.Key).PrefixNext()) + } + + handle, err := tablecodec.DecodeRowKey(pair.Key) + if err != nil { + return nil, errors.Trace(err) + } + row, err := getRowData(e.Columns, e.colIDs, handle, pair.Value, e.rd) + if err != nil { + return nil, errors.Trace(err) + } + return row, nil +} + +type indexScanExec struct { + *tipb.IndexScan + colsLen int + kvRanges []kv.KeyRange + startTS uint64 + mvccStore MVCCStore + cursor int + seekKey []byte + pkStatus tablecodec.PrimaryKeyStatus + start int + counts []int64 + + src executor +} + +func (e *indexScanExec) SetSrcExec(exec executor) { + e.src = exec +} + +func (e *indexScanExec) GetSrcExec() executor { + return e.src +} + +func (e *indexScanExec) ResetCounts() { + if e.counts != nil { + e.start = e.cursor + e.counts[e.start] = 0 + } +} + +func (e *indexScanExec) Counts() []int64 { + if e.counts == nil { + return nil + } + if e.seekKey == nil { + return e.counts[e.start:e.cursor] + } + return e.counts[e.start : e.cursor+1] +} + +func (e *indexScanExec) isUnique() bool { + return e.Unique != nil && *e.Unique +} + +func (e *indexScanExec) Next(ctx context.Context) (value [][]byte, err error) { + for e.cursor < len(e.kvRanges) { + ran := e.kvRanges[e.cursor] + if ran.IsPoint() && e.isUnique() { + value, err = e.getRowFromPoint(ran) + if err != nil { + return nil, errors.Trace(err) + } + e.cursor++ + if value == nil { + continue + } + if e.counts != nil { + e.counts[e.cursor-1]++ + } + } else { + value, err = e.getRowFromRange(ran) + if err != nil { + return nil, errors.Trace(err) + } + if value == nil { + e.cursor++ + e.seekKey = nil + continue + } + if e.counts != nil { + e.counts[e.cursor]++ + } + } + return value, nil + } + + return nil, nil +} + +// getRowFromPoint is only used for unique key. 
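+// A unique index maps a point range to at most one entry, so a single MVCC Get
+// at startTS is enough; non-unique indexes always go through getRowFromRange.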
+func (e *indexScanExec) getRowFromPoint(ran kv.KeyRange) ([][]byte, error) { + val, err := e.mvccStore.Get(ran.StartKey, e.startTS) + if err != nil { + return nil, errors.Trace(err) + } + if len(val) == 0 { + return nil, nil + } + return tablecodec.DecodeIndexKV(ran.StartKey, val, e.colsLen, e.pkStatus) +} + +func (e *indexScanExec) getRowFromRange(ran kv.KeyRange) ([][]byte, error) { + if e.seekKey == nil { + if e.Desc { + e.seekKey = ran.EndKey + } else { + e.seekKey = ran.StartKey + } + } + var pairs []Pair + var pair Pair + if e.Desc { + pairs = e.mvccStore.ReverseScan(ran.StartKey, e.seekKey, 1, e.startTS) + } else { + pairs = e.mvccStore.Scan(e.seekKey, ran.EndKey, 1, e.startTS) + } + if len(pairs) > 0 { + pair = pairs[0] + } + if pair.Err != nil { + // TODO: Handle lock error. + return nil, errors.Trace(pair.Err) + } + if pair.Key == nil { + return nil, nil + } + if e.Desc { + if bytes.Compare(pair.Key, ran.StartKey) < 0 { + return nil, nil + } + e.seekKey = pair.Key + } else { + if bytes.Compare(pair.Key, ran.EndKey) >= 0 { + return nil, nil + } + e.seekKey = []byte(kv.Key(pair.Key).PrefixNext()) + } + + return tablecodec.DecodeIndexKV(pair.Key, pair.Value, e.colsLen, e.pkStatus) +} + +type selectionExec struct { + conditions []expression.Expression + relatedColOffsets []int + row []types.Datum + evalCtx *evalContext + src executor +} + +func (e *selectionExec) SetSrcExec(exec executor) { + e.src = exec +} + +func (e *selectionExec) GetSrcExec() executor { + return e.src +} + +func (e *selectionExec) ResetCounts() { + e.src.ResetCounts() +} + +func (e *selectionExec) Counts() []int64 { + return e.src.Counts() +} + +// evalBool evaluates expression to a boolean value. +func evalBool(exprs []expression.Expression, row []types.Datum, ctx *stmtctx.StatementContext) (bool, error) { + for _, expr := range exprs { + data, err := expr.Eval(chunk.MutRowFromDatums(row).ToRow()) + if err != nil { + return false, errors.Trace(err) + } + if data.IsNull() { + return false, nil + } + + isBool, err := data.ToBool(ctx) + if err != nil { + return false, errors.Trace(err) + } + if isBool == 0 { + return false, nil + } + } + return true, nil +} + +func (e *selectionExec) Next(ctx context.Context) (value [][]byte, err error) { + for { + value, err = e.src.Next(ctx) + if err != nil { + return nil, errors.Trace(err) + } + if value == nil { + return nil, nil + } + + err = e.evalCtx.decodeRelatedColumnVals(e.relatedColOffsets, value, e.row) + if err != nil { + return nil, errors.Trace(err) + } + match, err := evalBool(e.conditions, e.row, e.evalCtx.sc) + if err != nil { + return nil, errors.Trace(err) + } + if match { + return value, nil + } + } +} + +type topNExec struct { + heap *topNHeap + evalCtx *evalContext + relatedColOffsets []int + orderByExprs []expression.Expression + row []types.Datum + cursor int + executed bool + + src executor +} + +func (e *topNExec) SetSrcExec(src executor) { + e.src = src +} + +func (e *topNExec) GetSrcExec() executor { + return e.src +} + +func (e *topNExec) ResetCounts() { + e.src.ResetCounts() +} + +func (e *topNExec) Counts() []int64 { + return e.src.Counts() +} + +func (e *topNExec) innerNext(ctx context.Context) (bool, error) { + value, err := e.src.Next(ctx) + if err != nil { + return false, errors.Trace(err) + } + if value == nil { + return false, nil + } + err = e.evalTopN(value) + if err != nil { + return false, errors.Trace(err) + } + return true, nil +} + +func (e *topNExec) Next(ctx context.Context) (value [][]byte, err error) { + if !e.executed { + for { + 
hasMore, err := e.innerNext(ctx) + if err != nil { + return nil, errors.Trace(err) + } + if !hasMore { + break + } + } + e.executed = true + } + if e.cursor >= len(e.heap.rows) { + return nil, nil + } + sort.Sort(&e.heap.topNSorter) + row := e.heap.rows[e.cursor] + e.cursor++ + + return row.data, nil +} + +// evalTopN evaluates the top n elements from the data. The input receives a record including its handle and data. +// And this function will check if this record can replace one of the old records. +func (e *topNExec) evalTopN(value [][]byte) error { + newRow := &sortRow{ + key: make([]types.Datum, len(value)), + } + err := e.evalCtx.decodeRelatedColumnVals(e.relatedColOffsets, value, e.row) + if err != nil { + return errors.Trace(err) + } + for i, expr := range e.orderByExprs { + newRow.key[i], err = expr.Eval(chunk.MutRowFromDatums(e.row).ToRow()) + if err != nil { + return errors.Trace(err) + } + } + + if e.heap.tryToAddRow(newRow) { + newRow.data = append(newRow.data, value...) + } + return errors.Trace(e.heap.err) +} + +type limitExec struct { + limit uint64 + cursor uint64 + + src executor +} + +func (e *limitExec) SetSrcExec(src executor) { + e.src = src +} + +func (e *limitExec) GetSrcExec() executor { + return e.src +} + +func (e *limitExec) ResetCounts() { + e.src.ResetCounts() +} + +func (e *limitExec) Counts() []int64 { + return e.src.Counts() +} + +func (e *limitExec) Next(ctx context.Context) (value [][]byte, err error) { + if e.cursor >= e.limit { + return nil, nil + } + + value, err = e.src.Next(ctx) + if err != nil { + return nil, errors.Trace(err) + } + if value == nil { + return nil, nil + } + e.cursor++ + return value, nil +} + +// getRowData decodes raw byte slice to row data. +func getRowData( + columns []*tipb.ColumnInfo, + colIDs map[int64]int, + handle int64, + value []byte, + rd *rowcodec.BytesDecoder, +) ([][]byte, error) { + return rd.DecodeToBytes(colIDs, handle, value, nil) +} + +func convertToExprs(sc *stmtctx.StatementContext, fieldTps []*types.FieldType, pbExprs []*tipb.Expr) ([]expression.Expression, error) { + exprs := make([]expression.Expression, 0, len(pbExprs)) + for _, expr := range pbExprs { + e, err := expression.PBToExpr(expr, fieldTps, sc) + if err != nil { + return nil, errors.Trace(err) + } + exprs = append(exprs, e) + } + return exprs, nil +} diff --git a/store/mockstore/mocktikv/mock.go b/store/mockstore/mocktikv/mock.go new file mode 100644 index 0000000..95fa1c8 --- /dev/null +++ b/store/mockstore/mocktikv/mock.go @@ -0,0 +1,37 @@ +// Copyright 2018 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package mocktikv + +import ( + "github.com/pingcap-incubator/tinykv/scheduler/client" + "github.com/pingcap/errors" +) + +// NewTiKVAndPDClient creates a TiKV client and PD client from options. 
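+// A nil cluster is bootstrapped with a single store, and a nil mvccStore falls
+// back to a fresh MVCCLevelDB at path (an empty path keeps it in memory).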
+func NewTiKVAndPDClient(cluster *Cluster, mvccStore MVCCStore, path string) (*RPCClient, pd.Client, error) { + if cluster == nil { + cluster = NewCluster() + BootstrapWithSingleStore(cluster) + } + + if mvccStore == nil { + var err error + mvccStore, err = NewMVCCLevelDB(path) + if err != nil { + return nil, nil, errors.Trace(err) + } + } + + return NewRPCClient(cluster, mvccStore), NewPDClient(cluster), nil +} diff --git a/store/mockstore/mocktikv/mock_tikv_test.go b/store/mockstore/mocktikv/mock_tikv_test.go new file mode 100644 index 0000000..bb6663c --- /dev/null +++ b/store/mockstore/mocktikv/mock_tikv_test.go @@ -0,0 +1,681 @@ +// Copyright 2016 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package mocktikv + +import ( + "math" + "testing" + + "github.com/pingcap-incubator/tinykv/proto/pkg/kvrpcpb" + . "github.com/pingcap/check" +) + +func TestT(t *testing.T) { + TestingT(t) +} + +// testMockTiKVSuite tests MVCCStore interface. +// SetUpTest should set specific MVCCStore implementation. +type testMockTiKVSuite struct { + store MVCCStore +} + +type testMarshal struct{} + +// testMVCCLevelDB is used to test MVCCLevelDB implementation. +type testMVCCLevelDB struct { + testMockTiKVSuite +} + +var ( + _ = Suite(&testMockTiKVSuite{}) + _ = Suite(&testMVCCLevelDB{}) + _ = Suite(testMarshal{}) +) + +func (s *testMockTiKVSuite) SetUpTest(c *C) { + var err error + s.store, err = NewMVCCLevelDB("") + c.Assert(err, IsNil) +} + +func putMutations(kvpairs ...string) []*kvrpcpb.Mutation { + var mutations []*kvrpcpb.Mutation + for i := 0; i < len(kvpairs); i += 2 { + mutations = append(mutations, &kvrpcpb.Mutation{ + Op: kvrpcpb.Op_Put, + Key: []byte(kvpairs[i]), + Value: []byte(kvpairs[i+1]), + }) + } + return mutations +} + +func lock(key, primary string, ts uint64) *kvrpcpb.LockInfo { + return &kvrpcpb.LockInfo{ + Key: []byte(key), + PrimaryLock: []byte(primary), + LockVersion: ts, + } +} + +func (s *testMockTiKVSuite) mustGetNone(c *C, key string, ts uint64) { + val, err := s.store.Get([]byte(key), ts) + c.Assert(err, IsNil) + c.Assert(val, IsNil) +} + +func (s *testMockTiKVSuite) mustGetErr(c *C, key string, ts uint64) { + val, err := s.store.Get([]byte(key), ts) + c.Assert(err, NotNil) + c.Assert(val, IsNil) +} + +func (s *testMockTiKVSuite) mustGetOK(c *C, key string, ts uint64, expect string) { + val, err := s.store.Get([]byte(key), ts) + c.Assert(err, IsNil) + c.Assert(string(val), Equals, expect) +} + +func (s *testMockTiKVSuite) mustPutOK(c *C, key, value string, startTS, commitTS uint64) { + req := &kvrpcpb.PrewriteRequest{ + Mutations: putMutations(key, value), + PrimaryLock: []byte(key), + StartVersion: startTS, + } + errs := s.store.Prewrite(req) + for _, err := range errs { + c.Assert(err, IsNil) + } + err := s.store.Commit([][]byte{[]byte(key)}, startTS, commitTS) + c.Assert(err, IsNil) +} + +func (s *testMockTiKVSuite) mustDeleteOK(c *C, key string, startTS, commitTS uint64) { + mutations := []*kvrpcpb.Mutation{ + { + Op: kvrpcpb.Op_Del, + Key: []byte(key), + }, + } + req := &kvrpcpb.PrewriteRequest{ + Mutations: mutations, + 
PrimaryLock: []byte(key), + StartVersion: startTS, + } + errs := s.store.Prewrite(req) + for _, err := range errs { + c.Assert(err, IsNil) + } + err := s.store.Commit([][]byte{[]byte(key)}, startTS, commitTS) + c.Assert(err, IsNil) +} + +func (s *testMockTiKVSuite) mustScanOK(c *C, start string, limit int, ts uint64, expect ...string) { + s.mustRangeScanOK(c, start, "", limit, ts, expect...) +} + +func (s *testMockTiKVSuite) mustRangeScanOK(c *C, start, end string, limit int, ts uint64, expect ...string) { + pairs := s.store.Scan([]byte(start), []byte(end), limit, ts) + c.Assert(len(pairs)*2, Equals, len(expect)) + for i := 0; i < len(pairs); i++ { + c.Assert(pairs[i].Err, IsNil) + c.Assert(pairs[i].Key, BytesEquals, []byte(expect[i*2])) + c.Assert(string(pairs[i].Value), Equals, expect[i*2+1]) + } +} + +func (s *testMockTiKVSuite) mustReverseScanOK(c *C, end string, limit int, ts uint64, expect ...string) { + s.mustRangeReverseScanOK(c, "", end, limit, ts, expect...) +} + +func (s *testMockTiKVSuite) mustRangeReverseScanOK(c *C, start, end string, limit int, ts uint64, expect ...string) { + pairs := s.store.ReverseScan([]byte(start), []byte(end), limit, ts) + c.Assert(len(pairs)*2, Equals, len(expect)) + for i := 0; i < len(pairs); i++ { + c.Assert(pairs[i].Err, IsNil) + c.Assert(pairs[i].Key, BytesEquals, []byte(expect[i*2])) + c.Assert(string(pairs[i].Value), Equals, expect[i*2+1]) + } +} + +func (s *testMockTiKVSuite) mustPrewriteOK(c *C, mutations []*kvrpcpb.Mutation, primary string, startTS uint64) { + s.mustPrewriteWithTTLOK(c, mutations, primary, startTS, 0) +} + +func (s *testMockTiKVSuite) mustPrewriteWithTTLOK(c *C, mutations []*kvrpcpb.Mutation, primary string, startTS uint64, ttl uint64) { + req := &kvrpcpb.PrewriteRequest{ + Mutations: mutations, + PrimaryLock: []byte(primary), + StartVersion: startTS, + LockTtl: ttl, + } + errs := s.store.Prewrite(req) + for _, err := range errs { + c.Assert(err, IsNil) + } +} + +func (s *testMockTiKVSuite) mustCommitOK(c *C, keys [][]byte, startTS, commitTS uint64) { + err := s.store.Commit(keys, startTS, commitTS) + c.Assert(err, IsNil) +} + +func (s *testMockTiKVSuite) mustCommitErr(c *C, keys [][]byte, startTS, commitTS uint64) { + err := s.store.Commit(keys, startTS, commitTS) + c.Assert(err, NotNil) +} + +func (s *testMockTiKVSuite) mustRollbackOK(c *C, keys [][]byte, startTS uint64) { + err := s.store.Rollback(keys, startTS) + c.Assert(err, IsNil) +} + +func (s *testMockTiKVSuite) mustRollbackErr(c *C, keys [][]byte, startTS uint64) { + err := s.store.Rollback(keys, startTS) + c.Assert(err, NotNil) +} + +func (s *testMockTiKVSuite) mustScanLock(c *C, maxTs uint64, expect []*kvrpcpb.LockInfo) { + locks, err := s.store.ScanLock(nil, nil, maxTs) + c.Assert(err, IsNil) + c.Assert(locks, DeepEquals, expect) +} + +func (s *testMockTiKVSuite) mustResolveLock(c *C, startTS, commitTS uint64) { + c.Assert(s.store.ResolveLock(nil, nil, startTS, commitTS), IsNil) +} + +func (s *testMockTiKVSuite) mustBatchResolveLock(c *C, txnInfos map[uint64]uint64) { + c.Assert(s.store.BatchResolveLock(nil, nil, txnInfos), IsNil) +} + +func (s *testMockTiKVSuite) mustGC(c *C, safePoint uint64) { + c.Assert(s.store.GC(nil, nil, safePoint), IsNil) +} + +func (s *testMockTiKVSuite) mustDeleteRange(c *C, startKey, endKey string) { + err := s.store.DeleteRange([]byte(startKey), []byte(endKey)) + c.Assert(err, IsNil) +} + +func (s *testMockTiKVSuite) TestGet(c *C) { + s.mustGetNone(c, "x", 10) + s.mustPutOK(c, "x", "x", 5, 10) + s.mustGetNone(c, "x", 9) + 
s.mustGetOK(c, "x", 10, "x") + s.mustGetOK(c, "x", 11, "x") +} + +func (s *testMockTiKVSuite) TestGetWithLock(c *C) { + key := "key" + value := "value" + s.mustPutOK(c, key, value, 5, 10) + mutations := []*kvrpcpb.Mutation{{ + Op: kvrpcpb.Op_Lock, + Key: []byte(key), + }, + } + // test with lock's type is lock + s.mustPrewriteOK(c, mutations, key, 20) + s.mustGetOK(c, key, 25, value) + s.mustCommitOK(c, [][]byte{[]byte(key)}, 20, 30) + + // test get with lock's max ts and primary key + s.mustPrewriteOK(c, putMutations(key, "value2", "key2", "v5"), key, 40) + s.mustGetErr(c, key, 41) + s.mustGetErr(c, "key2", math.MaxUint64) + s.mustGetOK(c, key, math.MaxUint64, "value") +} + +func (s *testMockTiKVSuite) TestDelete(c *C) { + s.mustPutOK(c, "x", "x5-10", 5, 10) + s.mustDeleteOK(c, "x", 15, 20) + s.mustGetNone(c, "x", 5) + s.mustGetNone(c, "x", 9) + s.mustGetOK(c, "x", 10, "x5-10") + s.mustGetOK(c, "x", 19, "x5-10") + s.mustGetNone(c, "x", 20) + s.mustGetNone(c, "x", 21) +} + +func (s *testMockTiKVSuite) TestCleanupRollback(c *C) { + s.mustPutOK(c, "secondary", "s-0", 1, 2) + s.mustPrewriteOK(c, putMutations("primary", "p-5", "secondary", "s-5"), "primary", 5) + s.mustGetErr(c, "secondary", 8) + s.mustGetErr(c, "secondary", 12) + s.mustCommitOK(c, [][]byte{[]byte("primary")}, 5, 10) + s.mustRollbackErr(c, [][]byte{[]byte("primary")}, 5) +} + +func (s *testMockTiKVSuite) TestReverseScan(c *C) { + // ver10: A(10) - B(_) - C(10) - D(_) - E(10) + s.mustPutOK(c, "A", "A10", 5, 10) + s.mustPutOK(c, "C", "C10", 5, 10) + s.mustPutOK(c, "E", "E10", 5, 10) + + checkV10 := func() { + s.mustReverseScanOK(c, "Z", 0, 10) + s.mustReverseScanOK(c, "Z", 1, 10, "E", "E10") + s.mustReverseScanOK(c, "Z", 2, 10, "E", "E10", "C", "C10") + s.mustReverseScanOK(c, "Z", 3, 10, "E", "E10", "C", "C10", "A", "A10") + s.mustReverseScanOK(c, "Z", 4, 10, "E", "E10", "C", "C10", "A", "A10") + s.mustReverseScanOK(c, "E\x00", 3, 10, "E", "E10", "C", "C10", "A", "A10") + s.mustReverseScanOK(c, "C\x00", 3, 10, "C", "C10", "A", "A10") + s.mustReverseScanOK(c, "C\x00", 4, 10, "C", "C10", "A", "A10") + s.mustReverseScanOK(c, "B", 1, 10, "A", "A10") + s.mustRangeReverseScanOK(c, "", "E", 5, 10, "C", "C10", "A", "A10") + s.mustRangeReverseScanOK(c, "", "C\x00", 5, 10, "C", "C10", "A", "A10") + s.mustRangeReverseScanOK(c, "A\x00", "C", 5, 10) + } + checkV10() + + // ver20: A(10) - B(20) - C(10) - D(20) - E(10) + s.mustPutOK(c, "B", "B20", 15, 20) + s.mustPutOK(c, "D", "D20", 15, 20) + + checkV20 := func() { + s.mustReverseScanOK(c, "Z", 5, 20, "E", "E10", "D", "D20", "C", "C10", "B", "B20", "A", "A10") + s.mustReverseScanOK(c, "C\x00", 5, 20, "C", "C10", "B", "B20", "A", "A10") + s.mustReverseScanOK(c, "A\x00", 1, 20, "A", "A10") + s.mustRangeReverseScanOK(c, "B", "D", 5, 20, "C", "C10", "B", "B20") + s.mustRangeReverseScanOK(c, "B", "D\x00", 5, 20, "D", "D20", "C", "C10", "B", "B20") + s.mustRangeReverseScanOK(c, "B\x00", "D\x00", 5, 20, "D", "D20", "C", "C10") + } + checkV10() + checkV20() + + // ver30: A(_) - B(20) - C(10) - D(_) - E(10) + s.mustDeleteOK(c, "A", 25, 30) + s.mustDeleteOK(c, "D", 25, 30) + + checkV30 := func() { + s.mustReverseScanOK(c, "Z", 5, 30, "E", "E10", "C", "C10", "B", "B20") + s.mustReverseScanOK(c, "C", 1, 30, "B", "B20") + s.mustReverseScanOK(c, "C\x00", 5, 30, "C", "C10", "B", "B20") + } + checkV10() + checkV20() + checkV30() + + // ver40: A(_) - B(_) - C(40) - D(40) - E(10) + s.mustDeleteOK(c, "B", 35, 40) + s.mustPutOK(c, "C", "C40", 35, 40) + s.mustPutOK(c, "D", "D40", 35, 40) + + checkV40 := func() { 
+ s.mustReverseScanOK(c, "Z", 5, 40, "E", "E10", "D", "D40", "C", "C40") + s.mustReverseScanOK(c, "Z", 5, 100, "E", "E10", "D", "D40", "C", "C40") + } + checkV10() + checkV20() + checkV30() + checkV40() +} + +func (s *testMockTiKVSuite) TestScan(c *C) { + // ver10: A(10) - B(_) - C(10) - D(_) - E(10) + s.mustPutOK(c, "A", "A10", 5, 10) + s.mustPutOK(c, "C", "C10", 5, 10) + s.mustPutOK(c, "E", "E10", 5, 10) + + checkV10 := func() { + s.mustScanOK(c, "", 0, 10) + s.mustScanOK(c, "", 1, 10, "A", "A10") + s.mustScanOK(c, "", 2, 10, "A", "A10", "C", "C10") + s.mustScanOK(c, "", 3, 10, "A", "A10", "C", "C10", "E", "E10") + s.mustScanOK(c, "", 4, 10, "A", "A10", "C", "C10", "E", "E10") + s.mustScanOK(c, "A", 3, 10, "A", "A10", "C", "C10", "E", "E10") + s.mustScanOK(c, "A\x00", 3, 10, "C", "C10", "E", "E10") + s.mustScanOK(c, "C", 4, 10, "C", "C10", "E", "E10") + s.mustScanOK(c, "F", 1, 10) + s.mustRangeScanOK(c, "", "E", 5, 10, "A", "A10", "C", "C10") + s.mustRangeScanOK(c, "", "C\x00", 5, 10, "A", "A10", "C", "C10") + s.mustRangeScanOK(c, "A\x00", "C", 5, 10) + } + checkV10() + + // ver20: A(10) - B(20) - C(10) - D(20) - E(10) + s.mustPutOK(c, "B", "B20", 15, 20) + s.mustPutOK(c, "D", "D20", 15, 20) + + checkV20 := func() { + s.mustScanOK(c, "", 5, 20, "A", "A10", "B", "B20", "C", "C10", "D", "D20", "E", "E10") + s.mustScanOK(c, "C", 5, 20, "C", "C10", "D", "D20", "E", "E10") + s.mustScanOK(c, "D\x00", 1, 20, "E", "E10") + s.mustRangeScanOK(c, "B", "D", 5, 20, "B", "B20", "C", "C10") + s.mustRangeScanOK(c, "B", "D\x00", 5, 20, "B", "B20", "C", "C10", "D", "D20") + s.mustRangeScanOK(c, "B\x00", "D\x00", 5, 20, "C", "C10", "D", "D20") + } + checkV10() + checkV20() + + // ver30: A(_) - B(20) - C(10) - D(_) - E(10) + s.mustDeleteOK(c, "A", 25, 30) + s.mustDeleteOK(c, "D", 25, 30) + + checkV30 := func() { + s.mustScanOK(c, "", 5, 30, "B", "B20", "C", "C10", "E", "E10") + s.mustScanOK(c, "A", 1, 30, "B", "B20") + s.mustScanOK(c, "C\x00", 5, 30, "E", "E10") + } + checkV10() + checkV20() + checkV30() + + // ver40: A(_) - B(_) - C(40) - D(40) - E(10) + s.mustDeleteOK(c, "B", 35, 40) + s.mustPutOK(c, "C", "C40", 35, 40) + s.mustPutOK(c, "D", "D40", 35, 40) + + checkV40 := func() { + s.mustScanOK(c, "", 5, 40, "C", "C40", "D", "D40", "E", "E10") + s.mustScanOK(c, "", 5, 100, "C", "C40", "D", "D40", "E", "E10") + } + checkV10() + checkV20() + checkV30() + checkV40() +} + +func (s *testMockTiKVSuite) TestScanLock(c *C) { + s.mustPutOK(c, "k1", "v1", 1, 2) + s.mustPrewriteOK(c, putMutations("p1", "v5", "s1", "v5"), "p1", 5) + s.mustPrewriteOK(c, putMutations("p2", "v10", "s2", "v10"), "p2", 10) + s.mustPrewriteOK(c, putMutations("p3", "v20", "s3", "v20"), "p3", 20) + + locks, err := s.store.ScanLock([]byte("a"), []byte("r"), 12) + c.Assert(err, IsNil) + c.Assert(locks, DeepEquals, []*kvrpcpb.LockInfo{ + lock("p1", "p1", 5), + lock("p2", "p2", 10), + }) + + s.mustScanLock(c, 10, []*kvrpcpb.LockInfo{ + lock("p1", "p1", 5), + lock("p2", "p2", 10), + lock("s1", "p1", 5), + lock("s2", "p2", 10), + }) +} + +func (s *testMockTiKVSuite) TestCommitConflict(c *C) { + // txn A want set x to A + // txn B want set x to B + // A prewrite. + s.mustPrewriteOK(c, putMutations("x", "A"), "x", 5) + // B prewrite and find A's lock. + req := &kvrpcpb.PrewriteRequest{ + Mutations: putMutations("x", "B"), + PrimaryLock: []byte("x"), + StartVersion: 10, + } + errs := s.store.Prewrite(req) + c.Assert(errs[0], NotNil) + // B find rollback A because A exist too long. 
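+	// That is, B resolves the conflict by rolling back txn A (startTS 5), whose
+	// lock has been held for too long.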
+ s.mustRollbackOK(c, [][]byte{[]byte("x")}, 5) + // if A commit here, it would find its lock removed, report error txn not found. + s.mustCommitErr(c, [][]byte{[]byte("x")}, 5, 10) + // B prewrite itself after it rollback A. + s.mustPrewriteOK(c, putMutations("x", "B"), "x", 10) + // if A commit here, it would find its lock replaced by others and commit fail. + s.mustCommitErr(c, [][]byte{[]byte("x")}, 5, 20) + // B commit success. + s.mustCommitOK(c, [][]byte{[]byte("x")}, 10, 20) + // if B commit again, it will success because the key already committed. + s.mustCommitOK(c, [][]byte{[]byte("x")}, 10, 20) +} + +func (s *testMockTiKVSuite) TestResolveLock(c *C) { + s.mustPrewriteOK(c, putMutations("p1", "v5", "s1", "v5"), "p1", 5) + s.mustPrewriteOK(c, putMutations("p2", "v10", "s2", "v10"), "p2", 10) + s.mustResolveLock(c, 5, 0) + s.mustResolveLock(c, 10, 20) + s.mustGetNone(c, "p1", 20) + s.mustGetNone(c, "s1", 30) + s.mustGetOK(c, "p2", 20, "v10") + s.mustGetOK(c, "s2", 30, "v10") + s.mustScanLock(c, 30, nil) +} + +func (s *testMockTiKVSuite) TestBatchResolveLock(c *C) { + s.mustPrewriteOK(c, putMutations("p1", "v11", "s1", "v11"), "p1", 11) + s.mustPrewriteOK(c, putMutations("p2", "v12", "s2", "v12"), "p2", 12) + s.mustPrewriteOK(c, putMutations("p3", "v13"), "p3", 13) + s.mustPrewriteOK(c, putMutations("p4", "v14", "s3", "v14", "s4", "v14"), "p4", 14) + s.mustPrewriteOK(c, putMutations("p5", "v15", "s5", "v15"), "p5", 15) + txnInfos := map[uint64]uint64{ + 11: 0, + 12: 22, + 13: 0, + 14: 24, + } + s.mustBatchResolveLock(c, txnInfos) + s.mustGetNone(c, "p1", 20) + s.mustGetNone(c, "p3", 30) + s.mustGetOK(c, "p2", 30, "v12") + s.mustGetOK(c, "s4", 30, "v14") + s.mustScanLock(c, 30, []*kvrpcpb.LockInfo{ + lock("p5", "p5", 15), + lock("s5", "p5", 15), + }) + txnInfos = map[uint64]uint64{ + 15: 0, + } + s.mustBatchResolveLock(c, txnInfos) + s.mustScanLock(c, 30, nil) +} + +func (s *testMockTiKVSuite) TestGC(c *C) { + var safePoint uint64 = 100 + + // Prepare data + s.mustPutOK(c, "k1", "v1", 1, 2) + s.mustPutOK(c, "k1", "v2", 11, 12) + + s.mustPutOK(c, "k2", "v1", 1, 2) + s.mustPutOK(c, "k2", "v2", 11, 12) + s.mustPutOK(c, "k2", "v3", 101, 102) + + s.mustPutOK(c, "k3", "v1", 1, 2) + s.mustPutOK(c, "k3", "v2", 11, 12) + s.mustDeleteOK(c, "k3", 101, 102) + + s.mustPutOK(c, "k4", "v1", 1, 2) + s.mustDeleteOK(c, "k4", 11, 12) + + // Check prepared data + s.mustGetOK(c, "k1", 5, "v1") + s.mustGetOK(c, "k1", 15, "v2") + s.mustGetOK(c, "k2", 5, "v1") + s.mustGetOK(c, "k2", 15, "v2") + s.mustGetOK(c, "k2", 105, "v3") + s.mustGetOK(c, "k3", 5, "v1") + s.mustGetOK(c, "k3", 15, "v2") + s.mustGetNone(c, "k3", 105) + s.mustGetOK(c, "k4", 5, "v1") + s.mustGetNone(c, "k4", 105) + + s.mustGC(c, safePoint) + + s.mustGetNone(c, "k1", 5) + s.mustGetOK(c, "k1", 15, "v2") + s.mustGetNone(c, "k2", 5) + s.mustGetOK(c, "k2", 15, "v2") + s.mustGetOK(c, "k2", 105, "v3") + s.mustGetNone(c, "k3", 5) + s.mustGetOK(c, "k3", 15, "v2") + s.mustGetNone(c, "k3", 105) + s.mustGetNone(c, "k4", 5) + s.mustGetNone(c, "k4", 105) +} + +func (s *testMockTiKVSuite) TestRollbackAndWriteConflict(c *C) { + s.mustPutOK(c, "test", "test", 1, 3) + req := &kvrpcpb.PrewriteRequest{ + Mutations: putMutations("lock", "lock", "test", "test1"), + PrimaryLock: []byte("test"), + StartVersion: 2, + LockTtl: 2, + } + errs := s.store.Prewrite(req) + s.mustWriteWriteConflict(c, errs, 1) + + s.mustPutOK(c, "test", "test2", 5, 8) + + // simulate `getTxnStatus` for txn 2. 
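+	// Cleanup resolves whatever is left of txn 2 on "test"; the later prewrite at
+	// ts 6 still fails because the key was committed again at ts 8.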
+ err := s.store.Cleanup([]byte("test"), 2, math.MaxUint64) + c.Assert(err, IsNil) + req = &kvrpcpb.PrewriteRequest{ + Mutations: putMutations("test", "test3"), + PrimaryLock: []byte("test"), + StartVersion: 6, + LockTtl: 1, + } + errs = s.store.Prewrite(req) + s.mustWriteWriteConflict(c, errs, 0) +} + +func (s *testMockTiKVSuite) TestDeleteRange(c *C) { + for i := 1; i <= 5; i++ { + key := string(byte(i) + byte('0')) + value := "v" + key + s.mustPutOK(c, key, value, uint64(1+2*i), uint64(2+2*i)) + } + + s.mustScanOK(c, "0", 10, 20, "1", "v1", "2", "v2", "3", "v3", "4", "v4", "5", "v5") + + s.mustDeleteRange(c, "2", "4") + s.mustScanOK(c, "0", 10, 30, "1", "v1", "4", "v4", "5", "v5") + + s.mustDeleteRange(c, "5", "5") + s.mustScanOK(c, "0", 10, 40, "1", "v1", "4", "v4", "5", "v5") + + s.mustDeleteRange(c, "41", "42") + s.mustScanOK(c, "0", 10, 50, "1", "v1", "4", "v4", "5", "v5") + + s.mustDeleteRange(c, "4\x00", "5\x00") + s.mustScanOK(c, "0", 10, 60, "1", "v1", "4", "v4") + + s.mustDeleteRange(c, "0", "9") + s.mustScanOK(c, "0", 10, 70) +} + +func (s *testMockTiKVSuite) mustWriteWriteConflict(c *C, errs []error, i int) { + c.Assert(errs[i], NotNil) + _, ok := errs[i].(*ErrConflict) + c.Assert(ok, IsTrue) +} + +func (s testMarshal) TestMarshalmvccLock(c *C) { + l := mvccLock{ + startTS: 47, + primary: []byte{'a', 'b', 'c'}, + value: []byte{'d', 'e'}, + op: kvrpcpb.Op_Put, + ttl: 444, + } + bin, err := l.MarshalBinary() + c.Assert(err, IsNil) + + var l1 mvccLock + err = l1.UnmarshalBinary(bin) + c.Assert(err, IsNil) + + c.Assert(l.startTS, Equals, l1.startTS) + c.Assert(l.op, Equals, l1.op) + c.Assert(l.ttl, Equals, l1.ttl) + c.Assert(string(l.primary), Equals, string(l1.primary)) + c.Assert(string(l.value), Equals, string(l1.value)) +} + +func (s testMarshal) TestMarshalmvccValue(c *C) { + v := mvccValue{ + valueType: typePut, + startTS: 42, + commitTS: 55, + value: []byte{'d', 'e'}, + } + bin, err := v.MarshalBinary() + c.Assert(err, IsNil) + + var v1 mvccValue + err = v1.UnmarshalBinary(bin) + c.Assert(err, IsNil) + + c.Assert(v.valueType, Equals, v1.valueType) + c.Assert(v.startTS, Equals, v1.startTS) + c.Assert(v.commitTS, Equals, v1.commitTS) + c.Assert(string(v.value), Equals, string(v.value)) +} + +func (s *testMVCCLevelDB) TestErrors(c *C) { + c.Assert((&ErrKeyAlreadyExist{}).Error(), Equals, `key already exist, key: ""`) + c.Assert(ErrAbort("txn").Error(), Equals, "abort: txn") + c.Assert(ErrAlreadyCommitted(0).Error(), Equals, "txn already committed") + c.Assert((&ErrConflict{}).Error(), Equals, "write conflict") +} + +func (s *testMVCCLevelDB) TestCheckTxnStatus(c *C) { + startTS := uint64(5 << 18) + s.mustPrewriteWithTTLOK(c, putMutations("pk", "val"), "pk", startTS, 666) + + ttl, commitTS, _, err := s.store.CheckTxnStatus([]byte("pk"), startTS, 666) + c.Assert(err, IsNil) + c.Assert(ttl, Equals, uint64(666)) + c.Assert(commitTS, Equals, uint64(0)) + + s.mustCommitOK(c, [][]byte{[]byte("pk")}, startTS, startTS+101) + + ttl, commitTS, _, err = s.store.CheckTxnStatus([]byte("pk"), startTS, 666) + c.Assert(err, IsNil) + c.Assert(ttl, Equals, uint64(0)) + c.Assert(commitTS, Equals, uint64(startTS+101)) + + s.mustPrewriteWithTTLOK(c, putMutations("pk1", "val"), "pk1", startTS, 666) + s.mustRollbackOK(c, [][]byte{[]byte("pk1")}, startTS) + + ttl, commitTS, action, err := s.store.CheckTxnStatus([]byte("pk1"), startTS, 666) + c.Assert(err, IsNil) + c.Assert(ttl, Equals, uint64(0)) + c.Assert(commitTS, Equals, uint64(0)) + c.Assert(action, Equals, kvrpcpb.Action_NoAction) + + 
s.mustPrewriteWithTTLOK(c, putMutations("pk2", "val"), "pk2", startTS, 666) + currentTS := uint64(777 << 18) + ttl, commitTS, action, err = s.store.CheckTxnStatus([]byte("pk2"), startTS, currentTS) + c.Assert(err, IsNil) + c.Assert(ttl, Equals, uint64(0)) + c.Assert(commitTS, Equals, uint64(0)) + c.Assert(action, Equals, kvrpcpb.Action_TTLExpireRollback) +} + +func (s *testMVCCLevelDB) TestTxnHeartBeat(c *C) { + s.mustPrewriteWithTTLOK(c, putMutations("pk", "val"), "pk", 5, 666) + + // Update the ttl + ttl, err := s.store.TxnHeartBeat([]byte("pk"), 5, 888) + c.Assert(err, IsNil) + c.Assert(ttl, Greater, uint64(666)) + + // Advise ttl is small + ttl, err = s.store.TxnHeartBeat([]byte("pk"), 5, 300) + c.Assert(err, IsNil) + c.Assert(ttl, Greater, uint64(300)) + + // The lock has already been clean up + c.Assert(s.store.Cleanup([]byte("pk"), 5, math.MaxUint64), IsNil) + _, err = s.store.TxnHeartBeat([]byte("pk"), 5, 1000) + c.Assert(err, NotNil) +} diff --git a/store/mockstore/mocktikv/mvcc.go b/store/mockstore/mocktikv/mvcc.go new file mode 100644 index 0000000..db05c7a --- /dev/null +++ b/store/mockstore/mocktikv/mvcc.go @@ -0,0 +1,295 @@ +// Copyright 2016 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package mocktikv + +import ( + "bytes" + "encoding/binary" + "io" + "math" + + "github.com/google/btree" + "github.com/pingcap-incubator/tinykv/proto/pkg/kvrpcpb" + "github.com/pingcap/errors" + "github.com/pingcap/tidb/util/codec" +) + +type mvccValueType int + +const ( + typePut mvccValueType = iota + typeDelete + typeRollback +) + +type mvccValue struct { + valueType mvccValueType + startTS uint64 + commitTS uint64 + value []byte +} + +type mvccLock struct { + startTS uint64 + primary []byte + value []byte + op kvrpcpb.Op + ttl uint64 + forUpdateTS uint64 +} + +type mvccEntry struct { + key MvccKey + values []mvccValue + lock *mvccLock +} + +// MarshalBinary implements encoding.BinaryMarshaler interface. +func (l *mvccLock) MarshalBinary() ([]byte, error) { + var ( + mh marshalHelper + buf bytes.Buffer + ) + mh.WriteNumber(&buf, l.startTS) + mh.WriteSlice(&buf, l.primary) + mh.WriteSlice(&buf, l.value) + mh.WriteNumber(&buf, l.op) + mh.WriteNumber(&buf, l.ttl) + mh.WriteNumber(&buf, l.forUpdateTS) + return buf.Bytes(), errors.Trace(mh.err) +} + +// UnmarshalBinary implements encoding.BinaryUnmarshaler interface. +func (l *mvccLock) UnmarshalBinary(data []byte) error { + var mh marshalHelper + buf := bytes.NewBuffer(data) + mh.ReadNumber(buf, &l.startTS) + mh.ReadSlice(buf, &l.primary) + mh.ReadSlice(buf, &l.value) + mh.ReadNumber(buf, &l.op) + mh.ReadNumber(buf, &l.ttl) + mh.ReadNumber(buf, &l.forUpdateTS) + return errors.Trace(mh.err) +} + +// MarshalBinary implements encoding.BinaryMarshaler interface. 
+func (v mvccValue) MarshalBinary() ([]byte, error) { + var ( + mh marshalHelper + buf bytes.Buffer + ) + mh.WriteNumber(&buf, int64(v.valueType)) + mh.WriteNumber(&buf, v.startTS) + mh.WriteNumber(&buf, v.commitTS) + mh.WriteSlice(&buf, v.value) + return buf.Bytes(), errors.Trace(mh.err) +} + +// UnmarshalBinary implements encoding.BinaryUnmarshaler interface. +func (v *mvccValue) UnmarshalBinary(data []byte) error { + var mh marshalHelper + buf := bytes.NewBuffer(data) + var vt int64 + mh.ReadNumber(buf, &vt) + v.valueType = mvccValueType(vt) + mh.ReadNumber(buf, &v.startTS) + mh.ReadNumber(buf, &v.commitTS) + mh.ReadSlice(buf, &v.value) + return errors.Trace(mh.err) +} + +type marshalHelper struct { + err error +} + +func (mh *marshalHelper) WriteSlice(buf io.Writer, slice []byte) { + if mh.err != nil { + return + } + var tmp [binary.MaxVarintLen64]byte + off := binary.PutUvarint(tmp[:], uint64(len(slice))) + if err := writeFull(buf, tmp[:off]); err != nil { + mh.err = errors.Trace(err) + return + } + if err := writeFull(buf, slice); err != nil { + mh.err = errors.Trace(err) + } +} + +func (mh *marshalHelper) WriteNumber(buf io.Writer, n interface{}) { + if mh.err != nil { + return + } + err := binary.Write(buf, binary.LittleEndian, n) + if err != nil { + mh.err = errors.Trace(err) + } +} + +func writeFull(w io.Writer, slice []byte) error { + written := 0 + for written < len(slice) { + n, err := w.Write(slice[written:]) + if err != nil { + return errors.Trace(err) + } + written += n + } + return nil +} + +func (mh *marshalHelper) ReadNumber(r io.Reader, n interface{}) { + if mh.err != nil { + return + } + err := binary.Read(r, binary.LittleEndian, n) + if err != nil { + mh.err = errors.Trace(err) + } +} + +func (mh *marshalHelper) ReadSlice(r *bytes.Buffer, slice *[]byte) { + if mh.err != nil { + return + } + sz, err := binary.ReadUvarint(r) + if err != nil { + mh.err = errors.Trace(err) + return + } + const c10M = 10 * 1024 * 1024 + if sz > c10M { + mh.err = errors.New("too large slice, maybe something wrong") + return + } + data := make([]byte, sz) + if _, err := io.ReadFull(r, data); err != nil { + mh.err = errors.Trace(err) + return + } + *slice = data +} + +// lockErr returns ErrLocked. +// Note that parameter key is raw key, while key in ErrLocked is mvcc key. +func (l *mvccLock) lockErr(key []byte) error { + return &ErrLocked{ + Key: mvccEncode(key, lockVer), + Primary: l.primary, + StartTS: l.startTS, + TTL: l.ttl, + LockType: l.op, + } +} + +func (l *mvccLock) check(ts uint64, key []byte) (uint64, error) { + // ignore when ts is older than lock or lock's type is Lock. + if l.startTS > ts || l.op == kvrpcpb.Op_Lock { + return ts, nil + } + // for point get latest version. + if ts == math.MaxUint64 && bytes.Equal(l.primary, key) { + return l.startTS - 1, nil + } + return 0, l.lockErr(key) +} + +func (e *mvccEntry) Less(than btree.Item) bool { + return bytes.Compare(e.key, than.(*mvccEntry).key) < 0 +} + +func (e *mvccEntry) Get(ts uint64) ([]byte, error) { + if e.lock != nil { + var err error + ts, err = e.lock.check(ts, e.key.Raw()) + if err != nil { + return nil, err + } + } + for _, v := range e.values { + if v.commitTS <= ts && v.valueType != typeRollback { + return v.value, nil + } + } + return nil, nil +} + +// MVCCStore is a mvcc key-value storage. 
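+// Read methods take the reading transaction's start timestamp and resolve
+// visibility against committed versions and any outstanding locks.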
+type MVCCStore interface { + Get(key []byte, startTS uint64) ([]byte, error) + Scan(startKey, endKey []byte, limit int, startTS uint64) []Pair + ReverseScan(startKey, endKey []byte, limit int, startTS uint64) []Pair + Prewrite(req *kvrpcpb.PrewriteRequest) []error + Commit(keys [][]byte, startTS, commitTS uint64) error + Rollback(keys [][]byte, startTS uint64) error + Cleanup(key []byte, startTS, currentTS uint64) error + ScanLock(startKey, endKey []byte, maxTS uint64) ([]*kvrpcpb.LockInfo, error) + TxnHeartBeat(primaryKey []byte, startTS uint64, adviseTTL uint64) (uint64, error) + ResolveLock(startKey, endKey []byte, startTS, commitTS uint64) error + BatchResolveLock(startKey, endKey []byte, txnInfos map[uint64]uint64) error + GC(startKey, endKey []byte, safePoint uint64) error + DeleteRange(startKey, endKey []byte) error + CheckTxnStatus(primaryKey []byte, lockTS uint64, currentTS uint64) (uint64, uint64, kvrpcpb.Action, error) + Close() error +} + +// RawKV is a key-value storage. MVCCStore can be implemented upon it with timestamp encoded into key. +type RawKV interface { + RawGet(key []byte) []byte + RawBatchGet(keys [][]byte) [][]byte + RawScan(startKey, endKey []byte, limit int) []Pair // Scan the range of [startKey, endKey) + RawReverseScan(startKey, endKey []byte, limit int) []Pair // Scan the range of [endKey, startKey) + RawPut(key, value []byte) + RawBatchPut(keys, values [][]byte) + RawDelete(key []byte) + RawBatchDelete(keys [][]byte) + RawDeleteRange(startKey, endKey []byte) +} + +// Pair is a KV pair read from MvccStore or an error if any occurs. +type Pair struct { + Key []byte + Value []byte + Err error +} + +func regionContains(startKey []byte, endKey []byte, key []byte) bool { + return bytes.Compare(startKey, key) <= 0 && + (bytes.Compare(key, endKey) < 0 || len(endKey) == 0) +} + +// MvccKey is the encoded key type. +// On TiKV, keys are encoded before they are saved into storage engine. +type MvccKey []byte + +// NewMvccKey encodes a key into MvccKey. +func NewMvccKey(key []byte) MvccKey { + if len(key) == 0 { + return nil + } + return codec.EncodeBytes(nil, key) +} + +// Raw decodes a MvccKey to original key. +func (key MvccKey) Raw() []byte { + if len(key) == 0 { + return nil + } + _, k, err := codec.DecodeBytes(key, nil) + if err != nil { + panic(err) + } + return k +} diff --git a/store/mockstore/mocktikv/mvcc_leveldb.go b/store/mockstore/mocktikv/mvcc_leveldb.go new file mode 100644 index 0000000..d078330 --- /dev/null +++ b/store/mockstore/mocktikv/mvcc_leveldb.go @@ -0,0 +1,1252 @@ +// Copyright 2017 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package mocktikv + +import ( + "bytes" + "math" + "sync" + + "github.com/pingcap-incubator/tinykv/proto/pkg/kvrpcpb" + "github.com/pingcap/errors" + "github.com/pingcap/goleveldb/leveldb" + "github.com/pingcap/goleveldb/leveldb/iterator" + "github.com/pingcap/goleveldb/leveldb/opt" + "github.com/pingcap/goleveldb/leveldb/storage" + "github.com/pingcap/goleveldb/leveldb/util" + "github.com/pingcap/tidb/parser/terror" + "github.com/pingcap/tidb/store/tikv/oracle" + "github.com/pingcap/tidb/util/codec" + "github.com/pingcap/tidb/util/logutil" + "go.uber.org/zap" +) + +// MVCCLevelDB implements the MVCCStore interface. +type MVCCLevelDB struct { + // Key layout: + // ... + // Key_lock -- (0) + // Key_verMax -- (1) + // ... + // Key_ver+1 -- (2) + // Key_ver -- (3) + // Key_ver-1 -- (4) + // ... + // Key_0 -- (5) + // NextKey_lock -- (6) + // NextKey_verMax -- (7) + // ... + // NextKey_ver+1 -- (8) + // NextKey_ver -- (9) + // NextKey_ver-1 -- (10) + // ... + // NextKey_0 -- (11) + // ... + // EOF + + // db represents leveldb + db *leveldb.DB + // mu used for lock + // leveldb can not guarantee multiple operations to be atomic, for example, read + // then write, another write may happen during it, so this lock is necessory. + mu sync.RWMutex +} + +const lockVer uint64 = math.MaxUint64 + +// ErrInvalidEncodedKey describes parsing an invalid format of EncodedKey. +var ErrInvalidEncodedKey = errors.New("invalid encoded key") + +// mvccEncode returns the encoded key. +func mvccEncode(key []byte, ver uint64) []byte { + b := codec.EncodeBytes(nil, key) + ret := codec.EncodeUintDesc(b, ver) + return ret +} + +// mvccDecode parses the origin key and version of an encoded key, if the encoded key is a meta key, +// just returns the origin key. +func mvccDecode(encodedKey []byte) ([]byte, uint64, error) { + // Skip DataPrefix + remainBytes, key, err := codec.DecodeBytes(encodedKey, nil) + if err != nil { + // should never happen + return nil, 0, errors.Trace(err) + } + // if it's meta key + if len(remainBytes) == 0 { + return key, 0, nil + } + var ver uint64 + remainBytes, ver, err = codec.DecodeUintDesc(remainBytes) + if err != nil { + // should never happen + return nil, 0, errors.Trace(err) + } + if len(remainBytes) != 0 { + return nil, 0, ErrInvalidEncodedKey + } + return key, ver, nil +} + +// MustNewMVCCStore is used for testing, use NewMVCCLevelDB instead. +func MustNewMVCCStore() MVCCStore { + mvccStore, err := NewMVCCLevelDB("") + if err != nil { + panic(err) + } + return mvccStore +} + +// NewMVCCLevelDB returns a new MVCCLevelDB object. +func NewMVCCLevelDB(path string) (*MVCCLevelDB, error) { + var ( + d *leveldb.DB + err error + ) + if path == "" { + d, err = leveldb.Open(storage.NewMemStorage(), nil) + } else { + d, err = leveldb.OpenFile(path, &opt.Options{BlockCacheCapacity: 600 * 1024 * 1024}) + } + + return &MVCCLevelDB{db: d}, errors.Trace(err) +} + +// Iterator wraps iterator.Iterator to provide Valid() method. +type Iterator struct { + iterator.Iterator + valid bool +} + +// Next moves the iterator to the next key/value pair. +func (iter *Iterator) Next() { + iter.valid = iter.Iterator.Next() +} + +// Valid returns whether the iterator is exhausted. 
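+// It reports true while the iterator still points at a key/value pair and
+// false once the underlying leveldb iterator is exhausted.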
+func (iter *Iterator) Valid() bool { + return iter.valid +} + +func newIterator(db *leveldb.DB, slice *util.Range) *Iterator { + iter := &Iterator{db.NewIterator(slice, nil), true} + iter.Next() + return iter +} + +func newScanIterator(db *leveldb.DB, startKey, endKey []byte) (*Iterator, []byte, error) { + var start, end []byte + if len(startKey) > 0 { + start = mvccEncode(startKey, lockVer) + } + if len(endKey) > 0 { + end = mvccEncode(endKey, lockVer) + } + iter := newIterator(db, &util.Range{ + Start: start, + Limit: end, + }) + // newScanIterator must handle startKey is nil, in this case, the real startKey + // should be change the frist key of the store. + if len(startKey) == 0 && iter.Valid() { + key, _, err := mvccDecode(iter.Key()) + if err != nil { + return nil, nil, errors.Trace(err) + } + startKey = key + } + return iter, startKey, nil +} + +type lockDecoder struct { + lock mvccLock + expectKey []byte +} + +// Decode decodes the lock value if current iterator is at expectKey::lock. +func (dec *lockDecoder) Decode(iter *Iterator) (bool, error) { + if iter.Error() != nil || !iter.Valid() { + return false, iter.Error() + } + + iterKey := iter.Key() + key, ver, err := mvccDecode(iterKey) + if err != nil { + return false, errors.Trace(err) + } + if !bytes.Equal(key, dec.expectKey) { + return false, nil + } + if ver != lockVer { + return false, nil + } + + var lock mvccLock + err = lock.UnmarshalBinary(iter.Value()) + if err != nil { + return false, errors.Trace(err) + } + dec.lock = lock + iter.Next() + return true, nil +} + +type valueDecoder struct { + value mvccValue + expectKey []byte +} + +// Decode decodes a mvcc value if iter key is expectKey. +func (dec *valueDecoder) Decode(iter *Iterator) (bool, error) { + if iter.Error() != nil || !iter.Valid() { + return false, iter.Error() + } + + key, ver, err := mvccDecode(iter.Key()) + if err != nil { + return false, errors.Trace(err) + } + if !bytes.Equal(key, dec.expectKey) { + return false, nil + } + if ver == lockVer { + return false, nil + } + + var value mvccValue + err = value.UnmarshalBinary(iter.Value()) + if err != nil { + return false, errors.Trace(err) + } + dec.value = value + iter.Next() + return true, nil +} + +type skipDecoder struct { + currKey []byte +} + +// Decode skips the iterator as long as its key is currKey, the new key would be stored. +func (dec *skipDecoder) Decode(iter *Iterator) (bool, error) { + if iter.Error() != nil { + return false, iter.Error() + } + for iter.Valid() { + key, _, err := mvccDecode(iter.Key()) + if err != nil { + return false, errors.Trace(err) + } + if !bytes.Equal(key, dec.currKey) { + dec.currKey = key + return true, nil + } + iter.Next() + } + return false, nil +} + +// Get implements the MVCCStore interface. 
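+// It returns the newest value committed at or before startTS, nil if no version
+// is visible, or an error if the read is blocked by a conflicting lock.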
+// key cannot be nil or []byte{} +func (mvcc *MVCCLevelDB) Get(key []byte, startTS uint64) ([]byte, error) { + mvcc.mu.RLock() + defer mvcc.mu.RUnlock() + + return mvcc.getValue(key, startTS) +} + +func (mvcc *MVCCLevelDB) getValue(key []byte, startTS uint64) ([]byte, error) { + startKey := mvccEncode(key, lockVer) + iter := newIterator(mvcc.db, &util.Range{ + Start: startKey, + }) + defer iter.Release() + + return getValue(iter, key, startTS) +} + +func getValue(iter *Iterator, key []byte, startTS uint64) ([]byte, error) { + dec1 := lockDecoder{expectKey: key} + ok, err := dec1.Decode(iter) + if ok { + startTS, err = dec1.lock.check(startTS, key) + } + if err != nil { + return nil, errors.Trace(err) + } + dec2 := valueDecoder{expectKey: key} + for iter.Valid() { + ok, err := dec2.Decode(iter) + if err != nil { + return nil, errors.Trace(err) + } + if !ok { + break + } + + value := &dec2.value + if value.valueType == typeRollback { + continue + } + // Read the first committed value that can be seen at startTS. + if value.commitTS <= startTS { + if value.valueType == typeDelete { + return nil, nil + } + return value.value, nil + } + } + return nil, nil +} + +// Scan implements the MVCCStore interface. +func (mvcc *MVCCLevelDB) Scan(startKey, endKey []byte, limit int, startTS uint64) []Pair { + mvcc.mu.RLock() + defer mvcc.mu.RUnlock() + + iter, currKey, err := newScanIterator(mvcc.db, startKey, endKey) + defer iter.Release() + if err != nil { + logutil.BgLogger().Error("scan new iterator fail", zap.Error(err)) + return nil + } + + ok := true + var pairs []Pair + for len(pairs) < limit && ok { + value, err := getValue(iter, currKey, startTS) + if err != nil { + pairs = append(pairs, Pair{ + Key: currKey, + Err: errors.Trace(err), + }) + } + if value != nil { + pairs = append(pairs, Pair{ + Key: currKey, + Value: value, + }) + } + + skip := skipDecoder{currKey} + ok, err = skip.Decode(iter) + if err != nil { + logutil.BgLogger().Error("seek to next key error", zap.Error(err)) + break + } + currKey = skip.currKey + } + return pairs +} + +// ReverseScan implements the MVCCStore interface. The search range is [startKey, endKey). +func (mvcc *MVCCLevelDB) ReverseScan(startKey, endKey []byte, limit int, startTS uint64) []Pair { + mvcc.mu.RLock() + defer mvcc.mu.RUnlock() + + var mvccEnd []byte + if len(endKey) != 0 { + mvccEnd = mvccEncode(endKey, lockVer) + } + iter := mvcc.db.NewIterator(&util.Range{ + Limit: mvccEnd, + }, nil) + defer iter.Release() + + succ := iter.Last() + currKey, _, err := mvccDecode(iter.Key()) + // TODO: return error. 
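+	// The loop below walks the encoded keyspace backwards, grouping the lock and
+	// all versions that belong to one user key into a reverseScanHelper entry;
+	// finishEntry then evaluates that entry at startTS to decide whether the key
+	// contributes a Pair to the result.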
+ terror.Log(errors.Trace(err)) + helper := reverseScanHelper{ + startTS: startTS, + currKey: currKey, + } + + for succ && len(helper.pairs) < limit { + key, ver, err := mvccDecode(iter.Key()) + if err != nil { + break + } + if bytes.Compare(key, startKey) < 0 { + break + } + + if !bytes.Equal(key, helper.currKey) { + helper.finishEntry() + helper.currKey = key + } + if ver == lockVer { + var lock mvccLock + err = lock.UnmarshalBinary(iter.Value()) + helper.entry.lock = &lock + } else { + var value mvccValue + err = value.UnmarshalBinary(iter.Value()) + helper.entry.values = append(helper.entry.values, value) + } + if err != nil { + logutil.BgLogger().Error("unmarshal fail", zap.Error(err)) + break + } + succ = iter.Prev() + } + if len(helper.pairs) < limit { + helper.finishEntry() + } + return helper.pairs +} + +type reverseScanHelper struct { + startTS uint64 + currKey []byte + entry mvccEntry + pairs []Pair +} + +func (helper *reverseScanHelper) finishEntry() { + reverse(helper.entry.values) + helper.entry.key = NewMvccKey(helper.currKey) + val, err := helper.entry.Get(helper.startTS) + if len(val) != 0 || err != nil { + helper.pairs = append(helper.pairs, Pair{ + Key: helper.currKey, + Value: val, + Err: err, + }) + } + helper.entry = mvccEntry{} +} + +func reverse(values []mvccValue) { + i, j := 0, len(values)-1 + for i < j { + values[i], values[j] = values[j], values[i] + i++ + j-- + } +} + +// Prewrite implements the MVCCStore interface. +func (mvcc *MVCCLevelDB) Prewrite(req *kvrpcpb.PrewriteRequest) []error { + mutations := req.Mutations + primary := req.PrimaryLock + startTS := req.StartVersion + ttl := req.LockTtl + mvcc.mu.Lock() + defer mvcc.mu.Unlock() + + anyError := false + batch := &leveldb.Batch{} + errs := make([]error, 0, len(mutations)) + for _, m := range mutations { + // If the operation is Insert, check if key is exists at first. + var err error + err = prewriteMutation(mvcc.db, batch, m, startTS, primary, ttl) + errs = append(errs, err) + if err != nil { + anyError = true + } + } + if anyError { + return errs + } + if err := mvcc.db.Write(batch, nil); err != nil { + return []error{err} + } + + return errs +} + +func checkConflictValue(iter *Iterator, m *kvrpcpb.Mutation, startTS uint64) error { + dec := valueDecoder{ + expectKey: m.Key, + } + _, err := dec.Decode(iter) + if err != nil { + return errors.Trace(err) + } + + // Note that it's a write conflict here, even if the value is a rollback one. 
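+	// A prewrite at startTS must not write below history that a reader at
+	// startTS could already observe, so any record committed at or after
+	// startTS fails the prewrite. For example, if transaction A starts at
+	// ts=100 and another transaction later commits this key at ts=105, A's
+	// prewrite is rejected and A has to retry with a newer start timestamp.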
+ if dec.value.commitTS >= startTS { + return &ErrConflict{ + StartTS: startTS, + ConflictTS: dec.value.startTS, + ConflictCommitTS: dec.value.commitTS, + Key: m.Key, + } + } + return nil +} + +func prewriteMutation(db *leveldb.DB, batch *leveldb.Batch, + mutation *kvrpcpb.Mutation, startTS uint64, + primary []byte, ttl uint64) error { + startKey := mvccEncode(mutation.Key, lockVer) + iter := newIterator(db, &util.Range{ + Start: startKey, + }) + defer iter.Release() + + dec := lockDecoder{ + expectKey: mutation.Key, + } + ok, err := dec.Decode(iter) + if err != nil { + return errors.Trace(err) + } + if ok { + if dec.lock.startTS != startTS { + return dec.lock.lockErr(mutation.Key) + } + } else { + err = checkConflictValue(iter, mutation, startTS) + if err != nil { + return err + } + } + + op := mutation.GetOp() + lock := mvccLock{ + startTS: startTS, + primary: primary, + value: mutation.Value, + op: op, + ttl: ttl, + } + + writeKey := mvccEncode(mutation.Key, lockVer) + writeValue, err := lock.MarshalBinary() + if err != nil { + return errors.Trace(err) + } + + batch.Put(writeKey, writeValue) + return nil +} + +// Commit implements the MVCCStore interface. +func (mvcc *MVCCLevelDB) Commit(keys [][]byte, startTS, commitTS uint64) error { + mvcc.mu.Lock() + defer func() { + mvcc.mu.Unlock() + }() + + batch := &leveldb.Batch{} + for _, k := range keys { + err := commitKey(mvcc.db, batch, k, startTS, commitTS) + if err != nil { + return errors.Trace(err) + } + } + return mvcc.db.Write(batch, nil) +} + +func commitKey(db *leveldb.DB, batch *leveldb.Batch, key []byte, startTS, commitTS uint64) error { + startKey := mvccEncode(key, lockVer) + iter := newIterator(db, &util.Range{ + Start: startKey, + }) + defer iter.Release() + + dec := lockDecoder{ + expectKey: key, + } + ok, err := dec.Decode(iter) + if err != nil { + return errors.Trace(err) + } + if !ok || dec.lock.startTS != startTS { + // If the lock of this transaction is not found, or the lock is replaced by + // another transaction, check commit information of this transaction. + c, ok, err1 := getTxnCommitInfo(iter, key, startTS) + if err1 != nil { + return errors.Trace(err1) + } + if ok && c.valueType != typeRollback { + // c.valueType != typeRollback means the transaction is already committed, do nothing. + return nil + } + return ErrRetryable("txn not found") + } + + if err = commitLock(batch, dec.lock, key, startTS, commitTS); err != nil { + return errors.Trace(err) + } + return nil +} + +func commitLock(batch *leveldb.Batch, lock mvccLock, key []byte, startTS, commitTS uint64) error { + if lock.op != kvrpcpb.Op_Lock { + var valueType mvccValueType + if lock.op == kvrpcpb.Op_Put { + valueType = typePut + } else { + valueType = typeDelete + } + value := mvccValue{ + valueType: valueType, + startTS: startTS, + commitTS: commitTS, + value: lock.value, + } + writeKey := mvccEncode(key, commitTS) + writeValue, err := value.MarshalBinary() + if err != nil { + return errors.Trace(err) + } + batch.Put(writeKey, writeValue) + } + batch.Delete(mvccEncode(key, lockVer)) + return nil +} + +// Rollback implements the MVCCStore interface. 
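+//
+// Rolling back writes a typeRollback record at startTS for every key and
+// deletes the transaction's lock if it is still there, so a later Prewrite or
+// Commit from the same transaction can no longer succeed. A minimal
+// caller-side sketch (keys and startTS as used in the failed prewrite):
+//
+//	if err := store.Rollback(keys, startTS); err != nil {
+//		// e.g. ErrAlreadyCommitted if the transaction has already committed
+//	}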
+func (mvcc *MVCCLevelDB) Rollback(keys [][]byte, startTS uint64) error {
+	mvcc.mu.Lock()
+	defer func() {
+		mvcc.mu.Unlock()
+	}()
+
+	batch := &leveldb.Batch{}
+	for _, k := range keys {
+		err := rollbackKey(mvcc.db, batch, k, startTS)
+		if err != nil {
+			return errors.Trace(err)
+		}
+	}
+	return mvcc.db.Write(batch, nil)
+}
+
+func rollbackKey(db *leveldb.DB, batch *leveldb.Batch, key []byte, startTS uint64) error {
+	startKey := mvccEncode(key, lockVer)
+	iter := newIterator(db, &util.Range{
+		Start: startKey,
+	})
+	defer iter.Release()
+
+	if iter.Valid() {
+		dec := lockDecoder{
+			expectKey: key,
+		}
+		ok, err := dec.Decode(iter)
+		if err != nil {
+			return errors.Trace(err)
+		}
+		// If the current transaction's lock exists.
+		if ok && dec.lock.startTS == startTS {
+			if err = rollbackLock(batch, key, startTS); err != nil {
+				return errors.Trace(err)
+			}
+			return nil
+		}
+
+		// If the current transaction's lock does not exist,
+		// check whether the commit info of the current transaction exists.
+		c, ok, err := getTxnCommitInfo(iter, key, startTS)
+		if err != nil {
+			return errors.Trace(err)
+		}
+		if ok {
+			// If the current transaction is already committed.
+			if c.valueType != typeRollback {
+				return ErrAlreadyCommitted(c.commitTS)
+			}
+			// If the current transaction has already been rolled back.
+			return nil
+		}
+	}
+
+	// If the current transaction was not prewritten before.
+	value := mvccValue{
+		valueType: typeRollback,
+		startTS:   startTS,
+		commitTS:  startTS,
+	}
+	writeKey := mvccEncode(key, startTS)
+	writeValue, err := value.MarshalBinary()
+	if err != nil {
+		return errors.Trace(err)
+	}
+	batch.Put(writeKey, writeValue)
+	return nil
+}
+
+func rollbackLock(batch *leveldb.Batch, key []byte, startTS uint64) error {
+	tomb := mvccValue{
+		valueType: typeRollback,
+		startTS:   startTS,
+		commitTS:  startTS,
+	}
+	writeKey := mvccEncode(key, startTS)
+	writeValue, err := tomb.MarshalBinary()
+	if err != nil {
+		return errors.Trace(err)
+	}
+	batch.Put(writeKey, writeValue)
+	batch.Delete(mvccEncode(key, lockVer))
+	return nil
+}
+
+func getTxnCommitInfo(iter *Iterator, expectKey []byte, startTS uint64) (mvccValue, bool, error) {
+	for iter.Valid() {
+		dec := valueDecoder{
+			expectKey: expectKey,
+		}
+		ok, err := dec.Decode(iter)
+		if err != nil || !ok {
+			return mvccValue{}, ok, errors.Trace(err)
+		}
+
+		if dec.value.startTS == startTS {
+			return dec.value, true, nil
+		}
+	}
+	return mvccValue{}, false, nil
+}
+
+// Cleanup implements the MVCCStore interface.
+// The Cleanup API is deprecated; use CheckTxnStatus instead.
+func (mvcc *MVCCLevelDB) Cleanup(key []byte, startTS, currentTS uint64) error {
+	mvcc.mu.Lock()
+	defer func() {
+		mvcc.mu.Unlock()
+	}()
+
+	batch := &leveldb.Batch{}
+	startKey := mvccEncode(key, lockVer)
+	iter := newIterator(mvcc.db, &util.Range{
+		Start: startKey,
+	})
+	defer iter.Release()
+
+	if iter.Valid() {
+		dec := lockDecoder{
+			expectKey: key,
+		}
+		ok, err := dec.Decode(iter)
+		if err != nil {
+			return err
+		}
+		// If the current transaction's lock exists.
+		if ok && dec.lock.startTS == startTS {
+			// If the lock has already expired, clean it up.
+			if currentTS == 0 || uint64(oracle.ExtractPhysical(dec.lock.startTS))+dec.lock.ttl < uint64(oracle.ExtractPhysical(currentTS)) {
+				if err = rollbackLock(batch, key, startTS); err != nil {
+					return err
+				}
+				return mvcc.db.Write(batch, nil)
+			}
+
+			// Otherwise, return a locked error with the TTL information.
+			return dec.lock.lockErr(key)
+		}
+
+		// If the current transaction's lock does not exist,
+		// check whether the commit information of the current transaction exists.
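+		// getTxnCommitInfo scans this key's versions for a record written by the
+		// same startTS: a typeRollback record means the transaction was already
+		// rolled back, anything else means it committed.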
+		c, ok, err := getTxnCommitInfo(iter, key, startTS)
+		if err != nil {
+			return errors.Trace(err)
+		}
+		if ok {
+			// If the current transaction has already committed.
+			if c.valueType != typeRollback {
+				return ErrAlreadyCommitted(c.commitTS)
+			}
+			// If the current transaction has already been rolled back.
+			return nil
+		}
+	}
+
+	// If the current transaction was not prewritten before.
+	value := mvccValue{
+		valueType: typeRollback,
+		startTS:   startTS,
+		commitTS:  startTS,
+	}
+	writeKey := mvccEncode(key, startTS)
+	writeValue, err := value.MarshalBinary()
+	if err != nil {
+		return errors.Trace(err)
+	}
+	batch.Put(writeKey, writeValue)
+	return nil
+}
+
+// CheckTxnStatus checks the primary lock of a transaction to decide its status.
+// The return values are (ttl, commitTS, action, err):
+// If the transaction is active, this function returns the TTL of the lock;
+// If the transaction is committed, this function returns the commitTS;
+// If the transaction has been rolled back, this function returns (0, 0, Action_NoAction, nil).
+// Note that CheckTxnStatus may also push forward the `minCommitTS` of the
+// transaction, so it's not simply a read-only operation.
+//
+// primaryKey + lockTS together locate the primary lock.
+// currentTS is the current ts, but it may be inaccurate. It is only used to check the TTL.
+func (mvcc *MVCCLevelDB) CheckTxnStatus(primaryKey []byte, lockTS, currentTS uint64) (ttl uint64, commitTS uint64, action kvrpcpb.Action, err error) {
+	mvcc.mu.Lock()
+	defer mvcc.mu.Unlock()
+
+	action = kvrpcpb.Action_NoAction
+
+	startKey := mvccEncode(primaryKey, lockVer)
+	iter := newIterator(mvcc.db, &util.Range{
+		Start: startKey,
+	})
+	defer iter.Release()
+
+	if iter.Valid() {
+		dec := lockDecoder{
+			expectKey: primaryKey,
+		}
+		var ok bool
+		ok, err = dec.Decode(iter)
+		if err != nil {
+			err = errors.Trace(err)
+			return
+		}
+		// If the current transaction's lock exists.
+		if ok && dec.lock.startTS == lockTS {
+			lock := dec.lock
+			batch := &leveldb.Batch{}
+
+			// If the lock has already expired, clean it up.
+			if uint64(oracle.ExtractPhysical(lock.startTS))+lock.ttl < uint64(oracle.ExtractPhysical(currentTS)) {
+				if err = rollbackLock(batch, primaryKey, lockTS); err != nil {
+					err = errors.Trace(err)
+					return
+				}
+				if err = mvcc.db.Write(batch, nil); err != nil {
+					err = errors.Trace(err)
+					return
+				}
+				return 0, 0, kvrpcpb.Action_TTLExpireRollback, nil
+			}
+
+			return lock.ttl, 0, action, nil
+		}
+
+		// If the current transaction's lock does not exist,
+		// check whether the commit info of the current transaction exists.
+		c, ok, err1 := getTxnCommitInfo(iter, primaryKey, lockTS)
+		if err1 != nil {
+			err = errors.Trace(err1)
+			return
+		}
+		if ok {
+			// If the current transaction is already committed.
+			if c.valueType != typeRollback {
+				return 0, c.commitTS, action, nil
+			}
+			// If the current transaction has already been rolled back.
+			return 0, 0, kvrpcpb.Action_NoAction, nil
+		}
+	}
+
+	return 0, 0, action, nil
+}
+
+// TxnHeartBeat implements the MVCCStore interface.
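+//
+// TxnHeartBeat renews the TTL on the primary lock of a still-running
+// transaction so that other transactions do not treat it as abandoned and roll
+// it back; the TTL is only ever extended, never shortened. A caller-side
+// sketch (primary, startTS and the 3000ms advise TTL are example values):
+//
+//	ttl, err := store.TxnHeartBeat(primary, startTS, 3000)
+//	// ttl is the lock's TTL in milliseconds after the call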
+func (mvcc *MVCCLevelDB) TxnHeartBeat(key []byte, startTS uint64, adviseTTL uint64) (uint64, error) { + mvcc.mu.Lock() + defer mvcc.mu.Unlock() + + startKey := mvccEncode(key, lockVer) + iter := newIterator(mvcc.db, &util.Range{ + Start: startKey, + }) + defer iter.Release() + + if iter.Valid() { + dec := lockDecoder{ + expectKey: key, + } + ok, err := dec.Decode(iter) + if err != nil { + return 0, errors.Trace(err) + } + if ok && dec.lock.startTS == startTS { + if !bytes.Equal(dec.lock.primary, key) { + return 0, errors.New("txnHeartBeat on non-primary key, the code should not run here") + } + + lock := dec.lock + batch := &leveldb.Batch{} + // Increase the ttl of this transaction. + if adviseTTL > lock.ttl { + lock.ttl = adviseTTL + writeKey := mvccEncode(key, lockVer) + writeValue, err := lock.MarshalBinary() + if err != nil { + return 0, errors.Trace(err) + } + batch.Put(writeKey, writeValue) + if err = mvcc.db.Write(batch, nil); err != nil { + return 0, errors.Trace(err) + } + } + return lock.ttl, nil + } + } + return 0, errors.New("lock doesn't exist") +} + +// ScanLock implements the MVCCStore interface. +func (mvcc *MVCCLevelDB) ScanLock(startKey, endKey []byte, maxTS uint64) ([]*kvrpcpb.LockInfo, error) { + mvcc.mu.RLock() + defer mvcc.mu.RUnlock() + + iter, currKey, err := newScanIterator(mvcc.db, startKey, endKey) + defer iter.Release() + if err != nil { + return nil, errors.Trace(err) + } + + var locks []*kvrpcpb.LockInfo + for iter.Valid() { + dec := lockDecoder{expectKey: currKey} + ok, err := dec.Decode(iter) + if err != nil { + return nil, errors.Trace(err) + } + if ok && dec.lock.startTS <= maxTS { + locks = append(locks, &kvrpcpb.LockInfo{ + PrimaryLock: dec.lock.primary, + LockVersion: dec.lock.startTS, + Key: currKey, + }) + } + + skip := skipDecoder{currKey: currKey} + _, err = skip.Decode(iter) + if err != nil { + return nil, errors.Trace(err) + } + currKey = skip.currKey + } + return locks, nil +} + +// ResolveLock implements the MVCCStore interface. +func (mvcc *MVCCLevelDB) ResolveLock(startKey, endKey []byte, startTS, commitTS uint64) error { + mvcc.mu.Lock() + defer mvcc.mu.Unlock() + + iter, currKey, err := newScanIterator(mvcc.db, startKey, endKey) + defer iter.Release() + if err != nil { + return errors.Trace(err) + } + + batch := &leveldb.Batch{} + for iter.Valid() { + dec := lockDecoder{expectKey: currKey} + ok, err := dec.Decode(iter) + if err != nil { + return errors.Trace(err) + } + if ok && dec.lock.startTS == startTS { + if commitTS > 0 { + err = commitLock(batch, dec.lock, currKey, startTS, commitTS) + } else { + err = rollbackLock(batch, currKey, startTS) + } + if err != nil { + return errors.Trace(err) + } + } + + skip := skipDecoder{currKey: currKey} + _, err = skip.Decode(iter) + if err != nil { + return errors.Trace(err) + } + currKey = skip.currKey + } + return mvcc.db.Write(batch, nil) +} + +// BatchResolveLock implements the MVCCStore interface. 
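+//
+// BatchResolveLock commits or rolls back the locks of several transactions in
+// a single range scan. txnInfos maps a transaction's startTS to its commitTS,
+// where a commitTS of 0 means the transaction should be rolled back. A sketch
+// of a GC-style caller (the timestamps are made up):
+//
+//	txnInfos := map[uint64]uint64{
+//		100: 110, // started at ts 100, committed at ts 110
+//		120: 0,   // started at ts 120, roll it back
+//	}
+//	err := store.BatchResolveLock(startKey, endKey, txnInfos)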
+func (mvcc *MVCCLevelDB) BatchResolveLock(startKey, endKey []byte, txnInfos map[uint64]uint64) error { + mvcc.mu.Lock() + defer mvcc.mu.Unlock() + + iter, currKey, err := newScanIterator(mvcc.db, startKey, endKey) + defer iter.Release() + if err != nil { + return errors.Trace(err) + } + + batch := &leveldb.Batch{} + for iter.Valid() { + dec := lockDecoder{expectKey: currKey} + ok, err := dec.Decode(iter) + if err != nil { + return errors.Trace(err) + } + if ok { + if commitTS, ok := txnInfos[dec.lock.startTS]; ok { + if commitTS > 0 { + err = commitLock(batch, dec.lock, currKey, dec.lock.startTS, commitTS) + } else { + err = rollbackLock(batch, currKey, dec.lock.startTS) + } + if err != nil { + return errors.Trace(err) + } + } + } + + skip := skipDecoder{currKey: currKey} + _, err = skip.Decode(iter) + if err != nil { + return errors.Trace(err) + } + currKey = skip.currKey + } + return mvcc.db.Write(batch, nil) +} + +// GC implements the MVCCStore interface +func (mvcc *MVCCLevelDB) GC(startKey, endKey []byte, safePoint uint64) error { + mvcc.mu.Lock() + defer mvcc.mu.Unlock() + + iter, currKey, err := newScanIterator(mvcc.db, startKey, endKey) + defer iter.Release() + if err != nil { + return errors.Trace(err) + } + + // Mock TiKV usually doesn't need to process large amount of data. So write it in a single batch. + batch := &leveldb.Batch{} + + for iter.Valid() { + lockDec := lockDecoder{expectKey: currKey} + ok, err := lockDec.Decode(iter) + if err != nil { + return errors.Trace(err) + } + if ok && lockDec.lock.startTS <= safePoint { + return errors.Errorf( + "key %+q has lock with startTs %v which is under safePoint %v", + currKey, + lockDec.lock.startTS, + safePoint) + } + + keepNext := true + dec := valueDecoder{expectKey: currKey} + + for iter.Valid() { + ok, err := dec.Decode(iter) + if err != nil { + return errors.Trace(err) + } + + if !ok { + // Go to the next key + currKey, _, err = mvccDecode(iter.Key()) + if err != nil { + return errors.Trace(err) + } + break + } + + if dec.value.commitTS > safePoint { + continue + } + + if dec.value.valueType == typePut || dec.value.valueType == typeDelete { + // Keep the latest version if it's `typePut` + if !keepNext || dec.value.valueType == typeDelete { + batch.Delete(mvccEncode(currKey, dec.value.commitTS)) + } + keepNext = false + } else { + // Delete all other types + batch.Delete(mvccEncode(currKey, dec.value.commitTS)) + } + } + } + + return mvcc.db.Write(batch, nil) +} + +// DeleteRange implements the MVCCStore interface. +func (mvcc *MVCCLevelDB) DeleteRange(startKey, endKey []byte) error { + return mvcc.doRawDeleteRange(codec.EncodeBytes(nil, startKey), codec.EncodeBytes(nil, endKey)) +} + +// Close calls leveldb's Close to free resources. +func (mvcc *MVCCLevelDB) Close() error { + return mvcc.db.Close() +} + +// RawPut implements the RawKV interface. +func (mvcc *MVCCLevelDB) RawPut(key, value []byte) { + mvcc.mu.Lock() + defer mvcc.mu.Unlock() + + if value == nil { + value = []byte{} + } + terror.Log(mvcc.db.Put(key, value, nil)) +} + +// RawBatchPut implements the RawKV interface +func (mvcc *MVCCLevelDB) RawBatchPut(keys, values [][]byte) { + mvcc.mu.Lock() + defer mvcc.mu.Unlock() + + batch := &leveldb.Batch{} + for i, key := range keys { + value := values[i] + if value == nil { + value = []byte{} + } + batch.Put(key, value) + } + terror.Log(mvcc.db.Write(batch, nil)) +} + +// RawGet implements the RawKV interface. 
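+//
+// The Raw* methods operate on the underlying leveldb with the key as-is: no
+// mvccEncode, no locks and no timestamps are involved, so they bypass all of
+// the transactional machinery above. For example:
+//
+//	store.RawPut([]byte("k"), []byte("v"))
+//	v := store.RawGet([]byte("k")) // []byte("v"), no startTS needed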
+func (mvcc *MVCCLevelDB) RawGet(key []byte) []byte { + mvcc.mu.Lock() + defer mvcc.mu.Unlock() + + ret, err := mvcc.db.Get(key, nil) + terror.Log(err) + return ret +} + +// RawBatchGet implements the RawKV interface. +func (mvcc *MVCCLevelDB) RawBatchGet(keys [][]byte) [][]byte { + mvcc.mu.Lock() + defer mvcc.mu.Unlock() + + values := make([][]byte, 0, len(keys)) + for _, key := range keys { + value, err := mvcc.db.Get(key, nil) + terror.Log(err) + values = append(values, value) + } + return values +} + +// RawDelete implements the RawKV interface. +func (mvcc *MVCCLevelDB) RawDelete(key []byte) { + mvcc.mu.Lock() + defer mvcc.mu.Unlock() + + terror.Log(mvcc.db.Delete(key, nil)) +} + +// RawBatchDelete implements the RawKV interface. +func (mvcc *MVCCLevelDB) RawBatchDelete(keys [][]byte) { + mvcc.mu.Lock() + defer mvcc.mu.Unlock() + + batch := &leveldb.Batch{} + for _, key := range keys { + batch.Delete(key) + } + terror.Log(mvcc.db.Write(batch, nil)) +} + +// RawScan implements the RawKV interface. +func (mvcc *MVCCLevelDB) RawScan(startKey, endKey []byte, limit int) []Pair { + mvcc.mu.Lock() + defer mvcc.mu.Unlock() + + iter := mvcc.db.NewIterator(&util.Range{ + Start: startKey, + }, nil) + + var pairs []Pair + for iter.Next() && len(pairs) < limit { + key := iter.Key() + value := iter.Value() + err := iter.Error() + if len(endKey) > 0 && bytes.Compare(key, endKey) >= 0 { + break + } + pairs = append(pairs, Pair{ + Key: append([]byte{}, key...), + Value: append([]byte{}, value...), + Err: err, + }) + } + return pairs +} + +// RawReverseScan implements the RawKV interface. +// Scan the range of [endKey, startKey) +// It doesn't support Scanning from "", because locating the last Region is not yet implemented. +func (mvcc *MVCCLevelDB) RawReverseScan(startKey, endKey []byte, limit int) []Pair { + mvcc.mu.Lock() + defer mvcc.mu.Unlock() + + iter := mvcc.db.NewIterator(&util.Range{ + Limit: startKey, + }, nil) + + success := iter.Last() + + var pairs []Pair + for success && len(pairs) < limit { + key := iter.Key() + value := iter.Value() + err := iter.Error() + if bytes.Compare(key, endKey) < 0 { + break + } + pairs = append(pairs, Pair{ + Key: append([]byte{}, key...), + Value: append([]byte{}, value...), + Err: err, + }) + success = iter.Prev() + } + return pairs +} + +// RawDeleteRange implements the RawKV interface. +func (mvcc *MVCCLevelDB) RawDeleteRange(startKey, endKey []byte) { + terror.Log(mvcc.doRawDeleteRange(startKey, endKey)) +} + +// doRawDeleteRange deletes all keys in a range and return the error if any. +func (mvcc *MVCCLevelDB) doRawDeleteRange(startKey, endKey []byte) error { + mvcc.mu.Lock() + defer mvcc.mu.Unlock() + + batch := &leveldb.Batch{} + + iter := mvcc.db.NewIterator(&util.Range{ + Start: startKey, + Limit: endKey, + }, nil) + for iter.Next() { + batch.Delete(iter.Key()) + } + + return mvcc.db.Write(batch, nil) +} + +var valueTypeOpMap = [...]kvrpcpb.Op{ + typePut: kvrpcpb.Op_Put, + typeDelete: kvrpcpb.Op_Del, + typeRollback: kvrpcpb.Op_Rollback, +} diff --git a/store/mockstore/mocktikv/mvcc_test.go b/store/mockstore/mocktikv/mvcc_test.go new file mode 100644 index 0000000..0890e2e --- /dev/null +++ b/store/mockstore/mocktikv/mvcc_test.go @@ -0,0 +1,34 @@ +// Copyright 2018-present, PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package mocktikv + +import . "github.com/pingcap/check" + +var _ = Suite(testMvccSuite{}) + +type testMvccSuite struct { +} + +func (s testMvccSuite) TestRegionContains(c *C) { + c.Check(regionContains([]byte{}, []byte{}, []byte{}), IsTrue) + c.Check(regionContains([]byte{}, []byte{}, []byte{1}), IsTrue) + c.Check(regionContains([]byte{1, 1, 1}, []byte{}, []byte{1, 1, 0}), IsFalse) + c.Check(regionContains([]byte{1, 1, 1}, []byte{}, []byte{1, 1, 1}), IsTrue) + c.Check(regionContains([]byte{}, []byte{2, 2, 2}, []byte{2, 2, 1}), IsTrue) + c.Check(regionContains([]byte{}, []byte{2, 2, 2}, []byte{2, 2, 2}), IsFalse) + c.Check(regionContains([]byte{1, 1, 1}, []byte{2, 2, 2}, []byte{1, 1, 0}), IsFalse) + c.Check(regionContains([]byte{1, 1, 1}, []byte{2, 2, 2}, []byte{1, 1, 1}), IsTrue) + c.Check(regionContains([]byte{1, 1, 1}, []byte{2, 2, 2}, []byte{2, 2, 1}), IsTrue) + c.Check(regionContains([]byte{1, 1, 1}, []byte{2, 2, 2}, []byte{2, 2, 2}), IsFalse) +} diff --git a/store/mockstore/mocktikv/pd.go b/store/mockstore/mocktikv/pd.go new file mode 100644 index 0000000..82c6942 --- /dev/null +++ b/store/mockstore/mocktikv/pd.go @@ -0,0 +1,134 @@ +// Copyright 2016 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package mocktikv + +import ( + "context" + "sync" + "time" + + "github.com/pingcap-incubator/tinykv/proto/pkg/metapb" + "github.com/pingcap-incubator/tinykv/proto/pkg/schedulerpb" + pd "github.com/pingcap-incubator/tinykv/scheduler/client" +) + +// Use global variables to prevent pdClients from creating duplicate timestamps. +var tsMu = struct { + sync.Mutex + physicalTS int64 + logicalTS int64 +}{} + +type pdClient struct { + cluster *Cluster + // SafePoint set by `UpdateGCSafePoint`. Not to be confused with SafePointKV. + gcSafePoint uint64 + gcSafePointMu sync.Mutex +} + +// NewPDClient creates a mock pd.Client that uses local timestamp and meta data +// from a Cluster. 
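+//
+// GetTS hands out timestamps from the shared tsMu state: the physical part is
+// the wall clock in milliseconds and the logical part is a counter bumped when
+// two calls land in the same millisecond, so timestamps are strictly
+// increasing across every client in the process. A caller would normally fold
+// the two parts into one TSO (sketch, assuming the oracle package's
+// physical/logical encoding):
+//
+//	physical, logical, err := pdCli.GetTS(ctx)
+//	ts := oracle.ComposeTS(physical, logical)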
+func NewPDClient(cluster *Cluster) pd.Client { + return &pdClient{ + cluster: cluster, + } +} + +func (c *pdClient) GetClusterID(ctx context.Context) uint64 { + return 1 +} + +func (c *pdClient) GetTS(context.Context) (int64, int64, error) { + tsMu.Lock() + defer tsMu.Unlock() + + ts := time.Now().UnixNano() / int64(time.Millisecond) + if tsMu.physicalTS >= ts { + tsMu.logicalTS++ + } else { + tsMu.physicalTS = ts + tsMu.logicalTS = 0 + } + return tsMu.physicalTS, tsMu.logicalTS, nil +} + +func (c *pdClient) GetTSAsync(ctx context.Context) pd.TSFuture { + return &mockTSFuture{c, ctx} +} + +type mockTSFuture struct { + pdc *pdClient + ctx context.Context +} + +func (m *mockTSFuture) Wait() (int64, int64, error) { + return m.pdc.GetTS(m.ctx) +} + +func (c *pdClient) GetRegion(ctx context.Context, key []byte) (*metapb.Region, *metapb.Peer, error) { + region, peer := c.cluster.GetRegionByKey(key) + return region, peer, nil +} + +func (c *pdClient) GetPrevRegion(ctx context.Context, key []byte) (*metapb.Region, *metapb.Peer, error) { + region, peer := c.cluster.GetPrevRegionByKey(key) + return region, peer, nil +} + +func (c *pdClient) GetRegionByID(ctx context.Context, regionID uint64) (*metapb.Region, *metapb.Peer, error) { + region, peer := c.cluster.GetRegionByID(regionID) + return region, peer, nil +} + +func (c *pdClient) ScanRegions(ctx context.Context, startKey []byte, endKey []byte, limit int) ([]*metapb.Region, []*metapb.Peer, error) { + regions, peers := c.cluster.ScanRegions(startKey, endKey, limit) + return regions, peers, nil +} + +func (c *pdClient) GetStore(ctx context.Context, storeID uint64) (*metapb.Store, error) { + select { + case <-ctx.Done(): + return nil, ctx.Err() + default: + } + store := c.cluster.GetStore(storeID) + return store, nil +} + +func (c *pdClient) GetAllStores(ctx context.Context, opts ...pd.GetStoreOption) ([]*metapb.Store, error) { + return c.cluster.GetAllStores(), nil +} + +func (c *pdClient) UpdateGCSafePoint(ctx context.Context, safePoint uint64) (uint64, error) { + c.gcSafePointMu.Lock() + defer c.gcSafePointMu.Unlock() + + if safePoint > c.gcSafePoint { + c.gcSafePoint = safePoint + } + return c.gcSafePoint, nil +} + +func (c *pdClient) Close() { +} + +func (c *pdClient) ScatterRegion(ctx context.Context, regionID uint64) error { + return nil +} + +func (c *pdClient) GetOperator(ctx context.Context, regionID uint64) (*schedulerpb.GetOperatorResponse, error) { + return &schedulerpb.GetOperatorResponse{Status: schedulerpb.OperatorStatus_SUCCESS}, nil +} + +func (c *pdClient) GetLeaderAddr() string { return "mockpd" } diff --git a/store/mockstore/mocktikv/rpc.go b/store/mockstore/mocktikv/rpc.go new file mode 100644 index 0000000..e7e3f9e --- /dev/null +++ b/store/mockstore/mocktikv/rpc.go @@ -0,0 +1,550 @@ +// Copyright 2016 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package mocktikv + +import ( + "context" + "fmt" + "io" + "time" + + "github.com/golang/protobuf/proto" + "github.com/pingcap-incubator/tinykv/proto/pkg/coprocessor" + "github.com/pingcap-incubator/tinykv/proto/pkg/errorpb" + "github.com/pingcap-incubator/tinykv/proto/pkg/kvrpcpb" + "github.com/pingcap-incubator/tinykv/proto/pkg/metapb" + "github.com/pingcap/errors" + "github.com/pingcap/failpoint" + "github.com/pingcap/tidb/kv" + "github.com/pingcap/tidb/parser/terror" + "github.com/pingcap/tidb/store/tikv/tikvrpc" +) + +// For gofail injection. +var undeterminedErr = terror.ErrResultUndetermined + +func checkGoContext(ctx context.Context) error { + select { + case <-ctx.Done(): + return ctx.Err() + default: + return nil + } +} + +func convertToKeyError(err error) *kvrpcpb.KeyError { + if locked, ok := errors.Cause(err).(*ErrLocked); ok { + return &kvrpcpb.KeyError{ + Locked: &kvrpcpb.LockInfo{ + Key: locked.Key.Raw(), + PrimaryLock: locked.Primary, + LockVersion: locked.StartTS, + LockTtl: locked.TTL, + }, + } + } + if writeConflict, ok := errors.Cause(err).(*ErrConflict); ok { + return &kvrpcpb.KeyError{ + Conflict: &kvrpcpb.WriteConflict{ + Key: writeConflict.Key, + ConflictTs: writeConflict.ConflictTS, + StartTs: writeConflict.StartTS, + }, + } + } + if retryable, ok := errors.Cause(err).(ErrRetryable); ok { + return &kvrpcpb.KeyError{ + Retryable: retryable.Error(), + } + } + return &kvrpcpb.KeyError{ + Abort: err.Error(), + } +} + +func convertToKeyErrors(errs []error) []*kvrpcpb.KeyError { + var keyErrors = make([]*kvrpcpb.KeyError, 0) + for _, err := range errs { + if err != nil { + keyErrors = append(keyErrors, convertToKeyError(err)) + } + } + return keyErrors +} + +func convertToPbPairs(pairs []Pair) []*kvrpcpb.KvPair { + kvPairs := make([]*kvrpcpb.KvPair, 0, len(pairs)) + for _, p := range pairs { + var kvPair *kvrpcpb.KvPair + if p.Err == nil { + kvPair = &kvrpcpb.KvPair{ + Key: p.Key, + Value: p.Value, + } + } else { + kvPair = &kvrpcpb.KvPair{ + Error: convertToKeyError(p.Err), + } + } + kvPairs = append(kvPairs, kvPair) + } + return kvPairs +} + +// rpcHandler mocks tikv's side handler behavior. In general, you may assume +// TiKV just translate the logic from Go to Rust. +type rpcHandler struct { + cluster *Cluster + mvccStore MVCCStore + + // storeID stores id for current request + storeID uint64 + // startKey is used for handling normal request. + startKey []byte + endKey []byte + // rawStartKey is used for handling coprocessor request. + rawStartKey []byte + rawEndKey []byte +} + +func (h *rpcHandler) checkRequestContext(ctx *kvrpcpb.Context) *errorpb.Error { + ctxPeer := ctx.GetPeer() + if ctxPeer != nil && ctxPeer.GetStoreId() != h.storeID { + return &errorpb.Error{ + Message: *proto.String("store not match"), + StoreNotMatch: &errorpb.StoreNotMatch{}, + } + } + region, leaderID := h.cluster.GetRegion(ctx.GetRegionId()) + // No region found. + if region == nil { + return &errorpb.Error{ + Message: *proto.String("region not found"), + RegionNotFound: &errorpb.RegionNotFound{ + RegionId: *proto.Uint64(ctx.GetRegionId()), + }, + } + } + var storePeer, leaderPeer *metapb.Peer + for _, p := range region.Peers { + if p.GetStoreId() == h.storeID { + storePeer = p + } + if p.GetId() == leaderID { + leaderPeer = p + } + } + // The Store does not contain a Peer of the Region. 
+ if storePeer == nil { + return &errorpb.Error{ + Message: *proto.String("region not found"), + RegionNotFound: &errorpb.RegionNotFound{ + RegionId: *proto.Uint64(ctx.GetRegionId()), + }, + } + } + // No leader. + if leaderPeer == nil { + return &errorpb.Error{ + Message: *proto.String("no leader"), + NotLeader: &errorpb.NotLeader{ + RegionId: *proto.Uint64(ctx.GetRegionId()), + }, + } + } + // The Peer on the Store is not leader. + if storePeer.GetId() != leaderPeer.GetId() { + return &errorpb.Error{ + Message: *proto.String("not leader"), + NotLeader: &errorpb.NotLeader{ + RegionId: *proto.Uint64(ctx.GetRegionId()), + Leader: leaderPeer, + }, + } + } + // Region epoch does not match. + if !proto.Equal(region.GetRegionEpoch(), ctx.GetRegionEpoch()) { + nextRegion, _ := h.cluster.GetRegionByKey(region.GetEndKey()) + currentRegions := []*metapb.Region{region} + if nextRegion != nil { + currentRegions = append(currentRegions, nextRegion) + } + return &errorpb.Error{ + Message: *proto.String("epoch not match"), + EpochNotMatch: &errorpb.EpochNotMatch{ + CurrentRegions: currentRegions, + }, + } + } + h.startKey, h.endKey = region.StartKey, region.EndKey + return nil +} + +func (h *rpcHandler) checkRequest(ctx *kvrpcpb.Context, size int) *errorpb.Error { + return h.checkRequestContext(ctx) +} + +func (h *rpcHandler) checkKeyInRegion(key []byte) bool { + return regionContains(h.startKey, h.endKey, []byte(NewMvccKey(key))) +} + +func (h *rpcHandler) handleKvGet(req *kvrpcpb.GetRequest) *kvrpcpb.GetResponse { + if !h.checkKeyInRegion(req.Key) { + panic("KvGet: key not in region") + } + + val, err := h.mvccStore.Get(req.Key, req.GetVersion()) + if err != nil { + return &kvrpcpb.GetResponse{ + Error: convertToKeyError(err), + } + } + return &kvrpcpb.GetResponse{ + Value: val, + } +} + +func (h *rpcHandler) handleKvScan(req *kvrpcpb.ScanRequest) *kvrpcpb.ScanResponse { + endKey := MvccKey(h.endKey).Raw() + if !h.checkKeyInRegion(req.GetStartKey()) { + panic("KvScan: startKey not in region") + } + pairs := h.mvccStore.Scan(req.GetStartKey(), endKey, int(req.GetLimit()), req.GetVersion()) + + return &kvrpcpb.ScanResponse{ + Pairs: convertToPbPairs(pairs), + } +} + +func (h *rpcHandler) handleKvPrewrite(req *kvrpcpb.PrewriteRequest) *kvrpcpb.PrewriteResponse { + for _, m := range req.Mutations { + if !h.checkKeyInRegion(m.Key) { + panic("KvPrewrite: key not in region") + } + } + errs := h.mvccStore.Prewrite(req) + return &kvrpcpb.PrewriteResponse{ + Errors: convertToKeyErrors(errs), + } +} + +func (h *rpcHandler) handleKvCommit(req *kvrpcpb.CommitRequest) *kvrpcpb.CommitResponse { + for _, k := range req.Keys { + if !h.checkKeyInRegion(k) { + panic("KvCommit: key not in region") + } + } + var resp kvrpcpb.CommitResponse + err := h.mvccStore.Commit(req.Keys, req.GetStartVersion(), req.GetCommitVersion()) + if err != nil { + resp.Error = convertToKeyError(err) + } + return &resp +} + +func (h *rpcHandler) handleKvCheckTxnStatus(req *kvrpcpb.CheckTxnStatusRequest) (*kvrpcpb.CheckTxnStatusResponse, error) { + if !h.checkKeyInRegion(req.PrimaryKey) { + panic("KvCheckTxnStatus: key not in region") + } + var resp kvrpcpb.CheckTxnStatusResponse + ttl, commitTS, action, err := h.mvccStore.CheckTxnStatus(req.GetPrimaryKey(), req.GetLockTs(), req.GetCurrentTs()) + if err != nil { + return nil, err + } + resp.LockTtl, resp.CommitVersion, resp.Action = ttl, commitTS, action + return &resp, nil +} + +func (h *rpcHandler) handleKvBatchRollback(req *kvrpcpb.BatchRollbackRequest) *kvrpcpb.BatchRollbackResponse { + err 
:= h.mvccStore.Rollback(req.Keys, req.StartVersion) + if err != nil { + return &kvrpcpb.BatchRollbackResponse{ + Error: convertToKeyError(err), + } + } + return &kvrpcpb.BatchRollbackResponse{} +} + +func (h *rpcHandler) handleKvResolveLock(req *kvrpcpb.ResolveLockRequest) *kvrpcpb.ResolveLockResponse { + startKey := MvccKey(h.startKey).Raw() + endKey := MvccKey(h.endKey).Raw() + err := h.mvccStore.ResolveLock(startKey, endKey, req.GetStartVersion(), req.GetCommitVersion()) + if err != nil { + return &kvrpcpb.ResolveLockResponse{ + Error: convertToKeyError(err), + } + } + return &kvrpcpb.ResolveLockResponse{} +} + +func (h *rpcHandler) handleKvRawGet(req *kvrpcpb.RawGetRequest) *kvrpcpb.RawGetResponse { + rawKV, ok := h.mvccStore.(RawKV) + if !ok { + return &kvrpcpb.RawGetResponse{ + Error: "not implemented", + } + } + return &kvrpcpb.RawGetResponse{ + Value: rawKV.RawGet(req.GetKey()), + } +} + +func (h *rpcHandler) handleKvRawPut(req *kvrpcpb.RawPutRequest) *kvrpcpb.RawPutResponse { + rawKV, ok := h.mvccStore.(RawKV) + if !ok { + return &kvrpcpb.RawPutResponse{ + Error: "not implemented", + } + } + rawKV.RawPut(req.GetKey(), req.GetValue()) + return &kvrpcpb.RawPutResponse{} +} + +func (h *rpcHandler) handleKvRawDelete(req *kvrpcpb.RawDeleteRequest) *kvrpcpb.RawDeleteResponse { + rawKV, ok := h.mvccStore.(RawKV) + if !ok { + return &kvrpcpb.RawDeleteResponse{ + Error: "not implemented", + } + } + rawKV.RawDelete(req.GetKey()) + return &kvrpcpb.RawDeleteResponse{} +} + +func (h *rpcHandler) handleKvRawScan(req *kvrpcpb.RawScanRequest) *kvrpcpb.RawScanResponse { + rawKV, ok := h.mvccStore.(RawKV) + if !ok { + errStr := "not implemented" + return &kvrpcpb.RawScanResponse{ + RegionError: &errorpb.Error{ + Message: errStr, + }, + } + } + + var pairs []Pair + upperBound := h.endKey + pairs = rawKV.RawScan( + req.StartKey, + upperBound, + int(req.GetLimit()), + ) + + return &kvrpcpb.RawScanResponse{ + Kvs: convertToPbPairs(pairs), + } +} + +// RPCClient sends kv RPC calls to mock cluster. RPCClient mocks the behavior of +// a rpc client at tikv's side. +type RPCClient struct { + Cluster *Cluster + MvccStore MVCCStore + done chan struct{} +} + +// NewRPCClient creates an RPCClient. +// Note that close the RPCClient may close the underlying MvccStore. +func NewRPCClient(cluster *Cluster, mvccStore MVCCStore) *RPCClient { + done := make(chan struct{}) + return &RPCClient{ + Cluster: cluster, + MvccStore: mvccStore, + done: done, + } +} + +func (c *RPCClient) getAndCheckStoreByAddr(addr string) (*metapb.Store, error) { + store, err := c.Cluster.GetAndCheckStoreByAddr(addr) + if err != nil { + return nil, err + } + if store == nil { + return nil, errors.New("connect fail") + } + if store.GetState() == metapb.StoreState_Offline || + store.GetState() == metapb.StoreState_Tombstone { + return nil, errors.New("connection refused") + } + return store, nil +} + +func (c *RPCClient) checkArgs(ctx context.Context, addr string) (*rpcHandler, error) { + if err := checkGoContext(ctx); err != nil { + return nil, err + } + + store, err := c.getAndCheckStoreByAddr(addr) + if err != nil { + return nil, err + } + handler := &rpcHandler{ + cluster: c.Cluster, + mvccStore: c.MvccStore, + // set store id for current request + storeID: store.GetId(), + } + return handler, nil +} + +// SendRequest sends a request to mock cluster. 
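+//
+// It dispatches on req.Type, runs the region/peer/epoch checks from
+// checkRequestContext, and then calls the matching handleKv* method, so region
+// errors and key errors surface much as they would from a real TiKV. Some
+// commands also carry failpoints; a test could, for example, force every
+// prewrite to see a NotLeader error (the full failpoint path is an assumption
+// based on this package's import path):
+//
+//	fp := "github.com/pingcap/tidb/store/mockstore/mocktikv/rpcPrewriteResult"
+//	failpoint.Enable(fp, `return("notLeader")`)
+//	defer failpoint.Disable(fp)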
+func (c *RPCClient) SendRequest(ctx context.Context, addr string, req *tikvrpc.Request, timeout time.Duration) (*tikvrpc.Response, error) { + reqCtx := &req.Context + resp := &tikvrpc.Response{} + + handler, err := c.checkArgs(ctx, addr) + if err != nil { + return nil, err + } + switch req.Type { + case tikvrpc.CmdGet: + r := req.Get() + if err := handler.checkRequest(reqCtx, r.Size()); err != nil { + resp.Resp = &kvrpcpb.GetResponse{RegionError: err} + return resp, nil + } + resp.Resp = handler.handleKvGet(r) + case tikvrpc.CmdScan: + r := req.Scan() + if err := handler.checkRequest(reqCtx, r.Size()); err != nil { + resp.Resp = &kvrpcpb.ScanResponse{RegionError: err} + return resp, nil + } + resp.Resp = handler.handleKvScan(r) + + case tikvrpc.CmdPrewrite: + failpoint.Inject("rpcPrewriteResult", func(val failpoint.Value) { + switch val.(string) { + case "notLeader": + failpoint.Return(&tikvrpc.Response{ + Resp: &kvrpcpb.PrewriteResponse{RegionError: &errorpb.Error{NotLeader: &errorpb.NotLeader{}}}, + }, nil) + } + }) + + r := req.Prewrite() + if err := handler.checkRequest(reqCtx, r.Size()); err != nil { + resp.Resp = &kvrpcpb.PrewriteResponse{RegionError: err} + return resp, nil + } + resp.Resp = handler.handleKvPrewrite(r) + case tikvrpc.CmdCommit: + failpoint.Inject("rpcCommitResult", func(val failpoint.Value) { + switch val.(string) { + case "timeout": + failpoint.Return(nil, errors.New("timeout")) + case "notLeader": + failpoint.Return(&tikvrpc.Response{ + Resp: &kvrpcpb.CommitResponse{RegionError: &errorpb.Error{NotLeader: &errorpb.NotLeader{}}}, + }, nil) + case "keyError": + failpoint.Return(&tikvrpc.Response{ + Resp: &kvrpcpb.CommitResponse{Error: &kvrpcpb.KeyError{}}, + }, nil) + } + }) + + r := req.Commit() + if err := handler.checkRequest(reqCtx, r.Size()); err != nil { + resp.Resp = &kvrpcpb.CommitResponse{RegionError: err} + return resp, nil + } + resp.Resp = handler.handleKvCommit(r) + failpoint.Inject("rpcCommitTimeout", func(val failpoint.Value) { + if val.(bool) { + failpoint.Return(nil, undeterminedErr) + } + }) + case tikvrpc.CmdCheckTxnStatus: + r := req.CheckTxnStatus() + if err := handler.checkRequest(reqCtx, r.Size()); err != nil { + resp.Resp = &kvrpcpb.CheckTxnStatusResponse{RegionError: err} + return resp, nil + } + resp.Resp, err = handler.handleKvCheckTxnStatus(r) + return resp, err + case tikvrpc.CmdBatchRollback: + r := req.BatchRollback() + if err := handler.checkRequest(reqCtx, r.Size()); err != nil { + resp.Resp = &kvrpcpb.BatchRollbackResponse{RegionError: err} + return resp, nil + } + resp.Resp = handler.handleKvBatchRollback(r) + case tikvrpc.CmdResolveLock: + r := req.ResolveLock() + if err := handler.checkRequest(reqCtx, r.Size()); err != nil { + resp.Resp = &kvrpcpb.ResolveLockResponse{RegionError: err} + return resp, nil + } + resp.Resp = handler.handleKvResolveLock(r) + case tikvrpc.CmdRawGet: + r := req.RawGet() + if err := handler.checkRequest(reqCtx, r.Size()); err != nil { + resp.Resp = &kvrpcpb.RawGetResponse{RegionError: err} + return resp, nil + } + resp.Resp = handler.handleKvRawGet(r) + case tikvrpc.CmdRawPut: + r := req.RawPut() + if err := handler.checkRequest(reqCtx, r.Size()); err != nil { + resp.Resp = &kvrpcpb.RawPutResponse{RegionError: err} + return resp, nil + } + resp.Resp = handler.handleKvRawPut(r) + case tikvrpc.CmdRawDelete: + r := req.RawDelete() + if err := handler.checkRequest(reqCtx, r.Size()); err != nil { + resp.Resp = &kvrpcpb.RawDeleteResponse{RegionError: err} + return resp, nil + } + resp.Resp = 
handler.handleKvRawDelete(r) + case tikvrpc.CmdRawScan: + r := req.RawScan() + if err := handler.checkRequest(reqCtx, r.Size()); err != nil { + resp.Resp = &kvrpcpb.RawScanResponse{RegionError: err} + return resp, nil + } + resp.Resp = handler.handleKvRawScan(r) + case tikvrpc.CmdCop: + r := req.Cop() + if err := handler.checkRequestContext(reqCtx); err != nil { + resp.Resp = &coprocessor.Response{RegionError: err} + return resp, nil + } + handler.rawStartKey = MvccKey(handler.startKey).Raw() + handler.rawEndKey = MvccKey(handler.endKey).Raw() + var res *coprocessor.Response + switch r.GetTp() { + case kv.ReqTypeDAG: + res = handler.handleCopDAGRequest(r) + case kv.ReqTypeAnalyze: + res = handler.handleCopAnalyzeRequest(r) + default: + panic(fmt.Sprintf("unknown coprocessor request type: %v", r.GetTp())) + } + resp.Resp = res + default: + return nil, errors.Errorf("unsupported this request type %v", req.Type) + } + return resp, nil +} + +// Close closes the client. +func (c *RPCClient) Close() error { + close(c.done) + if raw, ok := c.MvccStore.(io.Closer); ok { + return raw.Close() + } + return nil +} diff --git a/store/mockstore/mocktikv/topn.go b/store/mockstore/mocktikv/topn.go new file mode 100644 index 0000000..3b2a5d9 --- /dev/null +++ b/store/mockstore/mocktikv/topn.go @@ -0,0 +1,139 @@ +// Copyright 2017 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package mocktikv + +import ( + "container/heap" + + "github.com/pingcap/errors" + "github.com/pingcap/tidb/sessionctx/stmtctx" + "github.com/pingcap/tidb/types" + "github.com/pingcap/tipb/go-tipb" +) + +type sortRow struct { + key []types.Datum + data [][]byte +} + +// topNSorter implements sort.Interface. When all rows have been processed, the topNSorter will sort the whole data in heap. +type topNSorter struct { + orderByItems []*tipb.ByItem + rows []*sortRow + err error + sc *stmtctx.StatementContext +} + +func (t *topNSorter) Len() int { + return len(t.rows) +} + +func (t *topNSorter) Swap(i, j int) { + t.rows[i], t.rows[j] = t.rows[j], t.rows[i] +} + +func (t *topNSorter) Less(i, j int) bool { + for index, by := range t.orderByItems { + v1 := t.rows[i].key[index] + v2 := t.rows[j].key[index] + + ret, err := v1.CompareDatum(t.sc, &v2) + if err != nil { + t.err = errors.Trace(err) + return true + } + + if by.Desc { + ret = -ret + } + + if ret < 0 { + return true + } else if ret > 0 { + return false + } + } + + return false +} + +// topNHeap holds the top n elements using heap structure. It implements heap.Interface. +// When we insert a row, topNHeap will check if the row can become one of the top n element or not. +type topNHeap struct { + topNSorter + + // totalCount is equal to the limit count, which means the max size of heap. + totalCount int + // heapSize means the current size of this heap. 
+ heapSize int +} + +func (t *topNHeap) Len() int { + return t.heapSize +} + +func (t *topNHeap) Push(x interface{}) { + t.rows = append(t.rows, x.(*sortRow)) + t.heapSize++ +} + +func (t *topNHeap) Pop() interface{} { + return nil +} + +func (t *topNHeap) Less(i, j int) bool { + for index, by := range t.orderByItems { + v1 := t.rows[i].key[index] + v2 := t.rows[j].key[index] + + ret, err := v1.CompareDatum(t.sc, &v2) + if err != nil { + t.err = errors.Trace(err) + return true + } + + if by.Desc { + ret = -ret + } + + if ret > 0 { + return true + } else if ret < 0 { + return false + } + } + + return false +} + +// tryToAddRow tries to add a row to heap. +// When this row is not less than any rows in heap, it will never become the top n element. +// Then this function returns false. +func (t *topNHeap) tryToAddRow(row *sortRow) bool { + success := false + if t.heapSize == t.totalCount { + t.rows = append(t.rows, row) + // When this row is less than the top element, it will replace it and adjust the heap structure. + if t.Less(0, t.heapSize) { + t.Swap(0, t.heapSize) + heap.Fix(t, 0) + success = true + } + t.rows = t.rows[:t.heapSize] + } else { + heap.Push(t, row) + success = true + } + return success +} diff --git a/store/mockstore/tikv.go b/store/mockstore/tikv.go new file mode 100644 index 0000000..6ab404f --- /dev/null +++ b/store/mockstore/tikv.go @@ -0,0 +1,99 @@ +// Copyright 2018 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package mockstore + +import ( + "net/url" + "strings" + + "github.com/pingcap-incubator/tinykv/scheduler/client" + "github.com/pingcap/errors" + "github.com/pingcap/tidb/kv" + "github.com/pingcap/tidb/store/mockstore/mocktikv" + "github.com/pingcap/tidb/store/tikv" +) + +// MockDriver is in memory mock TiKV driver. +type MockDriver struct { +} + +// Open creates a MockTiKV storage. +func (d MockDriver) Open(path string) (kv.Storage, error) { + u, err := url.Parse(path) + if err != nil { + return nil, errors.Trace(err) + } + if !strings.EqualFold(u.Scheme, "mocktikv") { + return nil, errors.Errorf("Uri scheme expected(mocktikv) but found (%s)", u.Scheme) + } + + opts := []MockTiKVStoreOption{WithPath(u.Path)} + return NewMockTikvStore(opts...) +} + +type mockOptions struct { + cluster *mocktikv.Cluster + mvccStore mocktikv.MVCCStore + clientHijack func(tikv.Client) tikv.Client + pdClientHijack func(pd.Client) pd.Client + path string +} + +// MockTiKVStoreOption is used to control some behavior of mock tikv. +type MockTiKVStoreOption func(*mockOptions) + +// WithHijackClient hijacks KV client's behavior, makes it easy to simulate the network +// problem between TiDB and TiKV. +func WithHijackClient(wrap func(tikv.Client) tikv.Client) MockTiKVStoreOption { + return func(c *mockOptions) { + c.clientHijack = wrap + } +} + +// WithCluster provides the customized cluster. +func WithCluster(cluster *mocktikv.Cluster) MockTiKVStoreOption { + return func(c *mockOptions) { + c.cluster = cluster + } +} + +// WithMVCCStore provides the customized mvcc store. 
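+//
+// It is usually paired with WithCluster so the cluster metadata and the data
+// it routes to stay in sync. A minimal test-setup sketch using only helpers
+// from this package:
+//
+//	store, err := mockstore.NewMockTikvStore(
+//		mockstore.WithMVCCStore(mocktikv.MustNewMVCCStore()),
+//	)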
+func WithMVCCStore(store mocktikv.MVCCStore) MockTiKVStoreOption { + return func(c *mockOptions) { + c.mvccStore = store + } +} + +// WithPath specifies the mocktikv path. +func WithPath(path string) MockTiKVStoreOption { + return func(c *mockOptions) { + c.path = path + } +} + +// NewMockTikvStore creates a mocked tikv store, the path is the file path to store the data. +// If path is an empty string, a memory storage will be created. +func NewMockTikvStore(options ...MockTiKVStoreOption) (kv.Storage, error) { + var opt mockOptions + for _, f := range options { + f(&opt) + } + + client, pdClient, err := mocktikv.NewTiKVAndPDClient(opt.cluster, opt.mvccStore, opt.path) + if err != nil { + return nil, errors.Trace(err) + } + + return tikv.NewTestTiKVStore(client, pdClient, opt.clientHijack, opt.pdClientHijack) +} diff --git a/store/store.go b/store/store.go new file mode 100644 index 0000000..48c6a58 --- /dev/null +++ b/store/store.go @@ -0,0 +1,79 @@ +// Copyright 2018 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package store + +import ( + "net/url" + "strings" + + "github.com/pingcap/errors" + "github.com/pingcap/tidb/kv" + "github.com/pingcap/tidb/util" + "github.com/pingcap/tidb/util/logutil" + "go.uber.org/zap" +) + +var stores = make(map[string]kv.Driver) + +// Register registers a kv storage with unique name and its associated Driver. +func Register(name string, driver kv.Driver) error { + name = strings.ToLower(name) + + if _, ok := stores[name]; ok { + return errors.Errorf("%s is already registered", name) + } + + stores[name] = driver + return nil +} + +// New creates a kv Storage with path. +// +// The path must be a URL format 'engine://path?params' like the one for +// session.Open() but with the dbname cut off. +// Examples: +// goleveldb://relative/path +// boltdb:///absolute/path +// +// The engine should be registered before creating storage. +func New(path string) (kv.Storage, error) { + return newStoreWithRetry(path, util.DefaultMaxRetries) +} + +func newStoreWithRetry(path string, maxRetries int) (kv.Storage, error) { + storeURL, err := url.Parse(path) + if err != nil { + return nil, err + } + + name := strings.ToLower(storeURL.Scheme) + d, ok := stores[name] + if !ok { + return nil, errors.Errorf("invalid uri format, storage %s is not registered", name) + } + + var s kv.Storage + err = util.RunWithRetry(maxRetries, util.RetryInterval, func() (bool, error) { + logutil.BgLogger().Info("new store", zap.String("path", path)) + s, err = d.Open(path) + return kv.IsTxnRetryableError(err), err + }) + + if err == nil { + logutil.BgLogger().Info("new store with retry success") + } else { + logutil.BgLogger().Warn("new store with retry failed", zap.Error(err)) + } + return s, errors.Trace(err) +} diff --git a/store/store_test.go b/store/store_test.go new file mode 100644 index 0000000..3faf49b --- /dev/null +++ b/store/store_test.go @@ -0,0 +1,687 @@ +// Copyright 2015 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package store + +import ( + "context" + "fmt" + "os" + "strconv" + "sync" + "sync/atomic" + "testing" + "time" + + . "github.com/pingcap/check" + "github.com/pingcap/tidb/kv" + "github.com/pingcap/tidb/store/mockstore" + "github.com/pingcap/tidb/util/logutil" + "github.com/pingcap/tidb/util/testleak" +) + +const ( + startIndex = 0 + testCount = 2 + indexStep = 2 +) + +type brokenStore struct{} + +func (s *brokenStore) Open(schema string) (kv.Storage, error) { + return nil, kv.ErrTxnRetryable +} + +func TestT(t *testing.T) { + CustomVerboseFlag = true + logLevel := os.Getenv("log_level") + logutil.InitLogger(logutil.NewLogConfig(logLevel, logutil.DefaultLogFormat, logutil.EmptyFileLogConfig, false)) + TestingT(t) +} + +var _ = Suite(&testKVSuite{}) + +type testKVSuite struct { + s kv.Storage +} + +func (s *testKVSuite) SetUpSuite(c *C) { + testleak.BeforeTest() + store, err := mockstore.NewMockTikvStore() + c.Assert(err, IsNil) + s.s = store +} + +func (s *testKVSuite) TearDownSuite(c *C) { + err := s.s.Close() + c.Assert(err, IsNil) + testleak.AfterTest(c)() +} + +func insertData(c *C, txn kv.Transaction) { + for i := startIndex; i < testCount; i++ { + val := encodeInt(i * indexStep) + err := txn.Set(val, val) + c.Assert(err, IsNil) + } +} + +func mustDel(c *C, txn kv.Transaction) { + for i := startIndex; i < testCount; i++ { + val := encodeInt(i * indexStep) + err := txn.Delete(val) + c.Assert(err, IsNil) + } +} + +func encodeInt(n int) []byte { + return []byte(fmt.Sprintf("%010d", n)) +} + +func decodeInt(s []byte) int { + var n int + fmt.Sscanf(string(s), "%010d", &n) + return n +} + +func valToStr(c *C, iter kv.Iterator) string { + val := iter.Value() + return string(val) +} + +func checkSeek(c *C, txn kv.Transaction) { + for i := startIndex; i < testCount; i++ { + val := encodeInt(i * indexStep) + iter, err := txn.Iter(val, nil) + c.Assert(err, IsNil) + c.Assert([]byte(iter.Key()), BytesEquals, val) + c.Assert(decodeInt([]byte(valToStr(c, iter))), Equals, i*indexStep) + iter.Close() + } + + // Test iterator Next() + for i := startIndex; i < testCount-1; i++ { + val := encodeInt(i * indexStep) + iter, err := txn.Iter(val, nil) + c.Assert(err, IsNil) + c.Assert([]byte(iter.Key()), BytesEquals, val) + c.Assert(valToStr(c, iter), Equals, string(val)) + + err = iter.Next() + c.Assert(err, IsNil) + c.Assert(iter.Valid(), IsTrue) + + val = encodeInt((i + 1) * indexStep) + c.Assert([]byte(iter.Key()), BytesEquals, val) + c.Assert(valToStr(c, iter), Equals, string(val)) + iter.Close() + } + + // Non exist and beyond maximum seek test + iter, err := txn.Iter(encodeInt(testCount*indexStep), nil) + c.Assert(err, IsNil) + c.Assert(iter.Valid(), IsFalse) + + // Non exist but between existing keys seek test, + // it returns the smallest key that larger than the one we are seeking + inBetween := encodeInt((testCount-1)*indexStep - 1) + last := encodeInt((testCount - 1) * indexStep) + iter, err = txn.Iter(inBetween, nil) + c.Assert(err, IsNil) + c.Assert(iter.Valid(), IsTrue) + c.Assert([]byte(iter.Key()), Not(BytesEquals), inBetween) + c.Assert([]byte(iter.Key()), BytesEquals, last) + iter.Close() +} + +func mustNotGet(c *C, txn kv.Transaction) { + for i 
:= startIndex; i < testCount; i++ { + s := encodeInt(i * indexStep) + _, err := txn.Get(context.TODO(), s) + c.Assert(err, NotNil) + } +} + +func mustGet(c *C, txn kv.Transaction) { + for i := startIndex; i < testCount; i++ { + s := encodeInt(i * indexStep) + val, err := txn.Get(context.TODO(), s) + c.Assert(err, IsNil) + c.Assert(string(val), Equals, string(s)) + } +} + +func (s *testKVSuite) TestNew(c *C) { + store, err := New("goleveldb://relative/path") + c.Assert(err, NotNil) + c.Assert(store, IsNil) +} + +func (s *testKVSuite) TestGetSet(c *C) { + txn, err := s.s.Begin() + c.Assert(err, IsNil) + + insertData(c, txn) + + mustGet(c, txn) + + // Check transaction results + err = txn.Commit(context.Background()) + c.Assert(err, IsNil) + + txn, err = s.s.Begin() + c.Assert(err, IsNil) + defer txn.Commit(context.Background()) + + mustGet(c, txn) + mustDel(c, txn) +} + +func (s *testKVSuite) TestSeek(c *C) { + txn, err := s.s.Begin() + c.Assert(err, IsNil) + + insertData(c, txn) + checkSeek(c, txn) + + // Check transaction results + err = txn.Commit(context.Background()) + c.Assert(err, IsNil) + + txn, err = s.s.Begin() + c.Assert(err, IsNil) + defer txn.Commit(context.Background()) + + checkSeek(c, txn) + mustDel(c, txn) +} + +func (s *testKVSuite) TestInc(c *C) { + txn, err := s.s.Begin() + c.Assert(err, IsNil) + + key := []byte("incKey") + n, err := kv.IncInt64(txn, key, 100) + c.Assert(err, IsNil) + c.Assert(n, Equals, int64(100)) + + // Check transaction results + err = txn.Commit(context.Background()) + c.Assert(err, IsNil) + + txn, err = s.s.Begin() + c.Assert(err, IsNil) + + n, err = kv.IncInt64(txn, key, -200) + c.Assert(err, IsNil) + c.Assert(n, Equals, int64(-100)) + + err = txn.Delete(key) + c.Assert(err, IsNil) + + n, err = kv.IncInt64(txn, key, 100) + c.Assert(err, IsNil) + c.Assert(n, Equals, int64(100)) + + err = txn.Delete(key) + c.Assert(err, IsNil) + + err = txn.Commit(context.Background()) + c.Assert(err, IsNil) +} + +func (s *testKVSuite) TestDelete(c *C) { + txn, err := s.s.Begin() + c.Assert(err, IsNil) + + insertData(c, txn) + + mustDel(c, txn) + + mustNotGet(c, txn) + err = txn.Commit(context.Background()) + c.Assert(err, IsNil) + + // Try get + txn, err = s.s.Begin() + c.Assert(err, IsNil) + + mustNotGet(c, txn) + + // Insert again + insertData(c, txn) + err = txn.Commit(context.Background()) + c.Assert(err, IsNil) + + // Delete all + txn, err = s.s.Begin() + c.Assert(err, IsNil) + + mustDel(c, txn) + err = txn.Commit(context.Background()) + c.Assert(err, IsNil) + + txn, err = s.s.Begin() + c.Assert(err, IsNil) + + mustNotGet(c, txn) + err = txn.Commit(context.Background()) + c.Assert(err, IsNil) +} + +func (s *testKVSuite) TestDelete2(c *C) { + txn, err := s.s.Begin() + c.Assert(err, IsNil) + val := []byte("test") + txn.Set([]byte("DATA_test_tbl_department_record__0000000001_0003"), val) + txn.Set([]byte("DATA_test_tbl_department_record__0000000001_0004"), val) + txn.Set([]byte("DATA_test_tbl_department_record__0000000002_0003"), val) + txn.Set([]byte("DATA_test_tbl_department_record__0000000002_0004"), val) + err = txn.Commit(context.Background()) + c.Assert(err, IsNil) + + // Delete all + txn, err = s.s.Begin() + c.Assert(err, IsNil) + + it, err := txn.Iter([]byte("DATA_test_tbl_department_record__0000000001_0003"), nil) + c.Assert(err, IsNil) + for it.Valid() { + err = txn.Delete([]byte(it.Key())) + c.Assert(err, IsNil) + err = it.Next() + c.Assert(err, IsNil) + } + err = txn.Commit(context.Background()) + c.Assert(err, IsNil) + + txn, err = s.s.Begin() + 
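	// The reopened transaction must not see any key left under the
	// DATA_test_tbl_department_record__ prefix.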
c.Assert(err, IsNil) + it, _ = txn.Iter([]byte("DATA_test_tbl_department_record__000000000"), nil) + c.Assert(it.Valid(), IsFalse) + err = txn.Commit(context.Background()) + c.Assert(err, IsNil) +} + +func (s *testKVSuite) TestSetNil(c *C) { + txn, err := s.s.Begin() + defer txn.Commit(context.Background()) + c.Assert(err, IsNil) + err = txn.Set([]byte("1"), nil) + c.Assert(err, NotNil) +} + +func (s *testKVSuite) TestBasicSeek(c *C) { + txn, err := s.s.Begin() + c.Assert(err, IsNil) + txn.Set([]byte("1"), []byte("1")) + err = txn.Commit(context.Background()) + c.Assert(err, IsNil) + txn, err = s.s.Begin() + c.Assert(err, IsNil) + defer txn.Commit(context.Background()) + + it, err := txn.Iter([]byte("2"), nil) + c.Assert(err, IsNil) + c.Assert(it.Valid(), Equals, false) + txn.Delete([]byte("1")) +} + +func (s *testKVSuite) TestBasicTable(c *C) { + txn, err := s.s.Begin() + c.Assert(err, IsNil) + for i := 1; i < 5; i++ { + b := []byte(strconv.Itoa(i)) + txn.Set(b, b) + } + err = txn.Commit(context.Background()) + c.Assert(err, IsNil) + txn, err = s.s.Begin() + c.Assert(err, IsNil) + defer txn.Commit(context.Background()) + + err = txn.Set([]byte("1"), []byte("1")) + c.Assert(err, IsNil) + + it, err := txn.Iter([]byte("0"), nil) + c.Assert(err, IsNil) + c.Assert(string(it.Key()), Equals, "1") + + err = txn.Set([]byte("0"), []byte("0")) + c.Assert(err, IsNil) + it, err = txn.Iter([]byte("0"), nil) + c.Assert(err, IsNil) + c.Assert(string(it.Key()), Equals, "0") + err = txn.Delete([]byte("0")) + c.Assert(err, IsNil) + + txn.Delete([]byte("1")) + it, err = txn.Iter([]byte("0"), nil) + c.Assert(err, IsNil) + c.Assert(string(it.Key()), Equals, "2") + + err = txn.Delete([]byte("3")) + c.Assert(err, IsNil) + it, err = txn.Iter([]byte("2"), nil) + c.Assert(err, IsNil) + c.Assert(string(it.Key()), Equals, "2") + + it, err = txn.Iter([]byte("3"), nil) + c.Assert(err, IsNil) + c.Assert(string(it.Key()), Equals, "4") + err = txn.Delete([]byte("2")) + c.Assert(err, IsNil) + err = txn.Delete([]byte("4")) + c.Assert(err, IsNil) +} + +func (s *testKVSuite) TestRollback(c *C) { + txn, err := s.s.Begin() + c.Assert(err, IsNil) + + err = txn.Rollback() + c.Assert(err, IsNil) + + txn, err = s.s.Begin() + c.Assert(err, IsNil) + + insertData(c, txn) + + mustGet(c, txn) + + err = txn.Rollback() + c.Assert(err, IsNil) + + txn, err = s.s.Begin() + c.Assert(err, IsNil) + defer txn.Commit(context.Background()) + + for i := startIndex; i < testCount; i++ { + _, err := txn.Get(context.TODO(), []byte(strconv.Itoa(i))) + c.Assert(err, NotNil) + } +} + +func (s *testKVSuite) TestSeekMin(c *C) { + rows := []struct { + key string + value string + }{ + {"DATA_test_main_db_tbl_tbl_test_record__00000000000000000001", "lock-version"}, + {"DATA_test_main_db_tbl_tbl_test_record__00000000000000000001_0002", "1"}, + {"DATA_test_main_db_tbl_tbl_test_record__00000000000000000001_0003", "hello"}, + {"DATA_test_main_db_tbl_tbl_test_record__00000000000000000002", "lock-version"}, + {"DATA_test_main_db_tbl_tbl_test_record__00000000000000000002_0002", "2"}, + {"DATA_test_main_db_tbl_tbl_test_record__00000000000000000002_0003", "hello"}, + } + + txn, err := s.s.Begin() + c.Assert(err, IsNil) + for _, row := range rows { + txn.Set([]byte(row.key), []byte(row.value)) + } + + it, err := txn.Iter(nil, nil) + c.Assert(err, IsNil) + for it.Valid() { + fmt.Printf("%s, %s\n", it.Key(), it.Value()) + it.Next() + } + + it, err = txn.Iter([]byte("DATA_test_main_db_tbl_tbl_test_record__00000000000000000000"), nil) + c.Assert(err, IsNil) + 
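	// Seeking to a key smaller than every stored key positions the iterator
	// on the smallest existing key.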
c.Assert(string(it.Key()), Equals, "DATA_test_main_db_tbl_tbl_test_record__00000000000000000001") + + for _, row := range rows { + txn.Delete([]byte(row.key)) + } +} + +func (s *testKVSuite) TestConditionIfNotExist(c *C) { + var success int64 + cnt := 100 + b := []byte("1") + var wg sync.WaitGroup + wg.Add(cnt) + for i := 0; i < cnt; i++ { + go func() { + defer wg.Done() + txn, err := s.s.Begin() + c.Assert(err, IsNil) + err = txn.Set(b, b) + if err != nil { + return + } + err = txn.Commit(context.Background()) + if err == nil { + atomic.AddInt64(&success, 1) + } + }() + } + wg.Wait() + // At least one txn can success. + c.Assert(success, Greater, int64(0)) + + // Clean up + txn, err := s.s.Begin() + c.Assert(err, IsNil) + err = txn.Delete(b) + c.Assert(err, IsNil) + err = txn.Commit(context.Background()) + c.Assert(err, IsNil) +} + +func (s *testKVSuite) TestConditionIfEqual(c *C) { + var success int64 + cnt := 100 + b := []byte("1") + var wg sync.WaitGroup + wg.Add(cnt) + + txn, err := s.s.Begin() + c.Assert(err, IsNil) + txn.Set(b, b) + err = txn.Commit(context.Background()) + c.Assert(err, IsNil) + + for i := 0; i < cnt; i++ { + go func() { + defer wg.Done() + // Use txn1/err1 instead of txn/err is + // to pass `go tool vet -shadow` check. + txn1, err1 := s.s.Begin() + c.Assert(err1, IsNil) + txn1.Set(b, []byte("newValue")) + err1 = txn1.Commit(context.Background()) + if err1 == nil { + atomic.AddInt64(&success, 1) + } + }() + } + wg.Wait() + c.Assert(success, Greater, int64(0)) + + // Clean up + txn, err = s.s.Begin() + c.Assert(err, IsNil) + err = txn.Delete(b) + c.Assert(err, IsNil) + err = txn.Commit(context.Background()) + c.Assert(err, IsNil) +} + +func (s *testKVSuite) TestConditionUpdate(c *C) { + txn, err := s.s.Begin() + c.Assert(err, IsNil) + txn.Delete([]byte("b")) + kv.IncInt64(txn, []byte("a"), 1) + err = txn.Commit(context.Background()) + c.Assert(err, IsNil) +} + +func (s *testKVSuite) TestDBClose(c *C) { + c.Skip("don't know why it fails.") + store, err := mockstore.NewMockTikvStore() + c.Assert(err, IsNil) + + txn, err := store.Begin() + c.Assert(err, IsNil) + + err = txn.Set([]byte("a"), []byte("b")) + c.Assert(err, IsNil) + + err = txn.Commit(context.Background()) + c.Assert(err, IsNil) + + ver, err := store.CurrentVersion() + c.Assert(err, IsNil) + c.Assert(kv.MaxVersion.Cmp(ver), Equals, 1) + + snap, err := store.GetSnapshot(kv.MaxVersion) + c.Assert(err, IsNil) + + _, err = snap.Get(context.TODO(), []byte("a")) + c.Assert(err, IsNil) + + txn, err = store.Begin() + c.Assert(err, IsNil) + + err = store.Close() + c.Assert(err, IsNil) + + _, err = store.Begin() + c.Assert(err, NotNil) + + _, err = store.GetSnapshot(kv.MaxVersion) + c.Assert(err, NotNil) + + err = txn.Set([]byte("a"), []byte("b")) + c.Assert(err, IsNil) + + err = txn.Commit(context.Background()) + c.Assert(err, NotNil) +} + +func (s *testKVSuite) TestIsolationInc(c *C) { + threadCnt := 4 + + ids := make(map[int64]struct{}, threadCnt*100) + var m sync.Mutex + var wg sync.WaitGroup + + wg.Add(threadCnt) + for i := 0; i < threadCnt; i++ { + go func() { + defer wg.Done() + for j := 0; j < 100; j++ { + var id int64 + err := kv.RunInNewTxn(s.s, true, func(txn kv.Transaction) error { + var err1 error + id, err1 = kv.IncInt64(txn, []byte("key"), 1) + return err1 + }) + c.Assert(err, IsNil) + + m.Lock() + _, ok := ids[id] + ids[id] = struct{}{} + m.Unlock() + c.Assert(ok, IsFalse) + } + }() + } + + wg.Wait() + + // delete + txn, err := s.s.Begin() + c.Assert(err, IsNil) + defer txn.Commit(context.Background()) 
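	// Clean up the shared counter key; the deferred Commit above persists
	// the delete.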
+ txn.Delete([]byte("key")) +} + +func (s *testKVSuite) TestIsolationMultiInc(c *C) { + threadCnt := 4 + incCnt := 100 + keyCnt := 4 + + keys := make([][]byte, 0, keyCnt) + for i := 0; i < keyCnt; i++ { + keys = append(keys, []byte(fmt.Sprintf("test_key_%d", i))) + } + + var wg sync.WaitGroup + + wg.Add(threadCnt) + for i := 0; i < threadCnt; i++ { + go func() { + defer wg.Done() + for j := 0; j < incCnt; j++ { + err := kv.RunInNewTxn(s.s, true, func(txn kv.Transaction) error { + for _, key := range keys { + _, err1 := kv.IncInt64(txn, key, 1) + if err1 != nil { + return err1 + } + } + + return nil + }) + c.Assert(err, IsNil) + } + }() + } + + wg.Wait() + + err := kv.RunInNewTxn(s.s, false, func(txn kv.Transaction) error { + for _, key := range keys { + id, err1 := kv.GetInt64(context.TODO(), txn, key) + if err1 != nil { + return err1 + } + c.Assert(id, Equals, int64(threadCnt*incCnt)) + txn.Delete(key) + } + return nil + }) + c.Assert(err, IsNil) +} + +func (s *testKVSuite) TestRetryOpenStore(c *C) { + begin := time.Now() + Register("dummy", &brokenStore{}) + store, err := newStoreWithRetry("dummy://dummy-store", 3) + if store != nil { + defer store.Close() + } + c.Assert(err, NotNil) + elapse := time.Since(begin) + c.Assert(uint64(elapse), GreaterEqual, uint64(3*time.Second), Commentf("elapse: %s", elapse)) +} + +func (s *testKVSuite) TestOpenStore(c *C) { + Register("open", &brokenStore{}) + store, err := newStoreWithRetry(":", 3) + if store != nil { + defer store.Close() + } + c.Assert(err, NotNil) +} + +func (s *testKVSuite) TestRegister(c *C) { + err := Register("retry", &brokenStore{}) + c.Assert(err, IsNil) + err = Register("retry", &brokenStore{}) + c.Assert(err, NotNil) +} diff --git a/store/tikv/2pc.go b/store/tikv/2pc.go new file mode 100644 index 0000000..b140f1c --- /dev/null +++ b/store/tikv/2pc.go @@ -0,0 +1,764 @@ +// Copyright 2016 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package tikv + +import ( + "bytes" + "context" + "math" + "sync" + "time" + + pb "github.com/pingcap-incubator/tinykv/proto/pkg/kvrpcpb" + "github.com/pingcap/errors" + "github.com/pingcap/tidb/kv" + "github.com/pingcap/tidb/parser/terror" + + "github.com/pingcap/tidb/store/tikv/tikvrpc" + "github.com/pingcap/tidb/tablecodec" + "github.com/pingcap/tidb/util/logutil" + "go.uber.org/zap" +) + +type twoPhaseCommitAction interface { + handleSingleBatch(*twoPhaseCommitter, *Backoffer, batchKeys) error + String() string +} + +type actionPrewrite struct{} +type actionCommit struct{} +type actionCleanup struct{} + +var ( + _ twoPhaseCommitAction = actionPrewrite{} + _ twoPhaseCommitAction = actionCommit{} + _ twoPhaseCommitAction = actionCleanup{} +) + +// Global variable set by config file. +var ( + ManagedLockTTL uint64 = 20000 // 20s +) + +func (actionPrewrite) String() string { + return "prewrite" +} + +func (actionCommit) String() string { + return "commit" +} + +func (actionCleanup) String() string { + return "cleanup" +} + +// twoPhaseCommitter executes a two-phase commit protocol. 
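//
// The overall flow is: collect the transaction's keys and mutations,
// prewrite every key, fetch a commitTS from PD, then commit the primary
// key before the secondary batches (see execute below). A minimal, hedged
// sketch of driving it from a tikvTxn; error handling is elided and
// ctx/connID stand in for the caller's context and connection ID:
//
//	committer, err := newTwoPhaseCommitter(txn, connID)
//	if err == nil {
//		err = committer.initKeysAndMutations()
//	}
//	if err == nil {
//		err = committer.execute(ctx)
//	}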
+type twoPhaseCommitter struct { + store *tikvStore + txn *tikvTxn + startTS uint64 + keys [][]byte + mutations map[string]*mutationEx + lockTTL uint64 + commitTS uint64 + connID uint64 // connID is used for log. + cleanWg sync.WaitGroup + txnSize int + + primaryKey []byte + + mu struct { + sync.RWMutex + undeterminedErr error // undeterminedErr saves the rpc error we encounter when commit primary key. + committed bool + } + // regionTxnSize stores the number of keys involved in each region + regionTxnSize map[uint64]int +} + +// batchExecutor is txn controller providing rate control like utils +type batchExecutor struct { + rateLim int // concurrent worker numbers + rateLimiter *rateLimit // rate limiter for concurrency control, maybe more strategies + committer *twoPhaseCommitter // here maybe more different type committer in the future + action twoPhaseCommitAction // the work action type + backoffer *Backoffer // Backoffer + tokenWaitDuration time.Duration // get token wait time +} + +type mutationEx struct { + pb.Mutation +} + +// newTwoPhaseCommitter creates a twoPhaseCommitter. +func newTwoPhaseCommitter(txn *tikvTxn, connID uint64) (*twoPhaseCommitter, error) { + return &twoPhaseCommitter{ + store: txn.store, + txn: txn, + startTS: txn.StartTS(), + connID: connID, + regionTxnSize: map[uint64]int{}, + }, nil +} + +func (c *twoPhaseCommitter) initKeysAndMutations() error { + var ( + keys [][]byte + size int + putCnt int + delCnt int + lockCnt int + ) + mutations := make(map[string]*mutationEx) + txn := c.txn + err := txn.us.WalkBuffer(func(k kv.Key, v []byte) error { + if len(v) > 0 { + if tablecodec.IsUntouchedIndexKValue(k, v) { + return nil + } + op := pb.Op_Put + mutations[string(k)] = &mutationEx{ + Mutation: pb.Mutation{ + Op: op, + Key: k, + Value: v, + }, + } + putCnt++ + } else { + mutations[string(k)] = &mutationEx{ + Mutation: pb.Mutation{ + Op: pb.Op_Del, + Key: k, + }, + } + delCnt++ + } + keys = append(keys, k) + entrySize := len(k) + len(v) + if entrySize > kv.TxnEntrySizeLimit { + return kv.ErrEntryTooLarge.GenWithStackByArgs(kv.TxnEntrySizeLimit, entrySize) + } + size += entrySize + return nil + }) + if err != nil { + return errors.Trace(err) + } + for _, lockKey := range txn.lockKeys { + _, ok := mutations[string(lockKey)] + if !ok { + mutations[string(lockKey)] = &mutationEx{ + Mutation: pb.Mutation{ + Op: pb.Op_Lock, + Key: lockKey, + }, + } + lockCnt++ + keys = append(keys, lockKey) + size += len(lockKey) + } + } + if len(keys) == 0 { + return nil + } + c.txnSize = size + + if size > int(kv.TxnTotalSizeLimit) { + return kv.ErrTxnTooLarge.GenWithStackByArgs(size) + } + const logEntryCount = 10000 + const logSize = 4 * 1024 * 1024 // 4MB + if len(keys) > logEntryCount || size > logSize { + tableID := tablecodec.DecodeTableID(keys[0]) + logutil.BgLogger().Info("[BIG_TXN]", + zap.Uint64("con", c.connID), + zap.Int64("table ID", tableID), + zap.Int("size", size), + zap.Int("keys", len(keys)), + zap.Int("puts", putCnt), + zap.Int("dels", delCnt), + zap.Int("locks", lockCnt), + zap.Uint64("txnStartTS", txn.startTS)) + } + + // Sanity check for startTS. 
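	// math.MaxUint64 is the reserved kv.MaxVersion value and is never issued
	// as a real start timestamp, so seeing it here means the transaction was
	// constructed with a bogus startTS (exercised by TestIllegalTso).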
+ if txn.StartTS() == math.MaxUint64 { + err = errors.Errorf("try to commit with invalid txnStartTS: %d", txn.StartTS()) + logutil.BgLogger().Error("commit failed", + zap.Uint64("conn", c.connID), + zap.Error(err)) + return errors.Trace(err) + } + + c.keys = keys + c.mutations = mutations + c.lockTTL = txnLockTTL(txn.startTime, size) + return nil +} + +func (c *twoPhaseCommitter) primary() []byte { + if len(c.primaryKey) == 0 { + return c.keys[0] + } + return c.primaryKey +} + +const bytesPerMiB = 1024 * 1024 + +func txnLockTTL(startTime time.Time, txnSize int) uint64 { + // Increase lockTTL for large transactions. + // The formula is `ttl = ttlFactor * sqrt(sizeInMiB)`. + // When writeSize is less than 256KB, the base ttl is defaultTTL (3s); + // When writeSize is 1MiB, 100MiB, or 400MiB, ttl is 6s, 60s, 120s correspondingly; + lockTTL := defaultLockTTL + if txnSize >= txnCommitBatchSize { + sizeMiB := float64(txnSize) / bytesPerMiB + lockTTL = uint64(float64(ttlFactor) * math.Sqrt(sizeMiB)) + if lockTTL < defaultLockTTL { + lockTTL = defaultLockTTL + } + if lockTTL > maxLockTTL { + lockTTL = maxLockTTL + } + } + + // Increase lockTTL by the transaction's read time. + // When resolving a lock, we compare current ts and startTS+lockTTL to decide whether to clean up. If a txn + // takes a long time to read, increasing its TTL will help to prevent it from been aborted soon after prewrite. + elapsed := time.Since(startTime) / time.Millisecond + return lockTTL + uint64(elapsed) +} + +// doActionOnKeys groups keys into primary batch and secondary batches, if primary batch exists in the key, +// it does action on primary batch first, then on secondary batches. If action is commit, secondary batches +// is done in background goroutine. +func (c *twoPhaseCommitter) doActionOnKeys(bo *Backoffer, action twoPhaseCommitAction, keys [][]byte) error { + if len(keys) == 0 { + return nil + } + groups, firstRegion, err := c.store.regionCache.GroupKeysByRegion(bo, keys, nil) + if err != nil { + return errors.Trace(err) + } + + var batches []batchKeys + var sizeFunc = c.keySize + if _, ok := action.(actionPrewrite); ok { + // Do not update regionTxnSize on retries. They are not used when building a PrewriteRequest. + if len(bo.errors) == 0 { + for region, keys := range groups { + c.regionTxnSize[region.id] = len(keys) + } + } + sizeFunc = c.keyValueSize + } + // Make sure the group that contains primary key goes first. + batches = appendBatchBySize(batches, firstRegion, groups[firstRegion], sizeFunc, txnCommitBatchSize) + delete(groups, firstRegion) + for id, g := range groups { + batches = appendBatchBySize(batches, id, g, sizeFunc, txnCommitBatchSize) + } + + firstIsPrimary := bytes.Equal(keys[0], c.primary()) + _, actionIsCommit := action.(actionCommit) + _, actionIsCleanup := action.(actionCleanup) + if firstIsPrimary && (actionIsCommit || actionIsCleanup) { + // primary should be committed/cleanup first + err = c.doActionOnBatches(bo, action, batches[:1]) + if err != nil { + return errors.Trace(err) + } + batches = batches[1:] + } + if actionIsCommit { + // Commit secondary batches in background goroutine to reduce latency. + // The backoffer instance is created outside of the goroutine to avoid + // potential data race in unit test since `CommitMaxBackoff` will be updated + // by test suites. 
+ secondaryBo := NewBackoffer(context.Background(), CommitMaxBackoff).WithVars(c.txn.vars) + go func() { + e := c.doActionOnBatches(secondaryBo, action, batches) + if e != nil { + logutil.BgLogger().Debug("2PC async doActionOnBatches", + zap.Uint64("conn", c.connID), + zap.Stringer("action type", action), + zap.Error(e)) + } + }() + } else { + err = c.doActionOnBatches(bo, action, batches) + } + return errors.Trace(err) +} + +// doActionOnBatches does action to batches in parallel. +func (c *twoPhaseCommitter) doActionOnBatches(bo *Backoffer, action twoPhaseCommitAction, batches []batchKeys) error { + if len(batches) == 0 { + return nil + } + + if len(batches) == 1 { + e := action.handleSingleBatch(c, bo, batches[0]) + if e != nil { + logutil.BgLogger().Debug("2PC doActionOnBatches failed", + zap.Uint64("conn", c.connID), + zap.Stringer("action type", action), + zap.Error(e), + zap.Uint64("txnStartTS", c.startTS)) + } + return errors.Trace(e) + } + rateLim := len(batches) + // Set rateLim here for the large transaction. + // If the rate limit is too high, tikv will report service is busy. + // If the rate limit is too low, we can't full utilize the tikv's throughput. + // TODO: Find a self-adaptive way to control the rate limit here. + if rateLim > 32 { + rateLim = 32 + } + batchExecutor := newBatchExecutor(rateLim, c, action, bo) + err := batchExecutor.process(batches) + return errors.Trace(err) +} + +func (c *twoPhaseCommitter) keyValueSize(key []byte) int { + size := len(key) + if mutation := c.mutations[string(key)]; mutation != nil { + size += len(mutation.Value) + } + return size +} + +func (c *twoPhaseCommitter) keySize(key []byte) int { + return len(key) +} + +func (c *twoPhaseCommitter) buildPrewriteRequest(batch batchKeys) *tikvrpc.Request { + mutations := make([]*pb.Mutation, len(batch.keys)) + for i, k := range batch.keys { + tmp := c.mutations[string(k)] + mutations[i] = &tmp.Mutation + } + + req := &pb.PrewriteRequest{ + Mutations: mutations, + PrimaryLock: c.primary(), + StartVersion: c.startTS, + LockTtl: c.lockTTL, + } + return tikvrpc.NewRequest(tikvrpc.CmdPrewrite, req, pb.Context{}) +} + +func (actionPrewrite) handleSingleBatch(c *twoPhaseCommitter, bo *Backoffer, batch batchKeys) error { + req := c.buildPrewriteRequest(batch) + for { + resp, err := c.store.SendReq(bo, req, batch.region, readTimeoutShort) + if err != nil { + return errors.Trace(err) + } + regionErr, err := resp.GetRegionError() + if err != nil { + return errors.Trace(err) + } + if regionErr != nil { + err = bo.Backoff(BoRegionMiss, errors.New(regionErr.String())) + if err != nil { + return errors.Trace(err) + } + err = c.prewriteKeys(bo, batch.keys) + return errors.Trace(err) + } + if resp.Resp == nil { + return errors.Trace(ErrBodyMissing) + } + prewriteResp := resp.Resp.(*pb.PrewriteResponse) + keyErrs := prewriteResp.GetErrors() + if len(keyErrs) == 0 { + return nil + } + var locks []*Lock + for _, keyErr := range keyErrs { + // Extract lock from key error + lock, err1 := extractLockFromKeyErr(keyErr) + if err1 != nil { + return errors.Trace(err1) + } + logutil.BgLogger().Debug("prewrite encounters lock", + zap.Uint64("conn", c.connID), + zap.Stringer("lock", lock)) + locks = append(locks, lock) + } + // Set callerStartTS to 0 so as not to update minCommitTS. 
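		// ResolveLocks below tries to clean up the conflicting locks; if some
		// of them are still within their TTL, we back off (BoTxnLock) for the
		// returned duration and then retry the prewrite for this batch.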
+ msBeforeExpired, _, err := c.store.lockResolver.ResolveLocks(bo, 0, locks) + if err != nil { + return errors.Trace(err) + } + if msBeforeExpired > 0 { + err = bo.BackoffWithMaxSleep(BoTxnLock, int(msBeforeExpired), errors.Errorf("2PC prewrite lockedKeys: %d", len(locks))) + if err != nil { + return errors.Trace(err) + } + } + } +} + +func (c *twoPhaseCommitter) setUndeterminedErr(err error) { + c.mu.Lock() + defer c.mu.Unlock() + c.mu.undeterminedErr = err +} + +func (c *twoPhaseCommitter) getUndeterminedErr() error { + c.mu.RLock() + defer c.mu.RUnlock() + return c.mu.undeterminedErr +} + +func (actionCommit) handleSingleBatch(c *twoPhaseCommitter, bo *Backoffer, batch batchKeys) error { + req := tikvrpc.NewRequest(tikvrpc.CmdCommit, &pb.CommitRequest{ + StartVersion: c.startTS, + Keys: batch.keys, + CommitVersion: c.commitTS, + }, pb.Context{}) + + sender := NewRegionRequestSender(c.store.regionCache, c.store.client) + resp, err := sender.SendReq(bo, req, batch.region, readTimeoutShort) + + // If we fail to receive response for the request that commits primary key, it will be undetermined whether this + // transaction has been successfully committed. + // Under this circumstance, we can not declare the commit is complete (may lead to data lost), nor can we throw + // an error (may lead to the duplicated key error when upper level restarts the transaction). Currently the best + // solution is to populate this error and let upper layer drop the connection to the corresponding mysql client. + isPrimary := bytes.Equal(batch.keys[0], c.primary()) + if isPrimary && sender.rpcError != nil { + c.setUndeterminedErr(errors.Trace(sender.rpcError)) + } + + if err != nil { + return errors.Trace(err) + } + regionErr, err := resp.GetRegionError() + if err != nil { + return errors.Trace(err) + } + if regionErr != nil { + err = bo.Backoff(BoRegionMiss, errors.New(regionErr.String())) + if err != nil { + return errors.Trace(err) + } + // re-split keys and commit again. + err = c.commitKeys(bo, batch.keys) + return errors.Trace(err) + } + if resp.Resp == nil { + return errors.Trace(ErrBodyMissing) + } + commitResp := resp.Resp.(*pb.CommitResponse) + // Here we can make sure tikv has processed the commit primary key request. So + // we can clean undetermined error. + if isPrimary { + c.setUndeterminedErr(nil) + } + if keyErr := commitResp.GetError(); keyErr != nil { + c.mu.RLock() + defer c.mu.RUnlock() + err = extractKeyErr(keyErr) + if c.mu.committed { + // No secondary key could be rolled back after it's primary key is committed. + // There must be a serious bug somewhere. + logutil.BgLogger().Error("2PC failed commit key after primary key committed", + zap.Error(err), + zap.Uint64("txnStartTS", c.startTS)) + return errors.Trace(err) + } + // The transaction maybe rolled back by concurrent transactions. + logutil.BgLogger().Debug("2PC failed commit primary key", + zap.Error(err), + zap.Uint64("txnStartTS", c.startTS)) + return err + } + + c.mu.Lock() + defer c.mu.Unlock() + // Group that contains primary key is always the first. + // We mark transaction's status committed when we receive the first success response. 
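	// Once the primary key is committed the transaction is effectively
	// committed; later failures on secondary batches are logged rather than
	// rolled back (see the committed checks above and in execute).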
+ c.mu.committed = true + return nil +} + +func (actionCleanup) handleSingleBatch(c *twoPhaseCommitter, bo *Backoffer, batch batchKeys) error { + req := tikvrpc.NewRequest(tikvrpc.CmdBatchRollback, &pb.BatchRollbackRequest{ + Keys: batch.keys, + StartVersion: c.startTS, + }, pb.Context{}) + resp, err := c.store.SendReq(bo, req, batch.region, readTimeoutShort) + if err != nil { + return errors.Trace(err) + } + regionErr, err := resp.GetRegionError() + if err != nil { + return errors.Trace(err) + } + if regionErr != nil { + err = bo.Backoff(BoRegionMiss, errors.New(regionErr.String())) + if err != nil { + return errors.Trace(err) + } + err = c.cleanupKeys(bo, batch.keys) + return errors.Trace(err) + } + if keyErr := resp.Resp.(*pb.BatchRollbackResponse).GetError(); keyErr != nil { + err = errors.Errorf("conn %d 2PC cleanup failed: %s", c.connID, keyErr) + logutil.BgLogger().Debug("2PC failed cleanup key", + zap.Error(err), + zap.Uint64("txnStartTS", c.startTS)) + return errors.Trace(err) + } + return nil +} + +func (c *twoPhaseCommitter) prewriteKeys(bo *Backoffer, keys [][]byte) error { + return c.doActionOnKeys(bo, actionPrewrite{}, keys) +} + +func (c *twoPhaseCommitter) commitKeys(bo *Backoffer, keys [][]byte) error { + return c.doActionOnKeys(bo, actionCommit{}, keys) +} + +func (c *twoPhaseCommitter) cleanupKeys(bo *Backoffer, keys [][]byte) error { + return c.doActionOnKeys(bo, actionCleanup{}, keys) +} + +// execute executes the two-phase commit protocol. +func (c *twoPhaseCommitter) execute(ctx context.Context) (err error) { + defer func() { + // Always clean up all written keys if the txn does not commit. + c.mu.RLock() + committed := c.mu.committed + undetermined := c.mu.undeterminedErr != nil + c.mu.RUnlock() + if !committed && !undetermined { + c.cleanWg.Add(1) + go func() { + cleanupKeysCtx := context.WithValue(context.Background(), txnStartKey, ctx.Value(txnStartKey)) + err := c.cleanupKeys(NewBackoffer(cleanupKeysCtx, cleanupMaxBackoff).WithVars(c.txn.vars), c.keys) + if err != nil { + logutil.Logger(ctx).Info("2PC cleanup failed", + zap.Error(err), + zap.Uint64("txnStartTS", c.startTS)) + } else { + logutil.Logger(ctx).Info("2PC clean up done", + zap.Uint64("txnStartTS", c.startTS)) + } + c.cleanWg.Done() + }() + } + c.txn.commitTS = c.commitTS + }() + + prewriteBo := NewBackoffer(ctx, PrewriteMaxBackoff).WithVars(c.txn.vars) + err = c.prewriteKeys(prewriteBo, c.keys) + if err != nil { + logutil.Logger(ctx).Debug("2PC failed on prewrite", + zap.Error(err), + zap.Uint64("txnStartTS", c.startTS)) + return errors.Trace(err) + } + + commitTS, err := c.store.getTimestampWithRetry(NewBackoffer(ctx, tsoMaxBackoff).WithVars(c.txn.vars)) + if err != nil { + logutil.Logger(ctx).Warn("2PC get commitTS failed", + zap.Error(err), + zap.Uint64("txnStartTS", c.startTS)) + return errors.Trace(err) + } + + // check commitTS + if commitTS <= c.startTS { + err = errors.Errorf("conn %d Invalid transaction tso with txnStartTS=%v while txnCommitTS=%v", + c.connID, c.startTS, commitTS) + logutil.BgLogger().Error("invalid transaction", zap.Error(err)) + return errors.Trace(err) + } + c.commitTS = commitTS + if err = c.checkSchemaValid(); err != nil { + return errors.Trace(err) + } + + if c.store.oracle.IsExpired(c.startTS, kv.MaxTxnTimeUse) { + err = errors.Errorf("conn %d txn takes too much time, txnStartTS: %d, comm: %d", + c.connID, c.startTS, c.commitTS) + return err + } + + commitBo := NewBackoffer(ctx, CommitMaxBackoff).WithVars(c.txn.vars) + err = c.commitKeys(commitBo, c.keys) + if err != 
nil { + if undeterminedErr := c.getUndeterminedErr(); undeterminedErr != nil { + logutil.Logger(ctx).Error("2PC commit result undetermined", + zap.Error(err), + zap.NamedError("rpcErr", undeterminedErr), + zap.Uint64("txnStartTS", c.startTS)) + err = errors.Trace(terror.ErrResultUndetermined) + } + if !c.mu.committed { + logutil.Logger(ctx).Debug("2PC failed on commit", + zap.Error(err), + zap.Uint64("txnStartTS", c.startTS)) + return errors.Trace(err) + } + logutil.Logger(ctx).Debug("got some exceptions, but 2PC was still successful", + zap.Error(err), + zap.Uint64("txnStartTS", c.startTS)) + } + return nil +} + +type schemaLeaseChecker interface { + Check(txnTS uint64) error +} + +func (c *twoPhaseCommitter) checkSchemaValid() error { + checker, ok := c.txn.us.GetOption(kv.SchemaChecker).(schemaLeaseChecker) + if ok { + err := checker.Check(c.commitTS) + if err != nil { + return errors.Trace(err) + } + } + return nil +} + +// TiKV recommends each RPC packet should be less than ~1MB. We keep each packet's +// Key+Value size below 16KB. +const txnCommitBatchSize = 16 * 1024 + +// batchKeys is a batch of keys in the same region. +type batchKeys struct { + region RegionVerID + keys [][]byte +} + +// appendBatchBySize appends keys to []batchKeys. It may split the keys to make +// sure each batch's size does not exceed the limit. +func appendBatchBySize(b []batchKeys, region RegionVerID, keys [][]byte, sizeFn func([]byte) int, limit int) []batchKeys { + var start, end int + for start = 0; start < len(keys); start = end { + var size int + for end = start; end < len(keys) && size < limit; end++ { + size += sizeFn(keys[end]) + } + b = append(b, batchKeys{ + region: region, + keys: keys[start:end], + }) + } + return b +} + +// newBatchExecutor create processor to handle concurrent batch works(prewrite/commit etc) +func newBatchExecutor(rateLimit int, committer *twoPhaseCommitter, + action twoPhaseCommitAction, backoffer *Backoffer) *batchExecutor { + return &batchExecutor{rateLimit, nil, committer, + action, backoffer, time.Duration(1 * time.Millisecond)} +} + +// initUtils do initialize batchExecutor related policies like rateLimit util +func (batchExe *batchExecutor) initUtils() error { + // init rateLimiter by injected rate limit number + batchExe.rateLimiter = newRateLimit(batchExe.rateLim) + return nil +} + +// startWork concurrently do the work for each batch considering rate limit +func (batchExe *batchExecutor) startWorker(exitCh chan struct{}, ch chan error, batches []batchKeys) { + for idx, batch1 := range batches { + waitStart := time.Now() + if exit := batchExe.rateLimiter.getToken(exitCh); !exit { + batchExe.tokenWaitDuration += time.Since(waitStart) + batch := batch1 + go func() { + defer batchExe.rateLimiter.putToken() + var singleBatchBackoffer *Backoffer + if _, ok := batchExe.action.(actionCommit); ok { + // Because the secondary batches of the commit actions are implemented to be + // committed asynchronously in background goroutines, we should not + // fork a child context and call cancel() while the foreground goroutine exits. + // Otherwise the background goroutines will be canceled execeptionally. + // Here we makes a new clone of the original backoffer for this goroutine + // exclusively to avoid the data race when using the same backoffer + // in concurrent goroutines. 
+ singleBatchBackoffer = batchExe.backoffer.Clone() + } else { + var singleBatchCancel context.CancelFunc + singleBatchBackoffer, singleBatchCancel = batchExe.backoffer.Fork() + defer singleBatchCancel() + } + ch <- batchExe.action.handleSingleBatch(batchExe.committer, singleBatchBackoffer, batch) + }() + } else { + logutil.Logger(batchExe.backoffer.ctx).Info("break startWorker", + zap.Stringer("action", batchExe.action), zap.Int("batch size", len(batches)), + zap.Int("index", idx)) + break + } + } +} + +// process will start worker routine and collect results +func (batchExe *batchExecutor) process(batches []batchKeys) error { + var err error + err = batchExe.initUtils() + if err != nil { + logutil.Logger(batchExe.backoffer.ctx).Error("batchExecutor initUtils failed", zap.Error(err)) + return err + } + + // For prewrite, stop sending other requests after receiving first error. + backoffer := batchExe.backoffer + var cancel context.CancelFunc + if _, ok := batchExe.action.(actionPrewrite); ok { + backoffer, cancel = batchExe.backoffer.Fork() + defer cancel() + } + // concurrently do the work for each batch. + ch := make(chan error, len(batches)) + exitCh := make(chan struct{}) + go batchExe.startWorker(exitCh, ch, batches) + // check results + for i := 0; i < len(batches); i++ { + if e := <-ch; e != nil { + logutil.Logger(backoffer.ctx).Debug("2PC doActionOnBatches failed", + zap.Uint64("conn", batchExe.committer.connID), + zap.Stringer("action type", batchExe.action), + zap.Error(e), + zap.Uint64("txnStartTS", batchExe.committer.startTS)) + // Cancel other requests and return the first error. + if cancel != nil { + logutil.Logger(backoffer.ctx).Debug("2PC doActionOnBatches to cancel other actions", + zap.Uint64("conn", batchExe.committer.connID), + zap.Stringer("action type", batchExe.action), + zap.Uint64("txnStartTS", batchExe.committer.startTS)) + cancel() + } + if err == nil { + err = e + } + } + } + close(exitCh) + + return err +} diff --git a/store/tikv/2pc_fail_test.go b/store/tikv/2pc_fail_test.go new file mode 100644 index 0000000..67ae796 --- /dev/null +++ b/store/tikv/2pc_fail_test.go @@ -0,0 +1,142 @@ +// Copyright 2017 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package tikv + +import ( + "context" + + . "github.com/pingcap/check" + "github.com/pingcap/errors" + "github.com/pingcap/failpoint" + "github.com/pingcap/tidb/kv" + "github.com/pingcap/tidb/parser/terror" +) + +// TestFailCommitPrimaryRpcErrors tests rpc errors are handled properly when +// committing primary region task. +func (s *testCommitterSuite) TestFailCommitPrimaryRpcErrors(c *C) { + c.Assert(failpoint.Enable("github.com/pingcap/tidb/store/mockstore/mocktikv/rpcCommitResult", `return("timeout")`), IsNil) + defer func() { + c.Assert(failpoint.Disable("github.com/pingcap/tidb/store/mockstore/mocktikv/rpcCommitResult"), IsNil) + }() + // The rpc error will be wrapped to ErrResultUndetermined. 
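	// A caller that sees terror.ErrResultUndetermined cannot know whether the
	// commit took effect, so the safe reaction is to drop the connection
	// rather than retry (see the comment in actionCommit.handleSingleBatch).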
+ t1 := s.begin(c) + err := t1.Set([]byte("a"), []byte("a1")) + c.Assert(err, IsNil) + err = t1.Commit(context.Background()) + c.Assert(err, NotNil) + c.Assert(terror.ErrorEqual(err, terror.ErrResultUndetermined), IsTrue, Commentf("%s", errors.ErrorStack(err))) + + // We don't need to call "Rollback" after "Commit" fails. + err = t1.Rollback() + c.Assert(err, Equals, kv.ErrInvalidTxn) +} + +// TestFailCommitPrimaryRegionError tests RegionError is handled properly when +// committing primary region task. +func (s *testCommitterSuite) TestFailCommitPrimaryRegionError(c *C) { + c.Assert(failpoint.Enable("github.com/pingcap/tidb/store/mockstore/mocktikv/rpcCommitResult", `return("notLeader")`), IsNil) + defer func() { + c.Assert(failpoint.Disable("github.com/pingcap/tidb/store/mockstore/mocktikv/rpcCommitResult"), IsNil) + }() + // Ensure it returns the original error without wrapped to ErrResultUndetermined + // if it exceeds max retry timeout on RegionError. + t2 := s.begin(c) + err := t2.Set([]byte("b"), []byte("b1")) + c.Assert(err, IsNil) + err = t2.Commit(context.Background()) + c.Assert(err, NotNil) + c.Assert(terror.ErrorNotEqual(err, terror.ErrResultUndetermined), IsTrue) +} + +// TestFailCommitPrimaryRPCErrorThenRegionError tests the case when commit first +// receive a rpc timeout, then region errors afterwrards. +func (s *testCommitterSuite) TestFailCommitPrimaryRPCErrorThenRegionError(c *C) { + c.Assert(failpoint.Enable("github.com/pingcap/tidb/store/mockstore/mocktikv/rpcCommitResult", `1*return("timeout")->return("notLeader")`), IsNil) + defer func() { + c.Assert(failpoint.Disable("github.com/pingcap/tidb/store/mockstore/mocktikv/rpcCommitResult"), IsNil) + }() + // The region error will be wrapped to ErrResultUndetermined. + t1 := s.begin(c) + err := t1.Set([]byte("a"), []byte("a1")) + c.Assert(err, IsNil) + err = t1.Commit(context.Background()) + c.Assert(err, NotNil) + c.Assert(terror.ErrorEqual(err, terror.ErrResultUndetermined), IsTrue, Commentf("%s", errors.ErrorStack(err))) +} + +// TestFailCommitPrimaryKeyError tests KeyError is handled properly when +// committing primary region task. +func (s *testCommitterSuite) TestFailCommitPrimaryKeyError(c *C) { + c.Assert(failpoint.Enable("github.com/pingcap/tidb/store/mockstore/mocktikv/rpcCommitResult", `return("keyError")`), IsNil) + defer func() { + c.Assert(failpoint.Disable("github.com/pingcap/tidb/store/mockstore/mocktikv/rpcCommitResult"), IsNil) + }() + // Ensure it returns the original error without wrapped to ErrResultUndetermined + // if it meets KeyError. 
+ t3 := s.begin(c) + err := t3.Set([]byte("c"), []byte("c1")) + c.Assert(err, IsNil) + err = t3.Commit(context.Background()) + c.Assert(err, NotNil) + c.Assert(terror.ErrorNotEqual(err, terror.ErrResultUndetermined), IsTrue) +} + +func (s *testCommitterSuite) TestFailCommitTimeout(c *C) { + c.Assert(failpoint.Enable("github.com/pingcap/tidb/store/mockstore/mocktikv/rpcCommitTimeout", `return(true)`), IsNil) + defer func() { + c.Assert(failpoint.Disable("github.com/pingcap/tidb/store/mockstore/mocktikv/rpcCommitTimeout"), IsNil) + }() + txn := s.begin(c) + err := txn.Set([]byte("a"), []byte("a1")) + c.Assert(err, IsNil) + err = txn.Set([]byte("b"), []byte("b1")) + c.Assert(err, IsNil) + err = txn.Set([]byte("c"), []byte("c1")) + c.Assert(err, IsNil) + err = txn.Commit(context.Background()) + c.Assert(err, NotNil) + + txn2 := s.begin(c) + value, err := txn2.Get(context.TODO(), []byte("a")) + c.Assert(err, IsNil) + c.Assert(len(value), Greater, 0) + _, err = txn2.Get(context.TODO(), []byte("b")) + c.Assert(err, IsNil) + c.Assert(len(value), Greater, 0) +} + +// TestFailPrewriteRegionError tests data race does not happen on retries +func (s *testCommitterSuite) TestFailPrewriteRegionError(c *C) { + c.Assert(failpoint.Enable("github.com/pingcap/tidb/store/mockstore/mocktikv/rpcPrewriteResult", `return("notLeader")`), IsNil) + defer func() { + c.Assert(failpoint.Disable("github.com/pingcap/tidb/store/mockstore/mocktikv/rpcPrewriteResult"), IsNil) + }() + + txn := s.begin(c) + + // Set the value big enough to create many batches. This increases the chance of data races. + var bigVal [18000]byte + for i := 0; i < 1000; i++ { + err := txn.Set([]byte{byte(i)}, bigVal[:]) + c.Assert(err, IsNil) + } + + committer, err := newTwoPhaseCommitterWithInit(txn, 1) + c.Assert(err, IsNil) + + ctx := context.Background() + err = committer.prewriteKeys(NewBackoffer(ctx, 1000), committer.keys) + c.Assert(err, NotNil) +} diff --git a/store/tikv/2pc_slow_test.go b/store/tikv/2pc_slow_test.go new file mode 100644 index 0000000..bf96a24 --- /dev/null +++ b/store/tikv/2pc_slow_test.go @@ -0,0 +1,37 @@ +// Copyright 2016 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +// +build !race + +package tikv + +import . "github.com/pingcap/check" + +// TestCommitMultipleRegions tests commit multiple regions. +// The test takes too long under the race detector. +func (s *testCommitterSuite) TestCommitMultipleRegions(c *C) { + m := make(map[string]string) + for i := 0; i < 100; i++ { + k, v := randKV(10, 10) + m[k] = v + } + s.mustCommit(c, m) + + // Test big values. + m = make(map[string]string) + for i := 0; i < 50; i++ { + k, v := randKV(11, txnCommitBatchSize/7) + m[k] = v + } + s.mustCommit(c, m) +} diff --git a/store/tikv/2pc_test.go b/store/tikv/2pc_test.go new file mode 100644 index 0000000..3ee6463 --- /dev/null +++ b/store/tikv/2pc_test.go @@ -0,0 +1,437 @@ +// Copyright 2016 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package tikv + +import ( + "context" + "math" + "math/rand" + "strings" + "time" + + "github.com/pingcap-incubator/tinykv/proto/pkg/kvrpcpb" + . "github.com/pingcap/check" + "github.com/pingcap/errors" + "github.com/pingcap/tidb/store/mockstore/mocktikv" + "github.com/pingcap/tidb/store/tikv/tikvrpc" +) + +type testCommitterSuite struct { + OneByOneSuite + cluster *mocktikv.Cluster + store *tikvStore +} + +var _ = Suite(&testCommitterSuite{}) + +func (s *testCommitterSuite) SetUpSuite(c *C) { + ManagedLockTTL = 3000 // 3s + s.OneByOneSuite.SetUpSuite(c) +} + +func (s *testCommitterSuite) SetUpTest(c *C) { + s.cluster = mocktikv.NewCluster() + mocktikv.BootstrapWithMultiRegions(s.cluster, []byte("a"), []byte("b"), []byte("c")) + mvccStore, err := mocktikv.NewMVCCLevelDB("") + c.Assert(err, IsNil) + client := mocktikv.NewRPCClient(s.cluster, mvccStore) + pdCli := &codecPDClient{mocktikv.NewPDClient(s.cluster)} + spkv := NewMockSafePointKV() + store, err := newTikvStore("mocktikv-store", pdCli, spkv, client, false) + c.Assert(err, IsNil) + s.store = store + CommitMaxBackoff = 2000 +} + +func (s *testCommitterSuite) TearDownSuite(c *C) { + CommitMaxBackoff = 20000 + s.store.Close() + s.OneByOneSuite.TearDownSuite(c) +} + +func (s *testCommitterSuite) begin(c *C) *tikvTxn { + txn, err := s.store.Begin() + c.Assert(err, IsNil) + return txn.(*tikvTxn) +} + +func (s *testCommitterSuite) checkValues(c *C, m map[string]string) { + txn := s.begin(c) + for k, v := range m { + val, err := txn.Get(context.TODO(), []byte(k)) + c.Assert(err, IsNil) + c.Assert(string(val), Equals, v) + } +} + +func (s *testCommitterSuite) mustCommit(c *C, m map[string]string) { + txn := s.begin(c) + for k, v := range m { + err := txn.Set([]byte(k), []byte(v)) + c.Assert(err, IsNil) + } + err := txn.Commit(context.Background()) + c.Assert(err, IsNil) + + s.checkValues(c, m) +} + +func randKV(keyLen, valLen int) (string, string) { + const letters = "abc" + k, v := make([]byte, keyLen), make([]byte, valLen) + for i := range k { + k[i] = letters[rand.Intn(len(letters))] + } + for i := range v { + v[i] = letters[rand.Intn(len(letters))] + } + return string(k), string(v) +} + +func (s *testCommitterSuite) TestCommitRollback(c *C) { + s.mustCommit(c, map[string]string{ + "a": "a", + "b": "b", + "c": "c", + }) + + txn := s.begin(c) + txn.Set([]byte("a"), []byte("a1")) + txn.Set([]byte("b"), []byte("b1")) + txn.Set([]byte("c"), []byte("c1")) + + s.mustCommit(c, map[string]string{ + "c": "c2", + }) + + err := txn.Commit(context.Background()) + c.Assert(err, NotNil) + + s.checkValues(c, map[string]string{ + "a": "a", + "b": "b", + "c": "c2", + }) +} + +func (s *testCommitterSuite) TestPrewriteRollback(c *C) { + s.mustCommit(c, map[string]string{ + "a": "a0", + "b": "b0", + }) + + ctx := context.Background() + txn1 := s.begin(c) + err := txn1.Set([]byte("a"), []byte("a1")) + c.Assert(err, IsNil) + err = txn1.Set([]byte("b"), []byte("b1")) + c.Assert(err, IsNil) + committer, err := newTwoPhaseCommitterWithInit(txn1, 0) + c.Assert(err, IsNil) + err = committer.prewriteKeys(NewBackoffer(ctx, PrewriteMaxBackoff), committer.keys) + c.Assert(err, IsNil) + + txn2 := s.begin(c) + v, err := 
txn2.Get(context.TODO(), []byte("a")) + c.Assert(err, IsNil) + c.Assert(v, BytesEquals, []byte("a0")) + + err = committer.prewriteKeys(NewBackoffer(ctx, PrewriteMaxBackoff), committer.keys) + if err != nil { + // Retry. + txn1 = s.begin(c) + err = txn1.Set([]byte("a"), []byte("a1")) + c.Assert(err, IsNil) + err = txn1.Set([]byte("b"), []byte("b1")) + c.Assert(err, IsNil) + committer, err = newTwoPhaseCommitterWithInit(txn1, 0) + c.Assert(err, IsNil) + err = committer.prewriteKeys(NewBackoffer(ctx, PrewriteMaxBackoff), committer.keys) + c.Assert(err, IsNil) + } + committer.commitTS, err = s.store.oracle.GetTimestamp(ctx) + c.Assert(err, IsNil) + err = committer.commitKeys(NewBackoffer(ctx, CommitMaxBackoff), [][]byte{[]byte("a")}) + c.Assert(err, IsNil) + + txn3 := s.begin(c) + v, err = txn3.Get(context.TODO(), []byte("b")) + c.Assert(err, IsNil) + c.Assert(v, BytesEquals, []byte("b1")) +} + +func (s *testCommitterSuite) TestContextCancel(c *C) { + txn1 := s.begin(c) + err := txn1.Set([]byte("a"), []byte("a1")) + c.Assert(err, IsNil) + err = txn1.Set([]byte("b"), []byte("b1")) + c.Assert(err, IsNil) + committer, err := newTwoPhaseCommitterWithInit(txn1, 0) + c.Assert(err, IsNil) + + bo := NewBackoffer(context.Background(), PrewriteMaxBackoff) + backoffer, cancel := bo.Fork() + cancel() // cancel the context + err = committer.prewriteKeys(backoffer, committer.keys) + c.Assert(errors.Cause(err), Equals, context.Canceled) +} + +func (s *testCommitterSuite) TestContextCancel2(c *C) { + txn := s.begin(c) + err := txn.Set([]byte("a"), []byte("a")) + c.Assert(err, IsNil) + err = txn.Set([]byte("b"), []byte("b")) + c.Assert(err, IsNil) + ctx, cancel := context.WithCancel(context.Background()) + err = txn.Commit(ctx) + c.Assert(err, IsNil) + cancel() + // Secondary keys should not be canceled. + time.Sleep(time.Millisecond * 20) + c.Assert(s.isKeyLocked(c, []byte("b")), IsFalse) +} + +func (s *testCommitterSuite) mustGetRegionID(c *C, key []byte) uint64 { + loc, err := s.store.regionCache.LocateKey(NewBackoffer(context.Background(), getMaxBackoff), key) + c.Assert(err, IsNil) + return loc.Region.id +} + +func (s *testCommitterSuite) isKeyLocked(c *C, key []byte) bool { + ver, err := s.store.CurrentVersion() + c.Assert(err, IsNil) + bo := NewBackoffer(context.Background(), getMaxBackoff) + req := tikvrpc.NewRequest(tikvrpc.CmdGet, &kvrpcpb.GetRequest{ + Key: key, + Version: ver.Ver, + }) + loc, err := s.store.regionCache.LocateKey(bo, key) + c.Assert(err, IsNil) + resp, err := s.store.SendReq(bo, req, loc.Region, readTimeoutShort) + c.Assert(err, IsNil) + c.Assert(resp.Resp, NotNil) + keyErr := (resp.Resp.(*kvrpcpb.GetResponse)).GetError() + return keyErr.GetLocked() != nil +} + +func (s *testCommitterSuite) TestPrewriteCancel(c *C) { + // Setup region delays for key "b" and "c". + delays := map[uint64]time.Duration{ + s.mustGetRegionID(c, []byte("b")): time.Millisecond * 10, + s.mustGetRegionID(c, []byte("c")): time.Millisecond * 20, + } + s.store.client = &slowClient{ + Client: s.store.client, + regionDelays: delays, + } + + txn1, txn2 := s.begin(c), s.begin(c) + // txn2 writes "b" + err := txn2.Set([]byte("b"), []byte("b2")) + c.Assert(err, IsNil) + err = txn2.Commit(context.Background()) + c.Assert(err, IsNil) + // txn1 writes "a"(PK), "b", "c" on different regions. + // "b" will return an error and cancel commit. 
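	// (txn2 has already committed "b" with a commitTS greater than txn1's
	// startTS, so txn1's prewrite on that region reports a write conflict.)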
+ err = txn1.Set([]byte("a"), []byte("a1")) + c.Assert(err, IsNil) + err = txn1.Set([]byte("b"), []byte("b1")) + c.Assert(err, IsNil) + err = txn1.Set([]byte("c"), []byte("c1")) + c.Assert(err, IsNil) + err = txn1.Commit(context.Background()) + c.Assert(err, NotNil) + // "c" should be cleaned up in reasonable time. + for i := 0; i < 50; i++ { + if !s.isKeyLocked(c, []byte("c")) { + return + } + time.Sleep(time.Millisecond * 10) + } + c.Fail() +} + +// slowClient wraps rpcClient and makes some regions respond with delay. +type slowClient struct { + Client + regionDelays map[uint64]time.Duration +} + +func (c *slowClient) SendReq(ctx context.Context, addr string, req *tikvrpc.Request, timeout time.Duration) (*tikvrpc.Response, error) { + for id, delay := range c.regionDelays { + reqCtx := &req.Context + if reqCtx.GetRegionId() == id { + time.Sleep(delay) + } + } + return c.Client.SendRequest(ctx, addr, req, timeout) +} + +func (s *testCommitterSuite) TestIllegalTso(c *C) { + txn := s.begin(c) + data := map[string]string{ + "name": "aa", + "age": "12", + } + for k, v := range data { + err := txn.Set([]byte(k), []byte(v)) + c.Assert(err, IsNil) + } + // make start ts bigger. + txn.startTS = uint64(math.MaxUint64) + err := txn.Commit(context.Background()) + c.Assert(err, NotNil) + errMsgMustContain(c, err, "invalid txnStartTS") +} + +func errMsgMustContain(c *C, err error, msg string) { + c.Assert(strings.Contains(err.Error(), msg), IsTrue) +} + +func newTwoPhaseCommitterWithInit(txn *tikvTxn, connID uint64) (*twoPhaseCommitter, error) { + c, err := newTwoPhaseCommitter(txn, connID) + if err != nil { + return nil, errors.Trace(err) + } + if err = c.initKeysAndMutations(); err != nil { + return nil, errors.Trace(err) + } + return c, nil +} + +func (s *testCommitterSuite) TestCommitBeforePrewrite(c *C) { + txn := s.begin(c) + err := txn.Set([]byte("a"), []byte("a1")) + c.Assert(err, IsNil) + committer, err := newTwoPhaseCommitterWithInit(txn, 0) + c.Assert(err, IsNil) + ctx := context.Background() + err = committer.cleanupKeys(NewBackoffer(ctx, cleanupMaxBackoff), committer.keys) + c.Assert(err, IsNil) + err = committer.prewriteKeys(NewBackoffer(ctx, PrewriteMaxBackoff), committer.keys) + c.Assert(err, NotNil) + errMsgMustContain(c, err, "conflictCommitTS") +} + +func (s *testCommitterSuite) TestPrewritePrimaryKeyFailed(c *C) { + // commit (a,a1) + txn1 := s.begin(c) + err := txn1.Set([]byte("a"), []byte("a1")) + c.Assert(err, IsNil) + err = txn1.Commit(context.Background()) + c.Assert(err, IsNil) + + // check a + txn := s.begin(c) + v, err := txn.Get(context.TODO(), []byte("a")) + c.Assert(err, IsNil) + c.Assert(v, BytesEquals, []byte("a1")) + + // set txn2's startTs before txn1's + txn2 := s.begin(c) + txn2.startTS = txn1.startTS - 1 + err = txn2.Set([]byte("a"), []byte("a2")) + c.Assert(err, IsNil) + err = txn2.Set([]byte("b"), []byte("b2")) + c.Assert(err, IsNil) + // prewrite:primary a failed, b success + err = txn2.Commit(context.Background()) + c.Assert(err, NotNil) + + // txn2 failed with a rollback for record a. + txn = s.begin(c) + v, err = txn.Get(context.TODO(), []byte("a")) + c.Assert(err, IsNil) + c.Assert(v, BytesEquals, []byte("a1")) + _, err = txn.Get(context.TODO(), []byte("b")) + errMsgMustContain(c, err, "key not exist") + + // clean again, shouldn't be failed when a rollback already exist. 
+ ctx := context.Background() + committer, err := newTwoPhaseCommitterWithInit(txn2, 0) + c.Assert(err, IsNil) + err = committer.cleanupKeys(NewBackoffer(ctx, cleanupMaxBackoff), committer.keys) + c.Assert(err, IsNil) + + // check the data after rollback twice. + txn = s.begin(c) + v, err = txn.Get(context.TODO(), []byte("a")) + c.Assert(err, IsNil) + c.Assert(v, BytesEquals, []byte("a1")) + + // update data in a new txn, should be success. + err = txn.Set([]byte("a"), []byte("a3")) + c.Assert(err, IsNil) + err = txn.Commit(context.Background()) + c.Assert(err, IsNil) + // check value + txn = s.begin(c) + v, err = txn.Get(context.TODO(), []byte("a")) + c.Assert(err, IsNil) + c.Assert(v, BytesEquals, []byte("a3")) +} + +func (s *testCommitterSuite) TestWrittenKeysOnConflict(c *C) { + // This test checks that when there is a write conflict, written keys is collected, + // so we can use it to clean up keys. + region, _ := s.cluster.GetRegionByKey([]byte("x")) + newRegionID := s.cluster.AllocID() + newPeerID := s.cluster.AllocID() + s.cluster.Split(region.Id, newRegionID, []byte("y"), []uint64{newPeerID}, newPeerID) + var totalTime time.Duration + for i := 0; i < 10; i++ { + txn1 := s.begin(c) + txn2 := s.begin(c) + txn2.Set([]byte("x1"), []byte("1")) + committer2, err := newTwoPhaseCommitterWithInit(txn2, 2) + c.Assert(err, IsNil) + err = committer2.execute(context.Background()) + c.Assert(err, IsNil) + txn1.Set([]byte("x1"), []byte("1")) + txn1.Set([]byte("y1"), []byte("2")) + committer1, err := newTwoPhaseCommitterWithInit(txn1, 2) + c.Assert(err, IsNil) + err = committer1.execute(context.Background()) + c.Assert(err, NotNil) + committer1.cleanWg.Wait() + txn3 := s.begin(c) + start := time.Now() + txn3.Get(context.TODO(), []byte("y1")) + totalTime += time.Since(start) + txn3.Commit(context.Background()) + } + c.Assert(totalTime, Less, time.Millisecond*200) +} + +func (s *testCommitterSuite) TestPrewriteTxnSize(c *C) { + // Prepare two regions first: (, 100) and [100, ) + region, _ := s.cluster.GetRegionByKey([]byte{50}) + newRegionID := s.cluster.AllocID() + newPeerID := s.cluster.AllocID() + s.cluster.Split(region.Id, newRegionID, []byte{100}, []uint64{newPeerID}, newPeerID) + + txn := s.begin(c) + var val [1024]byte + for i := byte(50); i < 120; i++ { + err := txn.Set([]byte{i}, val[:]) + c.Assert(err, IsNil) + } + + committer, err := newTwoPhaseCommitterWithInit(txn, 1) + c.Assert(err, IsNil) + + ctx := context.Background() + err = committer.prewriteKeys(NewBackoffer(ctx, PrewriteMaxBackoff), committer.keys) + c.Assert(err, IsNil) +} diff --git a/store/tikv/backoff.go b/store/tikv/backoff.go new file mode 100644 index 0000000..6a0af61 --- /dev/null +++ b/store/tikv/backoff.go @@ -0,0 +1,345 @@ +// Copyright 2016 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. 
+
+package tikv
+
+import (
+ "context"
+ "fmt"
+ "math"
+ "math/rand"
+ "strings"
+ "time"
+
+ "github.com/pingcap/errors"
+ "github.com/pingcap/log"
+ "github.com/pingcap/tidb/kv"
+ "github.com/pingcap/tidb/parser/mysql"
+ "github.com/pingcap/tidb/parser/terror"
+
+ "github.com/pingcap/tidb/util/logutil"
+ "go.uber.org/zap"
+ "go.uber.org/zap/zapcore"
+)
+
+const (
+ // NoJitter makes the backoff sequence strict exponential.
+ NoJitter = 1 + iota
+ // FullJitter applies random factors to strict exponential.
+ FullJitter
+ // EqualJitter is also randomized, but prevents very short sleeps.
+ EqualJitter
+ // DecorrJitter increases the maximum jitter based on the last random value.
+ DecorrJitter
+)
+
+// NewBackoffFn creates a backoff func which implements exponential backoff with
+// optional jitters.
+// See http://www.awsarchitectureblog.com/2015/03/backoff.html
+func NewBackoffFn(base, cap, jitter int) func(ctx context.Context, maxSleepMs int) int {
+ if base < 2 {
+ // To prevent panic in 'rand.Intn'.
+ base = 2
+ }
+ attempts := 0
+ lastSleep := base
+ return func(ctx context.Context, maxSleepMs int) int {
+ var sleep int
+ switch jitter {
+ case NoJitter:
+ sleep = expo(base, cap, attempts)
+ case FullJitter:
+ v := expo(base, cap, attempts)
+ sleep = rand.Intn(v)
+ case EqualJitter:
+ v := expo(base, cap, attempts)
+ sleep = v/2 + rand.Intn(v/2)
+ case DecorrJitter:
+ sleep = int(math.Min(float64(cap), float64(base+rand.Intn(lastSleep*3-base))))
+ }
+ logutil.BgLogger().Debug("backoff",
+ zap.Int("base", base),
+ zap.Int("sleep", sleep))
+
+ realSleep := sleep
+ // When maxSleepMs >= 0 is passed via `tikv.BackoffWithMaxSleep`, the sleep is capped at maxSleepMs milliseconds.
+ if maxSleepMs >= 0 && realSleep > maxSleepMs {
+ realSleep = maxSleepMs
+ }
+ select {
+ case <-time.After(time.Duration(realSleep) * time.Millisecond):
+ attempts++
+ lastSleep = sleep
+ return realSleep
+ case <-ctx.Done():
+ return 0
+ }
+ }
+}
+
+func expo(base, cap, n int) int {
+ return int(math.Min(float64(cap), float64(base)*math.Pow(2.0, float64(n))))
+}
+
+type backoffType int
+
+// Backoff types.
+const (
+ boTiKVRPC backoffType = iota
+ BoTxnLock
+ boTxnLockFast
+ BoPDRPC
+ BoRegionMiss
+ BoUpdateLeader
+ boServerBusy
+ boTxnNotFound
+)
+
+func (t backoffType) createFn(vars *kv.Variables) func(context.Context, int) int {
+ if vars.Hook != nil {
+ vars.Hook(t.String(), vars)
+ }
+ switch t {
+ case boTiKVRPC:
+ return NewBackoffFn(100, 2000, EqualJitter)
+ case BoTxnLock:
+ return NewBackoffFn(200, 3000, EqualJitter)
+ case boTxnLockFast:
+ return NewBackoffFn(vars.BackoffLockFast, 3000, EqualJitter)
+ case BoPDRPC:
+ return NewBackoffFn(500, 3000, EqualJitter)
+ case BoRegionMiss:
+ // Change the base time to 2ms, because the region may recover soon.
+ return NewBackoffFn(2, 500, NoJitter) + case boTxnNotFound: + return NewBackoffFn(2, 500, NoJitter) + case BoUpdateLeader: + return NewBackoffFn(1, 10, NoJitter) + case boServerBusy: + return NewBackoffFn(2000, 10000, EqualJitter) + } + return nil +} + +func (t backoffType) String() string { + switch t { + case boTiKVRPC: + return "tikvRPC" + case BoTxnLock: + return "txnLock" + case boTxnLockFast: + return "txnLockFast" + case BoPDRPC: + return "pdRPC" + case BoRegionMiss: + return "regionMiss" + case BoUpdateLeader: + return "updateLeader" + case boServerBusy: + return "serverBusy" + case boTxnNotFound: + return "txnNotFound" + } + return "" +} + +func (t backoffType) TError() error { + switch t { + case boTiKVRPC: + return ErrTiKVServerTimeout + case BoTxnLock, boTxnLockFast, boTxnNotFound: + return ErrResolveLockTimeout + case BoPDRPC: + return ErrPDServerTimeout + case BoRegionMiss, BoUpdateLeader: + return ErrRegionUnavailable + case boServerBusy: + return ErrTiKVServerBusy + } + return terror.ClassTiKV.New(mysql.ErrUnknown, mysql.MySQLErrName[mysql.ErrUnknown]) +} + +// Maximum total sleep time(in ms) for kv/cop commands. +const ( + copBuildTaskMaxBackoff = 5000 + tsoMaxBackoff = 15000 + scannerNextMaxBackoff = 20000 + batchGetMaxBackoff = 20000 + copNextMaxBackoff = 20000 + getMaxBackoff = 20000 + cleanupMaxBackoff = 20000 + GcOneRegionMaxBackoff = 20000 + GcResolveLockMaxBackoff = 100000 + deleteRangeOneRegionMaxBackoff = 100000 + rawkvMaxBackoff = 20000 + splitRegionBackoff = 20000 + maxSplitRegionsBackoff = 120000 + scatterRegionBackoff = 20000 + waitScatterRegionFinishBackoff = 120000 + locateRegionMaxBackoff = 20000 +) + +var ( + // CommitMaxBackoff is max sleep time of the 'commit' command + CommitMaxBackoff = 41000 + + // PrewriteMaxBackoff is max sleep time of the `pre-write` command. + PrewriteMaxBackoff = 20000 +) + +// Backoffer is a utility for retrying queries. +type Backoffer struct { + ctx context.Context + + fn map[backoffType]func(context.Context, int) int + maxSleep int + totalSleep int + errors []error + types []fmt.Stringer + vars *kv.Variables + noop bool + + backoffSleepMS map[backoffType]int + backoffTimes map[backoffType]int +} + +type txnStartCtxKeyType struct{} + +// txnStartKey is a key for transaction start_ts info in context.Context. +var txnStartKey = txnStartCtxKeyType{} + +// NewBackoffer creates a Backoffer with maximum sleep time(in ms). +func NewBackoffer(ctx context.Context, maxSleep int) *Backoffer { + return &Backoffer{ + ctx: ctx, + maxSleep: maxSleep, + vars: kv.DefaultVars, + } +} + +// NewNoopBackoff create a Backoffer do nothing just return error directly +func NewNoopBackoff(ctx context.Context) *Backoffer { + return &Backoffer{ctx: ctx, noop: true} +} + +// WithVars sets the kv.Variables to the Backoffer and return it. +func (b *Backoffer) WithVars(vars *kv.Variables) *Backoffer { + if vars != nil { + b.vars = vars + } + // maxSleep is the max sleep time in millisecond. + // When it is multiplied by BackOffWeight, it should not be greater than MaxInt32. + if math.MaxInt32/b.vars.BackOffWeight >= b.maxSleep { + b.maxSleep *= b.vars.BackOffWeight + } + return b +} + +// Backoff sleeps a while base on the backoffType and records the error message. +// It returns a retryable error if total sleep time exceeds maxSleep. 
+func (b *Backoffer) Backoff(typ backoffType, err error) error { + return b.BackoffWithMaxSleep(typ, -1, err) +} + +// BackoffWithMaxSleep sleeps a while base on the backoffType and records the error message +// and never sleep more than maxSleepMs for each sleep. +func (b *Backoffer) BackoffWithMaxSleep(typ backoffType, maxSleepMs int, err error) error { + if strings.Contains(err.Error(), mismatchClusterID) { + logutil.BgLogger().Fatal("critical error", zap.Error(err)) + } + select { + case <-b.ctx.Done(): + return errors.Trace(err) + default: + } + + b.errors = append(b.errors, errors.Errorf("%s at %s", err.Error(), time.Now().Format(time.RFC3339Nano))) + b.types = append(b.types, typ) + if b.noop || (b.maxSleep > 0 && b.totalSleep >= b.maxSleep) { + errMsg := fmt.Sprintf("%s backoffer.maxSleep %dms is exceeded, errors:", typ.String(), b.maxSleep) + for i, err := range b.errors { + // Print only last 3 errors for non-DEBUG log levels. + if log.GetLevel() == zapcore.DebugLevel || i >= len(b.errors)-3 { + errMsg += "\n" + err.Error() + } + } + logutil.BgLogger().Warn(errMsg) + // Use the first backoff type to generate a MySQL error. + return b.types[0].(backoffType).TError() + } + + // Lazy initialize. + if b.fn == nil { + b.fn = make(map[backoffType]func(context.Context, int) int) + } + f, ok := b.fn[typ] + if !ok { + f = typ.createFn(b.vars) + b.fn[typ] = f + } + + realSleep := f(b.ctx, maxSleepMs) + b.totalSleep += realSleep + if b.backoffSleepMS == nil { + b.backoffSleepMS = make(map[backoffType]int) + } + b.backoffSleepMS[typ] += realSleep + if b.backoffTimes == nil { + b.backoffTimes = make(map[backoffType]int) + } + b.backoffTimes[typ]++ + + var startTs interface{} + if ts := b.ctx.Value(txnStartKey); ts != nil { + startTs = ts + } + logutil.BgLogger().Debug("retry later", + zap.Error(err), + zap.Int("totalSleep", b.totalSleep), + zap.Int("maxSleep", b.maxSleep), + zap.Stringer("type", typ), + zap.Reflect("txnStartTS", startTs)) + return nil +} + +func (b *Backoffer) String() string { + if b.totalSleep == 0 { + return "" + } + return fmt.Sprintf(" backoff(%dms %v)", b.totalSleep, b.types) +} + +// Clone creates a new Backoffer which keeps current Backoffer's sleep time and errors, and shares +// current Backoffer's context. +func (b *Backoffer) Clone() *Backoffer { + return &Backoffer{ + ctx: b.ctx, + maxSleep: b.maxSleep, + totalSleep: b.totalSleep, + errors: b.errors, + vars: b.vars, + } +} + +// Fork creates a new Backoffer which keeps current Backoffer's sleep time and errors, and holds +// a child context of current Backoffer's context. +func (b *Backoffer) Fork() (*Backoffer, context.CancelFunc) { + ctx, cancel := context.WithCancel(b.ctx) + return &Backoffer{ + ctx: ctx, + maxSleep: b.maxSleep, + totalSleep: b.totalSleep, + errors: b.errors, + vars: b.vars, + }, cancel +} diff --git a/store/tikv/backoff_test.go b/store/tikv/backoff_test.go new file mode 100644 index 0000000..ddf7d1f --- /dev/null +++ b/store/tikv/backoff_test.go @@ -0,0 +1,42 @@ +// Copyright 2019 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. 
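Editor's note: before the tests, a minimal sketch of how callers in this package typically drive a Backoffer (assuming it sits in package tikv next to the code above): retry an operation until it succeeds or Backoff reports that the sleep budget is exhausted.

// Hypothetical helper, not part of the change: retry `do` under the
// BoRegionMiss policy until it succeeds or the Backoffer gives up.
func fetchWithRetry(ctx context.Context, do func() error) error {
    bo := NewBackoffer(ctx, 5000) // 5s total sleep budget, arbitrary for the sketch
    for {
        err := do()
        if err == nil {
            return nil
        }
        // A non-nil return means totalSleep exceeded maxSleep; the mapped
        // MySQL error (ErrRegionUnavailable for BoRegionMiss) is surfaced.
        if berr := bo.Backoff(BoRegionMiss, err); berr != nil {
            return berr
        }
    }
}

The test file that follows exercises the related BackoffWithMaxSleep path, where each individual sleep is additionally capped.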
+ +package tikv + +import ( + "context" + "errors" + . "github.com/pingcap/check" +) + +type testBackoffSuite struct { + OneByOneSuite + store *tikvStore +} + +var _ = Suite(&testBackoffSuite{}) + +func (s *testBackoffSuite) SetUpTest(c *C) { + s.store = NewTestStore(c).(*tikvStore) +} + +func (s *testBackoffSuite) TearDownTest(c *C) { + s.store.Close() +} + +func (s *testBackoffSuite) TestBackoffWithMax(c *C) { + b := NewBackoffer(context.TODO(), 2000) + err := b.BackoffWithMaxSleep(boTxnLockFast, 30, errors.New("test")) + c.Assert(err, IsNil) + c.Assert(b.totalSleep, Equals, 30) +} diff --git a/store/tikv/client.go b/store/tikv/client.go new file mode 100644 index 0000000..49ee695 --- /dev/null +++ b/store/tikv/client.go @@ -0,0 +1,232 @@ +// Copyright 2016 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package tikv provides tcp connection to kvserver. +package tikv + +import ( + "context" + "math" + "sync" + "sync/atomic" + "time" + + "github.com/pingcap-incubator/tinykv/proto/pkg/tinykvpb" + "github.com/pingcap/errors" + "github.com/pingcap/tidb/parser/terror" + "github.com/pingcap/tidb/store/tikv/tikvrpc" + "google.golang.org/grpc" + "google.golang.org/grpc/backoff" + "google.golang.org/grpc/keepalive" +) + +// MaxRecvMsgSize set max gRPC receive message size received from server. If any message size is larger than +// current value, an error will be reported from gRPC. +var MaxRecvMsgSize = math.MaxInt64 + +// Timeout durations. +var ( + dialTimeout = 5 * time.Second + readTimeoutShort = 20 * time.Second // For requests that read/write several key-values. + ReadTimeoutMedium = 60 * time.Second // For requests that may need scan region. + ReadTimeoutLong = 150 * time.Second // For requests that may need scan region multiple times. + GCTimeout = 5 * time.Minute + UnsafeDestroyRangeTimeout = 5 * time.Minute +) + +const ( + grpcInitialWindowSize = 1 << 30 + grpcInitialConnWindowSize = 1 << 30 +) + +// Client is a client that sends RPC. +// It should not be used after calling Close(). +type Client interface { + // Close should release all data. + Close() error + // SendRequest sends Request. + SendRequest(ctx context.Context, addr string, req *tikvrpc.Request, timeout time.Duration) (*tikvrpc.Response, error) +} + +type connArray struct { + // The target host. 
+ target string + + index uint32 + v []*grpc.ClientConn + done chan struct{} +} + +func newConnArray(maxSize uint, addr string, idleNotify *uint32) (*connArray, error) { + a := &connArray{ + index: 0, + v: make([]*grpc.ClientConn, maxSize), + done: make(chan struct{}), + } + if err := a.Init(addr, idleNotify); err != nil { + return nil, err + } + return a, nil +} + +func (a *connArray) Init(addr string, idleNotify *uint32) error { + a.target = addr + + opt := grpc.WithInsecure() + + var ( + unaryInterceptor grpc.UnaryClientInterceptor + streamInterceptor grpc.StreamClientInterceptor + ) + + for i := range a.v { + ctx, cancel := context.WithTimeout(context.Background(), dialTimeout) + conn, err := grpc.DialContext( + ctx, + addr, + opt, + grpc.WithInitialWindowSize(grpcInitialWindowSize), + grpc.WithInitialConnWindowSize(grpcInitialConnWindowSize), + grpc.WithUnaryInterceptor(unaryInterceptor), + grpc.WithStreamInterceptor(streamInterceptor), + grpc.WithDefaultCallOptions(grpc.MaxCallRecvMsgSize(MaxRecvMsgSize)), + grpc.WithConnectParams(grpc.ConnectParams{ + Backoff: backoff.Config{ + BaseDelay: 100 * time.Millisecond, // Default was 1s. + Multiplier: 1.6, // Default + Jitter: 0.2, // Default + MaxDelay: 3 * time.Second, // Default was 120s. + }, + MinConnectTimeout: dialTimeout, + }), + grpc.WithKeepaliveParams(keepalive.ClientParameters{ + Time: time.Duration(10) * time.Second, + Timeout: time.Duration(3) * time.Second, + PermitWithoutStream: true, + }), + ) + cancel() + if err != nil { + // Cleanup if the initialization fails. + a.Close() + return errors.Trace(err) + } + a.v[i] = conn + } + + return nil +} + +func (a *connArray) Get() *grpc.ClientConn { + next := atomic.AddUint32(&a.index, 1) % uint32(len(a.v)) + return a.v[next] +} + +func (a *connArray) Close() { + for i, c := range a.v { + if c != nil { + err := c.Close() + terror.Log(errors.Trace(err)) + a.v[i] = nil + } + } + + close(a.done) +} + +// rpcClient is RPC client struct. +// TODO: Add flow control between RPC clients in TiDB ond RPC servers in TiKV. +// Since we use shared client connection to communicate to the same TiKV, it's possible +// that there are too many concurrent requests which overload the service of TiKV. +type rpcClient struct { + sync.RWMutex + + conns map[string]*connArray + + idleNotify uint32 + // Periodically check whether there is any connection that is idle and then close and remove these idle connections. + // Implement background cleanup. + isClosed bool +} + +func newRPCClient() *rpcClient { + return &rpcClient{ + conns: make(map[string]*connArray), + } +} + +func (c *rpcClient) getConnArray(addr string) (*connArray, error) { + c.RLock() + if c.isClosed { + c.RUnlock() + return nil, errors.Errorf("rpcClient is closed") + } + array, ok := c.conns[addr] + c.RUnlock() + if !ok { + var err error + array, err = c.createConnArray(addr) + if err != nil { + return nil, err + } + } + return array, nil +} + +func (c *rpcClient) createConnArray(addr string) (*connArray, error) { + c.Lock() + defer c.Unlock() + array, ok := c.conns[addr] + if !ok { + var err error + array, err = newConnArray(4, addr, &c.idleNotify) + if err != nil { + return nil, err + } + c.conns[addr] = array + } + return array, nil +} + +func (c *rpcClient) closeConns() { + c.Lock() + if !c.isClosed { + c.isClosed = true + // close all connections + for _, array := range c.conns { + array.Close() + } + } + c.Unlock() +} + +// SendRequest sends a Request to server and receives Response. 
+func (c *rpcClient) SendRequest(ctx context.Context, addr string, req *tikvrpc.Request, timeout time.Duration) (*tikvrpc.Response, error) { + connArray, err := c.getConnArray(addr) + if err != nil { + return nil, errors.Trace(err) + } + + clientConn := connArray.Get() + client := tinykvpb.NewTinyKvClient(clientConn) + + ctx1, cancel := context.WithTimeout(ctx, timeout) + defer cancel() + return tikvrpc.CallRPC(ctx1, client, req) +} + +func (c *rpcClient) Close() error { + // TODO: add a unit test for SendRequest After Closed + c.closeConns() + return nil +} diff --git a/store/tikv/client_test.go b/store/tikv/client_test.go new file mode 100644 index 0000000..ba30817 --- /dev/null +++ b/store/tikv/client_test.go @@ -0,0 +1,47 @@ +// Copyright 2016 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package tikv + +import ( + . "github.com/pingcap/check" + "testing" +) + +func TestT(t *testing.T) { + CustomVerboseFlag = true + TestingT(t) +} + +type testClientSuite struct { + OneByOneSuite +} + +var _ = Suite(&testClientSuite{}) + +func (s *testClientSuite) TestConn(c *C) { + client := newRPCClient() + + addr := "127.0.0.1:6379" + conn1, err := client.getConnArray(addr) + c.Assert(err, IsNil) + + conn2, err := client.getConnArray(addr) + c.Assert(err, IsNil) + c.Assert(conn2.Get(), Not(Equals), conn1.Get()) + + client.Close() + conn3, err := client.getConnArray(addr) + c.Assert(err, NotNil) + c.Assert(conn3, IsNil) +} diff --git a/store/tikv/coprocessor.go b/store/tikv/coprocessor.go new file mode 100644 index 0000000..5d6428b --- /dev/null +++ b/store/tikv/coprocessor.go @@ -0,0 +1,790 @@ +// Copyright 2016 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package tikv + +import ( + "bytes" + "context" + "fmt" + "sort" + "strings" + "sync" + "sync/atomic" + "time" + + "github.com/cznic/mathutil" + "github.com/pingcap-incubator/tinykv/proto/pkg/coprocessor" + "github.com/pingcap-incubator/tinykv/proto/pkg/kvrpcpb" + "github.com/pingcap/errors" + "github.com/pingcap/failpoint" + "github.com/pingcap/tidb/kv" + "github.com/pingcap/tidb/store/tikv/tikvrpc" + "github.com/pingcap/tidb/util/logutil" + "go.uber.org/zap" +) + +// CopClient is coprocessor client. +type CopClient struct { + kv.RequestTypeSupportedChecker + store *tikvStore +} + +// Send builds the request and gets the coprocessor iterator response. 
+func (c *CopClient) Send(ctx context.Context, req *kv.Request, vars *kv.Variables) kv.Response { + ctx = context.WithValue(ctx, txnStartKey, req.StartTs) + bo := NewBackoffer(ctx, copBuildTaskMaxBackoff).WithVars(vars) + tasks, err := buildCopTasks(bo, c.store.regionCache, &copRanges{mid: req.KeyRanges}, req) + if err != nil { + return copErrorResponse{err} + } + it := &copIterator{ + store: c.store, + req: req, + concurrency: req.Concurrency, + finishCh: make(chan struct{}), + vars: vars, + } + it.minCommitTSPushed.data = make(map[uint64]struct{}, 5) + it.tasks = tasks + if it.concurrency > len(tasks) { + it.concurrency = len(tasks) + } + if it.concurrency < 1 { + // Make sure that there is at least one worker. + it.concurrency = 1 + } + if it.req.KeepOrder { + it.sendRate = newRateLimit(2 * it.concurrency) + } else { + it.respChan = make(chan *copResponse, it.concurrency) + } + it.open(ctx) + return it +} + +// copTask contains a related Region and KeyRange for a kv.Request. +type copTask struct { + region RegionVerID + ranges *copRanges + + respChan chan *copResponse + storeAddr string + cmdType tikvrpc.CmdType +} + +func (r *copTask) String() string { + return fmt.Sprintf("region(%d %d %d) ranges(%d) store(%s)", + r.region.id, r.region.confVer, r.region.ver, r.ranges.len(), r.storeAddr) +} + +// copRanges is like []kv.KeyRange, but may has extra elements at head/tail. +// It's for avoiding alloc big slice during build copTask. +type copRanges struct { + first *kv.KeyRange + mid []kv.KeyRange + last *kv.KeyRange +} + +func (r *copRanges) String() string { + var s string + r.do(func(ran *kv.KeyRange) { + s += fmt.Sprintf("[%q, %q]", ran.StartKey, ran.EndKey) + }) + return s +} + +func (r *copRanges) len() int { + var l int + if r.first != nil { + l++ + } + l += len(r.mid) + if r.last != nil { + l++ + } + return l +} + +func (r *copRanges) at(i int) kv.KeyRange { + if r.first != nil { + if i == 0 { + return *r.first + } + i-- + } + if i < len(r.mid) { + return r.mid[i] + } + return *r.last +} + +func (r *copRanges) slice(from, to int) *copRanges { + var ran copRanges + if r.first != nil { + if from == 0 && to > 0 { + ran.first = r.first + } + if from > 0 { + from-- + } + if to > 0 { + to-- + } + } + if to <= len(r.mid) { + ran.mid = r.mid[from:to] + } else { + if from <= len(r.mid) { + ran.mid = r.mid[from:] + } + if from < to { + ran.last = r.last + } + } + return &ran +} + +func (r *copRanges) do(f func(ran *kv.KeyRange)) { + if r.first != nil { + f(r.first) + } + for _, ran := range r.mid { + f(&ran) + } + if r.last != nil { + f(r.last) + } +} + +func (r *copRanges) toPBRanges() []*coprocessor.KeyRange { + ranges := make([]*coprocessor.KeyRange, 0, r.len()) + r.do(func(ran *kv.KeyRange) { + ranges = append(ranges, &coprocessor.KeyRange{ + Start: ran.StartKey, + End: ran.EndKey, + }) + }) + return ranges +} + +// split ranges into (left, right) by key. +func (r *copRanges) split(key []byte) (*copRanges, *copRanges) { + n := sort.Search(r.len(), func(i int) bool { + cur := r.at(i) + return len(cur.EndKey) == 0 || bytes.Compare(cur.EndKey, key) > 0 + }) + // If a range p contains the key, it will split to 2 parts. 
+ if n < r.len() { + p := r.at(n) + if bytes.Compare(key, p.StartKey) > 0 { + left := r.slice(0, n) + left.last = &kv.KeyRange{StartKey: p.StartKey, EndKey: key} + right := r.slice(n+1, r.len()) + right.first = &kv.KeyRange{StartKey: key, EndKey: p.EndKey} + return left, right + } + } + return r.slice(0, n), r.slice(n, r.len()) +} + +// rangesPerTask limits the length of the ranges slice sent in one copTask. +const rangesPerTask = 25000 + +func buildCopTasks(bo *Backoffer, cache *RegionCache, ranges *copRanges, req *kv.Request) ([]*copTask, error) { + start := time.Now() + cmdType := tikvrpc.CmdCop + + rangesLen := ranges.len() + var tasks []*copTask + appendTask := func(regionWithRangeInfo *KeyLocation, ranges *copRanges) { + // TiKV will return gRPC error if the message is too large. So we need to limit the length of the ranges slice + // to make sure the message can be sent successfully. + rLen := ranges.len() + for i := 0; i < rLen; { + nextI := mathutil.Min(i+rangesPerTask, rLen) + tasks = append(tasks, &copTask{ + region: regionWithRangeInfo.Region, + ranges: ranges.slice(i, nextI), + // Channel buffer is 2 for handling region split. + // In a common case, two region split tasks will not be blocked. + respChan: make(chan *copResponse, 2), + cmdType: cmdType, + }) + i = nextI + } + } + + err := splitRanges(bo, cache, ranges, appendTask) + if err != nil { + return nil, errors.Trace(err) + } + + if req.Desc { + reverseTasks(tasks) + } + if elapsed := time.Since(start); elapsed > time.Millisecond*500 { + logutil.BgLogger().Warn("buildCopTasks takes too much time", + zap.Duration("elapsed", elapsed), + zap.Int("range len", rangesLen), + zap.Int("task len", len(tasks))) + } + return tasks, nil +} + +func splitRanges(bo *Backoffer, cache *RegionCache, ranges *copRanges, fn func(regionWithRangeInfo *KeyLocation, ranges *copRanges)) error { + for ranges.len() > 0 { + loc, err := cache.LocateKey(bo, ranges.at(0).StartKey) + if err != nil { + return errors.Trace(err) + } + + // Iterate to the first range that is not complete in the region. + var i int + for ; i < ranges.len(); i++ { + r := ranges.at(i) + if !(loc.Contains(r.EndKey) || bytes.Equal(loc.EndKey, r.EndKey)) { + break + } + } + // All rest ranges belong to the same region. + if i == ranges.len() { + fn(loc, ranges) + break + } + + r := ranges.at(i) + if loc.Contains(r.StartKey) { + // Part of r is not in the region. We need to split it. + taskRanges := ranges.slice(0, i) + taskRanges.last = &kv.KeyRange{ + StartKey: r.StartKey, + EndKey: loc.EndKey, + } + fn(loc, taskRanges) + + ranges = ranges.slice(i+1, ranges.len()) + ranges.first = &kv.KeyRange{ + StartKey: loc.EndKey, + EndKey: r.EndKey, + } + } else { + // rs[i] is not in the region. + taskRanges := ranges.slice(0, i) + fn(loc, taskRanges) + ranges = ranges.slice(i, ranges.len()) + } + } + + return nil +} + +// SplitRegionRanges get the split ranges from pd region. 
+func SplitRegionRanges(bo *Backoffer, cache *RegionCache, keyRanges []kv.KeyRange) ([]kv.KeyRange, error) { + ranges := copRanges{mid: keyRanges} + + var ret []kv.KeyRange + appendRange := func(regionWithRangeInfo *KeyLocation, ranges *copRanges) { + for i := 0; i < ranges.len(); i++ { + ret = append(ret, ranges.at(i)) + } + } + + err := splitRanges(bo, cache, &ranges, appendRange) + if err != nil { + return nil, errors.Trace(err) + } + return ret, nil +} + +func reverseTasks(tasks []*copTask) { + for i := 0; i < len(tasks)/2; i++ { + j := len(tasks) - i - 1 + tasks[i], tasks[j] = tasks[j], tasks[i] + } +} + +type copIterator struct { + store *tikvStore + req *kv.Request + concurrency int + finishCh chan struct{} + + // If keepOrder, results are stored in copTask.respChan, read them out one by one. + tasks []*copTask + curr int + // sendRate controls the sending rate of copIteratorTaskSender, if keepOrder, + // to prevent all tasks being done (aka. all of the responses are buffered) + sendRate *rateLimit + + // Otherwise, results are stored in respChan. + respChan chan *copResponse + + vars *kv.Variables + + replicaReadSeed uint32 + + wg sync.WaitGroup + // closed represents when the Close is called. + // There are two cases we need to close the `finishCh` channel, one is when context is done, the other one is + // when the Close is called. we use atomic.CompareAndSwap `closed` to to make sure the channel is not closed twice. + closed uint32 + + minCommitTSPushed +} + +// copIteratorWorker receives tasks from copIteratorTaskSender, handles tasks and sends the copResponse to respChan. +type copIteratorWorker struct { + taskCh <-chan *copTask + wg *sync.WaitGroup + store *tikvStore + req *kv.Request + respChan chan<- *copResponse + finishCh <-chan struct{} + vars *kv.Variables + clientHelper + + replicaReadSeed uint32 +} + +// copIteratorTaskSender sends tasks to taskCh then wait for the workers to exit. +type copIteratorTaskSender struct { + taskCh chan<- *copTask + wg *sync.WaitGroup + tasks []*copTask + finishCh <-chan struct{} + respChan chan<- *copResponse + sendRate *rateLimit +} + +type copResponse struct { + pbResp *coprocessor.Response + err error + respSize int64 + respTime time.Duration +} + +// GetData implements the kv.ResultSubset GetData interface. +func (rs *copResponse) GetData() []byte { + return rs.pbResp.Data +} + +// MemSize returns how many bytes of memory this response use +func (rs *copResponse) MemSize() int64 { + if rs.respSize != 0 { + return rs.respSize + } + + // ignore rs.err + if rs.pbResp != nil { + // Using a approximate size since it's hard to get a accurate value. + rs.respSize += int64(rs.pbResp.Size()) + } + return rs.respSize +} + +func (rs *copResponse) RespTime() time.Duration { + return rs.respTime +} + +const minLogCopTaskTime = 300 * time.Millisecond + +// run is a worker function that get a copTask from channel, handle it and +// send the result back. +func (worker *copIteratorWorker) run(ctx context.Context) { + defer worker.wg.Done() + for task := range worker.taskCh { + respCh := worker.respChan + if respCh == nil { + respCh = task.respChan + } + + bo := NewBackoffer(ctx, copNextMaxBackoff).WithVars(worker.vars) + worker.handleTask(bo, task, respCh) + close(task.respChan) + select { + case <-worker.finishCh: + return + default: + } + } +} + +// open starts workers and sender goroutines. 
+func (it *copIterator) open(ctx context.Context) { + taskCh := make(chan *copTask, 1) + it.wg.Add(it.concurrency) + // Start it.concurrency number of workers to handle cop requests. + for i := 0; i < it.concurrency; i++ { + worker := &copIteratorWorker{ + taskCh: taskCh, + wg: &it.wg, + store: it.store, + req: it.req, + respChan: it.respChan, + finishCh: it.finishCh, + vars: it.vars, + clientHelper: clientHelper{ + LockResolver: it.store.lockResolver, + RegionCache: it.store.regionCache, + minCommitTSPushed: &it.minCommitTSPushed, + Client: it.store.client, + }, + + replicaReadSeed: it.replicaReadSeed, + } + go worker.run(ctx) + } + taskSender := &copIteratorTaskSender{ + taskCh: taskCh, + wg: &it.wg, + tasks: it.tasks, + finishCh: it.finishCh, + sendRate: it.sendRate, + } + taskSender.respChan = it.respChan + go taskSender.run() +} + +func (sender *copIteratorTaskSender) run() { + // Send tasks to feed the worker goroutines. + for _, t := range sender.tasks { + // If keepOrder, we must control the sending rate to prevent all tasks + // being done (aka. all of the responses are buffered) by copIteratorWorker. + // We keep the number of inflight tasks within the number of concurrency * 2. + // It sends one more task if a task has been finished in copIterator.Next. + if sender.sendRate != nil { + exit := sender.sendRate.getToken(sender.finishCh) + if exit { + break + } + } + exit := sender.sendToTaskCh(t) + if exit { + break + } + } + close(sender.taskCh) + + // Wait for worker goroutines to exit. + sender.wg.Wait() + if sender.respChan != nil { + close(sender.respChan) + } +} + +func (it *copIterator) recvFromRespCh(ctx context.Context, respCh <-chan *copResponse) (resp *copResponse, ok bool, exit bool) { + select { + case resp, ok = <-respCh: + case <-it.finishCh: + exit = true + case <-ctx.Done(): + // We select the ctx.Done() in the thread of `Next` instead of in the worker to avoid the cost of `WithCancel`. + if atomic.CompareAndSwapUint32(&it.closed, 0, 1) { + close(it.finishCh) + } + exit = true + } + return +} + +func (sender *copIteratorTaskSender) sendToTaskCh(t *copTask) (exit bool) { + select { + case sender.taskCh <- t: + case <-sender.finishCh: + exit = true + } + return +} + +func (worker *copIteratorWorker) sendToRespCh(resp *copResponse, respCh chan<- *copResponse, checkOOM bool) (exit bool) { + select { + case respCh <- resp: + case <-worker.finishCh: + exit = true + } + return +} + +// Next returns next coprocessor result. +// NOTE: Use nil to indicate finish, so if the returned ResultSubset is not nil, reader should continue to call Next(). +func (it *copIterator) Next(ctx context.Context) (kv.ResultSubset, error) { + var ( + resp *copResponse + ok bool + closed bool + ) + // If data order matters, response should be returned in the same order as copTask slice. + // Otherwise all responses are returned from a single channel. + if it.respChan != nil { + // Get next fetched resp from chan + resp, ok, closed = it.recvFromRespCh(ctx, it.respChan) + if !ok || closed { + return nil, nil + } + } else { + for { + if it.curr >= len(it.tasks) { + // Resp will be nil if iterator is finishCh. + return nil, nil + } + task := it.tasks[it.curr] + resp, ok, closed = it.recvFromRespCh(ctx, task.respChan) + if closed { + // Close() is already called, so Next() is invalid. + return nil, nil + } + if ok { + break + } + // Switch to next task. 
+ it.tasks[it.curr] = nil + it.curr++ + it.sendRate.putToken() + } + } + + if resp.err != nil { + return nil, errors.Trace(resp.err) + } + + err := it.store.CheckVisibility(it.req.StartTs) + if err != nil { + return nil, errors.Trace(err) + } + return resp, nil +} + +// handleTask handles single copTask, sends the result to channel, retry automatically on error. +func (worker *copIteratorWorker) handleTask(bo *Backoffer, task *copTask, respCh chan<- *copResponse) { + defer func() { + r := recover() + if r != nil { + logutil.BgLogger().Error("copIteratorWork meet panic", + zap.Reflect("r", r), + zap.Stack("stack trace")) + resp := &copResponse{err: errors.Errorf("%v", r)} + // if panic has happened, set checkOOM to false to avoid another panic. + worker.sendToRespCh(resp, respCh, false) + } + }() + remainTasks := []*copTask{task} + for len(remainTasks) > 0 { + tasks, err := worker.handleTaskOnce(bo, remainTasks[0], respCh) + if err != nil { + resp := &copResponse{err: errors.Trace(err)} + worker.sendToRespCh(resp, respCh, true) + return + } + if len(tasks) > 0 { + remainTasks = append(tasks, remainTasks[1:]...) + } else { + remainTasks = remainTasks[1:] + } + } +} + +// handleTaskOnce handles single copTask, successful results are send to channel. +// If error happened, returns error. If region split or meet lock, returns the remain tasks. +func (worker *copIteratorWorker) handleTaskOnce(bo *Backoffer, task *copTask, ch chan<- *copResponse) ([]*copTask, error) { + failpoint.Inject("handleTaskOnceError", func(val failpoint.Value) { + if val.(bool) { + failpoint.Return(nil, errors.New("mock handleTaskOnce error")) + } + }) + + req := tikvrpc.NewRequest(task.cmdType, &coprocessor.Request{ + Tp: worker.req.Tp, + StartTs: worker.req.StartTs, + Data: worker.req.Data, + Ranges: task.ranges.toPBRanges(), + }, kvrpcpb.Context{}) + startTime := time.Now() + resp, rpcCtx, storeAddr, err := worker.SendReqCtx(bo, req, task.region, ReadTimeoutMedium, task.storeAddr) + if err != nil { + return nil, errors.Trace(err) + } + // Set task.storeAddr field so its task.String() method have the store address information. + task.storeAddr = storeAddr + costTime := time.Since(startTime) + if costTime > minLogCopTaskTime { + worker.logTimeCopTask(costTime, task, bo, resp) + } + + return worker.handleCopResponse(bo, rpcCtx, &copResponse{pbResp: resp.Resp.(*coprocessor.Response)}, task, ch) +} + +type minCommitTSPushed struct { + data map[uint64]struct{} + sync.RWMutex +} + +func (m *minCommitTSPushed) Update(from []uint64) { + m.Lock() + for _, v := range from { + m.data[v] = struct{}{} + } + m.Unlock() +} + +func (m *minCommitTSPushed) Get() []uint64 { + m.RLock() + defer m.RUnlock() + if len(m.data) == 0 { + return nil + } + + ret := make([]uint64, 0, len(m.data)) + for k := range m.data { + ret = append(ret, k) + } + return ret +} + +// clientHelper wraps LockResolver and RegionRequestSender. +// It's introduced to support the new lock resolving pattern in the large transaction. +// In the large transaction protocol, sending requests and resolving locks are +// context-dependent. For example, when a send request meets a secondary lock, we'll +// call ResolveLock, and if the lock belongs to a large transaction, we may retry +// the request. If there is no context information about the resolved locks, we'll +// meet the secondary lock again and run into a deadloop. 
+type clientHelper struct { + *LockResolver + *RegionCache + *minCommitTSPushed + Client +} + +// ResolveLocks wraps the ResolveLocks function and store the resolved result. +func (ch *clientHelper) ResolveLocks(bo *Backoffer, callerStartTS uint64, locks []*Lock) (int64, error) { + msBeforeTxnExpired, resolvedLocks, err := ch.LockResolver.ResolveLocks(bo, callerStartTS, locks) + if err != nil { + return msBeforeTxnExpired, err + } + if len(resolvedLocks) > 0 { + ch.minCommitTSPushed.Update(resolvedLocks) + return 0, nil + } + return msBeforeTxnExpired, nil +} + +// SendReqCtx wraps the SendReqCtx function and use the resolved lock result in the kvrpcpb.Context. +func (ch *clientHelper) SendReqCtx(bo *Backoffer, req *tikvrpc.Request, regionID RegionVerID, timeout time.Duration, directStoreAddr string) (*tikvrpc.Response, *RPCContext, string, error) { + sender := NewRegionRequestSender(ch.RegionCache, ch.Client) + if len(directStoreAddr) > 0 { + sender.storeAddr = directStoreAddr + } + resp, ctx, err := sender.SendReqCtx(bo, req, regionID, timeout) + return resp, ctx, sender.storeAddr, err +} + +const ( + minLogBackoffTime = 100 +) + +func (worker *copIteratorWorker) logTimeCopTask(costTime time.Duration, task *copTask, bo *Backoffer, resp *tikvrpc.Response) { + logStr := fmt.Sprintf("[TIME_COP_PROCESS] resp_time:%s txnStartTS:%d region_id:%d store_addr:%s", costTime, worker.req.StartTs, task.region.id, task.storeAddr) + if bo.totalSleep > minLogBackoffTime { + backoffTypes := strings.Replace(fmt.Sprintf("%v", bo.types), " ", ",", -1) + logStr += fmt.Sprintf(" backoff_ms:%d backoff_types:%s", bo.totalSleep, backoffTypes) + } + logutil.BgLogger().Info(logStr) +} + +// handleCopResponse checks coprocessor Response for region split and lock, +// returns more tasks when that happens, or handles the response if no error. +func (worker *copIteratorWorker) handleCopResponse(bo *Backoffer, rpcCtx *RPCContext, resp *copResponse, task *copTask, ch chan<- *copResponse) ([]*copTask, error) { + if regionErr := resp.pbResp.GetRegionError(); regionErr != nil { + if err := bo.Backoff(BoRegionMiss, errors.New(regionErr.String())); err != nil { + return nil, errors.Trace(err) + } + // We may meet RegionError at the first packet, but not during visiting the stream. 
+ return buildCopTasks(bo, worker.store.regionCache, task.ranges, worker.req) + } + if lockErr := resp.pbResp.GetLocked(); lockErr != nil { + logutil.BgLogger().Debug("coprocessor encounters", + zap.Stringer("lock", lockErr)) + msBeforeExpired, err1 := worker.ResolveLocks(bo, worker.req.StartTs, []*Lock{NewLock(lockErr)}) + if err1 != nil { + return nil, errors.Trace(err1) + } + if msBeforeExpired > 0 { + if err := bo.BackoffWithMaxSleep(boTxnLockFast, int(msBeforeExpired), errors.New(lockErr.String())); err != nil { + return nil, errors.Trace(err) + } + } + return worker.buildCopTasksFromRemain(bo, task) + } + if otherErr := resp.pbResp.GetOtherError(); otherErr != "" { + err := errors.Errorf("other error: %s", otherErr) + logutil.BgLogger().Warn("other error", + zap.Uint64("txnStartTS", worker.req.StartTs), + zap.Uint64("regionID", task.region.id), + zap.String("storeAddr", task.storeAddr), + zap.Error(err)) + return nil, errors.Trace(err) + } + worker.sendToRespCh(resp, ch, true) + return nil, nil +} + +func (worker *copIteratorWorker) buildCopTasksFromRemain(bo *Backoffer, task *copTask) ([]*copTask, error) { + remainedRanges := task.ranges + return buildCopTasks(bo, worker.store.regionCache, remainedRanges, worker.req) +} + +func (it *copIterator) Close() error { + if atomic.CompareAndSwapUint32(&it.closed, 0, 1) { + close(it.finishCh) + } + it.wg.Wait() + return nil +} + +type rateLimit struct { + token chan struct{} +} + +func newRateLimit(n int) *rateLimit { + return &rateLimit{ + token: make(chan struct{}, n), + } +} + +func (r *rateLimit) getToken(done <-chan struct{}) (exit bool) { + select { + case <-done: + return true + case r.token <- struct{}{}: + return false + } +} + +func (r *rateLimit) putToken() { + select { + case <-r.token: + default: + panic("put a redundant token") + } +} + +// copErrorResponse returns error when calling Next() +type copErrorResponse struct{ error } + +func (it copErrorResponse) Next(ctx context.Context) (kv.ResultSubset, error) { + return nil, it.error +} + +func (it copErrorResponse) Close() error { + return nil +} diff --git a/store/tikv/coprocessor_test.go b/store/tikv/coprocessor_test.go new file mode 100644 index 0000000..cdeee11 --- /dev/null +++ b/store/tikv/coprocessor_test.go @@ -0,0 +1,217 @@ +// Copyright 2016 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package tikv + +import ( + "context" + "time" + + . 
"github.com/pingcap/check" + "github.com/pingcap/tidb/kv" + "github.com/pingcap/tidb/store/mockstore/mocktikv" +) + +type testCoprocessorSuite struct { + OneByOneSuite +} + +var _ = Suite(&testCoprocessorSuite{}) + +func (s *testCoprocessorSuite) TestRebuild(c *C) { + // nil --- 'm' --- nil + // <- 0 -> <- 1 -> + cluster := mocktikv.NewCluster() + storeID, regionIDs, peerIDs := mocktikv.BootstrapWithMultiRegions(cluster, []byte("m")) + pdCli := &codecPDClient{mocktikv.NewPDClient(cluster)} + cache := NewRegionCache(pdCli) + defer cache.Close() + bo := NewBackoffer(context.Background(), 3000) + + req := &kv.Request{} + tasks, err := buildCopTasks(bo, cache, buildCopRanges("a", "z"), req) + c.Assert(err, IsNil) + c.Assert(tasks, HasLen, 2) + s.taskEqual(c, tasks[0], regionIDs[0], "a", "m") + s.taskEqual(c, tasks[1], regionIDs[1], "m", "z") + + // nil -- 'm' -- 'q' -- nil + // <- 0 -> <--1-> <-2--> + regionIDs = append(regionIDs, cluster.AllocID()) + peerIDs = append(peerIDs, cluster.AllocID()) + cluster.Split(regionIDs[1], regionIDs[2], []byte("q"), []uint64{peerIDs[2]}, storeID) + cache.InvalidateCachedRegion(tasks[1].region) + + req.Desc = true + tasks, err = buildCopTasks(bo, cache, buildCopRanges("a", "z"), req) + c.Assert(err, IsNil) + c.Assert(tasks, HasLen, 3) + s.taskEqual(c, tasks[2], regionIDs[0], "a", "m") + s.taskEqual(c, tasks[1], regionIDs[1], "m", "q") + s.taskEqual(c, tasks[0], regionIDs[2], "q", "z") +} + +func buildKeyRanges(keys ...string) []kv.KeyRange { + var ranges []kv.KeyRange + for i := 0; i < len(keys); i += 2 { + ranges = append(ranges, kv.KeyRange{ + StartKey: []byte(keys[i]), + EndKey: []byte(keys[i+1]), + }) + } + return ranges +} + +func buildCopRanges(keys ...string) *copRanges { + ranges := buildKeyRanges(keys...) + return &copRanges{mid: ranges} +} + +func (s *testCoprocessorSuite) taskEqual(c *C, task *copTask, regionID uint64, keys ...string) { + c.Assert(task.region.id, Equals, regionID) + for i := 0; i < task.ranges.len(); i++ { + r := task.ranges.at(i) + c.Assert(string(r.StartKey), Equals, keys[2*i]) + c.Assert(string(r.EndKey), Equals, keys[2*i+1]) + } +} + +func (s *testCoprocessorSuite) TestCopRanges(c *C) { + ranges := []kv.KeyRange{ + {StartKey: []byte("a"), EndKey: []byte("b")}, + {StartKey: []byte("c"), EndKey: []byte("d")}, + {StartKey: []byte("e"), EndKey: []byte("f")}, + } + + s.checkEqual(c, &copRanges{mid: ranges}, ranges, true) + s.checkEqual(c, &copRanges{first: &ranges[0], mid: ranges[1:]}, ranges, true) + s.checkEqual(c, &copRanges{mid: ranges[:2], last: &ranges[2]}, ranges, true) + s.checkEqual(c, &copRanges{first: &ranges[0], mid: ranges[1:2], last: &ranges[2]}, ranges, true) +} + +func (s *testCoprocessorSuite) checkEqual(c *C, copRanges *copRanges, ranges []kv.KeyRange, slice bool) { + c.Assert(copRanges.len(), Equals, len(ranges)) + for i := range ranges { + c.Assert(copRanges.at(i), DeepEquals, ranges[i]) + } + if slice { + for i := 0; i <= copRanges.len(); i++ { + for j := i; j <= copRanges.len(); j++ { + s.checkEqual(c, copRanges.slice(i, j), ranges[i:j], false) + } + } + } +} + +func (s *testCoprocessorSuite) TestCopRangeSplit(c *C) { + first := &kv.KeyRange{StartKey: []byte("a"), EndKey: []byte("b")} + mid := []kv.KeyRange{ + {StartKey: []byte("c"), EndKey: []byte("d")}, + {StartKey: []byte("e"), EndKey: []byte("g")}, + {StartKey: []byte("l"), EndKey: []byte("o")}, + } + last := &kv.KeyRange{StartKey: []byte("q"), EndKey: []byte("t")} + left := true + right := false + + // input range: [c-d) [e-g) [l-o) + ranges := 
&copRanges{mid: mid} + s.testSplit(c, ranges, right, + splitCase{"c", buildCopRanges("c", "d", "e", "g", "l", "o")}, + splitCase{"d", buildCopRanges("e", "g", "l", "o")}, + splitCase{"f", buildCopRanges("f", "g", "l", "o")}, + ) + + // input range: [a-b) [c-d) [e-g) [l-o) + ranges = &copRanges{first: first, mid: mid} + s.testSplit(c, ranges, right, + splitCase{"a", buildCopRanges("a", "b", "c", "d", "e", "g", "l", "o")}, + splitCase{"c", buildCopRanges("c", "d", "e", "g", "l", "o")}, + splitCase{"m", buildCopRanges("m", "o")}, + ) + + // input range: [a-b) [c-d) [e-g) [l-o) [q-t) + ranges = &copRanges{first: first, mid: mid, last: last} + s.testSplit(c, ranges, right, + splitCase{"f", buildCopRanges("f", "g", "l", "o", "q", "t")}, + splitCase{"h", buildCopRanges("l", "o", "q", "t")}, + splitCase{"r", buildCopRanges("r", "t")}, + ) + + // input range: [c-d) [e-g) [l-o) + ranges = &copRanges{mid: mid} + s.testSplit(c, ranges, left, + splitCase{"m", buildCopRanges("c", "d", "e", "g", "l", "m")}, + splitCase{"g", buildCopRanges("c", "d", "e", "g")}, + splitCase{"g", buildCopRanges("c", "d", "e", "g")}, + ) + + // input range: [a-b) [c-d) [e-g) [l-o) + ranges = &copRanges{first: first, mid: mid} + s.testSplit(c, ranges, left, + splitCase{"d", buildCopRanges("a", "b", "c", "d")}, + splitCase{"d", buildCopRanges("a", "b", "c", "d")}, + splitCase{"o", buildCopRanges("a", "b", "c", "d", "e", "g", "l", "o")}, + ) + + // input range: [a-b) [c-d) [e-g) [l-o) [q-t) + ranges = &copRanges{first: first, mid: mid, last: last} + s.testSplit(c, ranges, left, + splitCase{"o", buildCopRanges("a", "b", "c", "d", "e", "g", "l", "o")}, + splitCase{"p", buildCopRanges("a", "b", "c", "d", "e", "g", "l", "o")}, + splitCase{"t", buildCopRanges("a", "b", "c", "d", "e", "g", "l", "o", "q", "t")}, + ) +} + +func (s *testCoprocessorSuite) TestRateLimit(c *C) { + done := make(chan struct{}, 1) + rl := newRateLimit(1) + c.Assert(rl.putToken, PanicMatches, "put a redundant token") + exit := rl.getToken(done) + c.Assert(exit, Equals, false) + rl.putToken() + c.Assert(rl.putToken, PanicMatches, "put a redundant token") + + exit = rl.getToken(done) + c.Assert(exit, Equals, false) + done <- struct{}{} + exit = rl.getToken(done) // blocked but exit + c.Assert(exit, Equals, true) + + sig := make(chan int, 1) + go func() { + exit = rl.getToken(done) // blocked + c.Assert(exit, Equals, false) + close(sig) + }() + time.Sleep(200 * time.Millisecond) + rl.putToken() + <-sig +} + +type splitCase struct { + key string + *copRanges +} + +func (s *testCoprocessorSuite) testSplit(c *C, ranges *copRanges, checkLeft bool, cases ...splitCase) { + for _, t := range cases { + left, right := ranges.split([]byte(t.key)) + expect := t.copRanges + if checkLeft { + s.checkEqual(c, left, expect.mid, false) + } else { + s.checkEqual(c, right, expect.mid, false) + } + } +} diff --git a/store/tikv/error.go b/store/tikv/error.go new file mode 100644 index 0000000..ca23b7b --- /dev/null +++ b/store/tikv/error.go @@ -0,0 +1,60 @@ +// Copyright 2016 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. 
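Editor's note: TestRateLimit above drives getToken and putToken directly. As a rough sketch of the intended pairing (assuming package tikv), a sender takes a token per dispatched task and the consumer hands it back once the result is read, so at most n tasks are ever in flight:

func boundedSend(n, taskCount int) {
    rl := newRateLimit(n)
    done := make(chan struct{})
    taskCh := make(chan int, taskCount)
    go func() { // sender: one token per task, stop early if done is closed
        for i := 0; i < taskCount; i++ {
            if exit := rl.getToken(done); exit {
                break
            }
            taskCh <- i
        }
        close(taskCh)
    }()
    for range taskCh { // consumer: return the token once a task is consumed
        rl.putToken()
    }
}

This mirrors how copIteratorTaskSender.run acquires a token before each send and copIterator.Next calls putToken after a per-task channel drains.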
+ +package tikv + +import ( + "github.com/pingcap/errors" + "github.com/pingcap/tidb/parser/mysql" + "github.com/pingcap/tidb/parser/terror" +) + +var ( + // ErrBodyMissing response body is missing error + ErrBodyMissing = errors.New("response body is missing") + // When TiDB is closing and send request to tikv fail, do not retry, return this error. + errTiDBShuttingDown = errors.New("tidb server shutting down") +) + +// mismatchClusterID represents the message that the cluster ID of the PD client does not match the PD. +const mismatchClusterID = "mismatch cluster id" + +// MySQL error instances. +var ( + ErrTiKVServerTimeout = terror.ClassTiKV.New(mysql.ErrTiKVServerTimeout, mysql.MySQLErrName[mysql.ErrTiKVServerTimeout]) + ErrResolveLockTimeout = terror.ClassTiKV.New(mysql.ErrResolveLockTimeout, mysql.MySQLErrName[mysql.ErrResolveLockTimeout]) + ErrPDServerTimeout = terror.ClassTiKV.New(mysql.ErrPDServerTimeout, mysql.MySQLErrName[mysql.ErrPDServerTimeout]) + ErrRegionUnavailable = terror.ClassTiKV.New(mysql.ErrRegionUnavailable, mysql.MySQLErrName[mysql.ErrRegionUnavailable]) + ErrTiKVServerBusy = terror.ClassTiKV.New(mysql.ErrTiKVServerBusy, mysql.MySQLErrName[mysql.ErrTiKVServerBusy]) + ErrGCTooEarly = terror.ClassTiKV.New(mysql.ErrGCTooEarly, mysql.MySQLErrName[mysql.ErrGCTooEarly]) + ErrQueryInterrupted = terror.ClassTiKV.New(mysql.ErrQueryInterrupted, mysql.MySQLErrName[mysql.ErrQueryInterrupted]) + ErrLockAcquireFailAndNoWaitSet = terror.ClassTiKV.New(mysql.ErrLockAcquireFailAndNoWaitSet, mysql.MySQLErrName[mysql.ErrLockAcquireFailAndNoWaitSet]) + ErrLockWaitTimeout = terror.ClassTiKV.New(mysql.ErrLockWaitTimeout, mysql.MySQLErrName[mysql.ErrLockWaitTimeout]) +) + +func init() { + tikvMySQLErrCodes := map[terror.ErrCode]uint16{ + mysql.ErrTiKVServerTimeout: mysql.ErrTiKVServerTimeout, + mysql.ErrResolveLockTimeout: mysql.ErrResolveLockTimeout, + mysql.ErrPDServerTimeout: mysql.ErrPDServerTimeout, + mysql.ErrRegionUnavailable: mysql.ErrRegionUnavailable, + mysql.ErrTiKVServerBusy: mysql.ErrTiKVServerBusy, + mysql.ErrGCTooEarly: mysql.ErrGCTooEarly, + mysql.ErrTruncatedWrongValue: mysql.ErrTruncatedWrongValue, + mysql.ErrQueryInterrupted: mysql.ErrQueryInterrupted, + mysql.ErrLockAcquireFailAndNoWaitSet: mysql.ErrLockAcquireFailAndNoWaitSet, + mysql.ErrDataOutOfRange: mysql.ErrDataOutOfRange, + mysql.ErrLockWaitTimeout: mysql.ErrLockWaitTimeout, + } + terror.ErrClassToMySQLCodes[terror.ClassTiKV] = tikvMySQLErrCodes +} diff --git a/store/tikv/interface.go b/store/tikv/interface.go new file mode 100644 index 0000000..0cff79c --- /dev/null +++ b/store/tikv/interface.go @@ -0,0 +1,54 @@ +// Copyright 2017 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package tikv + +import ( + "time" + + "github.com/pingcap/tidb/kv" + "github.com/pingcap/tidb/store/tikv/oracle" + "github.com/pingcap/tidb/store/tikv/tikvrpc" +) + +// Storage represent the kv.Storage runs on TiKV. +type Storage interface { + kv.Storage + + // GetRegionCache gets the RegionCache. + GetRegionCache() *RegionCache + + // SendReq sends a request to TiKV. 
+ SendReq(bo *Backoffer, req *tikvrpc.Request, regionID RegionVerID, timeout time.Duration) (*tikvrpc.Response, error) + + // GetLockResolver gets the LockResolver. + GetLockResolver() *LockResolver + + // GetSafePointKV gets the SafePointKV. + GetSafePointKV() SafePointKV + + // UpdateSPCache updates the cache of safe point. + UpdateSPCache(cachedSP uint64, cachedTime time.Time) + + // SetOracle sets the Oracle. + SetOracle(oracle oracle.Oracle) + + // SetTiKVClient sets the TiKV client. + SetTiKVClient(client Client) + + // GetTiKVClient gets the TiKV client. + GetTiKVClient() Client + + // Closed returns the closed channel. + Closed() <-chan struct{} +} diff --git a/store/tikv/isolation_test.go b/store/tikv/isolation_test.go new file mode 100644 index 0000000..7b2311a --- /dev/null +++ b/store/tikv/isolation_test.go @@ -0,0 +1,197 @@ +// Copyright 2016 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +// +build !race + +package tikv + +import ( + "context" + "fmt" + "sort" + "sync" + "time" + + . "github.com/pingcap/check" + "github.com/pingcap/tidb/kv" + "github.com/pingcap/tidb/parser/terror" +) + +// testIsolationSuite represents test isolation suite. +// The test suite takes too long under the race detector. +type testIsolationSuite struct { + OneByOneSuite + store *tikvStore +} + +var _ = Suite(&testIsolationSuite{}) + +func (s *testIsolationSuite) SetUpSuite(c *C) { + s.OneByOneSuite.SetUpSuite(c) + s.store = NewTestStore(c).(*tikvStore) +} + +func (s *testIsolationSuite) TearDownSuite(c *C) { + s.store.Close() + s.OneByOneSuite.TearDownSuite(c) +} + +type writeRecord struct { + startTS uint64 + commitTS uint64 +} + +type writeRecords []writeRecord + +func (r writeRecords) Len() int { return len(r) } +func (r writeRecords) Swap(i, j int) { r[i], r[j] = r[j], r[i] } +func (r writeRecords) Less(i, j int) bool { return r[i].startTS <= r[j].startTS } + +func (s *testIsolationSuite) SetWithRetry(c *C, k, v []byte) writeRecord { + for { + txn, err := s.store.Begin() + c.Assert(err, IsNil) + + err = txn.Set(k, v) + c.Assert(err, IsNil) + + err = txn.Commit(context.Background()) + if err == nil { + return writeRecord{ + startTS: txn.StartTS(), + commitTS: txn.(*tikvTxn).commitTS, + } + } + c.Assert(kv.IsTxnRetryableError(err) || terror.ErrorEqual(err, terror.ErrResultUndetermined), IsTrue) + } +} + +type readRecord struct { + startTS uint64 + value []byte +} + +type readRecords []readRecord + +func (r readRecords) Len() int { return len(r) } +func (r readRecords) Swap(i, j int) { r[i], r[j] = r[j], r[i] } +func (r readRecords) Less(i, j int) bool { return r[i].startTS <= r[j].startTS } + +func (s *testIsolationSuite) GetWithRetry(c *C, k []byte) readRecord { + for { + txn, err := s.store.Begin() + c.Assert(err, IsNil) + + val, err := txn.Get(context.TODO(), k) + if err == nil { + return readRecord{ + startTS: txn.StartTS(), + value: val, + } + } + c.Assert(kv.IsTxnRetryableError(err), IsTrue) + } +} + +func (s *testIsolationSuite) TestWriteWriteConflict(c *C) { + const ( + threadCount = 10 + setPerThread = 100 + ) + var ( + mu sync.Mutex + 
writes []writeRecord + wg sync.WaitGroup + ) + wg.Add(threadCount) + for i := 0; i < threadCount; i++ { + go func() { + defer wg.Done() + for j := 0; j < setPerThread; j++ { + w := s.SetWithRetry(c, []byte("k"), []byte("v")) + mu.Lock() + writes = append(writes, w) + mu.Unlock() + } + }() + } + wg.Wait() + + // Check all transactions' [startTS, commitTS] are not overlapped. + sort.Sort(writeRecords(writes)) + for i := 0; i < len(writes)-1; i++ { + c.Assert(writes[i].commitTS, Less, writes[i+1].startTS) + } +} + +func (s *testIsolationSuite) TestReadWriteConflict(c *C) { + const ( + readThreadCount = 10 + writeCount = 10 + ) + + var ( + writes []writeRecord + mu sync.Mutex + reads []readRecord + wg sync.WaitGroup + ) + + s.SetWithRetry(c, []byte("k"), []byte("0")) + + writeDone := make(chan struct{}) + go func() { + for i := 1; i <= writeCount; i++ { + w := s.SetWithRetry(c, []byte("k"), []byte(fmt.Sprintf("%d", i))) + writes = append(writes, w) + time.Sleep(time.Microsecond * 10) + } + close(writeDone) + }() + + wg.Add(readThreadCount) + for i := 0; i < readThreadCount; i++ { + go func() { + defer wg.Done() + for { + select { + case <-writeDone: + return + default: + } + r := s.GetWithRetry(c, []byte("k")) + mu.Lock() + reads = append(reads, r) + mu.Unlock() + } + }() + } + wg.Wait() + + sort.Sort(readRecords(reads)) + + // Check all reads got the value committed before it's startTS. + var i, j int + for ; i < len(writes); i++ { + for ; j < len(reads); j++ { + w, r := writes[i], reads[j] + if r.startTS >= w.commitTS { + break + } + c.Assert(string(r.value), Equals, fmt.Sprintf("%d", i)) + } + } + for ; j < len(reads); j++ { + c.Assert(string(reads[j].value), Equals, fmt.Sprintf("%d", len(writes))) + } +} diff --git a/store/tikv/kv.go b/store/tikv/kv.go new file mode 100644 index 0000000..6a3153a --- /dev/null +++ b/store/tikv/kv.go @@ -0,0 +1,370 @@ +// Copyright 2016 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package tikv + +import ( + "context" + "crypto/tls" + "fmt" + "math/rand" + "net/url" + "strings" + "sync" + "time" + + pd "github.com/pingcap-incubator/tinykv/scheduler/client" + "github.com/pingcap/errors" + "github.com/pingcap/failpoint" + "github.com/pingcap/tidb/kv" + "github.com/pingcap/tidb/store/tikv/oracle" + "github.com/pingcap/tidb/store/tikv/oracle/oracles" + "github.com/pingcap/tidb/store/tikv/tikvrpc" + "github.com/pingcap/tidb/util/logutil" + "go.etcd.io/etcd/clientv3" + "go.uber.org/zap" +) + +type storeCache struct { + sync.Mutex + cache map[string]*tikvStore +} + +var mc storeCache + +// Driver implements engine Driver. +type Driver struct { +} + +func createEtcdKV(addrs []string) (*clientv3.Client, error) { + cli, err := clientv3.New(clientv3.Config{ + Endpoints: addrs, + AutoSyncInterval: 30 * time.Second, + DialTimeout: 5 * time.Second, + }) + if err != nil { + return nil, errors.Trace(err) + } + return cli, nil +} + +// Open opens or creates an TiKV storage with given path. 
+// Path example: tikv://etcd-node1:port,etcd-node2:port?cluster=1&disableGC=false +func (d Driver) Open(path string) (kv.Storage, error) { + mc.Lock() + defer mc.Unlock() + + etcdAddrs, disableGC, err := parsePath(path) + if err != nil { + return nil, errors.Trace(err) + } + + pdCli, err := pd.NewClient(etcdAddrs, pd.SecurityOption{}) + + if err != nil { + return nil, errors.Trace(err) + } + + // FIXME: uuid will be a very long and ugly string, simplify it. + uuid := fmt.Sprintf("tikv-%v", pdCli.GetClusterID(context.TODO())) + if store, ok := mc.cache[uuid]; ok { + return store, nil + } + + spkv, err := NewEtcdSafePointKV(etcdAddrs) + if err != nil { + return nil, errors.Trace(err) + } + + s, err := newTikvStore(uuid, &codecPDClient{pdCli}, spkv, newRPCClient(), !disableGC) + if err != nil { + return nil, errors.Trace(err) + } + s.etcdAddrs = etcdAddrs + + mc.cache[uuid] = s + return s, nil +} + +// EtcdBackend is used for judging a storage is a real TiKV. +type EtcdBackend interface { + EtcdAddrs() []string + TLSConfig() *tls.Config +} + +// update oracle's lastTS every 2000ms. +var oracleUpdateInterval = 2000 + +type tikvStore struct { + clusterID uint64 + uuid string + oracle oracle.Oracle + client Client + pdClient pd.Client + regionCache *RegionCache + lockResolver *LockResolver + etcdAddrs []string + mock bool + enableGC bool + + kv SafePointKV + safePoint uint64 + spTime time.Time + spMutex sync.RWMutex // this is used to update safePoint and spTime + closed chan struct{} // this is used to nofity when the store is closed + + replicaReadSeed uint32 // this is used to load balance followers / learners when replica read is enabled +} + +func (s *tikvStore) UpdateSPCache(cachedSP uint64, cachedTime time.Time) { + s.spMutex.Lock() + s.safePoint = cachedSP + s.spTime = cachedTime + s.spMutex.Unlock() +} + +func (s *tikvStore) CheckVisibility(startTime uint64) error { + s.spMutex.RLock() + cachedSafePoint := s.safePoint + cachedTime := s.spTime + s.spMutex.RUnlock() + diff := time.Since(cachedTime) + + if diff > (GcSafePointCacheInterval - gcCPUTimeInaccuracyBound) { + return ErrPDServerTimeout.GenWithStackByArgs("start timestamp may fall behind safe point") + } + + if startTime < cachedSafePoint { + t1 := oracle.GetTimeFromTS(startTime) + t2 := oracle.GetTimeFromTS(cachedSafePoint) + return ErrGCTooEarly.GenWithStackByArgs(t1, t2) + } + + return nil +} + +func newTikvStore(uuid string, pdClient pd.Client, spkv SafePointKV, client Client, enableGC bool) (*tikvStore, error) { + o, err := oracles.NewPdOracle(pdClient, time.Duration(oracleUpdateInterval)*time.Millisecond) + if err != nil { + return nil, errors.Trace(err) + } + store := &tikvStore{ + clusterID: pdClient.GetClusterID(context.TODO()), + uuid: uuid, + oracle: o, + client: client, + pdClient: pdClient, + regionCache: NewRegionCache(pdClient), + kv: spkv, + safePoint: 0, + spTime: time.Now(), + closed: make(chan struct{}), + replicaReadSeed: rand.Uint32(), + } + store.lockResolver = newLockResolver(store) + store.enableGC = enableGC + + go store.runSafePointChecker() + + return store, nil +} + +func (s *tikvStore) EtcdAddrs() []string { + return s.etcdAddrs +} + +func (s *tikvStore) runSafePointChecker() { + d := gcSafePointUpdateInterval + for { + select { + case spCachedTime := <-time.After(d): + cachedSafePoint, err := loadSafePoint(s.GetSafePointKV()) + if err == nil { + s.UpdateSPCache(cachedSafePoint, spCachedTime) + d = gcSafePointUpdateInterval + } else { + + logutil.BgLogger().Error("fail to load safepoint from pd", 
zap.Error(err)) + d = gcSafePointQuickRepeatInterval + } + case <-s.Closed(): + return + } + } +} + +func (s *tikvStore) Begin() (kv.Transaction, error) { + txn, err := newTiKVTxn(s) + if err != nil { + return nil, errors.Trace(err) + } + + return txn, nil +} + +// BeginWithStartTS begins a transaction with startTS. +func (s *tikvStore) BeginWithStartTS(startTS uint64) (kv.Transaction, error) { + txn, err := newTikvTxnWithStartTS(s, startTS) + if err != nil { + return nil, errors.Trace(err) + } + + return txn, nil +} + +func (s *tikvStore) GetSnapshot(ver kv.Version) (kv.Snapshot, error) { + snapshot := newTiKVSnapshot(s, ver) + + return snapshot, nil +} + +func (s *tikvStore) Close() error { + mc.Lock() + defer mc.Unlock() + + delete(mc.cache, s.uuid) + s.oracle.Close() + s.pdClient.Close() + + close(s.closed) + if err := s.client.Close(); err != nil { + return errors.Trace(err) + } + + s.regionCache.Close() + return nil +} + +func (s *tikvStore) UUID() string { + return s.uuid +} + +func (s *tikvStore) CurrentVersion() (kv.Version, error) { + bo := NewBackoffer(context.Background(), tsoMaxBackoff) + startTS, err := s.getTimestampWithRetry(bo) + if err != nil { + return kv.NewVersion(0), errors.Trace(err) + } + return kv.NewVersion(startTS), nil +} + +func (s *tikvStore) getTimestampWithRetry(bo *Backoffer) (uint64, error) { + for { + startTS, err := s.oracle.GetTimestamp(bo.ctx) + // mockGetTSErrorInRetry should wait MockCommitErrorOnce first, then will run into retry() logic. + // Then mockGetTSErrorInRetry will return retryable error when first retry. + // Before PR #8743, we don't cleanup txn after meet error such as error like: PD server timeout + // This may cause duplicate data to be written. + failpoint.Inject("mockGetTSErrorInRetry", func(val failpoint.Value) { + if val.(bool) && !kv.IsMockCommitErrorEnable() { + err = ErrPDServerTimeout.GenWithStackByArgs("mock PD timeout") + } + }) + + if err == nil { + return startTS, nil + } + err = bo.Backoff(BoPDRPC, errors.Errorf("get timestamp failed: %v", err)) + if err != nil { + return 0, errors.Trace(err) + } + } +} + +func (s *tikvStore) GetClient() kv.Client { + return &CopClient{ + store: s, + } +} + +func (s *tikvStore) GetOracle() oracle.Oracle { + return s.oracle +} + +func (s *tikvStore) Name() string { + return "TiKV" +} + +func (s *tikvStore) Describe() string { + return "TiKV is a distributed transactional key-value database" +} + +func (s *tikvStore) ShowStatus(ctx context.Context, key string) (interface{}, error) { + return nil, kv.ErrNotImplemented +} + +func (s *tikvStore) SupportDeleteRange() (supported bool) { + return !s.mock +} + +func (s *tikvStore) SendReq(bo *Backoffer, req *tikvrpc.Request, regionID RegionVerID, timeout time.Duration) (*tikvrpc.Response, error) { + sender := NewRegionRequestSender(s.regionCache, s.client) + return sender.SendReq(bo, req, regionID, timeout) +} + +func (s *tikvStore) GetRegionCache() *RegionCache { + return s.regionCache +} + +func (s *tikvStore) GetLockResolver() *LockResolver { + return s.lockResolver +} + +func (s *tikvStore) Closed() <-chan struct{} { + return s.closed +} + +func (s *tikvStore) GetSafePointKV() SafePointKV { + return s.kv +} + +func (s *tikvStore) SetOracle(oracle oracle.Oracle) { + s.oracle = oracle +} + +func (s *tikvStore) SetTiKVClient(client Client) { + s.client = client +} + +func (s *tikvStore) GetTiKVClient() (client Client) { + return s.client +} + +func parsePath(path string) (etcdAddrs []string, disableGC bool, err error) { + var u *url.URL + u, err 
= url.Parse(path) + if err != nil { + err = errors.Trace(err) + return + } + if strings.ToLower(u.Scheme) != "tikv" { + err = errors.Errorf("Uri scheme expected[tikv] but found [%s]", u.Scheme) + logutil.BgLogger().Error("parsePath error", zap.Error(err)) + return + } + switch strings.ToLower(u.Query().Get("disableGC")) { + case "true": + disableGC = true + case "false", "": + default: + err = errors.New("disableGC flag should be true/false") + return + } + etcdAddrs = strings.Split(u.Host, ",") + return +} + +func init() { + mc.cache = make(map[string]*tikvStore) + rand.Seed(time.Now().UnixNano()) +} diff --git a/store/tikv/lock_resolver.go b/store/tikv/lock_resolver.go new file mode 100644 index 0000000..13e0eea --- /dev/null +++ b/store/tikv/lock_resolver.go @@ -0,0 +1,381 @@ +// Copyright 2016 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package tikv + +import ( + "bytes" + "container/list" + "context" + "fmt" + "github.com/pingcap-incubator/tinykv/proto/pkg/kvrpcpb" + "github.com/pingcap-incubator/tinykv/scheduler/client" + "github.com/pingcap/errors" + "github.com/pingcap/tidb/store/tikv/tikvrpc" + "github.com/pingcap/tidb/util/logutil" + "go.uber.org/zap" + "sync" +) + +// ResolvedCacheSize is max number of cached txn status. +const ResolvedCacheSize = 2048 + +// bigTxnThreshold : transaction involves keys exceed this threshold can be treated as `big transaction`. +const bigTxnThreshold = 16 + +// LockResolver resolves locks and also caches resolved txn status. +type LockResolver struct { + store Storage + mu struct { + sync.RWMutex + // resolved caches resolved txns (FIFO, txn id -> txnStatus). + resolved map[uint64]TxnStatus + recentResolved *list.List + } +} + +func newLockResolver(store Storage) *LockResolver { + r := &LockResolver{ + store: store, + } + r.mu.resolved = make(map[uint64]TxnStatus) + r.mu.recentResolved = list.New() + return r +} + +// NewLockResolver is exported for other pkg to use, suppress unused warning. +var _ = NewLockResolver + +// NewLockResolver creates a LockResolver. +// It is exported for other pkg to use. For instance, binlog service needs +// to determine a transaction's commit state. +func NewLockResolver(etcdAddrs []string) (*LockResolver, error) { + pdCli, err := pd.NewClient(etcdAddrs, pd.SecurityOption{}) + if err != nil { + return nil, errors.Trace(err) + } + uuid := fmt.Sprintf("tikv-%v", pdCli.GetClusterID(context.TODO())) + + spkv, err := NewEtcdSafePointKV(etcdAddrs) + if err != nil { + return nil, errors.Trace(err) + } + + s, err := newTikvStore(uuid, &codecPDClient{pdCli}, spkv, newRPCClient(), false) + if err != nil { + return nil, errors.Trace(err) + } + return s.lockResolver, nil +} + +// TxnStatus represents a txn's final status. It should be Lock or Commit or Rollback. +type TxnStatus struct { + ttl uint64 + commitTS uint64 + action kvrpcpb.Action +} + +// IsCommitted returns true if the txn's final status is Commit. +func (s TxnStatus) IsCommitted() bool { return s.ttl == 0 && s.commitTS > 0 } + +// CommitTS returns the txn's commitTS. It is valid iff `IsCommitted` is true. 
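// Editor's note: the sketch below is illustrative only and is not part of this
// patch. Assuming a caller package that imports
// "github.com/pingcap/tidb/store/tikv", it shows the intended use of
// NewLockResolver and GetTxnStatus (both defined in this file) by an external
// component such as a binlog drainer that needs a transaction's final state.
// The etcd address is a placeholder; error handling is kept minimal.
func checkTxnCommitted(txnStartTS uint64, primaryKey []byte) (bool, uint64, error) {
	lr, err := tikv.NewLockResolver([]string{"127.0.0.1:2379"})
	if err != nil {
		return false, 0, err
	}
	// The tests in this patch pass the transaction's own startTS as
	// callerStartTS; a real caller would pass its current startTS.
	status, err := lr.GetTxnStatus(txnStartTS, txnStartTS, primaryKey)
	if err != nil {
		return false, 0, err
	}
	// CommitTS is only meaningful when IsCommitted reports true.
	return status.IsCommitted(), status.CommitTS(), nil
}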
+func (s TxnStatus) CommitTS() uint64 { return uint64(s.commitTS) } + +// By default, locks after 3000ms is considered unusual (the client created the +// lock might be dead). Other client may cleanup this kind of lock. +// For locks created recently, we will do backoff and retry. +var defaultLockTTL uint64 = 3000 + +// TODO: Consider if it's appropriate. +var maxLockTTL uint64 = 120000 + +// ttl = ttlFactor * sqrt(writeSizeInMiB) +var ttlFactor = 6000 + +// Lock represents a lock from tikv server. +type Lock struct { + Key []byte + Primary []byte + TxnID uint64 + TTL uint64 + TxnSize uint64 + LockType kvrpcpb.Op +} + +func (l *Lock) String() string { + buf := bytes.NewBuffer(make([]byte, 0, 128)) + buf.WriteString("key: ") + prettyWriteKey(buf, l.Key) + buf.WriteString(", primary: ") + prettyWriteKey(buf, l.Primary) + return fmt.Sprintf("%s, txnStartTS: %d, ttl: %d, type: %s", buf.String(), l.TxnID, l.TTL, l.LockType) +} + +// NewLock creates a new *Lock. +func NewLock(l *kvrpcpb.LockInfo) *Lock { + return &Lock{ + Key: l.GetKey(), + Primary: l.GetPrimaryLock(), + TxnID: l.GetLockVersion(), + TTL: l.GetLockTtl(), + } +} + +func (lr *LockResolver) saveResolved(txnID uint64, status TxnStatus) { + lr.mu.Lock() + defer lr.mu.Unlock() + + if _, ok := lr.mu.resolved[txnID]; ok { + return + } + lr.mu.resolved[txnID] = status + lr.mu.recentResolved.PushBack(txnID) + if len(lr.mu.resolved) > ResolvedCacheSize { + front := lr.mu.recentResolved.Front() + delete(lr.mu.resolved, front.Value.(uint64)) + lr.mu.recentResolved.Remove(front) + } +} + +func (lr *LockResolver) getResolved(txnID uint64) (TxnStatus, bool) { + lr.mu.RLock() + defer lr.mu.RUnlock() + + s, ok := lr.mu.resolved[txnID] + return s, ok +} + +// ResolveLocks tries to resolve Locks. The resolving process is in 3 steps: +// 1) Use the `lockTTL` to pick up all expired locks. Only locks that are too +// old are considered orphan locks and will be handled later. If all locks +// are expired then all locks will be resolved so the returned `ok` will be +// true, otherwise caller should sleep a while before retry. +// 2) For each lock, query the primary key to get txn(which left the lock)'s +// commit status. +// 3) Send `ResolveLock` cmd to the lock's region to resolve all locks belong to +// the same transaction. +func (lr *LockResolver) ResolveLocks(bo *Backoffer, callerStartTS uint64, locks []*Lock) (int64, []uint64 /*pushed*/, error) { + var msBeforeTxnExpired txnExpireTime + if len(locks) == 0 { + return msBeforeTxnExpired.value(), nil, nil + } + + var pushFail bool + // TxnID -> []Region, record resolved Regions. + // TODO: Maybe put it in LockResolver and share by all txns. + cleanTxns := make(map[uint64]map[RegionVerID]struct{}) + pushed := make([]uint64, 0, len(locks)) + for _, l := range locks { + status, err := lr.getTxnStatusFromLock(bo, l, callerStartTS) + if err != nil { + msBeforeTxnExpired.update(0) + err = errors.Trace(err) + return msBeforeTxnExpired.value(), nil, err + } + + if status.ttl == 0 { + // If the lock is committed or rollbacked, resolve lock. + cleanRegions, exists := cleanTxns[l.TxnID] + if !exists { + cleanRegions = make(map[RegionVerID]struct{}) + cleanTxns[l.TxnID] = cleanRegions + } + + err = lr.resolveLock(bo, l, status, cleanRegions) + if err != nil { + msBeforeTxnExpired.update(0) + err = errors.Trace(err) + return msBeforeTxnExpired.value(), nil, err + } + } else { + // Update the txn expire time. 
+ msBeforeLockExpired := lr.store.GetOracle().UntilExpired(l.TxnID, status.ttl) + msBeforeTxnExpired.update(msBeforeLockExpired) + // In the write conflict scenes, callerStartTS is set to 0 to avoid unnecessary push minCommitTS operation. + if callerStartTS > 0 { + pushFail = true + continue + } + } + } + if pushFail { + // If any of the lock fails to push minCommitTS, don't return the pushed array. + pushed = nil + } + + return msBeforeTxnExpired.value(), pushed, nil +} + +type txnExpireTime struct { + initialized bool + txnExpire int64 +} + +func (t *txnExpireTime) update(lockExpire int64) { + if lockExpire <= 0 { + lockExpire = 0 + } + if !t.initialized { + t.txnExpire = lockExpire + t.initialized = true + return + } + if lockExpire < t.txnExpire { + t.txnExpire = lockExpire + } +} + +func (t *txnExpireTime) value() int64 { + if !t.initialized { + return 0 + } + return t.txnExpire +} + +// GetTxnStatus queries tikv-server for a txn's status (commit/rollback). +// If the primary key is still locked, it will launch a Rollback to abort it. +// To avoid unnecessarily aborting too many txns, it is wiser to wait a few +// seconds before calling it after Prewrite. +func (lr *LockResolver) GetTxnStatus(txnID uint64, callerStartTS uint64, primary []byte) (TxnStatus, error) { + var status TxnStatus + bo := NewBackoffer(context.Background(), cleanupMaxBackoff) + currentTS, err := lr.store.GetOracle().GetTimestamp(bo.ctx) + if err != nil { + return status, err + } + return lr.getTxnStatus(bo, txnID, primary, callerStartTS, currentTS, true) +} + +func (lr *LockResolver) getTxnStatusFromLock(bo *Backoffer, l *Lock, callerStartTS uint64) (TxnStatus, error) { + var currentTS uint64 + var err error + var status TxnStatus + currentTS, err = lr.store.GetOracle().GetTimestamp(bo.ctx) + if err != nil { + return TxnStatus{}, err + } + + rollbackIfNotExist := false + for { + status, err = lr.getTxnStatus(bo, l.TxnID, l.Primary, callerStartTS, currentTS, rollbackIfNotExist) + if err == nil { + return status, nil + } + return TxnStatus{}, err + } +} + +// getTxnStatus sends the CheckTxnStatus request to the TiKV server. +// When rollbackIfNotExist is false, the caller should be careful with the txnNotFoundErr error. +func (lr *LockResolver) getTxnStatus(bo *Backoffer, txnID uint64, primary []byte, callerStartTS, currentTS uint64, rollbackIfNotExist bool) (TxnStatus, error) { + if s, ok := lr.getResolved(txnID); ok { + return s, nil + } + + // CheckTxnStatus may meet the following cases: + // 1. LOCK + // 1.1 Lock expired -- orphan lock, fail to update TTL, crash recovery etc. + // 1.2 Lock TTL -- active transaction holding the lock. + // 2. NO LOCK + // 2.1 Txn Committed + // 2.2 Txn Rollbacked -- rollback itself, rollback by others, GC tomb etc. + // 2.3 No lock -- concurrence prewrite. 
+ + var status TxnStatus + req := tikvrpc.NewRequest(tikvrpc.CmdCheckTxnStatus, &kvrpcpb.CheckTxnStatusRequest{ + PrimaryKey: primary, + LockTs: txnID, + CurrentTs: currentTS, + }) + for { + loc, err := lr.store.GetRegionCache().LocateKey(bo, primary) + if err != nil { + return status, errors.Trace(err) + } + resp, err := lr.store.SendReq(bo, req, loc.Region, readTimeoutShort) + if err != nil { + return status, errors.Trace(err) + } + regionErr, err := resp.GetRegionError() + if err != nil { + return status, errors.Trace(err) + } + if regionErr != nil { + err = bo.Backoff(BoRegionMiss, errors.New(regionErr.String())) + if err != nil { + return status, errors.Trace(err) + } + continue + } + if resp.Resp == nil { + return status, errors.Trace(ErrBodyMissing) + } + cmdResp := resp.Resp.(*kvrpcpb.CheckTxnStatusResponse) + status.action = cmdResp.Action + if cmdResp.LockTtl != 0 { + status.ttl = cmdResp.LockTtl + } else { + status.commitTS = cmdResp.CommitVersion + lr.saveResolved(txnID, status) + } + return status, nil + } +} + +func (lr *LockResolver) resolveLock(bo *Backoffer, l *Lock, status TxnStatus, cleanRegions map[RegionVerID]struct{}) error { + cleanWholeRegion := l.TxnSize >= bigTxnThreshold + for { + loc, err := lr.store.GetRegionCache().LocateKey(bo, l.Key) + if err != nil { + return errors.Trace(err) + } + if _, ok := cleanRegions[loc.Region]; ok { + return nil + } + lreq := &kvrpcpb.ResolveLockRequest{ + StartVersion: l.TxnID, + } + if status.IsCommitted() { + lreq.CommitVersion = status.CommitTS() + } + req := tikvrpc.NewRequest(tikvrpc.CmdResolveLock, lreq) + resp, err := lr.store.SendReq(bo, req, loc.Region, readTimeoutShort) + if err != nil { + return errors.Trace(err) + } + regionErr, err := resp.GetRegionError() + if err != nil { + return errors.Trace(err) + } + if regionErr != nil { + err = bo.Backoff(BoRegionMiss, errors.New(regionErr.String())) + if err != nil { + return errors.Trace(err) + } + continue + } + if resp.Resp == nil { + return errors.Trace(ErrBodyMissing) + } + cmdResp := resp.Resp.(*kvrpcpb.ResolveLockResponse) + if keyErr := cmdResp.GetError(); keyErr != nil { + err = errors.Errorf("unexpected resolve err: %s, lock: %v", keyErr, l) + logutil.BgLogger().Error("resolveLock error", zap.Error(err)) + return err + } + if cleanWholeRegion { + cleanRegions[loc.Region] = struct{}{} + } + return nil + } +} diff --git a/store/tikv/lock_test.go b/store/tikv/lock_test.go new file mode 100644 index 0000000..0b7c050 --- /dev/null +++ b/store/tikv/lock_test.go @@ -0,0 +1,368 @@ +// Copyright 2016 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package tikv + +import ( + "bytes" + "context" + "fmt" + "math" + "runtime" + "time" + + "github.com/pingcap-incubator/tinykv/proto/pkg/kvrpcpb" + . 
"github.com/pingcap/check" + "github.com/pingcap/failpoint" + "github.com/pingcap/tidb/kv" + "github.com/pingcap/tidb/store/tikv/tikvrpc" +) + +type testLockSuite struct { + OneByOneSuite + store *tikvStore +} + +var _ = Suite(&testLockSuite{}) + +func (s *testLockSuite) SetUpTest(c *C) { + s.store = NewTestStore(c).(*tikvStore) +} + +func (s *testLockSuite) TearDownTest(c *C) { + s.store.Close() +} + +func (s *testLockSuite) lockKey(c *C, key, value, primaryKey, primaryValue []byte, commitPrimary bool) (uint64, uint64) { + txn, err := newTiKVTxn(s.store) + c.Assert(err, IsNil) + if len(value) > 0 { + err = txn.Set(key, value) + } else { + err = txn.Delete(key) + } + c.Assert(err, IsNil) + if len(primaryValue) > 0 { + err = txn.Set(primaryKey, primaryValue) + } else { + err = txn.Delete(primaryKey) + } + c.Assert(err, IsNil) + tpc, err := newTwoPhaseCommitterWithInit(txn, 0) + c.Assert(err, IsNil) + if bytes.Equal(key, primaryKey) { + tpc.keys = [][]byte{primaryKey} + } else { + tpc.keys = [][]byte{primaryKey, key} + } + + ctx := context.Background() + err = tpc.prewriteKeys(NewBackoffer(ctx, PrewriteMaxBackoff), tpc.keys) + c.Assert(err, IsNil) + + if commitPrimary { + tpc.commitTS, err = s.store.oracle.GetTimestamp(ctx) + c.Assert(err, IsNil) + err = tpc.commitKeys(NewBackoffer(ctx, CommitMaxBackoff), [][]byte{primaryKey}) + c.Assert(err, IsNil) + } + return txn.startTS, tpc.commitTS +} + +func (s *testLockSuite) putAlphabets(c *C) { + for ch := byte('a'); ch <= byte('z'); ch++ { + s.putKV(c, []byte{ch}, []byte{ch}) + } +} + +func (s *testLockSuite) putKV(c *C, key, value []byte) (uint64, uint64) { + txn, err := s.store.Begin() + c.Assert(err, IsNil) + err = txn.Set(key, value) + c.Assert(err, IsNil) + err = txn.Commit(context.Background()) + c.Assert(err, IsNil) + return txn.StartTS(), txn.(*tikvTxn).commitTS +} + +func (s *testLockSuite) prepareAlphabetLocks(c *C) { + s.putKV(c, []byte("c"), []byte("cc")) + s.lockKey(c, []byte("c"), []byte("c"), []byte("z1"), []byte("z1"), true) + s.lockKey(c, []byte("d"), []byte("dd"), []byte("z2"), []byte("z2"), false) + s.lockKey(c, []byte("foo"), []byte("foo"), []byte("z3"), []byte("z3"), false) + s.putKV(c, []byte("bar"), []byte("bar")) + s.lockKey(c, []byte("bar"), nil, []byte("z4"), []byte("z4"), true) +} + +func (s *testLockSuite) TestScanLockResolveWithGet(c *C) { + s.putAlphabets(c) + s.prepareAlphabetLocks(c) + + txn, err := s.store.Begin() + c.Assert(err, IsNil) + for ch := byte('a'); ch <= byte('z'); ch++ { + v, err := txn.Get(context.TODO(), []byte{ch}) + c.Assert(err, IsNil) + c.Assert(v, BytesEquals, []byte{ch}) + } +} + +func (s *testLockSuite) TestScanLockResolveWithSeek(c *C) { + s.putAlphabets(c) + s.prepareAlphabetLocks(c) + + txn, err := s.store.Begin() + c.Assert(err, IsNil) + iter, err := txn.Iter([]byte("a"), nil) + c.Assert(err, IsNil) + for ch := byte('a'); ch <= byte('z'); ch++ { + c.Assert(iter.Valid(), IsTrue) + c.Assert([]byte(iter.Key()), BytesEquals, []byte{ch}) + c.Assert([]byte(iter.Value()), BytesEquals, []byte{ch}) + c.Assert(iter.Next(), IsNil) + } +} + +func (s *testLockSuite) TestScanLockResolveWithSeekKeyOnly(c *C) { + s.putAlphabets(c) + s.prepareAlphabetLocks(c) + + txn, err := s.store.Begin() + c.Assert(err, IsNil) + txn.SetOption(kv.KeyOnly, true) + iter, err := txn.Iter([]byte("a"), nil) + c.Assert(err, IsNil) + for ch := byte('a'); ch <= byte('z'); ch++ { + c.Assert(iter.Valid(), IsTrue) + c.Assert([]byte(iter.Key()), BytesEquals, []byte{ch}) + c.Assert(iter.Next(), IsNil) + } +} + +func (s *testLockSuite) 
TestCleanLock(c *C) { + for ch := byte('a'); ch <= byte('z'); ch++ { + k := []byte{ch} + s.lockKey(c, k, k, k, k, false) + } + txn, err := s.store.Begin() + c.Assert(err, IsNil) + for ch := byte('a'); ch <= byte('z'); ch++ { + err = txn.Set([]byte{ch}, []byte{ch + 1}) + c.Assert(err, IsNil) + } + err = txn.Commit(context.Background()) + c.Assert(err, IsNil) +} + +func (s *testLockSuite) TestGetTxnStatus(c *C) { + startTS, commitTS := s.putKV(c, []byte("a"), []byte("a")) + status, err := s.store.lockResolver.GetTxnStatus(startTS, startTS, []byte("a")) + c.Assert(err, IsNil) + c.Assert(status.IsCommitted(), IsTrue) + c.Assert(status.CommitTS(), Equals, commitTS) + + startTS, commitTS = s.lockKey(c, []byte("a"), []byte("a"), []byte("a"), []byte("a"), true) + status, err = s.store.lockResolver.GetTxnStatus(startTS, startTS, []byte("a")) + c.Assert(err, IsNil) + c.Assert(status.IsCommitted(), IsTrue) + c.Assert(status.CommitTS(), Equals, commitTS) + + startTS, _ = s.lockKey(c, []byte("a"), []byte("a"), []byte("a"), []byte("a"), false) + status, err = s.store.lockResolver.GetTxnStatus(startTS, startTS, []byte("a")) + c.Assert(err, IsNil) + c.Assert(status.IsCommitted(), IsFalse) + c.Assert(status.ttl, Greater, uint64(0), Commentf("action:%s", status.action)) +} + +func (s *testLockSuite) TestCheckTxnStatusTTL(c *C) { + txn, err := s.store.Begin() + c.Assert(err, IsNil) + txn.Set(kv.Key("key"), []byte("value")) + s.prewriteTxnWithTTL(c, txn.(*tikvTxn), 1000) + + bo := NewBackoffer(context.Background(), PrewriteMaxBackoff) + lr := newLockResolver(s.store) + callerStartTS, err := lr.store.GetOracle().GetTimestamp(bo.ctx) + c.Assert(err, IsNil) + + // Check the lock TTL of a transaction. + status, err := lr.GetTxnStatus(txn.StartTS(), callerStartTS, []byte("key")) + c.Assert(err, IsNil) + c.Assert(status.IsCommitted(), IsFalse) + c.Assert(status.ttl, Greater, uint64(0)) + c.Assert(status.CommitTS(), Equals, uint64(0)) + + // Rollback the txn. + lock := s.mustGetLock(c, []byte("key")) + status = TxnStatus{} + cleanRegions := make(map[RegionVerID]struct{}) + err = newLockResolver(s.store).resolveLock(bo, lock, status, cleanRegions) + c.Assert(err, IsNil) + + // Check its status is rollbacked. + status, err = lr.GetTxnStatus(txn.StartTS(), callerStartTS, []byte("key")) + c.Assert(err, IsNil) + c.Assert(status.ttl, Equals, uint64(0)) + c.Assert(status.commitTS, Equals, uint64(0)) + c.Assert(status.action, Equals, kvrpcpb.Action_NoAction) + + // Check a committed txn. + startTS, commitTS := s.putKV(c, []byte("a"), []byte("a")) + status, err = lr.GetTxnStatus(startTS, callerStartTS, []byte("a")) + c.Assert(err, IsNil) + c.Assert(status.ttl, Equals, uint64(0)) + c.Assert(status.commitTS, Equals, commitTS) +} + +func (s *testLockSuite) TestCheckTxnStatus(c *C) { + txn, err := s.store.Begin() + c.Assert(err, IsNil) + txn.Set(kv.Key("key"), []byte("value")) + txn.Set(kv.Key("second"), []byte("xxx")) + s.prewriteTxnWithTTL(c, txn.(*tikvTxn), 1000) + + oracle := s.store.GetOracle() + currentTS, err := oracle.GetTimestamp(context.Background()) + c.Assert(err, IsNil) + c.Assert(currentTS, Greater, txn.StartTS()) + + bo := NewBackoffer(context.Background(), PrewriteMaxBackoff) + resolver := newLockResolver(s.store) + // Call getTxnStatus to check the lock status. 
+ status, err := resolver.getTxnStatus(bo, txn.StartTS(), []byte("key"), currentTS, currentTS, true) + c.Assert(err, IsNil) + c.Assert(status.IsCommitted(), IsFalse) + c.Assert(status.ttl, Greater, uint64(0)) + c.Assert(status.CommitTS(), Equals, uint64(0)) + + // Test the ResolveLocks API + lock := s.mustGetLock(c, []byte("second")) + timeBeforeExpire, _, err := resolver.ResolveLocks(bo, currentTS, []*Lock{lock}) + c.Assert(err, IsNil) + c.Assert(timeBeforeExpire > int64(0), IsTrue) +} + +func (s *testLockSuite) prewriteTxn(c *C, txn *tikvTxn) { + s.prewriteTxnWithTTL(c, txn, 0) +} + +func (s *testLockSuite) prewriteTxnWithTTL(c *C, txn *tikvTxn, ttl uint64) { + committer, err := newTwoPhaseCommitterWithInit(txn, 0) + c.Assert(err, IsNil) + if ttl > 0 { + elapsed := time.Since(txn.startTime) / time.Millisecond + committer.lockTTL = uint64(elapsed) + ttl + } + err = committer.prewriteKeys(NewBackoffer(context.Background(), PrewriteMaxBackoff), committer.keys) + c.Assert(err, IsNil) +} + +func (s *testLockSuite) mustGetLock(c *C, key []byte) *Lock { + ver, err := s.store.CurrentVersion() + c.Assert(err, IsNil) + bo := NewBackoffer(context.Background(), getMaxBackoff) + req := tikvrpc.NewRequest(tikvrpc.CmdGet, &kvrpcpb.GetRequest{ + Key: key, + Version: ver.Ver, + }) + loc, err := s.store.regionCache.LocateKey(bo, key) + c.Assert(err, IsNil) + resp, err := s.store.SendReq(bo, req, loc.Region, readTimeoutShort) + c.Assert(err, IsNil) + c.Assert(resp.Resp, NotNil) + keyErr := resp.Resp.(*kvrpcpb.GetResponse).GetError() + c.Assert(keyErr, NotNil) + lock, err := extractLockFromKeyErr(keyErr) + c.Assert(err, IsNil) + return lock +} + +func (s *testLockSuite) ttlEquals(c *C, x, y uint64) { + // NOTE: On ppc64le, all integers are by default unsigned integers, + // hence we have to separately cast the value returned by "math.Abs()" function for ppc64le. + if runtime.GOARCH == "ppc64le" { + c.Assert(int(-math.Abs(float64(x-y))), LessEqual, 2) + } else { + c.Assert(int(math.Abs(float64(x-y))), LessEqual, 2) + } + +} + +func (s *testLockSuite) TestLockTTL(c *C) { + txn, err := s.store.Begin() + c.Assert(err, IsNil) + txn.Set(kv.Key("key"), []byte("value")) + time.Sleep(time.Millisecond) + s.prewriteTxnWithTTL(c, txn.(*tikvTxn), 1000) + l := s.mustGetLock(c, []byte("key")) + c.Assert(l.TTL >= defaultLockTTL, IsTrue) + + // Huge txn has a greater TTL. + txn, err = s.store.Begin() + start := time.Now() + c.Assert(err, IsNil) + txn.Set(kv.Key("key"), []byte("value")) + for i := 0; i < 2048; i++ { + k, v := randKV(1024, 1024) + txn.Set(kv.Key(k), []byte(v)) + } + s.prewriteTxn(c, txn.(*tikvTxn)) + l = s.mustGetLock(c, []byte("key")) + s.ttlEquals(c, l.TTL, uint64(ttlFactor*2)+uint64(time.Since(start)/time.Millisecond)) + + // Txn with long read time. + start = time.Now() + txn, err = s.store.Begin() + c.Assert(err, IsNil) + time.Sleep(time.Millisecond * 50) + txn.Set(kv.Key("key"), []byte("value")) + s.prewriteTxn(c, txn.(*tikvTxn)) + l = s.mustGetLock(c, []byte("key")) + s.ttlEquals(c, l.TTL, defaultLockTTL+uint64(time.Since(start)/time.Millisecond)) +} + +func (s *testLockSuite) TestNewLockZeroTTL(c *C) { + l := NewLock(&kvrpcpb.LockInfo{}) + c.Assert(l.TTL, Equals, uint64(0)) +} + +func init() { + // Speed up tests. 
+ defaultLockTTL = 3 + maxLockTTL = 120 + ttlFactor = 6 + oracleUpdateInterval = 2 +} + +func (s *testLockSuite) TestZeroMinCommitTS(c *C) { + txn, err := s.store.Begin() + c.Assert(err, IsNil) + txn.Set(kv.Key("key"), []byte("value")) + bo := NewBackoffer(context.Background(), PrewriteMaxBackoff) + + mockValue := fmt.Sprintf(`return(%d)`, txn.StartTS()) + c.Assert(failpoint.Enable("github.com/pingcap/tidb/store/tikv/mockZeroCommitTS", mockValue), IsNil) + s.prewriteTxnWithTTL(c, txn.(*tikvTxn), 1000) + c.Assert(failpoint.Disable("github.com/pingcap/tidb/store/tikv/mockZeroCommitTS"), IsNil) + + lock := s.mustGetLock(c, []byte("key")) + expire, pushed, err := newLockResolver(s.store).ResolveLocks(bo, 0, []*Lock{lock}) + c.Assert(err, IsNil) + c.Assert(pushed, HasLen, 0) + c.Assert(expire, Greater, int64(0)) + + expire, pushed, err = newLockResolver(s.store).ResolveLocks(bo, math.MaxUint64, []*Lock{lock}) + c.Assert(err, IsNil) + c.Assert(pushed, HasLen, 0) + c.Assert(expire, Greater, int64(0)) +} diff --git a/store/tikv/oracle/oracle.go b/store/tikv/oracle/oracle.go new file mode 100644 index 0000000..a6e1a5d --- /dev/null +++ b/store/tikv/oracle/oracle.go @@ -0,0 +1,56 @@ +// Copyright 2016 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package oracle + +import ( + "context" + "time" +) + +// Oracle is the interface that provides strictly ascending timestamps. +type Oracle interface { + GetTimestamp(ctx context.Context) (uint64, error) + GetTimestampAsync(ctx context.Context) Future + IsExpired(lockTimestamp uint64, TTL uint64) bool + UntilExpired(lockTimeStamp uint64, TTL uint64) int64 + Close() +} + +// Future is a future which promises to return a timestamp. +type Future interface { + Wait() (uint64, error) +} + +const physicalShiftBits = 18 + +// ComposeTS creates a ts from physical and logical parts. +func ComposeTS(physical, logical int64) uint64 { + return uint64((physical << physicalShiftBits) + logical) +} + +// ExtractPhysical returns a ts's physical part. +func ExtractPhysical(ts uint64) int64 { + return int64(ts >> physicalShiftBits) +} + +// GetPhysical returns physical from an instant time with millisecond precision. +func GetPhysical(t time.Time) int64 { + return t.UnixNano() / int64(time.Millisecond) +} + +// GetTimeFromTS extracts time.Time from a timestamp. +func GetTimeFromTS(ts uint64) time.Time { + ms := ExtractPhysical(ts) + return time.Unix(ms/1e3, (ms%1e3)*1e6) +} diff --git a/store/tikv/oracle/oracles/export_test.go b/store/tikv/oracle/oracles/export_test.go new file mode 100644 index 0000000..895779f --- /dev/null +++ b/store/tikv/oracle/oracles/export_test.go @@ -0,0 +1,46 @@ +// Copyright 2019 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package oracles + +import ( + "time" + + "github.com/pingcap/tidb/store/tikv/oracle" +) + +// SetOracleHookCurrentTime exports localOracle's time hook to test. +func SetOracleHookCurrentTime(oc oracle.Oracle, t time.Time) { + switch o := oc.(type) { + case *localOracle: + if o.hook == nil { + o.hook = &struct { + currentTime time.Time + }{} + } + o.hook.currentTime = t + } +} + +// NewEmptyPDOracle exports pdOracle struct to test +func NewEmptyPDOracle() oracle.Oracle { + return &pdOracle{} +} + +// SetEmptyPDOracleLastTs exports PD oracle's last ts to test. +func SetEmptyPDOracleLastTs(oc oracle.Oracle, ts uint64) { + switch o := oc.(type) { + case *pdOracle: + o.lastTS = ts + } +} diff --git a/store/tikv/oracle/oracles/local.go b/store/tikv/oracle/oracles/local.go new file mode 100644 index 0000000..25543ec --- /dev/null +++ b/store/tikv/oracle/oracles/local.go @@ -0,0 +1,92 @@ +// Copyright 2016 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package oracles + +import ( + "context" + "sync" + "time" + + "github.com/pingcap/tidb/store/tikv/oracle" +) + +var _ oracle.Oracle = &localOracle{} + +type localOracle struct { + sync.Mutex + lastTimeStampTS uint64 + n uint64 + hook *struct { + currentTime time.Time + } +} + +// NewLocalOracle creates an Oracle that uses local time as data source. +func NewLocalOracle() oracle.Oracle { + return &localOracle{} +} + +func (l *localOracle) IsExpired(lockTS uint64, TTL uint64) bool { + now := time.Now() + if l.hook != nil { + now = l.hook.currentTime + } + return oracle.GetPhysical(now) >= oracle.ExtractPhysical(lockTS)+int64(TTL) +} + +func (l *localOracle) GetTimestamp(context.Context) (uint64, error) { + l.Lock() + defer l.Unlock() + now := time.Now() + if l.hook != nil { + now = l.hook.currentTime + } + physical := oracle.GetPhysical(now) + ts := oracle.ComposeTS(physical, 0) + if l.lastTimeStampTS == ts { + l.n++ + return ts + l.n, nil + } + l.lastTimeStampTS = ts + l.n = 0 + return ts, nil +} + +func (l *localOracle) GetTimestampAsync(ctx context.Context) oracle.Future { + return &future{ + ctx: ctx, + l: l, + } +} + +type future struct { + ctx context.Context + l *localOracle +} + +func (f *future) Wait() (uint64, error) { + return f.l.GetTimestamp(f.ctx) +} + +// UntilExpired implement oracle.Oracle interface. 
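// Editor's note: standalone sketch, not part of this patch, built against the
// oracle package introduced above. It illustrates the timestamp layout those
// helpers assume: the physical part is a millisecond Unix timestamp shifted
// left by 18 bits, and the low 18 bits hold a logical counter.
package main

import (
	"fmt"
	"time"

	"github.com/pingcap/tidb/store/tikv/oracle"
)

func main() {
	now := time.Now()
	physical := oracle.GetPhysical(now) // milliseconds since the Unix epoch
	ts := oracle.ComposeTS(physical, 42)

	fmt.Println(oracle.ExtractPhysical(ts) == physical) // true: the high bits round-trip
	fmt.Println(ts & ((1 << 18) - 1))                   // 42: the low 18 bits are the logical part
	// GetTimeFromTS keeps millisecond precision, so second-level equality holds.
	fmt.Println(oracle.GetTimeFromTS(ts).Unix() == now.Unix())
}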
+func (l *localOracle) UntilExpired(lockTimeStamp uint64, TTL uint64) int64 { + now := time.Now() + if l.hook != nil { + now = l.hook.currentTime + } + return oracle.ExtractPhysical(lockTimeStamp) + int64(TTL) - oracle.GetPhysical(now) +} + +func (l *localOracle) Close() { +} diff --git a/store/tikv/oracle/oracles/local_test.go b/store/tikv/oracle/oracles/local_test.go new file mode 100644 index 0000000..678eb00 --- /dev/null +++ b/store/tikv/oracle/oracles/local_test.go @@ -0,0 +1,68 @@ +// Copyright 2016 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package oracles_test + +import ( + "context" + "testing" + "time" + + "github.com/pingcap/tidb/store/tikv/oracle/oracles" +) + +func TestLocalOracle(t *testing.T) { + l := oracles.NewLocalOracle() + defer l.Close() + m := map[uint64]struct{}{} + for i := 0; i < 100000; i++ { + ts, err := l.GetTimestamp(context.Background()) + if err != nil { + t.Error(err) + } + m[ts] = struct{}{} + } + + if len(m) != 100000 { + t.Error("generated same ts") + } +} + +func TestIsExpired(t *testing.T) { + o := oracles.NewLocalOracle() + defer o.Close() + start := time.Now() + oracles.SetOracleHookCurrentTime(o, start) + ts, _ := o.GetTimestamp(context.Background()) + oracles.SetOracleHookCurrentTime(o, start.Add(10*time.Millisecond)) + expire := o.IsExpired(uint64(ts), 5) + if !expire { + t.Error("should expired") + } + expire = o.IsExpired(uint64(ts), 200) + if expire { + t.Error("should not expired") + } +} + +func TestLocalOracle_UntilExpired(t *testing.T) { + o := oracles.NewLocalOracle() + defer o.Close() + start := time.Now() + oracles.SetOracleHookCurrentTime(o, start) + ts, _ := o.GetTimestamp(context.Background()) + oracles.SetOracleHookCurrentTime(o, start.Add(10*time.Millisecond)) + if o.UntilExpired(uint64(ts), 5) != -5 || o.UntilExpired(uint64(ts), 15) != 5 { + t.Error("until expired should be +-5") + } +} diff --git a/store/tikv/oracle/oracles/pd.go b/store/tikv/oracle/oracles/pd.go new file mode 100644 index 0000000..f529fcd --- /dev/null +++ b/store/tikv/oracle/oracles/pd.go @@ -0,0 +1,147 @@ +// Copyright 2016 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package oracles + +import ( + "context" + "sync/atomic" + "time" + + "github.com/pingcap-incubator/tinykv/scheduler/client" + "github.com/pingcap/errors" + + "github.com/pingcap/tidb/store/tikv/oracle" + "github.com/pingcap/tidb/util/logutil" + "go.uber.org/zap" +) + +var _ oracle.Oracle = &pdOracle{} + +const slowDist = 30 * time.Millisecond + +// pdOracle is an Oracle that uses a placement driver client as source. 
+type pdOracle struct { + c pd.Client + lastTS uint64 + quit chan struct{} +} + +// NewPdOracle create an Oracle that uses a pd client source. +// Refer https://github.com/pingcap-incubator/tinykv/scheduler/blob/master/client/client.go for more details. +// PdOracle mantains `lastTS` to store the last timestamp got from PD server. If +// `GetTimestamp()` is not called after `updateInterval`, it will be called by +// itself to keep up with the timestamp on PD server. +func NewPdOracle(pdClient pd.Client, updateInterval time.Duration) (oracle.Oracle, error) { + o := &pdOracle{ + c: pdClient, + quit: make(chan struct{}), + } + ctx := context.TODO() + go o.updateTS(ctx, updateInterval) + // Initialize lastTS by Get. + _, err := o.GetTimestamp(ctx) + if err != nil { + o.Close() + return nil, errors.Trace(err) + } + return o, nil +} + +// IsExpired returns whether lockTS+TTL is expired, both are ms. It uses `lastTS` +// to compare, may return false negative result temporarily. +func (o *pdOracle) IsExpired(lockTS, TTL uint64) bool { + lastTS := atomic.LoadUint64(&o.lastTS) + return oracle.ExtractPhysical(lastTS) >= oracle.ExtractPhysical(lockTS)+int64(TTL) +} + +// GetTimestamp gets a new increasing time. +func (o *pdOracle) GetTimestamp(ctx context.Context) (uint64, error) { + ts, err := o.getTimestamp(ctx) + if err != nil { + return 0, errors.Trace(err) + } + o.setLastTS(ts) + return ts, nil +} + +type tsFuture struct { + pd.TSFuture + o *pdOracle +} + +// Wait implements the oracle.Future interface. +func (f *tsFuture) Wait() (uint64, error) { + physical, logical, err := f.TSFuture.Wait() + + if err != nil { + return 0, errors.Trace(err) + } + ts := oracle.ComposeTS(physical, logical) + f.o.setLastTS(ts) + return ts, nil +} + +func (o *pdOracle) GetTimestampAsync(ctx context.Context) oracle.Future { + ts := o.c.GetTSAsync(ctx) + return &tsFuture{ts, o} +} + +func (o *pdOracle) getTimestamp(ctx context.Context) (uint64, error) { + now := time.Now() + physical, logical, err := o.c.GetTS(ctx) + if err != nil { + return 0, errors.Trace(err) + } + dist := time.Since(now) + if dist > slowDist { + logutil.Logger(ctx).Warn("get timestamp too slow", + zap.Duration("cost time", dist)) + } + return oracle.ComposeTS(physical, logical), nil +} + +func (o *pdOracle) setLastTS(ts uint64) { + lastTS := atomic.LoadUint64(&o.lastTS) + if ts > lastTS { + atomic.CompareAndSwapUint64(&o.lastTS, lastTS, ts) + } +} + +func (o *pdOracle) updateTS(ctx context.Context, interval time.Duration) { + ticker := time.NewTicker(interval) + defer ticker.Stop() + for { + select { + case <-ticker.C: + ts, err := o.getTimestamp(ctx) + if err != nil { + logutil.Logger(ctx).Error("updateTS error", zap.Error(err)) + break + } + o.setLastTS(ts) + case <-o.quit: + return + } + } +} + +// UntilExpired implement oracle.Oracle interface. +func (o *pdOracle) UntilExpired(lockTS uint64, TTL uint64) int64 { + lastTS := atomic.LoadUint64(&o.lastTS) + return oracle.ExtractPhysical(lockTS) + int64(TTL) - oracle.ExtractPhysical(lastTS) +} + +func (o *pdOracle) Close() { + close(o.quit) +} diff --git a/store/tikv/oracle/oracles/pd_test.go b/store/tikv/oracle/oracles/pd_test.go new file mode 100644 index 0000000..e5199fb --- /dev/null +++ b/store/tikv/oracle/oracles/pd_test.go @@ -0,0 +1,39 @@ +// Copyright 2019 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package oracles_test + +import ( + "testing" + "time" + + . "github.com/pingcap/check" + "github.com/pingcap/tidb/store/tikv/oracle" + "github.com/pingcap/tidb/store/tikv/oracle/oracles" +) + +func TestT(t *testing.T) { + TestingT(t) +} + +func TestPDOracle_UntilExpired(t *testing.T) { + lockAfter, lockExp := 10, 15 + o := oracles.NewEmptyPDOracle() + start := time.Now() + oracles.SetEmptyPDOracleLastTs(o, oracle.ComposeTS(oracle.GetPhysical(start), 0)) + lockTs := oracle.ComposeTS(oracle.GetPhysical(start.Add(time.Duration(lockAfter)*time.Millisecond)), 1) + waitTs := o.UntilExpired(lockTs, uint64(lockExp)) + if waitTs != int64(lockAfter+lockExp) { + t.Errorf("waitTs shoulb be %d but got %d", int64(lockAfter+lockExp), waitTs) + } +} diff --git a/store/tikv/pd_codec.go b/store/tikv/pd_codec.go new file mode 100644 index 0000000..3b21f16 --- /dev/null +++ b/store/tikv/pd_codec.go @@ -0,0 +1,101 @@ +// Copyright 2016 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package tikv + +import ( + "context" + + "github.com/pingcap-incubator/tinykv/proto/pkg/metapb" + "github.com/pingcap-incubator/tinykv/scheduler/client" + "github.com/pingcap/errors" + "github.com/pingcap/tidb/util/codec" +) + +type codecPDClient struct { + pd.Client +} + +// GetRegion encodes the key before send requests to pd-server and decodes the +// returned StartKey && EndKey from pd-server. +func (c *codecPDClient) GetRegion(ctx context.Context, key []byte) (*metapb.Region, *metapb.Peer, error) { + encodedKey := codec.EncodeBytes([]byte(nil), key) + region, peer, err := c.Client.GetRegion(ctx, encodedKey) + return processRegionResult(region, peer, err) +} + +func (c *codecPDClient) GetPrevRegion(ctx context.Context, key []byte) (*metapb.Region, *metapb.Peer, error) { + encodedKey := codec.EncodeBytes([]byte(nil), key) + region, peer, err := c.Client.GetPrevRegion(ctx, encodedKey) + return processRegionResult(region, peer, err) +} + +// GetRegionByID encodes the key before send requests to pd-server and decodes the +// returned StartKey && EndKey from pd-server. 
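// Editor's note: illustrative sketch, not part of this patch. It shows the
// encode/decode round trip that codecPDClient applies around PD calls: user
// keys are wrapped with codec.EncodeBytes before being sent to PD, and the
// region boundaries PD returns are unwrapped again by decodeRegionMetaKey via
// codec.DecodeBytes. Only codec calls already used by this file appear here;
// the bytes import is assumed.
func demoKeyCodecRoundTrip() error {
	rawKey := []byte("user-key")

	encoded := codec.EncodeBytes([]byte(nil), rawKey)  // the form PD sees
	_, decoded, err := codec.DecodeBytes(encoded, nil) // the form callers see
	if err != nil {
		return errors.Trace(err)
	}
	if !bytes.Equal(decoded, rawKey) {
		return errors.New("key codec round trip should be lossless")
	}
	return nil
}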
+func (c *codecPDClient) GetRegionByID(ctx context.Context, regionID uint64) (*metapb.Region, *metapb.Peer, error) { + region, peer, err := c.Client.GetRegionByID(ctx, regionID) + return processRegionResult(region, peer, err) +} + +func (c *codecPDClient) ScanRegions(ctx context.Context, startKey []byte, endKey []byte, limit int) ([]*metapb.Region, []*metapb.Peer, error) { + startKey = codec.EncodeBytes([]byte(nil), startKey) + if len(endKey) > 0 { + endKey = codec.EncodeBytes([]byte(nil), endKey) + } + + regions, peers, err := c.Client.ScanRegions(ctx, startKey, endKey, limit) + if err != nil { + return nil, nil, errors.Trace(err) + } + for _, region := range regions { + if region != nil { + err = decodeRegionMetaKey(region) + if err != nil { + return nil, nil, errors.Trace(err) + } + } + } + return regions, peers, nil +} + +func processRegionResult(region *metapb.Region, peer *metapb.Peer, err error) (*metapb.Region, *metapb.Peer, error) { + if err != nil { + return nil, nil, errors.Trace(err) + } + if region == nil { + return nil, nil, nil + } + err = decodeRegionMetaKey(region) + if err != nil { + return nil, nil, errors.Trace(err) + } + return region, peer, nil +} + +func decodeRegionMetaKey(r *metapb.Region) error { + if len(r.StartKey) != 0 { + _, decoded, err := codec.DecodeBytes(r.StartKey, nil) + if err != nil { + return errors.Trace(err) + } + r.StartKey = decoded + } + if len(r.EndKey) != 0 { + _, decoded, err := codec.DecodeBytes(r.EndKey, nil) + if err != nil { + return errors.Trace(err) + } + r.EndKey = decoded + } + return nil +} diff --git a/store/tikv/range_task.go b/store/tikv/range_task.go new file mode 100644 index 0000000..1ff3a50 --- /dev/null +++ b/store/tikv/range_task.go @@ -0,0 +1,285 @@ +// Copyright 2019 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package tikv + +import ( + "bytes" + "context" + "sync" + "sync/atomic" + "time" + + "github.com/pingcap/errors" + "github.com/pingcap/tidb/kv" + + "github.com/pingcap/tidb/util/logutil" + "go.uber.org/zap" +) + +const ( + rangeTaskDefaultStatLogInterval = time.Minute * 10 + defaultRegionsPerTask = 128 +) + +// RangeTaskRunner splits a range into many ranges to process concurrently, and convenient to send requests to all +// regions in the range. Because of merging and splitting, it's possible that multiple requests for disjoint ranges are +// sent to the same region. +type RangeTaskRunner struct { + name string + store Storage + concurrency int + handler RangeTaskHandler + statLogInterval time.Duration + regionsPerTask int + + completedRegions int32 + failedRegions int32 +} + +// RangeTaskStat is used to count Regions that completed or failed to do the task. +type RangeTaskStat struct { + CompletedRegions int + FailedRegions int +} + +// RangeTaskHandler is the type of functions that processes a task of a key range. +// The function should calculate Regions that succeeded or failed to the task. +// Returning error from the handler means the error caused the whole task should be stopped. 
+type RangeTaskHandler = func(ctx context.Context, r kv.KeyRange) (RangeTaskStat, error) + +// NewRangeTaskRunner creates a RangeTaskRunner. +// +// `requestCreator` is the function used to create RPC request according to the given range. +// `responseHandler` is the function to process responses of errors. If `responseHandler` returns error, the whole job +// will be canceled. +func NewRangeTaskRunner( + name string, + store Storage, + concurrency int, + handler RangeTaskHandler, +) *RangeTaskRunner { + return &RangeTaskRunner{ + name: name, + store: store, + concurrency: concurrency, + handler: handler, + statLogInterval: rangeTaskDefaultStatLogInterval, + regionsPerTask: defaultRegionsPerTask, + } +} + +// SetRegionsPerTask sets how many regions is in a divided task. Since regions may split and merge, it's possible that +// a sub task contains not exactly specified number of regions. +func (s *RangeTaskRunner) SetRegionsPerTask(regionsPerTask int) { + if regionsPerTask < 1 { + panic("RangeTaskRunner: regionsPerTask should be at least 1") + } + s.regionsPerTask = regionsPerTask +} + +// SetStatLogInterval sets the time interval to log the stats. +func (s *RangeTaskRunner) SetStatLogInterval(interval time.Duration) { + s.statLogInterval = interval +} + +// RunOnRange runs the task on the given range. +// Empty startKey or endKey means unbounded. +func (s *RangeTaskRunner) RunOnRange(ctx context.Context, startKey, endKey kv.Key) error { + s.completedRegions = 0 + + if len(endKey) != 0 && bytes.Compare(startKey, endKey) >= 0 { + logutil.Logger(ctx).Info("empty range task executed. ignored", + zap.String("name", s.name), + zap.Stringer("startKey", startKey), + zap.Stringer("endKey", endKey)) + return nil + } + + logutil.Logger(ctx).Info("range task started", + zap.String("name", s.name), + zap.Stringer("startKey", startKey), + zap.Stringer("endKey", endKey), + zap.Int("concurrency", s.concurrency)) + + // Periodically log the progress + statLogTicker := time.NewTicker(s.statLogInterval) + + ctx, cancel := context.WithCancel(ctx) + taskCh := make(chan *kv.KeyRange, s.concurrency) + var wg sync.WaitGroup + + // Create workers that concurrently process the whole range. + workers := make([]*rangeTaskWorker, 0, s.concurrency) + for i := 0; i < s.concurrency; i++ { + w := s.createWorker(taskCh, &wg) + workers = append(workers, w) + wg.Add(1) + go w.run(ctx, cancel) + } + + startTime := time.Now() + + // Make sure taskCh is closed exactly once + isClosed := false + defer func() { + if !isClosed { + close(taskCh) + wg.Wait() + } + statLogTicker.Stop() + cancel() + + }() + + // Iterate all regions and send each region's range as a task to the workers. 
+ key := startKey +Loop: + for { + select { + case <-statLogTicker.C: + logutil.Logger(ctx).Info("range task in progress", + zap.String("name", s.name), + zap.Stringer("startKey", startKey), + zap.Stringer("endKey", endKey), + zap.Int("concurrency", s.concurrency), + zap.Duration("cost time", time.Since(startTime)), + zap.Int("completed regions", s.CompletedRegions())) + default: + } + + bo := NewBackoffer(ctx, locateRegionMaxBackoff) + + rangeEndKey, err := s.store.GetRegionCache().BatchLoadRegionsFromKey(bo, key, s.regionsPerTask) + if err != nil { + logutil.Logger(ctx).Info("range task failed", + zap.String("name", s.name), + zap.Stringer("startKey", startKey), + zap.Stringer("endKey", endKey), + zap.Duration("cost time", time.Since(startTime)), + zap.Error(err)) + return errors.Trace(err) + } + task := &kv.KeyRange{ + StartKey: key, + EndKey: rangeEndKey, + } + + isLast := len(task.EndKey) == 0 || (len(endKey) > 0 && bytes.Compare(task.EndKey, endKey) >= 0) + // Let task.EndKey = min(endKey, loc.EndKey) + if isLast { + task.EndKey = endKey + } + select { + case taskCh <- task: + case <-ctx.Done(): + break Loop + } + if isLast { + break + } + + key = task.EndKey + } + + isClosed = true + close(taskCh) + wg.Wait() + for _, w := range workers { + if w.err != nil { + logutil.Logger(ctx).Info("range task failed", + zap.String("name", s.name), + zap.Stringer("startKey", startKey), + zap.Stringer("endKey", endKey), + zap.Duration("cost time", time.Since(startTime)), + zap.Error(w.err)) + return errors.Trace(w.err) + } + } + + logutil.Logger(ctx).Info("range task finished", + zap.String("name", s.name), + zap.Stringer("startKey", startKey), + zap.Stringer("endKey", endKey), + zap.Duration("cost time", time.Since(startTime)), + zap.Int("completed regions", s.CompletedRegions())) + + return nil +} + +// createWorker creates a worker that can process tasks from the given channel. +func (s *RangeTaskRunner) createWorker(taskCh chan *kv.KeyRange, wg *sync.WaitGroup) *rangeTaskWorker { + return &rangeTaskWorker{ + name: s.name, + store: s.store, + handler: s.handler, + taskCh: taskCh, + wg: wg, + + completedRegions: &s.completedRegions, + failedRegions: &s.failedRegions, + } +} + +// CompletedRegions returns how many regions has been sent requests. +func (s *RangeTaskRunner) CompletedRegions() int { + return int(atomic.LoadInt32(&s.completedRegions)) +} + +// FailedRegions returns how many regions has failed to do the task. +func (s *RangeTaskRunner) FailedRegions() int { + return int(atomic.LoadInt32(&s.failedRegions)) +} + +// rangeTaskWorker is used by RangeTaskRunner to process tasks concurrently. +type rangeTaskWorker struct { + name string + store Storage + handler RangeTaskHandler + taskCh chan *kv.KeyRange + wg *sync.WaitGroup + + err error + + completedRegions *int32 + failedRegions *int32 +} + +// run starts the worker. It collects all objects from `w.taskCh` and process them one by one. 
+func (w *rangeTaskWorker) run(ctx context.Context, cancel context.CancelFunc) { + defer w.wg.Done() + for r := range w.taskCh { + select { + case <-ctx.Done(): + w.err = ctx.Err() + return + default: + } + + stat, err := w.handler(ctx, *r) + + atomic.AddInt32(w.completedRegions, int32(stat.CompletedRegions)) + atomic.AddInt32(w.failedRegions, int32(stat.FailedRegions)) + + if err != nil { + logutil.Logger(ctx).Info("canceling range task because of error", + zap.String("name", w.name), + zap.Stringer("failed startKey", r.StartKey), + zap.Stringer("failed endKey", r.EndKey), + zap.Error(err)) + w.err = err + cancel() + break + } + } +} diff --git a/store/tikv/range_task_test.go b/store/tikv/range_task_test.go new file mode 100644 index 0000000..3056685 --- /dev/null +++ b/store/tikv/range_task_test.go @@ -0,0 +1,229 @@ +// Copyright 2019 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package tikv + +import ( + "bytes" + "context" + "errors" + "sort" + + . "github.com/pingcap/check" + "github.com/pingcap/tidb/kv" + "github.com/pingcap/tidb/store/mockstore/mocktikv" +) + +type testRangeTaskSuite struct { + OneByOneSuite + cluster *mocktikv.Cluster + store *tikvStore + + testRanges []kv.KeyRange + expectedRanges [][]kv.KeyRange +} + +var _ = Suite(&testRangeTaskSuite{}) + +func makeRange(startKey string, endKey string) kv.KeyRange { + return kv.KeyRange{ + StartKey: []byte(startKey), + EndKey: []byte(endKey), + } +} + +func (s *testRangeTaskSuite) SetUpTest(c *C) { + // Split the store at "a" to "z" + splitKeys := make([][]byte, 0) + for k := byte('a'); k <= byte('z'); k++ { + splitKeys = append(splitKeys, []byte{k}) + } + + // Calculate all region's ranges + allRegionRanges := []kv.KeyRange{makeRange("", "a")} + for i := 0; i < len(splitKeys)-1; i++ { + allRegionRanges = append(allRegionRanges, kv.KeyRange{ + StartKey: splitKeys[i], + EndKey: splitKeys[i+1], + }) + } + allRegionRanges = append(allRegionRanges, makeRange("z", "")) + + s.cluster = mocktikv.NewCluster() + mocktikv.BootstrapWithMultiRegions(s.cluster, splitKeys...) 
+ client, pdClient, err := mocktikv.NewTiKVAndPDClient(s.cluster, nil, "") + c.Assert(err, IsNil) + + store, err := NewTestTiKVStore(client, pdClient, nil, nil) + c.Assert(err, IsNil) + s.store = store.(*tikvStore) + + s.testRanges = []kv.KeyRange{ + makeRange("", ""), + makeRange("", "b"), + makeRange("b", ""), + makeRange("b", "x"), + makeRange("a", "d"), + makeRange("a\x00", "d\x00"), + makeRange("a\xff\xff\xff", "c\xff\xff\xff"), + makeRange("a1", "a2"), + makeRange("a", "a"), + makeRange("a3", "a3"), + } + + s.expectedRanges = [][]kv.KeyRange{ + allRegionRanges, + allRegionRanges[:2], + allRegionRanges[2:], + allRegionRanges[2:24], + { + makeRange("a", "b"), + makeRange("b", "c"), + makeRange("c", "d"), + }, + { + makeRange("a\x00", "b"), + makeRange("b", "c"), + makeRange("c", "d"), + makeRange("d", "d\x00"), + }, + { + makeRange("a\xff\xff\xff", "b"), + makeRange("b", "c"), + makeRange("c", "c\xff\xff\xff"), + }, + { + makeRange("a1", "a2"), + }, + {}, + {}, + } +} + +func (s *testRangeTaskSuite) TearDownTest(c *C) { + err := s.store.Close() + c.Assert(err, IsNil) +} + +func collect(c chan *kv.KeyRange) []kv.KeyRange { + c <- nil + ranges := make([]kv.KeyRange, 0) + + for { + r := <-c + if r == nil { + break + } + + ranges = append(ranges, *r) + } + return ranges +} + +func (s *testRangeTaskSuite) checkRanges(c *C, obtained []kv.KeyRange, expected []kv.KeyRange) { + sort.Slice(obtained, func(i, j int) bool { + return bytes.Compare(obtained[i].StartKey, obtained[j].StartKey) < 0 + }) + + c.Assert(obtained, DeepEquals, expected) +} + +func batchRanges(ranges []kv.KeyRange, batchSize int) []kv.KeyRange { + result := make([]kv.KeyRange, 0, len(ranges)) + + for i := 0; i < len(ranges); i += batchSize { + lastRange := i + batchSize - 1 + if lastRange >= len(ranges) { + lastRange = len(ranges) - 1 + } + + result = append(result, kv.KeyRange{ + StartKey: ranges[i].StartKey, + EndKey: ranges[lastRange].EndKey, + }) + } + + return result +} + +func (s *testRangeTaskSuite) testRangeTaskImpl(c *C, concurrency int) { + c.Logf("Test RangeTask, concurrency: %v", concurrency) + + ranges := make(chan *kv.KeyRange, 100) + + handler := func(ctx context.Context, r kv.KeyRange) (RangeTaskStat, error) { + ranges <- &r + stat := RangeTaskStat{ + CompletedRegions: 1, + } + return stat, nil + } + + runner := NewRangeTaskRunner("test-runner", s.store, concurrency, handler) + + for regionsPerTask := 1; regionsPerTask <= 5; regionsPerTask++ { + for i, r := range s.testRanges { + runner.SetRegionsPerTask(regionsPerTask) + + expectedRanges := batchRanges(s.expectedRanges[i], regionsPerTask) + + err := runner.RunOnRange(context.Background(), r.StartKey, r.EndKey) + c.Assert(err, IsNil) + s.checkRanges(c, collect(ranges), expectedRanges) + c.Assert(runner.CompletedRegions(), Equals, len(expectedRanges)) + c.Assert(runner.FailedRegions(), Equals, 0) + } + } +} + +func (s *testRangeTaskSuite) TestRangeTask(c *C) { + for concurrency := 1; concurrency < 5; concurrency++ { + s.testRangeTaskImpl(c, concurrency) + } +} + +func (s *testRangeTaskSuite) testRangeTaskErrorImpl(c *C, concurrency int) { + for i, r := range s.testRanges { + // Iterate all sub tasks and make it an error + subRanges := s.expectedRanges[i] + for _, subRange := range subRanges { + errKey := subRange.StartKey + c.Logf("Test RangeTask Error concurrency: %v, range: [%+q, %+q), errKey: %+q", concurrency, r.StartKey, r.EndKey, errKey) + + handler := func(ctx context.Context, r kv.KeyRange) (RangeTaskStat, error) { + stat := RangeTaskStat{0, 0} + if 
bytes.Equal(r.StartKey, errKey) { + stat.FailedRegions++ + return stat, errors.New("test error") + + } + stat.CompletedRegions++ + return stat, nil + } + + runner := NewRangeTaskRunner("test-error-runner", s.store, concurrency, handler) + runner.SetRegionsPerTask(1) + err := runner.RunOnRange(context.Background(), r.StartKey, r.EndKey) + // RunOnRange returns no error only when all sub tasks are done successfully. + c.Assert(err, NotNil) + c.Assert(runner.CompletedRegions(), Less, len(subRanges)) + c.Assert(runner.FailedRegions(), Equals, 1) + } + } +} + +func (s *testRangeTaskSuite) TestRangeTaskError(c *C) { + for concurrency := 1; concurrency < 5; concurrency++ { + s.testRangeTaskErrorImpl(c, concurrency) + } +} diff --git a/store/tikv/rawkv.go b/store/tikv/rawkv.go new file mode 100644 index 0000000..4130999 --- /dev/null +++ b/store/tikv/rawkv.go @@ -0,0 +1,189 @@ +// Copyright 2016 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package tikv + +import ( + "context" + + "github.com/pingcap-incubator/tinykv/proto/pkg/kvrpcpb" + "github.com/pingcap-incubator/tinykv/scheduler/client" + "github.com/pingcap/errors" + "github.com/pingcap/tidb/store/tikv/tikvrpc" +) + +var ( + // MaxRawKVScanLimit is the maximum scan limit for rawkv Scan. + MaxRawKVScanLimit = 10240 + // ErrMaxScanLimitExceeded is returned when the limit for rawkv Scan is to large. + ErrMaxScanLimitExceeded = errors.New("limit should be less than MaxRawKVScanLimit") +) + +// RawKVClient is a client of TiKV server which is used as a key-value storage, +// only GET/PUT/DELETE commands are supported. +type RawKVClient struct { + clusterID uint64 + regionCache *RegionCache + pdClient pd.Client + rpcClient Client +} + +// Close closes the client. +func (c *RawKVClient) Close() error { + if c.pdClient != nil { + c.pdClient.Close() + } + if c.regionCache != nil { + c.regionCache.Close() + } + if c.rpcClient == nil { + return nil + } + return c.rpcClient.Close() +} + +// ClusterID returns the TiKV cluster ID. +func (c *RawKVClient) ClusterID() uint64 { + return c.clusterID +} + +// Get queries value with the key. When the key does not exist, it returns `nil, nil`. +func (c *RawKVClient) Get(key []byte) ([]byte, error) { + req := tikvrpc.NewRequest(tikvrpc.CmdRawGet, &kvrpcpb.RawGetRequest{Key: key}) + resp, _, err := c.sendReq(key, req, false) + if err != nil { + return nil, errors.Trace(err) + } + if resp.Resp == nil { + return nil, errors.Trace(ErrBodyMissing) + } + cmdResp := resp.Resp.(*kvrpcpb.RawGetResponse) + if cmdResp.GetError() != "" { + return nil, errors.New(cmdResp.GetError()) + } + if len(cmdResp.Value) == 0 { + return nil, nil + } + return cmdResp.Value, nil +} + +// Put stores a key-value pair to TiKV. 
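// Editor's note: illustrative sketch, not part of the patch. It shows basic usage of
// the RawKVClient Get/Put/Delete/Scan methods defined in this file. Construction of
// the client is omitted because no constructor appears in this excerpt; the function
// name and the example keys are placeholders.
func exampleRawKV(c *RawKVClient) error {
	if err := c.Put([]byte("company"), []byte("PingCAP")); err != nil {
		return err
	}
	v, err := c.Get([]byte("company"))
	if err != nil {
		return err
	}
	_ = v // a nil value with a nil error means the key does not exist
	// Scan returns up to `limit` key-value pairs starting from the given key.
	keys, values, err := c.Scan([]byte("a"), 10)
	if err != nil {
		return err
	}
	_, _ = keys, values
	return c.Delete([]byte("company"))
}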
+func (c *RawKVClient) Put(key, value []byte) error { + if len(value) == 0 { + return errors.New("empty value is not supported") + } + + req := tikvrpc.NewRequest(tikvrpc.CmdRawPut, &kvrpcpb.RawPutRequest{ + Key: key, + Value: value, + }) + resp, _, err := c.sendReq(key, req, false) + if err != nil { + return errors.Trace(err) + } + if resp.Resp == nil { + return errors.Trace(ErrBodyMissing) + } + cmdResp := resp.Resp.(*kvrpcpb.RawPutResponse) + if cmdResp.GetError() != "" { + return errors.New(cmdResp.GetError()) + } + return nil +} + +// Delete deletes a key-value pair from TiKV. +func (c *RawKVClient) Delete(key []byte) error { + req := tikvrpc.NewRequest(tikvrpc.CmdRawDelete, &kvrpcpb.RawDeleteRequest{ + Key: key, + }) + resp, _, err := c.sendReq(key, req, false) + if err != nil { + return errors.Trace(err) + } + if resp.Resp == nil { + return errors.Trace(ErrBodyMissing) + } + cmdResp := resp.Resp.(*kvrpcpb.RawDeleteResponse) + if cmdResp.GetError() != "" { + return errors.New(cmdResp.GetError()) + } + return nil +} + +// Scan queries continuous kv pairs in range [startKey, endKey), up to limit pairs. +// If endKey is empty, it means unbounded. +// If you want to exclude the startKey or include the endKey, append a '\0' to the key. For example, to scan +// (startKey, endKey], you can write: +// `Scan(append(startKey, '\0'), append(endKey, '\0'), limit)`. +func (c *RawKVClient) Scan(startKey []byte, limit int) (keys [][]byte, values [][]byte, err error) { + if limit > MaxRawKVScanLimit { + return nil, nil, errors.Trace(ErrMaxScanLimitExceeded) + } + + for len(keys) < limit { + req := tikvrpc.NewRequest(tikvrpc.CmdRawScan, &kvrpcpb.RawScanRequest{ + StartKey: startKey, + Limit: uint32(limit - len(keys)), + }) + resp, loc, err := c.sendReq(startKey, req, false) + if err != nil { + return nil, nil, errors.Trace(err) + } + if resp.Resp == nil { + return nil, nil, errors.Trace(ErrBodyMissing) + } + cmdResp := resp.Resp.(*kvrpcpb.RawScanResponse) + for _, pair := range cmdResp.Kvs { + keys = append(keys, pair.Key) + values = append(values, pair.Value) + } + startKey = loc.EndKey + if len(startKey) == 0 { + break + } + } + return +} + +func (c *RawKVClient) sendReq(key []byte, req *tikvrpc.Request, reverse bool) (*tikvrpc.Response, *KeyLocation, error) { + bo := NewBackoffer(context.Background(), rawkvMaxBackoff) + sender := NewRegionRequestSender(c.regionCache, c.rpcClient) + for { + var loc *KeyLocation + var err error + if reverse { + loc, err = c.regionCache.LocateEndKey(bo, key) + } else { + loc, err = c.regionCache.LocateKey(bo, key) + } + if err != nil { + return nil, nil, errors.Trace(err) + } + resp, err := sender.SendReq(bo, req, loc.Region, readTimeoutShort) + if err != nil { + return nil, nil, errors.Trace(err) + } + regionErr, err := resp.GetRegionError() + if err != nil { + return nil, nil, errors.Trace(err) + } + if regionErr != nil { + err := bo.Backoff(BoRegionMiss, errors.New(regionErr.String())) + if err != nil { + return nil, nil, errors.Trace(err) + } + continue + } + return resp, loc, nil + } +} diff --git a/store/tikv/region_cache.go b/store/tikv/region_cache.go new file mode 100644 index 0000000..fe02ffb --- /dev/null +++ b/store/tikv/region_cache.go @@ -0,0 +1,1168 @@ +// Copyright 2016 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package tikv + +import ( + "bytes" + "context" + "fmt" + "sync" + "sync/atomic" + "time" + "unsafe" + + "github.com/gogo/protobuf/proto" + "github.com/google/btree" + "github.com/pingcap-incubator/tinykv/proto/pkg/metapb" + pd "github.com/pingcap-incubator/tinykv/scheduler/client" + "github.com/pingcap/errors" + "github.com/pingcap/failpoint" + "github.com/pingcap/tidb/kv" + + "github.com/pingcap/tidb/util/logutil" + "go.uber.org/zap" +) + +const ( + btreeDegree = 32 + invalidatedLastAccessTime = -1 +) + +// RegionCacheTTLSec is the max idle time for regions in the region cache. +const RegionCacheTTLSec int64 = 600 + +const ( + updated int32 = iota // region is updated and no need to reload. + needSync // need sync new region info. +) + +// Region presents kv region +type Region struct { + meta *metapb.Region // raw region meta from PD immutable after init + store unsafe.Pointer // point to region store info, see RegionStore + syncFlag int32 // region need be sync in next turn + lastAccess int64 // last region access time, see checkRegionCacheTTL +} + +// RegionStore represents region stores info +// it will be store as unsafe.Pointer and be load at once +type RegionStore struct { + workTiKVIdx int32 // point to current work peer in meta.Peers and work store in stores(same idx) for tikv peer + stores []*Store // stores in this region + storeFails []uint32 // snapshots of store's fail, need reload when `storeFails[curr] != stores[cur].fail` +} + +// clone clones region store struct. +func (r *RegionStore) clone() *RegionStore { + storeFails := make([]uint32, len(r.stores)) + copy(storeFails, r.storeFails) + return &RegionStore{ + workTiKVIdx: r.workTiKVIdx, + stores: r.stores, + storeFails: storeFails, + } +} + +// return next follower store's index +func (r *RegionStore) follower(seed uint32) int32 { + l := uint32(len(r.stores)) + if l <= 1 { + return r.workTiKVIdx + } + + for retry := l - 1; retry > 0; retry-- { + followerIdx := int32(seed % (l - 1)) + if followerIdx >= r.workTiKVIdx { + followerIdx++ + } + if r.storeFails[followerIdx] == atomic.LoadUint32(&r.stores[followerIdx].fail) { + return followerIdx + } + seed++ + } + return r.workTiKVIdx +} + +// init initializes region after constructed. +func (r *Region) init(c *RegionCache) { + // region store pull used store from global store map + // to avoid acquire storeMu in later access. + rs := &RegionStore{ + workTiKVIdx: 0, + stores: make([]*Store, 0, len(r.meta.Peers)), + storeFails: make([]uint32, 0, len(r.meta.Peers)), + } + for _, p := range r.meta.Peers { + c.storeMu.RLock() + store, exists := c.storeMu.stores[p.StoreId] + c.storeMu.RUnlock() + if !exists { + store = c.getStoreByStoreID(p.StoreId) + } + rs.stores = append(rs.stores, store) + rs.storeFails = append(rs.storeFails, atomic.LoadUint32(&store.fail)) + } + atomic.StorePointer(&r.store, unsafe.Pointer(rs)) + + // mark region has been init accessed. 
+ r.lastAccess = time.Now().Unix() +} + +func (r *Region) getStore() (store *RegionStore) { + store = (*RegionStore)(atomic.LoadPointer(&r.store)) + return +} + +func (r *Region) compareAndSwapStore(oldStore, newStore *RegionStore) bool { + return atomic.CompareAndSwapPointer(&r.store, unsafe.Pointer(oldStore), unsafe.Pointer(newStore)) +} + +func (r *Region) checkRegionCacheTTL(ts int64) bool { + for { + lastAccess := atomic.LoadInt64(&r.lastAccess) + if ts-lastAccess > RegionCacheTTLSec { + return false + } + if atomic.CompareAndSwapInt64(&r.lastAccess, lastAccess, ts) { + return true + } + } +} + +// invalidate invalidates a region, next time it will got null result. +func (r *Region) invalidate() { + atomic.StoreInt64(&r.lastAccess, invalidatedLastAccessTime) +} + +// scheduleReload schedules reload region request in next LocateKey. +func (r *Region) scheduleReload() { + oldValue := atomic.LoadInt32(&r.syncFlag) + if oldValue != updated { + return + } + atomic.CompareAndSwapInt32(&r.syncFlag, oldValue, needSync) +} + +// needReload checks whether region need reload. +func (r *Region) needReload() bool { + oldValue := atomic.LoadInt32(&r.syncFlag) + if oldValue == updated { + return false + } + return atomic.CompareAndSwapInt32(&r.syncFlag, oldValue, updated) +} + +// RegionCache caches Regions loaded from PD. +type RegionCache struct { + pdClient pd.Client + + mu struct { + sync.RWMutex // mutex protect cached region + regions map[RegionVerID]*Region // cached regions be organized as regionVerID to region ref mapping + sorted *btree.BTree // cache regions be organized as sorted key to region ref mapping + } + storeMu struct { + sync.RWMutex + stores map[uint64]*Store + } + notifyCheckCh chan struct{} + closeCh chan struct{} +} + +// NewRegionCache creates a RegionCache. +func NewRegionCache(pdClient pd.Client) *RegionCache { + c := &RegionCache{ + pdClient: pdClient, + } + c.mu.regions = make(map[RegionVerID]*Region) + c.mu.sorted = btree.New(btreeDegree) + c.storeMu.stores = make(map[uint64]*Store) + c.notifyCheckCh = make(chan struct{}, 1) + c.closeCh = make(chan struct{}) + go c.asyncCheckAndResolveLoop() + return c +} + +// Close releases region cache's resource. +func (c *RegionCache) Close() { + close(c.closeCh) +} + +// asyncCheckAndResolveLoop with +func (c *RegionCache) asyncCheckAndResolveLoop() { + var needCheckStores []*Store + for { + select { + case <-c.closeCh: + return + case <-c.notifyCheckCh: + needCheckStores = needCheckStores[:0] + c.checkAndResolve(needCheckStores) + } + } +} + +// checkAndResolve checks and resolve addr of failed stores. +// this method isn't thread-safe and only be used by one goroutine. +func (c *RegionCache) checkAndResolve(needCheckStores []*Store) { + defer func() { + r := recover() + if r != nil { + logutil.BgLogger().Error("panic in the checkAndResolve goroutine", + zap.Reflect("r", r), + zap.Stack("stack trace")) + } + }() + + c.storeMu.RLock() + for _, store := range c.storeMu.stores { + state := store.getResolveState() + if state == needCheck { + needCheckStores = append(needCheckStores, store) + } + } + c.storeMu.RUnlock() + + for _, store := range needCheckStores { + store.reResolve(c) + } +} + +// RPCContext contains data that is needed to send RPC to a region. +type RPCContext struct { + Region RegionVerID + Meta *metapb.Region + Peer *metapb.Peer + PeerIdx int + Store *Store + Addr string +} + +// GetStoreID returns StoreID. 
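// Editor's note: illustrative sketch, not part of the patch. It spells out the
// copy-on-write pattern behind Region.getStore/compareAndSwapStore used above:
// writers never mutate the shared *RegionStore in place; they clone it, change the
// copy, and retry a CAS, so concurrent readers keep using their old snapshot without
// taking locks. The function name is a placeholder and the sketch assumes it lives
// in the same tikv package.
func exampleSwitchWorkStore(r *Region, newIdx int32) {
	for {
		oldStore := r.getStore() // atomic load of the current snapshot
		if oldStore.workTiKVIdx == newIdx {
			return // nothing to change
		}
		newStore := oldStore.clone() // private copy, safe to mutate
		newStore.workTiKVIdx = newIdx
		if r.compareAndSwapStore(oldStore, newStore) {
			return // published atomically
		}
		// CAS lost to a concurrent update: loop and retry against the fresh snapshot.
	}
}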
+func (c *RPCContext) GetStoreID() uint64 { + if c.Store != nil { + return c.Store.storeID + } + return 0 +} + +func (c *RPCContext) String() string { + return fmt.Sprintf("region ID: %d, meta: %s, peer: %s, addr: %s, idx: %d", + c.Region.GetID(), c.Meta, c.Peer, c.Addr, c.PeerIdx) +} + +// GetTiKVRPCContext returns RPCContext for a region. If it returns nil, the region +// must be out of date and already dropped from cache. +func (c *RegionCache) GetTiKVRPCContext(bo *Backoffer, id RegionVerID, replicaRead kv.ReplicaReadType, followerStoreSeed uint32) (*RPCContext, error) { + ts := time.Now().Unix() + + cachedRegion := c.getCachedRegionWithRLock(id) + if cachedRegion == nil { + return nil, nil + } + + if !cachedRegion.checkRegionCacheTTL(ts) { + return nil, nil + } + + regionStore := cachedRegion.getStore() + var store *Store + var peer *metapb.Peer + var storeIdx int + switch replicaRead { + case kv.ReplicaReadFollower: + store, peer, storeIdx = cachedRegion.FollowerStorePeer(regionStore, followerStoreSeed) + default: + store, peer, storeIdx = cachedRegion.WorkStorePeer(regionStore) + } + addr, err := c.getStoreAddr(bo, cachedRegion, store, storeIdx) + if err != nil { + return nil, err + } + // enable by `curl -XPUT -d '1*return("[some-addr]")->return("")' http://host:port/github.com/pingcap/tidb/store/tikv/injectWrongStoreAddr` + failpoint.Inject("injectWrongStoreAddr", func(val failpoint.Value) { + if a, ok := val.(string); ok && len(a) > 0 { + addr = a + } + }) + if store == nil || len(addr) == 0 { + // Store not found, region must be out of date. + cachedRegion.invalidate() + return nil, nil + } + + storeFailEpoch := atomic.LoadUint32(&store.fail) + if storeFailEpoch != regionStore.storeFails[storeIdx] { + cachedRegion.invalidate() + logutil.BgLogger().Info("invalidate current region, because others failed on same store", + zap.Uint64("region", id.GetID()), + zap.String("store", store.addr)) + return nil, nil + } + + return &RPCContext{ + Region: id, + Meta: cachedRegion.meta, + Peer: peer, + PeerIdx: storeIdx, + Store: store, + Addr: addr, + }, nil +} + +// KeyLocation is the region and range that a key is located. +type KeyLocation struct { + Region RegionVerID + StartKey kv.Key + EndKey kv.Key +} + +// Contains checks if key is in [StartKey, EndKey). +func (l *KeyLocation) Contains(key []byte) bool { + return bytes.Compare(l.StartKey, key) <= 0 && + (bytes.Compare(key, l.EndKey) < 0 || len(l.EndKey) == 0) +} + +// LocateKey searches for the region and range that the key is located. +func (c *RegionCache) LocateKey(bo *Backoffer, key []byte) (*KeyLocation, error) { + r, err := c.findRegionByKey(bo, key, false) + if err != nil { + return nil, err + } + return &KeyLocation{ + Region: r.VerID(), + StartKey: r.StartKey(), + EndKey: r.EndKey(), + }, nil +} + +// LocateEndKey searches for the region and range that the key is located. +// Unlike LocateKey, start key of a region is exclusive and end key is inclusive. +func (c *RegionCache) LocateEndKey(bo *Backoffer, key []byte) (*KeyLocation, error) { + r, err := c.findRegionByKey(bo, key, true) + if err != nil { + return nil, err + } + return &KeyLocation{ + Region: r.VerID(), + StartKey: r.StartKey(), + EndKey: r.EndKey(), + }, nil +} + +func (c *RegionCache) findRegionByKey(bo *Backoffer, key []byte, isEndKey bool) (r *Region, err error) { + r = c.searchCachedRegion(key, isEndKey) + if r == nil { + // load region when it is not exists or expired. 
+ lr, err := c.loadRegion(bo, key, isEndKey) + if err != nil { + // no region data, return error if failure. + return nil, err + } + r = lr + c.mu.Lock() + c.insertRegionToCache(r) + c.mu.Unlock() + } else if r.needReload() { + // load region when it be marked as need reload. + lr, err := c.loadRegion(bo, key, isEndKey) + if err != nil { + // ignore error and use old region info. + logutil.Logger(bo.ctx).Error("load region failure", + zap.ByteString("key", key), zap.Error(err)) + } else { + r = lr + c.mu.Lock() + c.insertRegionToCache(r) + c.mu.Unlock() + } + } + return r, nil +} + +// OnSendFail handles send request fail logic. +func (c *RegionCache) OnSendFail(bo *Backoffer, ctx *RPCContext, scheduleReload bool, err error) { + r := c.getCachedRegionWithRLock(ctx.Region) + if r != nil { + c.switchNextPeer(r, ctx.PeerIdx, err) + if scheduleReload { + r.scheduleReload() + } + logutil.Logger(bo.ctx).Info("switch region peer to next due to send request fail", + zap.Stringer("current", ctx), + zap.Bool("needReload", scheduleReload), + zap.Error(err)) + } +} + +// LocateRegionByID searches for the region with ID. +func (c *RegionCache) LocateRegionByID(bo *Backoffer, regionID uint64) (*KeyLocation, error) { + c.mu.RLock() + r := c.getRegionByIDFromCache(regionID) + c.mu.RUnlock() + if r != nil { + if r.needReload() { + lr, err := c.loadRegionByID(bo, regionID) + if err != nil { + // ignore error and use old region info. + logutil.Logger(bo.ctx).Error("load region failure", + zap.Uint64("regionID", regionID), zap.Error(err)) + } else { + r = lr + c.mu.Lock() + c.insertRegionToCache(r) + c.mu.Unlock() + } + } + loc := &KeyLocation{ + Region: r.VerID(), + StartKey: r.StartKey(), + EndKey: r.EndKey(), + } + return loc, nil + } + + r, err := c.loadRegionByID(bo, regionID) + if err != nil { + return nil, errors.Trace(err) + } + + c.mu.Lock() + c.insertRegionToCache(r) + c.mu.Unlock() + return &KeyLocation{ + Region: r.VerID(), + StartKey: r.StartKey(), + EndKey: r.EndKey(), + }, nil +} + +// GroupKeysByRegion separates keys into groups by their belonging Regions. +// Specially it also returns the first key's region which may be used as the +// 'PrimaryLockKey' and should be committed ahead of others. +// filter is used to filter some unwanted keys. +func (c *RegionCache) GroupKeysByRegion(bo *Backoffer, keys [][]byte, filter func(key, regionStartKey []byte) bool) (map[RegionVerID][][]byte, RegionVerID, error) { + groups := make(map[RegionVerID][][]byte) + var first RegionVerID + var lastLoc *KeyLocation + for i, k := range keys { + if lastLoc == nil || !lastLoc.Contains(k) { + var err error + lastLoc, err = c.LocateKey(bo, k) + if err != nil { + return nil, first, errors.Trace(err) + } + if filter != nil && filter(k, lastLoc.StartKey) { + continue + } + } + id := lastLoc.Region + if i == 0 { + first = id + } + groups[id] = append(groups[id], k) + } + return groups, first, nil +} + +// ListRegionIDsInKeyRange lists ids of regions in [start_key,end_key]. +func (c *RegionCache) ListRegionIDsInKeyRange(bo *Backoffer, startKey, endKey []byte) (regionIDs []uint64, err error) { + for { + curRegion, err := c.LocateKey(bo, startKey) + if err != nil { + return nil, errors.Trace(err) + } + regionIDs = append(regionIDs, curRegion.Region.id) + if curRegion.Contains(endKey) { + break + } + startKey = curRegion.EndKey + } + return regionIDs, nil +} + +// LoadRegionsInKeyRange lists ids of regions in [start_key,end_key]. 
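// Editor's note: illustrative sketch, not part of the patch. It isolates the
// region-walking pattern used by ListRegionIDsInKeyRange above: each LocateKey call
// resolves the region containing the current key, and advancing to loc.EndKey visits
// the regions of [startKey, endKey] one by one until the end key falls inside the
// current region. The function name is a placeholder.
func exampleWalkRegions(bo *Backoffer, c *RegionCache, startKey, endKey []byte) error {
	for {
		loc, err := c.LocateKey(bo, startKey)
		if err != nil {
			return err
		}
		// ... use loc.Region / loc.StartKey / loc.EndKey for the current region here ...
		if loc.Contains(endKey) {
			return nil // the current region covers the end of the range
		}
		startKey = loc.EndKey // continue with the next region
	}
}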
+func (c *RegionCache) LoadRegionsInKeyRange(bo *Backoffer, startKey, endKey []byte) (regions []*Region, err error) { + for { + curRegion, err := c.loadRegion(bo, startKey, false) + if err != nil { + return nil, errors.Trace(err) + } + c.mu.Lock() + c.insertRegionToCache(curRegion) + c.mu.Unlock() + + regions = append(regions, curRegion) + if curRegion.Contains(endKey) { + break + } + startKey = curRegion.EndKey() + } + return regions, nil +} + +// BatchLoadRegionsFromKey loads at most given numbers of regions to the RegionCache, from the given startKey. Returns +// the endKey of the last loaded region. If some of the regions has no leader, their entries in RegionCache will not be +// updated. +func (c *RegionCache) BatchLoadRegionsFromKey(bo *Backoffer, startKey []byte, count int) ([]byte, error) { + regions, err := c.scanRegions(bo, startKey, count) + if err != nil { + return nil, errors.Trace(err) + } + if len(regions) == 0 { + return nil, errors.New("PD returned no region") + } + + c.mu.Lock() + defer c.mu.Unlock() + + for _, region := range regions { + c.insertRegionToCache(region) + } + + return regions[len(regions)-1].EndKey(), nil +} + +// InvalidateCachedRegion removes a cached Region. +func (c *RegionCache) InvalidateCachedRegion(id RegionVerID) { + cachedRegion := c.getCachedRegionWithRLock(id) + if cachedRegion == nil { + return + } + cachedRegion.invalidate() +} + +// UpdateLeader update some region cache with newer leader info. +func (c *RegionCache) UpdateLeader(regionID RegionVerID, leaderStoreID uint64, currentPeerIdx int) { + r := c.getCachedRegionWithRLock(regionID) + if r == nil { + logutil.BgLogger().Debug("regionCache: cannot find region when updating leader", + zap.Uint64("regionID", regionID.GetID()), + zap.Uint64("leaderStoreID", leaderStoreID)) + return + } + + if leaderStoreID == 0 { + c.switchNextPeer(r, currentPeerIdx, nil) + logutil.BgLogger().Info("switch region peer to next due to NotLeader with NULL leader", + zap.Int("currIdx", currentPeerIdx), + zap.Uint64("regionID", regionID.GetID())) + return + } + + if !c.switchToPeer(r, leaderStoreID) { + logutil.BgLogger().Info("invalidate region cache due to cannot find peer when updating leader", + zap.Uint64("regionID", regionID.GetID()), + zap.Int("currIdx", currentPeerIdx), + zap.Uint64("leaderStoreID", leaderStoreID)) + r.invalidate() + } else { + logutil.BgLogger().Info("switch region leader to specific leader due to kv return NotLeader", + zap.Uint64("regionID", regionID.GetID()), + zap.Int("currIdx", currentPeerIdx), + zap.Uint64("leaderStoreID", leaderStoreID)) + } +} + +// insertRegionToCache tries to insert the Region to cache. +func (c *RegionCache) insertRegionToCache(cachedRegion *Region) { + old := c.mu.sorted.ReplaceOrInsert(newBtreeItem(cachedRegion)) + if old != nil { + delete(c.mu.regions, old.(*btreeItem).cachedRegion.VerID()) + } + c.mu.regions[cachedRegion.VerID()] = cachedRegion +} + +// searchCachedRegion finds a region from cache by key. Like `getCachedRegion`, +// it should be called with c.mu.RLock(), and the returned Region should not be +// used after c.mu is RUnlock(). +// If the given key is the end key of the region that you want, you may set the second argument to true. This is useful +// when processing in reverse order. 
+func (c *RegionCache) searchCachedRegion(key []byte, isEndKey bool) *Region { + ts := time.Now().Unix() + var r *Region + c.mu.RLock() + c.mu.sorted.DescendLessOrEqual(newBtreeSearchItem(key), func(item btree.Item) bool { + r = item.(*btreeItem).cachedRegion + if isEndKey && bytes.Equal(r.StartKey(), key) { + r = nil // clear result + return true // iterate next item + } + if !r.checkRegionCacheTTL(ts) { + r = nil + return true + } + return false + }) + c.mu.RUnlock() + if r != nil && (!isEndKey && r.Contains(key) || isEndKey && r.ContainsByEnd(key)) { + return r + } + return nil +} + +// getRegionByIDFromCache tries to get region by regionID from cache. Like +// `getCachedRegion`, it should be called with c.mu.RLock(), and the returned +// Region should not be used after c.mu is RUnlock(). +func (c *RegionCache) getRegionByIDFromCache(regionID uint64) *Region { + for v, r := range c.mu.regions { + if v.id == regionID { + return r + } + } + return nil +} + +// loadRegion loads region from pd client, and picks the first peer as leader. +// If the given key is the end key of the region that you want, you may set the second argument to true. This is useful +// when processing in reverse order. +func (c *RegionCache) loadRegion(bo *Backoffer, key []byte, isEndKey bool) (*Region, error) { + var backoffErr error + searchPrev := false + for { + if backoffErr != nil { + err := bo.Backoff(BoPDRPC, backoffErr) + if err != nil { + return nil, errors.Trace(err) + } + } + var meta *metapb.Region + var leader *metapb.Peer + var err error + if searchPrev { + meta, leader, err = c.pdClient.GetPrevRegion(bo.ctx, key) + } else { + meta, leader, err = c.pdClient.GetRegion(bo.ctx, key) + } + if err != nil { + backoffErr = errors.Errorf("loadRegion from PD failed, key: %q, err: %v", key, err) + continue + } + if meta == nil { + backoffErr = errors.Errorf("region not found for key %q", key) + continue + } + if len(meta.Peers) == 0 { + return nil, errors.New("receive Region with no peer") + } + if isEndKey && !searchPrev && bytes.Equal(meta.StartKey, key) && len(meta.StartKey) != 0 { + searchPrev = true + continue + } + region := &Region{meta: meta} + region.init(c) + if leader != nil { + c.switchToPeer(region, leader.StoreId) + } + return region, nil + } +} + +// loadRegionByID loads region from pd client, and picks the first peer as leader. +func (c *RegionCache) loadRegionByID(bo *Backoffer, regionID uint64) (*Region, error) { + var backoffErr error + for { + if backoffErr != nil { + err := bo.Backoff(BoPDRPC, backoffErr) + if err != nil { + return nil, errors.Trace(err) + } + } + meta, leader, err := c.pdClient.GetRegionByID(bo.ctx, regionID) + if err != nil { + backoffErr = errors.Errorf("loadRegion from PD failed, regionID: %v, err: %v", regionID, err) + continue + } + if meta == nil { + backoffErr = errors.Errorf("region not found for regionID %q", regionID) + continue + } + if len(meta.Peers) == 0 { + return nil, errors.New("receive Region with no peer") + } + region := &Region{meta: meta} + region.init(c) + if leader != nil { + c.switchToPeer(region, leader.GetStoreId()) + } + return region, nil + } +} + +// scanRegions scans at most `limit` regions from PD, starts from the region containing `startKey` and in key order. +// Regions with no leader will not be returned. 
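// Editor's note: illustrative sketch, not part of the patch. It shows the retry
// shape shared by loadRegion/loadRegionByID above: a failed PD call is turned into a
// backoff, and the loop only ends when the call succeeds or the Backoffer gives up
// (Backoff returns an error once its sleep budget is exhausted). The function name
// and pdCall are placeholders.
func exampleLoadWithBackoff(bo *Backoffer, pdCall func() error) error {
	var lastErr error
	for {
		if lastErr != nil {
			if err := bo.Backoff(BoPDRPC, lastErr); err != nil {
				return err // backoff budget exhausted, give up
			}
		}
		if err := pdCall(); err != nil {
			lastErr = err // remember the failure and retry after backing off
			continue
		}
		return nil
	}
}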
+func (c *RegionCache) scanRegions(bo *Backoffer, startKey []byte, limit int) ([]*Region, error) { + if limit == 0 { + return nil, nil + } + + var backoffErr error + for { + if backoffErr != nil { + err := bo.Backoff(BoPDRPC, backoffErr) + if err != nil { + return nil, errors.Trace(err) + } + } + metas, leaders, err := c.pdClient.ScanRegions(bo.ctx, startKey, nil, limit) + if err != nil { + backoffErr = errors.Errorf( + "scanRegion from PD failed, startKey: %q, limit: %q, err: %v", + startKey, + limit, + err) + continue + } + + if len(metas) == 0 { + return nil, errors.New("PD returned no region") + } + if len(metas) != len(leaders) { + return nil, errors.New("PD returned mismatching region metas and leaders") + } + regions := make([]*Region, 0, len(metas)) + for i, meta := range metas { + region := &Region{meta: meta} + region.init(c) + leader := leaders[i] + // Leader id = 0 indicates no leader. + if leader.GetId() != 0 { + c.switchToPeer(region, leader.GetStoreId()) + regions = append(regions, region) + } + } + if len(regions) == 0 { + return nil, errors.New("receive Regions with no peer") + } + if len(regions) < len(metas) { + logutil.Logger(context.Background()).Debug( + "regionCache: scanRegion finished but some regions has no leader.") + } + return regions, nil + } +} + +func (c *RegionCache) getCachedRegionWithRLock(regionID RegionVerID) (r *Region) { + c.mu.RLock() + r = c.mu.regions[regionID] + c.mu.RUnlock() + return +} + +func (c *RegionCache) getStoreAddr(bo *Backoffer, region *Region, store *Store, storeIdx int) (addr string, err error) { + state := store.getResolveState() + switch state { + case resolved, needCheck: + addr = store.addr + return + case unresolved: + addr, err = store.initResolve(bo, c) + return + case deleted: + addr = c.changeToActiveStore(region, store, storeIdx) + return + default: + panic("unsupported resolve state") + } +} + +func (c *RegionCache) changeToActiveStore(region *Region, store *Store, storeIdx int) (addr string) { + c.storeMu.RLock() + store = c.storeMu.stores[store.storeID] + c.storeMu.RUnlock() + for { + oldRegionStore := region.getStore() + newRegionStore := oldRegionStore.clone() + newRegionStore.stores = make([]*Store, 0, len(oldRegionStore.stores)) + for i, s := range oldRegionStore.stores { + if i == storeIdx { + newRegionStore.stores = append(newRegionStore.stores, store) + } else { + newRegionStore.stores = append(newRegionStore.stores, s) + } + } + if region.compareAndSwapStore(oldRegionStore, newRegionStore) { + break + } + } + addr = store.addr + return +} + +func (c *RegionCache) getStoreByStoreID(storeID uint64) (store *Store) { + var ok bool + c.storeMu.Lock() + store, ok = c.storeMu.stores[storeID] + if ok { + c.storeMu.Unlock() + return + } + store = &Store{storeID: storeID} + c.storeMu.stores[storeID] = store + c.storeMu.Unlock() + return +} + +// OnRegionEpochNotMatch removes the old region and inserts new regions into the cache. +func (c *RegionCache) OnRegionEpochNotMatch(bo *Backoffer, ctx *RPCContext, currentRegions []*metapb.Region) error { + // Find whether the region epoch in `ctx` is ahead of TiKV's. If so, backoff. + for _, meta := range currentRegions { + if meta.GetId() == ctx.Region.id && + (meta.GetRegionEpoch().GetConfVer() < ctx.Region.confVer || + meta.GetRegionEpoch().GetVersion() < ctx.Region.ver) { + err := errors.Errorf("region epoch is ahead of tikv. 
rpc ctx: %+v, currentRegions: %+v", ctx, currentRegions) + logutil.BgLogger().Info("region epoch is ahead of tikv", zap.Error(err)) + return bo.Backoff(BoRegionMiss, err) + } + } + + c.mu.Lock() + defer c.mu.Unlock() + needInvalidateOld := true + // If the region epoch is not ahead of TiKV's, replace region meta in region cache. + for _, meta := range currentRegions { + if _, ok := c.pdClient.(*codecPDClient); ok { + if err := decodeRegionMetaKey(meta); err != nil { + return errors.Errorf("newRegion's range key is not encoded: %v, %v", meta, err) + } + } + region := &Region{meta: meta} + region.init(c) + c.switchToPeer(region, ctx.Store.storeID) + c.insertRegionToCache(region) + if ctx.Region == region.VerID() { + needInvalidateOld = false + } + } + if needInvalidateOld { + cachedRegion, ok := c.mu.regions[ctx.Region] + if ok { + cachedRegion.invalidate() + } + } + return nil +} + +// PDClient returns the pd.Client in RegionCache. +func (c *RegionCache) PDClient() pd.Client { + return c.pdClient +} + +// btreeItem is BTree's Item that uses []byte to compare. +type btreeItem struct { + key []byte + cachedRegion *Region +} + +func newBtreeItem(cr *Region) *btreeItem { + return &btreeItem{ + key: cr.StartKey(), + cachedRegion: cr, + } +} + +func newBtreeSearchItem(key []byte) *btreeItem { + return &btreeItem{ + key: key, + } +} + +func (item *btreeItem) Less(other btree.Item) bool { + return bytes.Compare(item.key, other.(*btreeItem).key) < 0 +} + +// GetID returns id. +func (r *Region) GetID() uint64 { + return r.meta.GetId() +} + +// GetMeta returns region meta. +func (r *Region) GetMeta() *metapb.Region { + return proto.Clone(r.meta).(*metapb.Region) +} + +// GetLeaderID returns leader region ID. +func (r *Region) GetLeaderID() uint64 { + store := r.getStore() + if int(store.workTiKVIdx) >= len(r.meta.Peers) { + return 0 + } + return r.meta.Peers[int(r.getStore().workTiKVIdx)].Id +} + +// GetLeaderStoreID returns the store ID of the leader region. +func (r *Region) GetLeaderStoreID() uint64 { + store := r.getStore() + if int(store.workTiKVIdx) >= len(r.meta.Peers) { + return 0 + } + return r.meta.Peers[int(r.getStore().workTiKVIdx)].StoreId +} + +func (r *Region) getStorePeer(rs *RegionStore, pidx int32) (store *Store, peer *metapb.Peer, idx int) { + store = rs.stores[pidx] + peer = r.meta.Peers[pidx] + idx = int(pidx) + return +} + +// WorkStorePeer returns current work store with work peer. +func (r *Region) WorkStorePeer(rs *RegionStore) (store *Store, peer *metapb.Peer, idx int) { + return r.getStorePeer(rs, rs.workTiKVIdx) +} + +// FollowerStorePeer returns a follower store with follower peer. +func (r *Region) FollowerStorePeer(rs *RegionStore, followerStoreSeed uint32) (*Store, *metapb.Peer, int) { + return r.getStorePeer(rs, rs.follower(followerStoreSeed)) +} + +// RegionVerID is a unique ID that can identify a Region at a specific version. +type RegionVerID struct { + id uint64 + confVer uint64 + ver uint64 +} + +// GetID returns the id of the region +func (r *RegionVerID) GetID() uint64 { + return r.id +} + +// VerID returns the Region's RegionVerID. +func (r *Region) VerID() RegionVerID { + return RegionVerID{ + id: r.meta.GetId(), + confVer: r.meta.GetRegionEpoch().GetConfVer(), + ver: r.meta.GetRegionEpoch().GetVersion(), + } +} + +// StartKey returns StartKey. +func (r *Region) StartKey() []byte { + return r.meta.StartKey +} + +// EndKey returns EndKey. 
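// Editor's note: illustrative sketch, not part of the patch. It shows how the
// StartKey-ordered btree above answers "which cached region contains this key":
// descending from the search key lands on the region with the greatest
// StartKey <= key, and a final Contains check rejects it when the key lies beyond
// that region's EndKey (a cache gap). The real searchCachedRegion additionally
// checks the cache TTL; the function name here is a placeholder.
func exampleLookupByKey(c *RegionCache, key []byte) *Region {
	var candidate *Region
	c.mu.RLock()
	c.mu.sorted.DescendLessOrEqual(newBtreeSearchItem(key), func(item btree.Item) bool {
		candidate = item.(*btreeItem).cachedRegion
		return false // the first item visited is the best candidate
	})
	c.mu.RUnlock()
	if candidate != nil && candidate.Contains(key) {
		return candidate
	}
	return nil
}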
+func (r *Region) EndKey() []byte { + return r.meta.EndKey +} + +// switchToPeer switches current store to the one on specific store. It returns +// false if no peer matches the storeID. +func (c *RegionCache) switchToPeer(r *Region, targetStoreID uint64) (found bool) { + leaderIdx, found := c.getPeerStoreIndex(r, targetStoreID) + c.switchWorkIdx(r, leaderIdx) + return +} + +func (c *RegionCache) switchNextPeer(r *Region, currentPeerIdx int, err error) { + rs := r.getStore() + + if err != nil { // TODO: refine err, only do this for some errors. + s := rs.stores[currentPeerIdx] + epoch := rs.storeFails[currentPeerIdx] + if atomic.CompareAndSwapUint32(&s.fail, epoch, epoch+1) { + logutil.BgLogger().Info("mark store's regions need be refill", zap.String("store", s.addr)) + } + s.markNeedCheck(c.notifyCheckCh) + } + + if int(rs.workTiKVIdx) != currentPeerIdx { + return + } + + nextIdx := (currentPeerIdx + 1) % len(rs.stores) + newRegionStore := rs.clone() + newRegionStore.workTiKVIdx = int32(nextIdx) + r.compareAndSwapStore(rs, newRegionStore) +} + +func (c *RegionCache) getPeerStoreIndex(r *Region, id uint64) (idx int, found bool) { + if len(r.meta.Peers) == 0 { + return + } + for i, p := range r.meta.Peers { + if p.GetStoreId() == id { + idx = i + found = true + return + } + } + return +} + +func (c *RegionCache) switchWorkIdx(r *Region, leaderIdx int) { +retry: + // switch to new leader. + oldRegionStore := r.getStore() + if oldRegionStore.workTiKVIdx == int32(leaderIdx) { + return + } + newRegionStore := oldRegionStore.clone() + newRegionStore.workTiKVIdx = int32(leaderIdx) + if !r.compareAndSwapStore(oldRegionStore, newRegionStore) { + goto retry + } +} + +// Contains checks whether the key is in the region, for the maximum region endKey is empty. +// startKey <= key < endKey. +func (r *Region) Contains(key []byte) bool { + return bytes.Compare(r.meta.GetStartKey(), key) <= 0 && + (bytes.Compare(key, r.meta.GetEndKey()) < 0 || len(r.meta.GetEndKey()) == 0) +} + +// ContainsByEnd check the region contains the greatest key that is less than key. +// for the maximum region endKey is empty. +// startKey < key <= endKey. +func (r *Region) ContainsByEnd(key []byte) bool { + return bytes.Compare(r.meta.GetStartKey(), key) < 0 && + (bytes.Compare(key, r.meta.GetEndKey()) <= 0 || len(r.meta.GetEndKey()) == 0) +} + +// Store contains a kv process's address. +type Store struct { + addr string // loaded store address + storeID uint64 // store's id + state uint64 // unsafe store storeState + resolveMutex sync.Mutex // protect pd from concurrent init requests + fail uint32 // store fail count, see RegionStore.storeFails +} + +type resolveState uint64 + +const ( + unresolved resolveState = iota + resolved + needCheck + deleted +) + +// initResolve resolves addr for store that never resolved. +func (s *Store) initResolve(bo *Backoffer, c *RegionCache) (addr string, err error) { + s.resolveMutex.Lock() + state := s.getResolveState() + defer s.resolveMutex.Unlock() + if state != unresolved { + addr = s.addr + return + } + var store *metapb.Store + for { + store, err = c.pdClient.GetStore(bo.ctx, s.storeID) + if err != nil { + // TODO: more refine PD error status handle. 
+ if errors.Cause(err) == context.Canceled { + return + } + err = errors.Errorf("loadStore from PD failed, id: %d, err: %v", s.storeID, err) + if err = bo.Backoff(BoPDRPC, err); err != nil { + return + } + continue + } + if store == nil { + return + } + addr = store.GetAddress() + s.addr = addr + retry: + state = s.getResolveState() + if state != unresolved { + addr = s.addr + return + } + if !s.compareAndSwapState(state, resolved) { + goto retry + } + return + } +} + +// reResolve try to resolve addr for store that need check. +func (s *Store) reResolve(c *RegionCache) { + var addr string + store, err := c.pdClient.GetStore(context.Background(), s.storeID) + if err != nil { + logutil.BgLogger().Error("loadStore from PD failed", zap.Uint64("id", s.storeID), zap.Error(err)) + // we cannot do backoff in reResolve loop but try check other store and wait tick. + return + } + if store == nil { + // store has be removed in PD, we should invalidate all regions using those store. + logutil.BgLogger().Info("invalidate regions in removed store", + zap.Uint64("store", s.storeID), zap.String("add", s.addr)) + atomic.AddUint32(&s.fail, 1) + return + } + + addr = store.GetAddress() + if s.addr != addr { + state := resolved + newStore := &Store{storeID: s.storeID, addr: addr} + newStore.state = *(*uint64)(unsafe.Pointer(&state)) + c.storeMu.Lock() + c.storeMu.stores[newStore.storeID] = newStore + c.storeMu.Unlock() + retryMarkDel: + // all region used those + oldState := s.getResolveState() + if oldState == deleted { + return + } + newState := deleted + if !s.compareAndSwapState(oldState, newState) { + goto retryMarkDel + } + return + } +retryMarkResolved: + oldState := s.getResolveState() + if oldState != needCheck { + return + } + newState := resolved + if !s.compareAndSwapState(oldState, newState) { + goto retryMarkResolved + } +} + +func (s *Store) getResolveState() resolveState { + var state resolveState + if s == nil { + return state + } + return resolveState(atomic.LoadUint64(&s.state)) +} + +func (s *Store) compareAndSwapState(oldState, newState resolveState) bool { + return atomic.CompareAndSwapUint64(&s.state, uint64(oldState), uint64(newState)) +} + +// markNeedCheck marks resolved store to be async resolve to check store addr change. +func (s *Store) markNeedCheck(notifyCheckCh chan struct{}) { +retry: + oldState := s.getResolveState() + if oldState != resolved { + return + } + if !s.compareAndSwapState(oldState, needCheck) { + goto retry + } + select { + case notifyCheckCh <- struct{}{}: + default: + } + +} diff --git a/store/tikv/region_cache_test.go b/store/tikv/region_cache_test.go new file mode 100644 index 0000000..9f6a957 --- /dev/null +++ b/store/tikv/region_cache_test.go @@ -0,0 +1,1039 @@ +// Copyright 2016 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package tikv + +import ( + "context" + "errors" + "fmt" + "math/rand" + "testing" + "time" + + "github.com/google/btree" + "github.com/pingcap-incubator/tinykv/proto/pkg/errorpb" + "github.com/pingcap-incubator/tinykv/proto/pkg/metapb" + . 
"github.com/pingcap/check" + "github.com/pingcap/tidb/kv" + "github.com/pingcap/tidb/store/mockstore/mocktikv" +) + +type testRegionCacheSuite struct { + OneByOneSuite + cluster *mocktikv.Cluster + store1 uint64 // store1 is leader + store2 uint64 // store2 is follower + peer1 uint64 // peer1 is leader + peer2 uint64 // peer2 is follower + region1 uint64 + cache *RegionCache + bo *Backoffer +} + +var _ = Suite(&testRegionCacheSuite{}) + +func (s *testRegionCacheSuite) SetUpTest(c *C) { + s.cluster = mocktikv.NewCluster() + storeIDs, peerIDs, regionID, _ := mocktikv.BootstrapWithMultiStores(s.cluster, 2) + s.region1 = regionID + s.store1 = storeIDs[0] + s.store2 = storeIDs[1] + s.peer1 = peerIDs[0] + s.peer2 = peerIDs[1] + pdCli := &codecPDClient{mocktikv.NewPDClient(s.cluster)} + s.cache = NewRegionCache(pdCli) + s.bo = NewBackoffer(context.Background(), 5000) +} + +func (s *testRegionCacheSuite) TearDownTest(c *C) { + s.cache.Close() +} + +func (s *testRegionCacheSuite) storeAddr(id uint64) string { + return fmt.Sprintf("store%d", id) +} + +func (s *testRegionCacheSuite) checkCache(c *C, len int) { + ts := time.Now().Unix() + c.Assert(validRegions(s.cache.mu.regions, ts), Equals, len) + c.Assert(validRegionsInBtree(s.cache.mu.sorted, ts), Equals, len) +} + +func validRegions(regions map[RegionVerID]*Region, ts int64) (len int) { + for _, region := range regions { + if !region.checkRegionCacheTTL(ts) { + continue + } + len++ + } + return +} + +func validRegionsInBtree(t *btree.BTree, ts int64) (len int) { + t.Descend(func(item btree.Item) bool { + r := item.(*btreeItem).cachedRegion + if !r.checkRegionCacheTTL(ts) { + return true + } + len++ + return true + }) + return +} + +func (s *testRegionCacheSuite) getRegion(c *C, key []byte) *Region { + _, err := s.cache.LocateKey(s.bo, key) + c.Assert(err, IsNil) + r := s.cache.searchCachedRegion(key, false) + c.Assert(r, NotNil) + return r +} + +func (s *testRegionCacheSuite) getRegionWithEndKey(c *C, key []byte) *Region { + _, err := s.cache.LocateEndKey(s.bo, key) + c.Assert(err, IsNil) + r := s.cache.searchCachedRegion(key, true) + c.Assert(r, NotNil) + return r +} + +func (s *testRegionCacheSuite) getAddr(c *C, key []byte, replicaRead kv.ReplicaReadType, seed uint32) string { + loc, err := s.cache.LocateKey(s.bo, key) + c.Assert(err, IsNil) + ctx, err := s.cache.GetTiKVRPCContext(s.bo, loc.Region, replicaRead, seed) + c.Assert(err, IsNil) + if ctx == nil { + return "" + } + return ctx.Addr +} + +func (s *testRegionCacheSuite) TestSimple(c *C) { + seed := rand.Uint32() + r := s.getRegion(c, []byte("a")) + c.Assert(r, NotNil) + c.Assert(r.GetID(), Equals, s.region1) + c.Assert(s.getAddr(c, []byte("a"), kv.ReplicaReadLeader, 0), Equals, s.storeAddr(s.store1)) + c.Assert(s.getAddr(c, []byte("a"), kv.ReplicaReadFollower, seed), Equals, s.storeAddr(s.store2)) + s.checkCache(c, 1) + c.Assert(r.GetMeta(), DeepEquals, r.meta) + c.Assert(r.GetLeaderID(), Equals, r.meta.Peers[r.getStore().workTiKVIdx].Id) + s.cache.mu.regions[r.VerID()].lastAccess = 0 + r = s.cache.searchCachedRegion([]byte("a"), true) + c.Assert(r, IsNil) +} + +func (s *testRegionCacheSuite) TestDropStore(c *C) { + bo := NewBackoffer(context.Background(), 100) + s.cluster.RemoveStore(s.store1) + loc, err := s.cache.LocateKey(bo, []byte("a")) + c.Assert(err, IsNil) + ctx, err := s.cache.GetTiKVRPCContext(bo, loc.Region, kv.ReplicaReadLeader, 0) + c.Assert(err, IsNil) + c.Assert(ctx, IsNil) + ctx, err = s.cache.GetTiKVRPCContext(bo, loc.Region, kv.ReplicaReadFollower, rand.Uint32()) + 
c.Assert(err, IsNil) + c.Assert(ctx, IsNil) + s.checkCache(c, 0) +} + +func (s *testRegionCacheSuite) TestDropStoreRetry(c *C) { + s.cluster.RemoveStore(s.store1) + done := make(chan struct{}) + go func() { + time.Sleep(time.Millisecond * 10) + s.cluster.AddStore(s.store1, s.storeAddr(s.store1)) + close(done) + }() + loc, err := s.cache.LocateKey(s.bo, []byte("a")) + c.Assert(err, IsNil) + c.Assert(loc.Region.id, Equals, s.region1) + <-done +} + +func (s *testRegionCacheSuite) TestUpdateLeader(c *C) { + seed := rand.Uint32() + loc, err := s.cache.LocateKey(s.bo, []byte("a")) + c.Assert(err, IsNil) + // tikv-server reports `NotLeader` + s.cache.UpdateLeader(loc.Region, s.store2, 0) + + r := s.getRegion(c, []byte("a")) + c.Assert(r, NotNil) + c.Assert(r.GetID(), Equals, s.region1) + c.Assert(s.getAddr(c, []byte("a"), kv.ReplicaReadLeader, 0), Equals, s.storeAddr(s.store2)) + c.Assert(s.getAddr(c, []byte("a"), kv.ReplicaReadFollower, seed), Equals, s.storeAddr(s.store1)) + + r = s.getRegionWithEndKey(c, []byte("z")) + c.Assert(r, NotNil) + c.Assert(r.GetID(), Equals, s.region1) + c.Assert(s.getAddr(c, []byte("z"), kv.ReplicaReadLeader, 0), Equals, s.storeAddr(s.store2)) + c.Assert(s.getAddr(c, []byte("a"), kv.ReplicaReadFollower, seed), Equals, s.storeAddr(s.store1)) +} + +func (s *testRegionCacheSuite) TestUpdateLeader2(c *C) { + seed := rand.Uint32() + loc, err := s.cache.LocateKey(s.bo, []byte("a")) + c.Assert(err, IsNil) + // new store3 becomes leader + store3 := s.cluster.AllocID() + peer3 := s.cluster.AllocID() + s.cluster.AddStore(store3, s.storeAddr(store3)) + s.cluster.AddPeer(s.region1, store3, peer3) + // tikv-server reports `NotLeader` + s.cache.UpdateLeader(loc.Region, store3, 0) + + // Store3 does not exist in cache, causes a reload from PD. + r := s.getRegion(c, []byte("a")) + c.Assert(r, NotNil) + c.Assert(r.GetID(), Equals, s.region1) + c.Assert(s.getAddr(c, []byte("a"), kv.ReplicaReadLeader, 0), Equals, s.storeAddr(s.store1)) + follower := s.getAddr(c, []byte("a"), kv.ReplicaReadFollower, seed) + if seed%2 == 0 { + c.Assert(follower, Equals, s.storeAddr(s.store2)) + } else { + c.Assert(follower, Equals, s.storeAddr(store3)) + } + follower2 := s.getAddr(c, []byte("a"), kv.ReplicaReadFollower, seed+1) + if (seed+1)%2 == 0 { + c.Assert(follower2, Equals, s.storeAddr(s.store2)) + } else { + c.Assert(follower2, Equals, s.storeAddr(store3)) + } + c.Assert(follower, Not(Equals), follower2) + + // tikv-server notifies new leader to pd-server. + s.cluster.ChangeLeader(s.region1, peer3) + // tikv-server reports `NotLeader` again. 
+ s.cache.UpdateLeader(r.VerID(), store3, 0) + r = s.getRegion(c, []byte("a")) + c.Assert(r, NotNil) + c.Assert(r.GetID(), Equals, s.region1) + c.Assert(s.getAddr(c, []byte("a"), kv.ReplicaReadLeader, 0), Equals, s.storeAddr(store3)) + follower = s.getAddr(c, []byte("a"), kv.ReplicaReadFollower, seed) + if seed%2 == 0 { + c.Assert(follower, Equals, s.storeAddr(s.store1)) + } else { + c.Assert(follower, Equals, s.storeAddr(s.store2)) + } + follower2 = s.getAddr(c, []byte("a"), kv.ReplicaReadFollower, seed+1) + if (seed+1)%2 == 0 { + c.Assert(follower2, Equals, s.storeAddr(s.store1)) + } else { + c.Assert(follower2, Equals, s.storeAddr(s.store2)) + } + c.Assert(follower, Not(Equals), follower2) +} + +func (s *testRegionCacheSuite) TestUpdateLeader3(c *C) { + seed := rand.Uint32() + loc, err := s.cache.LocateKey(s.bo, []byte("a")) + c.Assert(err, IsNil) + // store2 becomes leader + s.cluster.ChangeLeader(s.region1, s.peer2) + // store2 gone, store3 becomes leader + s.cluster.RemoveStore(s.store2) + store3 := s.cluster.AllocID() + peer3 := s.cluster.AllocID() + s.cluster.AddStore(store3, s.storeAddr(store3)) + s.cluster.AddPeer(s.region1, store3, peer3) + // tikv-server notifies new leader to pd-server. + s.cluster.ChangeLeader(s.region1, peer3) + // tikv-server reports `NotLeader`(store2 is the leader) + s.cache.UpdateLeader(loc.Region, s.store2, 0) + + // Store2 does not exist any more, causes a reload from PD. + r := s.getRegion(c, []byte("a")) + c.Assert(err, IsNil) + c.Assert(r, NotNil) + c.Assert(r.GetID(), Equals, s.region1) + addr := s.getAddr(c, []byte("a"), kv.ReplicaReadLeader, 0) + c.Assert(addr, Equals, "") + s.getRegion(c, []byte("a")) + // pd-server should return the new leader. + c.Assert(s.getAddr(c, []byte("a"), kv.ReplicaReadLeader, 0), Equals, s.storeAddr(store3)) + addr = s.getAddr(c, []byte("a"), kv.ReplicaReadFollower, seed) + c.Assert(addr == s.storeAddr(s.store1) || len(addr) == 0, IsTrue) + addr2 := s.getAddr(c, []byte("a"), kv.ReplicaReadFollower, seed+1) + c.Assert(addr2 == s.storeAddr(s.store1) || len(addr2) == 0, IsTrue) + c.Assert((len(addr2) == 0 && len(addr) == 0) || addr != addr2, IsTrue) +} + +func (s *testRegionCacheSuite) TestSendFailedButLeaderNotChange(c *C) { + // 3 nodes and no.1 is leader. 
+ store3 := s.cluster.AllocID() + peer3 := s.cluster.AllocID() + s.cluster.AddStore(store3, s.storeAddr(store3)) + s.cluster.AddPeer(s.region1, store3, peer3) + s.cluster.ChangeLeader(s.region1, s.peer1) + + loc, err := s.cache.LocateKey(s.bo, []byte("a")) + c.Assert(err, IsNil) + ctx, err := s.cache.GetTiKVRPCContext(s.bo, loc.Region, kv.ReplicaReadLeader, 0) + c.Assert(err, IsNil) + c.Assert(ctx.Peer.Id, Equals, s.peer1) + c.Assert(len(ctx.Meta.Peers), Equals, 3) + + // verify follower to be one of store2 and store3 + seed := rand.Uint32() + ctxFollower1, err := s.cache.GetTiKVRPCContext(s.bo, loc.Region, kv.ReplicaReadFollower, seed) + c.Assert(err, IsNil) + if seed%2 == 0 { + c.Assert(ctxFollower1.Peer.Id, Equals, s.peer2) + } else { + c.Assert(ctxFollower1.Peer.Id, Equals, peer3) + } + ctxFollower2, err := s.cache.GetTiKVRPCContext(s.bo, loc.Region, kv.ReplicaReadFollower, seed) + c.Assert(err, IsNil) + if seed%2 == 0 { + c.Assert(ctxFollower2.Peer.Id, Equals, s.peer2) + } else { + c.Assert(ctxFollower2.Peer.Id, Equals, peer3) + } + c.Assert(ctxFollower1.Peer.Id, Equals, ctxFollower2.Peer.Id) + + // send fail leader switch to 2 + s.cache.OnSendFail(s.bo, ctx, false, nil) + ctx, err = s.cache.GetTiKVRPCContext(s.bo, loc.Region, kv.ReplicaReadLeader, 0) + c.Assert(err, IsNil) + c.Assert(ctx.Peer.Id, Equals, s.peer2) + + // verify follower to be one of store1 and store3 + ctxFollower1, err = s.cache.GetTiKVRPCContext(s.bo, loc.Region, kv.ReplicaReadFollower, seed) + c.Assert(err, IsNil) + if seed%2 == 0 { + c.Assert(ctxFollower1.Peer.Id, Equals, s.peer1) + } else { + c.Assert(ctxFollower1.Peer.Id, Equals, peer3) + } + ctxFollower2, err = s.cache.GetTiKVRPCContext(s.bo, loc.Region, kv.ReplicaReadFollower, seed+1) + c.Assert(err, IsNil) + if (seed+1)%2 == 0 { + c.Assert(ctxFollower2.Peer.Id, Equals, s.peer1) + } else { + c.Assert(ctxFollower2.Peer.Id, Equals, peer3) + } + c.Assert(ctxFollower1.Peer.Id, Not(Equals), ctxFollower2.Peer.Id) + + // access 1 it will return NotLeader, leader back to 2 again + s.cache.UpdateLeader(loc.Region, s.store2, ctx.PeerIdx) + ctx, err = s.cache.GetTiKVRPCContext(s.bo, loc.Region, kv.ReplicaReadLeader, 0) + c.Assert(err, IsNil) + c.Assert(ctx.Peer.Id, Equals, s.peer2) + + // verify follower to be one of store1 and store3 + ctxFollower1, err = s.cache.GetTiKVRPCContext(s.bo, loc.Region, kv.ReplicaReadFollower, seed) + c.Assert(err, IsNil) + if seed%2 == 0 { + c.Assert(ctxFollower1.Peer.Id, Equals, s.peer1) + } else { + c.Assert(ctxFollower1.Peer.Id, Equals, peer3) + } + ctxFollower2, err = s.cache.GetTiKVRPCContext(s.bo, loc.Region, kv.ReplicaReadFollower, seed+1) + c.Assert(err, IsNil) + if (seed+1)%2 == 0 { + c.Assert(ctxFollower2.Peer.Id, Equals, s.peer1) + } else { + c.Assert(ctxFollower2.Peer.Id, Equals, peer3) + } + c.Assert(ctxFollower1.Peer.Id, Not(Equals), ctxFollower2.Peer.Id) +} + +func (s *testRegionCacheSuite) TestSendFailedInHibernateRegion(c *C) { + // 3 nodes and no.1 is leader. 
+ store3 := s.cluster.AllocID() + peer3 := s.cluster.AllocID() + s.cluster.AddStore(store3, s.storeAddr(store3)) + s.cluster.AddPeer(s.region1, store3, peer3) + s.cluster.ChangeLeader(s.region1, s.peer1) + + loc, err := s.cache.LocateKey(s.bo, []byte("a")) + c.Assert(err, IsNil) + ctx, err := s.cache.GetTiKVRPCContext(s.bo, loc.Region, kv.ReplicaReadLeader, 0) + c.Assert(err, IsNil) + c.Assert(ctx.Peer.Id, Equals, s.peer1) + c.Assert(len(ctx.Meta.Peers), Equals, 3) + + // verify follower to be one of store2 and store3 + seed := rand.Uint32() + ctxFollower1, err := s.cache.GetTiKVRPCContext(s.bo, loc.Region, kv.ReplicaReadFollower, seed) + c.Assert(err, IsNil) + if seed%2 == 0 { + c.Assert(ctxFollower1.Peer.Id, Equals, s.peer2) + } else { + c.Assert(ctxFollower1.Peer.Id, Equals, peer3) + } + ctxFollower2, err := s.cache.GetTiKVRPCContext(s.bo, loc.Region, kv.ReplicaReadFollower, seed) + c.Assert(err, IsNil) + if seed%2 == 0 { + c.Assert(ctxFollower2.Peer.Id, Equals, s.peer2) + } else { + c.Assert(ctxFollower2.Peer.Id, Equals, peer3) + } + c.Assert(ctxFollower1.Peer.Id, Equals, ctxFollower2.Peer.Id) + + // send fail leader switch to 2 + s.cache.OnSendFail(s.bo, ctx, false, nil) + ctx, err = s.cache.GetTiKVRPCContext(s.bo, loc.Region, kv.ReplicaReadLeader, 0) + c.Assert(err, IsNil) + c.Assert(ctx.Peer.Id, Equals, s.peer2) + + // verify follower to be one of store1 and store3 + ctxFollower1, err = s.cache.GetTiKVRPCContext(s.bo, loc.Region, kv.ReplicaReadFollower, seed) + c.Assert(err, IsNil) + if seed%2 == 0 { + c.Assert(ctxFollower1.Peer.Id, Equals, s.peer1) + } else { + c.Assert(ctxFollower1.Peer.Id, Equals, peer3) + } + c.Assert(ctxFollower1.Peer.Id == s.peer1 || ctxFollower1.Peer.Id == peer3, IsTrue) + ctxFollower2, err = s.cache.GetTiKVRPCContext(s.bo, loc.Region, kv.ReplicaReadFollower, seed+1) + c.Assert(err, IsNil) + if (seed+1)%2 == 0 { + c.Assert(ctxFollower2.Peer.Id, Equals, s.peer1) + } else { + c.Assert(ctxFollower2.Peer.Id, Equals, peer3) + } + c.Assert(ctxFollower1.Peer.Id, Not(Equals), ctxFollower2.Peer.Id) + + // access 2, it's in hibernate and return 0 leader, so switch to 3 + s.cache.UpdateLeader(loc.Region, 0, ctx.PeerIdx) + ctx, err = s.cache.GetTiKVRPCContext(s.bo, loc.Region, kv.ReplicaReadLeader, 0) + c.Assert(err, IsNil) + c.Assert(ctx.Peer.Id, Equals, peer3) + + // verify follower to be one of store1 and store2 + ctxFollower1, err = s.cache.GetTiKVRPCContext(s.bo, loc.Region, kv.ReplicaReadFollower, seed) + c.Assert(err, IsNil) + if seed%2 == 0 { + c.Assert(ctxFollower1.Peer.Id, Equals, s.peer1) + } else { + c.Assert(ctxFollower1.Peer.Id, Equals, s.peer2) + } + ctxFollower2, err = s.cache.GetTiKVRPCContext(s.bo, loc.Region, kv.ReplicaReadFollower, seed) + c.Assert(err, IsNil) + if seed%2 == 0 { + c.Assert(ctxFollower2.Peer.Id, Equals, s.peer1) + } else { + c.Assert(ctxFollower2.Peer.Id, Equals, s.peer2) + } + c.Assert(ctxFollower1.Peer.Id, Equals, ctxFollower2.Peer.Id) + + // again peer back to 1 + ctx, err = s.cache.GetTiKVRPCContext(s.bo, loc.Region, kv.ReplicaReadLeader, 0) + c.Assert(err, IsNil) + s.cache.UpdateLeader(loc.Region, 0, ctx.PeerIdx) + ctx, err = s.cache.GetTiKVRPCContext(s.bo, loc.Region, kv.ReplicaReadLeader, 0) + c.Assert(err, IsNil) + c.Assert(ctx.Peer.Id, Equals, s.peer1) + + // verify follower to be one of store2 and store3 + ctxFollower1, err = s.cache.GetTiKVRPCContext(s.bo, loc.Region, kv.ReplicaReadFollower, seed) + c.Assert(err, IsNil) + if seed%2 == 0 { + c.Assert(ctxFollower1.Peer.Id, Equals, s.peer2) + } else { + 
c.Assert(ctxFollower1.Peer.Id, Equals, peer3) + } + ctxFollower2, err = s.cache.GetTiKVRPCContext(s.bo, loc.Region, kv.ReplicaReadFollower, seed+1) + c.Assert(err, IsNil) + if (seed+1)%2 == 0 { + c.Assert(ctxFollower2.Peer.Id, Equals, s.peer2) + } else { + c.Assert(ctxFollower2.Peer.Id, Equals, peer3) + } + c.Assert(ctxFollower1.Peer.Id, Not(Equals), ctxFollower2.Peer.Id) +} + +func (s *testRegionCacheSuite) TestSendFailInvalidateRegionsInSameStore(c *C) { + // key range: ['' - 'm' - 'z'] + region2 := s.cluster.AllocID() + newPeers := s.cluster.AllocIDs(2) + s.cluster.Split(s.region1, region2, []byte("m"), newPeers, newPeers[0]) + + // Check the two regions. + loc1, err := s.cache.LocateKey(s.bo, []byte("a")) + c.Assert(err, IsNil) + c.Assert(loc1.Region.id, Equals, s.region1) + loc2, err := s.cache.LocateKey(s.bo, []byte("x")) + c.Assert(err, IsNil) + c.Assert(loc2.Region.id, Equals, region2) + + // Send fail on region1 + ctx, _ := s.cache.GetTiKVRPCContext(s.bo, loc1.Region, kv.ReplicaReadLeader, 0) + s.checkCache(c, 2) + s.cache.OnSendFail(s.bo, ctx, false, errors.New("test error")) + + // Get region2 cache will get nil then reload. + ctx2, err := s.cache.GetTiKVRPCContext(s.bo, loc2.Region, kv.ReplicaReadLeader, 0) + c.Assert(ctx2, IsNil) + c.Assert(err, IsNil) +} + +func (s *testRegionCacheSuite) TestSendFailedInMultipleNode(c *C) { + // 3 nodes and no.1 is leader. + store3 := s.cluster.AllocID() + peer3 := s.cluster.AllocID() + s.cluster.AddStore(store3, s.storeAddr(store3)) + s.cluster.AddPeer(s.region1, store3, peer3) + s.cluster.ChangeLeader(s.region1, s.peer1) + + loc, err := s.cache.LocateKey(s.bo, []byte("a")) + c.Assert(err, IsNil) + ctx, err := s.cache.GetTiKVRPCContext(s.bo, loc.Region, kv.ReplicaReadLeader, 0) + c.Assert(err, IsNil) + c.Assert(ctx.Peer.Id, Equals, s.peer1) + c.Assert(len(ctx.Meta.Peers), Equals, 3) + + // verify follower to be one of store2 and store3 + seed := rand.Uint32() + ctxFollower1, err := s.cache.GetTiKVRPCContext(s.bo, loc.Region, kv.ReplicaReadFollower, seed) + c.Assert(err, IsNil) + if seed%2 == 0 { + c.Assert(ctxFollower1.Peer.Id, Equals, s.peer2) + } else { + c.Assert(ctxFollower1.Peer.Id, Equals, peer3) + } + ctxFollower2, err := s.cache.GetTiKVRPCContext(s.bo, loc.Region, kv.ReplicaReadFollower, seed) + c.Assert(err, IsNil) + if seed%2 == 0 { + c.Assert(ctxFollower2.Peer.Id, Equals, s.peer2) + } else { + c.Assert(ctxFollower2.Peer.Id, Equals, peer3) + } + c.Assert(ctxFollower1.Peer.Id, Equals, ctxFollower2.Peer.Id) + + // send fail leader switch to 2 + s.cache.OnSendFail(s.bo, ctx, false, nil) + ctx, err = s.cache.GetTiKVRPCContext(s.bo, loc.Region, kv.ReplicaReadLeader, 0) + c.Assert(err, IsNil) + c.Assert(ctx.Peer.Id, Equals, s.peer2) + + // verify follower to be one of store1 and store3 + ctxFollower1, err = s.cache.GetTiKVRPCContext(s.bo, loc.Region, kv.ReplicaReadFollower, seed) + c.Assert(err, IsNil) + if seed%2 == 0 { + c.Assert(ctxFollower1.Peer.Id, Equals, s.peer1) + } else { + c.Assert(ctxFollower1.Peer.Id, Equals, peer3) + } + ctxFollower2, err = s.cache.GetTiKVRPCContext(s.bo, loc.Region, kv.ReplicaReadFollower, seed+1) + c.Assert(err, IsNil) + if (seed+1)%2 == 0 { + c.Assert(ctxFollower2.Peer.Id, Equals, s.peer1) + } else { + c.Assert(ctxFollower2.Peer.Id, Equals, peer3) + } + c.Assert(ctxFollower1.Peer.Id, Not(Equals), ctxFollower2.Peer.Id) + + // send 2 fail leader switch to 3 + s.cache.OnSendFail(s.bo, ctx, false, nil) + ctx, err = s.cache.GetTiKVRPCContext(s.bo, loc.Region, kv.ReplicaReadLeader, 0) + c.Assert(err, IsNil) + 
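TestSendFailInvalidateRegionsInSameStore above depends on send failures being store-scoped: once a send to a store fails, every cached region whose working peer lives on that store is dropped, so the next lookup reloads it from PD. A rough sketch of that idea (the type and names are illustrative, not the real RegionCache internals):

// cachedRegion is a stand-in for a cache entry; workStoreID is the store that
// currently serves requests for the region.
type cachedRegion struct {
	workStoreID uint64
}

// invalidateRegionsOnStore drops every cached region served by the failed store.
func invalidateRegionsOnStore(cache map[uint64]*cachedRegion, failedStoreID uint64) {
	for id, r := range cache {
		if r.workStoreID == failedStoreID {
			delete(cache, id)
		}
	}
}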
c.Assert(ctx.Peer.Id, Equals, peer3) + + // verify follower to be one of store1 and store2 + ctxFollower1, err = s.cache.GetTiKVRPCContext(s.bo, loc.Region, kv.ReplicaReadFollower, seed) + c.Assert(err, IsNil) + if seed%2 == 0 { + c.Assert(ctxFollower1.Peer.Id, Equals, s.peer1) + } else { + c.Assert(ctxFollower1.Peer.Id, Equals, s.peer2) + } + c.Assert(ctxFollower1.Peer.Id == s.peer1 || ctxFollower1.Peer.Id == s.peer2, IsTrue) + ctxFollower2, err = s.cache.GetTiKVRPCContext(s.bo, loc.Region, kv.ReplicaReadFollower, seed) + c.Assert(err, IsNil) + if seed%2 == 0 { + c.Assert(ctxFollower2.Peer.Id, Equals, s.peer1) + } else { + c.Assert(ctxFollower2.Peer.Id, Equals, s.peer2) + } + c.Assert(ctxFollower1.Peer.Id, Equals, ctxFollower2.Peer.Id) + + // 3 can be access, so switch to 1 + s.cache.UpdateLeader(loc.Region, s.store1, ctx.PeerIdx) + ctx, err = s.cache.GetTiKVRPCContext(s.bo, loc.Region, kv.ReplicaReadLeader, 0) + c.Assert(err, IsNil) + c.Assert(ctx.Peer.Id, Equals, s.peer1) + + // verify follower to be one of store2 and store3 + ctxFollower1, err = s.cache.GetTiKVRPCContext(s.bo, loc.Region, kv.ReplicaReadFollower, seed) + c.Assert(err, IsNil) + if seed%2 == 0 { + c.Assert(ctxFollower1.Peer.Id, Equals, s.peer2) + } else { + c.Assert(ctxFollower1.Peer.Id, Equals, peer3) + } + ctxFollower2, err = s.cache.GetTiKVRPCContext(s.bo, loc.Region, kv.ReplicaReadFollower, seed+1) + c.Assert(err, IsNil) + if (seed+1)%2 == 0 { + c.Assert(ctxFollower2.Peer.Id, Equals, s.peer2) + } else { + c.Assert(ctxFollower2.Peer.Id, Equals, peer3) + } + c.Assert(ctxFollower1.Peer.Id, Not(Equals), ctxFollower2.Peer.Id) +} + +func (s *testRegionCacheSuite) TestSplit(c *C) { + seed := rand.Uint32() + r := s.getRegion(c, []byte("x")) + c.Assert(r.GetID(), Equals, s.region1) + c.Assert(s.getAddr(c, []byte("x"), kv.ReplicaReadLeader, 0), Equals, s.storeAddr(s.store1)) + c.Assert(s.getAddr(c, []byte("x"), kv.ReplicaReadFollower, seed), Equals, s.storeAddr(s.store2)) + + // split to ['' - 'm' - 'z'] + region2 := s.cluster.AllocID() + newPeers := s.cluster.AllocIDs(2) + s.cluster.Split(s.region1, region2, []byte("m"), newPeers, newPeers[0]) + + // tikv-server reports `NotInRegion` + s.cache.InvalidateCachedRegion(r.VerID()) + s.checkCache(c, 0) + + r = s.getRegion(c, []byte("x")) + c.Assert(r.GetID(), Equals, region2) + c.Assert(s.getAddr(c, []byte("x"), kv.ReplicaReadLeader, 0), Equals, s.storeAddr(s.store1)) + c.Assert(s.getAddr(c, []byte("x"), kv.ReplicaReadFollower, seed), Equals, s.storeAddr(s.store2)) + s.checkCache(c, 1) + + r = s.getRegionWithEndKey(c, []byte("m")) + c.Assert(r.GetID(), Equals, s.region1) + s.checkCache(c, 2) +} + +func (s *testRegionCacheSuite) TestMerge(c *C) { + // key range: ['' - 'm' - 'z'] + region2 := s.cluster.AllocID() + newPeers := s.cluster.AllocIDs(2) + s.cluster.Split(s.region1, region2, []byte("m"), newPeers, newPeers[0]) + + loc, err := s.cache.LocateKey(s.bo, []byte("x")) + c.Assert(err, IsNil) + c.Assert(loc.Region.id, Equals, region2) + + // merge to single region + s.cluster.Merge(s.region1, region2) + + // tikv-server reports `NotInRegion` + s.cache.InvalidateCachedRegion(loc.Region) + s.checkCache(c, 0) + + loc, err = s.cache.LocateKey(s.bo, []byte("x")) + c.Assert(err, IsNil) + c.Assert(loc.Region.id, Equals, s.region1) + s.checkCache(c, 1) +} + +func (s *testRegionCacheSuite) TestReconnect(c *C) { + seed := rand.Uint32() + loc, err := s.cache.LocateKey(s.bo, []byte("a")) + c.Assert(err, IsNil) + + // connect tikv-server failed, cause drop cache + 
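TestSplit and TestMerge above exercise the cache's range lookup: after the topology changes, the stale entry is invalidated and the next LocateKey resolves the key against the reloaded ranges. A simplified sketch of such a lookup, assuming regions sorted by start key and using the standard bytes package (a linear scan for clarity, not the real RegionCache code):

type regionRange struct {
	id       uint64
	startKey []byte
	endKey   []byte // an empty end key means "unbounded"
}

// locateKey returns the region whose [startKey, endKey) range contains key.
func locateKey(sorted []regionRange, key []byte) (uint64, bool) {
	for _, r := range sorted {
		if bytes.Compare(key, r.startKey) >= 0 &&
			(len(r.endKey) == 0 || bytes.Compare(key, r.endKey) < 0) {
			return r.id, true
		}
	}
	return 0, false
}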
s.cache.InvalidateCachedRegion(loc.Region) + + r := s.getRegion(c, []byte("a")) + c.Assert(r, NotNil) + c.Assert(r.GetID(), Equals, s.region1) + c.Assert(s.getAddr(c, []byte("a"), kv.ReplicaReadLeader, 0), Equals, s.storeAddr(s.store1)) + c.Assert(s.getAddr(c, []byte("x"), kv.ReplicaReadFollower, seed), Equals, s.storeAddr(s.store2)) + s.checkCache(c, 1) +} + +func (s *testRegionCacheSuite) TestRegionEpochAheadOfTiKV(c *C) { + // Create a separated region cache to do this test. + pdCli := &codecPDClient{mocktikv.NewPDClient(s.cluster)} + cache := NewRegionCache(pdCli) + defer cache.Close() + + region := createSampleRegion([]byte("k1"), []byte("k2")) + region.meta.Id = 1 + region.meta.RegionEpoch = &metapb.RegionEpoch{Version: 10, ConfVer: 10} + cache.insertRegionToCache(region) + + r1 := metapb.Region{Id: 1, RegionEpoch: &metapb.RegionEpoch{Version: 9, ConfVer: 10}} + r2 := metapb.Region{Id: 1, RegionEpoch: &metapb.RegionEpoch{Version: 10, ConfVer: 9}} + + bo := NewBackoffer(context.Background(), 2000000) + + err := cache.OnRegionEpochNotMatch(bo, &RPCContext{Region: region.VerID()}, []*metapb.Region{&r1}) + c.Assert(err, IsNil) + err = cache.OnRegionEpochNotMatch(bo, &RPCContext{Region: region.VerID()}, []*metapb.Region{&r2}) + c.Assert(err, IsNil) + c.Assert(len(bo.errors), Equals, 2) +} + +const regionSplitKeyFormat = "t%08d" + +func createClusterWithStoresAndRegions(regionCnt, storeCount int) *mocktikv.Cluster { + cluster := mocktikv.NewCluster() + _, _, regionID, _ := mocktikv.BootstrapWithMultiStores(cluster, storeCount) + for i := 0; i < regionCnt; i++ { + rawKey := []byte(fmt.Sprintf(regionSplitKeyFormat, i)) + ids := cluster.AllocIDs(4) + // Make leaders equally distributed on the 3 stores. + storeID := ids[0] + peerIDs := ids[1:] + leaderPeerID := peerIDs[i%3] + cluster.SplitRaw(regionID, storeID, rawKey, peerIDs, leaderPeerID) + regionID = ids[0] + } + return cluster +} + +func loadRegionsToCache(cache *RegionCache, regionCnt int) { + for i := 0; i < regionCnt; i++ { + rawKey := []byte(fmt.Sprintf(regionSplitKeyFormat, i)) + cache.LocateKey(NewBackoffer(context.Background(), 1), rawKey) + } +} + +func (s *testRegionCacheSuite) TestUpdateStoreAddr(c *C) { + mvccStore := mocktikv.MustNewMVCCStore() + defer mvccStore.Close() + + client := &RawKVClient{ + clusterID: 0, + regionCache: NewRegionCache(mocktikv.NewPDClient(s.cluster)), + rpcClient: mocktikv.NewRPCClient(s.cluster, mvccStore), + } + defer client.Close() + testKey := []byte("test_key") + testValue := []byte("test_value") + err := client.Put(testKey, testValue) + c.Assert(err, IsNil) + // tikv-server reports `StoreNotMatch` And retry + store1Addr := s.storeAddr(s.store1) + s.cluster.UpdateStoreAddr(s.store1, s.storeAddr(s.store2)) + s.cluster.UpdateStoreAddr(s.store2, store1Addr) + + getVal, err := client.Get(testKey) + + c.Assert(err, IsNil) + c.Assert(getVal, BytesEquals, testValue) +} + +func (s *testRegionCacheSuite) TestReplaceAddrWithNewStore(c *C) { + mvccStore := mocktikv.MustNewMVCCStore() + defer mvccStore.Close() + + client := &RawKVClient{ + clusterID: 0, + regionCache: NewRegionCache(mocktikv.NewPDClient(s.cluster)), + rpcClient: mocktikv.NewRPCClient(s.cluster, mvccStore), + } + defer client.Close() + testKey := []byte("test_key") + testValue := []byte("test_value") + err := client.Put(testKey, testValue) + c.Assert(err, IsNil) + + // make store2 using store1's addr and store1 offline + store1Addr := s.storeAddr(s.store1) + s.cluster.UpdateStoreAddr(s.store1, s.storeAddr(s.store2)) + 
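TestRegionEpochAheadOfTiKV above checks that a region reported with an older epoch never overwrites the cached one; the comparison it relies on can be summarized as the sketch below (illustrative only):

// isEpochStale reports whether the epoch returned by TiKV is behind the cached
// epoch. A stale epoch only triggers a backoff-and-retry, not a cache update.
func isEpochStale(newVer, newConfVer, curVer, curConfVer uint64) bool {
	return newVer < curVer || newConfVer < curConfVer
}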
s.cluster.UpdateStoreAddr(s.store2, store1Addr) + s.cluster.RemoveStore(s.store1) + s.cluster.ChangeLeader(s.region1, s.peer2) + s.cluster.RemovePeer(s.region1, s.peer1) + + getVal, err := client.Get(testKey) + + c.Assert(err, IsNil) + c.Assert(getVal, BytesEquals, testValue) +} + +func (s *testRegionCacheSuite) TestReplaceNewAddrAndOldOfflineImmediately(c *C) { + mvccStore := mocktikv.MustNewMVCCStore() + defer mvccStore.Close() + + client := &RawKVClient{ + clusterID: 0, + regionCache: NewRegionCache(mocktikv.NewPDClient(s.cluster)), + rpcClient: mocktikv.NewRPCClient(s.cluster, mvccStore), + } + defer client.Close() + testKey := []byte("test_key") + testValue := []byte("test_value") + err := client.Put(testKey, testValue) + c.Assert(err, IsNil) + + // pre-load store2's address into cache via follower-read. + loc, err := client.regionCache.LocateKey(s.bo, testKey) + c.Assert(err, IsNil) + fctx, err := client.regionCache.GetTiKVRPCContext(s.bo, loc.Region, kv.ReplicaReadFollower, 0) + c.Assert(err, IsNil) + c.Assert(fctx.Store.storeID, Equals, s.store2) + c.Assert(fctx.Addr, Equals, "store2") + + // make store2 using store1's addr and store1 offline + store1Addr := s.storeAddr(s.store1) + s.cluster.UpdateStoreAddr(s.store1, s.storeAddr(s.store2)) + s.cluster.UpdateStoreAddr(s.store2, store1Addr) + s.cluster.RemoveStore(s.store1) + s.cluster.ChangeLeader(s.region1, s.peer2) + s.cluster.RemovePeer(s.region1, s.peer1) + + getVal, err := client.Get(testKey) + c.Assert(err, IsNil) + c.Assert(getVal, BytesEquals, testValue) +} + +func (s *testRegionCacheSuite) TestListRegionIDsInCache(c *C) { + // ['' - 'm' - 'z'] + region2 := s.cluster.AllocID() + newPeers := s.cluster.AllocIDs(2) + s.cluster.Split(s.region1, region2, []byte("m"), newPeers, newPeers[0]) + + regionIDs, err := s.cache.ListRegionIDsInKeyRange(s.bo, []byte("a"), []byte("z")) + c.Assert(err, IsNil) + c.Assert(regionIDs, DeepEquals, []uint64{s.region1, region2}) + regionIDs, err = s.cache.ListRegionIDsInKeyRange(s.bo, []byte("m"), []byte("z")) + c.Assert(err, IsNil) + c.Assert(regionIDs, DeepEquals, []uint64{region2}) + + regionIDs, err = s.cache.ListRegionIDsInKeyRange(s.bo, []byte("a"), []byte("m")) + c.Assert(err, IsNil) + c.Assert(regionIDs, DeepEquals, []uint64{s.region1, region2}) +} + +func (s *testRegionCacheSuite) TestScanRegions(c *C) { + // Split at "a", "b", "c", "d" + regions := s.cluster.AllocIDs(4) + regions = append([]uint64{s.region1}, regions...) 
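The two address-swap tests above assume that a `StoreNotMatch` answer makes the client re-resolve the store's address from PD and retry. A rough sketch of that recovery step, with a hypothetical resolver interface standing in for the PD client:

// storeResolver is a stand-in for the PD lookup the real client performs.
type storeResolver interface {
	GetStoreAddr(storeID uint64) (string, error)
}

// refreshStoreAddr replaces a cached address after TiKV reports StoreNotMatch.
func refreshStoreAddr(addrs map[uint64]string, storeID uint64, pd storeResolver) (string, error) {
	addr, err := pd.GetStoreAddr(storeID)
	if err != nil {
		return "", err
	}
	addrs[storeID] = addr
	return addr, nil
}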
+ + peers := [][]uint64{{s.peer1, s.peer2}} + for i := 0; i < 4; i++ { + peers = append(peers, s.cluster.AllocIDs(2)) + } + + for i := 0; i < 4; i++ { + s.cluster.Split(regions[i], regions[i+1], []byte{'a' + byte(i)}, peers[i+1], peers[i+1][0]) + } + + scannedRegions, err := s.cache.scanRegions(s.bo, []byte(""), 100) + c.Assert(err, IsNil) + c.Assert(len(scannedRegions), Equals, 5) + for i := 0; i < 5; i++ { + r := scannedRegions[i] + _, p, _ := r.WorkStorePeer(r.getStore()) + + c.Assert(r.meta.Id, Equals, regions[i]) + c.Assert(p.Id, Equals, peers[i][0]) + } + + scannedRegions, err = s.cache.scanRegions(s.bo, []byte("a"), 3) + c.Assert(err, IsNil) + c.Assert(len(scannedRegions), Equals, 3) + for i := 1; i < 4; i++ { + r := scannedRegions[i-1] + _, p, _ := r.WorkStorePeer(r.getStore()) + + c.Assert(r.meta.Id, Equals, regions[i]) + c.Assert(p.Id, Equals, peers[i][0]) + } + + scannedRegions, err = s.cache.scanRegions(s.bo, []byte("a1"), 1) + c.Assert(err, IsNil) + c.Assert(len(scannedRegions), Equals, 1) + + r0 := scannedRegions[0] + _, p0, _ := r0.WorkStorePeer(r0.getStore()) + c.Assert(r0.meta.Id, Equals, regions[1]) + c.Assert(p0.Id, Equals, peers[1][0]) + + // Test region with no leader + s.cluster.GiveUpLeader(regions[1]) + s.cluster.GiveUpLeader(regions[3]) + scannedRegions, err = s.cache.scanRegions(s.bo, []byte(""), 5) + c.Assert(err, IsNil) + for i := 0; i < 3; i++ { + r := scannedRegions[i] + _, p, _ := r.WorkStorePeer(r.getStore()) + + c.Assert(r.meta.Id, Equals, regions[i*2]) + c.Assert(p.Id, Equals, peers[i*2][0]) + } +} + +func (s *testRegionCacheSuite) TestBatchLoadRegions(c *C) { + // Split at "a", "b", "c", "d" + regions := s.cluster.AllocIDs(4) + regions = append([]uint64{s.region1}, regions...) + + peers := [][]uint64{{s.peer1, s.peer2}} + for i := 0; i < 4; i++ { + peers = append(peers, s.cluster.AllocIDs(2)) + } + + for i := 0; i < 4; i++ { + s.cluster.Split(regions[i], regions[i+1], []byte{'a' + byte(i)}, peers[i+1], peers[i+1][0]) + } + + key, err := s.cache.BatchLoadRegionsFromKey(s.bo, []byte(""), 1) + c.Assert(err, IsNil) + c.Assert(key, DeepEquals, []byte("a")) + + key, err = s.cache.BatchLoadRegionsFromKey(s.bo, []byte("a"), 2) + c.Assert(err, IsNil) + c.Assert(key, DeepEquals, []byte("c")) + + key, err = s.cache.BatchLoadRegionsFromKey(s.bo, []byte("a1"), 2) + c.Assert(err, IsNil) + c.Assert(key, DeepEquals, []byte("c")) + + key, err = s.cache.BatchLoadRegionsFromKey(s.bo, []byte("c"), 2) + c.Assert(err, IsNil) + c.Assert(len(key), Equals, 0) + + key, err = s.cache.BatchLoadRegionsFromKey(s.bo, []byte("d"), 2) + c.Assert(err, IsNil) + c.Assert(len(key), Equals, 0) + + s.checkCache(c, len(regions)) +} + +func (s *testRegionCacheSuite) TestFollowerReadFallback(c *C) { + // 3 nodes and no.1 is leader. 
+ store3 := s.cluster.AllocID() + peer3 := s.cluster.AllocID() + s.cluster.AddStore(store3, s.storeAddr(store3)) + s.cluster.AddPeer(s.region1, store3, peer3) + s.cluster.ChangeLeader(s.region1, s.peer1) + + loc, err := s.cache.LocateKey(s.bo, []byte("a")) + c.Assert(err, IsNil) + ctx, err := s.cache.GetTiKVRPCContext(s.bo, loc.Region, kv.ReplicaReadLeader, 0) + c.Assert(err, IsNil) + c.Assert(ctx.Peer.Id, Equals, s.peer1) + c.Assert(len(ctx.Meta.Peers), Equals, 3) + + // verify follower to be store2 and store3 + ctxFollower1, err := s.cache.GetTiKVRPCContext(s.bo, loc.Region, kv.ReplicaReadFollower, 0) + c.Assert(err, IsNil) + c.Assert(ctxFollower1.Peer.Id, Equals, s.peer2) + ctxFollower2, err := s.cache.GetTiKVRPCContext(s.bo, loc.Region, kv.ReplicaReadFollower, 1) + c.Assert(err, IsNil) + c.Assert(ctxFollower2.Peer.Id, Equals, peer3) + c.Assert(ctxFollower1.Peer.Id, Not(Equals), ctxFollower2.Peer.Id) + + // send fail on store2, next follower read is going to fallback to store3 + s.cache.OnSendFail(s.bo, ctxFollower1, false, errors.New("test error")) + ctx, err = s.cache.GetTiKVRPCContext(s.bo, loc.Region, kv.ReplicaReadFollower, 0) + c.Assert(err, IsNil) + c.Assert(ctx.Peer.Id, Equals, peer3) +} + +func (s *testRegionCacheSuite) TestFollowerMeetEpochNotMatch(c *C) { + // 3 nodes and no.1 is region1 leader. + store3 := s.cluster.AllocID() + peer3 := s.cluster.AllocID() + s.cluster.AddStore(store3, s.storeAddr(store3)) + s.cluster.AddPeer(s.region1, store3, peer3) + s.cluster.ChangeLeader(s.region1, s.peer1) + + // Check the two regions. + loc1, err := s.cache.LocateKey(s.bo, []byte("a")) + c.Assert(err, IsNil) + c.Assert(loc1.Region.id, Equals, s.region1) + + reqSend := NewRegionRequestSender(s.cache, nil) + + // follower read failed on store2 + followReqSeed := uint32(0) + ctxFollower1, err := s.cache.GetTiKVRPCContext(s.bo, loc1.Region, kv.ReplicaReadFollower, followReqSeed) + c.Assert(err, IsNil) + c.Assert(ctxFollower1.Peer.Id, Equals, s.peer2) + c.Assert(ctxFollower1.Store.storeID, Equals, s.store2) + + regionErr := &errorpb.Error{EpochNotMatch: &errorpb.EpochNotMatch{}} + reqSend.onRegionError(s.bo, ctxFollower1, &followReqSeed, regionErr) + c.Assert(followReqSeed, Equals, uint32(1)) +} + +func createSampleRegion(startKey, endKey []byte) *Region { + return &Region{ + meta: &metapb.Region{ + StartKey: startKey, + EndKey: endKey, + }, + } +} + +func (s *testRegionCacheSuite) TestContains(c *C) { + c.Assert(createSampleRegion(nil, nil).Contains([]byte{}), IsTrue) + c.Assert(createSampleRegion(nil, nil).Contains([]byte{10}), IsTrue) + c.Assert(createSampleRegion([]byte{10}, nil).Contains([]byte{}), IsFalse) + c.Assert(createSampleRegion([]byte{10}, nil).Contains([]byte{9}), IsFalse) + c.Assert(createSampleRegion([]byte{10}, nil).Contains([]byte{10}), IsTrue) + c.Assert(createSampleRegion(nil, []byte{10}).Contains([]byte{}), IsTrue) + c.Assert(createSampleRegion(nil, []byte{10}).Contains([]byte{9}), IsTrue) + c.Assert(createSampleRegion(nil, []byte{10}).Contains([]byte{10}), IsFalse) + c.Assert(createSampleRegion([]byte{10}, []byte{20}).Contains([]byte{}), IsFalse) + c.Assert(createSampleRegion([]byte{10}, []byte{20}).Contains([]byte{15}), IsTrue) + c.Assert(createSampleRegion([]byte{10}, []byte{20}).Contains([]byte{30}), IsFalse) +} + +func (s *testRegionCacheSuite) TestContainsByEnd(c *C) { + c.Assert(createSampleRegion(nil, nil).ContainsByEnd([]byte{}), IsFalse) + c.Assert(createSampleRegion(nil, nil).ContainsByEnd([]byte{10}), IsTrue) + c.Assert(createSampleRegion([]byte{10}, 
nil).ContainsByEnd([]byte{}), IsFalse)
+	c.Assert(createSampleRegion([]byte{10}, nil).ContainsByEnd([]byte{10}), IsFalse)
+	c.Assert(createSampleRegion([]byte{10}, nil).ContainsByEnd([]byte{11}), IsTrue)
+	c.Assert(createSampleRegion(nil, []byte{10}).ContainsByEnd([]byte{}), IsFalse)
+	c.Assert(createSampleRegion(nil, []byte{10}).ContainsByEnd([]byte{10}), IsTrue)
+	c.Assert(createSampleRegion(nil, []byte{10}).ContainsByEnd([]byte{11}), IsFalse)
+	c.Assert(createSampleRegion([]byte{10}, []byte{20}).ContainsByEnd([]byte{}), IsFalse)
+	c.Assert(createSampleRegion([]byte{10}, []byte{20}).ContainsByEnd([]byte{15}), IsTrue)
+	c.Assert(createSampleRegion([]byte{10}, []byte{20}).ContainsByEnd([]byte{30}), IsFalse)
+}
+
+func BenchmarkOnRequestFail(b *testing.B) {
+	/*
+		This benchmark simulates many concurrent requests calling the OnSendRequestFail
+		method after failing on a store, and validates that in this scenario requests
+		do not get blocked on the RegionCache lock.
+	*/
+	regionCnt, storeCount := 998, 3
+	cluster := createClusterWithStoresAndRegions(regionCnt, storeCount)
+	cache := NewRegionCache(mocktikv.NewPDClient(cluster))
+	defer cache.Close()
+	loadRegionsToCache(cache, regionCnt)
+	bo := NewBackoffer(context.Background(), 1)
+	loc, err := cache.LocateKey(bo, []byte{})
+	if err != nil {
+		b.Fatal(err)
+	}
+	region := cache.getRegionByIDFromCache(loc.Region.id)
+	b.ResetTimer()
+	regionStore := region.getStore()
+	store, peer, idx := region.WorkStorePeer(regionStore)
+	b.RunParallel(func(pb *testing.PB) {
+		for pb.Next() {
+			rpcCtx := &RPCContext{
+				Region:  loc.Region,
+				Meta:    region.meta,
+				PeerIdx: idx,
+				Peer:    peer,
+				Store:   store,
+			}
+			r := cache.getCachedRegionWithRLock(rpcCtx.Region)
+			if r == nil {
+				cache.switchNextPeer(r, rpcCtx.PeerIdx, nil)
+			}
+		}
+	})
+	if len(cache.mu.regions) != regionCnt*2/3 {
+		b.Fatal(len(cache.mu.regions))
+	}
+}
diff --git a/store/tikv/region_request.go b/store/tikv/region_request.go
new file mode 100644
index 0000000..a48434e
--- /dev/null
+++ b/store/tikv/region_request.go
@@ -0,0 +1,253 @@
+// Copyright 2016 PingCAP, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package tikv
+
+import (
+	"context"
+	"sync/atomic"
+	"time"
+
+	"github.com/pingcap-incubator/tinykv/proto/pkg/errorpb"
+	"github.com/pingcap/errors"
+	"github.com/pingcap/failpoint"
+	"github.com/pingcap/tidb/kv"
+	"github.com/pingcap/tidb/store/tikv/tikvrpc"
+	"github.com/pingcap/tidb/util/logutil"
+	"go.uber.org/zap"
+	"google.golang.org/grpc/codes"
+	"google.golang.org/grpc/status"
+)
+
+// ShuttingDown is a flag to indicate that tidb-server is exiting (for example, a
+// Ctrl+C signal was received). If this flag is set, the tikv client should not
+// retry on network errors, because tidb-server expects the tikv client to exit
+// as soon as possible.
+var ShuttingDown uint32
+
+// RegionRequestSender sends KV/Cop requests to the tikv server. It handles network
+// errors and some region errors internally.
+//
+// Typically, a KV/Cop request is bound to a region: all keys that are involved
+// in the request should be located in that region.
+// The sending process begins by looking up the address of the target region's
+// leader store in the cache; the request is then sent to the destination tikv
+// server over a TCP connection.
+// If the region has been updated, which can be caused by a leader transfer,
+// region split, region merge, or region balance, the tikv server may not be
+// able to process the request and will send back a RegionError.
+// RegionRequestSender takes care of errors that are not related to the region
+// range, such as 'I/O timeout', 'NotLeader', and 'ServerIsBusy'. For other
+// errors, since the region range has changed, the request may need to be split,
+// so we simply return the error to the caller.
+type RegionRequestSender struct {
+	regionCache  *RegionCache
+	client       Client
+	storeAddr    string
+	rpcError     error
+	failStoreIDs map[uint64]struct{}
+}
+
+// NewRegionRequestSender creates a new sender.
+func NewRegionRequestSender(regionCache *RegionCache, client Client) *RegionRequestSender {
+	return &RegionRequestSender{
+		regionCache: regionCache,
+		client:      client,
+	}
+}
+
+// SendReq sends a request to the tikv server.
+func (s *RegionRequestSender) SendReq(bo *Backoffer, req *tikvrpc.Request, regionID RegionVerID, timeout time.Duration) (*tikvrpc.Response, error) {
+	resp, _, err := s.SendReqCtx(bo, req, regionID, timeout)
+	return resp, err
+}
+
+// SendReqCtx sends a request to the tikv server and returns the response and the RPCContext of this RPC.
+func (s *RegionRequestSender) SendReqCtx(
+	bo *Backoffer,
+	req *tikvrpc.Request,
+	regionID RegionVerID,
+	timeout time.Duration,
+) (
+	resp *tikvrpc.Response,
+	rpcCtx *RPCContext,
+	err error,
+) {
+	failpoint.Inject("tikvStoreSendReqResult", func(val failpoint.Value) {
+		switch val.(string) {
+		case "timeout":
+			failpoint.Return(nil, nil, errors.New("timeout"))
+		}
+	})
+
+	replicaRead := kv.ReplicaReadLeader
+	seed := req.ReplicaReadSeed
+	for {
+		rpcCtx, err = s.regionCache.GetTiKVRPCContext(bo, regionID, replicaRead, seed)
+		if err != nil {
+			return nil, nil, err
+		}
+		if rpcCtx == nil {
+			// If the region is not found in the cache, it must be out
+			// of date and already cleaned up. We can skip the
+			// RPC by returning a RegionError directly.
+
+			// TODO: Change the returned error to something like "region missing in cache",
+			// and handle this error like EpochNotMatch, which means to re-split the request and retry.
+ resp, err = tikvrpc.GenRegionErrorResp(req, &errorpb.Error{EpochNotMatch: &errorpb.EpochNotMatch{}}) + return resp, nil, err + } + + s.storeAddr = rpcCtx.Addr + var retry bool + resp, retry, err = s.sendReqToRegion(bo, rpcCtx, req, timeout) + if err != nil { + return nil, nil, errors.Trace(err) + } + if retry { + continue + } + + var regionErr *errorpb.Error + regionErr, err = resp.GetRegionError() + if err != nil { + return nil, nil, errors.Trace(err) + } + if regionErr != nil { + retry, err = s.onRegionError(bo, rpcCtx, &seed, regionErr) + if err != nil { + return nil, nil, errors.Trace(err) + } + if retry { + continue + } + } + return resp, rpcCtx, nil + } +} + +func (s *RegionRequestSender) sendReqToRegion(bo *Backoffer, ctx *RPCContext, req *tikvrpc.Request, timeout time.Duration) (resp *tikvrpc.Response, retry bool, err error) { + if e := tikvrpc.SetContext(req, ctx.Meta, ctx.Peer); e != nil { + return nil, false, errors.Trace(e) + } + resp, err = s.client.SendRequest(bo.ctx, ctx.Addr, req, timeout) + if err != nil { + s.rpcError = err + if e := s.onSendFail(bo, ctx, err); e != nil { + return nil, false, errors.Trace(e) + } + return nil, true, nil + } + return +} + +func (s *RegionRequestSender) onSendFail(bo *Backoffer, ctx *RPCContext, err error) error { + // If it failed because the context is cancelled by ourself, don't retry. + if errors.Cause(err) == context.Canceled { + return errors.Trace(err) + } else if atomic.LoadUint32(&ShuttingDown) > 0 { + return errTiDBShuttingDown + } + if status.Code(errors.Cause(err)) == codes.Canceled { + select { + case <-bo.ctx.Done(): + return errors.Trace(err) + default: + // If we don't cancel, but the error code is Canceled, it must be from grpc remote. + // This may happen when tikv is killed and exiting. + // Backoff and retry in this case. + logutil.BgLogger().Warn("receive a grpc cancel signal from remote", zap.Error(err)) + } + } + + if ctx.Meta != nil { + s.regionCache.OnSendFail(bo, ctx, s.needReloadRegion(ctx), err) + } + + // Retry on send request failure when it's not canceled. + // When a store is not available, the leader of related region should be elected quickly. + // TODO: the number of retry time should be limited:since region may be unavailable + // when some unrecoverable disaster happened. + err = bo.Backoff(boTiKVRPC, errors.Errorf("send tikv request error: %v, ctx: %v, try next peer later", err, ctx)) + return errors.Trace(err) +} + +// needReloadRegion checks is all peers has sent failed, if so need reload. +func (s *RegionRequestSender) needReloadRegion(ctx *RPCContext) (need bool) { + if s.failStoreIDs == nil { + s.failStoreIDs = make(map[uint64]struct{}) + } + s.failStoreIDs[ctx.Store.storeID] = struct{}{} + need = len(s.failStoreIDs) == len(ctx.Meta.Peers) + if need { + s.failStoreIDs = nil + } + return +} + +func (s *RegionRequestSender) onRegionError(bo *Backoffer, ctx *RPCContext, seed *uint32, regionErr *errorpb.Error) (retry bool, err error) { + if notLeader := regionErr.GetNotLeader(); notLeader != nil { + // Retry if error is `NotLeader`. 
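needReloadRegion above reloads a region from PD only after every peer's store has failed for the current request. The bookkeeping boils down to something like this sketch (failTracker is illustrative, not the real type):

// failTracker records the stores that have failed for one request.
type failTracker map[uint64]struct{}

// fail marks a store as failed and reports whether every peer's store has now
// failed, which is the signal to reload the region from PD.
func (t failTracker) fail(storeID uint64, totalPeers int) bool {
	t[storeID] = struct{}{}
	return len(t) == totalPeers
}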
+ logutil.BgLogger().Debug("tikv reports `NotLeader` retry later", + zap.String("notLeader", notLeader.String()), + zap.String("ctx", ctx.String())) + s.regionCache.UpdateLeader(ctx.Region, notLeader.GetLeader().GetStoreId(), ctx.PeerIdx) + + var boType backoffType + if notLeader.GetLeader() != nil { + boType = BoUpdateLeader + } else { + boType = BoRegionMiss + } + + if err = bo.Backoff(boType, errors.Errorf("not leader: %v, ctx: %v", notLeader, ctx)); err != nil { + return false, errors.Trace(err) + } + + return true, nil + } + + if storeNotMatch := regionErr.GetStoreNotMatch(); storeNotMatch != nil { + // store not match + logutil.BgLogger().Warn("tikv reports `StoreNotMatch` retry later", + zap.Stringer("storeNotMatch", storeNotMatch), + zap.Stringer("ctx", ctx)) + ctx.Store.markNeedCheck(s.regionCache.notifyCheckCh) + return true, nil + } + + if epochNotMatch := regionErr.GetEpochNotMatch(); epochNotMatch != nil { + logutil.BgLogger().Debug("tikv reports `EpochNotMatch` retry later", + zap.Stringer("EpochNotMatch", epochNotMatch), + zap.Stringer("ctx", ctx)) + if seed != nil { + *seed = *seed + 1 + } + err = s.regionCache.OnRegionEpochNotMatch(bo, ctx, epochNotMatch.CurrentRegions) + return false, errors.Trace(err) + } + if regionErr.GetStaleCommand() != nil { + logutil.BgLogger().Debug("tikv reports `StaleCommand`", zap.Stringer("ctx", ctx)) + return true, nil + } + // For other errors, we only drop cache here. + // Because caller may need to re-split the request. + logutil.BgLogger().Debug("tikv reports region failed", + zap.Stringer("regionErr", regionErr), + zap.Stringer("ctx", ctx)) + // When the request is sent to TiDB, there is no region in the request, so the region id will be 0. + // So when region id is 0, there is no business with region cache. + if ctx.Region.id != 0 { + s.regionCache.InvalidateCachedRegion(ctx.Region) + } + return false, nil +} diff --git a/store/tikv/safepoint.go b/store/tikv/safepoint.go new file mode 100644 index 0000000..6ac69d1 --- /dev/null +++ b/store/tikv/safepoint.go @@ -0,0 +1,168 @@ +// Copyright 2017 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package tikv + +import ( + "context" + "strconv" + "strings" + "sync" + "time" + + "github.com/pingcap/errors" + "github.com/pingcap/tidb/util/logutil" + "go.etcd.io/etcd/clientv3" + "go.etcd.io/etcd/mvcc/mvccpb" + "go.uber.org/zap" +) + +// Safe point constants. +const ( + // This is almost the same as 'tikv_gc_safe_point' in the table 'mysql.tidb', + // save this to pd instead of tikv, because we can't use interface of table + // if the safepoint on tidb is expired. + GcSavedSafePoint = "/tidb/store/gcworker/saved_safe_point" + + GcSafePointCacheInterval = time.Second * 100 + gcCPUTimeInaccuracyBound = time.Second + gcSafePointUpdateInterval = time.Second * 10 + gcSafePointQuickRepeatInterval = time.Second +) + +// SafePointKV is used for a seamingless integration for mockTest and runtime. 
+type SafePointKV interface { + Put(k string, v string) error + Get(k string) (string, error) + GetWithPrefix(k string) ([]*mvccpb.KeyValue, error) +} + +// MockSafePointKV implements SafePointKV at mock test +type MockSafePointKV struct { + store map[string]string + mockLock sync.RWMutex +} + +// NewMockSafePointKV creates an instance of MockSafePointKV +func NewMockSafePointKV() *MockSafePointKV { + return &MockSafePointKV{ + store: make(map[string]string), + } +} + +// Put implements the Put method for SafePointKV +func (w *MockSafePointKV) Put(k string, v string) error { + w.mockLock.Lock() + defer w.mockLock.Unlock() + w.store[k] = v + return nil +} + +// Get implements the Get method for SafePointKV +func (w *MockSafePointKV) Get(k string) (string, error) { + w.mockLock.RLock() + defer w.mockLock.RUnlock() + elem := w.store[k] + return elem, nil +} + +// GetWithPrefix implements the Get method for SafePointKV +func (w *MockSafePointKV) GetWithPrefix(prefix string) ([]*mvccpb.KeyValue, error) { + w.mockLock.RLock() + defer w.mockLock.RUnlock() + kvs := make([]*mvccpb.KeyValue, 0, len(w.store)) + for k, v := range w.store { + if strings.HasPrefix(k, prefix) { + kvs = append(kvs, &mvccpb.KeyValue{Key: []byte(k), Value: []byte(v)}) + } + } + return kvs, nil +} + +// EtcdSafePointKV implements SafePointKV at runtime +type EtcdSafePointKV struct { + cli *clientv3.Client +} + +// NewEtcdSafePointKV creates an instance of EtcdSafePointKV +func NewEtcdSafePointKV(addrs []string) (*EtcdSafePointKV, error) { + etcdCli, err := createEtcdKV(addrs) + if err != nil { + return nil, errors.Trace(err) + } + return &EtcdSafePointKV{cli: etcdCli}, nil +} + +// Put implements the Put method for SafePointKV +func (w *EtcdSafePointKV) Put(k string, v string) error { + ctx, cancel := context.WithTimeout(context.Background(), time.Second*5) + _, err := w.cli.Put(ctx, k, v) + cancel() + if err != nil { + return errors.Trace(err) + } + return nil +} + +// Get implements the Get method for SafePointKV +func (w *EtcdSafePointKV) Get(k string) (string, error) { + ctx, cancel := context.WithTimeout(context.Background(), time.Second*5) + resp, err := w.cli.Get(ctx, k) + cancel() + if err != nil { + return "", errors.Trace(err) + } + if len(resp.Kvs) > 0 { + return string(resp.Kvs[0].Value), nil + } + return "", nil +} + +// GetWithPrefix implements the GetWithPrefix for SafePointKV +func (w *EtcdSafePointKV) GetWithPrefix(k string) ([]*mvccpb.KeyValue, error) { + ctx, cancel := context.WithTimeout(context.Background(), time.Second*15) + resp, err := w.cli.Get(ctx, k, clientv3.WithPrefix()) + cancel() + if err != nil { + return nil, errors.Trace(err) + } + return resp.Kvs, nil +} + +func saveSafePoint(kv SafePointKV, t uint64) error { + s := strconv.FormatUint(t, 10) + err := kv.Put(GcSavedSafePoint, s) + if err != nil { + logutil.BgLogger().Error("save safepoint failed", zap.Error(err)) + return errors.Trace(err) + } + return nil +} + +func loadSafePoint(kv SafePointKV) (uint64, error) { + str, err := kv.Get(GcSavedSafePoint) + + if err != nil { + return 0, errors.Trace(err) + } + + if str == "" { + return 0, nil + } + + t, err := strconv.ParseUint(str, 10, 64) + if err != nil { + return 0, errors.Trace(err) + } + return t, nil +} diff --git a/store/tikv/safepoint_test.go b/store/tikv/safepoint_test.go new file mode 100644 index 0000000..1f47fa0 --- /dev/null +++ b/store/tikv/safepoint_test.go @@ -0,0 +1,99 @@ +// Copyright 2017 PingCAP, Inc. 
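The helpers above store the GC safe point in PD (or a mock) as a plain string key. A minimal round-trip using the mock implementation and the saveSafePoint/loadSafePoint functions from this file (the timestamp is an arbitrary example value):

func exampleSafePointRoundTrip() (uint64, error) {
	spkv := NewMockSafePointKV()
	if err := saveSafePoint(spkv, 418252800000000000); err != nil {
		return 0, err
	}
	return loadSafePoint(spkv)
}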
+// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package tikv + +import ( + "context" + "fmt" + "time" + + . "github.com/pingcap/check" + "github.com/pingcap/errors" + "github.com/pingcap/tidb/parser/terror" +) + +type testSafePointSuite struct { + OneByOneSuite + store *tikvStore + prefix string +} + +var _ = Suite(&testSafePointSuite{}) + +func (s *testSafePointSuite) SetUpSuite(c *C) { + s.OneByOneSuite.SetUpSuite(c) + s.store = NewTestStore(c).(*tikvStore) + s.prefix = fmt.Sprintf("seek_%d", time.Now().Unix()) +} + +func (s *testSafePointSuite) TearDownSuite(c *C) { + err := s.store.Close() + c.Assert(err, IsNil) + s.OneByOneSuite.TearDownSuite(c) +} + +func (s *testSafePointSuite) beginTxn(c *C) *tikvTxn { + txn, err := s.store.Begin() + c.Assert(err, IsNil) + return txn.(*tikvTxn) +} + +func (s *testSafePointSuite) waitUntilErrorPlugIn(t uint64) { + for { + saveSafePoint(s.store.GetSafePointKV(), t+10) + cachedTime := time.Now() + newSafePoint, err := loadSafePoint(s.store.GetSafePointKV()) + if err == nil { + s.store.UpdateSPCache(newSafePoint, cachedTime) + break + } + time.Sleep(time.Second) + } +} + +func (s *testSafePointSuite) TestSafePoint(c *C) { + txn := s.beginTxn(c) + for i := 0; i < 10; i++ { + err := txn.Set(encodeKey(s.prefix, s08d("key", i)), valueBytes(i)) + c.Assert(err, IsNil) + } + err := txn.Commit(context.Background()) + c.Assert(err, IsNil) + + // for txn get + txn2 := s.beginTxn(c) + _, err = txn2.Get(context.TODO(), encodeKey(s.prefix, s08d("key", 0))) + c.Assert(err, IsNil) + + s.waitUntilErrorPlugIn(txn2.startTS) + + _, geterr2 := txn2.Get(context.TODO(), encodeKey(s.prefix, s08d("key", 0))) + c.Assert(geterr2, NotNil) + isFallBehind := terror.ErrorEqual(errors.Cause(geterr2), ErrGCTooEarly) + isMayFallBehind := terror.ErrorEqual(errors.Cause(geterr2), ErrPDServerTimeout.GenWithStackByArgs("start timestamp may fall behind safe point")) + isBehind := isFallBehind || isMayFallBehind + c.Assert(isBehind, IsTrue) + + // for txn seek + txn3 := s.beginTxn(c) + + s.waitUntilErrorPlugIn(txn3.startTS) + + _, seekerr := txn3.Iter(encodeKey(s.prefix, ""), nil) + c.Assert(seekerr, NotNil) + isFallBehind = terror.ErrorEqual(errors.Cause(geterr2), ErrGCTooEarly) + isMayFallBehind = terror.ErrorEqual(errors.Cause(geterr2), ErrPDServerTimeout.GenWithStackByArgs("start timestamp may fall behind safe point")) + isBehind = isFallBehind || isMayFallBehind + c.Assert(isBehind, IsTrue) +} diff --git a/store/tikv/scan.go b/store/tikv/scan.go new file mode 100644 index 0000000..b722dee --- /dev/null +++ b/store/tikv/scan.go @@ -0,0 +1,260 @@ +// Copyright 2016 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package tikv + +import ( + "bytes" + "context" + + pb "github.com/pingcap-incubator/tinykv/proto/pkg/kvrpcpb" + "github.com/pingcap/errors" + "github.com/pingcap/tidb/kv" + "github.com/pingcap/tidb/store/tikv/tikvrpc" + "github.com/pingcap/tidb/util/logutil" + "go.uber.org/zap" +) + +// Scanner support tikv scan +type Scanner struct { + snapshot *tikvSnapshot + batchSize int + cache []*pb.KvPair + idx int + nextStartKey kv.Key + endKey kv.Key + + // Use for reverse scan. + nextEndKey kv.Key + reverse bool + + valid bool + eof bool +} + +func newScanner(snapshot *tikvSnapshot, startKey []byte, endKey []byte, batchSize int, reverse bool) (*Scanner, error) { + // It must be > 1. Otherwise scanner won't skipFirst. + if batchSize <= 1 { + batchSize = scanBatchSize + } + scanner := &Scanner{ + snapshot: snapshot, + batchSize: batchSize, + valid: true, + nextStartKey: startKey, + endKey: endKey, + reverse: reverse, + nextEndKey: endKey, + } + err := scanner.Next() + if kv.IsErrNotFound(err) { + return scanner, nil + } + return scanner, errors.Trace(err) +} + +// Valid return valid. +func (s *Scanner) Valid() bool { + return s.valid +} + +// Key return key. +func (s *Scanner) Key() kv.Key { + if s.valid { + return s.cache[s.idx].Key + } + return nil +} + +// Value return value. +func (s *Scanner) Value() []byte { + if s.valid { + return s.cache[s.idx].Value + } + return nil +} + +// Next return next element. +func (s *Scanner) Next() error { + bo := NewBackoffer(context.WithValue(context.Background(), txnStartKey, s.snapshot.version.Ver), scannerNextMaxBackoff) + if !s.valid { + return errors.New("scanner iterator is invalid") + } + var err error + for { + s.idx++ + if s.idx >= len(s.cache) { + if s.eof { + s.Close() + return nil + } + err = s.getData(bo) + if err != nil { + s.Close() + return errors.Trace(err) + } + if s.idx >= len(s.cache) { + continue + } + } + + current := s.cache[s.idx] + if (!s.reverse && (len(s.endKey) > 0 && kv.Key(current.Key).Cmp(kv.Key(s.endKey)) >= 0)) || + (s.reverse && len(s.nextStartKey) > 0 && kv.Key(current.Key).Cmp(kv.Key(s.nextStartKey)) < 0) { + s.eof = true + s.Close() + return nil + } + // Try to resolve the lock + if current.GetError() != nil { + // 'current' would be modified if the lock being resolved + if err := s.resolveCurrentLock(bo, current); err != nil { + s.Close() + return errors.Trace(err) + } + + // The check here does not violate the KeyOnly semantic, because current's value + // is filled by resolveCurrentLock which fetches the value by snapshot.get, so an empty + // value stands for NotExist + if len(current.Value) == 0 { + continue + } + } + return nil + } +} + +// Close close iterator. 
+func (s *Scanner) Close() { + s.valid = false +} + +func (s *Scanner) startTS() uint64 { + return s.snapshot.version.Ver +} + +func (s *Scanner) resolveCurrentLock(bo *Backoffer, current *pb.KvPair) error { + val, err := s.snapshot.get(bo, kv.Key(current.Key)) + if err != nil { + return errors.Trace(err) + } + current.Error = nil + current.Value = val + return nil +} + +func (s *Scanner) getData(bo *Backoffer) error { + logutil.BgLogger().Debug("txn getData", + zap.Stringer("nextStartKey", s.nextStartKey), + zap.Stringer("nextEndKey", s.nextEndKey), + zap.Bool("reverse", s.reverse), + zap.Uint64("txnStartTS", s.startTS())) + sender := NewRegionRequestSender(s.snapshot.store.regionCache, s.snapshot.store.client) + var reqStartKey []byte + var loc *KeyLocation + var err error + for { + if !s.reverse { + loc, err = s.snapshot.store.regionCache.LocateKey(bo, s.nextStartKey) + } else { + loc, err = s.snapshot.store.regionCache.LocateEndKey(bo, s.nextEndKey) + } + if err != nil { + return errors.Trace(err) + } + + if s.reverse { + reqStartKey = s.nextStartKey + if len(reqStartKey) == 0 || + (len(loc.StartKey) > 0 && bytes.Compare(loc.StartKey, reqStartKey) > 0) { + reqStartKey = loc.StartKey + } + } + + sreq := &pb.ScanRequest{ + StartKey: s.nextStartKey, + Limit: uint32(s.batchSize), + Version: s.startTS(), + } + if s.reverse { + sreq.StartKey = s.nextEndKey + } + req := tikvrpc.NewRequest(tikvrpc.CmdScan, sreq, pb.Context{}) + resp, err := sender.SendReq(bo, req, loc.Region, ReadTimeoutMedium) + if err != nil { + return errors.Trace(err) + } + regionErr, err := resp.GetRegionError() + if err != nil { + return errors.Trace(err) + } + if regionErr != nil { + logutil.BgLogger().Debug("scanner getData failed", + zap.Stringer("regionErr", regionErr)) + err = bo.Backoff(BoRegionMiss, errors.New(regionErr.String())) + if err != nil { + return errors.Trace(err) + } + continue + } + if resp.Resp == nil { + return errors.Trace(ErrBodyMissing) + } + cmdScanResp := resp.Resp.(*pb.ScanResponse) + + err = s.snapshot.store.CheckVisibility(s.startTS()) + if err != nil { + return errors.Trace(err) + } + + kvPairs := cmdScanResp.Pairs + // Check if kvPair contains error, it should be a Lock. + for _, pair := range kvPairs { + if keyErr := pair.GetError(); keyErr != nil { + lock, err := extractLockFromKeyErr(keyErr) + if err != nil { + return errors.Trace(err) + } + pair.Key = lock.Key + } + } + + s.cache, s.idx = kvPairs, 0 + if len(kvPairs) < s.batchSize { + // No more data in current Region. Next getData() starts + // from current Region's endKey. + if !s.reverse { + s.nextStartKey = loc.EndKey + } else { + s.nextEndKey = reqStartKey + } + if (!s.reverse && (len(loc.EndKey) == 0 || (len(s.endKey) > 0 && kv.Key(s.nextStartKey).Cmp(kv.Key(s.endKey)) >= 0))) || + (s.reverse && (len(loc.StartKey) == 0 || (len(s.nextStartKey) > 0 && kv.Key(s.nextStartKey).Cmp(kv.Key(s.nextEndKey)) >= 0))) { + // Current Region is the last one. + s.eof = true + } + return nil + } + // next getData() starts from the last key in kvPairs (but skip + // it by appending a '\x00' to the key). Note that next getData() + // may get an empty response if the Region in fact does not have + // more data. 
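The pagination rule described in the comment above amounts to: a full batch continues from just after the last returned key, while a short batch means the current region is exhausted and the scan moves on to the region boundary. The "just after" key is the smallest key strictly greater than the last one, which kv.Key.Next() produces by appending a zero byte; a standalone sketch of that step:

// nextScanStart returns the smallest key strictly greater than lastKey,
// i.e. lastKey followed by a 0x00 byte.
func nextScanStart(lastKey []byte) []byte {
	next := make([]byte, 0, len(lastKey)+1)
	next = append(next, lastKey...)
	return append(next, 0)
}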
+ lastKey := kvPairs[len(kvPairs)-1].GetKey() + if !s.reverse { + s.nextStartKey = kv.Key(lastKey).Next() + } else { + s.nextEndKey = kv.Key(lastKey) + } + return nil + } +} diff --git a/store/tikv/scan_mock_test.go b/store/tikv/scan_mock_test.go new file mode 100644 index 0000000..7d10b1c --- /dev/null +++ b/store/tikv/scan_mock_test.go @@ -0,0 +1,60 @@ +// Copyright 2016 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package tikv + +import ( + "context" + + . "github.com/pingcap/check" + "github.com/pingcap/tidb/kv" +) + +type testScanMockSuite struct { + OneByOneSuite +} + +var _ = Suite(&testScanMockSuite{}) + +func (s *testScanMockSuite) TestScanMultipleRegions(c *C) { + store := NewTestStore(c).(*tikvStore) + defer store.Close() + + txn, err := store.Begin() + c.Assert(err, IsNil) + for ch := byte('a'); ch <= byte('z'); ch++ { + err = txn.Set([]byte{ch}, []byte{ch}) + c.Assert(err, IsNil) + } + err = txn.Commit(context.Background()) + c.Assert(err, IsNil) + + txn, err = store.Begin() + c.Assert(err, IsNil) + snapshot := newTiKVSnapshot(store, kv.Version{Ver: txn.StartTS()}) + scanner, err := newScanner(snapshot, []byte("a"), nil, 10, false) + c.Assert(err, IsNil) + for ch := byte('a'); ch <= byte('z'); ch++ { + c.Assert([]byte{ch}, BytesEquals, []byte(scanner.Key())) + c.Assert(scanner.Next(), IsNil) + } + c.Assert(scanner.Valid(), IsFalse) + + scanner, err = newScanner(snapshot, []byte("a"), []byte("i"), 10, false) + c.Assert(err, IsNil) + for ch := byte('a'); ch <= byte('h'); ch++ { + c.Assert([]byte{ch}, BytesEquals, []byte(scanner.Key())) + c.Assert(scanner.Next(), IsNil) + } + c.Assert(scanner.Valid(), IsFalse) +} diff --git a/store/tikv/snapshot.go b/store/tikv/snapshot.go new file mode 100644 index 0000000..45b99d8 --- /dev/null +++ b/store/tikv/snapshot.go @@ -0,0 +1,278 @@ +// Copyright 2015 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package tikv + +import ( + "bytes" + "context" + "encoding/json" + "fmt" + pb "github.com/pingcap-incubator/tinykv/proto/pkg/kvrpcpb" + "github.com/pingcap/errors" + "github.com/pingcap/failpoint" + "github.com/pingcap/tidb/kv" + "strings" + + "github.com/pingcap/tidb/store/tikv/tikvrpc" + "github.com/pingcap/tidb/tablecodec" + "github.com/pingcap/tidb/util/logutil" + "go.uber.org/zap" +) + +var ( + _ kv.Snapshot = (*tikvSnapshot)(nil) +) + +const ( + scanBatchSize = 256 +) + +// tikvSnapshot implements the kv.Snapshot interface. +type tikvSnapshot struct { + store *tikvStore + version kv.Version + syncLog bool + keyOnly bool + vars *kv.Variables + minCommitTSPushed + + // Cache the result of BatchGet. 
+ // The invariance is that calling BatchGet multiple times using the same start ts, + // the result should not change. + // NOTE: This representation here is different from the BatchGet API. + // cached use len(value)=0 to represent a key-value entry doesn't exist (a reliable truth from TiKV). + // In the BatchGet API, it use no key-value entry to represent non-exist. + // It's OK as long as there are no zero-byte values in the protocol. + cached map[string][]byte +} + +// newTiKVSnapshot creates a snapshot of an TiKV store. +func newTiKVSnapshot(store *tikvStore, ver kv.Version) *tikvSnapshot { + return &tikvSnapshot{ + store: store, + version: ver, + vars: kv.DefaultVars, + minCommitTSPushed: minCommitTSPushed{ + data: make(map[uint64]struct{}, 5), + }, + } +} + +func (s *tikvSnapshot) setSnapshotTS(ts uint64) { + // Invalidate cache if the snapshotTS change! + s.version.Ver = ts + s.cached = nil + // And also the minCommitTS pushed information. + s.minCommitTSPushed.data = make(map[uint64]struct{}, 5) +} + +// Get gets the value for key k from snapshot. +func (s *tikvSnapshot) Get(ctx context.Context, k kv.Key) ([]byte, error) { + ctx = context.WithValue(ctx, txnStartKey, s.version.Ver) + val, err := s.get(NewBackoffer(ctx, getMaxBackoff), k) + if err != nil { + return nil, errors.Trace(err) + } + if len(val) == 0 { + return nil, kv.ErrNotExist + } + return val, nil +} + +func (s *tikvSnapshot) get(bo *Backoffer, k kv.Key) ([]byte, error) { + // Check the cached values first. + if s.cached != nil { + if value, ok := s.cached[string(k)]; ok { + return value, nil + } + } + + failpoint.Inject("snapshot-get-cache-fail", func(_ failpoint.Value) { + if bo.ctx.Value("TestSnapshotCache") != nil { + panic("cache miss") + } + }) + + cli := clientHelper{ + LockResolver: s.store.lockResolver, + RegionCache: s.store.regionCache, + minCommitTSPushed: &s.minCommitTSPushed, + Client: s.store.client, + } + + req := tikvrpc.NewRequest(tikvrpc.CmdGet, + &pb.GetRequest{ + Key: k, + Version: s.version.Ver, + }, pb.Context{}) + for { + loc, err := s.store.regionCache.LocateKey(bo, k) + if err != nil { + return nil, errors.Trace(err) + } + resp, _, _, err := cli.SendReqCtx(bo, req, loc.Region, readTimeoutShort, "") + if err != nil { + return nil, errors.Trace(err) + } + regionErr, err := resp.GetRegionError() + if err != nil { + return nil, errors.Trace(err) + } + if regionErr != nil { + err = bo.Backoff(BoRegionMiss, errors.New(regionErr.String())) + if err != nil { + return nil, errors.Trace(err) + } + continue + } + if resp.Resp == nil { + return nil, errors.Trace(ErrBodyMissing) + } + cmdGetResp := resp.Resp.(*pb.GetResponse) + val := cmdGetResp.GetValue() + if keyErr := cmdGetResp.GetError(); keyErr != nil { + lock, err := extractLockFromKeyErr(keyErr) + if err != nil { + return nil, errors.Trace(err) + } + msBeforeExpired, err := cli.ResolveLocks(bo, s.version.Ver, []*Lock{lock}) + if err != nil { + return nil, errors.Trace(err) + } + if msBeforeExpired > 0 { + err = bo.BackoffWithMaxSleep(boTxnLockFast, int(msBeforeExpired), errors.New(keyErr.String())) + if err != nil { + return nil, errors.Trace(err) + } + } + continue + } + return val, nil + } +} + +// Iter return a list of key-value pair after `k`. +func (s *tikvSnapshot) Iter(k kv.Key, upperBound kv.Key) (kv.Iterator, error) { + scanner, err := newScanner(s, k, upperBound, scanBatchSize, false) + return scanner, errors.Trace(err) +} + +// IterReverse creates a reversed Iterator positioned on the first entry which key is less than k. 
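The cache convention documented on tikvSnapshot above distinguishes "cached as non-existent" (an entry with an empty value) from "not cached at all" (no entry). A small sketch of a lookup honoring that convention (illustrative, not the real accessor):

// cachedLookup returns the cached value plus two flags: whether the key exists
// according to the cache, and whether the cache held any entry at all.
func cachedLookup(cache map[string][]byte, key []byte) (val []byte, exists, cached bool) {
	v, ok := cache[string(key)]
	if !ok {
		return nil, false, false
	}
	return v, len(v) > 0, true
}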
+func (s *tikvSnapshot) IterReverse(k kv.Key) (kv.Iterator, error) { + scanner, err := newScanner(s, nil, k, scanBatchSize, true) + return scanner, errors.Trace(err) +} + +func extractLockFromKeyErr(keyErr *pb.KeyError) (*Lock, error) { + if locked := keyErr.GetLocked(); locked != nil { + return NewLock(locked), nil + } + return nil, extractKeyErr(keyErr) +} + +func extractKeyErr(keyErr *pb.KeyError) error { + failpoint.Inject("ErrMockRetryableOnly", func(val failpoint.Value) { + if val.(bool) { + keyErr.Conflict = nil + keyErr.Retryable = "mock retryable error" + } + }) + + if keyErr.Conflict != nil { + return newWriteConflictError(keyErr.Conflict) + } + if keyErr.Retryable != "" { + notFoundDetail := prettyLockNotFoundKey(keyErr.GetRetryable()) + return kv.ErrTxnRetryable.GenWithStackByArgs(keyErr.GetRetryable() + " " + notFoundDetail) + } + if keyErr.Abort != "" { + err := errors.Errorf("tikv aborts txn: %s", keyErr.GetAbort()) + logutil.BgLogger().Warn("2PC failed", zap.Error(err)) + return errors.Trace(err) + } + return errors.Errorf("unexpected KeyError: %s", keyErr.String()) +} + +func prettyLockNotFoundKey(rawRetry string) string { + if !strings.Contains(rawRetry, "TxnLockNotFound") { + return "" + } + start := strings.Index(rawRetry, "[") + if start == -1 { + return "" + } + rawRetry = rawRetry[start:] + end := strings.Index(rawRetry, "]") + if end == -1 { + return "" + } + rawRetry = rawRetry[:end+1] + var key []byte + err := json.Unmarshal([]byte(rawRetry), &key) + if err != nil { + return "" + } + var buf bytes.Buffer + prettyWriteKey(&buf, key) + return buf.String() +} + +func newWriteConflictError(conflict *pb.WriteConflict) error { + var buf bytes.Buffer + prettyWriteKey(&buf, conflict.Key) + buf.WriteString(" primary=") + prettyWriteKey(&buf, conflict.Primary) + return kv.ErrWriteConflict.FastGenByArgs(conflict.StartTs, conflict.ConflictTs, buf.String()) +} + +func prettyWriteKey(buf *bytes.Buffer, key []byte) { + tableID, indexID, indexValues, err := tablecodec.DecodeIndexKey(key) + if err == nil { + _, err1 := fmt.Fprintf(buf, "{tableID=%d, indexID=%d, indexValues={", tableID, indexID) + if err1 != nil { + logutil.BgLogger().Error("error", zap.Error(err1)) + } + for _, v := range indexValues { + _, err2 := fmt.Fprintf(buf, "%s, ", v) + if err2 != nil { + logutil.BgLogger().Error("error", zap.Error(err2)) + } + } + buf.WriteString("}}") + return + } + + tableID, handle, err := tablecodec.DecodeRecordKey(key) + if err == nil { + _, err3 := fmt.Fprintf(buf, "{tableID=%d, handle=%d}", tableID, handle) + if err3 != nil { + logutil.BgLogger().Error("error", zap.Error(err3)) + } + return + } + + mKey, mField, err := tablecodec.DecodeMetaKey(key) + if err == nil { + _, err3 := fmt.Fprintf(buf, "{metaKey=true, key=%s, field=%s}", string(mKey), string(mField)) + if err3 != nil { + logutil.Logger(context.Background()).Error("error", zap.Error(err3)) + } + return + } + + _, err4 := fmt.Fprintf(buf, "%#v", key) + if err4 != nil { + logutil.BgLogger().Error("error", zap.Error(err4)) + } +} diff --git a/store/tikv/snapshot_test.go b/store/tikv/snapshot_test.go new file mode 100644 index 0000000..7c62aa7 --- /dev/null +++ b/store/tikv/snapshot_test.go @@ -0,0 +1,69 @@ +// Copyright 2016 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package tikv + +import ( + "context" + "fmt" + "time" + + . "github.com/pingcap/check" +) + +type testSnapshotSuite struct { + OneByOneSuite + store *tikvStore + prefix string + rowNums []int +} + +var _ = Suite(&testSnapshotSuite{}) + +func (s *testSnapshotSuite) SetUpSuite(c *C) { + s.OneByOneSuite.SetUpSuite(c) + s.store = NewTestStore(c).(*tikvStore) + s.prefix = fmt.Sprintf("snapshot_%d", time.Now().Unix()) + s.rowNums = append(s.rowNums, 1, 100, 191) +} + +func (s *testSnapshotSuite) TearDownSuite(c *C) { + txn := s.beginTxn(c) + scanner, err := txn.Iter(encodeKey(s.prefix, ""), nil) + c.Assert(err, IsNil) + c.Assert(scanner, NotNil) + for scanner.Valid() { + k := scanner.Key() + err = txn.Delete(k) + c.Assert(err, IsNil) + scanner.Next() + } + err = txn.Commit(context.Background()) + c.Assert(err, IsNil) + err = s.store.Close() + c.Assert(err, IsNil) + s.OneByOneSuite.TearDownSuite(c) +} + +func (s *testSnapshotSuite) beginTxn(c *C) *tikvTxn { + txn, err := s.store.Begin() + c.Assert(err, IsNil) + return txn.(*tikvTxn) +} + +func (s *testSnapshotSuite) TestLockNotFoundPrint(c *C) { + msg := "Txn(Mvcc(TxnLockNotFound { start_ts: 408090278408224772, commit_ts: 408090279311835140, " + + "key: [116, 128, 0, 0, 0, 0, 0, 50, 137, 95, 105, 128, 0, 0, 0, 0,0 ,0, 1, 1, 67, 49, 57, 48, 57, 50, 57, 48, 255, 48, 48, 48, 48, 48, 52, 56, 54, 255, 50, 53, 53, 50, 51, 0, 0, 0, 252] }))" + key := prettyLockNotFoundKey(msg) + c.Assert(key, Equals, "{tableID=12937, indexID=1, indexValues={C19092900000048625523, }}") +} diff --git a/store/tikv/sql_fail_test.go b/store/tikv/sql_fail_test.go new file mode 100644 index 0000000..0873183 --- /dev/null +++ b/store/tikv/sql_fail_test.go @@ -0,0 +1,87 @@ +// Copyright 2017 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package tikv_test + +import ( + "context" + "os" + "sync" + "testing" + "time" + + . "github.com/pingcap/check" + "github.com/pingcap/failpoint" + "github.com/pingcap/tidb/domain" + "github.com/pingcap/tidb/parser/terror" + "github.com/pingcap/tidb/session" + . 
"github.com/pingcap/tidb/store/tikv" +) + +var _ = Suite(new(testSQLSuite)) + +type testSQLSuite struct { + OneByOneSuite + store Storage + dom *domain.Domain +} + +func (s *testSQLSuite) SetUpSuite(c *C) { + s.OneByOneSuite.SetUpSuite(c) + var err error + s.store = NewTestStore(c).(Storage) + s.dom, err = session.BootstrapSession(s.store) + c.Assert(err, IsNil) +} + +func (s *testSQLSuite) TearDownSuite(c *C) { + s.dom.Close() + s.store.Close() + s.OneByOneSuite.TearDownSuite(c) +} + +func (s *testSQLSuite) TestFailBusyServerCop(c *C) { + se, err := session.CreateSession4Test(s.store) + c.Assert(err, IsNil) + + var wg sync.WaitGroup + wg.Add(2) + + c.Assert(failpoint.Enable("github.com/pingcap/tidb/store/mockstore/mocktikv/rpcServerBusy", `return(true)`), IsNil) + go func() { + defer wg.Done() + time.Sleep(time.Millisecond * 100) + c.Assert(failpoint.Disable("github.com/pingcap/tidb/store/mockstore/mocktikv/rpcServerBusy"), IsNil) + }() + + go func() { + defer wg.Done() + rs, err := se.Execute(context.Background(), `SELECT variable_value FROM mysql.tidb WHERE variable_name="bootstrapped"`) + if len(rs) > 0 { + defer terror.Call(rs[0].Close) + } + c.Assert(err, IsNil) + req := rs[0].NewChunk() + err = rs[0].Next(context.Background(), req) + c.Assert(err, IsNil) + c.Assert(req.NumRows() == 0, IsFalse) + c.Assert(req.GetRow(0).GetString(0), Equals, "True") + }() + + wg.Wait() +} + +func TestMain(m *testing.M) { + ReadTimeoutMedium = 2 * time.Second + os.Exit(m.Run()) +} diff --git a/store/tikv/test_util.go b/store/tikv/test_util.go new file mode 100644 index 0000000..bf1b999 --- /dev/null +++ b/store/tikv/test_util.go @@ -0,0 +1,41 @@ +// Copyright 2017 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package tikv + +import ( + "github.com/google/uuid" + "github.com/pingcap-incubator/tinykv/scheduler/client" + "github.com/pingcap/errors" + "github.com/pingcap/tidb/kv" +) + +// NewTestTiKVStore creates a test store with Option +func NewTestTiKVStore(client Client, pdClient pd.Client, clientHijack func(Client) Client, pdClientHijack func(pd.Client) pd.Client) (kv.Storage, error) { + if clientHijack != nil { + client = clientHijack(client) + } + + pdCli := pd.Client(&codecPDClient{pdClient}) + if pdClientHijack != nil { + pdCli = pdClientHijack(pdCli) + } + + // Make sure the uuid is unique. + uid := uuid.New().String() + spkv := NewMockSafePointKV() + tikvStore, err := newTikvStore(uid, pdCli, spkv, client, false) + + tikvStore.mock = true + return tikvStore, errors.Trace(err) +} diff --git a/store/tikv/ticlient_test.go b/store/tikv/ticlient_test.go new file mode 100644 index 0000000..fe497ad --- /dev/null +++ b/store/tikv/ticlient_test.go @@ -0,0 +1,194 @@ +// Copyright 2016 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package tikv + +import ( + "context" + "flag" + "fmt" + "sync" + "time" + + . "github.com/pingcap/check" + "github.com/pingcap/errors" + "github.com/pingcap/tidb/kv" + "github.com/pingcap/tidb/store/mockstore/mocktikv" + "github.com/pingcap/tidb/util/codec" +) + +var ( + withTiKVGlobalLock sync.RWMutex + withTiKV = flag.Bool("with-tikv", false, "run tests with TiKV cluster started. (not use the mock server)") + pdAddrs = flag.String("pd-addrs", "127.0.0.1:2379", "pd addrs") +) + +// NewTestStore creates a kv.Storage for testing purpose. +func NewTestStore(c *C) kv.Storage { + if !flag.Parsed() { + flag.Parse() + } + + if *withTiKV { + var d Driver + store, err := d.Open(fmt.Sprintf("tikv://%s", *pdAddrs)) + c.Assert(err, IsNil) + err = clearStorage(store) + c.Assert(err, IsNil) + return store + } + + client, pdClient, err := mocktikv.NewTiKVAndPDClient(nil, nil, "") + c.Assert(err, IsNil) + + store, err := NewTestTiKVStore(client, pdClient, nil, nil) + c.Assert(err, IsNil) + return store +} + +func clearStorage(store kv.Storage) error { + txn, err := store.Begin() + if err != nil { + return errors.Trace(err) + } + iter, err := txn.Iter(nil, nil) + if err != nil { + return errors.Trace(err) + } + for iter.Valid() { + txn.Delete(iter.Key()) + if err := iter.Next(); err != nil { + return errors.Trace(err) + } + } + return txn.Commit(context.Background()) +} + +type testTiclientSuite struct { + OneByOneSuite + store *tikvStore + // prefix is prefix of each key in this test. It is used for table isolation, + // or it may pollute other data. + prefix string +} + +var _ = Suite(&testTiclientSuite{}) + +func (s *testTiclientSuite) SetUpSuite(c *C) { + s.OneByOneSuite.SetUpSuite(c) + s.store = NewTestStore(c).(*tikvStore) + s.prefix = fmt.Sprintf("ticlient_%d", time.Now().Unix()) +} + +func (s *testTiclientSuite) TearDownSuite(c *C) { + // Clean all data, or it may pollute other data. 
+ txn := s.beginTxn(c) + scanner, err := txn.Iter(encodeKey(s.prefix, ""), nil) + c.Assert(err, IsNil) + c.Assert(scanner, NotNil) + for scanner.Valid() { + k := scanner.Key() + err = txn.Delete(k) + c.Assert(err, IsNil) + scanner.Next() + } + err = txn.Commit(context.Background()) + c.Assert(err, IsNil) + err = s.store.Close() + c.Assert(err, IsNil) + s.OneByOneSuite.TearDownSuite(c) +} + +func (s *testTiclientSuite) beginTxn(c *C) *tikvTxn { + txn, err := s.store.Begin() + c.Assert(err, IsNil) + return txn.(*tikvTxn) +} + +func (s *testTiclientSuite) TestSingleKey(c *C) { + txn := s.beginTxn(c) + err := txn.Set(encodeKey(s.prefix, "key"), []byte("value")) + c.Assert(err, IsNil) + err = txn.LockKeys(context.Background(), new(kv.LockCtx), encodeKey(s.prefix, "key")) + c.Assert(err, IsNil) + err = txn.Commit(context.Background()) + c.Assert(err, IsNil) + + txn = s.beginTxn(c) + val, err := txn.Get(context.TODO(), encodeKey(s.prefix, "key")) + c.Assert(err, IsNil) + c.Assert(val, BytesEquals, []byte("value")) + + txn = s.beginTxn(c) + err = txn.Delete(encodeKey(s.prefix, "key")) + c.Assert(err, IsNil) + err = txn.Commit(context.Background()) + c.Assert(err, IsNil) +} + +func (s *testTiclientSuite) TestMultiKeys(c *C) { + const keyNum = 100 + + txn := s.beginTxn(c) + for i := 0; i < keyNum; i++ { + err := txn.Set(encodeKey(s.prefix, s08d("key", i)), valueBytes(i)) + c.Assert(err, IsNil) + } + err := txn.Commit(context.Background()) + c.Assert(err, IsNil) + + txn = s.beginTxn(c) + for i := 0; i < keyNum; i++ { + val, err1 := txn.Get(context.TODO(), encodeKey(s.prefix, s08d("key", i))) + c.Assert(err1, IsNil) + c.Assert(val, BytesEquals, valueBytes(i)) + } + + txn = s.beginTxn(c) + for i := 0; i < keyNum; i++ { + err = txn.Delete(encodeKey(s.prefix, s08d("key", i))) + c.Assert(err, IsNil) + } + err = txn.Commit(context.Background()) + c.Assert(err, IsNil) +} + +func (s *testTiclientSuite) TestNotExist(c *C) { + txn := s.beginTxn(c) + _, err := txn.Get(context.TODO(), encodeKey(s.prefix, "noSuchKey")) + c.Assert(err, NotNil) +} + +func (s *testTiclientSuite) TestLargeRequest(c *C) { + largeValue := make([]byte, 9*1024*1024) // 9M value. + txn := s.beginTxn(c) + err := txn.Set([]byte("key"), largeValue) + c.Assert(err, NotNil) + err = txn.Commit(context.Background()) + c.Assert(err, IsNil) + c.Assert(kv.IsTxnRetryableError(err), IsFalse) +} + +func encodeKey(prefix, s string) []byte { + return codec.EncodeBytes(nil, []byte(fmt.Sprintf("%s_%s", prefix, s))) +} + +func valueBytes(n int) []byte { + return []byte(fmt.Sprintf("value%d", n)) +} + +// s08d is for returning format string "%s%08d" to keep string sorted. +// e.g.: "0002" < "0011", otherwise "2" > "11" +func s08d(prefix string, n int) string { + return fmt.Sprintf("%s%08d", prefix, n) +} diff --git a/store/tikv/tikv_test.go b/store/tikv/tikv_test.go new file mode 100644 index 0000000..d4a5bfe --- /dev/null +++ b/store/tikv/tikv_test.go @@ -0,0 +1,43 @@ +// Copyright 2018 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package tikv + +import ( + . 
"github.com/pingcap/check" +) + +// OneByOneSuite is a suite, When with-tikv flag is true, there is only one storage, so the test suite have to run one by one. +type OneByOneSuite struct{} + +func (s *OneByOneSuite) SetUpSuite(c *C) { + if *withTiKV { + withTiKVGlobalLock.Lock() + } else { + withTiKVGlobalLock.RLock() + } +} + +func (s *OneByOneSuite) TearDownSuite(c *C) { + if *withTiKV { + withTiKVGlobalLock.Unlock() + } else { + withTiKVGlobalLock.RUnlock() + } +} + +type testTiKVSuite struct { + OneByOneSuite +} + +var _ = Suite(&testTiKVSuite{}) diff --git a/store/tikv/tikvrpc/tikvrpc.go b/store/tikv/tikvrpc/tikvrpc.go new file mode 100644 index 0000000..651f989 --- /dev/null +++ b/store/tikv/tikvrpc/tikvrpc.go @@ -0,0 +1,327 @@ +// Copyright 2017 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package tikvrpc + +import ( + "context" + "fmt" + + "github.com/pingcap-incubator/tinykv/proto/pkg/coprocessor" + "github.com/pingcap-incubator/tinykv/proto/pkg/errorpb" + "github.com/pingcap-incubator/tinykv/proto/pkg/kvrpcpb" + "github.com/pingcap-incubator/tinykv/proto/pkg/metapb" + "github.com/pingcap-incubator/tinykv/proto/pkg/tinykvpb" + "github.com/pingcap/errors" +) + +// CmdType represents the concrete request type in Request or response type in Response. +type CmdType uint16 + +// CmdType values. +const ( + CmdGet CmdType = 1 + iota + CmdScan + CmdPrewrite + CmdCommit + CmdBatchRollback + CmdResolveLock + CmdCheckTxnStatus + + CmdRawGet CmdType = 256 + iota + CmdRawPut + CmdRawDelete + CmdRawScan + + CmdCop CmdType = 512 + iota +) + +func (t CmdType) String() string { + switch t { + case CmdGet: + return "Get" + case CmdScan: + return "Scan" + case CmdPrewrite: + return "Prewrite" + case CmdCommit: + return "Commit" + case CmdBatchRollback: + return "BatchRollback" + case CmdResolveLock: + return "ResolveLock" + case CmdRawGet: + return "RawGet" + case CmdRawPut: + return "RawPut" + case CmdRawDelete: + return "RawDelete" + case CmdRawScan: + return "RawScan" + case CmdCop: + return "Cop" + case CmdCheckTxnStatus: + return "CheckTxnStatus" + } + return "Unknown" +} + +// Request wraps all kv/coprocessor requests. +type Request struct { + Type CmdType + req interface{} + kvrpcpb.Context + ReplicaReadSeed uint32 +} + +// NewRequest returns new kv rpc request. +func NewRequest(typ CmdType, pointer interface{}, ctxs ...kvrpcpb.Context) *Request { + if len(ctxs) > 0 { + return &Request{ + Type: typ, + req: pointer, + Context: ctxs[0], + } + } + return &Request{ + Type: typ, + req: pointer, + } +} + +// Get returns GetRequest in request. +func (req *Request) Get() *kvrpcpb.GetRequest { + return req.req.(*kvrpcpb.GetRequest) +} + +// Scan returns ScanRequest in request. +func (req *Request) Scan() *kvrpcpb.ScanRequest { + return req.req.(*kvrpcpb.ScanRequest) +} + +// Prewrite returns PrewriteRequest in request. +func (req *Request) Prewrite() *kvrpcpb.PrewriteRequest { + return req.req.(*kvrpcpb.PrewriteRequest) +} + +// Commit returns CommitRequest in request. 
+func (req *Request) Commit() *kvrpcpb.CommitRequest { + return req.req.(*kvrpcpb.CommitRequest) +} + +// BatchRollback returns BatchRollbackRequest in request. +func (req *Request) BatchRollback() *kvrpcpb.BatchRollbackRequest { + return req.req.(*kvrpcpb.BatchRollbackRequest) +} + +// ResolveLock returns ResolveLockRequest in request. +func (req *Request) ResolveLock() *kvrpcpb.ResolveLockRequest { + return req.req.(*kvrpcpb.ResolveLockRequest) +} + +// RawGet returns RawGetRequest in request. +func (req *Request) RawGet() *kvrpcpb.RawGetRequest { + return req.req.(*kvrpcpb.RawGetRequest) +} + +// RawPut returns RawPutRequest in request. +func (req *Request) RawPut() *kvrpcpb.RawPutRequest { + return req.req.(*kvrpcpb.RawPutRequest) +} + +// RawDelete returns PrewriteRequest in request. +func (req *Request) RawDelete() *kvrpcpb.RawDeleteRequest { + return req.req.(*kvrpcpb.RawDeleteRequest) +} + +// RawScan returns RawScanRequest in request. +func (req *Request) RawScan() *kvrpcpb.RawScanRequest { + return req.req.(*kvrpcpb.RawScanRequest) +} + +// Cop returns coprocessor request in request. +func (req *Request) Cop() *coprocessor.Request { + return req.req.(*coprocessor.Request) +} + +// CheckTxnStatus returns CheckTxnStatusRequest in request. +func (req *Request) CheckTxnStatus() *kvrpcpb.CheckTxnStatusRequest { + return req.req.(*kvrpcpb.CheckTxnStatusRequest) +} + +// Response wraps all kv/coprocessor responses. +type Response struct { + Resp interface{} +} + +// SetContext set the Context field for the given req to the specified ctx. +func SetContext(req *Request, region *metapb.Region, peer *metapb.Peer) error { + ctx := &req.Context + if region != nil { + ctx.RegionId = region.Id + ctx.RegionEpoch = region.RegionEpoch + } + ctx.Peer = peer + + switch req.Type { + case CmdGet: + req.Get().Context = ctx + case CmdScan: + req.Scan().Context = ctx + case CmdPrewrite: + req.Prewrite().Context = ctx + case CmdCommit: + req.Commit().Context = ctx + case CmdBatchRollback: + req.BatchRollback().Context = ctx + case CmdResolveLock: + req.ResolveLock().Context = ctx + case CmdRawGet: + req.RawGet().Context = ctx + case CmdRawPut: + req.RawPut().Context = ctx + case CmdRawDelete: + req.RawDelete().Context = ctx + case CmdRawScan: + req.RawScan().Context = ctx + case CmdCop: + req.Cop().Context = ctx + case CmdCheckTxnStatus: + req.CheckTxnStatus().Context = ctx + default: + return fmt.Errorf("invalid request type %v", req.Type) + } + return nil +} + +// GenRegionErrorResp returns corresponding Response with specified RegionError +// according to the given req. 
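[Illustrative note, not part of the patch] A minimal sketch of how a caller is expected to use the Request envelope defined above: wrap a concrete protobuf request under its CmdType, then recover the typed payload through the matching accessor. The key value is made up, and the import path assumes the github.com/pingcap/tidb module path used elsewhere in this patch.

package main

import (
	"fmt"

	"github.com/pingcap-incubator/tinykv/proto/pkg/kvrpcpb"
	"github.com/pingcap/tidb/store/tikv/tikvrpc"
)

func main() {
	// Wrap a typed GetRequest under CmdGet; NewRequest stores it as interface{}.
	req := tikvrpc.NewRequest(tikvrpc.CmdGet, &kvrpcpb.GetRequest{
		Key: []byte("k"),
	})
	// The accessor that matches req.Type returns the concrete request again.
	fmt.Println(req.Type, string(req.Get().Key))
}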
+func GenRegionErrorResp(req *Request, e *errorpb.Error) (*Response, error) { + var p interface{} + resp := &Response{} + switch req.Type { + case CmdGet: + p = &kvrpcpb.GetResponse{ + RegionError: e, + } + case CmdScan: + p = &kvrpcpb.ScanResponse{ + RegionError: e, + } + case CmdPrewrite: + p = &kvrpcpb.PrewriteResponse{ + RegionError: e, + } + case CmdCommit: + p = &kvrpcpb.CommitResponse{ + RegionError: e, + } + case CmdBatchRollback: + p = &kvrpcpb.BatchRollbackResponse{ + RegionError: e, + } + case CmdResolveLock: + p = &kvrpcpb.ResolveLockResponse{ + RegionError: e, + } + case CmdRawGet: + p = &kvrpcpb.RawGetResponse{ + RegionError: e, + } + case CmdRawPut: + p = &kvrpcpb.RawPutResponse{ + RegionError: e, + } + case CmdRawDelete: + p = &kvrpcpb.RawDeleteResponse{ + RegionError: e, + } + case CmdRawScan: + p = &kvrpcpb.RawScanResponse{ + RegionError: e, + } + case CmdCop: + p = &coprocessor.Response{ + RegionError: e, + } + case CmdCheckTxnStatus: + p = &kvrpcpb.CheckTxnStatusResponse{ + RegionError: e, + } + default: + return nil, fmt.Errorf("invalid request type %v", req.Type) + } + resp.Resp = p + return resp, nil +} + +type getRegionError interface { + GetRegionError() *errorpb.Error +} + +// GetRegionError returns the RegionError of the underlying concrete response. +func (resp *Response) GetRegionError() (*errorpb.Error, error) { + if resp.Resp == nil { + return nil, nil + } + err, ok := resp.Resp.(getRegionError) + if !ok { + return nil, fmt.Errorf("invalid response type %v", resp) + } + return err.GetRegionError(), nil +} + +// CallRPC launches a rpc call. +// ch is needed to implement timeout for coprocessor streaing, the stream object's +// cancel function will be sent to the channel, together with a lease checked by a background goroutine. +func CallRPC(ctx context.Context, client tinykvpb.TinyKvClient, req *Request) (*Response, error) { + resp := &Response{} + var err error + switch req.Type { + case CmdGet: + resp.Resp, err = client.KvGet(ctx, req.Get()) + case CmdScan: + resp.Resp, err = client.KvScan(ctx, req.Scan()) + case CmdPrewrite: + resp.Resp, err = client.KvPrewrite(ctx, req.Prewrite()) + case CmdCommit: + resp.Resp, err = client.KvCommit(ctx, req.Commit()) + case CmdBatchRollback: + resp.Resp, err = client.KvBatchRollback(ctx, req.BatchRollback()) + case CmdResolveLock: + resp.Resp, err = client.KvResolveLock(ctx, req.ResolveLock()) + case CmdRawGet: + resp.Resp, err = client.RawGet(ctx, req.RawGet()) + case CmdRawPut: + resp.Resp, err = client.RawPut(ctx, req.RawPut()) + case CmdRawDelete: + resp.Resp, err = client.RawDelete(ctx, req.RawDelete()) + case CmdRawScan: + resp.Resp, err = client.RawScan(ctx, req.RawScan()) + case CmdCop: + resp.Resp, err = client.Coprocessor(ctx, req.Cop()) + case CmdCheckTxnStatus: + resp.Resp, err = client.KvCheckTxnStatus(ctx, req.CheckTxnStatus()) + default: + return nil, errors.Errorf("invalid request type: %v", req.Type) + } + if err != nil { + return nil, errors.Trace(err) + } + return resp, nil +} + +// Lease is used to implement grpc stream timeout. +type Lease struct { + Cancel context.CancelFunc +} diff --git a/store/tikv/txn.go b/store/tikv/txn.go new file mode 100644 index 0000000..00125a4 --- /dev/null +++ b/store/tikv/txn.go @@ -0,0 +1,251 @@ +// Copyright 2016 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package tikv + +import ( + "context" + "fmt" + "sync" + "time" + + "github.com/pingcap/errors" + "github.com/pingcap/failpoint" + "github.com/pingcap/tidb/kv" + + "github.com/pingcap/tidb/sessionctx" + "github.com/pingcap/tidb/util/logutil" + "go.uber.org/zap" +) + +var ( + _ kv.Transaction = (*tikvTxn)(nil) +) + +// tikvTxn implements kv.Transaction. +type tikvTxn struct { + snapshot *tikvSnapshot + us kv.UnionStore + store *tikvStore // for connection to region. + startTS uint64 + startTime time.Time // Monotonic timestamp for recording txn time consuming. + commitTS uint64 + lockKeys [][]byte + lockedMap map[string]struct{} + mu sync.Mutex // For thread-safe LockKeys function. + setCnt int64 + vars *kv.Variables + committer *twoPhaseCommitter + + valid bool + dirty bool +} + +func newTiKVTxn(store *tikvStore) (*tikvTxn, error) { + bo := NewBackoffer(context.Background(), tsoMaxBackoff) + startTS, err := store.getTimestampWithRetry(bo) + if err != nil { + return nil, errors.Trace(err) + } + return newTikvTxnWithStartTS(store, startTS) +} + +// newTikvTxnWithStartTS creates a txn with startTS. +func newTikvTxnWithStartTS(store *tikvStore, startTS uint64) (*tikvTxn, error) { + ver := kv.NewVersion(startTS) + snapshot := newTiKVSnapshot(store, ver) + return &tikvTxn{ + snapshot: snapshot, + us: kv.NewUnionStore(snapshot), + lockedMap: map[string]struct{}{}, + store: store, + startTS: startTS, + startTime: time.Now(), + valid: true, + vars: kv.DefaultVars, + }, nil +} + +func (txn *tikvTxn) SetVars(vars *kv.Variables) { + txn.vars = vars + txn.snapshot.vars = vars +} + +// SetCap sets the transaction's MemBuffer capability, to reduce memory allocations. +func (txn *tikvTxn) SetCap(cap int) { + txn.us.SetCap(cap) +} + +// Reset reset tikvTxn's membuf. +func (txn *tikvTxn) Reset() { + txn.us.Reset() +} + +// Get implements transaction interface. +func (txn *tikvTxn) Get(ctx context.Context, k kv.Key) ([]byte, error) { + ret, err := txn.us.Get(ctx, k) + if kv.IsErrNotFound(err) { + return nil, err + } + if err != nil { + return nil, errors.Trace(err) + } + + err = txn.store.CheckVisibility(txn.startTS) + if err != nil { + return nil, errors.Trace(err) + } + + return ret, nil +} + +func (txn *tikvTxn) Set(k kv.Key, v []byte) error { + txn.setCnt++ + + txn.dirty = true + return txn.us.Set(k, v) +} + +func (txn *tikvTxn) String() string { + return fmt.Sprintf("%d", txn.StartTS()) +} + +func (txn *tikvTxn) Iter(k kv.Key, upperBound kv.Key) (kv.Iterator, error) { + return txn.us.Iter(k, upperBound) +} + +// IterReverse creates a reversed Iterator positioned on the first entry which key is less than k. 
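[Illustrative note, not part of the patch] The write path through tikvTxn above is driven entirely through the kv.Transaction interface. A hedged sketch of the usual lifecycle, assuming a kv.Storage obtained from the driver in this patch; the package and function names are hypothetical.

package example

import (
	"context"

	"github.com/pingcap/tidb/kv"
)

// putAndCommit buffers one mutation in the transaction's union store and then
// commits it, which drives the two-phase commit path implemented by tikvTxn.
func putAndCommit(store kv.Storage, key, value []byte) error {
	txn, err := store.Begin() // allocates a startTS and builds a snapshot
	if err != nil {
		return err
	}
	if err := txn.Set(key, value); err != nil {
		return err
	}
	return txn.Commit(context.Background())
}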
+func (txn *tikvTxn) IterReverse(k kv.Key) (kv.Iterator, error) { + return txn.us.IterReverse(k) +} + +func (txn *tikvTxn) Delete(k kv.Key) error { + txn.dirty = true + return txn.us.Delete(k) +} + +func (txn *tikvTxn) SetOption(opt kv.Option, val interface{}) { + txn.us.SetOption(opt, val) + switch opt { + case kv.SyncLog: + txn.snapshot.syncLog = val.(bool) + case kv.KeyOnly: + txn.snapshot.keyOnly = val.(bool) + case kv.SnapshotTS: + txn.snapshot.setSnapshotTS(val.(uint64)) + } +} + +func (txn *tikvTxn) DelOption(opt kv.Option) { + txn.us.DelOption(opt) +} + +func (txn *tikvTxn) Commit(ctx context.Context) error { + if !txn.valid { + return kv.ErrInvalidTxn + } + defer txn.close() + + failpoint.Inject("mockCommitError", func(val failpoint.Value) { + if val.(bool) && kv.IsMockCommitErrorEnable() { + kv.MockCommitErrorDisable() + failpoint.Return(errors.New("mock commit error")) + } + }) + + // connID is used for log. + var connID uint64 + val := ctx.Value(sessionctx.ConnID) + if val != nil { + connID = val.(uint64) + } + + var err error + committer := txn.committer + if committer == nil { + committer, err = newTwoPhaseCommitter(txn, connID) + if err != nil { + return errors.Trace(err) + } + } + if err := committer.initKeysAndMutations(); err != nil { + return errors.Trace(err) + } + if len(committer.keys) == 0 { + return nil + } + + err = committer.execute(ctx) + return errors.Trace(err) +} + +func (txn *tikvTxn) close() { + txn.valid = false +} + +func (txn *tikvTxn) Rollback() error { + if !txn.valid { + return kv.ErrInvalidTxn + } + txn.close() + logutil.BgLogger().Debug("[kv] rollback txn", zap.Uint64("txnStartTS", txn.StartTS())) + return nil +} + +// lockWaitTime in ms, except that kv.LockAlwaysWait(0) means always wait lock, kv.LockNowait(-1) means nowait lock +func (txn *tikvTxn) LockKeys(ctx context.Context, lockCtx *kv.LockCtx, keysInput ...kv.Key) error { + // Exclude keys that are already locked. + keys := make([][]byte, 0, len(keysInput)) + txn.mu.Lock() + for _, key := range keysInput { + if _, ok := txn.lockedMap[string(key)]; !ok { + keys = append(keys, key) + } + } + txn.mu.Unlock() + if len(keys) == 0 { + return nil + } + txn.mu.Lock() + txn.lockKeys = append(txn.lockKeys, keys...) + for _, key := range keys { + txn.lockedMap[string(key)] = struct{}{} + } + txn.dirty = true + txn.mu.Unlock() + return nil +} + +func (txn *tikvTxn) IsReadOnly() bool { + return !txn.dirty +} + +func (txn *tikvTxn) StartTS() uint64 { + return txn.startTS +} + +func (txn *tikvTxn) Valid() bool { + return txn.valid +} + +func (txn *tikvTxn) Len() int { + return txn.us.Len() +} + +func (txn *tikvTxn) Size() int { + return txn.us.Size() +} + +func (txn *tikvTxn) GetMemBuffer() kv.MemBuffer { + return txn.us.GetMemBuffer() +} diff --git a/structure/hash.go b/structure/hash.go new file mode 100644 index 0000000..9d19a05 --- /dev/null +++ b/structure/hash.go @@ -0,0 +1,353 @@ +// Copyright 2015 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package structure + +import ( + "bytes" + "context" + "encoding/binary" + "strconv" + + "github.com/pingcap/errors" + "github.com/pingcap/tidb/kv" +) + +// HashPair is the pair for (field, value) in a hash. +type HashPair struct { + Field []byte + Value []byte +} + +type hashMeta struct { + FieldCount int64 +} + +func (meta hashMeta) Value() []byte { + buf := make([]byte, 8) + binary.BigEndian.PutUint64(buf[0:8], uint64(meta.FieldCount)) + return buf +} + +func (meta hashMeta) IsEmpty() bool { + return meta.FieldCount <= 0 +} + +// HSet sets the string value of a hash field. +func (t *TxStructure) HSet(key []byte, field []byte, value []byte) error { + if t.readWriter == nil { + return ErrWriteOnSnapshot + } + return t.updateHash(key, field, func([]byte) ([]byte, error) { + return value, nil + }) +} + +// HGet gets the value of a hash field. +func (t *TxStructure) HGet(key []byte, field []byte) ([]byte, error) { + dataKey := t.encodeHashDataKey(key, field) + value, err := t.reader.Get(context.TODO(), dataKey) + if kv.ErrNotExist.Equal(err) { + err = nil + } + return value, errors.Trace(err) +} + +func (t *TxStructure) hashFieldIntegerVal(val int64) []byte { + return []byte(strconv.FormatInt(val, 10)) +} + +// EncodeHashAutoIDKeyValue returns the hash key-value generated by the key and the field +func (t *TxStructure) EncodeHashAutoIDKeyValue(key []byte, field []byte, val int64) (k, v []byte) { + return t.encodeHashDataKey(key, field), t.hashFieldIntegerVal(val) +} + +// HInc increments the integer value of a hash field, by step, returns +// the value after the increment. +func (t *TxStructure) HInc(key []byte, field []byte, step int64) (int64, error) { + if t.readWriter == nil { + return 0, ErrWriteOnSnapshot + } + base := int64(0) + err := t.updateHash(key, field, func(oldValue []byte) ([]byte, error) { + if oldValue != nil { + var err error + base, err = strconv.ParseInt(string(oldValue), 10, 64) + if err != nil { + return nil, errors.Trace(err) + } + } + base += step + return t.hashFieldIntegerVal(base), nil + }) + + return base, errors.Trace(err) +} + +// HGetInt64 gets int64 value of a hash field. +func (t *TxStructure) HGetInt64(key []byte, field []byte) (int64, error) { + value, err := t.HGet(key, field) + if err != nil || value == nil { + return 0, errors.Trace(err) + } + + var n int64 + n, err = strconv.ParseInt(string(value), 10, 64) + return n, errors.Trace(err) +} + +func (t *TxStructure) updateHash(key []byte, field []byte, fn func(oldValue []byte) ([]byte, error)) error { + dataKey := t.encodeHashDataKey(key, field) + oldValue, err := t.loadHashValue(dataKey) + if err != nil { + return errors.Trace(err) + } + + newValue, err := fn(oldValue) + if err != nil { + return errors.Trace(err) + } + + // Check if new value is equal to old value. + if bytes.Equal(oldValue, newValue) { + return nil + } + + if err = t.readWriter.Set(dataKey, newValue); err != nil { + return errors.Trace(err) + } + + metaKey := t.encodeHashMetaKey(key) + meta, err := t.loadHashMeta(metaKey) + if err != nil { + return errors.Trace(err) + } + + if oldValue == nil { + meta.FieldCount++ + if err = t.readWriter.Set(metaKey, meta.Value()); err != nil { + return errors.Trace(err) + } + } + + return nil +} + +// HLen gets the number of fields in a hash. +func (t *TxStructure) HLen(key []byte) (int64, error) { + metaKey := t.encodeHashMetaKey(key) + meta, err := t.loadHashMeta(metaKey) + if err != nil { + return 0, errors.Trace(err) + } + return meta.FieldCount, nil +} + +// HDel deletes one or more hash fields. 
+func (t *TxStructure) HDel(key []byte, fields ...[]byte) error { + if t.readWriter == nil { + return ErrWriteOnSnapshot + } + metaKey := t.encodeHashMetaKey(key) + meta, err := t.loadHashMeta(metaKey) + if err != nil || meta.IsEmpty() { + return errors.Trace(err) + } + + var value []byte + for _, field := range fields { + dataKey := t.encodeHashDataKey(key, field) + + value, err = t.loadHashValue(dataKey) + if err != nil { + return errors.Trace(err) + } + + if value != nil { + if err = t.readWriter.Delete(dataKey); err != nil { + return errors.Trace(err) + } + + meta.FieldCount-- + } + } + + if meta.IsEmpty() { + err = t.readWriter.Delete(metaKey) + } else { + err = t.readWriter.Set(metaKey, meta.Value()) + } + + return errors.Trace(err) +} + +// HKeys gets all the fields in a hash. +func (t *TxStructure) HKeys(key []byte) ([][]byte, error) { + var keys [][]byte + err := t.iterateHash(key, func(field []byte, value []byte) error { + keys = append(keys, append([]byte{}, field...)) + return nil + }) + + return keys, errors.Trace(err) +} + +// HGetAll gets all the fields and values in a hash. +func (t *TxStructure) HGetAll(key []byte) ([]HashPair, error) { + var res []HashPair + err := t.iterateHash(key, func(field []byte, value []byte) error { + pair := HashPair{ + Field: append([]byte{}, field...), + Value: append([]byte{}, value...), + } + res = append(res, pair) + return nil + }) + + return res, errors.Trace(err) +} + +// HGetLastN gets latest N fields and values in hash. +func (t *TxStructure) HGetLastN(key []byte, num int) ([]HashPair, error) { + res := make([]HashPair, 0, num) + err := t.iterReverseHash(key, func(field []byte, value []byte) (bool, error) { + pair := HashPair{ + Field: append([]byte{}, field...), + Value: append([]byte{}, value...), + } + res = append(res, pair) + if len(res) >= num { + return false, nil + } + return true, nil + }) + return res, errors.Trace(err) +} + +// HClear removes the hash value of the key. 
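[Illustrative note, not part of the patch] A hedged sketch of how the hash helpers above fit together; it assumes a *TxStructure built with structure.NewStructure(txn, txn, prefix) inside an open transaction, and the package/function names here are hypothetical.

package example

import (
	"fmt"

	"github.com/pingcap/tidb/structure"
)

// hashDemo writes one field, bumps a counter field, and reads everything back.
// The field count is tracked in the hash meta key maintained by updateHash.
func hashDemo(t *structure.TxStructure) error {
	key := []byte("h")
	if err := t.HSet(key, []byte("f"), []byte("v")); err != nil {
		return err
	}
	// HInc creates the field on first use and returns the value after the increment.
	if _, err := t.HInc(key, []byte("cnt"), 1); err != nil {
		return err
	}
	pairs, err := t.HGetAll(key)
	if err != nil {
		return err
	}
	fmt.Println(len(pairs)) // 2 fields: "f" and "cnt"
	return nil
}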
+func (t *TxStructure) HClear(key []byte) error { + metaKey := t.encodeHashMetaKey(key) + meta, err := t.loadHashMeta(metaKey) + if err != nil || meta.IsEmpty() { + return errors.Trace(err) + } + + err = t.iterateHash(key, func(field []byte, value []byte) error { + k := t.encodeHashDataKey(key, field) + return errors.Trace(t.readWriter.Delete(k)) + }) + + if err != nil { + return errors.Trace(err) + } + + return errors.Trace(t.readWriter.Delete(metaKey)) +} + +func (t *TxStructure) iterateHash(key []byte, fn func(k []byte, v []byte) error) error { + dataPrefix := t.hashDataKeyPrefix(key) + it, err := t.reader.Iter(dataPrefix, dataPrefix.PrefixNext()) + if err != nil { + return errors.Trace(err) + } + + var field []byte + + for it.Valid() { + if !it.Key().HasPrefix(dataPrefix) { + break + } + + _, field, err = t.decodeHashDataKey(it.Key()) + if err != nil { + return errors.Trace(err) + } + + if err = fn(field, it.Value()); err != nil { + return errors.Trace(err) + } + + err = it.Next() + if err != nil { + return errors.Trace(err) + } + } + + return nil +} + +func (t *TxStructure) iterReverseHash(key []byte, fn func(k []byte, v []byte) (bool, error)) error { + dataPrefix := t.hashDataKeyPrefix(key) + it, err := t.reader.IterReverse(dataPrefix.PrefixNext()) + if err != nil { + return errors.Trace(err) + } + + var field []byte + for it.Valid() { + if !it.Key().HasPrefix(dataPrefix) { + break + } + + _, field, err = t.decodeHashDataKey(it.Key()) + if err != nil { + return errors.Trace(err) + } + + more, err := fn(field, it.Value()) + if !more || err != nil { + return errors.Trace(err) + } + + err = it.Next() + if err != nil { + return errors.Trace(err) + } + } + return nil +} + +func (t *TxStructure) loadHashMeta(metaKey []byte) (hashMeta, error) { + v, err := t.reader.Get(context.TODO(), metaKey) + if kv.ErrNotExist.Equal(err) { + err = nil + } + if err != nil { + return hashMeta{}, errors.Trace(err) + } + + meta := hashMeta{FieldCount: 0} + if v == nil { + return meta, nil + } + + if len(v) != 8 { + return meta, ErrInvalidListMetaData + } + + meta.FieldCount = int64(binary.BigEndian.Uint64(v[0:8])) + return meta, nil +} + +func (t *TxStructure) loadHashValue(dataKey []byte) ([]byte, error) { + v, err := t.reader.Get(context.TODO(), dataKey) + if kv.ErrNotExist.Equal(err) { + err = nil + v = nil + } + if err != nil { + return nil, errors.Trace(err) + } + + return v, nil +} diff --git a/structure/list.go b/structure/list.go new file mode 100644 index 0000000..c55109a --- /dev/null +++ b/structure/list.go @@ -0,0 +1,245 @@ +// Copyright 2015 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package structure + +import ( + "context" + "encoding/binary" + + "github.com/pingcap/errors" + "github.com/pingcap/tidb/kv" +) + +type listMeta struct { + LIndex int64 + RIndex int64 +} + +func (meta listMeta) Value() []byte { + buf := make([]byte, 16) + binary.BigEndian.PutUint64(buf[0:8], uint64(meta.LIndex)) + binary.BigEndian.PutUint64(buf[8:16], uint64(meta.RIndex)) + return buf +} + +func (meta listMeta) IsEmpty() bool { + return meta.LIndex >= meta.RIndex +} + +// LPush prepends one or multiple values to a list. +func (t *TxStructure) LPush(key []byte, values ...[]byte) error { + return t.listPush(key, true, values...) +} + +// RPush appends one or multiple values to a list. +func (t *TxStructure) RPush(key []byte, values ...[]byte) error { + return t.listPush(key, false, values...) +} + +func (t *TxStructure) listPush(key []byte, left bool, values ...[]byte) error { + if t.readWriter == nil { + return ErrWriteOnSnapshot + } + if len(values) == 0 { + return nil + } + + metaKey := t.encodeListMetaKey(key) + meta, err := t.loadListMeta(metaKey) + if err != nil { + return errors.Trace(err) + } + + var index int64 + for _, v := range values { + if left { + meta.LIndex-- + index = meta.LIndex + } else { + index = meta.RIndex + meta.RIndex++ + } + + dataKey := t.encodeListDataKey(key, index) + if err = t.readWriter.Set(dataKey, v); err != nil { + return errors.Trace(err) + } + } + + return t.readWriter.Set(metaKey, meta.Value()) +} + +// LPop removes and gets the first element in a list. +func (t *TxStructure) LPop(key []byte) ([]byte, error) { + return t.listPop(key, true) +} + +// RPop removes and gets the last element in a list. +func (t *TxStructure) RPop(key []byte) ([]byte, error) { + return t.listPop(key, false) +} + +func (t *TxStructure) listPop(key []byte, left bool) ([]byte, error) { + if t.readWriter == nil { + return nil, ErrWriteOnSnapshot + } + metaKey := t.encodeListMetaKey(key) + meta, err := t.loadListMeta(metaKey) + if err != nil || meta.IsEmpty() { + return nil, errors.Trace(err) + } + + var index int64 + if left { + index = meta.LIndex + meta.LIndex++ + } else { + meta.RIndex-- + index = meta.RIndex + } + + dataKey := t.encodeListDataKey(key, index) + + var data []byte + data, err = t.reader.Get(context.TODO(), dataKey) + if err != nil { + return nil, errors.Trace(err) + } + + if err = t.readWriter.Delete(dataKey); err != nil { + return nil, errors.Trace(err) + } + + if !meta.IsEmpty() { + err = t.readWriter.Set(metaKey, meta.Value()) + } else { + err = t.readWriter.Delete(metaKey) + } + + return data, errors.Trace(err) +} + +// LLen gets the length of a list. +func (t *TxStructure) LLen(key []byte) (int64, error) { + metaKey := t.encodeListMetaKey(key) + meta, err := t.loadListMeta(metaKey) + return meta.RIndex - meta.LIndex, errors.Trace(err) +} + +// LGetAll gets all elements of this list in order from right to left. +func (t *TxStructure) LGetAll(key []byte) ([][]byte, error) { + metaKey := t.encodeListMetaKey(key) + meta, err := t.loadListMeta(metaKey) + if err != nil || meta.IsEmpty() { + return nil, errors.Trace(err) + } + + length := int(meta.RIndex - meta.LIndex) + elements := make([][]byte, 0, length) + for index := meta.RIndex - 1; index >= meta.LIndex; index-- { + e, err := t.reader.Get(context.TODO(), t.encodeListDataKey(key, index)) + if err != nil { + return nil, errors.Trace(err) + } + elements = append(elements, e) + } + return elements, nil +} + +// LIndex gets an element from a list by its index. 
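[Illustrative note, not part of the patch] A similar hedged usage sketch for the list structure above; t is again an assumed *TxStructure, and the package/function names are hypothetical.

package example

import (
	"fmt"

	"github.com/pingcap/tidb/structure"
)

// listDemo shows the deque-like behaviour: LPush grows the list to the left,
// RPush to the right, and the meta key records the [LIndex, RIndex) window.
func listDemo(t *structure.TxStructure) error {
	key := []byte("l")
	if err := t.RPush(key, []byte("a"), []byte("b")); err != nil {
		return err
	}
	if err := t.LPush(key, []byte("z")); err != nil {
		return err
	}
	n, err := t.LLen(key)
	if err != nil {
		return err
	}
	first, err := t.LPop(key)
	if err != nil {
		return err
	}
	fmt.Println(n, string(first)) // 3 z
	return nil
}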
+func (t *TxStructure) LIndex(key []byte, index int64) ([]byte, error) { + metaKey := t.encodeListMetaKey(key) + meta, err := t.loadListMeta(metaKey) + if err != nil || meta.IsEmpty() { + return nil, errors.Trace(err) + } + + index = adjustIndex(index, meta.LIndex, meta.RIndex) + + if index >= meta.LIndex && index < meta.RIndex { + return t.reader.Get(context.TODO(), t.encodeListDataKey(key, index)) + } + return nil, nil +} + +// LSet updates an element in the list by its index. +func (t *TxStructure) LSet(key []byte, index int64, value []byte) error { + if t.readWriter == nil { + return ErrWriteOnSnapshot + } + metaKey := t.encodeListMetaKey(key) + meta, err := t.loadListMeta(metaKey) + if err != nil || meta.IsEmpty() { + return errors.Trace(err) + } + + index = adjustIndex(index, meta.LIndex, meta.RIndex) + + if index >= meta.LIndex && index < meta.RIndex { + return t.readWriter.Set(t.encodeListDataKey(key, index), value) + } + return ErrInvalidListIndex.GenWithStack("invalid list index %d", index) +} + +// LClear removes the list of the key. +func (t *TxStructure) LClear(key []byte) error { + if t.readWriter == nil { + return ErrWriteOnSnapshot + } + metaKey := t.encodeListMetaKey(key) + meta, err := t.loadListMeta(metaKey) + if err != nil || meta.IsEmpty() { + return errors.Trace(err) + } + + for index := meta.LIndex; index < meta.RIndex; index++ { + dataKey := t.encodeListDataKey(key, index) + if err = t.readWriter.Delete(dataKey); err != nil { + return errors.Trace(err) + } + } + + return t.readWriter.Delete(metaKey) +} + +func (t *TxStructure) loadListMeta(metaKey []byte) (listMeta, error) { + v, err := t.reader.Get(context.TODO(), metaKey) + if kv.ErrNotExist.Equal(err) { + err = nil + } + if err != nil { + return listMeta{}, errors.Trace(err) + } + + meta := listMeta{0, 0} + if v == nil { + return meta, nil + } + + if len(v) != 16 { + return meta, ErrInvalidListMetaData + } + + meta.LIndex = int64(binary.BigEndian.Uint64(v[0:8])) + meta.RIndex = int64(binary.BigEndian.Uint64(v[8:16])) + return meta, nil +} + +func adjustIndex(index int64, min, max int64) int64 { + if index >= 0 { + return index + min + } + + return index + max +} diff --git a/structure/string.go b/structure/string.go new file mode 100644 index 0000000..57a1fa4 --- /dev/null +++ b/structure/string.go @@ -0,0 +1,80 @@ +// Copyright 2015 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package structure + +import ( + "context" + "strconv" + + "github.com/pingcap/errors" + "github.com/pingcap/tidb/kv" +) + +// Set sets the string value of the key. +func (t *TxStructure) Set(key []byte, value []byte) error { + if t.readWriter == nil { + return ErrWriteOnSnapshot + } + ek := t.encodeStringDataKey(key) + return t.readWriter.Set(ek, value) +} + +// Get gets the string value of a key. +func (t *TxStructure) Get(key []byte) ([]byte, error) { + ek := t.encodeStringDataKey(key) + value, err := t.reader.Get(context.TODO(), ek) + if kv.ErrNotExist.Equal(err) { + err = nil + } + return value, errors.Trace(err) +} + +// GetInt64 gets the int64 value of a key. 
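[Illustrative note, not part of the patch] One more hedged sketch, this time for the plain string helpers above: integer values are stored as their decimal string form, which is what GetInt64 parses back. As before, t is an assumed *TxStructure and the names here are hypothetical.

package example

import (
	"fmt"

	"github.com/pingcap/tidb/structure"
)

// stringDemo stores a numeric value as a decimal string and reads it back as int64.
func stringDemo(t *structure.TxStructure) error {
	if err := t.Set([]byte("counter"), []byte("41")); err != nil {
		return err
	}
	n, err := t.GetInt64([]byte("counter"))
	if err != nil {
		return err
	}
	fmt.Println(n) // 41
	return nil
}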
+func (t *TxStructure) GetInt64(key []byte) (int64, error) { + v, err := t.Get(key) + if err != nil || v == nil { + return 0, errors.Trace(err) + } + + n, err := strconv.ParseInt(string(v), 10, 64) + return n, errors.Trace(err) +} + +// Inc increments the integer value of a key by step, returns +// the value after the increment. +func (t *TxStructure) Inc(key []byte, step int64) (int64, error) { + if t.readWriter == nil { + return 0, ErrWriteOnSnapshot + } + ek := t.encodeStringDataKey(key) + // txn Inc will lock this key, so we don't lock it here. + n, err := kv.IncInt64(t.readWriter, ek, step) + if kv.ErrNotExist.Equal(err) { + err = nil + } + return n, errors.Trace(err) +} + +// Clear removes the string value of the key. +func (t *TxStructure) Clear(key []byte) error { + if t.readWriter == nil { + return ErrWriteOnSnapshot + } + ek := t.encodeStringDataKey(key) + err := t.readWriter.Delete(ek) + if kv.ErrNotExist.Equal(err) { + err = nil + } + return errors.Trace(err) +} diff --git a/structure/structure.go b/structure/structure.go new file mode 100644 index 0000000..917f427 --- /dev/null +++ b/structure/structure.go @@ -0,0 +1,59 @@ +// Copyright 2015 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package structure + +import ( + "github.com/pingcap/tidb/kv" + "github.com/pingcap/tidb/parser/mysql" + "github.com/pingcap/tidb/parser/terror" +) + +var ( + // ErrInvalidHashKeyFlag used by structure + ErrInvalidHashKeyFlag = terror.ClassStructure.New(mysql.ErrInvalidHashKeyFlag, mysql.MySQLErrName[mysql.ErrInvalidHashKeyFlag]) + // ErrInvalidListIndex used by structure + ErrInvalidListIndex = terror.ClassStructure.New(mysql.ErrInvalidListIndex, mysql.MySQLErrName[mysql.ErrInvalidListIndex]) + // ErrInvalidListMetaData used by structure + ErrInvalidListMetaData = terror.ClassStructure.New(mysql.ErrInvalidListMetaData, mysql.MySQLErrName[mysql.ErrInvalidListMetaData]) + // ErrWriteOnSnapshot used by structure + ErrWriteOnSnapshot = terror.ClassStructure.New(mysql.ErrWriteOnSnapshot, mysql.MySQLErrName[mysql.ErrWriteOnSnapshot]) +) + +// NewStructure creates a TxStructure with Retriever, RetrieverMutator and key prefix. +func NewStructure(reader kv.Retriever, readWriter kv.RetrieverMutator, prefix []byte) *TxStructure { + return &TxStructure{ + reader: reader, + readWriter: readWriter, + prefix: prefix, + } +} + +// TxStructure supports some simple data structures like string, hash, list, etc... and +// you can use these in a transaction. +type TxStructure struct { + reader kv.Retriever + readWriter kv.RetrieverMutator + prefix []byte +} + +func init() { + // Register terror to mysql error map. 
+ mySQLErrCodes := map[terror.ErrCode]uint16{ + mysql.ErrInvalidHashKeyFlag: mysql.ErrInvalidHashKeyFlag, + mysql.ErrInvalidListIndex: mysql.ErrInvalidListIndex, + mysql.ErrInvalidListMetaData: mysql.ErrInvalidListMetaData, + mysql.ErrWriteOnSnapshot: mysql.ErrWriteOnSnapshot, + } + terror.ErrClassToMySQLCodes[terror.ClassStructure] = mySQLErrCodes +} diff --git a/structure/structure_test.go b/structure/structure_test.go new file mode 100644 index 0000000..dd2bba2 --- /dev/null +++ b/structure/structure_test.go @@ -0,0 +1,412 @@ +// Copyright 2015 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package structure_test + +import ( + "context" + "testing" + + . "github.com/pingcap/check" + "github.com/pingcap/tidb/kv" + "github.com/pingcap/tidb/parser/mysql" + "github.com/pingcap/tidb/parser/terror" + "github.com/pingcap/tidb/store/mockstore" + "github.com/pingcap/tidb/structure" + "github.com/pingcap/tidb/util/testleak" +) + +func TestTxStructure(t *testing.T) { + CustomVerboseFlag = true + TestingT(t) +} + +var _ = Suite(&testTxStructureSuite{}) + +type testTxStructureSuite struct { + store kv.Storage +} + +func (s *testTxStructureSuite) SetUpSuite(c *C) { + testleak.BeforeTest() + store, err := mockstore.NewMockTikvStore() + c.Assert(err, IsNil) + s.store = store +} + +func (s *testTxStructureSuite) TearDownSuite(c *C) { + err := s.store.Close() + c.Assert(err, IsNil) + testleak.AfterTest(c)() +} + +func (s *testTxStructureSuite) TestString(c *C) { + txn, err := s.store.Begin() + c.Assert(err, IsNil) + defer txn.Rollback() + + tx := structure.NewStructure(txn, txn, []byte{0x00}) + + key := []byte("a") + value := []byte("1") + err = tx.Set(key, value) + c.Assert(err, IsNil) + + v, err := tx.Get(key) + c.Assert(err, IsNil) + c.Assert(v, DeepEquals, value) + + n, err := tx.Inc(key, 1) + c.Assert(err, IsNil) + c.Assert(n, Equals, int64(2)) + + v, err = tx.Get(key) + c.Assert(err, IsNil) + c.Assert(v, DeepEquals, []byte("2")) + + n, err = tx.GetInt64(key) + c.Assert(err, IsNil) + c.Assert(n, Equals, int64(2)) + + err = tx.Clear(key) + c.Assert(err, IsNil) + + v, err = tx.Get(key) + c.Assert(err, IsNil) + c.Assert(v, IsNil) + + tx1 := structure.NewStructure(txn, nil, []byte{0x01}) + err = tx1.Set(key, value) + c.Assert(err, NotNil) + + _, err = tx1.Inc(key, 1) + c.Assert(err, NotNil) + + err = tx1.Clear(key) + c.Assert(err, NotNil) + + err = txn.Commit(context.Background()) + c.Assert(err, IsNil) +} + +func (s *testTxStructureSuite) TestList(c *C) { + txn, err := s.store.Begin() + c.Assert(err, IsNil) + defer txn.Rollback() + + tx := structure.NewStructure(txn, txn, []byte{0x00}) + + key := []byte("a") + err = tx.LPush(key, []byte("3"), []byte("2"), []byte("1")) + c.Assert(err, IsNil) + + // Test LGetAll. 
+ err = tx.LPush(key, []byte("11")) + c.Assert(err, IsNil) + values, err := tx.LGetAll(key) + c.Assert(err, IsNil) + c.Assert(values, DeepEquals, [][]byte{[]byte("3"), []byte("2"), []byte("1"), []byte("11")}) + value, err := tx.LPop(key) + c.Assert(err, IsNil) + c.Assert(value, DeepEquals, []byte("11")) + + l, err := tx.LLen(key) + c.Assert(err, IsNil) + c.Assert(l, Equals, int64(3)) + + value, err = tx.LIndex(key, 1) + c.Assert(err, IsNil) + c.Assert(value, DeepEquals, []byte("2")) + + err = tx.LSet(key, 1, []byte("4")) + c.Assert(err, IsNil) + + value, err = tx.LIndex(key, 1) + c.Assert(err, IsNil) + c.Assert(value, DeepEquals, []byte("4")) + + err = tx.LSet(key, 1, []byte("2")) + c.Assert(err, IsNil) + + err = tx.LSet(key, 100, []byte("2")) + c.Assert(err, NotNil) + + value, err = tx.LIndex(key, -1) + c.Assert(err, IsNil) + c.Assert(value, DeepEquals, []byte("3")) + + value, err = tx.LPop(key) + c.Assert(err, IsNil) + c.Assert(value, DeepEquals, []byte("1")) + + l, err = tx.LLen(key) + c.Assert(err, IsNil) + c.Assert(l, Equals, int64(2)) + + err = tx.RPush(key, []byte("4")) + c.Assert(err, IsNil) + + l, err = tx.LLen(key) + c.Assert(err, IsNil) + c.Assert(l, Equals, int64(3)) + + value, err = tx.LIndex(key, -1) + c.Assert(err, IsNil) + c.Assert(value, DeepEquals, []byte("4")) + + value, err = tx.RPop(key) + c.Assert(err, IsNil) + c.Assert(value, DeepEquals, []byte("4")) + + value, err = tx.RPop(key) + c.Assert(err, IsNil) + c.Assert(value, DeepEquals, []byte("3")) + + value, err = tx.RPop(key) + c.Assert(err, IsNil) + c.Assert(value, DeepEquals, []byte("2")) + + l, err = tx.LLen(key) + c.Assert(err, IsNil) + c.Assert(l, Equals, int64(0)) + + err = tx.LPush(key, []byte("1")) + c.Assert(err, IsNil) + + err = tx.LClear(key) + c.Assert(err, IsNil) + + l, err = tx.LLen(key) + c.Assert(err, IsNil) + c.Assert(l, Equals, int64(0)) + + tx1 := structure.NewStructure(txn, nil, []byte{0x01}) + err = tx1.LPush(key, []byte("1")) + c.Assert(err, NotNil) + + _, err = tx1.RPop(key) + c.Assert(err, NotNil) + + err = tx1.LSet(key, 1, []byte("2")) + c.Assert(err, NotNil) + + err = tx1.LClear(key) + c.Assert(err, NotNil) + + err = txn.Commit(context.Background()) + c.Assert(err, IsNil) +} + +func (s *testTxStructureSuite) TestHash(c *C) { + txn, err := s.store.Begin() + c.Assert(err, IsNil) + defer txn.Rollback() + + tx := structure.NewStructure(txn, txn, []byte{0x00}) + + key := []byte("a") + + tx.EncodeHashAutoIDKeyValue(key, key, 5) + + err = tx.HSet(key, []byte("1"), []byte("1")) + c.Assert(err, IsNil) + + err = tx.HSet(key, []byte("2"), []byte("2")) + c.Assert(err, IsNil) + + l, err := tx.HLen(key) + c.Assert(err, IsNil) + c.Assert(l, Equals, int64(2)) + + value, err := tx.HGet(key, []byte("1")) + c.Assert(err, IsNil) + c.Assert(value, DeepEquals, []byte("1")) + + value, err = tx.HGet(key, []byte("fake")) + c.Assert(err, IsNil) + c.Assert(value, IsNil) + + keys, err := tx.HKeys(key) + c.Assert(err, IsNil) + c.Assert(keys, DeepEquals, [][]byte{[]byte("1"), []byte("2")}) + + res, err := tx.HGetAll(key) + c.Assert(err, IsNil) + c.Assert(res, DeepEquals, []structure.HashPair{ + {Field: []byte("1"), Value: []byte("1")}, + {Field: []byte("2"), Value: []byte("2")}}) + + res, err = tx.HGetLastN(key, 1) + c.Assert(err, IsNil) + c.Assert(res, DeepEquals, []structure.HashPair{ + {Field: []byte("2"), Value: []byte("2")}}) + + res, err = tx.HGetLastN(key, 2) + c.Assert(err, IsNil) + c.Assert(res, DeepEquals, []structure.HashPair{ + {Field: []byte("2"), Value: []byte("2")}, + {Field: []byte("1"), Value: 
[]byte("1")}}) + + err = tx.HDel(key, []byte("1")) + c.Assert(err, IsNil) + + value, err = tx.HGet(key, []byte("1")) + c.Assert(err, IsNil) + c.Assert(value, IsNil) + + l, err = tx.HLen(key) + c.Assert(err, IsNil) + c.Assert(l, Equals, int64(1)) + + n, err := tx.HInc(key, []byte("1"), 1) + c.Assert(err, IsNil) + c.Assert(n, Equals, int64(1)) + + l, err = tx.HLen(key) + c.Assert(err, IsNil) + c.Assert(l, Equals, int64(2)) + + // Test set new value which equals to old value. + value, err = tx.HGet(key, []byte("1")) + c.Assert(err, IsNil) + c.Assert(value, DeepEquals, []byte("1")) + + err = tx.HSet(key, []byte("1"), []byte("1")) + c.Assert(err, IsNil) + + value, err = tx.HGet(key, []byte("1")) + c.Assert(err, IsNil) + c.Assert(value, DeepEquals, []byte("1")) + + l, err = tx.HLen(key) + c.Assert(err, IsNil) + c.Assert(l, Equals, int64(2)) + + n, err = tx.HInc(key, []byte("1"), 1) + c.Assert(err, IsNil) + c.Assert(n, Equals, int64(2)) + + l, err = tx.HLen(key) + c.Assert(err, IsNil) + c.Assert(l, Equals, int64(2)) + + n, err = tx.HInc(key, []byte("1"), 1) + c.Assert(err, IsNil) + c.Assert(n, Equals, int64(3)) + + l, err = tx.HLen(key) + c.Assert(err, IsNil) + c.Assert(l, Equals, int64(2)) + + n, err = tx.HGetInt64(key, []byte("1")) + c.Assert(err, IsNil) + c.Assert(n, Equals, int64(3)) + + l, err = tx.HLen(key) + c.Assert(err, IsNil) + c.Assert(l, Equals, int64(2)) + + err = tx.HClear(key) + c.Assert(err, IsNil) + + l, err = tx.HLen(key) + c.Assert(err, IsNil) + c.Assert(l, Equals, int64(0)) + + err = tx.HDel(key, []byte("fake_key")) + c.Assert(err, IsNil) + + // Test set nil value. + value, err = tx.HGet(key, []byte("nil_key")) + c.Assert(err, IsNil) + c.Assert(value, IsNil) + + l, err = tx.HLen(key) + c.Assert(err, IsNil) + c.Assert(l, Equals, int64(0)) + + err = tx.HSet(key, []byte("nil_key"), nil) + c.Assert(err, IsNil) + + l, err = tx.HLen(key) + c.Assert(err, IsNil) + c.Assert(l, Equals, int64(0)) + + err = tx.HSet(key, []byte("nil_key"), []byte("1")) + c.Assert(err, IsNil) + + l, err = tx.HLen(key) + c.Assert(err, IsNil) + c.Assert(l, Equals, int64(1)) + + value, err = tx.HGet(key, []byte("nil_key")) + c.Assert(err, IsNil) + c.Assert(value, DeepEquals, []byte("1")) + + err = tx.HSet(key, []byte("nil_key"), nil) + c.Assert(err, NotNil) + + l, err = tx.HLen(key) + c.Assert(err, IsNil) + c.Assert(l, Equals, int64(1)) + + value, err = tx.HGet(key, []byte("nil_key")) + c.Assert(err, IsNil) + c.Assert(value, DeepEquals, []byte("1")) + + err = tx.HSet(key, []byte("nil_key"), []byte("2")) + c.Assert(err, IsNil) + + l, err = tx.HLen(key) + c.Assert(err, IsNil) + c.Assert(l, Equals, int64(1)) + + value, err = tx.HGet(key, []byte("nil_key")) + c.Assert(err, IsNil) + c.Assert(value, DeepEquals, []byte("2")) + + tx1 := structure.NewStructure(txn, nil, []byte{0x01}) + _, err = tx1.HInc(key, []byte("1"), 1) + c.Assert(err, NotNil) + + err = tx1.HDel(key, []byte("1")) + c.Assert(err, NotNil) + + err = txn.Commit(context.Background()) + c.Assert(err, IsNil) + + err = kv.RunInNewTxn(s.store, false, func(txn kv.Transaction) error { + t := structure.NewStructure(txn, txn, []byte{0x00}) + err = t.Set(key, []byte("abc")) + c.Assert(err, IsNil) + + value, err = t.Get(key) + c.Assert(err, IsNil) + c.Assert(value, DeepEquals, []byte("abc")) + return nil + }) + c.Assert(err, IsNil) +} + +func (*testTxStructureSuite) TestError(c *C) { + kvErrs := []*terror.Error{ + structure.ErrInvalidHashKeyFlag, + structure.ErrInvalidListIndex, + structure.ErrInvalidListMetaData, + structure.ErrWriteOnSnapshot, + } + for _, err 
:= range kvErrs { + code := err.ToSQLError().Code + c.Assert(code != mysql.ErrUnknown && code == uint16(err.Code()), IsTrue, Commentf("err: %v", err)) + } +} diff --git a/structure/type.go b/structure/type.go new file mode 100644 index 0000000..3411aad --- /dev/null +++ b/structure/type.go @@ -0,0 +1,121 @@ +// Copyright 2015 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package structure + +import ( + "bytes" + + "github.com/pingcap/errors" + "github.com/pingcap/tidb/kv" + "github.com/pingcap/tidb/util/codec" +) + +// TypeFlag is for data structure meta/data flag. +type TypeFlag byte + +const ( + // StringMeta is the flag for string meta. + StringMeta TypeFlag = 'S' + // StringData is the flag for string data. + StringData TypeFlag = 's' + // HashMeta is the flag for hash meta. + HashMeta TypeFlag = 'H' + // HashData is the flag for hash data. + HashData TypeFlag = 'h' + // ListMeta is the flag for list meta. + ListMeta TypeFlag = 'L' + // ListData is the flag for list data. + ListData TypeFlag = 'l' +) + +func (t *TxStructure) encodeStringDataKey(key []byte) kv.Key { + // for codec Encode, we may add extra bytes data, so here and following encode + // we will use extra length like 4 for a little optimization. + ek := make([]byte, 0, len(t.prefix)+len(key)+24) + ek = append(ek, t.prefix...) + ek = codec.EncodeBytes(ek, key) + return codec.EncodeUint(ek, uint64(StringData)) +} + +func (t *TxStructure) encodeHashMetaKey(key []byte) kv.Key { + ek := make([]byte, 0, len(t.prefix)+len(key)+24) + ek = append(ek, t.prefix...) + ek = codec.EncodeBytes(ek, key) + return codec.EncodeUint(ek, uint64(HashMeta)) +} + +func (t *TxStructure) encodeHashDataKey(key []byte, field []byte) kv.Key { + ek := make([]byte, 0, len(t.prefix)+len(key)+len(field)+30) + ek = append(ek, t.prefix...) + ek = codec.EncodeBytes(ek, key) + ek = codec.EncodeUint(ek, uint64(HashData)) + return codec.EncodeBytes(ek, field) +} + +// EncodeHashDataKey exports for tests. +func (t *TxStructure) EncodeHashDataKey(key []byte, field []byte) kv.Key { + return t.encodeHashDataKey(key, field) +} + +func (t *TxStructure) decodeHashDataKey(ek kv.Key) ([]byte, []byte, error) { + var ( + key []byte + field []byte + err error + tp uint64 + ) + + if !bytes.HasPrefix(ek, t.prefix) { + return nil, nil, errors.New("invalid encoded hash data key prefix") + } + + ek = ek[len(t.prefix):] + + ek, key, err = codec.DecodeBytes(ek, nil) + if err != nil { + return nil, nil, errors.Trace(err) + } + + ek, tp, err = codec.DecodeUint(ek) + if err != nil { + return nil, nil, errors.Trace(err) + } else if TypeFlag(tp) != HashData { + return nil, nil, ErrInvalidHashKeyFlag.GenWithStack("invalid encoded hash data key flag %c", byte(tp)) + } + + _, field, err = codec.DecodeBytes(ek, nil) + return key, field, errors.Trace(err) +} + +func (t *TxStructure) hashDataKeyPrefix(key []byte) kv.Key { + ek := make([]byte, 0, len(t.prefix)+len(key)+24) + ek = append(ek, t.prefix...) 
+ ek = codec.EncodeBytes(ek, key) + return codec.EncodeUint(ek, uint64(HashData)) +} + +func (t *TxStructure) encodeListMetaKey(key []byte) kv.Key { + ek := make([]byte, 0, len(t.prefix)+len(key)+24) + ek = append(ek, t.prefix...) + ek = codec.EncodeBytes(ek, key) + return codec.EncodeUint(ek, uint64(ListMeta)) +} + +func (t *TxStructure) encodeListDataKey(key []byte, index int64) kv.Key { + ek := make([]byte, 0, len(t.prefix)+len(key)+36) + ek = append(ek, t.prefix...) + ek = codec.EncodeBytes(ek, key) + ek = codec.EncodeUint(ek, uint64(ListData)) + return codec.EncodeInt(ek, index) +} diff --git a/table/column.go b/table/column.go new file mode 100644 index 0000000..e571bdb --- /dev/null +++ b/table/column.go @@ -0,0 +1,394 @@ +// Copyright 2016 The ql Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSES/QL-LICENSE file. + +// Copyright 2015 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package table + +import ( + "strconv" + "strings" + "unicode/utf8" + + "github.com/pingcap/tidb/parser/ast" + "github.com/pingcap/tidb/parser/charset" + "github.com/pingcap/tidb/parser/model" + "github.com/pingcap/tidb/parser/mysql" + field_types "github.com/pingcap/tidb/parser/types" + "github.com/pingcap/tidb/sessionctx" + "github.com/pingcap/tidb/sessionctx/stmtctx" + "github.com/pingcap/tidb/types" + "github.com/pingcap/tidb/util/hack" + "github.com/pingcap/tidb/util/logutil" + "go.uber.org/zap" +) + +// Column provides meta data describing a table column. +type Column struct { + *model.ColumnInfo + // If this column is a generated column, the expression will be stored here. + GeneratedExpr ast.ExprNode +} + +// String implements fmt.Stringer interface. +func (c *Column) String() string { + ans := []string{c.Name.O, types.TypeToStr(c.Tp, c.Charset)} + if mysql.HasAutoIncrementFlag(c.Flag) { + ans = append(ans, "AUTO_INCREMENT") + } + if mysql.HasNotNullFlag(c.Flag) { + ans = append(ans, "NOT NULL") + } + return strings.Join(ans, " ") +} + +// ToInfo casts Column to model.ColumnInfo +// NOTE: DONT modify return value. +func (c *Column) ToInfo() *model.ColumnInfo { + return c.ColumnInfo +} + +// FindCol finds column in cols by name. +func FindCol(cols []*Column, name string) *Column { + for _, col := range cols { + if strings.EqualFold(col.Name.O, name) { + return col + } + } + return nil +} + +// ToColumn converts a *model.ColumnInfo to *Column. +func ToColumn(col *model.ColumnInfo) *Column { + return &Column{ + col, + nil, + } +} + +// FindCols finds columns in cols by names. +// If pkIsHandle is false and name is ExtraHandleName, the extra handle column will be added. 
+func FindCols(cols []*Column, names []string, pkIsHandle bool) ([]*Column, error) { + var rcols []*Column + for _, name := range names { + col := FindCol(cols, name) + if col != nil { + rcols = append(rcols, col) + } else if name == model.ExtraHandleName.L && !pkIsHandle { + col := &Column{} + col.ColumnInfo = model.NewExtraHandleColInfo() + col.ColumnInfo.Offset = len(cols) + rcols = append(rcols, col) + } else { + return nil, errUnknownColumn.GenWithStack("unknown column %s", name) + } + } + + return rcols, nil +} + +// FindOnUpdateCols finds columns which have OnUpdateNow flag. +func FindOnUpdateCols(cols []*Column) []*Column { + var rcols []*Column + for _, col := range cols { + if mysql.HasOnUpdateNowFlag(col.Flag) { + rcols = append(rcols, col) + } + } + + return rcols +} + +// truncateTrailingSpaces trancates trailing spaces for CHAR[(M)] column. +// fix: https://github.com/pingcap/tidb/issues/3660 +func truncateTrailingSpaces(v *types.Datum) { + if v.Kind() == types.KindNull { + return + } + b := v.GetBytes() + length := len(b) + for length > 0 && b[length-1] == ' ' { + length-- + } + b = b[:length] + str := string(hack.String(b)) + v.SetString(str) +} + +func handleWrongUtf8Value(ctx sessionctx.Context, col *model.ColumnInfo, casted *types.Datum, str string, i int) (types.Datum, error) { + sc := ctx.GetSessionVars().StmtCtx + err := ErrTruncatedWrongValueForField.FastGen("incorrect utf8 value %x(%s) for column %s", casted.GetBytes(), str, col.Name) + logutil.BgLogger().Error("incorrect UTF-8 value", zap.Uint64("conn", ctx.GetSessionVars().ConnectionID), zap.Error(err)) + // Truncate to valid utf8 string. + truncateVal := types.NewStringDatum(str[:i]) + err = sc.HandleTruncate(err) + return truncateVal, err +} + +// CastValue casts a value based on column type. +// TODO: change the third arg to TypeField. Not pass ColumnInfo. +func CastValue(ctx sessionctx.Context, val types.Datum, col *model.ColumnInfo) (casted types.Datum, err error) { + sc := ctx.GetSessionVars().StmtCtx + casted, err = val.ConvertTo(sc, &col.FieldType) + // TODO: make sure all truncate errors are handled by ConvertTo. + if types.ErrTruncated.Equal(err) { + str, err1 := val.ToString() + if err1 != nil { + logutil.BgLogger().Warn("Datum ToString failed", zap.Stringer("Datum", val), zap.Error(err1)) + } + err = sc.HandleTruncate(types.ErrTruncatedWrongVal.GenWithStackByArgs(col.FieldType.CompactStr(), str)) + } else { + err = sc.HandleTruncate(err) + } + if err != nil { + return casted, err + } + + if col.Tp == mysql.TypeString && !types.IsBinaryStr(&col.FieldType) { + truncateTrailingSpaces(&casted) + } + + if ctx.GetSessionVars().SkipUTF8Check { + return casted, nil + } + if !mysql.IsUTF8Charset(col.Charset) { + return casted, nil + } + str := casted.GetString() + utf8Charset := col.Charset == mysql.UTF8Charset + for i, w := 0, 0; i < len(str); i += w { + runeValue, width := utf8.DecodeRuneInString(str[i:]) + if runeValue == utf8.RuneError { + if strings.HasPrefix(str[i:], string(utf8.RuneError)) { + w = width + continue + } + casted, err = handleWrongUtf8Value(ctx, col, &casted, str, i) + break + } else if width > 3 && utf8Charset { + // Handle non-BMP characters. + casted, err = handleWrongUtf8Value(ctx, col, &casted, str, i) + break + } + w = width + } + + return casted, err +} + +// ColDesc describes column information like MySQL desc and show columns do. +type ColDesc struct { + Field string + Type string + // Charset is nil if the column doesn't have a charset, or a string indicating the charset name. 
+ Charset interface{} + // Collation is nil if the column doesn't have a collation, or a string indicating the collation name. + Collation interface{} + Null string + Key string + DefaultValue interface{} + Extra string + Privileges string + Comment string +} + +const defaultPrivileges = "select,insert,update,references" + +// NewColDesc returns a new ColDesc for a column. +func NewColDesc(col *Column) *ColDesc { + // TODO: if we have no primary key and a unique index which's columns are all not null + // we will set these columns' flag as PriKeyFlag + // see https://dev.mysql.com/doc/refman/5.7/en/show-columns.html + // create table + name := col.Name + nullFlag := "YES" + if mysql.HasNotNullFlag(col.Flag) { + nullFlag = "NO" + } + keyFlag := "" + if mysql.HasPriKeyFlag(col.Flag) { + keyFlag = "PRI" + } else if mysql.HasUniKeyFlag(col.Flag) { + keyFlag = "UNI" + } else if mysql.HasMultipleKeyFlag(col.Flag) { + keyFlag = "MUL" + } + + extra := "" + if mysql.HasAutoIncrementFlag(col.Flag) { + extra = "auto_increment" + } else if mysql.HasOnUpdateNowFlag(col.Flag) { + //in order to match the rules of mysql 8.0.16 version + //see https://github.com/pingcap/tidb/issues/10337 + extra = "DEFAULT_GENERATED on update CURRENT_TIMESTAMP" + OptionalFsp(&col.FieldType) + } + + desc := &ColDesc{ + Field: name.O, + Type: col.GetTypeDesc(), + Charset: col.Charset, + Collation: col.Collate, + Null: nullFlag, + Key: keyFlag, + Extra: extra, + Privileges: defaultPrivileges, + Comment: col.Comment, + } + if !field_types.HasCharset(&col.ColumnInfo.FieldType) { + desc.Charset = nil + desc.Collation = nil + } + return desc +} + +// ColDescFieldNames returns the fields name in result set for desc and show columns. +func ColDescFieldNames(full bool) []string { + if full { + return []string{"Field", "Type", "Collation", "Null", "Key", "Default", "Extra", "Privileges", "Comment"} + } + return []string{"Field", "Type", "Null", "Key", "Default", "Extra"} +} + +// CheckOnce checks if there are duplicated column names in cols. +func CheckOnce(cols []*Column) error { + m := map[string]struct{}{} + for _, col := range cols { + name := col.Name + _, ok := m[name.L] + if ok { + return errDuplicateColumn.GenWithStackByArgs(name) + } + + m[name.L] = struct{}{} + } + + return nil +} + +// CheckNotNull checks if nil value set to a column with NotNull flag is set. +func (c *Column) CheckNotNull(data types.Datum) error { + if (mysql.HasNotNullFlag(c.Flag) || mysql.HasPreventNullInsertFlag(c.Flag)) && data.IsNull() { + return ErrColumnCantNull.GenWithStackByArgs(c.Name) + } + return nil +} + +// HandleBadNull handles the bad null error. +// If BadNullAsWarning is true, it will append the error as a warning, else return the error. +func (c *Column) HandleBadNull(d types.Datum, sc *stmtctx.StatementContext) (types.Datum, error) { + if err := c.CheckNotNull(d); err != nil { + if sc.BadNullAsWarning { + sc.AppendWarning(err) + return GetZeroValue(c.ToInfo()), nil + } + return types.Datum{}, err + } + return d, nil +} + +// IsPKHandleColumn checks if the column is primary key handle column. +func (c *Column) IsPKHandleColumn(tbInfo *model.TableInfo) bool { + return mysql.HasPriKeyFlag(c.Flag) && tbInfo.PKIsHandle +} + +// CheckNotNull checks if row has nil value set to a column with NotNull flag set. 
+func CheckNotNull(cols []*Column, row []types.Datum) error { + for _, c := range cols { + if err := c.CheckNotNull(row[c.Offset]); err != nil { + return err + } + } + return nil +} + +// GetColOriginDefaultValue gets default value of the column from original default value. +func GetColOriginDefaultValue(ctx sessionctx.Context, col *model.ColumnInfo) (types.Datum, error) { + return getColDefaultValue(ctx, col, col.OriginDefaultValue) +} + +// GetColDefaultValue gets default value of the column. +func GetColDefaultValue(ctx sessionctx.Context, col *model.ColumnInfo) (types.Datum, error) { + return getColDefaultValue(ctx, col, col.GetDefaultValue()) +} + +func getColDefaultValue(ctx sessionctx.Context, col *model.ColumnInfo, defaultVal interface{}) (types.Datum, error) { + if defaultVal == nil { + return getColDefaultValueFromNil(ctx, col) + } + + value, err := CastValue(ctx, types.NewDatum(defaultVal), col) + if err != nil { + return types.Datum{}, err + } + return value, nil +} + +func getColDefaultValueFromNil(ctx sessionctx.Context, col *model.ColumnInfo) (types.Datum, error) { + if !mysql.HasNotNullFlag(col.Flag) { + return types.Datum{}, nil + } + if mysql.HasAutoIncrementFlag(col.Flag) { + // Auto increment column doesn't has default value and we should not return error. + return GetZeroValue(col), nil + } + vars := ctx.GetSessionVars() + sc := vars.StmtCtx + if sc.BadNullAsWarning { + sc.AppendWarning(ErrColumnCantNull.GenWithStackByArgs(col.Name)) + return GetZeroValue(col), nil + } + if !vars.StrictSQLMode { + sc.AppendWarning(ErrNoDefaultValue.GenWithStackByArgs(col.Name)) + return GetZeroValue(col), nil + } + return types.Datum{}, ErrNoDefaultValue.GenWithStackByArgs(col.Name) +} + +// GetZeroValue gets zero value for given column type. +func GetZeroValue(col *model.ColumnInfo) types.Datum { + var d types.Datum + switch col.Tp { + case mysql.TypeTiny, mysql.TypeInt24, mysql.TypeShort, mysql.TypeLong, mysql.TypeLonglong, mysql.TypeYear: + if mysql.HasUnsignedFlag(col.Flag) { + d.SetUint64(0) + } else { + d.SetInt64(0) + } + case mysql.TypeFloat: + d.SetFloat32(0) + case mysql.TypeDouble: + d.SetFloat64(0) + case mysql.TypeString: + if col.Flen > 0 && col.Charset == charset.CharsetBin { + d.SetBytes(make([]byte, col.Flen)) + } else { + d.SetString("") + } + case mysql.TypeVarString, mysql.TypeVarchar: + d.SetString("") + case mysql.TypeBlob, mysql.TypeTinyBlob, mysql.TypeMediumBlob, mysql.TypeLongBlob: + d.SetBytes([]byte{}) + } + return d +} + +// OptionalFsp convert a FieldType.Decimal to string. +func OptionalFsp(fieldType *types.FieldType) string { + fsp := fieldType.Decimal + if fsp == 0 { + return "" + } + return "(" + strconv.Itoa(fsp) + ")" +} diff --git a/table/column_test.go b/table/column_test.go new file mode 100644 index 0000000..650c84b --- /dev/null +++ b/table/column_test.go @@ -0,0 +1,272 @@ +// Copyright 2016 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package table + +import ( + "testing" + + . 
"github.com/pingcap/check" + "github.com/pingcap/tidb/parser/charset" + "github.com/pingcap/tidb/parser/model" + "github.com/pingcap/tidb/parser/mysql" + "github.com/pingcap/tidb/sessionctx/stmtctx" + "github.com/pingcap/tidb/types" + "github.com/pingcap/tidb/util/mock" + "github.com/pingcap/tidb/util/testleak" +) + +func TestT(t *testing.T) { + CustomVerboseFlag = true + TestingT(t) +} + +func (t *testTableSuite) TestString(c *C) { + defer testleak.AfterTest(c)() + col := ToColumn(&model.ColumnInfo{ + FieldType: *types.NewFieldType(mysql.TypeTiny), + State: model.StatePublic, + }) + col.Flen = 2 + col.Decimal = 1 + col.Charset = mysql.DefaultCharset + col.Collate = mysql.DefaultCollationName + col.Flag |= mysql.ZerofillFlag | mysql.UnsignedFlag | mysql.BinaryFlag | mysql.AutoIncrementFlag | mysql.NotNullFlag + + c.Assert(col.GetTypeDesc(), Equals, "tinyint(2) unsigned zerofill") + col.ToInfo() + tbInfo := &model.TableInfo{} + c.Assert(col.IsPKHandleColumn(tbInfo), Equals, false) + tbInfo.PKIsHandle = true + col.Flag |= mysql.PriKeyFlag + c.Assert(col.IsPKHandleColumn(tbInfo), Equals, true) + + cs := col.String() + c.Assert(len(cs), Greater, 0) + + col.Tp = mysql.TypeEnum + col.Flag = 0 + col.Elems = []string{"a", "b"} + + c.Assert(col.GetTypeDesc(), Equals, "enum('a','b')") + + col.Elems = []string{"'a'", "b"} + c.Assert(col.GetTypeDesc(), Equals, "enum('''a''','b')") + + col.Tp = mysql.TypeFloat + col.Flen = 8 + col.Decimal = -1 + c.Assert(col.GetTypeDesc(), Equals, "float") + + col.Decimal = 1 + c.Assert(col.GetTypeDesc(), Equals, "float(8,1)") + + col.Tp = mysql.TypeDatetime + col.Decimal = 6 + c.Assert(col.GetTypeDesc(), Equals, "datetime(6)") + + col.Decimal = 0 + c.Assert(col.GetTypeDesc(), Equals, "datetime") + + col.Decimal = -1 + c.Assert(col.GetTypeDesc(), Equals, "datetime") +} + +func (t *testTableSuite) TestFind(c *C) { + defer testleak.AfterTest(c)() + cols := []*Column{ + newCol("a"), + newCol("b"), + newCol("c"), + } + FindCols(cols, []string{"a"}, true) + FindCols(cols, []string{"d"}, true) + cols[0].Flag |= mysql.OnUpdateNowFlag + FindOnUpdateCols(cols) +} + +func (t *testTableSuite) TestCheck(c *C) { + defer testleak.AfterTest(c)() + col := newCol("a") + col.Flag = mysql.AutoIncrementFlag + cols := []*Column{col, col} + CheckOnce(cols) + cols = cols[:1] + CheckNotNull(cols, types.MakeDatums(nil)) + cols[0].Flag |= mysql.NotNullFlag + CheckNotNull(cols, types.MakeDatums(nil)) + CheckOnce([]*Column{}) +} + +func (t *testTableSuite) TestHandleBadNull(c *C) { + col := newCol("a") + sc := new(stmtctx.StatementContext) + d, err := col.HandleBadNull(types.Datum{}, sc) + c.Assert(err, IsNil) + cmp, err := d.CompareDatum(sc, &types.Datum{}) + c.Assert(err, IsNil) + c.Assert(cmp, Equals, 0) + + col.Flag |= mysql.NotNullFlag + d, err = col.HandleBadNull(types.Datum{}, sc) + c.Assert(err, NotNil) + + sc.BadNullAsWarning = true + d, err = col.HandleBadNull(types.Datum{}, sc) + c.Assert(err, IsNil) +} + +func (t *testTableSuite) TestGetZeroValue(c *C) { + tests := []struct { + ft *types.FieldType + value types.Datum + }{ + { + types.NewFieldType(mysql.TypeLong), + types.NewIntDatum(0), + }, + { + &types.FieldType{ + Tp: mysql.TypeLonglong, + Flag: mysql.UnsignedFlag, + }, + types.NewUintDatum(0), + }, + { + types.NewFieldType(mysql.TypeFloat), + types.NewFloat32Datum(0), + }, + { + types.NewFieldType(mysql.TypeDouble), + types.NewFloat64Datum(0), + }, + { + types.NewFieldType(mysql.TypeVarchar), + types.NewStringDatum(""), + }, + { + types.NewFieldType(mysql.TypeBlob), + 
types.NewBytesDatum([]byte{}), + }, + { + &types.FieldType{ + Tp: mysql.TypeString, + Flen: 2, + Charset: charset.CharsetBin, + Collate: charset.CollationBin, + }, + types.NewDatum(make([]byte, 2)), + }, + { + &types.FieldType{ + Tp: mysql.TypeString, + Flen: 2, + Charset: charset.CharsetUTF8MB4, + Collate: charset.CollationBin, + }, + types.NewDatum(""), + }, + } + sc := new(stmtctx.StatementContext) + for _, tt := range tests { + colInfo := &model.ColumnInfo{FieldType: *tt.ft} + zv := GetZeroValue(colInfo) + c.Assert(zv.Kind(), Equals, tt.value.Kind()) + cmp, err := zv.CompareDatum(sc, &tt.value) + c.Assert(err, IsNil) + c.Assert(cmp, Equals, 0) + } +} + +func (t *testTableSuite) TestGetDefaultValue(c *C) { + ctx := mock.NewContext() + tests := []struct { + colInfo *model.ColumnInfo + strict bool + val types.Datum + err error + }{ + { + &model.ColumnInfo{ + FieldType: types.FieldType{ + Tp: mysql.TypeLonglong, + Flag: mysql.NotNullFlag, + }, + OriginDefaultValue: 1.0, + DefaultValue: 1.0, + }, + false, + types.NewIntDatum(1), + nil, + }, + { + &model.ColumnInfo{ + FieldType: types.FieldType{ + Tp: mysql.TypeLonglong, + Flag: mysql.NotNullFlag, + }, + }, + false, + types.NewIntDatum(0), + nil, + }, + { + &model.ColumnInfo{ + FieldType: types.FieldType{ + Tp: mysql.TypeLonglong, + }, + }, + false, + types.Datum{}, + nil, + }, + { + &model.ColumnInfo{ + FieldType: types.FieldType{ + Tp: mysql.TypeLonglong, + Flag: mysql.NotNullFlag | mysql.AutoIncrementFlag, + }, + }, + true, + types.NewIntDatum(0), + nil, + }, + } + + for _, tt := range tests { + ctx.GetSessionVars().StmtCtx.BadNullAsWarning = !tt.strict + val, err := GetColDefaultValue(ctx, tt.colInfo) + if err != nil { + c.Assert(tt.err, NotNil, Commentf("%v", err)) + continue + } + c.Assert(val, DeepEquals, tt.val) + } + + for _, tt := range tests { + ctx.GetSessionVars().StmtCtx.BadNullAsWarning = !tt.strict + val, err := GetColOriginDefaultValue(ctx, tt.colInfo) + if err != nil { + c.Assert(tt.err, NotNil, Commentf("%v", err)) + continue + } + c.Assert(val, DeepEquals, tt.val) + } +} + +func newCol(name string) *Column { + return ToColumn(&model.ColumnInfo{ + Name: model.NewCIStr(name), + State: model.StatePublic, + }) +} diff --git a/table/index.go b/table/index.go new file mode 100644 index 0000000..ede6e03 --- /dev/null +++ b/table/index.go @@ -0,0 +1,90 @@ +// Copyright 2016 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package table + +import ( + "context" + + "github.com/pingcap/tidb/kv" + "github.com/pingcap/tidb/parser/model" + "github.com/pingcap/tidb/sessionctx" + "github.com/pingcap/tidb/sessionctx/stmtctx" + "github.com/pingcap/tidb/types" +) + +// IndexIterator is the interface for iterator of index data on KV store. +type IndexIterator interface { + Next() (k []types.Datum, h int64, err error) + Close() +} + +// CreateIdxOpt contains the options will be used when creating an index. +type CreateIdxOpt struct { + SkipHandleCheck bool // If true, skip the handle constraint check. + SkipCheck bool // If true, skip all the unique indices constraint check. 
+ Ctx context.Context + Untouched bool // If true, the index key/value is no need to commit. +} + +// CreateIdxOptFunc is defined for the Create() method of Index interface. +// Here is a blog post about how to use this pattern: +// https://dave.cheney.net/2014/10/17/functional-options-for-friendly-apis +type CreateIdxOptFunc func(*CreateIdxOpt) + +// SkipHandleCheck is a defined value of CreateIdxFunc. +var SkipHandleCheck CreateIdxOptFunc = func(opt *CreateIdxOpt) { + opt.SkipHandleCheck = true +} + +// SkipCheck is a defined value of CreateIdxFunc. +var SkipCheck CreateIdxOptFunc = func(opt *CreateIdxOpt) { + opt.SkipCheck = true +} + +// IndexIsUntouched uses to indicate the index kv is untouched. +var IndexIsUntouched CreateIdxOptFunc = func(opt *CreateIdxOpt) { + opt.Untouched = true +} + +// WithCtx returns a CreateIdxFunc. +// This option is used to pass context.Context. +func WithCtx(ctx context.Context) CreateIdxOptFunc { + return func(opt *CreateIdxOpt) { + opt.Ctx = ctx + } +} + +// Index is the interface for index data on KV store. +type Index interface { + // Meta returns IndexInfo. + Meta() *model.IndexInfo + // Create supports insert into statement. + Create(ctx sessionctx.Context, rm kv.RetrieverMutator, indexedValues []types.Datum, h int64, opts ...CreateIdxOptFunc) (int64, error) + // Delete supports delete from statement. + Delete(sc *stmtctx.StatementContext, m kv.Mutator, indexedValues []types.Datum, h int64) error + // Drop supports drop table, drop index statements. + Drop(rm kv.RetrieverMutator) error + // Exist supports check index exists or not. + Exist(sc *stmtctx.StatementContext, rm kv.RetrieverMutator, indexedValues []types.Datum, h int64) (bool, int64, error) + // GenIndexKey generates an index key. + GenIndexKey(sc *stmtctx.StatementContext, indexedValues []types.Datum, h int64, buf []byte) (key []byte, distinct bool, err error) + // Seek supports where clause. + Seek(sc *stmtctx.StatementContext, r kv.Retriever, indexedValues []types.Datum) (iter IndexIterator, hit bool, err error) + // SeekFirst supports aggregate min and ascend order by. + SeekFirst(r kv.Retriever) (iter IndexIterator, err error) + // FetchValues fetched index column values in a row. + // Param columns is a reused buffer, if it is not nil, FetchValues will fill the index values in it, + // and return the buffer, if it is nil, FetchValues will allocate the buffer instead. + FetchValues(row []types.Datum, columns []types.Datum) ([]types.Datum, error) +} diff --git a/table/table.go b/table/table.go new file mode 100644 index 0000000..fe7aec5 --- /dev/null +++ b/table/table.go @@ -0,0 +1,258 @@ +// Copyright 2013 The ql Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSES/QL-LICENSE file. + +// Copyright 2015 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. 
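Editor's note: the CreateIdxOptFunc values defined above follow the functional-options pattern from the linked post. As a rough sketch (not part of this patch), a caller combines them by letting each option mutate a zero-value CreateIdxOpt, which is also how the index implementation consumes the opts slice:

package main

import (
	"context"
	"fmt"

	"github.com/pingcap/tidb/table"
)

func main() {
	// Start from the zero-value options struct and apply each option in order,
	// mirroring the `for _, fn := range opts { fn(&opt) }` loop in index.Create.
	var opt table.CreateIdxOpt
	for _, f := range []table.CreateIdxOptFunc{
		table.WithCtx(context.Background()),
		table.IndexIsUntouched,
		table.SkipHandleCheck,
	} {
		f(&opt)
	}
	fmt.Println(opt.Untouched, opt.SkipHandleCheck, opt.Ctx != nil) // true true true
}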
+ +package table + +import ( + "context" + + "github.com/pingcap/tidb/kv" + "github.com/pingcap/tidb/meta/autoid" + "github.com/pingcap/tidb/parser/model" + "github.com/pingcap/tidb/parser/mysql" + "github.com/pingcap/tidb/parser/terror" + "github.com/pingcap/tidb/sessionctx" + "github.com/pingcap/tidb/types" +) + +// Type , the type of table, store data in different ways. +type Type int16 + +const ( + // NormalTable , store data in tikv, mocktikv and so on. + NormalTable Type = iota + // VirtualTable , store no data, just extract data from the memory struct. + VirtualTable +) + +// IsNormalTable checks whether the table is a normal table type. +func (tp Type) IsNormalTable() bool { + return tp == NormalTable +} + +// IsVirtualTable checks whether the table is a virtual table type. +func (tp Type) IsVirtualTable() bool { + return tp == VirtualTable +} + +const ( + // DirtyTableAddRow is the constant for dirty table operation type. + DirtyTableAddRow = iota + // DirtyTableDeleteRow is the constant for dirty table operation type. + DirtyTableDeleteRow +) + +var ( + // ErrColumnCantNull is used for inserting null to a not null column. + ErrColumnCantNull = terror.ClassTable.New(mysql.ErrBadNull, mysql.MySQLErrName[mysql.ErrBadNull]) + errUnknownColumn = terror.ClassTable.New(mysql.ErrBadField, mysql.MySQLErrName[mysql.ErrBadField]) + errDuplicateColumn = terror.ClassTable.New(mysql.ErrFieldSpecifiedTwice, mysql.MySQLErrName[mysql.ErrFieldSpecifiedTwice]) + + errGetDefaultFailed = terror.ClassTable.New(mysql.ErrFieldGetDefaultFailed, mysql.MySQLErrName[mysql.ErrFieldGetDefaultFailed]) + + // ErrNoDefaultValue is used when insert a row, the column value is not given, and the column has not null flag + // and it doesn't have a default value. + ErrNoDefaultValue = terror.ClassTable.New(mysql.ErrNoDefaultForField, mysql.MySQLErrName[mysql.ErrNoDefaultForField]) + // ErrIndexOutBound returns for index column offset out of bound. + ErrIndexOutBound = terror.ClassTable.New(mysql.ErrIndexOutBound, mysql.MySQLErrName[mysql.ErrIndexOutBound]) + // ErrUnsupportedOp returns for unsupported operation. + ErrUnsupportedOp = terror.ClassTable.New(mysql.ErrUnsupportedOp, mysql.MySQLErrName[mysql.ErrUnsupportedOp]) + // ErrRowNotFound returns for row not found. + ErrRowNotFound = terror.ClassTable.New(mysql.ErrRowNotFound, mysql.MySQLErrName[mysql.ErrRowNotFound]) + // ErrTableStateCantNone returns for table none state. + ErrTableStateCantNone = terror.ClassTable.New(mysql.ErrTableStateCantNone, mysql.MySQLErrName[mysql.ErrTableStateCantNone]) + // ErrColumnStateCantNone returns for column none state. + ErrColumnStateCantNone = terror.ClassTable.New(mysql.ErrColumnStateCantNone, mysql.MySQLErrName[mysql.ErrColumnStateCantNone]) + // ErrColumnStateNonPublic returns for column non-public state. + ErrColumnStateNonPublic = terror.ClassTable.New(mysql.ErrColumnStateNonPublic, mysql.MySQLErrName[mysql.ErrColumnStateNonPublic]) + // ErrIndexStateCantNone returns for index none state. + ErrIndexStateCantNone = terror.ClassTable.New(mysql.ErrIndexStateCantNone, mysql.MySQLErrName[mysql.ErrIndexStateCantNone]) + // ErrInvalidRecordKey returns for invalid record key. + ErrInvalidRecordKey = terror.ClassTable.New(mysql.ErrInvalidRecordKey, mysql.MySQLErrName[mysql.ErrInvalidRecordKey]) + // ErrTruncatedWrongValueForField returns for truncate wrong value for field. 
+ ErrTruncatedWrongValueForField = terror.ClassTable.New(mysql.ErrTruncatedWrongValueForField, mysql.MySQLErrName[mysql.ErrTruncatedWrongValueForField]) + // ErrUnknownPartition returns unknown partition error. + ErrUnknownPartition = terror.ClassTable.New(mysql.ErrUnknownPartition, mysql.MySQLErrName[mysql.ErrUnknownPartition]) + // ErrNoPartitionForGivenValue returns table has no partition for value. + ErrNoPartitionForGivenValue = terror.ClassTable.New(mysql.ErrNoPartitionForGivenValue, mysql.MySQLErrName[mysql.ErrNoPartitionForGivenValue]) + // ErrLockOrActiveTransaction returns when execute unsupported statement in a lock session or an active transaction. + ErrLockOrActiveTransaction = terror.ClassTable.New(mysql.ErrLockOrActiveTransaction, mysql.MySQLErrName[mysql.ErrLockOrActiveTransaction]) +) + +// RecordIterFunc is used for low-level record iteration. +type RecordIterFunc func(h int64, rec []types.Datum, cols []*Column) (more bool, err error) + +// AddRecordOpt contains the options will be used when adding a record. +type AddRecordOpt struct { + CreateIdxOpt + IsUpdate bool +} + +// AddRecordOption is defined for the AddRecord() method of the Table interface. +type AddRecordOption interface { + ApplyOn(*AddRecordOpt) +} + +// ApplyOn implements the AddRecordOption interface, so any CreateIdxOptFunc +// can be passed as the optional argument to the table.AddRecord method. +func (f CreateIdxOptFunc) ApplyOn(opt *AddRecordOpt) { + f(&opt.CreateIdxOpt) +} + +// IsUpdate is a defined value for AddRecordOptFunc. +var IsUpdate AddRecordOption = isUpdate{} + +type isUpdate struct{} + +func (i isUpdate) ApplyOn(opt *AddRecordOpt) { + opt.IsUpdate = true +} + +// Table is used to retrieve and modify rows in table. +type Table interface { + // IterRecords iterates records in the table and calls fn. + IterRecords(ctx sessionctx.Context, startKey kv.Key, cols []*Column, fn RecordIterFunc) error + + // RowWithCols returns a row that contains the given cols. + RowWithCols(ctx sessionctx.Context, h int64, cols []*Column) ([]types.Datum, error) + + // Row returns a row for all columns. + Row(ctx sessionctx.Context, h int64) ([]types.Datum, error) + + // Cols returns the columns of the table which is used in select. + Cols() []*Column + + // WritableCols returns columns of the table in writable states. + // Writable states includes Public, WriteOnly, WriteOnlyReorganization. + WritableCols() []*Column + + // Indices returns the indices of the table. + Indices() []Index + + // WritableIndices returns write-only and public indices of the table. + WritableIndices() []Index + + // DeletableIndices returns delete-only, write-only and public indices of the table. + DeletableIndices() []Index + + // RecordPrefix returns the record key prefix. + RecordPrefix() kv.Key + + // IndexPrefix returns the index key prefix. + IndexPrefix() kv.Key + + // FirstKey returns the first key. + FirstKey() kv.Key + + // RecordKey returns the key in KV storage for the row. + RecordKey(h int64) kv.Key + + // AddRecord inserts a row which should contain only public columns + AddRecord(ctx sessionctx.Context, r []types.Datum, opts ...AddRecordOption) (recordID int64, err error) + + // UpdateRecord updates a row which should contain only writable columns. + UpdateRecord(ctx sessionctx.Context, h int64, currData, newData []types.Datum, touched []bool) error + + // RemoveRecord removes a row in the table. + RemoveRecord(ctx sessionctx.Context, h int64, r []types.Datum) error + + // AllocHandle allocates a handle for a new row. 
+ AllocHandle(ctx sessionctx.Context) (int64, error) + + // AllocHandleIDs allocates multiple handle for rows. + AllocHandleIDs(ctx sessionctx.Context, n uint64) (int64, int64, error) + + // Allocator returns Allocator. + Allocator(ctx sessionctx.Context) autoid.Allocator + + // RebaseAutoID rebases the auto_increment ID base. + // If allocIDs is true, it will allocate some IDs and save to the cache. + // If allocIDs is false, it will not allocate IDs. + RebaseAutoID(ctx sessionctx.Context, newBase int64, allocIDs bool) error + + // Meta returns TableInfo. + Meta() *model.TableInfo + + // Seek returns the handle greater or equal to h. + Seek(ctx sessionctx.Context, h int64) (handle int64, found bool, err error) + + // Type returns the type of table + Type() Type +} + +// AllocAutoIncrementValue allocates an auto_increment value for a new row. +func AllocAutoIncrementValue(ctx context.Context, t Table, sctx sessionctx.Context) (int64, error) { + _, max, err := t.Allocator(sctx).Alloc(t.Meta().ID, uint64(1)) + if err != nil { + return 0, err + } + return max, err +} + +// AllocBatchAutoIncrementValue allocates batch auto_increment value (min and max] for rows. +func AllocBatchAutoIncrementValue(ctx context.Context, t Table, sctx sessionctx.Context, N int) (int64, int64, error) { + return t.Allocator(sctx).Alloc(t.Meta().ID, uint64(N)) +} + +// PhysicalTable is an abstraction for two kinds of table representation: partition or non-partitioned table. +// PhysicalID is a ID that can be used to construct a key ranges, all the data in the key range belongs to the corresponding PhysicalTable. +// For a non-partitioned table, its PhysicalID equals to its TableID; For a partition of a partitioned table, its PhysicalID is the partition's ID. +type PhysicalTable interface { + Table + GetPhysicalID() int64 +} + +// TableFromMeta builds a table.Table from *model.TableInfo. +// Currently, it is assigned to tables.TableFromMeta in tidb package's init function. +var TableFromMeta func(alloc autoid.Allocator, tblInfo *model.TableInfo) (Table, error) + +// MockTableFromMeta only serves for test. +var MockTableFromMeta func(tableInfo *model.TableInfo) Table + +// Slice is used for table sorting. 
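+// It implements sort.Interface; Less orders tables by their original-case name.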
+type Slice []Table + +func (s Slice) Len() int { return len(s) } + +func (s Slice) Less(i, j int) bool { + return s[i].Meta().Name.O < s[j].Meta().Name.O +} + +func (s Slice) Swap(i, j int) { s[i], s[j] = s[j], s[i] } + +func init() { + tableMySQLErrCodes := map[terror.ErrCode]uint16{ + mysql.ErrBadNull: mysql.ErrBadNull, + mysql.ErrBadField: mysql.ErrBadField, + mysql.ErrFieldSpecifiedTwice: mysql.ErrFieldSpecifiedTwice, + mysql.ErrNoDefaultForField: mysql.ErrNoDefaultForField, + mysql.ErrTruncatedWrongValueForField: mysql.ErrTruncatedWrongValueForField, + mysql.ErrUnknownPartition: mysql.ErrUnknownPartition, + mysql.ErrNoPartitionForGivenValue: mysql.ErrNoPartitionForGivenValue, + mysql.ErrLockOrActiveTransaction: mysql.ErrLockOrActiveTransaction, + mysql.ErrIndexOutBound: mysql.ErrIndexOutBound, + mysql.ErrColumnStateNonPublic: mysql.ErrColumnStateNonPublic, + mysql.ErrFieldGetDefaultFailed: mysql.ErrFieldGetDefaultFailed, + mysql.ErrUnsupportedOp: mysql.ErrUnsupportedOp, + mysql.ErrRowNotFound: mysql.ErrRowNotFound, + mysql.ErrTableStateCantNone: mysql.ErrTableStateCantNone, + mysql.ErrColumnStateCantNone: mysql.ErrColumnStateCantNone, + mysql.ErrIndexStateCantNone: mysql.ErrIndexStateCantNone, + mysql.ErrInvalidRecordKey: mysql.ErrInvalidRecordKey, + } + terror.ErrClassToMySQLCodes[terror.ClassTable] = tableMySQLErrCodes +} diff --git a/table/table_test.go b/table/table_test.go new file mode 100644 index 0000000..3ae33a9 --- /dev/null +++ b/table/table_test.go @@ -0,0 +1,48 @@ +// Copyright 2017 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package table + +import ( + . 
"github.com/pingcap/check" + "github.com/pingcap/tidb/parser/mysql" +) + +var _ = Suite(&testTableSuite{}) + +type testTableSuite struct{} + +func (t *testTableSuite) TestSlice(c *C) { + sl := make(Slice, 2) + length := sl.Len() + c.Assert(length, Equals, 2) + sl.Swap(0, 1) +} + +func (t *testTableSuite) TestErrorCode(c *C) { + c.Assert(int(ErrColumnCantNull.ToSQLError().Code), Equals, mysql.ErrBadNull) + c.Assert(int(errUnknownColumn.ToSQLError().Code), Equals, mysql.ErrBadField) + c.Assert(int(errDuplicateColumn.ToSQLError().Code), Equals, mysql.ErrFieldSpecifiedTwice) + c.Assert(int(errGetDefaultFailed.ToSQLError().Code), Equals, mysql.ErrFieldGetDefaultFailed) + c.Assert(int(ErrNoDefaultValue.ToSQLError().Code), Equals, mysql.ErrNoDefaultForField) + c.Assert(int(ErrIndexOutBound.ToSQLError().Code), Equals, mysql.ErrIndexOutBound) + c.Assert(int(ErrUnsupportedOp.ToSQLError().Code), Equals, mysql.ErrUnsupportedOp) + c.Assert(int(ErrRowNotFound.ToSQLError().Code), Equals, mysql.ErrRowNotFound) + c.Assert(int(ErrTableStateCantNone.ToSQLError().Code), Equals, mysql.ErrTableStateCantNone) + c.Assert(int(ErrColumnStateCantNone.ToSQLError().Code), Equals, mysql.ErrColumnStateCantNone) + c.Assert(int(ErrColumnStateNonPublic.ToSQLError().Code), Equals, mysql.ErrColumnStateNonPublic) + c.Assert(int(ErrIndexStateCantNone.ToSQLError().Code), Equals, mysql.ErrIndexStateCantNone) + c.Assert(int(ErrInvalidRecordKey.ToSQLError().Code), Equals, mysql.ErrInvalidRecordKey) + c.Assert(int(ErrTruncatedWrongValueForField.ToSQLError().Code), Equals, mysql.ErrTruncatedWrongValueForField) + c.Assert(int(ErrLockOrActiveTransaction.ToSQLError().Code), Equals, mysql.ErrLockOrActiveTransaction) +} diff --git a/table/tables/index.go b/table/tables/index.go new file mode 100644 index 0000000..e03ff55 --- /dev/null +++ b/table/tables/index.go @@ -0,0 +1,372 @@ +// Copyright 2016 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package tables + +import ( + "bytes" + "context" + "encoding/binary" + "io" + "unicode/utf8" + + "github.com/pingcap/errors" + "github.com/pingcap/tidb/kv" + "github.com/pingcap/tidb/parser/charset" + "github.com/pingcap/tidb/parser/model" + "github.com/pingcap/tidb/sessionctx" + "github.com/pingcap/tidb/sessionctx/stmtctx" + "github.com/pingcap/tidb/table" + "github.com/pingcap/tidb/tablecodec" + "github.com/pingcap/tidb/types" + "github.com/pingcap/tidb/util/codec" +) + +// EncodeHandle encodes handle in data. +func EncodeHandle(h int64) []byte { + var data [8]byte + binary.BigEndian.PutUint64(data[:], uint64(h)) + return data[:] +} + +// DecodeHandle decodes handle in data. +func DecodeHandle(data []byte) (int64, error) { + return int64(binary.BigEndian.Uint64(data)), nil +} + +// indexIter is for KV store index iterator. +type indexIter struct { + it kv.Iterator + idx *index + prefix kv.Key +} + +// Close does the clean up works when KV store index iterator is closed. +func (c *indexIter) Close() { + if c.it != nil { + c.it.Close() + c.it = nil + } +} + +// Next returns current key and moves iterator to the next step. 
+func (c *indexIter) Next() (val []types.Datum, h int64, err error) { + if !c.it.Valid() { + return nil, 0, errors.Trace(io.EOF) + } + if !c.it.Key().HasPrefix(c.prefix) { + return nil, 0, errors.Trace(io.EOF) + } + // get indexedValues + buf := c.it.Key()[len(c.prefix):] + vv, err := codec.Decode(buf, len(c.idx.idxInfo.Columns)) + if err != nil { + return nil, 0, err + } + if len(vv) > len(c.idx.idxInfo.Columns) { + h = vv[len(vv)-1].GetInt64() + val = vv[0 : len(vv)-1] + } else { + // If the index is unique and the value isn't nil, the handle is in value. + h, err = DecodeHandle(c.it.Value()) + if err != nil { + return nil, 0, err + } + val = vv + } + // update new iter to next + err = c.it.Next() + if err != nil { + return nil, 0, err + } + return +} + +// index is the data structure for index data in the KV store. +type index struct { + idxInfo *model.IndexInfo + tblInfo *model.TableInfo + prefix kv.Key +} + +// NewIndex builds a new Index object. +func NewIndex(physicalID int64, tblInfo *model.TableInfo, indexInfo *model.IndexInfo) table.Index { + index := &index{ + idxInfo: indexInfo, + tblInfo: tblInfo, + // The prefix can't encode from tblInfo.ID, because table partition may change the id to partition id. + prefix: tablecodec.EncodeTableIndexPrefix(physicalID, indexInfo.ID), + } + return index +} + +// Meta returns index info. +func (c *index) Meta() *model.IndexInfo { + return c.idxInfo +} + +func (c *index) getIndexKeyBuf(buf []byte, defaultCap int) []byte { + if buf != nil { + return buf[:0] + } + + return make([]byte, 0, defaultCap) +} + +// TruncateIndexValuesIfNeeded truncates the index values created using only the leading part of column values. +func TruncateIndexValuesIfNeeded(tblInfo *model.TableInfo, idxInfo *model.IndexInfo, indexedValues []types.Datum) []types.Datum { + for i := 0; i < len(indexedValues); i++ { + v := &indexedValues[i] + if v.Kind() == types.KindString || v.Kind() == types.KindBytes { + ic := idxInfo.Columns[i] + colCharset := tblInfo.Columns[ic.Offset].Charset + colValue := v.GetBytes() + isUTF8Charset := colCharset == charset.CharsetUTF8 || colCharset == charset.CharsetUTF8MB4 + origKind := v.Kind() + if isUTF8Charset { + if ic.Length != types.UnspecifiedLength && utf8.RuneCount(colValue) > ic.Length { + rs := bytes.Runes(colValue) + truncateStr := string(rs[:ic.Length]) + // truncate value and limit its length + v.SetString(truncateStr) + if origKind == types.KindBytes { + v.SetBytes(v.GetBytes()) + } + } + } else if ic.Length != types.UnspecifiedLength && len(colValue) > ic.Length { + // truncate value and limit its length + v.SetBytes(colValue[:ic.Length]) + if origKind == types.KindString { + v.SetString(v.GetString()) + } + } + } + } + + return indexedValues +} + +// GenIndexKey generates storage key for index values. Returned distinct indicates whether the +// indexed values should be distinct in storage (i.e. whether handle is encoded in the key). +func (c *index) GenIndexKey(sc *stmtctx.StatementContext, indexedValues []types.Datum, h int64, buf []byte) (key []byte, distinct bool, err error) { + if c.idxInfo.Unique { + // See https://dev.mysql.com/doc/refman/5.7/en/create-index.html + // A UNIQUE index creates a constraint such that all values in the index must be distinct. + // An error occurs if you try to add a new row with a key value that matches an existing row. + // For all engines, a UNIQUE index permits multiple NULL values for columns that can contain NULL. 
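+ // In this implementation a NULL in any indexed column therefore makes the entry
+ // non-distinct, so the handle is appended to the key below and multiple NULL
+ // entries can coexist.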
+ distinct = true + for _, cv := range indexedValues { + if cv.IsNull() { + distinct = false + break + } + } + } + + // For string columns, indexes can be created using only the leading part of column values, + // using col_name(length) syntax to specify an index prefix length. + indexedValues = TruncateIndexValuesIfNeeded(c.tblInfo, c.idxInfo, indexedValues) + key = c.getIndexKeyBuf(buf, len(c.prefix)+len(indexedValues)*9+9) + key = append(key, []byte(c.prefix)...) + key, err = codec.EncodeKey(sc, key, indexedValues...) + if !distinct && err == nil { + key, err = codec.EncodeKey(sc, key, types.NewDatum(h)) + } + if err != nil { + return nil, false, err + } + return +} + +// Create creates a new entry in the kvIndex data. +// If the index is unique and there is an existing entry with the same key, +// Create will return the existing entry's handle as the first return value, ErrKeyExists as the second return value. +func (c *index) Create(sctx sessionctx.Context, rm kv.RetrieverMutator, indexedValues []types.Datum, h int64, opts ...table.CreateIdxOptFunc) (int64, error) { + var opt table.CreateIdxOpt + for _, fn := range opts { + fn(&opt) + } + vars := sctx.GetSessionVars() + writeBufs := vars.GetWriteStmtBufs() + skipCheck := vars.StmtCtx.BatchCheck + key, distinct, err := c.GenIndexKey(vars.StmtCtx, indexedValues, h, writeBufs.IndexKeyBuf) + if err != nil { + return 0, err + } + + ctx := opt.Ctx + if opt.Untouched { + txn, err1 := sctx.Txn(true) + if err1 != nil { + return 0, err1 + } + // If the index kv was untouched(unchanged), and the key/value already exists in mem-buffer, + // should not overwrite the key with un-commit flag. + // So if the key exists, just do nothing and return. + _, err = txn.GetMemBuffer().Get(ctx, key) + if err == nil { + return 0, nil + } + } + + // save the key buffer to reuse. + writeBufs.IndexKeyBuf = key + if !distinct { + // non-unique index doesn't need store value, write a '0' to reduce space + value := []byte{'0'} + if opt.Untouched { + value[0] = kv.UnCommitIndexKVFlag + } + err = rm.Set(key, value) + return 0, err + } + + if skipCheck || opt.Untouched { + value := EncodeHandle(h) + // If index is untouched and fetch here means the key is exists in TiKV, but not in txn mem-buffer, + // then should also write the untouched index key/value to mem-buffer to make sure the data + // is consistent with the index in txn mem-buffer. + if opt.Untouched { + value = append(value, kv.UnCommitIndexKVFlag) + } + err = rm.Set(key, value) + return 0, err + } + + ctx = context.TODO() + + var value []byte + value, err = rm.Get(ctx, key) + if kv.IsErrNotFound(err) { + v := EncodeHandle(h) + err = rm.Set(key, v) + return 0, err + } + + handle, err := DecodeHandle(value) + if err != nil { + return 0, err + } + return handle, kv.ErrKeyExists +} + +// Delete removes the entry for handle h and indexdValues from KV index. +func (c *index) Delete(sc *stmtctx.StatementContext, m kv.Mutator, indexedValues []types.Datum, h int64) error { + key, _, err := c.GenIndexKey(sc, indexedValues, h, nil) + if err != nil { + return err + } + err = m.Delete(key) + return err +} + +// Drop removes the KV index from store. 
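+// It iterates every key under the index prefix and deletes them one by one.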
+func (c *index) Drop(rm kv.RetrieverMutator) error { + it, err := rm.Iter(c.prefix, c.prefix.PrefixNext()) + if err != nil { + return err + } + defer it.Close() + + // remove all indices + for it.Valid() { + if !it.Key().HasPrefix(c.prefix) { + break + } + err := rm.Delete(it.Key()) + if err != nil { + return err + } + err = it.Next() + if err != nil { + return err + } + } + return nil +} + +// Seek searches KV index for the entry with indexedValues. +func (c *index) Seek(sc *stmtctx.StatementContext, r kv.Retriever, indexedValues []types.Datum) (iter table.IndexIterator, hit bool, err error) { + key, _, err := c.GenIndexKey(sc, indexedValues, 0, nil) + if err != nil { + return nil, false, err + } + + upperBound := c.prefix.PrefixNext() + it, err := r.Iter(key, upperBound) + if err != nil { + return nil, false, err + } + // check if hit + hit = false + if it.Valid() && it.Key().Cmp(key) == 0 { + hit = true + } + return &indexIter{it: it, idx: c, prefix: c.prefix}, hit, nil +} + +// SeekFirst returns an iterator which points to the first entry of the KV index. +func (c *index) SeekFirst(r kv.Retriever) (iter table.IndexIterator, err error) { + upperBound := c.prefix.PrefixNext() + it, err := r.Iter(c.prefix, upperBound) + if err != nil { + return nil, err + } + return &indexIter{it: it, idx: c, prefix: c.prefix}, nil +} + +func (c *index) Exist(sc *stmtctx.StatementContext, rm kv.RetrieverMutator, indexedValues []types.Datum, h int64) (bool, int64, error) { + key, distinct, err := c.GenIndexKey(sc, indexedValues, h, nil) + if err != nil { + return false, 0, err + } + + value, err := rm.Get(context.TODO(), key) + if kv.IsErrNotFound(err) { + return false, 0, nil + } + if err != nil { + return false, 0, err + } + + // For distinct index, the value of key is handle. + if distinct { + handle, err := DecodeHandle(value) + if err != nil { + return false, 0, err + } + + if handle != h { + return true, handle, kv.ErrKeyExists + } + + return true, handle, nil + } + + return true, h, nil +} + +func (c *index) FetchValues(r []types.Datum, vals []types.Datum) ([]types.Datum, error) { + needLength := len(c.idxInfo.Columns) + if vals == nil || cap(vals) < needLength { + vals = make([]types.Datum, needLength) + } + vals = vals[:needLength] + for i, ic := range c.idxInfo.Columns { + if ic.Offset < 0 || ic.Offset >= len(r) { + return nil, table.ErrIndexOutBound.GenWithStackByArgs(ic.Name, ic.Offset, r) + } + vals[i] = r[ic.Offset] + } + return vals, nil +} diff --git a/table/tables/index_test.go b/table/tables/index_test.go new file mode 100644 index 0000000..ac48503 --- /dev/null +++ b/table/tables/index_test.go @@ -0,0 +1,249 @@ +// Copyright 2016 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package tables_test + +import ( + "context" + "io" + "time" + + . 
"github.com/pingcap/check" + "github.com/pingcap/tidb/domain" + "github.com/pingcap/tidb/kv" + "github.com/pingcap/tidb/parser/model" + "github.com/pingcap/tidb/parser/terror" + "github.com/pingcap/tidb/session" + "github.com/pingcap/tidb/sessionctx/stmtctx" + "github.com/pingcap/tidb/store/mockstore" + "github.com/pingcap/tidb/table/tables" + "github.com/pingcap/tidb/types" + "github.com/pingcap/tidb/util/mock" + "github.com/pingcap/tidb/util/testleak" +) + +var _ = Suite(&testIndexSuite{}) + +type testIndexSuite struct { + s kv.Storage + dom *domain.Domain +} + +func (s *testIndexSuite) SetUpSuite(c *C) { + testleak.BeforeTest() + store, err := mockstore.NewMockTikvStore() + c.Assert(err, IsNil) + s.s = store + s.dom, err = session.BootstrapSession(store) + c.Assert(err, IsNil) +} + +func (s *testIndexSuite) TearDownSuite(c *C) { + s.dom.Close() + err := s.s.Close() + c.Assert(err, IsNil) + testleak.AfterTest(c)() +} + +func (s *testIndexSuite) TestIndex(c *C) { + tblInfo := &model.TableInfo{ + ID: 1, + Indices: []*model.IndexInfo{ + { + ID: 2, + Name: model.NewCIStr("test"), + Columns: []*model.IndexColumn{ + {}, + {}, + }, + }, + }, + } + index := tables.NewIndex(tblInfo.ID, tblInfo, tblInfo.Indices[0]) + + // Test ununiq index. + txn, err := s.s.Begin() + c.Assert(err, IsNil) + + values := types.MakeDatums(1, 2) + mockCtx := mock.NewContext() + _, err = index.Create(mockCtx, txn, values, 1) + c.Assert(err, IsNil) + + it, err := index.SeekFirst(txn) + c.Assert(err, IsNil) + + getValues, h, err := it.Next() + c.Assert(err, IsNil) + c.Assert(getValues, HasLen, 2) + c.Assert(getValues[0].GetInt64(), Equals, int64(1)) + c.Assert(getValues[1].GetInt64(), Equals, int64(2)) + c.Assert(h, Equals, int64(1)) + it.Close() + sc := &stmtctx.StatementContext{TimeZone: time.Local} + exist, _, err := index.Exist(sc, txn, values, 100) + c.Assert(err, IsNil) + c.Assert(exist, IsFalse) + + exist, _, err = index.Exist(sc, txn, values, 1) + c.Assert(err, IsNil) + c.Assert(exist, IsTrue) + + err = index.Delete(sc, txn, values, 1) + c.Assert(err, IsNil) + + it, err = index.SeekFirst(txn) + c.Assert(err, IsNil) + + _, _, err = it.Next() + c.Assert(terror.ErrorEqual(err, io.EOF), IsTrue, Commentf("err %v", err)) + it.Close() + + _, err = index.Create(mockCtx, txn, values, 0) + c.Assert(err, IsNil) + + _, err = index.SeekFirst(txn) + c.Assert(err, IsNil) + + _, hit, err := index.Seek(sc, txn, values) + c.Assert(err, IsNil) + c.Assert(hit, IsTrue) + + err = index.Drop(txn) + c.Assert(err, IsNil) + + it, hit, err = index.Seek(sc, txn, values) + c.Assert(err, IsNil) + c.Assert(hit, IsFalse) + + _, _, err = it.Next() + c.Assert(terror.ErrorEqual(err, io.EOF), IsTrue, Commentf("err %v", err)) + it.Close() + + it, err = index.SeekFirst(txn) + c.Assert(err, IsNil) + + _, _, err = it.Next() + c.Assert(terror.ErrorEqual(err, io.EOF), IsTrue, Commentf("err %v", err)) + it.Close() + + err = txn.Commit(context.Background()) + c.Assert(err, IsNil) + + tblInfo = &model.TableInfo{ + ID: 2, + Indices: []*model.IndexInfo{ + { + ID: 3, + Name: model.NewCIStr("test"), + Unique: true, + Columns: []*model.IndexColumn{ + {}, + {}, + }, + }, + }, + } + index = tables.NewIndex(tblInfo.ID, tblInfo, tblInfo.Indices[0]) + + // Test uniq index. 
+ txn, err = s.s.Begin() + c.Assert(err, IsNil) + + _, err = index.Create(mockCtx, txn, values, 1) + c.Assert(err, IsNil) + + _, err = index.Create(mockCtx, txn, values, 2) + c.Assert(err, NotNil) + + it, err = index.SeekFirst(txn) + c.Assert(err, IsNil) + + getValues, h, err = it.Next() + c.Assert(err, IsNil) + c.Assert(getValues, HasLen, 2) + c.Assert(getValues[0].GetInt64(), Equals, int64(1)) + c.Assert(getValues[1].GetInt64(), Equals, int64(2)) + c.Assert(h, Equals, int64(1)) + it.Close() + + exist, h, err = index.Exist(sc, txn, values, 1) + c.Assert(err, IsNil) + c.Assert(h, Equals, int64(1)) + c.Assert(exist, IsTrue) + + exist, h, err = index.Exist(sc, txn, values, 2) + c.Assert(err, NotNil) + c.Assert(h, Equals, int64(1)) + c.Assert(exist, IsTrue) + + err = txn.Commit(context.Background()) + c.Assert(err, IsNil) + + _, err = index.FetchValues(make([]types.Datum, 0), nil) + c.Assert(err, NotNil) + + // Test the function of Next when the value of unique key is nil. + values2 := types.MakeDatums(nil, nil) + _, err = index.Create(mockCtx, txn, values2, 2) + c.Assert(err, IsNil) + it, err = index.SeekFirst(txn) + c.Assert(err, IsNil) + getValues, h, err = it.Next() + c.Assert(err, IsNil) + c.Assert(getValues, HasLen, 2) + c.Assert(getValues[0].GetInterface(), Equals, nil) + c.Assert(getValues[1].GetInterface(), Equals, nil) + c.Assert(h, Equals, int64(2)) + it.Close() +} + +func (s *testIndexSuite) TestCombineIndexSeek(c *C) { + tblInfo := &model.TableInfo{ + ID: 1, + Indices: []*model.IndexInfo{ + { + ID: 2, + Name: model.NewCIStr("test"), + Columns: []*model.IndexColumn{ + {Offset: 1}, + {Offset: 2}, + }, + }, + }, + Columns: []*model.ColumnInfo{ + {Offset: 0}, + {Offset: 1}, + {Offset: 2}, + }, + } + index := tables.NewIndex(tblInfo.ID, tblInfo, tblInfo.Indices[0]) + + txn, err := s.s.Begin() + c.Assert(err, IsNil) + + mockCtx := mock.NewContext() + values := types.MakeDatums("abc", "def") + _, err = index.Create(mockCtx, txn, values, 1) + c.Assert(err, IsNil) + + index2 := tables.NewIndex(tblInfo.ID, tblInfo, tblInfo.Indices[0]) + sc := &stmtctx.StatementContext{TimeZone: time.Local} + iter, hit, err := index2.Seek(sc, txn, types.MakeDatums("abc", nil)) + c.Assert(err, IsNil) + defer iter.Close() + c.Assert(hit, IsFalse) + _, h, err := iter.Next() + c.Assert(err, IsNil) + c.Assert(h, Equals, int64(1)) +} diff --git a/table/tables/tables.go b/table/tables/tables.go new file mode 100644 index 0000000..78a7215 --- /dev/null +++ b/table/tables/tables.go @@ -0,0 +1,986 @@ +// Copyright 2013 The ql Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSES/QL-LICENSE file. + +// Copyright 2015 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. 
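Editor's note on the index code above: unique index entries keep the row handle in the value as an 8-byte big-endian integer (EncodeHandle/DecodeHandle), which is why Create can hand back the conflicting handle together with kv.ErrKeyExists, while non-unique entries only store a placeholder byte. A minimal sketch (not part of this patch) using just those two exported helpers:

package main

import (
	"fmt"

	"github.com/pingcap/tidb/table/tables"
)

func main() {
	// A unique index entry's value is just the encoded handle.
	v := tables.EncodeHandle(42)
	h, err := tables.DecodeHandle(v)
	fmt.Println(len(v), h, err) // 8 42 <nil>
}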
+ +package tables + +import ( + "context" + "encoding/binary" + "math" + "strconv" + "strings" + + "github.com/pingcap/errors" + "github.com/pingcap/tidb/kv" + "github.com/pingcap/tidb/meta/autoid" + "github.com/pingcap/tidb/parser/model" + "github.com/pingcap/tidb/parser/mysql" + "github.com/pingcap/tidb/sessionctx" + "github.com/pingcap/tidb/sessionctx/stmtctx" + "github.com/pingcap/tidb/sessionctx/variable" + "github.com/pingcap/tidb/table" + "github.com/pingcap/tidb/tablecodec" + "github.com/pingcap/tidb/types" + "github.com/pingcap/tidb/util" + "github.com/pingcap/tidb/util/codec" + "github.com/pingcap/tidb/util/logutil" + "github.com/spaolacci/murmur3" + "go.uber.org/zap" +) + +// TableCommon is shared by both Table and partition. +type TableCommon struct { + tableID int64 + // physicalTableID is a unique int64 to identify a physical table. + physicalTableID int64 + Columns []*table.Column + publicColumns []*table.Column + writableColumns []*table.Column + writableIndices []table.Index + indices []table.Index + meta *model.TableInfo + alloc autoid.Allocator + + // recordPrefix and indexPrefix are generated using physicalTableID. + recordPrefix kv.Key + indexPrefix kv.Key +} + +// MockTableFromMeta only serves for test. +func MockTableFromMeta(tblInfo *model.TableInfo) table.Table { + columns := make([]*table.Column, 0, len(tblInfo.Columns)) + for _, colInfo := range tblInfo.Columns { + col := table.ToColumn(colInfo) + columns = append(columns, col) + } + + var t TableCommon + initTableCommon(&t, tblInfo, tblInfo.ID, columns, nil) + if err := initTableIndices(&t); err != nil { + return nil + } + return &t +} + +// TableFromMeta creates a Table instance from model.TableInfo. +func TableFromMeta(alloc autoid.Allocator, tblInfo *model.TableInfo) (table.Table, error) { + if tblInfo.State == model.StateNone { + return nil, table.ErrTableStateCantNone.GenWithStackByArgs(tblInfo.Name) + } + + colsLen := len(tblInfo.Columns) + columns := make([]*table.Column, 0, colsLen) + for i, colInfo := range tblInfo.Columns { + if colInfo.State == model.StateNone { + return nil, table.ErrColumnStateCantNone.GenWithStackByArgs(colInfo.Name) + } + + // Print some information when the column's offset isn't equal to i. + if colInfo.Offset != i { + logutil.BgLogger().Error("wrong table schema", zap.Any("table", tblInfo), zap.Any("column", colInfo), zap.Int("index", i), zap.Int("offset", colInfo.Offset), zap.Int("columnNumber", colsLen)) + } + + col := table.ToColumn(colInfo) + columns = append(columns, col) + } + + var t TableCommon + initTableCommon(&t, tblInfo, tblInfo.ID, columns, alloc) + if err := initTableIndices(&t); err != nil { + return nil, err + } + return &t, nil +} + +// initTableCommon initializes a TableCommon struct. +func initTableCommon(t *TableCommon, tblInfo *model.TableInfo, physicalTableID int64, cols []*table.Column, alloc autoid.Allocator) { + t.tableID = tblInfo.ID + t.physicalTableID = physicalTableID + t.alloc = alloc + t.meta = tblInfo + t.Columns = cols + t.publicColumns = t.Cols() + t.writableColumns = t.WritableCols() + t.writableIndices = t.WritableIndices() + t.recordPrefix = tablecodec.GenTableRecordPrefix(physicalTableID) + t.indexPrefix = tablecodec.GenTableIndexPrefix(physicalTableID) +} + +// initTableIndices initializes the indices of the TableCommon. 
+func initTableIndices(t *TableCommon) error { + tblInfo := t.meta + for _, idxInfo := range tblInfo.Indices { + if idxInfo.State == model.StateNone { + return table.ErrIndexStateCantNone.GenWithStackByArgs(idxInfo.Name) + } + + // Use partition ID for index, because TableCommon may be table or partition. + idx := NewIndex(t.physicalTableID, tblInfo, idxInfo) + t.indices = append(t.indices, idx) + } + return nil +} + +// Indices implements table.Table Indices interface. +func (t *TableCommon) Indices() []table.Index { + return t.indices +} + +// WritableIndices implements table.Table WritableIndices interface. +func (t *TableCommon) WritableIndices() []table.Index { + if len(t.writableIndices) > 0 { + return t.writableIndices + } + writable := make([]table.Index, 0, len(t.indices)) + for _, index := range t.indices { + s := index.Meta().State + if s != model.StateDeleteOnly && s != model.StateDeleteReorganization { + writable = append(writable, index) + } + } + return writable +} + +// DeletableIndices implements table.Table DeletableIndices interface. +func (t *TableCommon) DeletableIndices() []table.Index { + // All indices are deletable because we don't need to check StateNone. + return t.indices +} + +// Meta implements table.Table Meta interface. +func (t *TableCommon) Meta() *model.TableInfo { + return t.meta +} + +// GetPhysicalID implements table.Table GetPhysicalID interface. +func (t *TableCommon) GetPhysicalID() int64 { + return t.physicalTableID +} + +// Cols implements table.Table Cols interface. +func (t *TableCommon) Cols() []*table.Column { + if len(t.publicColumns) > 0 { + return t.publicColumns + } + publicColumns := make([]*table.Column, len(t.Columns)) + maxOffset := -1 + for _, col := range t.Columns { + if col.State != model.StatePublic { + continue + } + publicColumns[col.Offset] = col + if maxOffset < col.Offset { + maxOffset = col.Offset + } + } + return publicColumns[0 : maxOffset+1] +} + +// WritableCols implements table WritableCols interface. +func (t *TableCommon) WritableCols() []*table.Column { + if len(t.writableColumns) > 0 { + return t.writableColumns + } + writableColumns := make([]*table.Column, len(t.Columns)) + maxOffset := -1 + for _, col := range t.Columns { + if col.State == model.StateDeleteOnly || col.State == model.StateDeleteReorganization { + continue + } + writableColumns[col.Offset] = col + if maxOffset < col.Offset { + maxOffset = col.Offset + } + } + return writableColumns[0 : maxOffset+1] +} + +// RecordPrefix implements table.Table interface. +func (t *TableCommon) RecordPrefix() kv.Key { + return t.recordPrefix +} + +// IndexPrefix implements table.Table interface. +func (t *TableCommon) IndexPrefix() kv.Key { + return t.indexPrefix +} + +// RecordKey implements table.Table interface. +func (t *TableCommon) RecordKey(h int64) kv.Key { + return tablecodec.EncodeRecordKey(t.recordPrefix, h) +} + +// FirstKey implements table.Table interface. +func (t *TableCommon) FirstKey() kv.Key { + return t.RecordKey(math.MinInt64) +} + +// UpdateRecord implements table.Table UpdateRecord interface. +// `touched` means which columns are really modified, used for secondary indices. +// Length of `oldData` and `newData` equals to length of `t.WritableCols()`. +func (t *TableCommon) UpdateRecord(ctx sessionctx.Context, h int64, oldData, newData []types.Datum, touched []bool) error { + txn, err := ctx.Txn(true) + if err != nil { + return err + } + + // TODO: reuse bs, like AddRecord does. 
+	bs := kv.NewBufferStore(txn, kv.DefaultTxnMembufCap)
+
+	// rebuild index
+	err = t.rebuildIndices(ctx, bs, h, touched, oldData, newData)
+	if err != nil {
+		return err
+	}
+	numColsCap := len(newData) + 1 // +1 for the extra handle column that we may need to append.
+
+	var colIDs []int64
+	var row []types.Datum
+	colIDs = make([]int64, 0, numColsCap)
+	row = make([]types.Datum, 0, numColsCap)
+
+	for _, col := range t.WritableCols() {
+		var value types.Datum
+		if col.State != model.StatePublic {
+			// If col is in write-only or write-reorganization state, we should keep the oldData.
+			// The oldData must be the original data (it may have been changed by other TiDB servers) or the original default value.
+			// TODO: Use newData directly.
+			value = oldData[col.Offset]
+		} else {
+			value = newData[col.Offset]
+		}
+		if !t.canSkip(col, value) {
+			colIDs = append(colIDs, col.ID)
+			row = append(row, value)
+		}
+	}
+
+	key := t.RecordKey(h)
+	sessVars := ctx.GetSessionVars()
+	sc := sessVars.StmtCtx
+	rd := &sessVars.RowEncoder
+	value, err := tablecodec.EncodeRow(sc, row, colIDs, nil, nil, rd)
+	if err != nil {
+		return err
+	}
+	if err = bs.Set(key, value); err != nil {
+		return err
+	}
+	if err = bs.SaveTo(txn); err != nil {
+		return err
+	}
+	ctx.StmtAddDirtyTableOP(table.DirtyTableDeleteRow, t.physicalTableID, h)
+	ctx.StmtAddDirtyTableOP(table.DirtyTableAddRow, t.physicalTableID, h)
+	colSize := make(map[int64]int64, len(t.Cols()))
+	for id, col := range t.Cols() {
+		size, err := codec.EstimateValueSize(sc, newData[id])
+		if err != nil {
+			continue
+		}
+		newLen := size - 1
+		size, err = codec.EstimateValueSize(sc, oldData[id])
+		if err != nil {
+			continue
+		}
+		oldLen := size - 1
+		colSize[col.ID] = int64(newLen - oldLen)
+	}
+	sessVars.TxnCtx.UpdateDeltaForTable(t.physicalTableID, 0, 1, colSize)
+	return nil
+}
+
+func (t *TableCommon) rebuildIndices(ctx sessionctx.Context, rm kv.RetrieverMutator, h int64, touched []bool, oldData []types.Datum, newData []types.Datum) error {
+	txn, err := ctx.Txn(true)
+	if err != nil {
+		return err
+	}
+	for _, idx := range t.DeletableIndices() {
+		for _, ic := range idx.Meta().Columns {
+			if !touched[ic.Offset] {
+				continue
+			}
+			oldVs, err := idx.FetchValues(oldData, nil)
+			if err != nil {
+				return err
+			}
+			if err = t.removeRowIndex(ctx.GetSessionVars().StmtCtx, rm, h, oldVs, idx, txn); err != nil {
+				return err
+			}
+			break
+		}
+	}
+	for _, idx := range t.WritableIndices() {
+		untouched := true
+		for _, ic := range idx.Meta().Columns {
+			if !touched[ic.Offset] {
+				continue
+			}
+			untouched = false
+			break
+		}
+		// If the txn is auto-commit and the index is untouched, there is no need to write the index value.
+		if untouched && !ctx.GetSessionVars().InTxn() {
+			continue
+		}
+		newVs, err := idx.FetchValues(newData, nil)
+		if err != nil {
+			return err
+		}
+		if err := t.buildIndexForRow(ctx, rm, h, newVs, idx, txn, untouched); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+// adjustRowValuesBuf adjusts the length of writeBufs.AddRowValues. AddRowValues stores the values to be inserted and is used
+// by tablecodec.EncodeRow; the encoded row format is `id1, colval, id2, colval`, so the correct length is rowLen * 2. If
+// the inserted row contains null values, AddRecord skips them, so rowLen differs between rows and the buffer length needs to be adjusted.
+func adjustRowValuesBuf(writeBufs *variable.WriteStmtBufs, rowLen int) {
+	adjustLen := rowLen * 2
+	if writeBufs.AddRowValues == nil || cap(writeBufs.AddRowValues) < adjustLen {
+		writeBufs.AddRowValues = make([]types.Datum, adjustLen)
+	}
+	writeBufs.AddRowValues = writeBufs.AddRowValues[:adjustLen]
+}
+
+// getRollbackableMemStore gets a rollbackable BufferStore. When we are importing data,
+// the kv pair is added to the transaction's membuf directly.
+func (t *TableCommon) getRollbackableMemStore(ctx sessionctx.Context) (kv.RetrieverMutator, error) {
+	bs := ctx.GetSessionVars().GetWriteStmtBufs().BufStore
+	if bs == nil {
+		txn, err := ctx.Txn(true)
+		if err != nil {
+			return nil, err
+		}
+		bs = kv.NewBufferStore(txn, kv.DefaultTxnMembufCap)
+	} else {
+		bs.Reset()
+	}
+	return bs, nil
+}
+
+// AddRecord implements table.Table AddRecord interface.
+func (t *TableCommon) AddRecord(ctx sessionctx.Context, r []types.Datum, opts ...table.AddRecordOption) (recordID int64, err error) {
+	var opt table.AddRecordOpt
+	for _, fn := range opts {
+		fn.ApplyOn(&opt)
+	}
+	var hasRecordID bool
+	cols := t.Cols()
+	// opt.IsUpdate is a flag for update.
+	// If the handle ID changes during an update, the update removes the old record first and then calls `AddRecord` to add a new record.
+	// Currently, only insert can set _tidb_rowid; update cannot change it.
+	if len(r) > len(cols) && !opt.IsUpdate {
+		// The last value is _tidb_rowid.
+		recordID = r[len(r)-1].GetInt64()
+		hasRecordID = true
+	} else {
+		for _, col := range cols {
+			if col.IsPKHandleColumn(t.meta) {
+				recordID = r[col.Offset].GetInt64()
+				hasRecordID = true
+				break
+			}
+		}
+	}
+	if !hasRecordID {
+		stmtCtx := ctx.GetSessionVars().StmtCtx
+		rows := stmtCtx.RecordRows()
+		if rows > 1 {
+			if stmtCtx.BaseRowID >= stmtCtx.MaxRowID {
+				stmtCtx.BaseRowID, stmtCtx.MaxRowID, err = t.AllocHandleIDs(ctx, rows)
+				if err != nil {
+					return 0, err
+				}
+			}
+			stmtCtx.BaseRowID += 1
+			recordID = stmtCtx.BaseRowID
+		} else {
+			recordID, err = t.AllocHandle(ctx)
+			if err != nil {
+				return 0, err
+			}
+		}
+	}
+
+	txn, err := ctx.Txn(true)
+	if err != nil {
+		return 0, err
+	}
+
+	sessVars := ctx.GetSessionVars()
+
+	rm, err := t.getRollbackableMemStore(ctx)
+	if err != nil {
+		return 0, err
+	}
+	var createIdxOpts []table.CreateIdxOptFunc
+	if len(opts) > 0 {
+		createIdxOpts = make([]table.CreateIdxOptFunc, 0, len(opts))
+		for _, fn := range opts {
+			if raw, ok := fn.(table.CreateIdxOptFunc); ok {
+				createIdxOpts = append(createIdxOpts, raw)
+			}
+		}
+	}
+	// Insert new entries into indices.
+	h, err := t.addIndices(ctx, recordID, r, rm, createIdxOpts)
+	if err != nil {
+		return h, err
+	}
+
+	var colIDs []int64
+	var row []types.Datum
+	colIDs = make([]int64, 0, len(r))
+	row = make([]types.Datum, 0, len(r))
+
+	for _, col := range t.WritableCols() {
+		var value types.Datum
+		// When update calls `AddRecord`, the write-only column default value has already been handled,
+		// so only insert needs to add the default value for a write-only column.
+		if col.State != model.StatePublic && !opt.IsUpdate {
+			// If col is in write-only or write-reorganization state, we must add it with its default value.
+			value, err = table.GetColOriginDefaultValue(ctx, col.ToInfo())
+			if err != nil {
+				return 0, err
+			}
+			// Add the value to `r` for the transaction's dirty DB.
+			// Otherwise, a later update would panic when it reads the value of a write-only column from the dirty DB.
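adjustRowValuesBuf above only allocates when the existing buffer cannot hold `rowLen * 2` entries and otherwise just re-slices it, so repeated writes within one statement reuse the same backing array. A small standalone sketch of that grow-or-reslice pattern (names here are illustrative, not TiDB APIs):

```go
package main

import "fmt"

// adjustBuf grows buf only when its capacity is too small; otherwise it
// re-slices the existing backing array to the requested length.
func adjustBuf(buf []int, need int) []int {
	if buf == nil || cap(buf) < need {
		return make([]int, need)
	}
	return buf[:need]
}

func main() {
	var buf []int
	for _, rowLen := range []int{3, 2, 5, 4} {
		need := rowLen * 2 // layout is (id, value) pairs, hence *2
		buf = adjustBuf(buf, need)
		fmt.Printf("rowLen=%d len=%d cap=%d\n", rowLen, len(buf), cap(buf))
	}
}
```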
+ if col.Offset < len(r) { + r[col.Offset] = value + } else { + r = append(r, value) + } + } else { + value = r[col.Offset] + } + if !t.canSkip(col, value) { + colIDs = append(colIDs, col.ID) + row = append(row, value) + } + } + writeBufs := sessVars.GetWriteStmtBufs() + adjustRowValuesBuf(writeBufs, len(row)) + key := t.RecordKey(recordID) + sc := sessVars.StmtCtx + rd := &sessVars.RowEncoder + writeBufs.RowValBuf, err = tablecodec.EncodeRow(sc, row, colIDs, writeBufs.RowValBuf, writeBufs.AddRowValues, rd) + if err != nil { + return 0, err + } + value := writeBufs.RowValBuf + if err = txn.Set(key, value); err != nil { + return 0, err + } + + if err = rm.(*kv.BufferStore).SaveTo(txn); err != nil { + return 0, err + } + ctx.StmtAddDirtyTableOP(table.DirtyTableAddRow, t.physicalTableID, recordID) + + sc.AddAffectedRows(1) + colSize := make(map[int64]int64, len(r)) + for id, col := range t.Cols() { + size, err := codec.EstimateValueSize(sc, r[id]) + if err != nil { + continue + } + colSize[col.ID] = int64(size) - 1 + } + sessVars.TxnCtx.UpdateDeltaForTable(t.physicalTableID, 1, 1, colSize) + return recordID, nil +} + +// genIndexKeyStr generates index content string representation. +func (t *TableCommon) genIndexKeyStr(colVals []types.Datum) (string, error) { + // Pass pre-composed error to txn. + strVals := make([]string, 0, len(colVals)) + for _, cv := range colVals { + cvs := "NULL" + var err error + if !cv.IsNull() { + cvs, err = types.ToString(cv.GetValue()) + if err != nil { + return "", err + } + } + strVals = append(strVals, cvs) + } + return strings.Join(strVals, "-"), nil +} + +// addIndices adds data into indices. If any key is duplicated, returns the original handle. +func (t *TableCommon) addIndices(sctx sessionctx.Context, recordID int64, r []types.Datum, rm kv.RetrieverMutator, + opts []table.CreateIdxOptFunc) (int64, error) { + txn, err := sctx.Txn(true) + if err != nil { + return 0, err + } + // Clean up lazy check error environment + defer txn.DelOption(kv.PresumeKeyNotExistsError) + var opt table.CreateIdxOpt + for _, fn := range opts { + fn(&opt) + } + var ctx context.Context + if opt.Ctx != nil { + ctx = opt.Ctx + } else { + ctx = context.Background() + } + skipCheck := sctx.GetSessionVars().StmtCtx.BatchCheck + if t.meta.PKIsHandle && !skipCheck && !opt.SkipHandleCheck { + if err := CheckHandleExists(ctx, sctx, t, recordID, nil); err != nil { + return recordID, err + } + } + + writeBufs := sctx.GetSessionVars().GetWriteStmtBufs() + indexVals := writeBufs.IndexValsBuf + for _, v := range t.WritableIndices() { + indexVals, err = v.FetchValues(r, indexVals) + if err != nil { + return 0, err + } + var dupErr error + if !skipCheck && v.Meta().Unique { + entryKey, err := t.genIndexKeyStr(indexVals) + if err != nil { + return 0, err + } + existErrInfo := kv.NewExistErrInfo(v.Meta().Name.String(), entryKey) + txn.SetOption(kv.PresumeKeyNotExistsError, existErrInfo) + dupErr = existErrInfo.Err() + } + if dupHandle, err := v.Create(sctx, rm, indexVals, recordID, opts...); err != nil { + if kv.ErrKeyExists.Equal(err) { + return dupHandle, dupErr + } + return 0, err + } + txn.DelOption(kv.PresumeKeyNotExistsError) + } + // save the buffer, multi rows insert can use it. + writeBufs.IndexValsBuf = indexVals + return 0, nil +} + +// RowWithCols implements table.Table RowWithCols interface. +func (t *TableCommon) RowWithCols(ctx sessionctx.Context, h int64, cols []*table.Column) ([]types.Datum, error) { + // Get raw row data from kv. 
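genIndexKeyStr above renders the conflicting index values as a dash-separated string, printing NULL for null datums, and that string is what ends up in the duplicate-key error raised by addIndices. A standalone sketch of the same formatting rule, using plain Go values instead of types.Datum:

```go
package main

import (
	"fmt"
	"strings"
)

// indexKeyStr joins column values with "-", printing NULL for nil values,
// mirroring the dash-separated entry shown in duplicate-key errors.
func indexKeyStr(vals []interface{}) string {
	parts := make([]string, 0, len(vals))
	for _, v := range vals {
		if v == nil {
			parts = append(parts, "NULL")
			continue
		}
		parts = append(parts, fmt.Sprintf("%v", v))
	}
	return strings.Join(parts, "-")
}

func main() {
	fmt.Println(indexKeyStr([]interface{}{1, "abc", nil})) // 1-abc-NULL
}
```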
+ key := t.RecordKey(h) + txn, err := ctx.Txn(true) + if err != nil { + return nil, err + } + value, err := txn.Get(context.TODO(), key) + if err != nil { + return nil, err + } + v, _, err := DecodeRawRowData(ctx, t.Meta(), h, cols, value) + if err != nil { + return nil, err + } + return v, nil +} + +// DecodeRawRowData decodes raw row data into a datum slice and a (columnID:columnValue) map. +func DecodeRawRowData(ctx sessionctx.Context, meta *model.TableInfo, h int64, cols []*table.Column, + value []byte) ([]types.Datum, map[int64]types.Datum, error) { + v := make([]types.Datum, len(cols)) + colTps := make(map[int64]*types.FieldType, len(cols)) + for i, col := range cols { + if col == nil { + continue + } + if col.IsPKHandleColumn(meta) { + if mysql.HasUnsignedFlag(col.Flag) { + v[i].SetUint64(uint64(h)) + } else { + v[i].SetInt64(h) + } + continue + } + colTps[col.ID] = &col.FieldType + } + rowMap, err := tablecodec.DecodeRow(value, colTps, ctx.GetSessionVars().Location()) + if err != nil { + return nil, rowMap, err + } + defaultVals := make([]types.Datum, len(cols)) + for i, col := range cols { + if col == nil { + continue + } + if col.IsPKHandleColumn(meta) { + continue + } + ri, ok := rowMap[col.ID] + if ok { + v[i] = ri + continue + } + v[i], err = GetColDefaultValue(ctx, col, defaultVals) + if err != nil { + return nil, rowMap, err + } + } + return v, rowMap, nil +} + +// Row implements table.Table Row interface. +func (t *TableCommon) Row(ctx sessionctx.Context, h int64) ([]types.Datum, error) { + return t.RowWithCols(ctx, h, t.Cols()) +} + +// RemoveRecord implements table.Table RemoveRecord interface. +func (t *TableCommon) RemoveRecord(ctx sessionctx.Context, h int64, r []types.Datum) error { + err := t.removeRowData(ctx, h) + if err != nil { + return err + } + err = t.removeRowIndices(ctx, h, r) + if err != nil { + return err + } + + ctx.StmtAddDirtyTableOP(table.DirtyTableDeleteRow, t.physicalTableID, h) + colSize := make(map[int64]int64, len(t.Cols())) + sc := ctx.GetSessionVars().StmtCtx + for id, col := range t.Cols() { + size, err := codec.EstimateValueSize(sc, r[id]) + if err != nil { + continue + } + colSize[col.ID] = -int64(size - 1) + } + ctx.GetSessionVars().TxnCtx.UpdateDeltaForTable(t.physicalTableID, -1, 1, colSize) + return err +} + +func (t *TableCommon) removeRowData(ctx sessionctx.Context, h int64) error { + // Remove row data. + txn, err := ctx.Txn(true) + if err != nil { + return err + } + + key := t.RecordKey(h) + err = txn.Delete([]byte(key)) + if err != nil { + return err + } + return nil +} + +// removeRowIndices removes all the indices of a row. +func (t *TableCommon) removeRowIndices(ctx sessionctx.Context, h int64, rec []types.Datum) error { + txn, err := ctx.Txn(true) + if err != nil { + return err + } + for _, v := range t.DeletableIndices() { + vals, err := v.FetchValues(rec, nil) + if err != nil { + logutil.BgLogger().Info("remove row index failed", zap.Any("index", v.Meta()), zap.Uint64("txnStartTS", txn.StartTS()), zap.Int64("handle", h), zap.Any("record", rec), zap.Error(err)) + return err + } + if err = v.Delete(ctx.GetSessionVars().StmtCtx, txn, vals, h); err != nil { + if v.Meta().State != model.StatePublic && kv.ErrNotExist.Equal(err) { + // If the index is not in public state, we may have not created the index, + // or already deleted the index, so skip ErrNotExist error. 
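DecodeRawRowData above fills any column that is missing from the stored row with its default value, and the defaultVals slice caches each computed default so it is evaluated at most once per call. A minimal sketch of that fallback-and-cache shape, with toy types standing in for Datum and column metadata:

```go
package main

import "fmt"

type column struct {
	id     int64
	offset int
	def    string // stand-in for the column's default value
}

// decodeRow fills v from the stored row map and falls back to defaults for
// missing columns; defaultVals caches each computed default by offset.
// (The real code checks Datum.IsNull; an empty string plays that role here.)
func decodeRow(rowMap map[int64]string, cols []column) []string {
	v := make([]string, len(cols))
	defaultVals := make([]string, len(cols))
	for i, col := range cols {
		if val, ok := rowMap[col.id]; ok {
			v[i] = val
			continue
		}
		if defaultVals[col.offset] == "" {
			// Pretend this is an expensive default-value evaluation.
			defaultVals[col.offset] = col.def
		}
		v[i] = defaultVals[col.offset]
	}
	return v
}

func main() {
	cols := []column{{id: 1, offset: 0, def: "0"}, {id: 2, offset: 1, def: "unknown"}}
	// The stored row only has column 1; column 2 was added later with a default.
	fmt.Println(decodeRow(map[int64]string{1: "42"}, cols)) // [42 unknown]
}
```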
+ logutil.BgLogger().Debug("row index not exists", zap.Any("index", v.Meta()), zap.Uint64("txnStartTS", txn.StartTS()), zap.Int64("handle", h)) + continue + } + return err + } + } + return nil +} + +// removeRowIndex implements table.Table RemoveRowIndex interface. +func (t *TableCommon) removeRowIndex(sc *stmtctx.StatementContext, rm kv.RetrieverMutator, h int64, vals []types.Datum, idx table.Index, txn kv.Transaction) error { + return idx.Delete(sc, rm, vals, h) +} + +// buildIndexForRow implements table.Table BuildIndexForRow interface. +func (t *TableCommon) buildIndexForRow(ctx sessionctx.Context, rm kv.RetrieverMutator, h int64, vals []types.Datum, idx table.Index, txn kv.Transaction, untouched bool) error { + var opts []table.CreateIdxOptFunc + if untouched { + opts = append(opts, table.IndexIsUntouched) + } + if _, err := idx.Create(ctx, rm, vals, h, opts...); err != nil { + if kv.ErrKeyExists.Equal(err) { + // Make error message consistent with MySQL. + entryKey, err1 := t.genIndexKeyStr(vals) + if err1 != nil { + // if genIndexKeyStr failed, return the original error. + return err + } + + return kv.ErrKeyExists.FastGenByArgs(entryKey, idx.Meta().Name) + } + return err + } + return nil +} + +// IterRecords implements table.Table IterRecords interface. +func (t *TableCommon) IterRecords(ctx sessionctx.Context, startKey kv.Key, cols []*table.Column, + fn table.RecordIterFunc) error { + prefix := t.RecordPrefix() + txn, err := ctx.Txn(true) + if err != nil { + return err + } + + it, err := txn.Iter(startKey, prefix.PrefixNext()) + if err != nil { + return err + } + defer it.Close() + + if !it.Valid() { + return nil + } + + logutil.BgLogger().Debug("iterate records", zap.ByteString("startKey", startKey), zap.ByteString("key", it.Key()), zap.ByteString("value", it.Value())) + + colMap := make(map[int64]*types.FieldType) + for _, col := range cols { + colMap[col.ID] = &col.FieldType + } + defaultVals := make([]types.Datum, len(cols)) + for it.Valid() && it.Key().HasPrefix(prefix) { + // first kv pair is row lock information. + // TODO: check valid lock + // get row handle + handle, err := tablecodec.DecodeRowKey(it.Key()) + if err != nil { + return err + } + rowMap, err := tablecodec.DecodeRow(it.Value(), colMap, ctx.GetSessionVars().Location()) + if err != nil { + return err + } + data := make([]types.Datum, len(cols)) + for _, col := range cols { + if col.IsPKHandleColumn(t.meta) { + if mysql.HasUnsignedFlag(col.Flag) { + data[col.Offset].SetUint64(uint64(handle)) + } else { + data[col.Offset].SetInt64(handle) + } + continue + } + if _, ok := rowMap[col.ID]; ok { + data[col.Offset] = rowMap[col.ID] + continue + } + data[col.Offset], err = GetColDefaultValue(ctx, col, defaultVals) + if err != nil { + return err + } + } + more, err := fn(handle, data, cols) + if !more || err != nil { + return err + } + + rk := t.RecordKey(handle) + err = kv.NextUntil(it, util.RowKeyPrefixFilter(rk)) + if err != nil { + return err + } + } + + return nil +} + +// GetColDefaultValue gets a column default value. +// The defaultVals is used to avoid calculating the default value multiple times. 
+func GetColDefaultValue(ctx sessionctx.Context, col *table.Column, defaultVals []types.Datum) (
+	colVal types.Datum, err error) {
+	if col.OriginDefaultValue == nil && mysql.HasNotNullFlag(col.Flag) {
+		return colVal, errors.New("Miss column")
+	}
+	if col.State != model.StatePublic {
+		return colVal, nil
+	}
+	if defaultVals[col.Offset].IsNull() {
+		colVal, err = table.GetColOriginDefaultValue(ctx, col.ToInfo())
+		if err != nil {
+			return colVal, err
+		}
+		defaultVals[col.Offset] = colVal
+	} else {
+		colVal = defaultVals[col.Offset]
+	}
+
+	return colVal, nil
+}
+
+// AllocHandle implements table.Table AllocHandle interface.
+func (t *TableCommon) AllocHandle(ctx sessionctx.Context) (int64, error) {
+	_, rowID, err := t.AllocHandleIDs(ctx, 1)
+	return rowID, err
+}
+
+// AllocHandleIDs implements table.Table AllocHandle interface.
+func (t *TableCommon) AllocHandleIDs(ctx sessionctx.Context, n uint64) (int64, int64, error) {
+	base, maxID, err := t.Allocator(ctx).Alloc(t.tableID, n)
+	if err != nil {
+		return 0, 0, err
+	}
+	if t.meta.ShardRowIDBits > 0 {
+		// Use max record ShardRowIDBits to check overflow.
+		if OverflowShardBits(maxID, t.meta.MaxShardRowIDBits) {
+			// If it overflows, the rowID may be duplicated. For example,
+			// t.meta.ShardRowIDBits = 4
+			// rowID = 0010111111111111111111111111111111111111111111111111111111111111
+			// shard = 0100000000000000000000000000000000000000000000000000000000000000
+			// will be duplicated with:
+			// rowID = 0100111111111111111111111111111111111111111111111111111111111111
+			// shard = 0010000000000000000000000000000000000000000000000000000000000000
+			return 0, 0, autoid.ErrAutoincReadFailed
+		}
+		txnCtx := ctx.GetSessionVars().TxnCtx
+		if txnCtx.Shard == nil {
+			shard := t.calcShard(txnCtx.StartTS)
+			txnCtx.Shard = &shard
+		}
+		base |= *txnCtx.Shard
+		maxID |= *txnCtx.Shard
+	}
+	return base, maxID, nil
+}
+
+// OverflowShardBits checks whether the rowID overflows `1<<(64-shardRowIDBits-1) - 1`.
+func OverflowShardBits(rowID int64, shardRowIDBits uint64) bool {
+	mask := (1<<shardRowIDBits - 1) << (64 - shardRowIDBits - 1)
+	return rowID&int64(mask) > 0
+}
+
+func (t *TableCommon) calcShard(startTS uint64) int64 {
+	var buf [8]byte
+	binary.LittleEndian.PutUint64(buf[:], startTS)
+	hashVal := int64(murmur3.Sum32(buf[:]))
+	return (hashVal & (1<<t.meta.ShardRowIDBits-1)) << (64 - t.meta.ShardRowIDBits - 1)
+}
+	for len(key) > 0 {
+		remain, d, e := codec.DecodeOne(key)
+		if e != nil {
+			return 0, 0, nil, errInvalidIndexKey.GenWithStack("invalid index key - %q %v", k, e)
+		}
+		str, e1 := d.ToString()
+		if e1 != nil {
+			return 0, 0, nil, errInvalidIndexKey.GenWithStack("invalid index key - %q %v", k, e1)
+		}
+		indexValues = append(indexValues, str)
+		key = remain
+	}
+	return
+}
+
+// EncodeRow encodes row data and column IDs into a slice of bytes.
+// Row layout: colID1, value1, colID2, value2, .....
+// valBuf and values are passed in by the caller to reduce temporary buffer allocations inside EncodeRow. If valBuf and values are nil,
+// EncodeRow allocates them.
+func EncodeRow(sc *stmtctx.StatementContext, row []types.Datum, colIDs []int64, valBuf []byte, values []types.Datum, rd *rowcodec.Encoder) ([]byte, error) {
+	if len(row) != len(colIDs) {
+		return nil, errors.Errorf("EncodeRow error: data and columnID count not match %d vs %d", len(row), len(colIDs))
+	}
+	return rd.Encode(sc, colIDs, row, valBuf)
+}
+
+// EncodeRowKey encodes the table ID and record handle into a kv.Key.
+func EncodeRowKey(tableID int64, encodedHandle []byte) kv.Key {
+	buf := make([]byte, 0, RecordRowKeyLen)
+	buf = appendTableRecordPrefix(buf, tableID)
+	buf = append(buf, encodedHandle...)
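The shard logic above reserves the top ShardRowIDBits of the 64-bit row ID (just below the sign bit): calcShard hashes the transaction start timestamp into those bits, and OverflowShardBits refuses an allocation whose auto-generated part would spill into them. A standalone sketch of the bit arithmetic; FNV-1a stands in for murmur3 so the example needs only the standard library:

```go
package main

import (
	"encoding/binary"
	"fmt"
	"hash/fnv"
)

const shardRowIDBits = 4

// overflowShardBits reports whether rowID already uses any of the bits
// reserved for the shard prefix (the top shardBits below the sign bit).
func overflowShardBits(rowID int64, shardBits uint64) bool {
	mask := (uint64(1)<<shardBits - 1) << (64 - shardBits - 1)
	return uint64(rowID)&mask > 0
}

// calcShard hashes the txn start timestamp into the reserved high bits.
func calcShard(startTS uint64, shardBits uint64) int64 {
	var buf [8]byte
	binary.LittleEndian.PutUint64(buf[:], startTS)
	h := fnv.New32a() // stand-in for murmur3.Sum32 in the real code
	_, _ = h.Write(buf[:])
	hashVal := uint64(h.Sum32())
	shard := (hashVal & (1<<shardBits - 1)) << (64 - shardBits - 1)
	return int64(shard)
}

func main() {
	const startTS = 412301234567890
	base := int64(12345) // pretend this came from the auto-ID allocator
	if overflowShardBits(base, shardRowIDBits) {
		fmt.Println("allocation would collide with the shard bits")
		return
	}
	shard := calcShard(startTS, shardRowIDBits)
	fmt.Printf("shard  = %064b\nrow id = %064b\n", uint64(shard), uint64(base|shard))
}
```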
+ return buf +} + +// CutRowKeyPrefix cuts the row key prefix. +func CutRowKeyPrefix(key kv.Key) []byte { + return key[prefixLen:] +} + +// EncodeRecordKey encodes the recordPrefix, row handle into a kv.Key. +func EncodeRecordKey(recordPrefix kv.Key, h int64) kv.Key { + buf := make([]byte, 0, len(recordPrefix)+idLen) + buf = append(buf, recordPrefix...) + buf = codec.EncodeInt(buf, h) + return buf +} + +func hasTablePrefix(key kv.Key) bool { + return key[0] == tablePrefix[0] +} + +func hasRecordPrefixSep(key kv.Key) bool { + return key[0] == recordPrefixSep[0] && key[1] == recordPrefixSep[1] +} + +// DecodeMetaKey decodes the key and get the meta key and meta field. +func DecodeMetaKey(ek kv.Key) (key []byte, field []byte, err error) { + var tp uint64 + prefix := []byte("m") + if !bytes.HasPrefix(ek, prefix) { + return nil, nil, errors.New("invalid encoded hash data key prefix") + } + ek = ek[len(prefix):] + ek, key, err = codec.DecodeBytes(ek, nil) + if err != nil { + return nil, nil, errors.Trace(err) + } + ek, tp, err = codec.DecodeUint(ek) + if err != nil { + return nil, nil, errors.Trace(err) + } else if structure.TypeFlag(tp) != structure.HashData { + return nil, nil, errors.Errorf("invalid encoded hash data key flag %c", byte(tp)) + } + _, field, err = codec.DecodeBytes(ek, nil) + return key, field, errors.Trace(err) +} + +// DecodeKeyHead decodes the key's head and gets the tableID, indexID. isRecordKey is true when is a record key. +func DecodeKeyHead(key kv.Key) (tableID int64, indexID int64, isRecordKey bool, err error) { + isRecordKey = false + k := key + if !key.HasPrefix(tablePrefix) { + err = errInvalidKey.GenWithStack("invalid key - %q", k) + return + } + + key = key[len(tablePrefix):] + key, tableID, err = codec.DecodeInt(key) + if err != nil { + err = errors.Trace(err) + return + } + + if key.HasPrefix(recordPrefixSep) { + isRecordKey = true + return + } + if !key.HasPrefix(indexPrefixSep) { + err = errInvalidKey.GenWithStack("invalid key - %q", k) + return + } + + key = key[len(indexPrefixSep):] + + key, indexID, err = codec.DecodeInt(key) + if err != nil { + err = errors.Trace(err) + return + } + return +} + +// DecodeTableID decodes the table ID of the key, if the key is not table key, returns 0. +func DecodeTableID(key kv.Key) int64 { + if !key.HasPrefix(tablePrefix) { + return 0 + } + key = key[len(tablePrefix):] + _, tableID, err := codec.DecodeInt(key) + // TODO: return error. + terror.Log(errors.Trace(err)) + return tableID +} + +// DecodeRowKey decodes the key and gets the handle. +func DecodeRowKey(key kv.Key) (int64, error) { + if len(key) != RecordRowKeyLen || !hasTablePrefix(key) || !hasRecordPrefixSep(key[prefixLen-2:]) { + return 0, errInvalidKey.GenWithStack("invalid key - %q", key) + } + u := binary.BigEndian.Uint64(key[prefixLen:]) + return codec.DecodeCmpUintToInt(u), nil +} + +// EncodeValue encodes a go value to bytes. +func EncodeValue(sc *stmtctx.StatementContext, b []byte, raw types.Datum) ([]byte, error) { + return codec.EncodeValue(sc, b, raw) +} + +// DecodeColumnValue decodes data to a Datum according to the column info. +func DecodeColumnValue(data []byte, ft *types.FieldType, loc *time.Location) (types.Datum, error) { + _, d, err := codec.DecodeOne(data) + if err != nil { + return types.Datum{}, errors.Trace(err) + } + return d, nil +} + +// DecodeRowWithMap decodes a byte slice into datums with a existing row map. +// Row layout: colID1, value1, colID2, value2, ..... 
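EncodeRecordKey and DecodeRowKey above build and parse keys of the form `t{tableID}_r{handle}`, where integers are written big-endian with the sign bit flipped so that byte-wise comparison of keys matches numeric order. A simplified standalone sketch of that memcomparable layout (the prefix bytes mimic the layout; the real codec lives in util/codec):

```go
package main

import (
	"bytes"
	"encoding/binary"
	"fmt"
)

const signMask uint64 = 0x8000000000000000

// encodeCmpInt appends an int64 so that bytes.Compare on the encoded form
// orders values the same way as numeric comparison (sign bit flipped).
func encodeCmpInt(b []byte, v int64) []byte {
	var buf [8]byte
	binary.BigEndian.PutUint64(buf[:], uint64(v)^signMask)
	return append(b, buf[:]...)
}

func decodeCmpInt(b []byte) int64 {
	return int64(binary.BigEndian.Uint64(b) ^ signMask)
}

// recordKey builds t{tableID}_r{handle}, mirroring the record key prefix layout.
func recordKey(tableID, handle int64) []byte {
	key := []byte{'t'}
	key = encodeCmpInt(key, tableID)
	key = append(key, '_', 'r')
	return encodeCmpInt(key, handle)
}

func main() {
	k1 := recordKey(55, -1)
	k2 := recordKey(55, 2)
	fmt.Println(bytes.Compare(k1, k2) < 0)    // true: -1 sorts before 2
	fmt.Println(decodeCmpInt(k1[len(k1)-8:])) // -1 (the handle)
	fmt.Println(decodeCmpInt(k2[1:9]))        // 55 (the table ID)
}
```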
+func DecodeRowWithMap(b []byte, cols map[int64]*types.FieldType, loc *time.Location, row map[int64]types.Datum) (map[int64]types.Datum, error) { + if row == nil { + row = make(map[int64]types.Datum, len(cols)) + } + if b == nil { + return row, nil + } + if len(b) == 1 && b[0] == codec.NilFlag { + return row, nil + } + cnt := 0 + var ( + data []byte + err error + ) + for len(b) > 0 { + // Get col id. + data, b, err = codec.CutOne(b) + if err != nil { + return nil, errors.Trace(err) + } + _, cid, err := codec.DecodeOne(data) + if err != nil { + return nil, errors.Trace(err) + } + // Get col value. + data, b, err = codec.CutOne(b) + if err != nil { + return nil, errors.Trace(err) + } + id := cid.GetInt64() + ft, ok := cols[id] + if ok { + _, v, err := codec.DecodeOne(data) + if err != nil { + return nil, errors.Trace(err) + } + v, err = unflatten(v, ft, loc) + if err != nil { + return nil, errors.Trace(err) + } + row[id] = v + cnt++ + if cnt == len(cols) { + // Get enough data. + break + } + } + } + return row, nil +} + +// DecodeRow decodes a byte slice into datums. +// Row layout: colID1, value1, colID2, value2, ..... +func DecodeRow(b []byte, cols map[int64]*types.FieldType, loc *time.Location) (map[int64]types.Datum, error) { + return DecodeRowWithMapNew(b, cols, loc, nil) +} + +// DecodeRowWithMapNew decode a row to datum map. +func DecodeRowWithMapNew(b []byte, cols map[int64]*types.FieldType, loc *time.Location, row map[int64]types.Datum) (map[int64]types.Datum, error) { + if row == nil { + row = make(map[int64]types.Datum, len(cols)) + } + if b == nil { + return row, nil + } + if len(b) == 1 && b[0] == codec.NilFlag { + return row, nil + } + + reqCols := make([]rowcodec.ColInfo, len(cols)) + var idx int + for id, tp := range cols { + reqCols[idx] = rowcodec.ColInfo{ + ID: id, + Tp: int32(tp.Tp), + Flag: int32(tp.Flag), + Flen: tp.Flen, + Decimal: tp.Decimal, + Elems: tp.Elems, + } + idx++ + } + // for decodeToMap: + // - no need handle + // - no need get default value + rd := rowcodec.NewDatumMapDecoder(reqCols, -1, loc) + return rd.DecodeToDatumMap(b, -1, row) +} + +// unflatten converts a raw datum to a column datum. +func unflatten(datum types.Datum, ft *types.FieldType, loc *time.Location) (types.Datum, error) { + return datum, nil +} + +// CutIndexKey cuts encoded index key into colIDs to bytes slices map. +// The returned value b is the remaining bytes of the key which would be empty if it is unique index or handle data +// if it is non-unique index. +func CutIndexKey(key kv.Key, colIDs []int64) (values map[int64][]byte, b []byte, err error) { + b = key[prefixLen+idLen:] + values = make(map[int64][]byte) + for _, id := range colIDs { + var val []byte + val, b, err = codec.CutOne(b) + if err != nil { + return nil, nil, errors.Trace(err) + } + values[id] = val + } + return +} + +// CutIndexPrefix cuts the index prefix. +func CutIndexPrefix(key kv.Key) []byte { + return key[prefixLen+idLen:] +} + +// CutIndexKeyNew cuts encoded index key into colIDs to bytes slices. +// The returned value b is the remaining bytes of the key which would be empty if it is unique index or handle data +// if it is non-unique index. 
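DecodeRowWithMap above consumes the old row format as alternating (column ID, value) chunks and stops as soon as every requested column has been seen. A toy version of that pairwise walk over a string slice, not the real codec:

```go
package main

import "fmt"

// decodePairs walks alternating (columnID, value) entries and returns only
// the requested columns, stopping early once all of them have been found.
func decodePairs(pairs []string, want map[string]bool) map[string]string {
	row := make(map[string]string, len(want))
	for i := 0; i+1 < len(pairs); i += 2 {
		id, val := pairs[i], pairs[i+1]
		if !want[id] {
			continue
		}
		row[id] = val
		if len(row) == len(want) {
			break // got every requested column
		}
	}
	return row
}

func main() {
	pairs := []string{"1", "100", "2", "abc", "3", "1.5"}
	fmt.Println(decodePairs(pairs, map[string]bool{"1": true, "3": true}))
	// map[1:100 3:1.5]
}
```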
+func CutIndexKeyNew(key kv.Key, length int) (values [][]byte, b []byte, err error) { + b = key[prefixLen+idLen:] + values = make([][]byte, 0, length) + for i := 0; i < length; i++ { + var val []byte + val, b, err = codec.CutOne(b) + if err != nil { + return nil, nil, errors.Trace(err) + } + values = append(values, val) + } + return +} + +// PrimaryKeyStatus is the primary key column status. +type PrimaryKeyStatus int + +const ( + // PrimaryKeyNotExists means no need to decode primary key column value when DecodeIndexKV. + PrimaryKeyNotExists PrimaryKeyStatus = iota + // PrimaryKeyIsSigned means decode primary key column value as int64 when DecodeIndexKV. + PrimaryKeyIsSigned + // PrimaryKeyIsUnsigned means decode primary key column value as uint64 when DecodeIndexKV. + PrimaryKeyIsUnsigned +) + +// DecodeIndexKV uses to decode index key values. +func DecodeIndexKV(key, value []byte, colsLen int, pkStatus PrimaryKeyStatus) ([][]byte, error) { + values, b, err := CutIndexKeyNew(key, colsLen) + if err != nil { + return nil, errors.Trace(err) + } + if len(b) > 0 { + if pkStatus != PrimaryKeyNotExists { + values = append(values, b) + } + } else if pkStatus != PrimaryKeyNotExists { + handle, err := DecodeIndexValueAsHandle(value) + if err != nil { + return nil, errors.Trace(err) + } + var handleDatum types.Datum + if pkStatus == PrimaryKeyIsUnsigned { + handleDatum = types.NewUintDatum(uint64(handle)) + } else { + handleDatum = types.NewIntDatum(handle) + } + handleBytes := make([]byte, 0, 8) + handleBytes, err = codec.EncodeValue(nil, handleBytes, handleDatum) + if err != nil { + return nil, errors.Trace(err) + } + values = append(values, handleBytes) + } + return values, nil +} + +// DecodeIndexHandle uses to decode the handle from index key/value. +func DecodeIndexHandle(key, value []byte, colsLen int, pkTp *types.FieldType) (int64, error) { + _, b, err := CutIndexKeyNew(key, colsLen) + if err != nil { + return 0, errors.Trace(err) + } + if len(b) > 0 { + d, err := DecodeColumnValue(b, pkTp, nil) + if err != nil { + return 0, errors.Trace(err) + } + return d.GetInt64(), nil + + } else if len(value) >= 8 { + return DecodeIndexValueAsHandle(value) + } + // Should never execute to here. + return 0, errors.Errorf("no handle in index key: %v, value: %v", key, value) +} + +// DecodeIndexValueAsHandle uses to decode index value as handle id. +func DecodeIndexValueAsHandle(data []byte) (int64, error) { + var h int64 + buf := bytes.NewBuffer(data) + err := binary.Read(buf, binary.BigEndian, &h) + return h, errors.Trace(err) +} + +// EncodeTableIndexPrefix encodes index prefix with tableID and idxID. +func EncodeTableIndexPrefix(tableID, idxID int64) kv.Key { + key := make([]byte, 0, prefixLen) + key = appendTableIndexPrefix(key, tableID) + key = codec.EncodeInt(key, idxID) + return key +} + +// EncodeTablePrefix encodes table prefix with table ID. +func EncodeTablePrefix(tableID int64) kv.Key { + var key kv.Key + key = append(key, tablePrefix...) + key = codec.EncodeInt(key, tableID) + return key +} + +// ReplaceRecordKeyTableID replace the tableID in the recordKey buf. +func ReplaceRecordKeyTableID(buf []byte, tableID int64) []byte { + if len(buf) < len(tablePrefix)+8 { + return buf + } + + u := codec.EncodeIntToCmpUint(tableID) + binary.BigEndian.PutUint64(buf[len(tablePrefix):], u) + return buf +} + +// GenTableRecordPrefix composes record prefix with tableID: "t[tableID]_r". 
+func GenTableRecordPrefix(tableID int64) kv.Key { + buf := make([]byte, 0, len(tablePrefix)+8+len(recordPrefixSep)) + return appendTableRecordPrefix(buf, tableID) +} + +// GenTableIndexPrefix composes index prefix with tableID: "t[tableID]_i". +func GenTableIndexPrefix(tableID int64) kv.Key { + buf := make([]byte, 0, len(tablePrefix)+8+len(indexPrefixSep)) + return appendTableIndexPrefix(buf, tableID) +} + +// IsIndexKey is used to check whether the key is an index key. +func IsIndexKey(k []byte) bool { + return len(k) > 11 && k[0] == 't' && k[10] == 'i' +} + +// IsUntouchedIndexKValue uses to check whether the key is index key, and the value is untouched, +// since the untouched index key/value is no need to commit. +func IsUntouchedIndexKValue(k, v []byte) bool { + vLen := len(v) + return IsIndexKey(k) && + ((vLen == 1 || vLen == 9) && v[vLen-1] == kv.UnCommitIndexKVFlag) +} + +// GenTablePrefix composes table record and index prefix: "t[tableID]". +func GenTablePrefix(tableID int64) kv.Key { + buf := make([]byte, 0, len(tablePrefix)+8) + buf = append(buf, tablePrefix...) + buf = codec.EncodeInt(buf, tableID) + return buf +} + +// TruncateToRowKeyLen truncates the key to row key length if the key is longer than row key. +func TruncateToRowKeyLen(key kv.Key) kv.Key { + if len(key) > RecordRowKeyLen { + return key[:RecordRowKeyLen] + } + return key +} + +// GetTableHandleKeyRange returns table handle's key range with tableID. +func GetTableHandleKeyRange(tableID int64) (startKey, endKey []byte) { + startKey = EncodeRowKeyWithHandle(tableID, math.MinInt64) + endKey = EncodeRowKeyWithHandle(tableID, math.MaxInt64) + return +} + +// GetTableIndexKeyRange returns table index's key range with tableID and indexID. +func GetTableIndexKeyRange(tableID, indexID int64) (startKey, endKey []byte) { + startKey = EncodeIndexSeekKey(tableID, indexID, nil) + endKey = EncodeIndexSeekKey(tableID, indexID, []byte{255}) + return +} + +var ( + errInvalidKey = terror.ClassXEval.New(mysql.ErrInvalidKey, mysql.MySQLErrName[mysql.ErrInvalidKey]) + errInvalidRecordKey = terror.ClassXEval.New(mysql.ErrInvalidRecordKey, mysql.MySQLErrName[mysql.ErrInvalidRecordKey]) + errInvalidIndexKey = terror.ClassXEval.New(mysql.ErrInvalidIndexKey, mysql.MySQLErrName[mysql.ErrInvalidIndexKey]) +) + +func init() { + mySQLErrCodes := map[terror.ErrCode]uint16{ + mysql.ErrInvalidKey: mysql.ErrInvalidKey, + mysql.ErrInvalidRecordKey: mysql.ErrInvalidRecordKey, + mysql.ErrInvalidIndexKey: mysql.ErrInvalidIndexKey, + } + terror.ErrClassToMySQLCodes[terror.ClassXEval] = mySQLErrCodes +} diff --git a/tablecodec/tablecodec_test.go b/tablecodec/tablecodec_test.go new file mode 100644 index 0000000..f6f2cbe --- /dev/null +++ b/tablecodec/tablecodec_test.go @@ -0,0 +1,295 @@ +// Copyright 2016 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package tablecodec + +import ( + "fmt" + "math" + "testing" + "time" + + . 
"github.com/pingcap/check" + "github.com/pingcap/tidb/kv" + "github.com/pingcap/tidb/parser/mysql" + "github.com/pingcap/tidb/parser/terror" + "github.com/pingcap/tidb/sessionctx/stmtctx" + "github.com/pingcap/tidb/types" + "github.com/pingcap/tidb/util/codec" + "github.com/pingcap/tidb/util/testleak" +) + +func TestT(t *testing.T) { + TestingT(t) +} + +var _ = Suite(&testTableCodecSuite{}) + +type testTableCodecSuite struct{} + +// TestTableCodec tests some functions in package tablecodec +// TODO: add more tests. +func (s *testTableCodecSuite) TestTableCodec(c *C) { + defer testleak.AfterTest(c)() + key := EncodeRowKey(1, codec.EncodeInt(nil, 2)) + h, err := DecodeRowKey(key) + c.Assert(err, IsNil) + c.Assert(h, Equals, int64(2)) + + key = EncodeRowKeyWithHandle(1, 2) + h, err = DecodeRowKey(key) + c.Assert(err, IsNil) + c.Assert(h, Equals, int64(2)) +} + +func (s *testTableCodecSuite) TestCutKeyNew(c *C) { + values := []types.Datum{types.NewIntDatum(1), types.NewBytesDatum([]byte("abc")), types.NewFloat64Datum(5.5)} + handle := types.NewIntDatum(100) + values = append(values, handle) + sc := &stmtctx.StatementContext{TimeZone: time.UTC} + encodedValue, err := codec.EncodeKey(sc, nil, values...) + c.Assert(err, IsNil) + tableID := int64(4) + indexID := int64(5) + indexKey := EncodeIndexSeekKey(tableID, indexID, encodedValue) + valuesBytes, handleBytes, err := CutIndexKeyNew(indexKey, 3) + c.Assert(err, IsNil) + for i := 0; i < 3; i++ { + valueBytes := valuesBytes[i] + var val types.Datum + _, val, _ = codec.DecodeOne(valueBytes) + c.Assert(val, DeepEquals, values[i]) + } + _, handleVal, _ := codec.DecodeOne(handleBytes) + c.Assert(handleVal, DeepEquals, types.NewIntDatum(100)) +} + +func (s *testTableCodecSuite) TestCutKey(c *C) { + colIDs := []int64{1, 2, 3} + values := []types.Datum{types.NewIntDatum(1), types.NewBytesDatum([]byte("abc")), types.NewFloat64Datum(5.5)} + handle := types.NewIntDatum(100) + values = append(values, handle) + sc := &stmtctx.StatementContext{TimeZone: time.UTC} + encodedValue, err := codec.EncodeKey(sc, nil, values...) 
+ c.Assert(err, IsNil) + tableID := int64(4) + indexID := int64(5) + indexKey := EncodeIndexSeekKey(tableID, indexID, encodedValue) + valuesMap, handleBytes, err := CutIndexKey(indexKey, colIDs) + c.Assert(err, IsNil) + for i, colID := range colIDs { + valueBytes := valuesMap[colID] + var val types.Datum + _, val, _ = codec.DecodeOne(valueBytes) + c.Assert(val, DeepEquals, values[i]) + } + _, handleVal, _ := codec.DecodeOne(handleBytes) + c.Assert(handleVal, DeepEquals, types.NewIntDatum(100)) +} + +func (s *testTableCodecSuite) TestIndexKey(c *C) { + tableID := int64(4) + indexID := int64(5) + indexKey := EncodeIndexSeekKey(tableID, indexID, []byte{}) + tTableID, tIndexID, isRecordKey, err := DecodeKeyHead(indexKey) + c.Assert(err, IsNil) + c.Assert(tTableID, Equals, tableID) + c.Assert(tIndexID, Equals, indexID) + c.Assert(isRecordKey, IsFalse) +} + +func (s *testTableCodecSuite) TestRecordKey(c *C) { + tableID := int64(55) + tableKey := EncodeRowKeyWithHandle(tableID, math.MaxUint32) + tTableID, _, isRecordKey, err := DecodeKeyHead(tableKey) + c.Assert(err, IsNil) + c.Assert(tTableID, Equals, tableID) + c.Assert(isRecordKey, IsTrue) + + encodedHandle := codec.EncodeInt(nil, math.MaxUint32) + rowKey := EncodeRowKey(tableID, encodedHandle) + c.Assert([]byte(tableKey), BytesEquals, []byte(rowKey)) + tTableID, handle, err := DecodeRecordKey(rowKey) + c.Assert(err, IsNil) + c.Assert(tTableID, Equals, tableID) + c.Assert(handle, Equals, int64(math.MaxUint32)) + + recordPrefix := GenTableRecordPrefix(tableID) + rowKey = EncodeRecordKey(recordPrefix, math.MaxUint32) + c.Assert([]byte(tableKey), BytesEquals, []byte(rowKey)) + + _, _, err = DecodeRecordKey(nil) + c.Assert(err, NotNil) + _, _, err = DecodeRecordKey([]byte("abcdefghijklmnopqrstuvwxyz")) + c.Assert(err, NotNil) + c.Assert(DecodeTableID(nil), Equals, int64(0)) +} + +func (s *testTableCodecSuite) TestPrefix(c *C) { + const tableID int64 = 66 + key := EncodeTablePrefix(tableID) + tTableID := DecodeTableID(key) + c.Assert(tTableID, Equals, int64(tableID)) + + c.Assert([]byte(TablePrefix()), BytesEquals, tablePrefix) + + tablePrefix1 := GenTablePrefix(tableID) + c.Assert([]byte(tablePrefix1), BytesEquals, []byte(key)) + + indexPrefix := EncodeTableIndexPrefix(tableID, math.MaxUint32) + tTableID, indexID, isRecordKey, err := DecodeKeyHead(indexPrefix) + c.Assert(err, IsNil) + c.Assert(tTableID, Equals, tableID) + c.Assert(indexID, Equals, int64(math.MaxUint32)) + c.Assert(isRecordKey, IsFalse) + + prefixKey := GenTableIndexPrefix(tableID) + c.Assert(DecodeTableID(prefixKey), Equals, tableID) + + c.Assert(TruncateToRowKeyLen(append(indexPrefix, "xyz"...)), HasLen, RecordRowKeyLen) + c.Assert(TruncateToRowKeyLen(key), HasLen, len(key)) +} + +func (s *testTableCodecSuite) TestReplaceRecordKeyTableID(c *C) { + tableID := int64(1) + tableKey := EncodeRowKeyWithHandle(tableID, 1) + tTableID, _, _, err := DecodeKeyHead(tableKey) + c.Assert(err, IsNil) + c.Assert(tTableID, Equals, tableID) + + tableID = 2 + tableKey = ReplaceRecordKeyTableID(tableKey, tableID) + tTableID, _, _, err = DecodeKeyHead(tableKey) + c.Assert(err, IsNil) + c.Assert(tTableID, Equals, tableID) + + tableID = 3 + ReplaceRecordKeyTableID(tableKey, tableID) + tableKey = ReplaceRecordKeyTableID(tableKey, tableID) + tTableID, _, _, err = DecodeKeyHead(tableKey) + c.Assert(err, IsNil) + c.Assert(tTableID, Equals, tableID) + + tableID = -1 + tableKey = ReplaceRecordKeyTableID(tableKey, tableID) + tTableID, _, _, err = DecodeKeyHead(tableKey) + c.Assert(err, IsNil) + 
c.Assert(tTableID, Equals, tableID) +} + +func (s *testTableCodecSuite) TestDecodeIndexKey(c *C) { + tableID := int64(4) + indexID := int64(5) + values := []types.Datum{ + types.NewIntDatum(1), + types.NewBytesDatum([]byte("abc")), + types.NewFloat64Datum(123.45), + // MysqlTime is not supported. + // types.NewTimeDatum(types.Time{ + // Time: types.FromGoTime(time.Now()), + // Fsp: 6, + // Type: mysql.TypeTimestamp, + // }), + } + valueStrs := make([]string, 0, len(values)) + for _, v := range values { + str, err := v.ToString() + if err != nil { + str = fmt.Sprintf("%d-%v", v.Kind(), v.GetValue()) + } + valueStrs = append(valueStrs, str) + } + sc := &stmtctx.StatementContext{TimeZone: time.UTC} + encodedValue, err := codec.EncodeKey(sc, nil, values...) + c.Assert(err, IsNil) + indexKey := EncodeIndexSeekKey(tableID, indexID, encodedValue) + + decodeTableID, decodeIndexID, decodeValues, err := DecodeIndexKey(indexKey) + c.Assert(err, IsNil) + c.Assert(decodeTableID, Equals, tableID) + c.Assert(decodeIndexID, Equals, indexID) + c.Assert(decodeValues, DeepEquals, valueStrs) +} + +func (s *testTableCodecSuite) TestCutPrefix(c *C) { + key := EncodeTableIndexPrefix(42, 666) + res := CutRowKeyPrefix(key) + c.Assert(res, BytesEquals, []byte{0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2, 0x9a}) + res = CutIndexPrefix(key) + c.Assert(res, BytesEquals, []byte{}) +} + +func (s *testTableCodecSuite) TestRange(c *C) { + s1, e1 := GetTableHandleKeyRange(22) + s2, e2 := GetTableHandleKeyRange(23) + c.Assert([]byte(s1), Less, []byte(e1)) + c.Assert([]byte(e1), Less, []byte(s2)) + c.Assert([]byte(s2), Less, []byte(e2)) + + s1, e1 = GetTableIndexKeyRange(42, 666) + s2, e2 = GetTableIndexKeyRange(42, 667) + c.Assert([]byte(s1), Less, []byte(e1)) + c.Assert([]byte(e1), Less, []byte(s2)) + c.Assert([]byte(s2), Less, []byte(e2)) +} + +func (s *testTableCodecSuite) TestDecodeAutoIDMeta(c *C) { + keyBytes := []byte{0x6d, 0x44, 0x42, 0x3a, 0x35, 0x36, 0x0, 0x0, 0x0, 0xfc, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x68, 0x54, 0x49, 0x44, 0x3a, 0x31, 0x30, 0x38, 0x0, 0xfe} + key, field, err := DecodeMetaKey(kv.Key(keyBytes)) + c.Assert(err, IsNil) + c.Assert(string(key), Equals, "DB:56") + c.Assert(string(field), Equals, "TID:108") +} + +func BenchmarkHasTablePrefix(b *testing.B) { + k := kv.Key("foobar") + for i := 0; i < b.N; i++ { + hasTablePrefix(k) + } +} + +func BenchmarkHasTablePrefixBuiltin(b *testing.B) { + k := kv.Key("foobar") + for i := 0; i < b.N; i++ { + k.HasPrefix(tablePrefix) + } +} + +// Bench result: +// BenchmarkEncodeValue 5000000 368 ns/op +func BenchmarkEncodeValue(b *testing.B) { + row := make([]types.Datum, 7) + row[0] = types.NewIntDatum(100) + row[1] = types.NewBytesDatum([]byte("abc")) + row[2] = types.NewFloat32Datum(1.5) + b.ResetTimer() + encodedCol := make([]byte, 0, 16) + for i := 0; i < b.N; i++ { + for _, d := range row { + encodedCol = encodedCol[:0] + EncodeValue(nil, encodedCol, d) + } + } +} + +func (s *testTableCodecSuite) TestError(c *C) { + kvErrs := []*terror.Error{ + errInvalidKey, + errInvalidRecordKey, + errInvalidIndexKey, + } + for _, err := range kvErrs { + code := err.ToSQLError().Code + c.Assert(code != mysql.ErrUnknown && code == uint16(err.Code()), IsTrue, Commentf("err: %v", err)) + } +} diff --git a/tidb-server/main.go b/tidb-server/main.go new file mode 100644 index 0000000..62d80c9 --- /dev/null +++ b/tidb-server/main.go @@ -0,0 +1,291 @@ +// Copyright 2015 PingCAP, Inc. 
+// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package main + +import ( + "context" + "flag" + "fmt" + "os" + "strconv" + "sync/atomic" + "time" + + "github.com/pingcap/errors" + "github.com/pingcap/log" + "github.com/pingcap/tidb/config" + "github.com/pingcap/tidb/domain" + "github.com/pingcap/tidb/kv" + "github.com/pingcap/tidb/parser/terror" + "github.com/pingcap/tidb/server" + "github.com/pingcap/tidb/session" + "github.com/pingcap/tidb/sessionctx/variable" + kvstore "github.com/pingcap/tidb/store" + "github.com/pingcap/tidb/store/mockstore" + "github.com/pingcap/tidb/store/tikv" + "github.com/pingcap/tidb/util/logutil" + "github.com/pingcap/tidb/util/signal" + "go.uber.org/automaxprocs/maxprocs" + "go.uber.org/zap" +) + +// Flag Names +const ( + nmConfig = "config" + nmStore = "store" + nmStorePath = "path" + nmHost = "host" + nmAdvertiseAddress = "advertise-address" + nmPort = "P" + nmCors = "cors" + nmLogLevel = "L" + nmLogFile = "log-file" + nmReportStatus = "report-status" + nmStatusHost = "status-host" + nmStatusPort = "status" + + nmDdlLease = "lease" +) + +var ( + configPath = flag.String(nmConfig, "", "config file path") + + // Base + store = flag.String(nmStore, "mocktikv", "registered store name, [tikv, mocktikv]") + storePath = flag.String(nmStorePath, "/tmp/tidb", "tidb storage path") + host = flag.String(nmHost, "0.0.0.0", "tidb server host") + advertiseAddress = flag.String(nmAdvertiseAddress, "", "tidb server advertise IP") + port = flag.String(nmPort, "4000", "tidb server port") + cors = flag.String(nmCors, "", "tidb server allow cors origin") + ddlLease = flag.String(nmDdlLease, "45s", "schema lease duration, very dangerous to change only if you know what you do") + + // Log + logLevel = flag.String(nmLogLevel, "info", "log level: info, debug, warn, error, fatal") + logFile = flag.String(nmLogFile, "", "log file path") + + // Status + reportStatus = flagBoolean(nmReportStatus, true, "If enable status report HTTP service.") + statusHost = flag.String(nmStatusHost, "0.0.0.0", "tidb server status host") + statusPort = flag.String(nmStatusPort, "10080", "tidb server status port") +) + +var ( + cfg *config.Config + storage kv.Storage + dom *domain.Domain + svr *server.Server + graceful bool +) + +func main() { + flag.Parse() + registerStores() + + configWarning := loadConfig() + overrideConfig() + setGlobalVars() + setupLog() + // If configStrict had been specified, and there had been an error, the server would already + // have exited by now. If configWarning is not an empty string, write it to the log now that + // it's been properly set up. 
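The flags defined above only take effect when the user actually sets them: overrideConfig further down relies on flag.Visit, which visits only the flags present on the command line, so flag defaults never overwrite values loaded from the config file. A minimal standalone sketch of that pattern, with made-up flag and config field names:

```go
package main

import (
	"flag"
	"fmt"
)

type config struct {
	Host string
	Port uint
}

func main() {
	host := flag.String("host", "0.0.0.0", "server host")
	port := flag.Uint("P", 4000, "server port")
	flag.Parse()

	// Pretend this came from a config file.
	cfg := config{Host: "127.0.0.1", Port: 4001}

	// flag.Visit walks only the flags that were explicitly set on the
	// command line, so unset flags keep the config-file values.
	actual := map[string]bool{}
	flag.Visit(func(f *flag.Flag) { actual[f.Name] = true })

	if actual["host"] {
		cfg.Host = *host
	}
	if actual["P"] {
		cfg.Port = *port
	}
	fmt.Printf("%+v\n", cfg)
}
```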
+ if configWarning != "" { + log.Warn(configWarning) + } + createStoreAndDomain() + createServer() + signal.SetupSignalHandler(serverShutdown) + runServer() + cleanup() + syncLog() +} + +func syncLog() { + if err := log.Sync(); err != nil { + fmt.Fprintln(os.Stderr, "sync log err:", err) + os.Exit(1) + } +} + +func registerStores() { + err := kvstore.Register("tikv", tikv.Driver{}) + terror.MustNil(err) + err = kvstore.Register("mocktikv", mockstore.MockDriver{}) + terror.MustNil(err) +} + +func createStoreAndDomain() { + fullPath := fmt.Sprintf("%s://%s", cfg.Store, cfg.Path) + var err error + storage, err = kvstore.New(fullPath) + terror.MustNil(err) + // Bootstrap a session to load information schema. + dom, err = session.BootstrapSession(storage) + terror.MustNil(err) +} + +// parseDuration parses lease argument string. +func parseDuration(lease string) time.Duration { + dur, err := time.ParseDuration(lease) + if err != nil { + dur, err = time.ParseDuration(lease + "s") + } + if err != nil || dur < 0 { + log.Fatal("invalid lease duration", zap.String("lease", lease)) + } + return dur +} + +func flagBoolean(name string, defaultVal bool, usage string) *bool { + if !defaultVal { + // Fix #4125, golang do not print default false value in usage, so we append it. + usage = fmt.Sprintf("%s (default false)", usage) + return flag.Bool(name, defaultVal, usage) + } + return flag.Bool(name, defaultVal, usage) +} + +func loadConfig() string { + cfg = config.GetGlobalConfig() + if *configPath != "" { + err := cfg.Load(*configPath) + if err == nil { + return "" + } + + // Unused config item erro turns to warnings. + if _, ok := err.(*config.ErrConfigValidationFailed); ok { + return err.Error() + } + + terror.MustNil(err) + } + return "" +} + +func overrideConfig() { + actualFlags := make(map[string]bool) + flag.Visit(func(f *flag.Flag) { + actualFlags[f.Name] = true + }) + + // Base + if actualFlags[nmHost] { + cfg.Host = *host + } + if actualFlags[nmAdvertiseAddress] { + cfg.AdvertiseAddress = *advertiseAddress + } + if len(cfg.AdvertiseAddress) == 0 { + cfg.AdvertiseAddress = cfg.Host + } + var err error + if actualFlags[nmPort] { + var p int + p, err = strconv.Atoi(*port) + terror.MustNil(err) + cfg.Port = uint(p) + } + if actualFlags[nmCors] { + fmt.Println(cors) + cfg.Cors = *cors + } + if actualFlags[nmStore] { + cfg.Store = *store + } + if actualFlags[nmStorePath] { + cfg.Path = *storePath + } + if actualFlags[nmDdlLease] { + cfg.Lease = *ddlLease + } + + // Log + if actualFlags[nmLogLevel] { + cfg.Log.Level = *logLevel + } + if actualFlags[nmLogFile] { + cfg.Log.File.Filename = *logFile + } + + // Status + if actualFlags[nmReportStatus] { + cfg.Status.ReportStatus = *reportStatus + } + if actualFlags[nmStatusHost] { + cfg.Status.StatusHost = *statusHost + } + if actualFlags[nmStatusPort] { + var p int + p, err = strconv.Atoi(*statusPort) + terror.MustNil(err) + cfg.Status.StatusPort = uint(p) + } +} + +func setGlobalVars() { + ddlLeaseDuration := parseDuration(cfg.Lease) + session.SetSchemaLease(ddlLeaseDuration) + + variable.SysVars[variable.Port].Value = fmt.Sprintf("%d", cfg.Port) + variable.SysVars[variable.DataDir].Value = cfg.Path +} + +func setupLog() { + err := logutil.InitZapLogger(cfg.Log.ToLogConfig()) + terror.MustNil(err) + + err = logutil.InitLogger(cfg.Log.ToLogConfig()) + terror.MustNil(err) + // Disable automaxprocs log + nopLog := func(string, ...interface{}) {} + _, err = maxprocs.Set(maxprocs.Logger(nopLog)) + terror.MustNil(err) +} + +func createServer() { + driver := 
server.NewTiDBDriver(storage) + var err error + svr, err = server.NewServer(cfg, driver) + // Both domain and storage have started, so we have to clean them before exiting. + terror.MustNil(err, closeDomainAndStorage) + svr.SetDomain(dom) +} + +func serverShutdown(isgraceful bool) { + if isgraceful { + graceful = true + } + svr.Close() +} + +func runServer() { + err := svr.Run() + terror.MustNil(err) +} + +func closeDomainAndStorage() { + atomic.StoreUint32(&tikv.ShuttingDown, 1) + dom.Close() + err := storage.Close() + terror.Log(errors.Trace(err)) +} + +func cleanup() { + if graceful { + svr.GracefulDown(context.Background(), nil) + } else { + svr.TryGracefulDown() + } + closeDomainAndStorage() +} diff --git a/tidb-server/main_test.go b/tidb-server/main_test.go new file mode 100644 index 0000000..747b405 --- /dev/null +++ b/tidb-server/main_test.go @@ -0,0 +1,34 @@ +// Copyright 2019 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package main + +import ( + "testing" + + . "github.com/pingcap/check" +) + +var isCoverageServer = "0" + +// TestRunMain is a dummy test case, which contains only the main function of tidb-server, +// and it is used to generate coverage_server. +func TestRunMain(t *testing.T) { + if isCoverageServer == "1" { + main() + } +} + +func TestT(t *testing.T) { + TestingT(t) +} diff --git a/tools/check/check-gogenerate.sh b/tools/check/check-gogenerate.sh new file mode 100644 index 0000000..49c7167 --- /dev/null +++ b/tools/check/check-gogenerate.sh @@ -0,0 +1,12 @@ +#!/usr/bin/env bash +set -euo pipefail + +go generate ./... +set +e +diffline=$(git status -s | awk '{print $2}' | xargs grep '^// Code generated .* DO NOT EDIT\.$' 2>/dev/null) +set -e +if [[ $diffline != "" ]] +then + echo "Your commit is changed after running go generate ./..., it should not hanppen." + exit 1 +fi diff --git a/tools/check/check-tidy.sh b/tools/check/check-tidy.sh new file mode 100644 index 0000000..b20710c --- /dev/null +++ b/tools/check/check-tidy.sh @@ -0,0 +1,13 @@ +#!/usr/bin/env bash + +# set is used to set the environment variables. +# -e: exit immediately when a command returning a non-zero exit code. +# -u: treat unset variables as an error. +# -o pipefail: sets the exit code of a pipeline to that of the rightmost command to exit with a non-zero status, +# or to zero if all commands of the pipeline exit successfully. +set -euo pipefail + +# go mod tidy do not support symlink +cd -P . + +GO111MODULE=on go mod tidy diff --git a/tools/check/check_parser_replace.sh b/tools/check/check_parser_replace.sh new file mode 100644 index 0000000..959a274 --- /dev/null +++ b/tools/check/check_parser_replace.sh @@ -0,0 +1,11 @@ +#!/usr/bin/env bash +set -uo pipefail + +grep "replace.*github.com/pingcap/parser" go.mod +grep_ret=$? + +if [ $grep_ret -eq 0 ];then + exit 1 +else + exit 0 +fi diff --git a/tools/check/check_testSuite.sh b/tools/check/check_testSuite.sh new file mode 100644 index 0000000..5ae8ece --- /dev/null +++ b/tools/check/check_testSuite.sh @@ -0,0 +1,20 @@ +#!/usr/bin/env bash + +set -euo pipefail + +exitCode=0 + +list=$(find . 
-name "*_test.go" -not -path "./vendor/*" -print0 | xargs -0 grep -E "type test(.*)Suite" | awk -F ':| ' '{print $1" "$3}') +while read -r file testSuite; do + # TODO: ugly regex + # TODO: check code comment + dir=$(dirname "$file") + if ! find "$dir" -name "*_test.go" -print0 | xargs -0 grep -E "_ = (check\.)?(Suite|SerialSuites)\((&?${testSuite}{|new\(${testSuite}\))" > /dev/null + then + if find "$dir" -name "*_test.go" -print0 | xargs -0 grep -E "func \((.* )?\*?${testSuite}\) Test" > /dev/null + then + echo "${testSuite} in ${dir} is not enabled" && exitCode=1 + fi + fi +done <<< "$list" +exit ${exitCode} diff --git a/tools/check/errcheck_excludes.txt b/tools/check/errcheck_excludes.txt new file mode 100644 index 0000000..7b25b70 --- /dev/null +++ b/tools/check/errcheck_excludes.txt @@ -0,0 +1,3 @@ +fmt.Fprintf +fmt.Fprint +fmt.Sscanf diff --git a/tools/check/go.mod b/tools/check/go.mod new file mode 100644 index 0000000..91fc1c9 --- /dev/null +++ b/tools/check/go.mod @@ -0,0 +1,25 @@ +module github.com/pingcap/tidb/_tools + +require ( + github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc // indirect + github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf // indirect + github.com/chzchzchz/goword v0.0.0-20170907005317-a9744cb52b03 + github.com/client9/misspell v0.3.4 + github.com/dnephin/govet v0.0.0-20171012192244-4a96d43e39d3 + github.com/google/shlex v0.0.0-20181106134648-c34317bd91bf // indirect + github.com/gordonklaus/ineffassign v0.0.0-20180909121442-1003c8bd00dc + github.com/kisielk/errcheck v1.2.0 + github.com/mgechev/revive v0.0.0-20181210140514-b4cc152955fb + github.com/nicksnyder/go-i18n v1.10.0 // indirect + github.com/pelletier/go-toml v1.2.0 // indirect + github.com/securego/gosec v0.0.0-20181211171558-12400f9a1ca7 + golang.org/x/tools v0.0.0-20190925020647-22afafe3322a // indirect + gopkg.in/alecthomas/gometalinter.v2 v2.0.12 // indirect + gopkg.in/alecthomas/gometalinter.v3 v3.0.0 // indirect + gopkg.in/alecthomas/kingpin.v2 v2.2.6 // indirect + gopkg.in/alecthomas/kingpin.v3-unstable v3.0.0-20170321130658-9670b87a702e // indirect + gopkg.in/yaml.v2 v2.2.2 // indirect + honnef.co/go/tools v0.0.0-20180920025451-e3ad64cb4ed3 +) + +go 1.13 diff --git a/tools/check/go.sum b/tools/check/go.sum new file mode 100644 index 0000000..49bfd82 --- /dev/null +++ b/tools/check/go.sum @@ -0,0 +1,83 @@ +github.com/BurntSushi/toml v0.3.0 h1:e1/Ivsx3Z0FVTV0NSOv/aVgbUWyQuzj7DDnFblkRvsY= +github.com/BurntSushi/toml v0.3.0/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= +github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= +github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf h1:qet1QNfXsQxTZqLG4oE62mJzwPIB8+Tee4RNCL9ulrY= +github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= +github.com/chzchzchz/goword v0.0.0-20170907005317-a9744cb52b03/go.mod h1:uFE9hX+zXEwvyUThZ4gDb9vkAwc5DoHUnRSEpH0VrOs= +github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/dnephin/govet v0.0.0-20171012192244-4a96d43e39d3/go.mod h1:pPTX0MEEoAnfbrAGFj4nSVNhl6YbugRj6eardUZdtGo= +github.com/fatih/color v1.7.0 h1:DkWD4oS2D8LGGgTQ6IvwJJXSL5Vp2ffcQg58nFV38Ys= +github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= +github.com/fatih/structtag v1.0.0 
h1:pTHj65+u3RKWYPSGaU290FpI/dXxTaHdVwVwbcPKmEc= +github.com/fatih/structtag v1.0.0/go.mod h1:IKitwq45uXL/yqi5mYghiD3w9H6eTOvI9vnk8tXMphA= +github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/google/shlex v0.0.0-20181106134648-c34317bd91bf h1:7+FW5aGwISbqUtkfmIpZJGRgNFg2ioYPvFaUxdqpDsg= +github.com/google/shlex v0.0.0-20181106134648-c34317bd91bf/go.mod h1:RpwtwJQFrIEPstU94h88MWPXP2ektJZ8cZ0YntAmXiE= +github.com/gordonklaus/ineffassign v0.0.0-20180909121442-1003c8bd00dc h1:cJlkeAx1QYgO5N80aF5xRGstVsRQwgLR7uA2FnP1ZjY= +github.com/gordonklaus/ineffassign v0.0.0-20180909121442-1003c8bd00dc/go.mod h1:cuNKsD1zp2v6XfE/orVX2QE1LC+i254ceGcVeDT3pTU= +github.com/kisielk/errcheck v1.2.0 h1:reN85Pxc5larApoH1keMBiu2GWtPqXQ1nc9gx+jOU+E= +github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00= +github.com/kisielk/gotool v0.0.0-20161130080628-0de1eaf82fa3 h1:s/sV9geKJwXXzcrFiQdiiIFgfesbREplXWR9ZFgnGSQ= +github.com/kisielk/gotool v0.0.0-20161130080628-0de1eaf82fa3/go.mod h1:jxZFDH7ILpTPQTk+E2s+z4CUas9lVNjIuKR4c5/zKgM= +github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= +github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= +github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= +github.com/mattn/go-colorable v0.0.9 h1:UVL0vNpWh04HeJXV0KLcaT7r06gOH2l4OW6ddYRUIY4= +github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU= +github.com/mattn/go-isatty v0.0.4 h1:bnP0vzxcAdeI1zdubAl5PjU6zsERjGZb7raWodagDYs= +github.com/mattn/go-isatty v0.0.4/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= +github.com/mattn/go-runewidth v0.0.3 h1:a+kO+98RDGEfo6asOGMmpodZq4FNtnGP54yps8BzLR4= +github.com/mattn/go-runewidth v0.0.3/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU= +github.com/mgechev/dots v0.0.0-20180605013149-8e09d8ea2757 h1:KTwJ7Lo3KDKMknRYN5JEFRGIM4IkG59QjFFM2mxsMEU= +github.com/mgechev/dots v0.0.0-20180605013149-8e09d8ea2757/go.mod h1:KQ7+USdGKfpPjXk4Ga+5XxQM4Lm4e3gAogrreFAYpOg= +github.com/mgechev/revive v0.0.0-20181210140514-b4cc152955fb h1:bLiKpCHe+ngBsF1o7DjZTmoffHEy2gdQ/+9NunuJ4ZY= +github.com/mgechev/revive v0.0.0-20181210140514-b4cc152955fb/go.mod h1:pVHj2KvxEhotJ6Lmr7zb3YgNMX1QKt8cyp6fdPHOrzU= +github.com/mozilla/tls-observatory v0.0.0-20180409132520-8791a200eb40/go.mod h1:SrKMQvPiws7F7iqYp8/TX+IhxCYhzr6N/1yb8cwHsGk= +github.com/nbutton23/zxcvbn-go v0.0.0-20160627004424-a22cb81b2ecd h1:hEzcdYzgmGA1zDrSYdh+OE4H43RrglXdZQ5ip/+93GU= +github.com/nbutton23/zxcvbn-go v0.0.0-20160627004424-a22cb81b2ecd/go.mod h1:o96djdrsSGy3AWPyBgZMAGfxZNfgntdJG+11KU4QvbU= +github.com/nicksnyder/go-i18n v1.10.0 h1:5AzlPKvXBH4qBzmZ09Ua9Gipyruv6uApMcrNZdo96+Q= +github.com/nicksnyder/go-i18n v1.10.0/go.mod h1:HrK7VCrbOvQoUAQ7Vpy7i87N7JZZZ7R2xBGjv0j365Q= +github.com/olekukonko/tablewriter v0.0.0-20180912035003-be2c049b30cc h1:rQ1O4ZLYR2xXHXgBCCfIIGnuZ0lidMQw2S5n1oOv+Wg= +github.com/olekukonko/tablewriter v0.0.0-20180912035003-be2c049b30cc/go.mod h1:vsDQFd/mU46D+Z4whnwzcISnGGzXWMclvtLoiIKAKIo= +github.com/onsi/ginkgo v0.0.0-20170829012221-11459a886d9c/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/gomega v0.0.0-20170829124025-dcabb60a477c/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA= +github.com/pelletier/go-toml v1.2.0 h1:T5zMGML61Wp+FlcbWjRDT7yAxhJNAiPPLOFECq181zc= +github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic= +github.com/pkg/errors v0.8.0/go.mod 
h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/ryanuber/go-glob v0.0.0-20170128012129-256dc444b735 h1:7YvPJVmEeFHR1Tj9sZEYsmarJEQfMVYpd/Vyy/A8dqE= +github.com/ryanuber/go-glob v0.0.0-20170128012129-256dc444b735/go.mod h1:807d1WSdnB0XRJzKNil9Om6lcp/3a0v4qIHxIXzX/Yc= +github.com/securego/gosec v0.0.0-20181211171558-12400f9a1ca7 h1:Ca7U7/rZ+caxjW2na7wbmgmaPsoSCIlpc6sm0aWtFg0= +github.com/securego/gosec v0.0.0-20181211171558-12400f9a1ca7/go.mod h1:m3KbCTwh9vLhm6AKBjE+ALesKilKcQHezI1uVOti0Ks= +github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/net v0.0.0-20170915142106-8351a756f30f/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sys v0.0.0-20171026204733-164713f0dfce/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/text v0.0.0-20170915090833-1cbadb444a80/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/tools v0.0.0-20170915040203-e531a2a1c15f/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20180911133044-677d2ff680c1/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20181030221726-6c7e314b6563 h1:NIou6eNFigscvKJmsbyez16S2cIS6idossORlFtSt2E= +golang.org/x/tools v0.0.0-20181030221726-6c7e314b6563/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190925020647-22afafe3322a h1:3GxqzBPBt1O2dIiPnzldQ5d25CAMWJFBZTpqxLPfjs8= +golang.org/x/tools v0.0.0-20190925020647-22afafe3322a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +gopkg.in/alecthomas/gometalinter.v2 v2.0.12 h1:/xBWwtjmOmVxn8FXfIk9noV8m2E2Id9jFfUY/Mh9QAI= +gopkg.in/alecthomas/gometalinter.v2 v2.0.12/go.mod h1:NDRytsqEZyolNuAgTzJkZMkSQM7FIKyzVzGhjB/qfYo= +gopkg.in/alecthomas/gometalinter.v3 v3.0.0 h1:tKnpLD70cWDacxrv9JZ4atld7RPoPiHOBfad6mPmyBw= +gopkg.in/alecthomas/gometalinter.v3 v3.0.0/go.mod h1:sE0aqUDPY4ibZWdfOxx4ZVG9CD+Y5I1H+Snwv8a3r/s= +gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= +gopkg.in/alecthomas/kingpin.v3-unstable v3.0.0-20170321130658-9670b87a702e h1:/Kkmnr1GwjGf9vuTsP/PZLLQxJolIZl7UGN+Or0co1A= +gopkg.in/alecthomas/kingpin.v3-unstable v3.0.0-20170321130658-9670b87a702e/go.mod h1:3HH7i1SgMqlzxCcBmUHW657sD4Kvv9sC3HpL3YukzwA= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/yaml.v2 v2.0.0-20170812160011-eb3733d160e7/go.mod 
h1:JAlM8MvJe8wmxCU4Bli9HhUf9+ttbYbLASfIpnQbh74=
+gopkg.in/yaml.v2 v2.2.2 h1:ZCJp+EgiOT7lHqUV2J862kp8Qj64Jo6az82+3Td9dZw=
+gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
+honnef.co/go/tools v0.0.0-20180920025451-e3ad64cb4ed3/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
diff --git a/tools/check/revive.toml b/tools/check/revive.toml
new file mode 100644
index 0000000..19193c2
--- /dev/null
+++ b/tools/check/revive.toml
@@ -0,0 +1,51 @@
+ignoreGeneratedHeader = false
+severity = "error"
+confidence = 0.8
+errorCode = -1
+warningCode = -1
+
+[rule.blank-imports]
+[rule.context-as-argument]
+[rule.dot-imports]
+[rule.error-return]
+[rule.error-strings]
+[rule.error-naming]
+[rule.exported]
+[rule.if-return]
+[rule.var-naming]
+[rule.package-comments]
+[rule.range]
+[rule.receiver-naming]
+[rule.indent-error-flow]
+[rule.superfluous-else]
+[rule.modifies-parameter]
+
+# This can be checked by other tools like megacheck
+[rule.unreachable-code]
+
+
+# Currently this makes too much noise, but should add it in
+# and perhaps ignore it in a few files
+#[rule.confusing-naming]
+# severity = "warning"
+#[rule.confusing-results]
+# severity = "warning"
+#[rule.unused-parameter]
+# severity = "warning"
+#[rule.deep-exit]
+# severity = "warning"
+#[rule.flag-parameter]
+# severity = "warning"
+
+
+
+# Adding these will slow down the linter
+# They are already provided by megacheck
+# [rule.unexported-return]
+# [rule.time-naming]
+# [rule.errorf]
+
+# Adding these will slow down the linter
+# Not sure if they are already provided by megacheck
+# [rule.var-declaration]
+# [rule.context-keys-type]
diff --git a/tutorial.md b/tutorial.md
new file mode 100644
index 0000000..c8e2146
--- /dev/null
+++ b/tutorial.md
@@ -0,0 +1,37 @@
+# Tutorial For Course Participants
+
+This content is currently experimental.
+
+## How to Join the Course
+
+This course uses [Github Classroom](https://classroom.github.com/) to manage assignments. If you want to take part, accept the assignment through the [assignment template](https://classroom.github.com/a/aioto_CO) we created; it will automatically generate an assignment repository for you under the xxx organization.
+
+## Working Locally
+
+You can fetch your assignment locally with the following command:
+
+```
+git clone https://github.com/{Org name}/tinysql-template-{Your name}.git
+```
+
+The assignment template is based on [TinySQL](https://github.com/pingcap-incubator/tinysql); you can pull the latest updates with the following commands.
+
+```
+git remote add upstream https://github.com/pingcap-incubator/tinysql
+git remote set-url --push upstream no_push
+git fetch upstream
+git checkout master
+git rebase upstream/master
+```
+
+## Submitting Assignments
+
+We use CI to evaluate your work: every push to the master branch of your repository triggers the CI pipeline, which tests the correctness of the submitted code and computes the corresponding score. The result is sent by email to the address registered with your Github account.
+
+You can submit your work with the following commands:
+
+```
+git add .
+git commit -m"{Your Commit Message}"
+git push origin master
+```
diff --git a/types/benchmark_test.go b/types/benchmark_test.go
new file mode 100644
index 0000000..1a2c34d
--- /dev/null
+++ b/types/benchmark_test.go
@@ -0,0 +1,57 @@
+// Copyright 2019 PingCAP, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// See the License for the specific language governing permissions and
+// limitations under the License.
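The revive.toml above enables, among other checks, revive's `exported` and `package-comments` rules, which require doc comments on exported identifiers that start with the identifier's name. A minimal sketch of Go code that satisfies those two rules; the package and identifier names here are invented purely for illustration:

```go
// Package example is an illustrative sketch of the doc-comment style that the
// exported and package-comments rules in the revive configuration above expect.
package example

// MaxRetry is the maximum number of retries; the comment starts with the
// identifier name, so revive's exported rule accepts it.
const MaxRetry = 3

// Connect is a placeholder exported function; omitting this comment, or
// starting it with anything other than "Connect", would be flagged.
func Connect(addr string) error {
	_ = addr // no real work; this sketch only demonstrates the lint convention
	return nil
}
```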
+ +package types + +import ( + "math/rand" + "testing" +) + +func BenchmarkDefaultTypeForValue(b *testing.B) { + lenNums := 1000000 + numsFull := make([]uint64, lenNums) + nums64k := make([]uint64, lenNums) + nums512 := make([]uint64, lenNums) + + for i := range numsFull { + r := rand.Uint64() + numsFull[i] = r + nums64k[i] = r % 64000 + nums512[i] = r % 512 + } + + b.Run("LenOfUint64_input full range", func(b *testing.B) { + b.StartTimer() + var ft FieldType + for i := 0; i < b.N; i++ { + DefaultTypeForValue(numsFull[int(i)%lenNums], &ft) + } + }) + + b.Run("LenOfUint64_input 0 to 64K ", func(b *testing.B) { + b.StartTimer() + var ft FieldType + for i := 0; i < b.N; i++ { + DefaultTypeForValue(nums64k[int(i)%lenNums], &ft) + } + }) + + b.Run("LenOfUint64_input 0 to 512 ", func(b *testing.B) { + b.StartTimer() + var ft FieldType + for i := 0; i < b.N; i++ { + DefaultTypeForValue(nums512[int(i)%lenNums], &ft) + } + }) +} diff --git a/types/binary_literal.go b/types/binary_literal.go new file mode 100644 index 0000000..5725b34 --- /dev/null +++ b/types/binary_literal.go @@ -0,0 +1,237 @@ +// Copyright 2017 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package types + +import ( + "bytes" + "encoding/binary" + "encoding/hex" + "fmt" + "math" + "strconv" + "strings" + + "github.com/pingcap/errors" + "github.com/pingcap/tidb/sessionctx/stmtctx" +) + +// BinaryLiteral is the internal type for storing bit / hex literal type. +type BinaryLiteral []byte + +// BitLiteral is the bit literal type. +type BitLiteral BinaryLiteral + +// HexLiteral is the hex literal type. +type HexLiteral BinaryLiteral + +// ZeroBinaryLiteral is a BinaryLiteral literal with zero value. +var ZeroBinaryLiteral = BinaryLiteral{} + +func trimLeadingZeroBytes(bytes []byte) []byte { + if len(bytes) == 0 { + return bytes + } + pos, posMax := 0, len(bytes)-1 + for ; pos < posMax; pos++ { + if bytes[pos] != 0 { + break + } + } + return bytes[pos:] +} + +// NewBinaryLiteralFromUint creates a new BinaryLiteral instance by the given uint value in BitEndian. +// byteSize will be used as the length of the new BinaryLiteral, with leading bytes filled to zero. +// If byteSize is -1, the leading zeros in new BinaryLiteral will be trimmed. +func NewBinaryLiteralFromUint(value uint64, byteSize int) BinaryLiteral { + if byteSize != -1 && (byteSize < 1 || byteSize > 8) { + panic("Invalid byteSize") + } + buf := make([]byte, 8) + binary.BigEndian.PutUint64(buf, value) + if byteSize == -1 { + buf = trimLeadingZeroBytes(buf) + } else { + buf = buf[8-byteSize:] + } + return buf +} + +// String implements fmt.Stringer interface. +func (b BinaryLiteral) String() string { + if len(b) == 0 { + return "" + } + return "0x" + hex.EncodeToString(b) +} + +// ToString returns the string representation for the literal. +func (b BinaryLiteral) ToString() string { + return string(b) +} + +// ToBitLiteralString returns the bit literal representation for the literal. 
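To make the `byteSize` contract of `NewBinaryLiteralFromUint` above concrete: the value is laid out big-endian, padded to `byteSize` bytes, and with `byteSize == -1` the leading zero bytes are trimmed. An illustrative sketch (the function name is invented and it assumes it lives in the `types` package):

```go
package types

import "fmt"

// binaryLiteralLayoutExample is an illustrative sketch (not part of this diff)
// of NewBinaryLiteralFromUint's byteSize handling.
func binaryLiteralLayoutExample() {
	fixed := NewBinaryLiteralFromUint(0x0102, 4)
	fmt.Println(fixed) // 0x00000102: padded to 4 bytes, big-endian

	trimmed := NewBinaryLiteralFromUint(0x0102, -1)
	fmt.Println(trimmed) // 0x0102: leading zero bytes trimmed
}
```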
+func (b BinaryLiteral) ToBitLiteralString(trimLeadingZero bool) string { + if len(b) == 0 { + return "b''" + } + var buf bytes.Buffer + for _, data := range b { + fmt.Fprintf(&buf, "%08b", data) + } + ret := buf.Bytes() + if trimLeadingZero { + ret = bytes.TrimLeft(ret, "0") + if len(ret) == 0 { + ret = []byte{'0'} + } + } + return fmt.Sprintf("b'%s'", string(ret)) +} + +// ToInt returns the int value for the literal. +func (b BinaryLiteral) ToInt(sc *stmtctx.StatementContext) (uint64, error) { + buf := trimLeadingZeroBytes(b) + length := len(buf) + if length == 0 { + return 0, nil + } + if length > 8 { + var err error = ErrTruncatedWrongVal.GenWithStackByArgs("BINARY", b) + if sc != nil { + err = sc.HandleTruncate(err) + } + return math.MaxUint64, err + } + // Note: the byte-order is BigEndian. + val := uint64(buf[0]) + for i := 1; i < length; i++ { + val = (val << 8) | uint64(buf[i]) + } + return val, nil +} + +// Compare compares BinaryLiteral to another one +func (b BinaryLiteral) Compare(b2 BinaryLiteral) int { + bufB := trimLeadingZeroBytes(b) + bufB2 := trimLeadingZeroBytes(b2) + if len(bufB) > len(bufB2) { + return 1 + } + if len(bufB) < len(bufB2) { + return -1 + } + return bytes.Compare(bufB, bufB2) +} + +// ParseBitStr parses bit string. +// The string format can be b'val', B'val' or 0bval, val must be 0 or 1. +// See https://dev.mysql.com/doc/refman/5.7/en/bit-value-literals.html +func ParseBitStr(s string) (BinaryLiteral, error) { + if len(s) == 0 { + return nil, errors.Errorf("invalid empty string for parsing bit type") + } + + if s[0] == 'b' || s[0] == 'B' { + // format is b'val' or B'val' + s = strings.Trim(s[1:], "'") + } else if strings.HasPrefix(s, "0b") { + s = s[2:] + } else { + // here means format is not b'val', B'val' or 0bval. + return nil, errors.Errorf("invalid bit type format %s", s) + } + + if len(s) == 0 { + return ZeroBinaryLiteral, nil + } + + alignedLength := (len(s) + 7) &^ 7 + s = ("00000000" + s)[len(s)+8-alignedLength:] // Pad with zero (slice from `-alignedLength`) + byteLength := len(s) >> 3 + buf := make([]byte, byteLength) + + for i := 0; i < byteLength; i++ { + strPosition := i << 3 + val, err := strconv.ParseUint(s[strPosition:strPosition+8], 2, 8) + if err != nil { + return nil, errors.Trace(err) + } + buf[i] = byte(val) + } + + return buf, nil +} + +// NewBitLiteral parses bit string as BitLiteral type. +func NewBitLiteral(s string) (BitLiteral, error) { + b, err := ParseBitStr(s) + if err != nil { + return BitLiteral{}, err + } + return BitLiteral(b), nil +} + +// ToString implement ast.BinaryLiteral interface +func (b BitLiteral) ToString() string { + return BinaryLiteral(b).ToString() +} + +// ParseHexStr parses hexadecimal string literal. +// See https://dev.mysql.com/doc/refman/5.7/en/hexadecimal-literals.html +func ParseHexStr(s string) (BinaryLiteral, error) { + if len(s) == 0 { + return nil, errors.Errorf("invalid empty string for parsing hexadecimal literal") + } + + if s[0] == 'x' || s[0] == 'X' { + // format is x'val' or X'val' + s = strings.Trim(s[1:], "'") + if len(s)%2 != 0 { + return nil, errors.Errorf("invalid hexadecimal format, must even numbers, but %d", len(s)) + } + } else if strings.HasPrefix(s, "0x") { + s = s[2:] + } else { + // here means format is not x'val', X'val' or 0xval. 
+ return nil, errors.Errorf("invalid hexadecimal format %s", s) + } + + if len(s) == 0 { + return ZeroBinaryLiteral, nil + } + + if len(s)%2 != 0 { + s = "0" + s + } + buf, err := hex.DecodeString(s) + if err != nil { + return nil, errors.Trace(err) + } + return buf, nil +} + +// NewHexLiteral parses hexadecimal string as HexLiteral type. +func NewHexLiteral(s string) (HexLiteral, error) { + h, err := ParseHexStr(s) + if err != nil { + return HexLiteral{}, err + } + return HexLiteral(h), nil +} + +// ToString implement ast.BinaryLiteral interface +func (b HexLiteral) ToString() string { + return BinaryLiteral(b).ToString() +} diff --git a/types/compare.go b/types/compare.go new file mode 100644 index 0000000..101f47e --- /dev/null +++ b/types/compare.go @@ -0,0 +1,134 @@ +// Copyright 2015 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package types + +import ( + "math" + "time" +) + +// CompareInt64 returns an integer comparing the int64 x to y. +func CompareInt64(x, y int64) int { + if x < y { + return -1 + } else if x == y { + return 0 + } + + return 1 +} + +// CompareUint64 returns an integer comparing the uint64 x to y. +func CompareUint64(x, y uint64) int { + if x < y { + return -1 + } else if x == y { + return 0 + } + + return 1 +} + +//VecCompareUU returns []int64 comparing the []uint64 x to []uint64 y +func VecCompareUU(x, y []uint64, res []int64) { + n := len(x) + for i := 0; i < n; i++ { + if x[i] < y[i] { + res[i] = -1 + } else if x[i] == y[i] { + res[i] = 0 + } else { + res[i] = 1 + } + } +} + +//VecCompareII returns []int64 comparing the []int64 x to []int64 y +func VecCompareII(x, y, res []int64) { + n := len(x) + for i := 0; i < n; i++ { + if x[i] < y[i] { + res[i] = -1 + } else if x[i] == y[i] { + res[i] = 0 + } else { + res[i] = 1 + } + } +} + +//VecCompareUI returns []int64 comparing the []uint64 x to []int64y +func VecCompareUI(x []uint64, y, res []int64) { + n := len(x) + for i := 0; i < n; i++ { + if y[i] < 0 || x[i] > math.MaxInt64 { + res[i] = 1 + } else if int64(x[i]) < y[i] { + res[i] = -1 + } else if int64(x[i]) == y[i] { + res[i] = 0 + } else { + res[i] = 1 + } + } +} + +//VecCompareIU returns []int64 comparing the []int64 x to []uint64y +func VecCompareIU(x []int64, y []uint64, res []int64) { + n := len(x) + for i := 0; i < n; i++ { + if x[i] < 0 || uint64(y[i]) > math.MaxInt64 { + res[i] = -1 + } else if x[i] < int64(y[i]) { + res[i] = -1 + } else if x[i] == int64(y[i]) { + res[i] = 0 + } else { + res[i] = 1 + } + } +} + +// CompareFloat64 returns an integer comparing the float64 x to y. +func CompareFloat64(x, y float64) int { + if x < y { + return -1 + } else if x == y { + return 0 + } + + return 1 +} + +// CompareString returns an integer comparing the string x to y. +func CompareString(x, y string) int { + if x < y { + return -1 + } else if x == y { + return 0 + } + + return 1 +} + +// CompareDuration returns an integer comparing the duration x to y. 
+func CompareDuration(x, y time.Duration) int { + if x < y { + return -1 + } else if x == y { + return 0 + } + + return 1 +} diff --git a/types/compare_test.go b/types/compare_test.go new file mode 100644 index 0000000..b5c5388 --- /dev/null +++ b/types/compare_test.go @@ -0,0 +1,216 @@ +// Copyright 2015 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package types + +import ( + . "github.com/pingcap/check" + "github.com/pingcap/tidb/sessionctx/stmtctx" + "github.com/pingcap/tidb/util/testleak" + "math" +) + +var _ = Suite(&testCompareSuite{}) + +type testCompareSuite struct { +} + +func (s *testCompareSuite) TestCompare(c *C) { + defer testleak.AfterTest(c)() + cmpTbl := []struct { + lhs interface{} + rhs interface{} + ret int // 0, 1, -1 + }{ + {float64(1), float64(1), 0}, + {float64(1), "1", 0}, + {int64(1), int64(1), 0}, + {int64(-1), uint64(1), -1}, + {int64(-1), "-1", 0}, + {uint64(1), uint64(1), 0}, + {uint64(1), int64(-1), 1}, + {uint64(1), "1", 0}, + {"1", "1", 0}, + {"1", int64(-1), 1}, + {"1", float64(2), -1}, + {"1", uint64(1), 0}, + + {nil, 2, -1}, + {nil, nil, 0}, + + {false, nil, 1}, + {false, true, -1}, + {true, true, 0}, + {false, false, 0}, + {true, 2, -1}, + + {float64(1.23), nil, 1}, + {float64(0.0), float64(3.45), -1}, + {float64(354.23), float64(3.45), 1}, + {float64(3.452), float64(3.452), 0}, + + {int(432), nil, 1}, + {-4, int(32), -1}, + {int(4), -32, 1}, + {int(432), int64(12), 1}, + {int(23), int64(128), -1}, + {int(123), int64(123), 0}, + {int(432), int(12), 1}, + {int(23), int(123), -1}, + {int64(133), int(183), -1}, + + {uint64(133), uint64(183), -1}, + {uint64(2), int64(-2), 1}, + {uint64(2), int64(1), 1}, + + {"", nil, 1}, + {"", "24", -1}, + {"aasf", "4", 1}, + {"", "", 0}, + + {[]byte(""), nil, 1}, + {[]byte(""), []byte("sff"), -1}, + + {[]byte{}, []byte{}, 0}, + {[]byte("abc"), []byte("ab"), 1}, + {[]byte("123"), 1234, -1}, + {[]byte{}, nil, 1}, + } + + for i, t := range cmpTbl { + comment := Commentf("%d %v %v", i, t.lhs, t.rhs) + ret, err := compareForTest(t.lhs, t.rhs) + c.Assert(err, IsNil) + c.Assert(ret, Equals, t.ret, comment) + + ret, err = compareForTest(t.rhs, t.lhs) + c.Assert(err, IsNil) + c.Assert(ret, Equals, -t.ret, comment) + } +} + +func compareForTest(a, b interface{}) (int, error) { + sc := new(stmtctx.StatementContext) + sc.IgnoreTruncate = true + aDatum := NewDatum(a) + bDatum := NewDatum(b) + return aDatum.CompareDatum(sc, &bDatum) +} + +func (s *testCompareSuite) TestCompareDatum(c *C) { + defer testleak.AfterTest(c)() + cmpTbl := []struct { + lhs Datum + rhs Datum + ret int // 0, 1, -1 + }{ + {MaxValueDatum(), NewDatum("00:00:00"), 1}, + {MinNotNullDatum(), NewDatum("00:00:00"), -1}, + {Datum{}, NewDatum("00:00:00"), -1}, + {Datum{}, Datum{}, 0}, + {MinNotNullDatum(), MinNotNullDatum(), 0}, + {MaxValueDatum(), MaxValueDatum(), 0}, + {Datum{}, MinNotNullDatum(), -1}, + {MinNotNullDatum(), MaxValueDatum(), -1}, + } + sc := new(stmtctx.StatementContext) + sc.IgnoreTruncate = true + for i, t := range cmpTbl { + comment := Commentf("%d %v %v", i, t.lhs, t.rhs) + ret, err := 
t.lhs.CompareDatum(sc, &t.rhs) + c.Assert(err, IsNil) + c.Assert(ret, Equals, t.ret, comment) + + ret, err = t.rhs.CompareDatum(sc, &t.lhs) + c.Assert(err, IsNil) + c.Assert(ret, Equals, -t.ret, comment) + } +} + +func (s *testCompareSuite) TestVecCompareIntAndUint(c *C) { + defer testleak.AfterTest(c)() + cmpTblUU := []struct { + lhs []uint64 + rhs []uint64 + ret []int64 + }{ + {[]uint64{0, 1, 2, 3, 4, 5, 6, 7, 8, 9}, []uint64{9, 8, 7, 6, 5, 4, 3, 2, 1, 0}, []int64{-1, -1, -1, -1, -1, 1, 1, 1, 1, 1}}, + {[]uint64{0, 1, 2, 3, 4, 5, 6, 7, 8, 9}, []uint64{0, 1, 2, 3, 4, 5, 6, 7, 8, 9}, []int64{0, 0, 0, 0, 0, 0, 0, 0, 0, 0}}, + {[]uint64{math.MaxInt64, math.MaxInt64 + 1, math.MaxInt64 + 2, math.MaxInt64 + 3, math.MaxInt64 + 4, math.MaxInt64 + 5, math.MaxInt64 + 6, math.MaxInt64 + 7, math.MaxInt64 + 8, math.MaxInt64 + 9}, []uint64{math.MaxInt64, math.MaxInt64 + 1, math.MaxInt64 + 2, math.MaxInt64 + 3, math.MaxInt64 + 4, math.MaxInt64 + 5, math.MaxInt64 + 6, math.MaxInt64 + 7, math.MaxInt64 + 8, math.MaxInt64 + 9}, []int64{0, 0, 0, 0, 0, 0, 0, 0, 0, 0}}, + } + for _, t := range cmpTblUU { + res := []int64{0, 0, 0, 0, 0, 0, 0, 0, 0, 0} + VecCompareUU(t.lhs, t.rhs, res) + c.Assert(len(res), Equals, len(t.ret)) + for i, v := range res { + c.Assert(v, Equals, t.ret[i]) + } + } + + cmpTblII := []struct { + lhs []int64 + rhs []int64 + ret []int64 + }{ + {[]int64{0, 1, 2, 3, 4, 5, 6, 7, 8, 9}, []int64{9, 8, 7, 6, 5, 4, 3, 2, 1, 0}, []int64{-1, -1, -1, -1, -1, 1, 1, 1, 1, 1}}, + {[]int64{0, -1, -2, -3, -4, -5, -6, -7, -8, -9}, []int64{9, 8, 7, 6, 5, 4, 3, 2, 1, 0}, []int64{-1, -1, -1, -1, -1, -1, -1, -1, -1, -1}}, + {[]int64{0, 1, 2, 3, 4, 5, 6, 7, 8, 9}, []int64{-9, -8, -7, -6, -5, -4, -3, -2, -1, 0}, []int64{1, 1, 1, 1, 1, 1, 1, 1, 1, 1}}, + {[]int64{0, -1, -2, -3, -4, -5, -6, -7, -8, -9}, []int64{-9, -8, -7, -6, -5, -4, -3, -2, -1, 0}, []int64{1, 1, 1, 1, 1, -1, -1, -1, -1, -1}}, + {[]int64{0, 1, 2, 3, 4, 5, 6, 7, 8, 9}, []int64{0, 1, 2, 3, 4, 5, 6, 7, 8, 9}, []int64{0, 0, 0, 0, 0, 0, 0, 0, 0, 0}}, + } + for _, t := range cmpTblII { + res := []int64{0, 0, 0, 0, 0, 0, 0, 0, 0, 0} + VecCompareII(t.lhs, t.rhs, res) + c.Assert(len(res), Equals, len(t.ret)) + for i, v := range res { + c.Assert(v, Equals, t.ret[i]) + } + } + + cmpTblIU := []struct { + lhs []int64 + rhs []uint64 + ret []int64 + }{ + {[]int64{0, 1, 2, 3, 4, 5, 6, 7, 8, 9}, []uint64{9, 8, 7, 6, 5, 4, 3, 2, 1, 0}, []int64{-1, -1, -1, -1, -1, 1, 1, 1, 1, 1}}, + {[]int64{0, -1, -2, -3, -4, -5, -6, -7, -8, -9}, []uint64{9, 8, 7, 6, 5, 4, 3, 2, 1, 0}, []int64{-1, -1, -1, -1, -1, -1, -1, -1, -1, -1}}, + {[]int64{0, 1, 2, 3, 4, 5, 6, 7, 8, 9}, []uint64{0, 1, 2, 3, 4, 5, 6, 7, 8, 9}, []int64{0, 0, 0, 0, 0, 0, 0, 0, 0, 0}}, + {[]int64{0, 1, 2, 3, 4, 5, 6, 7, 8, 9}, []uint64{math.MaxInt64 + 1, math.MaxInt64 + 1, math.MaxInt64 + 1, math.MaxInt64 + 1, math.MaxInt64 + 1, math.MaxInt64 + 1, math.MaxInt64 + 1, math.MaxInt64 + 1, math.MaxInt64 + 1, math.MaxInt64 + 1}, []int64{-1, -1, -1, -1, -1, -1, -1, -1, -1, -1}}, + } + for _, t := range cmpTblIU { + res := []int64{0, 0, 0, 0, 0, 0, 0, 0, 0, 0} + VecCompareIU(t.lhs, t.rhs, res) + c.Assert(len(res), Equals, len(t.ret)) + for i, v := range res { + c.Assert(v, Equals, t.ret[i]) + } + } + + cmpTblUI := []struct { + lhs []uint64 + rhs []int64 + ret []int64 + }{ + {[]uint64{0, 1, 2, 3, 4, 5, 6, 7, 8, 9}, []int64{9, 8, 7, 6, 5, 4, 3, 2, 1, 0}, []int64{-1, -1, -1, -1, -1, 1, 1, 1, 1, 1}}, + {[]uint64{0, 1, 2, 3, 4, 5, 6, 7, 8, 9}, []int64{-9, -8, -7, -6, -5, -4, -3, -2, -1, 0}, []int64{1, 1, 1, 1, 1, 1, 1, 1, 1, 
1}}, + {[]uint64{math.MaxInt64 + 1, math.MaxInt64 + 1, math.MaxInt64 + 1, math.MaxInt64 + 1, math.MaxInt64 + 1, math.MaxInt64 + 1, math.MaxInt64 + 1, math.MaxInt64 + 1, math.MaxInt64 + 1, math.MaxInt64 + 1}, []int64{0, 1, 2, 3, 4, 5, 6, 7, 8, 9}, []int64{1, 1, 1, 1, 1, 1, 1, 1, 1, 1}}, + } + for _, t := range cmpTblUI { + res := []int64{0, 0, 0, 0, 0, 0, 0, 0, 0, 0} + VecCompareUI(t.lhs, t.rhs, res) + c.Assert(len(res), Equals, len(t.ret)) + for i, v := range res { + c.Assert(v, Equals, t.ret[i]) + } + } +} diff --git a/types/const_test.go b/types/const_test.go new file mode 100644 index 0000000..8e66d44 --- /dev/null +++ b/types/const_test.go @@ -0,0 +1,215 @@ +// Copyright 2017 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package types_test + +import ( + "flag" + + . "github.com/pingcap/check" + "github.com/pingcap/tidb/domain" + "github.com/pingcap/tidb/kv" + "github.com/pingcap/tidb/parser" + "github.com/pingcap/tidb/parser/mysql" + "github.com/pingcap/tidb/session" + "github.com/pingcap/tidb/store/mockstore" + "github.com/pingcap/tidb/store/mockstore/mocktikv" + "github.com/pingcap/tidb/util/testkit" + "github.com/pingcap/tidb/util/testleak" +) + +var _ = Suite(&testMySQLConstSuite{}) + +type testMySQLConstSuite struct { + cluster *mocktikv.Cluster + mvccStore mocktikv.MVCCStore + store kv.Storage + dom *domain.Domain + *parser.Parser +} + +var mockTikv = flag.Bool("mockTikv", true, "use mock tikv store in executor test") + +func (s *testMySQLConstSuite) SetUpSuite(c *C) { + s.Parser = parser.New() + flag.Lookup("mockTikv") + useMockTikv := *mockTikv + if useMockTikv { + s.cluster = mocktikv.NewCluster() + mocktikv.BootstrapWithSingleStore(s.cluster) + s.mvccStore = mocktikv.MustNewMVCCStore() + store, err := mockstore.NewMockTikvStore( + mockstore.WithCluster(s.cluster), + mockstore.WithMVCCStore(s.mvccStore), + ) + c.Assert(err, IsNil) + s.store = store + session.SetSchemaLease(0) + session.DisableStats4Test() + } + var err error + s.dom, err = session.BootstrapSession(s.store) + c.Assert(err, IsNil) +} + +func (s *testMySQLConstSuite) TearDownSuite(c *C) { + s.dom.Close() + s.store.Close() + testleak.AfterTest(c)() +} + +func (s *testMySQLConstSuite) TestGetSQLMode(c *C) { + positiveCases := []struct { + arg string + }{ + {"NO_ZERO_DATE"}, + {",,NO_ZERO_DATE"}, + {"NO_ZERO_DATE,NO_ZERO_IN_DATE"}, + {""}, + {", "}, + {","}, + } + + for _, t := range positiveCases { + _, err := mysql.GetSQLMode(mysql.FormatSQLModeStr(t.arg)) + c.Assert(err, IsNil) + } + + negativeCases := []struct { + arg string + }{ + {"NO_ZERO_DATE, NO_ZERO_IN_DATE"}, + {"NO_ZERO_DATE,adfadsdfasdfads"}, + {", ,NO_ZERO_DATE"}, + {" ,"}, + } + + for _, t := range negativeCases { + _, err := mysql.GetSQLMode(mysql.FormatSQLModeStr(t.arg)) + c.Assert(err, NotNil) + } +} + +func (s *testMySQLConstSuite) TestSQLMode(c *C) { + tests := []struct { + arg string + hasNoZeroDateMode bool + hasNoZeroInDateMode bool + hasErrorForDivisionByZeroMode bool + }{ + {"NO_ZERO_DATE", true, false, false}, + {"NO_ZERO_IN_DATE", false, true, false}, + {"ERROR_FOR_DIVISION_BY_ZERO", 
false, false, true}, + {"NO_ZERO_IN_DATE,NO_ZERO_DATE", true, true, false}, + {"NO_ZERO_DATE,NO_ZERO_IN_DATE", true, true, false}, + {"NO_ZERO_DATE,NO_ZERO_IN_DATE", true, true, false}, + {"NO_ZERO_DATE,NO_ZERO_IN_DATE,ERROR_FOR_DIVISION_BY_ZERO", true, true, true}, + {"NO_ZERO_IN_DATE,ERROR_FOR_DIVISION_BY_ZERO", false, true, true}, + {"", false, false, false}, + } + + for _, t := range tests { + sqlMode, _ := mysql.GetSQLMode(t.arg) + c.Assert(sqlMode.HasNoZeroDateMode(), Equals, t.hasNoZeroDateMode) + c.Assert(sqlMode.HasNoZeroInDateMode(), Equals, t.hasNoZeroInDateMode) + c.Assert(sqlMode.HasErrorForDivisionByZeroMode(), Equals, t.hasErrorForDivisionByZeroMode) + } +} + +func (s *testMySQLConstSuite) TestIgnoreSpaceMode(c *C) { + tk := testkit.NewTestKit(c, s.store) + tk.MustExec("use test") + tk.MustExec("set sql_mode=''") + tk.MustExec("CREATE TABLE COUNT (a bigint);") + tk.MustExec("DROP TABLE COUNT;") + tk.MustExec("CREATE TABLE `COUNT` (a bigint);") + tk.MustExec("DROP TABLE COUNT;") + _, err := tk.Exec("CREATE TABLE COUNT(a bigint);") + c.Assert(err, NotNil) + tk.MustExec("CREATE TABLE test.COUNT(a bigint);") + tk.MustExec("DROP TABLE COUNT;") + + tk.MustExec("CREATE TABLE BIT_AND (a bigint);") + tk.MustExec("DROP TABLE BIT_AND;") + tk.MustExec("CREATE TABLE `BIT_AND` (a bigint);") + tk.MustExec("DROP TABLE BIT_AND;") + _, err = tk.Exec("CREATE TABLE BIT_AND(a bigint);") + c.Assert(err, NotNil) + tk.MustExec("CREATE TABLE test.BIT_AND(a bigint);") + tk.MustExec("DROP TABLE BIT_AND;") + + tk.MustExec("CREATE TABLE NOW (a bigint);") + tk.MustExec("DROP TABLE NOW;") + tk.MustExec("CREATE TABLE `NOW` (a bigint);") + tk.MustExec("DROP TABLE NOW;") + _, err = tk.Exec("CREATE TABLE NOW(a bigint);") + c.Assert(err, NotNil) + tk.MustExec("CREATE TABLE test.NOW(a bigint);") + tk.MustExec("DROP TABLE NOW;") + + tk.MustExec("set sql_mode='IGNORE_SPACE'") + _, err = tk.Exec("CREATE TABLE COUNT (a bigint);") + c.Assert(err, NotNil) + tk.MustExec("CREATE TABLE `COUNT` (a bigint);") + tk.MustExec("DROP TABLE COUNT;") + _, err = tk.Exec("CREATE TABLE COUNT(a bigint);") + c.Assert(err, NotNil) + tk.MustExec("CREATE TABLE test.COUNT(a bigint);") + tk.MustExec("DROP TABLE COUNT;") + + _, err = tk.Exec("CREATE TABLE BIT_AND (a bigint);") + c.Assert(err, NotNil) + tk.MustExec("CREATE TABLE `BIT_AND` (a bigint);") + tk.MustExec("DROP TABLE BIT_AND;") + _, err = tk.Exec("CREATE TABLE BIT_AND(a bigint);") + c.Assert(err, NotNil) + tk.MustExec("CREATE TABLE test.BIT_AND(a bigint);") + tk.MustExec("DROP TABLE BIT_AND;") + + _, err = tk.Exec("CREATE TABLE NOW (a bigint);") + c.Assert(err, NotNil) + tk.MustExec("CREATE TABLE `NOW` (a bigint);") + tk.MustExec("DROP TABLE NOW;") + _, err = tk.Exec("CREATE TABLE NOW(a bigint);") + c.Assert(err, NotNil) + tk.MustExec("CREATE TABLE test.NOW(a bigint);") + tk.MustExec("DROP TABLE NOW;") + +} + +func (s *testMySQLConstSuite) TestNoBackslashEscapesMode(c *C) { + tk := testkit.NewTestKit(c, s.store) + tk.MustExec("set sql_mode=''") + r := tk.MustQuery("SELECT '\\\\'") + r.Check(testkit.Rows("\\")) + tk.MustExec("set sql_mode='NO_BACKSLASH_ESCAPES'") + r = tk.MustQuery("SELECT '\\\\'") + r.Check(testkit.Rows("\\\\")) +} + +func (s *testMySQLConstSuite) TestServerStatus(c *C) { + tests := []struct { + arg uint16 + IsCursorExists bool + }{ + {0, false}, + {mysql.ServerStatusInTrans | mysql.ServerStatusNoBackslashEscaped, false}, + {mysql.ServerStatusCursorExists, true}, + {mysql.ServerStatusCursorExists | mysql.ServerStatusLastRowSend, true}, + } + + for _, t := range 
tests { + ret := mysql.HasCursorExistsFlag(t.arg) + c.Assert(ret, Equals, t.IsCursorExists) + } +} diff --git a/types/convert.go b/types/convert.go new file mode 100644 index 0000000..9b4fe29 --- /dev/null +++ b/types/convert.go @@ -0,0 +1,505 @@ +// Copyright 2014 The ql Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSES/QL-LICENSE file. + +// Copyright 2015 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package types + +import ( + "math" + "strconv" + "strings" + + "github.com/pingcap/errors" + "github.com/pingcap/tidb/parser/mysql" + "github.com/pingcap/tidb/sessionctx/stmtctx" +) + +func truncateStr(str string, flen int) string { + if flen != UnspecifiedLength && len(str) > flen { + str = str[:flen] + } + return str +} + +// IntergerUnsignedUpperBound indicates the max uint64 values of different mysql types. +func IntergerUnsignedUpperBound(intType byte) uint64 { + switch intType { + case mysql.TypeTiny: + return math.MaxUint8 + case mysql.TypeShort: + return math.MaxUint16 + case mysql.TypeInt24: + return mysql.MaxUint24 + case mysql.TypeLong: + return math.MaxUint32 + case mysql.TypeLonglong: + return math.MaxUint64 + default: + panic("Input byte is not a mysql type") + } +} + +// IntergerSignedUpperBound indicates the max int64 values of different mysql types. +func IntergerSignedUpperBound(intType byte) int64 { + switch intType { + case mysql.TypeTiny: + return math.MaxInt8 + case mysql.TypeShort: + return math.MaxInt16 + case mysql.TypeInt24: + return mysql.MaxInt24 + case mysql.TypeLong: + return math.MaxInt32 + case mysql.TypeLonglong: + return math.MaxInt64 + default: + panic("Input byte is not a mysql type") + } +} + +// IntergerSignedLowerBound indicates the min int64 values of different mysql types. +func IntergerSignedLowerBound(intType byte) int64 { + switch intType { + case mysql.TypeTiny: + return math.MinInt8 + case mysql.TypeShort: + return math.MinInt16 + case mysql.TypeInt24: + return mysql.MinInt24 + case mysql.TypeLong: + return math.MinInt32 + case mysql.TypeLonglong: + return math.MinInt64 + default: + panic("Input byte is not a mysql type") + } +} + +// ConvertFloatToInt converts a float64 value to a int value. +// `tp` is used in err msg, if there is overflow, this func will report err according to `tp` +func ConvertFloatToInt(fval float64, lowerBound, upperBound int64, tp byte) (int64, error) { + val := RoundFloat(fval) + if val < float64(lowerBound) { + return lowerBound, overflow(val, tp) + } + + if val >= float64(upperBound) { + if val == float64(upperBound) { + return upperBound, nil + } + return upperBound, overflow(val, tp) + } + return int64(val), nil +} + +// ConvertIntToInt converts an int value to another int value of different precision. 
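The bound helpers and `ConvertFloatToInt` above implement MySQL-style clamping: an out-of-range value is clamped to the nearest bound and an overflow error is reported alongside it. A small sketch (the function name is invented and it assumes it lives in the `types` package):

```go
package types

import (
	"fmt"

	"github.com/pingcap/tidb/parser/mysql"
)

// tinyIntClampExample is an illustrative sketch (not part of this diff) of how
// the bound helpers and ConvertFloatToInt clamp an out-of-range value.
func tinyIntClampExample() {
	lower := IntergerSignedLowerBound(mysql.TypeTiny) // -128
	upper := IntergerSignedUpperBound(mysql.TypeTiny) // 127

	v, err := ConvertFloatToInt(300.0, lower, upper, mysql.TypeTiny)
	// The value is clamped to the upper bound and an overflow error is returned.
	fmt.Println(v, err != nil) // 127 true
}
```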
+func ConvertIntToInt(val int64, lowerBound int64, upperBound int64, tp byte) (int64, error) { + if val < lowerBound { + return lowerBound, overflow(val, tp) + } + + if val > upperBound { + return upperBound, overflow(val, tp) + } + + return val, nil +} + +// ConvertUintToInt converts an uint value to an int value. +func ConvertUintToInt(val uint64, upperBound int64, tp byte) (int64, error) { + if val > uint64(upperBound) { + return upperBound, overflow(val, tp) + } + + return int64(val), nil +} + +// ConvertIntToUint converts an int value to an uint value. +func ConvertIntToUint(sc *stmtctx.StatementContext, val int64, upperBound uint64, tp byte) (uint64, error) { + if sc.ShouldClipToZero() && val < 0 { + return 0, overflow(val, tp) + } + + if uint64(val) > upperBound { + return upperBound, overflow(val, tp) + } + + return uint64(val), nil +} + +// ConvertUintToUint converts an uint value to another uint value of different precision. +func ConvertUintToUint(val uint64, upperBound uint64, tp byte) (uint64, error) { + if val > upperBound { + return upperBound, overflow(val, tp) + } + + return val, nil +} + +// ConvertFloatToUint converts a float value to an uint value. +func ConvertFloatToUint(sc *stmtctx.StatementContext, fval float64, upperBound uint64, tp byte) (uint64, error) { + val := RoundFloat(fval) + if val < 0 { + if sc.ShouldClipToZero() { + return 0, overflow(val, tp) + } + return uint64(int64(val)), overflow(val, tp) + } + + ubf := float64(upperBound) + // Because math.MaxUint64 can not be represented precisely in iee754(64bit), + // so `float64(math.MaxUint64)` will make a num bigger than math.MaxUint64, + // which can not be represented by 64bit integer. + // So `uint64(float64(math.MaxUint64))` is undefined behavior. + if val == ubf { + return uint64(math.MaxInt64), nil + } + if val > ubf { + return uint64(math.MaxInt64), overflow(val, tp) + } + return uint64(val), nil +} + +// convertScientificNotation converts a decimal string with scientific notation to a normal decimal string. +// 1E6 => 1000000, .12345E+5 => 12345 +func convertScientificNotation(str string) (string, error) { + // https://golang.org/ref/spec#Floating-point_literals + eIdx := -1 + point := -1 + for i := 0; i < len(str); i++ { + if str[i] == '.' { + point = i + } + if str[i] == 'e' || str[i] == 'E' { + eIdx = i + if point == -1 { + point = i + } + break + } + } + if eIdx == -1 { + return str, nil + } + exp, err := strconv.ParseInt(str[eIdx+1:], 10, 64) + if err != nil { + return "", errors.WithStack(err) + } + + f := str[:eIdx] + if exp == 0 { + return f, nil + } else if exp > 0 { // move point right + if point+int(exp) == len(f)-1 { // 123.456 >> 3 = 123456. = 123456 + return f[:point] + f[point+1:], nil + } else if point+int(exp) < len(f)-1 { // 123.456 >> 2 = 12345.6 + return f[:point] + f[point+1:point+1+int(exp)] + "." + f[point+1+int(exp):], nil + } + // 123.456 >> 5 = 12345600 + return f[:point] + f[point+1:] + strings.Repeat("0", point+int(exp)-len(f)+1), nil + } else { // move point left + exp = -exp + if int(exp) < point { // 123.456 << 2 = 1.23456 + return f[:point-int(exp)] + "." + f[point-int(exp):point] + f[point+1:], nil + } + // 123.456 << 5 = 0.00123456 + return "0." + strings.Repeat("0", int(exp)-point) + f[:point] + f[point+1:], nil + } +} + +// StrToInt converts a string to an integer at the best-effort. 
+func StrToInt(sc *stmtctx.StatementContext, str string) (int64, error) { + str = strings.TrimSpace(str) + validPrefix, err := getValidIntPrefix(sc, str) + iVal, err1 := strconv.ParseInt(validPrefix, 10, 64) + if err1 != nil { + return iVal, ErrOverflow.GenWithStackByArgs("BIGINT", validPrefix) + } + return iVal, errors.Trace(err) +} + +// StrToUint converts a string to an unsigned integer at the best-effortt. +func StrToUint(sc *stmtctx.StatementContext, str string) (uint64, error) { + str = strings.TrimSpace(str) + validPrefix, err := getValidIntPrefix(sc, str) + if validPrefix[0] == '+' { + validPrefix = validPrefix[1:] + } + uVal, err1 := strconv.ParseUint(validPrefix, 10, 64) + if err1 != nil { + return uVal, ErrOverflow.GenWithStackByArgs("BIGINT UNSIGNED", validPrefix) + } + return uVal, errors.Trace(err) +} + +// getValidIntPrefix gets prefix of the string which can be successfully parsed as int. +func getValidIntPrefix(sc *stmtctx.StatementContext, str string) (string, error) { + if !sc.CastStrToIntStrict { + floatPrefix, err := getValidFloatPrefix(sc, str) + if err != nil { + return floatPrefix, errors.Trace(err) + } + return floatStrToIntStr(sc, floatPrefix, str) + } + + validLen := 0 + + for i := 0; i < len(str); i++ { + c := str[i] + if (c == '+' || c == '-') && i == 0 { + continue + } + + if c >= '0' && c <= '9' { + validLen = i + 1 + continue + } + + break + } + valid := str[:validLen] + if valid == "" { + valid = "0" + } + if validLen == 0 || validLen != len(str) { + return valid, errors.Trace(handleTruncateError(sc, ErrTruncatedWrongVal.GenWithStackByArgs("INTEGER", str))) + } + return valid, nil +} + +// roundIntStr is to round a **valid int string** base on the number following dot. +func roundIntStr(numNextDot byte, intStr string) string { + if numNextDot < '5' { + return intStr + } + retStr := []byte(intStr) + idx := len(intStr) - 1 + for ; idx >= 1; idx-- { + if retStr[idx] != '9' { + retStr[idx]++ + break + } + retStr[idx] = '0' + } + if idx == 0 { + if intStr[0] == '9' { + retStr[0] = '1' + retStr = append(retStr, '0') + } else if isDigit(intStr[0]) { + retStr[0]++ + } else { + retStr[1] = '1' + retStr = append(retStr, '0') + } + } + return string(retStr) +} + +// floatStrToIntStr converts a valid float string into valid integer string which can be parsed by +// strconv.ParseInt, we can't parse float first then convert it to string because precision will +// be lost. For example, the string value "18446744073709551615" which is the max number of unsigned +// int will cause some precision to lose. intStr[0] may be a positive and negative sign like '+' or '-'. +// +// This func will find serious overflow such as the len of intStr > 20 (without prefix `+/-`) +// however, it will not check whether the intStr overflow BIGINT. 
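Putting `StrToInt`, `getValidIntPrefix`, and `roundIntStr` above together, the conversion is best-effort: only the valid numeric prefix is used, the fractional part is rounded, and truncation is reported through the `StatementContext`. An illustrative sketch consistent with the expectations in `convert_test.go` below (the function name is invented):

```go
package types

import (
	"fmt"

	"github.com/pingcap/tidb/sessionctx/stmtctx"
)

// strToIntExample is an illustrative sketch (not part of this diff) of the
// best-effort conversion implemented by StrToInt and floatStrToIntStr.
func strToIntExample() {
	sc := new(stmtctx.StatementContext)
	sc.IgnoreTruncate = true

	v, err := StrToInt(sc, "65.0") // fractional part is rounded via roundIntStr
	fmt.Println(v, err)            // 65 <nil>

	v, err = StrToInt(sc, "11xx") // only the valid numeric prefix is parsed
	fmt.Println(v, err)           // 11 <nil> (a truncation error when IgnoreTruncate is false)
}
```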
+func floatStrToIntStr(sc *stmtctx.StatementContext, validFloat string, oriStr string) (intStr string, _ error) { + var dotIdx = -1 + var eIdx = -1 + for i := 0; i < len(validFloat); i++ { + switch validFloat[i] { + case '.': + dotIdx = i + case 'e', 'E': + eIdx = i + } + } + if eIdx == -1 { + if dotIdx == -1 { + return validFloat, nil + } + var digits []byte + if validFloat[0] == '-' || validFloat[0] == '+' { + dotIdx-- + digits = []byte(validFloat[1:]) + } else { + digits = []byte(validFloat) + } + if dotIdx == 0 { + intStr = "0" + } else { + intStr = string(digits)[:dotIdx] + } + if len(digits) > dotIdx+1 { + intStr = roundIntStr(digits[dotIdx+1], intStr) + } + if (len(intStr) > 1 || intStr[0] != '0') && validFloat[0] == '-' { + intStr = "-" + intStr + } + return intStr, nil + } + // intCnt and digits contain the prefix `+/-` if validFloat[0] is `+/-` + var intCnt int + digits := make([]byte, 0, len(validFloat)) + if dotIdx == -1 { + digits = append(digits, validFloat[:eIdx]...) + intCnt = len(digits) + } else { + digits = append(digits, validFloat[:dotIdx]...) + intCnt = len(digits) + digits = append(digits, validFloat[dotIdx+1:eIdx]...) + } + exp, err := strconv.Atoi(validFloat[eIdx+1:]) + if err != nil { + return validFloat, errors.Trace(err) + } + intCnt += exp + if exp >= 0 && (intCnt > 21 || intCnt < 0) { + // MaxInt64 has 19 decimal digits. + // MaxUint64 has 20 decimal digits. + // And the intCnt may contain the len of `+/-`, + // so I use 21 here as the early detection. + sc.AppendWarning(ErrOverflow.GenWithStackByArgs("BIGINT", oriStr)) + return validFloat[:eIdx], nil + } + if intCnt <= 0 { + intStr = "0" + if intCnt == 0 && len(digits) > 0 && isDigit(digits[0]) { + intStr = roundIntStr(digits[0], intStr) + } + return intStr, nil + } + if intCnt == 1 && (digits[0] == '-' || digits[0] == '+') { + intStr = "0" + if len(digits) > 1 { + intStr = roundIntStr(digits[1], intStr) + } + if intStr[0] == '1' { + intStr = string(digits[:1]) + intStr + } + return intStr, nil + } + if intCnt <= len(digits) { + intStr = string(digits[:intCnt]) + if intCnt < len(digits) { + intStr = roundIntStr(digits[intCnt], intStr) + } + } else { + // convert scientific notation decimal number + extraZeroCount := intCnt - len(digits) + intStr = string(digits) + strings.Repeat("0", extraZeroCount) + } + return intStr, nil +} + +// StrToFloat converts a string to a float64 at the best-effort. +func StrToFloat(sc *stmtctx.StatementContext, str string) (float64, error) { + str = strings.TrimSpace(str) + validStr, err := getValidFloatPrefix(sc, str) + f, err1 := strconv.ParseFloat(validStr, 64) + if err1 != nil { + if err2, ok := err1.(*strconv.NumError); ok { + // value will truncate to MAX/MIN if out of range. + if err2.Err == strconv.ErrRange { + err1 = sc.HandleTruncate(ErrTruncatedWrongVal.GenWithStackByArgs("DOUBLE", str)) + if math.IsInf(f, 1) { + f = math.MaxFloat64 + } else if math.IsInf(f, -1) { + f = -math.MaxFloat64 + } + } + } + return f, errors.Trace(err1) + } + return f, errors.Trace(err) +} + +// getValidFloatPrefix gets prefix of string which can be successfully parsed as float. +func getValidFloatPrefix(sc *stmtctx.StatementContext, s string) (valid string, err error) { + if (sc.InDeleteStmt || sc.InSelectStmt) && s == "" { + return "0", nil + } + + var ( + sawDot bool + sawDigit bool + validLen int + eIdx int + ) + for i := 0; i < len(s); i++ { + c := s[i] + if c == '+' || c == '-' { + if i != 0 && i != eIdx+1 { // "1e+1" is valid. + break + } + } else if c == '.' 
{ + if sawDot || eIdx > 0 { // "1.1." or "1e1.1" + break + } + sawDot = true + if sawDigit { // "123." is valid. + validLen = i + 1 + } + } else if c == 'e' || c == 'E' { + if !sawDigit { // "+.e" + break + } + if eIdx != 0 { // "1e5e" + break + } + eIdx = i + } else if c < '0' || c > '9' { + break + } else { + sawDigit = true + validLen = i + 1 + } + } + valid = s[:validLen] + if valid == "" { + valid = "0" + } + if validLen == 0 || validLen != len(s) { + err = errors.Trace(handleTruncateError(sc, ErrTruncatedWrongVal.GenWithStackByArgs("FLOAT", s))) + } + return valid, err +} + +// ToString converts an interface to a string. +func ToString(value interface{}) (string, error) { + switch v := value.(type) { + case bool: + if v { + return "1", nil + } + return "0", nil + case int: + return strconv.FormatInt(int64(v), 10), nil + case int64: + return strconv.FormatInt(v, 10), nil + case uint64: + return strconv.FormatUint(v, 10), nil + case float32: + return strconv.FormatFloat(float64(v), 'f', -1, 32), nil + case float64: + return strconv.FormatFloat(v, 'f', -1, 64), nil + case string: + return v, nil + case []byte: + return string(v), nil + default: + return "", errors.Errorf("cannot convert %v(type %T) to string", value, value) + } +} diff --git a/types/convert_test.go b/types/convert_test.go new file mode 100644 index 0000000..1b34745 --- /dev/null +++ b/types/convert_test.go @@ -0,0 +1,617 @@ +// Copyright 2015 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package types + +import ( + "fmt" + "math" + "strconv" + "time" + + . "github.com/pingcap/check" + "github.com/pingcap/errors" + "github.com/pingcap/tidb/parser/charset" + "github.com/pingcap/tidb/parser/mysql" + "github.com/pingcap/tidb/parser/terror" + "github.com/pingcap/tidb/sessionctx/stmtctx" + "github.com/pingcap/tidb/util/testleak" +) + +var _ = Suite(&testTypeConvertSuite{}) + +type testTypeConvertSuite struct { +} + +type invalidMockType struct { +} + +// Convert converts the val with type tp. 
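`StrToFloat` above clamps values that overflow `float64` to positive or negative `math.MaxFloat64` instead of failing outright, which matches the `1e649` cases exercised in the tests below. A short sketch (the function name is invented and it assumes it lives in the `types` package):

```go
package types

import (
	"fmt"
	"math"

	"github.com/pingcap/tidb/sessionctx/stmtctx"
)

// strToFloatExample is an illustrative sketch (not part of this diff) of how
// StrToFloat clamps out-of-range input.
func strToFloatExample() {
	sc := new(stmtctx.StatementContext)
	sc.IgnoreTruncate = true

	f, err := StrToFloat(sc, "1e649")
	fmt.Println(f == math.MaxFloat64, err) // true <nil>

	f, err = StrToFloat(sc, "-1e649")
	fmt.Println(f == -math.MaxFloat64, err) // true <nil>
}
```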
+func Convert(val interface{}, target *FieldType) (v interface{}, err error) { + d := NewDatum(val) + sc := new(stmtctx.StatementContext) + sc.TimeZone = time.UTC + ret, err := d.ConvertTo(sc, target) + if err != nil { + return ret.GetValue(), errors.Trace(err) + } + return ret.GetValue(), nil +} + +func (s *testTypeConvertSuite) TestConvertType(c *C) { + defer testleak.AfterTest(c)() + ft := NewFieldType(mysql.TypeBlob) + ft.Flen = 4 + ft.Charset = "utf8" + v, err := Convert("123456", ft) + c.Assert(ErrDataTooLong.Equal(err), IsTrue) + c.Assert(v, Equals, "1234") + ft = NewFieldType(mysql.TypeString) + ft.Flen = 4 + ft.Charset = charset.CharsetBin + v, err = Convert("12345", ft) + c.Assert(ErrDataTooLong.Equal(err), IsTrue) + c.Assert(v, DeepEquals, []byte("1234")) + + ft = NewFieldType(mysql.TypeFloat) + ft.Flen = 5 + ft.Decimal = 2 + v, err = Convert(111.114, ft) + c.Assert(err, IsNil) + c.Assert(v, Equals, float32(111.11)) + + ft = NewFieldType(mysql.TypeFloat) + ft.Flen = 5 + ft.Decimal = 2 + v, err = Convert(999.999, ft) + c.Assert(err, NotNil) + c.Assert(v, Equals, float32(999.99)) + + ft = NewFieldType(mysql.TypeFloat) + ft.Flen = 5 + ft.Decimal = 2 + v, err = Convert(-999.999, ft) + c.Assert(err, NotNil) + c.Assert(v, Equals, float32(-999.99)) + + ft = NewFieldType(mysql.TypeFloat) + ft.Flen = 5 + ft.Decimal = 2 + v, err = Convert(1111.11, ft) + c.Assert(err, NotNil) + c.Assert(v, Equals, float32(999.99)) + + ft = NewFieldType(mysql.TypeFloat) + ft.Flen = 5 + ft.Decimal = 2 + v, err = Convert(999.916, ft) + c.Assert(err, IsNil) + c.Assert(v, Equals, float32(999.92)) + + ft = NewFieldType(mysql.TypeFloat) + ft.Flen = 5 + ft.Decimal = 2 + v, err = Convert(999.914, ft) + c.Assert(err, IsNil) + c.Assert(v, Equals, float32(999.91)) + + ft = NewFieldType(mysql.TypeFloat) + ft.Flen = 5 + ft.Decimal = 2 + v, err = Convert(999.9155, ft) + c.Assert(err, IsNil) + c.Assert(v, Equals, float32(999.92)) + + // For TypeBlob + ft = NewFieldType(mysql.TypeBlob) + _, err = Convert(&invalidMockType{}, ft) + c.Assert(err, NotNil) + + // Nil + ft = NewFieldType(mysql.TypeBlob) + v, err = Convert(nil, ft) + c.Assert(err, IsNil) + c.Assert(v, IsNil) + + // TypeDouble + ft = NewFieldType(mysql.TypeDouble) + ft.Flen = 5 + ft.Decimal = 2 + v, err = Convert(999.9155, ft) + c.Assert(err, IsNil) + c.Assert(v, Equals, float64(999.92)) + + // For TypeString + ft = NewFieldType(mysql.TypeString) + ft.Flen = 3 + v, err = Convert("12345", ft) + c.Assert(ErrDataTooLong.Equal(err), IsTrue) + c.Assert(v, Equals, "123") + ft = NewFieldType(mysql.TypeString) + ft.Flen = 3 + ft.Charset = charset.CharsetBin + v, err = Convert("12345", ft) + c.Assert(ErrDataTooLong.Equal(err), IsTrue) + c.Assert(v, DeepEquals, []byte("123")) + + // For TypeLonglong + ft = NewFieldType(mysql.TypeLonglong) + v, err = Convert("100", ft) + c.Assert(err, IsNil) + c.Assert(v, Equals, int64(100)) + // issue 4287. 
+ v, err = Convert(math.Pow(2, 63)-1, ft) + c.Assert(err, IsNil) + c.Assert(v, Equals, int64(math.MaxInt64)) + ft = NewFieldType(mysql.TypeLonglong) + ft.Flag |= mysql.UnsignedFlag + v, err = Convert("100", ft) + c.Assert(err, IsNil) + c.Assert(v, Equals, uint64(100)) +} + +func testStrToInt(c *C, str string, expect int64, truncateAsErr bool, expectErr error) { + sc := new(stmtctx.StatementContext) + sc.IgnoreTruncate = !truncateAsErr + val, err := StrToInt(sc, str) + if expectErr != nil { + c.Assert(terror.ErrorEqual(err, expectErr), IsTrue, Commentf("err %v", err)) + } else { + c.Assert(err, IsNil) + c.Assert(val, Equals, expect) + } +} + +func testStrToUint(c *C, str string, expect uint64, truncateAsErr bool, expectErr error) { + sc := new(stmtctx.StatementContext) + sc.IgnoreTruncate = !truncateAsErr + val, err := StrToUint(sc, str) + if expectErr != nil { + c.Assert(terror.ErrorEqual(err, expectErr), IsTrue, Commentf("err %v", err)) + } else { + c.Assert(err, IsNil) + c.Assert(val, Equals, expect) + } +} + +func testStrToFloat(c *C, str string, expect float64, truncateAsErr bool, expectErr error) { + sc := new(stmtctx.StatementContext) + sc.IgnoreTruncate = !truncateAsErr + val, err := StrToFloat(sc, str) + if expectErr != nil { + c.Assert(terror.ErrorEqual(err, expectErr), IsTrue, Commentf("err %v", err)) + } else { + c.Assert(err, IsNil) + c.Assert(val, Equals, expect) + } +} + +func (s *testTypeConvertSuite) TestStrToNum(c *C) { + defer testleak.AfterTest(c)() + testStrToInt(c, "0", 0, true, nil) + testStrToInt(c, "-1", -1, true, nil) + testStrToInt(c, "100", 100, true, nil) + testStrToInt(c, "65.0", 65, false, nil) + testStrToInt(c, "65.0", 65, true, nil) + testStrToInt(c, "", 0, false, nil) + testStrToInt(c, "", 0, true, ErrTruncatedWrongVal) + testStrToInt(c, "xx", 0, true, ErrTruncatedWrongVal) + testStrToInt(c, "xx", 0, false, nil) + testStrToInt(c, "11xx", 11, true, ErrTruncatedWrongVal) + testStrToInt(c, "11xx", 11, false, nil) + testStrToInt(c, "xx11", 0, false, nil) + + testStrToUint(c, "0", 0, true, nil) + testStrToUint(c, "", 0, false, nil) + testStrToUint(c, "", 0, false, nil) + testStrToUint(c, "-1", 0xffffffffffffffff, false, ErrOverflow) + testStrToUint(c, "100", 100, true, nil) + testStrToUint(c, "+100", 100, true, nil) + testStrToUint(c, "65.0", 65, true, nil) + testStrToUint(c, "xx", 0, true, ErrTruncatedWrongVal) + testStrToUint(c, "11xx", 11, true, ErrTruncatedWrongVal) + testStrToUint(c, "xx11", 0, true, ErrTruncatedWrongVal) + + // TODO: makes StrToFloat return truncated value instead of zero to make it pass. 
+ testStrToFloat(c, "", 0, true, ErrTruncatedWrongVal) + testStrToFloat(c, "-1", -1.0, true, nil) + testStrToFloat(c, "1.11", 1.11, true, nil) + testStrToFloat(c, "1.11.00", 1.11, false, nil) + testStrToFloat(c, "1.11.00", 1.11, true, ErrTruncatedWrongVal) + testStrToFloat(c, "xx", 0.0, false, nil) + testStrToFloat(c, "0x00", 0.0, false, nil) + testStrToFloat(c, "11.xx", 11.0, false, nil) + testStrToFloat(c, "11.xx", 11.0, true, ErrTruncatedWrongVal) + testStrToFloat(c, "xx.11", 0.0, false, nil) + + // for issue #5111 + testStrToFloat(c, "1e649", math.MaxFloat64, true, ErrTruncatedWrongVal) + testStrToFloat(c, "1e649", math.MaxFloat64, false, nil) + testStrToFloat(c, "-1e649", -math.MaxFloat64, true, ErrTruncatedWrongVal) + testStrToFloat(c, "-1e649", -math.MaxFloat64, false, nil) + + // for issue #10806, #11179 + testSelectUpdateDeleteEmptyStringError(c) +} + +func testSelectUpdateDeleteEmptyStringError(c *C) { + testCases := []struct { + inSelect bool + inDelete bool + }{ + {true, false}, + {false, true}, + } + sc := new(stmtctx.StatementContext) + for _, tc := range testCases { + sc.InSelectStmt = tc.inSelect + sc.InDeleteStmt = tc.inDelete + + str := "" + expect := 0 + + val, err := StrToInt(sc, str) + c.Assert(err, IsNil) + c.Assert(val, Equals, int64(expect)) + + val1, err := StrToUint(sc, str) + c.Assert(err, IsNil) + c.Assert(val1, Equals, uint64(expect)) + + val2, err := StrToFloat(sc, str) + c.Assert(err, IsNil) + c.Assert(val2, Equals, float64(expect)) + } +} + +func (s *testTypeConvertSuite) TestFieldTypeToStr(c *C) { + defer testleak.AfterTest(c)() + v := TypeToStr(mysql.TypeUnspecified, "not binary") + c.Assert(v, Equals, TypeStr(mysql.TypeUnspecified)) + v = TypeToStr(mysql.TypeBlob, charset.CharsetBin) + c.Assert(v, Equals, "blob") + v = TypeToStr(mysql.TypeString, charset.CharsetBin) + c.Assert(v, Equals, "binary") +} + +func accept(c *C, tp byte, value interface{}, unsigned bool, expected string) { + ft := NewFieldType(tp) + if unsigned { + ft.Flag |= mysql.UnsignedFlag + } + d := NewDatum(value) + sc := new(stmtctx.StatementContext) + sc.TimeZone = time.UTC + sc.IgnoreTruncate = true + casted, err := d.ConvertTo(sc, ft) + c.Assert(err, IsNil, Commentf("%v", ft)) + if casted.IsNull() { + c.Assert(expected, Equals, "") + } else { + str, err := casted.ToString() + c.Assert(err, IsNil) + c.Assert(str, Equals, expected) + } +} + +func unsignedAccept(c *C, tp byte, value interface{}, expected string) { + accept(c, tp, value, true, expected) +} + +func signedAccept(c *C, tp byte, value interface{}, expected string) { + accept(c, tp, value, false, expected) +} + +func deny(c *C, tp byte, value interface{}, unsigned bool, expected string) { + ft := NewFieldType(tp) + if unsigned { + ft.Flag |= mysql.UnsignedFlag + } + d := NewDatum(value) + sc := new(stmtctx.StatementContext) + casted, err := d.ConvertTo(sc, ft) + c.Assert(err, NotNil) + if casted.IsNull() { + c.Assert(expected, Equals, "") + } else { + str, err := casted.ToString() + c.Assert(err, IsNil) + c.Assert(str, Equals, expected) + } +} + +func unsignedDeny(c *C, tp byte, value interface{}, expected string) { + deny(c, tp, value, true, expected) +} + +func signedDeny(c *C, tp byte, value interface{}, expected string) { + deny(c, tp, value, false, expected) +} + +func strvalue(v interface{}) string { + return fmt.Sprintf("%v", v) +} + +func (s *testTypeConvertSuite) TestConvert(c *C) { + defer testleak.AfterTest(c)() + // integer ranges + signedDeny(c, mysql.TypeTiny, -129, "-128") + signedAccept(c, mysql.TypeTiny, -128, 
"-128") + signedAccept(c, mysql.TypeTiny, 127, "127") + signedDeny(c, mysql.TypeTiny, 128, "127") + unsignedDeny(c, mysql.TypeTiny, -1, "255") + unsignedAccept(c, mysql.TypeTiny, 0, "0") + unsignedAccept(c, mysql.TypeTiny, 255, "255") + unsignedDeny(c, mysql.TypeTiny, 256, "255") + + signedDeny(c, mysql.TypeShort, int64(math.MinInt16)-1, strvalue(int64(math.MinInt16))) + signedAccept(c, mysql.TypeShort, int64(math.MinInt16), strvalue(int64(math.MinInt16))) + signedAccept(c, mysql.TypeShort, int64(math.MaxInt16), strvalue(int64(math.MaxInt16))) + signedDeny(c, mysql.TypeShort, int64(math.MaxInt16)+1, strvalue(int64(math.MaxInt16))) + unsignedDeny(c, mysql.TypeShort, -1, "65535") + unsignedAccept(c, mysql.TypeShort, 0, "0") + unsignedAccept(c, mysql.TypeShort, uint64(math.MaxUint16), strvalue(uint64(math.MaxUint16))) + unsignedDeny(c, mysql.TypeShort, uint64(math.MaxUint16)+1, strvalue(uint64(math.MaxUint16))) + + signedDeny(c, mysql.TypeInt24, -1<<23-1, strvalue(-1<<23)) + signedAccept(c, mysql.TypeInt24, -1<<23, strvalue(-1<<23)) + signedAccept(c, mysql.TypeInt24, 1<<23-1, strvalue(1<<23-1)) + signedDeny(c, mysql.TypeInt24, 1<<23, strvalue(1<<23-1)) + unsignedDeny(c, mysql.TypeInt24, -1, "16777215") + unsignedAccept(c, mysql.TypeInt24, 0, "0") + unsignedAccept(c, mysql.TypeInt24, 1<<24-1, strvalue(1<<24-1)) + unsignedDeny(c, mysql.TypeInt24, 1<<24, strvalue(1<<24-1)) + + signedDeny(c, mysql.TypeLong, int64(math.MinInt32)-1, strvalue(int64(math.MinInt32))) + signedAccept(c, mysql.TypeLong, int64(math.MinInt32), strvalue(int64(math.MinInt32))) + signedAccept(c, mysql.TypeLong, int64(math.MaxInt32), strvalue(int64(math.MaxInt32))) + signedDeny(c, mysql.TypeLong, uint64(math.MaxUint64), strvalue(uint64(math.MaxInt32))) + signedDeny(c, mysql.TypeLong, int64(math.MaxInt32)+1, strvalue(int64(math.MaxInt32))) + signedDeny(c, mysql.TypeLong, "1343545435346432587475", strvalue(int64(math.MaxInt32))) + unsignedDeny(c, mysql.TypeLong, -1, "4294967295") + unsignedAccept(c, mysql.TypeLong, 0, "0") + unsignedAccept(c, mysql.TypeLong, uint64(math.MaxUint32), strvalue(uint64(math.MaxUint32))) + unsignedDeny(c, mysql.TypeLong, uint64(math.MaxUint32)+1, strvalue(uint64(math.MaxUint32))) + + signedDeny(c, mysql.TypeLonglong, math.MinInt64*1.1, strvalue(int64(math.MinInt64))) + signedAccept(c, mysql.TypeLonglong, int64(math.MinInt64), strvalue(int64(math.MinInt64))) + signedAccept(c, mysql.TypeLonglong, int64(math.MaxInt64), strvalue(int64(math.MaxInt64))) + signedDeny(c, mysql.TypeLonglong, math.MaxInt64*1.1, strvalue(int64(math.MaxInt64))) + unsignedAccept(c, mysql.TypeLonglong, -1, "18446744073709551615") + unsignedAccept(c, mysql.TypeLonglong, 0, "0") + unsignedAccept(c, mysql.TypeLonglong, uint64(math.MaxUint64), strvalue(uint64(math.MaxUint64))) + unsignedDeny(c, mysql.TypeLonglong, math.MaxUint64*1.1, strvalue(uint64(math.MaxInt64))) + + // integer from string + signedAccept(c, mysql.TypeLong, " 234 ", "234") + signedAccept(c, mysql.TypeLong, " 2.35e3 ", "2350") + signedAccept(c, mysql.TypeLong, " 2.e3 ", "2000") + signedAccept(c, mysql.TypeLong, " -2.e3 ", "-2000") + signedAccept(c, mysql.TypeLong, " 2e2 ", "200") + signedAccept(c, mysql.TypeLong, " 0.002e3 ", "2") + signedAccept(c, mysql.TypeLong, " .002e3 ", "2") + signedAccept(c, mysql.TypeLong, " 20e-2 ", "0") + signedAccept(c, mysql.TypeLong, " -20e-2 ", "0") + signedAccept(c, mysql.TypeLong, " +2.51 ", "3") + signedAccept(c, mysql.TypeLong, " -9999.5 ", "-10000") + signedAccept(c, mysql.TypeLong, " 999.4", "999") + signedAccept(c, mysql.TypeLong, 
" -3.58", "-4") + signedDeny(c, mysql.TypeLong, " 1a ", "1") + signedDeny(c, mysql.TypeLong, " +1+ ", "1") + + // integer from float + signedAccept(c, mysql.TypeLong, 234.5456, "235") + signedAccept(c, mysql.TypeLong, -23.45, "-23") + unsignedAccept(c, mysql.TypeLonglong, 234.5456, "235") + unsignedDeny(c, mysql.TypeLonglong, -23.45, "18446744073709551593") + + // float from string + signedAccept(c, mysql.TypeFloat, "23.523", "23.523") + signedAccept(c, mysql.TypeFloat, int64(123), "123") + signedAccept(c, mysql.TypeFloat, uint64(123), "123") + signedAccept(c, mysql.TypeFloat, int(123), "123") + signedAccept(c, mysql.TypeFloat, float32(123), "123") + signedAccept(c, mysql.TypeFloat, float64(123), "123") + signedAccept(c, mysql.TypeDouble, " -23.54", "-23.54") + signedDeny(c, mysql.TypeDouble, "-23.54a", "-23.54") + signedDeny(c, mysql.TypeDouble, "-23.54e2e", "-2354") + signedDeny(c, mysql.TypeDouble, "+.e", "0") + signedAccept(c, mysql.TypeDouble, "1e+1", "10") + + // string from string + signedAccept(c, mysql.TypeString, "abc", "abc") +} + +func (s *testTypeConvertSuite) TestRoundIntStr(c *C) { + cases := []struct { + a string + b byte + c string + }{ + {"+999", '5', "+1000"}, + {"999", '5', "1000"}, + {"-999", '5', "-1000"}, + } + for _, cc := range cases { + c.Assert(roundIntStr(cc.b, cc.a), Equals, cc.c) + } +} + +func (s *testTypeConvertSuite) TestGetValidInt(c *C) { + tests := []struct { + origin string + valid string + signed bool + warning bool + }{ + {"100", "100", true, false}, + {"-100", "-100", true, false}, + {"9223372036854775808", "9223372036854775808", false, false}, + {"1abc", "1", true, true}, + {"-1-1", "-1", true, true}, + {"+1+1", "+1", true, true}, + {"123..34", "123", true, true}, + {"123.23E-10", "123", true, true}, + {"1.1e1.3", "1", true, true}, + {"11e1.3", "11", true, true}, + {"1.", "1", true, true}, + {".1", "0", true, true}, + {"", "0", true, true}, + {"123e+", "123", true, true}, + {"123de", "123", true, true}, + } + sc := new(stmtctx.StatementContext) + sc.TruncateAsWarning = true + sc.CastStrToIntStrict = true + warningCount := 0 + for _, tt := range tests { + prefix, err := getValidIntPrefix(sc, tt.origin) + c.Assert(err, IsNil) + c.Assert(prefix, Equals, tt.valid) + if tt.signed { + _, err = strconv.ParseInt(prefix, 10, 64) + } else { + _, err = strconv.ParseUint(prefix, 10, 64) + } + c.Assert(err, IsNil) + warnings := sc.GetWarnings() + if tt.warning { + c.Assert(warnings, HasLen, warningCount+1) + c.Assert(terror.ErrorEqual(warnings[len(warnings)-1].Err, ErrTruncatedWrongVal), IsTrue) + warningCount += 1 + } else { + c.Assert(warnings, HasLen, warningCount) + } + } + + tests2 := []struct { + origin string + valid string + warning bool + }{ + {"100", "100", false}, + {"-100", "-100", false}, + {"1abc", "1", true}, + {"-1-1", "-1", true}, + {"+1+1", "+1", true}, + {"123..34", "123.", true}, + {"123.23E-10", "0", false}, + {"1.1e1.3", "1.1e1", true}, + {"11e1.3", "11e1", true}, + {"1.", "1", false}, + {".1", "0", false}, + {"", "0", true}, + {"123e+", "123", true}, + {"123de", "123", true}, + } + sc.TruncateAsWarning = false + sc.CastStrToIntStrict = false + for _, tt := range tests2 { + prefix, err := getValidIntPrefix(sc, tt.origin) + if tt.warning { + c.Assert(terror.ErrorEqual(err, ErrTruncatedWrongVal), IsTrue) + } else { + c.Assert(err, IsNil) + } + c.Assert(prefix, Equals, tt.valid) + } +} + +func (s *testTypeConvertSuite) TestGetValidFloat(c *C) { + tests := []struct { + origin string + valid string + }{ + {"-100", "-100"}, + {"1abc", "1"}, + 
{"-1-1", "-1"}, + {"+1+1", "+1"}, + {"123..34", "123."}, + {"123.23E-10", "123.23E-10"}, + {"1.1e1.3", "1.1e1"}, + {"11e1.3", "11e1"}, + {"1.1e-13a", "1.1e-13"}, + {"1.", "1."}, + {".1", ".1"}, + {"", "0"}, + {"123e+", "123"}, + {"123.e", "123."}, + } + sc := new(stmtctx.StatementContext) + for _, tt := range tests { + prefix, _ := getValidFloatPrefix(sc, tt.origin) + c.Assert(prefix, Equals, tt.valid) + _, err := strconv.ParseFloat(prefix, 64) + c.Assert(err, IsNil) + } + + tests2 := []struct { + origin string + expected string + }{ + {"1e9223372036854775807", "1"}, + {"125e342", "125"}, + {"1e21", "1"}, + {"1e5", "100000"}, + {"-123.45678e5", "-12345678"}, + {"+0.5", "1"}, + {"-0.5", "-1"}, + {".5e0", "1"}, + {"+.5e0", "+1"}, + {"-.5e0", "-1"}, + {".5", "1"}, + {"123.456789e5", "12345679"}, + {"123.456784e5", "12345678"}, + {"+999.9999e2", "+100000"}, + } + for _, t := range tests2 { + str, err := floatStrToIntStr(sc, t.origin, t.origin) + c.Assert(err, IsNil) + c.Assert(str, Equals, t.expected, Commentf("%v, %v", t.origin, t.expected)) + } +} + +func (s *testTypeConvertSuite) TestConvertScientificNotation(c *C) { + cases := []struct { + input string + output string + succ bool + }{ + {"123.456e0", "123.456", true}, + {"123.456e1", "1234.56", true}, + {"123.456e3", "123456", true}, + {"123.456e4", "1234560", true}, + {"123.456e5", "12345600", true}, + {"123.456e6", "123456000", true}, + {"123.456e7", "1234560000", true}, + {"123.456e-1", "12.3456", true}, + {"123.456e-2", "1.23456", true}, + {"123.456e-3", "0.123456", true}, + {"123.456e-4", "0.0123456", true}, + {"123.456e-5", "0.00123456", true}, + {"123.456e-6", "0.000123456", true}, + {"123.456e-7", "0.0000123456", true}, + {"123.456e-", "", false}, + {"123.456e-7.5", "", false}, + {"123.456e", "", false}, + } + for _, ca := range cases { + result, err := convertScientificNotation(ca.input) + if !ca.succ { + c.Assert(err, NotNil) + } else { + c.Assert(err, IsNil) + c.Assert(ca.output, Equals, result) + } + } +} diff --git a/types/datum.go b/types/datum.go new file mode 100644 index 0000000..e6b8163 --- /dev/null +++ b/types/datum.go @@ -0,0 +1,1011 @@ +// Copyright 2016 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package types + +import ( + "fmt" + "math" + "sort" + "strconv" + "strings" + "unicode/utf8" + + "github.com/pingcap/errors" + "github.com/pingcap/tidb/parser/charset" + "github.com/pingcap/tidb/parser/mysql" + "github.com/pingcap/tidb/parser/terror" + "github.com/pingcap/tidb/sessionctx/stmtctx" + "github.com/pingcap/tidb/util/hack" +) + +// Kind constants. +const ( + KindNull byte = 0 + KindInt64 byte = 1 + KindUint64 byte = 2 + KindFloat32 byte = 3 + KindFloat64 byte = 4 + KindString byte = 5 + KindBytes byte = 6 + KindBinaryLiteral byte = 7 // Used for BIT / HEX literals. + KindMysqlBit byte = 11 // Used for BIT table column values. + KindMysqlSet byte = 12 + KindMysqlTime byte = 13 + KindInterface byte = 14 + KindMinNotNull byte = 15 + KindMaxValue byte = 16 + KindRaw byte = 17 + KindMysqlJSON byte = 18 +) + +// Datum is a data box holds different kind of data. 
+// It has better performance and is easier to use than `interface{}`. +type Datum struct { + k byte // datum kind. + collation uint8 // collation can hold uint8 values. + decimal uint16 // decimal can hold uint16 values. + length uint32 // length can hold uint32 values. + i int64 // i can hold int64 uint64 float64 values. + b []byte // b can hold string or []byte values. + x interface{} // x hold all other types. +} + +// Copy deep copies a Datum. +func (d *Datum) Copy() *Datum { + ret := *d + if d.b != nil { + ret.b = make([]byte, len(d.b)) + copy(ret.b, d.b) + } + return &ret +} + +// Kind gets the kind of the datum. +func (d *Datum) Kind() byte { + return d.k +} + +// Collation gets the collation of the datum. +func (d *Datum) Collation() byte { + return d.collation +} + +// SetCollation sets the collation of the datum. +func (d *Datum) SetCollation(collation byte) { + d.collation = collation +} + +// Frac gets the frac of the datum. +func (d *Datum) Frac() int { + return int(d.decimal) +} + +// SetFrac sets the frac of the datum. +func (d *Datum) SetFrac(frac int) { + d.decimal = uint16(frac) +} + +// Length gets the length of the datum. +func (d *Datum) Length() int { + return int(d.length) +} + +// SetLength sets the length of the datum. +func (d *Datum) SetLength(l int) { + d.length = uint32(l) +} + +// IsNull checks if datum is null. +func (d *Datum) IsNull() bool { + return d.k == KindNull +} + +// GetInt64 gets int64 value. +func (d *Datum) GetInt64() int64 { + return d.i +} + +// SetInt64 sets int64 value. +func (d *Datum) SetInt64(i int64) { + d.k = KindInt64 + d.i = i +} + +// GetUint64 gets uint64 value. +func (d *Datum) GetUint64() uint64 { + return uint64(d.i) +} + +// SetUint64 sets uint64 value. +func (d *Datum) SetUint64(i uint64) { + d.k = KindUint64 + d.i = int64(i) +} + +// GetFloat64 gets float64 value. +func (d *Datum) GetFloat64() float64 { + return math.Float64frombits(uint64(d.i)) +} + +// SetFloat64 sets float64 value. +func (d *Datum) SetFloat64(f float64) { + d.k = KindFloat64 + d.i = int64(math.Float64bits(f)) +} + +// GetFloat32 gets float32 value. +func (d *Datum) GetFloat32() float32 { + return float32(math.Float64frombits(uint64(d.i))) +} + +// SetFloat32 sets float32 value. +func (d *Datum) SetFloat32(f float32) { + d.k = KindFloat32 + d.i = int64(math.Float64bits(float64(f))) +} + +// GetString gets string value. +func (d *Datum) GetString() string { + return string(hack.String(d.b)) +} + +// SetString sets string value. +func (d *Datum) SetString(s string) { + d.k = KindString + sink(s) + d.b = hack.Slice(s) +} + +// sink prevents s from being allocated on the stack. +var sink = func(s string) { +} + +// GetBytes gets bytes value. +func (d *Datum) GetBytes() []byte { + return d.b +} + +// SetBytes sets bytes value to datum. +func (d *Datum) SetBytes(b []byte) { + d.k = KindBytes + d.b = b +} + +// SetBytesAsString sets bytes value to datum as string type. +func (d *Datum) SetBytesAsString(b []byte) { + d.k = KindString + d.b = b +} + +// GetInterface gets interface value. +func (d *Datum) GetInterface() interface{} { + return d.x +} + +// SetInterface sets interface to datum. +func (d *Datum) SetInterface(x interface{}) { + d.k = KindInterface + d.x = x +} + +// SetNull sets datum to nil. +func (d *Datum) SetNull() { + d.k = KindNull + d.x = nil +} + +// SetMinNotNull sets datum to minNotNull value. 
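A minimal usage sketch (hypothetical helper, same package as the code above, not taken from this patch): the accessors defined here let a single Datum be reused across kinds, with the numeric kinds sharing the i field (floats stored as their IEEE-754 bit pattern) and the string/bytes kinds sharing the b slice.

package types

import "fmt"

// datumStorageSketch is illustrative only; it exercises the setters and
// getters defined above on one reused Datum value.
func datumStorageSketch() {
	var d Datum

	d.SetInt64(-42)
	fmt.Println(d.Kind() == KindInt64, d.GetInt64()) // true -42

	// Floats live in the same i field, stored as their bit pattern.
	d.SetFloat64(3.5)
	fmt.Println(d.Kind() == KindFloat64, d.GetFloat64()) // true 3.5

	// Strings reuse the b slice; GetString converts it without copying.
	d.SetString("abc")
	fmt.Println(d.Kind() == KindString, d.GetString()) // true abc
}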
+func (d *Datum) SetMinNotNull() { + d.k = KindMinNotNull + d.x = nil +} + +// GetBinaryLiteral gets Bit value +func (d *Datum) GetBinaryLiteral() BinaryLiteral { + return d.b +} + +// GetMysqlBit gets MysqlBit value +func (d *Datum) GetMysqlBit() BinaryLiteral { + return d.GetBinaryLiteral() +} + +// SetBinaryLiteral sets Bit value +func (d *Datum) SetBinaryLiteral(b BinaryLiteral) { + d.k = KindBinaryLiteral + d.b = b +} + +// SetMysqlBit sets MysqlBit value +func (d *Datum) SetMysqlBit(b BinaryLiteral) { + d.k = KindMysqlBit + d.b = b +} + +// SetRaw sets raw value. +func (d *Datum) SetRaw(b []byte) { + d.k = KindRaw + d.b = b +} + +// GetRaw gets raw value. +func (d *Datum) GetRaw() []byte { + return d.b +} + +// SetAutoID set the auto increment ID according to its int flag. +func (d *Datum) SetAutoID(id int64, flag uint) { + if mysql.HasUnsignedFlag(flag) { + d.SetUint64(uint64(id)) + } else { + d.SetInt64(id) + } +} + +// String returns a human-readable description of Datum. It is intended only for debugging. +func (d Datum) String() string { + var t string + switch d.k { + case KindNull: + t = "KindNull" + case KindInt64: + t = "KindInt64" + case KindUint64: + t = "KindUint64" + case KindFloat32: + t = "KindFloat32" + case KindFloat64: + t = "KindFloat64" + case KindString: + t = "KindString" + case KindBytes: + t = "KindBytes" + case KindBinaryLiteral: + t = "KindBinaryLiteral" + case KindMysqlBit: + t = "KindMysqlBit" + case KindMysqlSet: + t = "KindMysqlSet" + case KindMysqlJSON: + t = "KindMysqlJSON" + case KindMysqlTime: + t = "KindMysqlTime" + default: + t = "Unknown" + } + v := d.GetValue() + if b, ok := v.([]byte); ok && d.k == KindBytes { + v = string(b) + } + return fmt.Sprintf("%v %v", t, v) +} + +// GetValue gets the value of the datum of any kind. +func (d *Datum) GetValue() interface{} { + switch d.k { + case KindInt64: + return d.GetInt64() + case KindUint64: + return d.GetUint64() + case KindFloat32: + return d.GetFloat32() + case KindFloat64: + return d.GetFloat64() + case KindString: + return d.GetString() + case KindBytes: + return d.GetBytes() + case KindBinaryLiteral, KindMysqlBit: + return d.GetBinaryLiteral() + default: + return d.GetInterface() + } +} + +// SetValue sets any kind of value. +func (d *Datum) SetValue(val interface{}) { + switch x := val.(type) { + case nil: + d.SetNull() + case bool: + if x { + d.SetInt64(1) + } else { + d.SetInt64(0) + } + case int: + d.SetInt64(int64(x)) + case int64: + d.SetInt64(x) + case uint64: + d.SetUint64(x) + case float32: + d.SetFloat32(x) + case float64: + d.SetFloat64(x) + case string: + d.SetString(x) + case []byte: + d.SetBytes(x) + case BinaryLiteral: + d.SetBinaryLiteral(x) + case BitLiteral: // Store as BinaryLiteral for Bit and Hex literals + d.SetBinaryLiteral(BinaryLiteral(x)) + case HexLiteral: + d.SetBinaryLiteral(BinaryLiteral(x)) + default: + d.SetInterface(x) + } +} + +// CompareDatum compares datum to another datum. +// TODO: return error properly. 
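Before the implementation below, a small sketch (hypothetical function, using only constructors from this package) of the comparison semantics CompareDatum provides: the result is -1/0/1, strings are compared numerically against numbers, and MinNotNull/MaxValue act as ordering sentinels.

package types

import "github.com/pingcap/tidb/sessionctx/stmtctx"

// compareSketch records the expected results; it is not exercised by the tests in this patch.
func compareSketch() {
	sc := new(stmtctx.StatementContext)

	a := NewIntDatum(3)
	b := NewStringDatum("3.0")
	cmp, err := a.CompareDatum(sc, &b)
	// cmp == 0, err == nil: the string is parsed through StrToFloat before comparing.
	_, _ = cmp, err

	mn, mx := MinNotNullDatum(), MaxValueDatum()
	cmp, _ = mn.CompareDatum(sc, &mx)
	// cmp == -1: MinNotNull sorts above NULL but below every real value; MaxValue sorts above all.
	_ = cmp
}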
+func (d *Datum) CompareDatum(sc *stmtctx.StatementContext, ad *Datum) (int, error) { + if d.k == KindMysqlJSON && ad.k != KindMysqlJSON { + cmp, err := ad.CompareDatum(sc, d) + return cmp * -1, errors.Trace(err) + } + switch ad.k { + case KindNull: + if d.k == KindNull { + return 0, nil + } + return 1, nil + case KindMinNotNull: + if d.k == KindNull { + return -1, nil + } else if d.k == KindMinNotNull { + return 0, nil + } + return 1, nil + case KindMaxValue: + if d.k == KindMaxValue { + return 0, nil + } + return -1, nil + case KindInt64: + return d.compareInt64(sc, ad.GetInt64()) + case KindUint64: + return d.compareUint64(sc, ad.GetUint64()) + case KindFloat32, KindFloat64: + return d.compareFloat64(sc, ad.GetFloat64()) + case KindString: + return d.compareString(sc, ad.GetString()) + case KindBytes: + return d.compareBytes(sc, ad.GetBytes()) + case KindBinaryLiteral, KindMysqlBit: + return d.compareBinaryLiteral(sc, ad.GetBinaryLiteral()) + default: + return 0, nil + } +} + +func (d *Datum) compareInt64(sc *stmtctx.StatementContext, i int64) (int, error) { + switch d.k { + case KindMaxValue: + return 1, nil + case KindInt64: + return CompareInt64(d.i, i), nil + case KindUint64: + if i < 0 || d.GetUint64() > math.MaxInt64 { + return 1, nil + } + return CompareInt64(d.i, i), nil + default: + return d.compareFloat64(sc, float64(i)) + } +} + +func (d *Datum) compareUint64(sc *stmtctx.StatementContext, u uint64) (int, error) { + switch d.k { + case KindMaxValue: + return 1, nil + case KindInt64: + if d.i < 0 || u > math.MaxInt64 { + return -1, nil + } + return CompareInt64(d.i, int64(u)), nil + case KindUint64: + return CompareUint64(d.GetUint64(), u), nil + default: + return d.compareFloat64(sc, float64(u)) + } +} + +func (d *Datum) compareFloat64(sc *stmtctx.StatementContext, f float64) (int, error) { + switch d.k { + case KindNull, KindMinNotNull: + return -1, nil + case KindMaxValue: + return 1, nil + case KindInt64: + return CompareFloat64(float64(d.i), f), nil + case KindUint64: + return CompareFloat64(float64(d.GetUint64()), f), nil + case KindFloat32, KindFloat64: + return CompareFloat64(d.GetFloat64(), f), nil + case KindString, KindBytes: + fVal, err := StrToFloat(sc, d.GetString()) + return CompareFloat64(fVal, f), errors.Trace(err) + case KindBinaryLiteral, KindMysqlBit: + val, err := d.GetBinaryLiteral().ToInt(sc) + fVal := float64(val) + return CompareFloat64(fVal, f), errors.Trace(err) + default: + return -1, nil + } +} + +func (d *Datum) compareString(sc *stmtctx.StatementContext, s string) (int, error) { + switch d.k { + case KindNull, KindMinNotNull: + return -1, nil + case KindMaxValue: + return 1, nil + case KindString, KindBytes: + return CompareString(d.GetString(), s), nil + case KindBinaryLiteral, KindMysqlBit: + return CompareString(d.GetBinaryLiteral().ToString(), s), nil + default: + fVal, err := StrToFloat(sc, s) + if err != nil { + return 0, errors.Trace(err) + } + return d.compareFloat64(sc, fVal) + } +} + +func (d *Datum) compareBytes(sc *stmtctx.StatementContext, b []byte) (int, error) { + str := string(hack.String(b)) + return d.compareString(sc, str) +} + +func (d *Datum) compareBinaryLiteral(sc *stmtctx.StatementContext, b BinaryLiteral) (int, error) { + switch d.k { + case KindString, KindBytes: + return CompareString(d.GetString(), b.ToString()), nil + case KindBinaryLiteral, KindMysqlBit: + return CompareString(d.GetBinaryLiteral().ToString(), b.ToString()), nil + default: + val, err := b.ToInt(sc) + if err != nil { + return 0, errors.Trace(err) + } + 
result, err := d.compareFloat64(sc, float64(val)) + return result, errors.Trace(err) + } +} + +// ConvertTo converts a datum to the target field type. +func (d *Datum) ConvertTo(sc *stmtctx.StatementContext, target *FieldType) (Datum, error) { + if d.k == KindNull { + return Datum{}, nil + } + switch target.Tp { // TODO: implement mysql types convert when "CAST() AS" syntax are supported. + case mysql.TypeTiny, mysql.TypeShort, mysql.TypeInt24, mysql.TypeLong, mysql.TypeLonglong: + unsigned := mysql.HasUnsignedFlag(target.Flag) + if unsigned { + return d.convertToUint(sc, target) + } + return d.convertToInt(sc, target) + case mysql.TypeFloat, mysql.TypeDouble: + return d.convertToFloat(sc, target) + case mysql.TypeBlob, mysql.TypeTinyBlob, mysql.TypeMediumBlob, mysql.TypeLongBlob, + mysql.TypeString, mysql.TypeVarchar, mysql.TypeVarString: + return d.convertToString(sc, target) + case mysql.TypeBit: + return d.convertToMysqlBit(sc, target) + case mysql.TypeNull: + return Datum{}, nil + default: + panic("should never happen") + } +} + +func (d *Datum) convertToFloat(sc *stmtctx.StatementContext, target *FieldType) (Datum, error) { + var ( + f float64 + ret Datum + err error + ) + switch d.k { + case KindNull: + return ret, nil + case KindInt64: + f = float64(d.GetInt64()) + case KindUint64: + f = float64(d.GetUint64()) + case KindFloat32, KindFloat64: + f = d.GetFloat64() + case KindString, KindBytes: + f, err = StrToFloat(sc, d.GetString()) + case KindBinaryLiteral, KindMysqlBit: + val, err1 := d.GetBinaryLiteral().ToInt(sc) + f, err = float64(val), err1 + default: + return invalidConv(d, target.Tp) + } + var err1 error + f, err1 = ProduceFloatWithSpecifiedTp(f, target, sc) + if err == nil && err1 != nil { + err = err1 + } + if target.Tp == mysql.TypeFloat { + ret.SetFloat32(float32(f)) + } else { + ret.SetFloat64(f) + } + return ret, errors.Trace(err) +} + +// ProduceFloatWithSpecifiedTp produces a new float64 according to `flen` and `decimal`. +func ProduceFloatWithSpecifiedTp(f float64, target *FieldType, sc *stmtctx.StatementContext) (_ float64, err error) { + // For float and following double type, we will only truncate it for float(M, D) format. + // If no D is set, we will handle it like origin float whether M is set or not. + if target.Flen != UnspecifiedLength && target.Decimal != UnspecifiedLength { + f, err = TruncateFloat(f, target.Flen, target.Decimal) + if err = sc.HandleOverflow(err, err); err != nil { + return f, errors.Trace(err) + } + } + if mysql.HasUnsignedFlag(target.Flag) && f < 0 { + return 0, overflow(f, target.Tp) + } + return f, nil +} + +func (d *Datum) convertToString(sc *stmtctx.StatementContext, target *FieldType) (Datum, error) { + var ret Datum + var s string + switch d.k { + case KindInt64: + s = strconv.FormatInt(d.GetInt64(), 10) + case KindUint64: + s = strconv.FormatUint(d.GetUint64(), 10) + case KindFloat32: + s = strconv.FormatFloat(d.GetFloat64(), 'f', -1, 32) + case KindFloat64: + s = strconv.FormatFloat(d.GetFloat64(), 'f', -1, 64) + case KindString, KindBytes: + s = d.GetString() + case KindBinaryLiteral, KindMysqlBit: + s = d.GetBinaryLiteral().ToString() + default: + return invalidConv(d, target.Tp) + } + s, err := ProduceStrWithSpecifiedTp(s, target, sc, true) + ret.SetString(s) + if target.Charset == charset.CharsetBin { + ret.k = KindBytes + } + return ret, errors.Trace(err) +} + +// ProduceStrWithSpecifiedTp produces a new string according to `flen` and `chs`. Param `padZero` indicates +// whether we should pad `\0` for `binary(flen)` type. 
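A short sketch (hypothetical function) of the conversion path defined above: ConvertTo dispatches on the target FieldType, and for a FLOAT(M,D) target the value flows through convertToFloat and ProduceFloatWithSpecifiedTp, where TruncateFloat (defined elsewhere in this package) applies the declared precision.

package types

import (
	"github.com/pingcap/tidb/parser/mysql"
	"github.com/pingcap/tidb/sessionctx/stmtctx"
)

// convertToFloatSketch converts the string "3.1415926" into a FLOAT(5,2) value.
func convertToFloatSketch() (Datum, error) {
	sc := new(stmtctx.StatementContext)
	ft := NewFieldType(mysql.TypeFloat)
	ft.Flen = 5
	ft.Decimal = 2

	d := NewStringDatum("3.1415926")
	// Expected: a KindFloat32 datum limited to two fractional digits (about 3.14).
	return d.ConvertTo(sc, ft)
}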
+func ProduceStrWithSpecifiedTp(s string, tp *FieldType, sc *stmtctx.StatementContext, padZero bool) (_ string, err error) { + flen, chs := tp.Flen, tp.Charset + if flen >= 0 { + // Flen is the rune length, not binary length, for UTF8 charset, we need to calculate the + // rune count and truncate to Flen runes if it is too long. + if chs == charset.CharsetUTF8 || chs == charset.CharsetUTF8MB4 { + characterLen := utf8.RuneCountInString(s) + if characterLen > flen { + // 1. If len(s) is 0 and flen is 0, truncateLen will be 0, don't truncate s. + // CREATE TABLE t (a char(0)); + // INSERT INTO t VALUES (``); + // 2. If len(s) is 10 and flen is 0, truncateLen will be 0 too, but we still need to truncate s. + // SELECT 1, CAST(1234 AS CHAR(0)); + // So truncateLen is not a suitable variable to determine to do truncate or not. + var runeCount int + var truncateLen int + for i := range s { + if runeCount == flen { + truncateLen = i + break + } + runeCount++ + } + err = ErrDataTooLong.GenWithStack("Data Too Long, field len %d, data len %d", flen, characterLen) + s = truncateStr(s, truncateLen) + } + } else if len(s) > flen { + err = ErrDataTooLong.GenWithStack("Data Too Long, field len %d, data len %d", flen, len(s)) + s = truncateStr(s, flen) + } else if tp.Tp == mysql.TypeString && IsBinaryStr(tp) && len(s) < flen && padZero { + padding := make([]byte, flen-len(s)) + s = string(append([]byte(s), padding...)) + } + } + return s, errors.Trace(sc.HandleTruncate(err)) +} + +func (d *Datum) convertToInt(sc *stmtctx.StatementContext, target *FieldType) (Datum, error) { + i64, err := d.toSignedInteger(sc, target.Tp) + return NewIntDatum(i64), errors.Trace(err) +} + +func (d *Datum) convertToUint(sc *stmtctx.StatementContext, target *FieldType) (Datum, error) { + tp := target.Tp + upperBound := IntergerUnsignedUpperBound(tp) + var ( + val uint64 + err error + ret Datum + ) + switch d.k { + case KindInt64: + val, err = ConvertIntToUint(sc, d.GetInt64(), upperBound, tp) + case KindUint64: + val, err = ConvertUintToUint(d.GetUint64(), upperBound, tp) + case KindFloat32, KindFloat64: + val, err = ConvertFloatToUint(sc, d.GetFloat64(), upperBound, tp) + case KindString, KindBytes: + uval, err1 := StrToUint(sc, d.GetString()) + if err1 != nil && ErrOverflow.Equal(err1) && !sc.ShouldIgnoreOverflowError() { + return ret, errors.Trace(err1) + } + val, err = ConvertUintToUint(uval, upperBound, tp) + if err != nil { + return ret, errors.Trace(err) + } + err = err1 + case KindBinaryLiteral, KindMysqlBit: + val, err = d.GetBinaryLiteral().ToInt(sc) + default: + return invalidConv(d, target.Tp) + } + ret.SetUint64(val) + if err != nil { + return ret, errors.Trace(err) + } + return ret, nil +} + +func (d *Datum) convertToMysqlBit(sc *stmtctx.StatementContext, target *FieldType) (Datum, error) { + var ret Datum + var uintValue uint64 + var err error + switch d.k { + case KindString, KindBytes: + uintValue, err = BinaryLiteral(d.b).ToInt(sc) + case KindInt64: + // if input kind is int64 (signed), when trans to bit, we need to treat it as unsigned + d.k = KindUint64 + fallthrough + default: + uintDatum, err1 := d.convertToUint(sc, target) + uintValue, err = uintDatum.GetUint64(), err1 + } + if target.Flen < 64 && uintValue >= 1<<(uint64(target.Flen)) { + return Datum{}, errors.Trace(ErrDataTooLong.GenWithStack("Data Too Long, field len %d", target.Flen)) + } + byteSize := (target.Flen + 7) >> 3 + ret.SetMysqlBit(NewBinaryLiteralFromUint(uintValue, byteSize)) + return ret, errors.Trace(err) +} + +// ToBool converts to a 
bool. +// We will use 1 for true, and 0 for false. +func (d *Datum) ToBool(sc *stmtctx.StatementContext) (int64, error) { + var err error + isZero := false + switch d.Kind() { + case KindInt64: + isZero = d.GetInt64() == 0 + case KindUint64: + isZero = d.GetUint64() == 0 + case KindFloat32: + isZero = RoundFloat(d.GetFloat64()) == 0 + case KindFloat64: + isZero = RoundFloat(d.GetFloat64()) == 0 + case KindString, KindBytes: + iVal, err1 := StrToInt(sc, d.GetString()) + isZero, err = iVal == 0, err1 + case KindBinaryLiteral, KindMysqlBit: + val, err1 := d.GetBinaryLiteral().ToInt(sc) + isZero, err = val == 0, err1 + default: + return 0, errors.Errorf("cannot convert %v(type %T) to bool", d.GetValue(), d.GetValue()) + } + var ret int64 + if isZero { + ret = 0 + } else { + ret = 1 + } + if err != nil { + return ret, errors.Trace(err) + } + return ret, nil +} + +// ToInt64 converts to a int64. +func (d *Datum) ToInt64(sc *stmtctx.StatementContext) (int64, error) { + return d.toSignedInteger(sc, mysql.TypeLonglong) +} + +func (d *Datum) toSignedInteger(sc *stmtctx.StatementContext, tp byte) (int64, error) { + lowerBound := IntergerSignedLowerBound(tp) + upperBound := IntergerSignedUpperBound(tp) + switch d.Kind() { + case KindInt64: + return ConvertIntToInt(d.GetInt64(), lowerBound, upperBound, tp) + case KindUint64: + return ConvertUintToInt(d.GetUint64(), upperBound, tp) + case KindFloat32: + return ConvertFloatToInt(float64(d.GetFloat32()), lowerBound, upperBound, tp) + case KindFloat64: + return ConvertFloatToInt(d.GetFloat64(), lowerBound, upperBound, tp) + case KindString, KindBytes: + iVal, err := StrToInt(sc, d.GetString()) + iVal, err2 := ConvertIntToInt(iVal, lowerBound, upperBound, tp) + if err == nil { + err = err2 + } + return iVal, errors.Trace(err) + case KindBinaryLiteral, KindMysqlBit: + val, err := d.GetBinaryLiteral().ToInt(sc) + return int64(val), errors.Trace(err) + default: + return 0, errors.Errorf("cannot convert %v(type %T) to int64", d.GetValue(), d.GetValue()) + } +} + +// ToFloat64 converts to a float64 +func (d *Datum) ToFloat64(sc *stmtctx.StatementContext) (float64, error) { + switch d.Kind() { + case KindInt64: + return float64(d.GetInt64()), nil + case KindUint64: + return float64(d.GetUint64()), nil + case KindFloat32: + return float64(d.GetFloat32()), nil + case KindFloat64: + return d.GetFloat64(), nil + case KindString: + return StrToFloat(sc, d.GetString()) + case KindBytes: + return StrToFloat(sc, string(d.GetBytes())) + case KindBinaryLiteral, KindMysqlBit: + val, err := d.GetBinaryLiteral().ToInt(sc) + return float64(val), errors.Trace(err) + default: + return 0, errors.Errorf("cannot convert %v(type %T) to float64", d.GetValue(), d.GetValue()) + } +} + +// ToString gets the string representation of the datum. +func (d *Datum) ToString() (string, error) { + switch d.Kind() { + case KindInt64: + return strconv.FormatInt(d.GetInt64(), 10), nil + case KindUint64: + return strconv.FormatUint(d.GetUint64(), 10), nil + case KindFloat32: + return strconv.FormatFloat(float64(d.GetFloat32()), 'f', -1, 32), nil + case KindFloat64: + return strconv.FormatFloat(d.GetFloat64(), 'f', -1, 64), nil + case KindString: + return d.GetString(), nil + case KindBytes: + return d.GetString(), nil + case KindBinaryLiteral, KindMysqlBit: + return d.GetBinaryLiteral().ToString(), nil + default: + return "", errors.Errorf("cannot convert %v(type %T) to string", d.GetValue(), d.GetValue()) + } +} + +// ToBytes gets the bytes representation of the datum. 
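A sketch (hypothetical function) of the ToBool contract implemented above, mirroring the TestToBool cases in datum_test.go later in this patch: anything that rounds or truncates to zero is reported as 0, everything else as 1.

package types

import "github.com/pingcap/tidb/sessionctx/stmtctx"

// toBoolSketch shows the rounding/truncation behaviour of ToBool.
func toBoolSketch() {
	sc := new(stmtctx.StatementContext)
	sc.IgnoreTruncate = true // "0.1" would otherwise report a truncation error

	d := NewDatum(0.499)
	v, _ := d.ToBool(sc) // v == 0: RoundFloat(0.499) rounds to 0

	d = NewDatum("0.1")
	v, _ = d.ToBool(sc) // v == 0: StrToInt("0.1") truncates to 0
	_ = v
}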
+func (d *Datum) ToBytes() ([]byte, error) { + switch d.k { + case KindString, KindBytes: + return d.GetBytes(), nil + default: + str, err := d.ToString() + if err != nil { + return nil, errors.Trace(err) + } + return []byte(str), nil + } +} + +func invalidConv(d *Datum, tp byte) (Datum, error) { + return Datum{}, errors.Errorf("cannot convert datum from %s to type %s.", KindStr(d.Kind()), TypeStr(tp)) +} + +// NewDatum creates a new Datum from an interface{}. +func NewDatum(in interface{}) (d Datum) { + switch x := in.(type) { + case []interface{}: + d.SetValue(MakeDatums(x...)) + default: + d.SetValue(in) + } + return d +} + +// NewIntDatum creates a new Datum from an int64 value. +func NewIntDatum(i int64) (d Datum) { + d.SetInt64(i) + return d +} + +// NewUintDatum creates a new Datum from an uint64 value. +func NewUintDatum(i uint64) (d Datum) { + d.SetUint64(i) + return d +} + +// NewBytesDatum creates a new Datum from a byte slice. +func NewBytesDatum(b []byte) (d Datum) { + d.SetBytes(b) + return d +} + +// NewStringDatum creates a new Datum from a string. +func NewStringDatum(s string) (d Datum) { + d.SetString(s) + return d +} + +// NewFloat64Datum creates a new Datum from a float64 value. +func NewFloat64Datum(f float64) (d Datum) { + d.SetFloat64(f) + return d +} + +// NewFloat32Datum creates a new Datum from a float32 value. +func NewFloat32Datum(f float32) (d Datum) { + d.SetFloat32(f) + return d +} + +// MakeDatums creates datum slice from interfaces. +func MakeDatums(args ...interface{}) []Datum { + datums := make([]Datum, len(args)) + for i, v := range args { + datums[i] = NewDatum(v) + } + return datums +} + +// MinNotNullDatum returns a datum represents minimum not null value. +func MinNotNullDatum() Datum { + return Datum{k: KindMinNotNull} +} + +// MaxValueDatum returns a datum represents max value. +func MaxValueDatum() Datum { + return Datum{k: KindMaxValue} +} + +// EqualDatums compare if a and b contains the same datum values. +func EqualDatums(sc *stmtctx.StatementContext, a []Datum, b []Datum) (bool, error) { + if len(a) != len(b) { + return false, nil + } + if a == nil && b == nil { + return true, nil + } + if a == nil || b == nil { + return false, nil + } + for i, ai := range a { + v, err := ai.CompareDatum(sc, &b[i]) + if err != nil { + return false, errors.Trace(err) + } + if v != 0 { + return false, nil + } + } + return true, nil +} + +// SortDatums sorts a slice of datum. +func SortDatums(sc *stmtctx.StatementContext, datums []Datum) error { + sorter := datumsSorter{datums: datums, sc: sc} + sort.Sort(&sorter) + return sorter.err +} + +type datumsSorter struct { + datums []Datum + sc *stmtctx.StatementContext + err error +} + +func (ds *datumsSorter) Len() int { + return len(ds.datums) +} + +func (ds *datumsSorter) Less(i, j int) bool { + cmp, err := ds.datums[i].CompareDatum(ds.sc, &ds.datums[j]) + if err != nil { + ds.err = errors.Trace(err) + return true + } + return cmp < 0 +} + +func (ds *datumsSorter) Swap(i, j int) { + ds.datums[i], ds.datums[j] = ds.datums[j], ds.datums[i] +} + +func handleTruncateError(sc *stmtctx.StatementContext, err error) error { + if sc.IgnoreTruncate { + return nil + } + if !sc.TruncateAsWarning { + return err + } + sc.AppendWarning(err) + return nil +} + +// DatumsToString converts several datums to formatted string. 
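A sketch (hypothetical function) tying together the helpers above: MakeDatums builds a row, SortDatums orders it in place via CompareDatum, and EqualDatums checks the result element by element.

package types

import "github.com/pingcap/tidb/sessionctx/stmtctx"

// sortRowSketch sorts a small row and verifies the resulting order.
func sortRowSketch() error {
	sc := new(stmtctx.StatementContext)

	row := MakeDatums(int64(3), int64(1), int64(2))
	if err := SortDatums(sc, row); err != nil {
		return err
	}

	// Expected: row is now 1, 2, 3 and same == true.
	same, err := EqualDatums(sc, row, MakeDatums(int64(1), int64(2), int64(3)))
	_ = same
	return err
}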
+func DatumsToString(datums []Datum, handleSpecialValue bool) (string, error) { + strs := make([]string, 0, len(datums)) + for _, datum := range datums { + if handleSpecialValue { + switch datum.Kind() { + case KindNull: + strs = append(strs, "NULL") + continue + case KindMinNotNull: + strs = append(strs, "-inf") + continue + case KindMaxValue: + strs = append(strs, "+inf") + continue + } + } + str, err := datum.ToString() + if err != nil { + return "", errors.Trace(err) + } + strs = append(strs, str) + } + size := len(datums) + if size > 1 { + strs[0] = "(" + strs[0] + strs[size-1] = strs[size-1] + ")" + } + return strings.Join(strs, ", "), nil +} + +// DatumsToStrNoErr converts some datums to a formatted string. +// If an error occurs, it will print a log instead of returning an error. +func DatumsToStrNoErr(datums []Datum) string { + str, err := DatumsToString(datums, true) + terror.Log(errors.Trace(err)) + return str +} + +// CloneDatum returns a new copy of the datum. +// TODO: Abandon this function. +func CloneDatum(datum Datum) Datum { + return *datum.Copy() +} + +// CloneRow deep copies a Datum slice. +func CloneRow(dr []Datum) []Datum { + c := make([]Datum, len(dr)) + for i, d := range dr { + c[i] = *d.Copy() + } + return c +} diff --git a/types/datum_eval.go b/types/datum_eval.go new file mode 100644 index 0000000..861dbed --- /dev/null +++ b/types/datum_eval.go @@ -0,0 +1,56 @@ +// Copyright 2016 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package types + +import ( + "github.com/pingcap/errors" + "github.com/pingcap/tidb/parser/opcode" +) + +// ComputePlus computes the result of a+b. +func ComputePlus(a, b Datum) (d Datum, err error) { + switch a.Kind() { + case KindInt64: + switch b.Kind() { + case KindInt64: + r, err1 := AddInt64(a.GetInt64(), b.GetInt64()) + d.SetInt64(r) + return d, errors.Trace(err1) + case KindUint64: + r, err1 := AddInteger(b.GetUint64(), a.GetInt64()) + d.SetUint64(r) + return d, errors.Trace(err1) + } + case KindUint64: + switch b.Kind() { + case KindInt64: + r, err1 := AddInteger(a.GetUint64(), b.GetInt64()) + d.SetUint64(r) + return d, errors.Trace(err1) + case KindUint64: + r, err1 := AddUint64(a.GetUint64(), b.GetUint64()) + d.SetUint64(r) + return d, errors.Trace(err1) + } + case KindFloat64: + switch b.Kind() { + case KindFloat64: + r := a.GetFloat64() + b.GetFloat64() + d.SetFloat64(r) + return d, nil + } + } + _, err = InvOp2(a.GetValue(), b.GetValue(), opcode.Plus) + return d, err +} diff --git a/types/datum_test.go b/types/datum_test.go new file mode 100644 index 0000000..1d71e0e --- /dev/null +++ b/types/datum_test.go @@ -0,0 +1,308 @@ +// Copyright 2016 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package types + +import ( + "fmt" + "reflect" + "testing" + "time" + + . "github.com/pingcap/check" + "github.com/pingcap/tidb/parser/mysql" + "github.com/pingcap/tidb/sessionctx/stmtctx" +) + +var _ = Suite(&testDatumSuite{}) + +type testDatumSuite struct { +} + +func (ts *testDatumSuite) TestDatum(c *C) { + values := []interface{}{ + int64(1), + uint64(1), + 1.1, + "abc", + []byte("abc"), + []int{1}, + } + for _, val := range values { + var d Datum + d.SetMinNotNull() + d.SetValue(val) + x := d.GetValue() + c.Assert(x, DeepEquals, val) + d.SetCollation(d.Collation()) + c.Assert(d.Collation(), NotNil) + c.Assert(d.Length(), Equals, int(d.length)) + c.Assert(fmt.Sprint(d), Equals, d.String()) + } +} + +func testDatumToBool(c *C, in interface{}, res int) { + datum := NewDatum(in) + res64 := int64(res) + sc := new(stmtctx.StatementContext) + sc.IgnoreTruncate = true + b, err := datum.ToBool(sc) + c.Assert(err, IsNil) + c.Assert(b, Equals, res64) +} + +func (ts *testDatumSuite) TestToBool(c *C) { + testDatumToBool(c, int(0), 0) + testDatumToBool(c, int64(0), 0) + testDatumToBool(c, uint64(0), 0) + testDatumToBool(c, float32(0.1), 0) + testDatumToBool(c, float64(0.1), 0) + testDatumToBool(c, float64(0.5), 1) + testDatumToBool(c, float64(0.499), 0) + testDatumToBool(c, "", 0) + testDatumToBool(c, "0.1", 0) + testDatumToBool(c, []byte{}, 0) + testDatumToBool(c, []byte("0.1"), 0) +} + +func (ts *testDatumSuite) TestEqualDatums(c *C) { + tests := []struct { + a []interface{} + b []interface{} + same bool + }{ + // Positive cases + {[]interface{}{1}, []interface{}{1}, true}, + {[]interface{}{1, "aa"}, []interface{}{1, "aa"}, true}, + {[]interface{}{1, "aa", 1}, []interface{}{1, "aa", 1}, true}, + + // negative cases + {[]interface{}{1}, []interface{}{2}, false}, + {[]interface{}{1, "a"}, []interface{}{1, "aaaaaa"}, false}, + {[]interface{}{1, "aa", 3}, []interface{}{1, "aa", 2}, false}, + + // Corner cases + {[]interface{}{}, []interface{}{}, true}, + {[]interface{}{nil}, []interface{}{nil}, true}, + {[]interface{}{}, []interface{}{1}, false}, + {[]interface{}{1}, []interface{}{1, 1}, false}, + {[]interface{}{nil}, []interface{}{1}, false}, + } + for _, tt := range tests { + testEqualDatums(c, tt.a, tt.b, tt.same) + } +} + +func testEqualDatums(c *C, a []interface{}, b []interface{}, same bool) { + sc := new(stmtctx.StatementContext) + sc.IgnoreTruncate = true + res, err := EqualDatums(sc, MakeDatums(a...), MakeDatums(b...)) + c.Assert(err, IsNil) + c.Assert(res, Equals, same, Commentf("a: %v, b: %v", a, b)) +} + +func testDatumToInt64(c *C, val interface{}, expect int64) { + d := NewDatum(val) + sc := new(stmtctx.StatementContext) + sc.IgnoreTruncate = true + b, err := d.ToInt64(sc) + c.Assert(err, IsNil) + c.Assert(b, Equals, expect) +} + +func (ts *testTypeConvertSuite) TestToInt64(c *C) { + testDatumToInt64(c, "0", int64(0)) + testDatumToInt64(c, int(0), int64(0)) + testDatumToInt64(c, int64(0), int64(0)) + testDatumToInt64(c, uint64(0), int64(0)) + testDatumToInt64(c, float32(3.1), int64(3)) + testDatumToInt64(c, float64(3.1), int64(3)) +} + +func (ts *testTypeConvertSuite) TestToFloat32(c *C) { + ft := NewFieldType(mysql.TypeFloat) + var datum = 
NewFloat64Datum(281.37) + sc := new(stmtctx.StatementContext) + sc.IgnoreTruncate = true + converted, err := datum.ConvertTo(sc, ft) + c.Assert(err, IsNil) + c.Assert(converted.Kind(), Equals, KindFloat32) + c.Assert(converted.GetFloat32(), Equals, float32(281.37)) + + datum.SetString("281.37") + converted, err = datum.ConvertTo(sc, ft) + c.Assert(err, IsNil) + c.Assert(converted.Kind(), Equals, KindFloat32) + c.Assert(converted.GetFloat32(), Equals, float32(281.37)) + + ft = NewFieldType(mysql.TypeDouble) + datum = NewFloat32Datum(281.37) + converted, err = datum.ConvertTo(sc, ft) + c.Assert(err, IsNil) + c.Assert(converted.Kind(), Equals, KindFloat64) + // Convert to float32 and convert back to float64, we will get a different value. + c.Assert(converted.GetFloat64(), Not(Equals), 281.37) + c.Assert(converted.GetFloat64(), Equals, datum.GetFloat64()) +} + +func (ts *testTypeConvertSuite) TestToFloat64(c *C) { + testCases := []struct { + d Datum + errMsg string + result float64 + }{ + {NewDatum(float32(3.00)), "", 3.00}, + {NewDatum(float64(12345.678)), "", 12345.678}, + {NewDatum("12345.678"), "", 12345.678}, + {NewDatum([]byte("12345.678")), "", 12345.678}, + {NewDatum(int64(12345)), "", 12345}, + {NewDatum(uint64(123456)), "", 123456}, + {NewDatum(byte(123)), "cannot convert .*", 0}, + } + + sc := new(stmtctx.StatementContext) + sc.IgnoreTruncate = true + for _, t := range testCases { + converted, err := t.d.ToFloat64(sc) + if t.errMsg == "" { + c.Assert(err, IsNil) + } else { + c.Assert(err, ErrorMatches, t.errMsg) + } + c.Assert(converted, Equals, t.result) + } +} + +func (ts *testDatumSuite) TestIsNull(c *C) { + tests := []struct { + data interface{} + isnull bool + }{ + {nil, true}, + {0, false}, + {1, false}, + {1.1, false}, + {"string", false}, + {"", false}, + } + for _, tt := range tests { + testIsNull(c, tt.data, tt.isnull) + } +} + +func testIsNull(c *C, data interface{}, isnull bool) { + d := NewDatum(data) + c.Assert(d.IsNull(), Equals, isnull, Commentf("data: %v, isnull: %v", data, isnull)) +} + +func (ts *testDatumSuite) TestToBytes(c *C) { + tests := []struct { + a Datum + out []byte + }{ + {NewIntDatum(1), []byte("1")}, + {NewFloat64Datum(1.23), []byte("1.23")}, + {NewStringDatum("abc"), []byte("abc")}, + } + sc := new(stmtctx.StatementContext) + sc.IgnoreTruncate = true + for _, tt := range tests { + bin, err := tt.a.ToBytes() + c.Assert(err, IsNil) + c.Assert(bin, BytesEquals, tt.out) + } +} + +func (ts *testDatumSuite) TestComputePlusAndMinus(c *C) { + sc := &stmtctx.StatementContext{TimeZone: time.UTC} + tests := []struct { + a Datum + b Datum + plus Datum + minus Datum + hasErr bool + }{ + {NewIntDatum(72), NewIntDatum(28), NewIntDatum(100), NewIntDatum(44), false}, + {NewIntDatum(72), NewUintDatum(28), NewIntDatum(100), NewIntDatum(44), false}, + {NewUintDatum(72), NewUintDatum(28), NewUintDatum(100), NewUintDatum(44), false}, + {NewUintDatum(72), NewIntDatum(28), NewUintDatum(100), NewUintDatum(44), false}, + {NewFloat64Datum(72.0), NewFloat64Datum(28.0), NewFloat64Datum(100.0), NewFloat64Datum(44.0), false}, + {NewIntDatum(72), NewFloat64Datum(42), Datum{}, Datum{}, true}, + {NewStringDatum("abcd"), NewIntDatum(42), Datum{}, Datum{}, true}, + } + + for ith, tt := range tests { + got, err := ComputePlus(tt.a, tt.b) + c.Assert(err != nil, Equals, tt.hasErr) + v, err := got.CompareDatum(sc, &tt.plus) + c.Assert(err, IsNil) + c.Assert(v, Equals, 0, Commentf("%dth got:%#v, expect:%#v", ith, got, tt.plus)) + } +} + +func (ts *testDatumSuite) TestCloneDatum(c *C) { + 
var raw Datum + raw.b = []byte("raw") + raw.k = KindRaw + tests := []Datum{ + NewIntDatum(72), + NewUintDatum(72), + NewStringDatum("abcd"), + NewBytesDatum([]byte("abcd")), + raw, + } + + sc := new(stmtctx.StatementContext) + sc.IgnoreTruncate = true + for _, tt := range tests { + tt1 := CloneDatum(tt) + res, err := tt.CompareDatum(sc, &tt1) + c.Assert(err, IsNil) + c.Assert(res, Equals, 0) + if tt.b != nil { + c.Assert(&tt.b[0], Not(Equals), &tt1.b[0]) + } + } +} + +func prepareCompareDatums() ([]Datum, []Datum) { + vals := make([]Datum, 0, 5) + vals = append(vals, NewIntDatum(1)) + vals = append(vals, NewFloat64Datum(1.23)) + vals = append(vals, NewStringDatum("abcde")) + + vals1 := make([]Datum, 0, 5) + vals1 = append(vals1, NewIntDatum(1)) + vals1 = append(vals1, NewFloat64Datum(1.23)) + vals1 = append(vals1, NewStringDatum("abcde")) + return vals, vals1 +} + +func BenchmarkCompareDatum(b *testing.B) { + vals, vals1 := prepareCompareDatums() + sc := new(stmtctx.StatementContext) + b.ResetTimer() + for i := 0; i < b.N; i++ { + for j, v := range vals { + v.CompareDatum(sc, &vals1[j]) + } + } +} + +func BenchmarkCompareDatumByReflect(b *testing.B) { + vals, vals1 := prepareCompareDatums() + b.ResetTimer() + for i := 0; i < b.N; i++ { + reflect.DeepEqual(vals, vals1) + } +} diff --git a/types/errors.go b/types/errors.go new file mode 100644 index 0000000..9e04890 --- /dev/null +++ b/types/errors.go @@ -0,0 +1,109 @@ +// Copyright 2016 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package types + +import ( + "github.com/pingcap/tidb/parser/mysql" + "github.com/pingcap/tidb/parser/terror" + parser_types "github.com/pingcap/tidb/parser/types" +) + +// const strings for ErrWrongValue +const ( + DateTimeStr = "datetime" + TimeStr = "time" +) + +var ( + // ErrInvalidDefault is returned when meet a invalid default value. + ErrInvalidDefault = parser_types.ErrInvalidDefault + // ErrDataTooLong is returned when converts a string value that is longer than field type length. + ErrDataTooLong = terror.ClassTypes.New(mysql.ErrDataTooLong, mysql.MySQLErrName[mysql.ErrDataTooLong]) + // ErrIllegalValueForType is returned when value of type is illegal. + ErrIllegalValueForType = terror.ClassTypes.New(mysql.ErrIllegalValueForType, mysql.MySQLErrName[mysql.ErrIllegalValueForType]) + // ErrTruncated is returned when data has been truncated during conversion. + ErrTruncated = terror.ClassTypes.New(mysql.WarnDataTruncated, mysql.MySQLErrName[mysql.WarnDataTruncated]) + // ErrOverflow is returned when data is out of range for a field type. + ErrOverflow = terror.ClassTypes.New(mysql.ErrDataOutOfRange, mysql.MySQLErrName[mysql.ErrDataOutOfRange]) + // ErrDivByZero is return when do division by 0. + ErrDivByZero = terror.ClassTypes.New(mysql.ErrDivisionByZero, mysql.MySQLErrName[mysql.ErrDivisionByZero]) + // ErrTooBigDisplayWidth is return when display width out of range for column. 
+ ErrTooBigDisplayWidth = terror.ClassTypes.New(mysql.ErrTooBigDisplaywidth, mysql.MySQLErrName[mysql.ErrTooBigDisplaywidth])
+ // ErrTooBigFieldLength is returned when the column length is too big for the column.
+ ErrTooBigFieldLength = terror.ClassTypes.New(mysql.ErrTooBigFieldlength, mysql.MySQLErrName[mysql.ErrTooBigFieldlength])
+ // ErrTooBigSet is returned when there are too many strings for the column.
+ ErrTooBigSet = terror.ClassTypes.New(mysql.ErrTooBigSet, mysql.MySQLErrName[mysql.ErrTooBigSet])
+ // ErrTooBigScale is returned when the DECIMAL/NUMERIC scale is bigger than mysql.MaxDecimalScale.
+ ErrTooBigScale = terror.ClassTypes.New(mysql.ErrTooBigScale, mysql.MySQLErrName[mysql.ErrTooBigScale])
+ // ErrTooBigPrecision is returned when the DECIMAL/NUMERIC precision is bigger than mysql.MaxDecimalWidth.
+ ErrTooBigPrecision = terror.ClassTypes.New(mysql.ErrTooBigPrecision, mysql.MySQLErrName[mysql.ErrTooBigPrecision])
+ // ErrBadNumber is returned when parsing an invalid binary decimal number.
+ ErrBadNumber = terror.ClassTypes.New(mysql.ErrBadNumber, mysql.MySQLErrName[mysql.ErrBadNumber])
+ // ErrInvalidFieldSize is returned when the precision of a column is out of range.
+ ErrInvalidFieldSize = terror.ClassTypes.New(mysql.ErrInvalidFieldSize, mysql.MySQLErrName[mysql.ErrInvalidFieldSize])
+ // ErrMBiggerThanD is returned when the precision is less than the scale.
+ ErrMBiggerThanD = terror.ClassTypes.New(mysql.ErrMBiggerThanD, mysql.MySQLErrName[mysql.ErrMBiggerThanD])
+ // ErrWarnDataOutOfRange is returned when the value in a numeric column is outside the permissible range of the column data type.
+ // See https://dev.mysql.com/doc/refman/5.5/en/out-of-range-and-overflow.html for details.
+ ErrWarnDataOutOfRange = terror.ClassTypes.New(mysql.ErrWarnDataOutOfRange, mysql.MySQLErrName[mysql.ErrWarnDataOutOfRange])
+ // ErrDuplicatedValueInType is returned when an enum column has a duplicated value.
+ ErrDuplicatedValueInType = terror.ClassTypes.New(mysql.ErrDuplicatedValueInType, mysql.MySQLErrName[mysql.ErrDuplicatedValueInType])
+ // ErrDatetimeFunctionOverflow is returned when a calculation in a datetime function causes overflow.
+ ErrDatetimeFunctionOverflow = terror.ClassTypes.New(mysql.ErrDatetimeFunctionOverflow, mysql.MySQLErrName[mysql.ErrDatetimeFunctionOverflow])
+ // ErrCastAsSignedOverflow is returned when a positive out-of-range integer is converted to its negative complement.
+ ErrCastAsSignedOverflow = terror.ClassTypes.New(mysql.ErrCastAsSignedOverflow, mysql.MySQLErrName[mysql.ErrCastAsSignedOverflow])
+ // ErrCastNegIntAsUnsigned is returned when a negative integer is cast to an unsigned int.
+ ErrCastNegIntAsUnsigned = terror.ClassTypes.New(mysql.ErrCastNegIntAsUnsigned, mysql.MySQLErrName[mysql.ErrCastNegIntAsUnsigned])
+ // ErrInvalidYearFormat is returned when the input is not in a valid year format.
+ ErrInvalidYearFormat = terror.ClassTypes.New(mysql.ErrInvalidYearFormat, mysql.MySQLErrName[mysql.ErrInvalidYearFormat])
+ // ErrInvalidYear is returned when the input value is not a valid year.
+ ErrInvalidYear = terror.ClassTypes.New(mysql.ErrInvalidYear, mysql.MySQLErrName[mysql.ErrInvalidYear])
+ // ErrTruncatedWrongVal is returned when data has been truncated during conversion.
+ ErrTruncatedWrongVal = terror.ClassTypes.New(mysql.ErrTruncatedWrongValue, mysql.MySQLErrName[mysql.ErrTruncatedWrongValue])
+ // ErrInvalidWeekModeFormat is returned when the week mode is wrong.
+ ErrInvalidWeekModeFormat = terror.ClassTypes.New(mysql.ErrInvalidWeekModeFormat, mysql.MySQLErrName[mysql.ErrInvalidWeekModeFormat]) + // ErrWrongValue is returned when the input value is in wrong format. + ErrWrongValue = terror.ClassTypes.New(mysql.ErrWrongValue, mysql.MySQLErrName[mysql.ErrWrongValue]) +) + +func init() { + typesMySQLErrCodes := map[terror.ErrCode]uint16{ + mysql.ErrInvalidDefault: mysql.ErrInvalidDefault, + mysql.ErrDataTooLong: mysql.ErrDataTooLong, + mysql.ErrIllegalValueForType: mysql.ErrIllegalValueForType, + mysql.WarnDataTruncated: mysql.WarnDataTruncated, + mysql.ErrDataOutOfRange: mysql.ErrDataOutOfRange, + mysql.ErrDivisionByZero: mysql.ErrDivisionByZero, + mysql.ErrTooBigDisplaywidth: mysql.ErrTooBigDisplaywidth, + mysql.ErrTooBigFieldlength: mysql.ErrTooBigFieldlength, + mysql.ErrTooBigSet: mysql.ErrTooBigSet, + mysql.ErrTooBigScale: mysql.ErrTooBigScale, + mysql.ErrTooBigPrecision: mysql.ErrTooBigPrecision, + mysql.ErrBadNumber: mysql.ErrBadNumber, + mysql.ErrInvalidFieldSize: mysql.ErrInvalidFieldSize, + mysql.ErrMBiggerThanD: mysql.ErrMBiggerThanD, + mysql.ErrWarnDataOutOfRange: mysql.ErrWarnDataOutOfRange, + mysql.ErrDuplicatedValueInType: mysql.ErrDuplicatedValueInType, + mysql.ErrDatetimeFunctionOverflow: mysql.ErrDatetimeFunctionOverflow, + mysql.ErrCastAsSignedOverflow: mysql.ErrCastAsSignedOverflow, + mysql.ErrCastNegIntAsUnsigned: mysql.ErrCastNegIntAsUnsigned, + mysql.ErrInvalidYearFormat: mysql.ErrInvalidYearFormat, + mysql.ErrInvalidYear: mysql.ErrInvalidYear, + mysql.ErrTruncatedWrongValue: mysql.ErrTruncatedWrongValue, + mysql.ErrInvalidTimeFormat: mysql.ErrInvalidTimeFormat, + mysql.ErrInvalidWeekModeFormat: mysql.ErrInvalidWeekModeFormat, + mysql.ErrWrongValue: mysql.ErrWrongValue, + } + terror.ErrClassToMySQLCodes[terror.ClassTypes] = typesMySQLErrCodes +} diff --git a/types/errors_test.go b/types/errors_test.go new file mode 100644 index 0000000..ea7e7e2 --- /dev/null +++ b/types/errors_test.go @@ -0,0 +1,57 @@ +// Copyright 2019 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package types + +import ( + . 
"github.com/pingcap/check" + "github.com/pingcap/tidb/parser/mysql" + "github.com/pingcap/tidb/parser/terror" +) + +type testErrorSuite struct{} + +var _ = Suite(testErrorSuite{}) + +func (s testErrorSuite) TestError(c *C) { + kvErrs := []*terror.Error{ + ErrInvalidDefault, + ErrDataTooLong, + ErrIllegalValueForType, + ErrTruncated, + ErrOverflow, + ErrDivByZero, + ErrTooBigDisplayWidth, + ErrTooBigFieldLength, + ErrTooBigSet, + ErrTooBigScale, + ErrTooBigPrecision, + ErrBadNumber, + ErrInvalidFieldSize, + ErrMBiggerThanD, + ErrWarnDataOutOfRange, + ErrDuplicatedValueInType, + ErrDatetimeFunctionOverflow, + ErrCastAsSignedOverflow, + ErrCastNegIntAsUnsigned, + ErrInvalidYearFormat, + ErrInvalidYear, + ErrTruncatedWrongVal, + ErrInvalidWeekModeFormat, + ErrWrongValue, + } + for _, err := range kvErrs { + code := err.ToSQLError().Code + c.Assert(code != mysql.ErrUnknown && code == uint16(err.Code()), IsTrue, Commentf("err: %v", err)) + } +} diff --git a/types/etc.go b/types/etc.go new file mode 100644 index 0000000..ee732e1 --- /dev/null +++ b/types/etc.go @@ -0,0 +1,133 @@ +// Copyright 2014 The ql Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSES/QL-LICENSE file. + +// Copyright 2015 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package types + +import ( + "io" + + "github.com/pingcap/errors" + "github.com/pingcap/tidb/parser/charset" + "github.com/pingcap/tidb/parser/mysql" + "github.com/pingcap/tidb/parser/opcode" + "github.com/pingcap/tidb/parser/terror" + ast "github.com/pingcap/tidb/parser/types" +) + +// IsTypeBlob returns a boolean indicating whether the tp is a blob type. +var IsTypeBlob = ast.IsTypeBlob + +// IsTypeChar returns a boolean indicating +// whether the tp is the char type like a string type or a varchar type. +var IsTypeChar = ast.IsTypeChar + +// IsTypeVarchar returns a boolean indicating +// whether the tp is the varchar type like a varstring type or a varchar type. +func IsTypeVarchar(tp byte) bool { + return tp == mysql.TypeVarString || tp == mysql.TypeVarchar +} + +// IsTypeUnspecified returns a boolean indicating whether the tp is the Unspecified type. +func IsTypeUnspecified(tp byte) bool { + return tp == mysql.TypeUnspecified +} + +// IsTypePrefixable returns a boolean indicating +// whether an index on a column with the tp can be defined with a prefix. +func IsTypePrefixable(tp byte) bool { + return IsTypeBlob(tp) || IsTypeChar(tp) +} + +// IsTypeNumeric returns a boolean indicating whether the tp is numeric type. +func IsTypeNumeric(tp byte) bool { + switch tp { + case mysql.TypeBit, mysql.TypeTiny, mysql.TypeInt24, mysql.TypeLong, mysql.TypeLonglong, mysql.TypeNewDecimal, + mysql.TypeDecimal, mysql.TypeFloat, mysql.TypeDouble, mysql.TypeShort: + return true + } + return false +} + +// IsBinaryStr returns a boolean indicating +// whether the field type is a binary string type. 
+func IsBinaryStr(ft *FieldType) bool { + return ft.Collate == charset.CollationBin && IsString(ft.Tp) +} + +// IsNonBinaryStr returns a boolean indicating +// whether the field type is a non-binary string type. +func IsNonBinaryStr(ft *FieldType) bool { + if ft.Collate != charset.CollationBin && IsString(ft.Tp) { + return true + } + return false +} + +// IsString returns a boolean indicating +// whether the field type is a string type. +func IsString(tp byte) bool { + return IsTypeChar(tp) || IsTypeBlob(tp) || IsTypeVarchar(tp) || IsTypeUnspecified(tp) +} + +var kind2Str = map[byte]string{ + KindNull: "null", + KindInt64: "bigint", + KindUint64: "unsigned bigint", + KindFloat32: "float", + KindFloat64: "double", + KindString: "char", + KindBytes: "bytes", + KindInterface: "interface", + KindMinNotNull: "min_not_null", + KindMaxValue: "max_value", + KindRaw: "raw", +} + +// TypeStr converts tp to a string. +var TypeStr = ast.TypeStr + +// KindStr converts kind to a string. +func KindStr(kind byte) (r string) { + return kind2Str[kind] +} + +// TypeToStr converts a field to a string. +// It is used for converting Text to Blob, +// or converting Char to Binary. +// Args: +// tp: type enum +// cs: charset +var TypeToStr = ast.TypeToStr + +// EOFAsNil filtrates errors, +// If err is equal to io.EOF returns nil. +func EOFAsNil(err error) error { + if terror.ErrorEqual(err, io.EOF) { + return nil + } + return errors.Trace(err) +} + +// InvOp2 returns an invalid operation error. +func InvOp2(x, y interface{}, o opcode.Op) (interface{}, error) { + return nil, errors.Errorf("Invalid operation: %v %v %v (mismatched types %T and %T)", x, o, y, x, y) +} + +// overflow returns an overflowed error. +func overflow(v interface{}, tp byte) error { + return ErrOverflow.GenWithStack("constant %v overflows %s", v, TypeStr(tp)) +} diff --git a/types/etc_test.go b/types/etc_test.go new file mode 100644 index 0000000..c3e34e4 --- /dev/null +++ b/types/etc_test.go @@ -0,0 +1,292 @@ +// Copyright 2015 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package types + +import ( + "io" + "testing" + + . 
"github.com/pingcap/check" + "github.com/pingcap/errors" + "github.com/pingcap/tidb/parser/charset" + "github.com/pingcap/tidb/parser/mysql" + "github.com/pingcap/tidb/parser/terror" + "github.com/pingcap/tidb/util/testleak" +) + +func TestT(t *testing.T) { + TestingT(t) +} + +var _ = Suite(&testTypeEtcSuite{}) + +type testTypeEtcSuite struct { +} + +func testIsTypeBlob(c *C, tp byte, expect bool) { + v := IsTypeBlob(tp) + c.Assert(v, Equals, expect) +} + +func testIsTypeChar(c *C, tp byte, expect bool) { + v := IsTypeChar(tp) + c.Assert(v, Equals, expect) +} + +func (s *testTypeEtcSuite) TestIsType(c *C) { + defer testleak.AfterTest(c)() + testIsTypeBlob(c, mysql.TypeTinyBlob, true) + testIsTypeBlob(c, mysql.TypeMediumBlob, true) + testIsTypeBlob(c, mysql.TypeBlob, true) + testIsTypeBlob(c, mysql.TypeLongBlob, true) + testIsTypeBlob(c, mysql.TypeInt24, false) + + testIsTypeChar(c, mysql.TypeString, true) + testIsTypeChar(c, mysql.TypeVarchar, true) + testIsTypeChar(c, mysql.TypeLong, false) +} + +func testTypeStr(c *C, tp byte, expect string) { + v := TypeStr(tp) + c.Assert(v, Equals, expect) +} + +func testTypeToStr(c *C, tp byte, charset string, expect string) { + v := TypeToStr(tp, charset) + c.Assert(v, Equals, expect) +} + +func (s *testTypeEtcSuite) TestTypeToStr(c *C) { + defer testleak.AfterTest(c)() + testTypeStr(c, mysql.TypeYear, "year") + testTypeStr(c, 0xdd, "") + + testTypeToStr(c, mysql.TypeBlob, "utf8", "text") + testTypeToStr(c, mysql.TypeLongBlob, "utf8", "longtext") + testTypeToStr(c, mysql.TypeTinyBlob, "utf8", "tinytext") + testTypeToStr(c, mysql.TypeMediumBlob, "utf8", "mediumtext") + testTypeToStr(c, mysql.TypeVarchar, "binary", "varbinary") + testTypeToStr(c, mysql.TypeString, "binary", "binary") + testTypeToStr(c, mysql.TypeTiny, "binary", "tinyint") + testTypeToStr(c, mysql.TypeBlob, "binary", "blob") + testTypeToStr(c, mysql.TypeLongBlob, "binary", "longblob") + testTypeToStr(c, mysql.TypeTinyBlob, "binary", "tinyblob") + testTypeToStr(c, mysql.TypeMediumBlob, "binary", "mediumblob") + testTypeToStr(c, mysql.TypeVarchar, "utf8", "varchar") + testTypeToStr(c, mysql.TypeString, "utf8", "char") + testTypeToStr(c, mysql.TypeShort, "binary", "smallint") + testTypeToStr(c, mysql.TypeInt24, "binary", "mediumint") + testTypeToStr(c, mysql.TypeLong, "binary", "int") + testTypeToStr(c, mysql.TypeLonglong, "binary", "bigint") + testTypeToStr(c, mysql.TypeFloat, "binary", "float") + testTypeToStr(c, mysql.TypeDouble, "binary", "double") + testTypeToStr(c, mysql.TypeYear, "binary", "year") + testTypeToStr(c, mysql.TypeDuration, "binary", "time") + testTypeToStr(c, mysql.TypeDatetime, "binary", "datetime") + testTypeToStr(c, mysql.TypeDate, "binary", "date") + testTypeToStr(c, mysql.TypeTimestamp, "binary", "timestamp") + testTypeToStr(c, mysql.TypeNewDecimal, "binary", "decimal") + testTypeToStr(c, mysql.TypeUnspecified, "binary", "unspecified") + testTypeToStr(c, 0xdd, "binary", "") + testTypeToStr(c, mysql.TypeBit, "binary", "bit") + testTypeToStr(c, mysql.TypeEnum, "binary", "enum") + testTypeToStr(c, mysql.TypeSet, "binary", "set") +} + +func (s *testTypeEtcSuite) TestEOFAsNil(c *C) { + defer testleak.AfterTest(c)() + err := EOFAsNil(io.EOF) + c.Assert(err, IsNil) + err = EOFAsNil(errors.New("test")) + c.Assert(err, ErrorMatches, "test") +} + +func (s *testTypeEtcSuite) TestMaxFloat(c *C) { + defer testleak.AfterTest(c)() + tbl := []struct { + Flen int + Decimal int + Expect float64 + }{ + {3, 2, 9.99}, + {5, 2, 999.99}, + {10, 1, 999999999.9}, + {5, 5, 0.99999}, + } + + 
for _, t := range tbl { + f := GetMaxFloat(t.Flen, t.Decimal) + c.Assert(f, Equals, t.Expect) + } +} + +func (s *testTypeEtcSuite) TestRoundFloat(c *C) { + defer testleak.AfterTest(c)() + tbl := []struct { + Input float64 + Expect float64 + }{ + {2.5, 3}, + {1.5, 2}, + {0.5, 1}, + {0.49999999999999997, 0}, + {0, 0}, + {-0.49999999999999997, 0}, + {-0.5, -1}, + {-2.5, -3}, + {-1.5, -2}, + } + + for _, t := range tbl { + f := RoundFloat(t.Input) + c.Assert(f, Equals, t.Expect) + } +} + +func (s *testTypeEtcSuite) TestRound(c *C) { + defer testleak.AfterTest(c)() + tbl := []struct { + Input float64 + Dec int + Expect float64 + }{ + {-1.23, 0, -1}, + {-1.58, 0, -2}, + {1.58, 0, 2}, + {1.298, 1, 1.3}, + {1.298, 0, 1}, + {23.298, -1, 20}, + } + + for _, t := range tbl { + f := Round(t.Input, t.Dec) + c.Assert(f, Equals, t.Expect) + } +} + +func (s *testTypeEtcSuite) TestTruncate(c *C) { + defer testleak.AfterTest(c)() + tbl := []struct { + Input float64 + Flen int + Decimal int + Expect float64 + Err error + }{ + {100.114, 10, 2, 100.11, nil}, + {100.115, 10, 2, 100.12, nil}, + {100.1156, 10, 3, 100.116, nil}, + {100.1156, 3, 1, 99.9, ErrOverflow}, + {1.36, 10, 2, 1.36, nil}, + } + for _, t := range tbl { + f, err := TruncateFloat(t.Input, t.Flen, t.Decimal) + c.Assert(f, Equals, t.Expect) + c.Assert(terror.ErrorEqual(err, t.Err), IsTrue, Commentf("err %v", err)) + } +} + +func (s *testTypeEtcSuite) TestIsBinaryStr(c *C) { + defer testleak.AfterTest(c)() + in := FieldType{ + Tp: mysql.TypeBit, + Flag: mysql.UnsignedFlag, + Flen: 1, + Decimal: 0, + Charset: charset.CharsetUTF8, + Collate: charset.CollationUTF8, + } + in.Collate = charset.CollationUTF8 + res := IsBinaryStr(&in) + c.Assert(res, Equals, false) + + in.Collate = charset.CollationBin + res = IsBinaryStr(&in) + c.Assert(res, Equals, false) + + in.Tp = mysql.TypeBlob + res = IsBinaryStr(&in) + c.Assert(res, Equals, true) +} + +func (s *testTypeEtcSuite) TestIsNonBinaryStr(c *C) { + defer testleak.AfterTest(c)() + in := FieldType{ + Tp: mysql.TypeBit, + Flag: mysql.UnsignedFlag, + Flen: 1, + Decimal: 0, + Charset: charset.CharsetUTF8, + Collate: charset.CollationUTF8, + } + + in.Collate = charset.CollationBin + res := IsBinaryStr(&in) + c.Assert(res, Equals, false) + + in.Collate = charset.CollationUTF8 + res = IsBinaryStr(&in) + c.Assert(res, Equals, false) + + in.Tp = mysql.TypeBlob + res = IsBinaryStr(&in) + c.Assert(res, Equals, false) +} + +func (s *testTypeEtcSuite) TestIsTypePrefixable(c *C) { + defer testleak.AfterTest(c)() + + res := IsTypePrefixable('t') + c.Assert(res, Equals, false) + + res = IsTypePrefixable(mysql.TypeBlob) + c.Assert(res, Equals, true) +} + +func (s *testTypeEtcSuite) TestIsTypeNumeric(c *C) { + defer testleak.AfterTest(c)() + + res := IsTypeNumeric(mysql.TypeBit) + c.Assert(res, Equals, true) + + res = IsTypeNumeric(mysql.TypeTiny) + c.Assert(res, Equals, true) + + res = IsTypeNumeric(mysql.TypeInt24) + c.Assert(res, Equals, true) + + res = IsTypeNumeric(mysql.TypeLong) + c.Assert(res, Equals, true) + + res = IsTypeNumeric(mysql.TypeLonglong) + c.Assert(res, Equals, true) + + res = IsTypeNumeric(mysql.TypeNewDecimal) + c.Assert(res, Equals, true) + + res = IsTypeNumeric(mysql.TypeDecimal) + c.Assert(res, Equals, true) + + res = IsTypeNumeric(mysql.TypeFloat) + c.Assert(res, Equals, true) + + res = IsTypeNumeric(mysql.TypeDouble) + c.Assert(res, Equals, true) + + res = IsTypeNumeric(mysql.TypeShort) + c.Assert(res, Equals, true) + + res = IsTypeNumeric('t') + c.Assert(res, Equals, false) +} diff --git 
a/types/eval_type.go b/types/eval_type.go new file mode 100644 index 0000000..fc46736 --- /dev/null +++ b/types/eval_type.go @@ -0,0 +1,28 @@ +// Copyright 2017 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package types + +import ast "github.com/pingcap/tidb/parser/types" + +// EvalType indicates the specified types that arguments and result of a built-in function should be. +type EvalType = ast.EvalType + +const ( + // ETInt represents type INT in evaluation. + ETInt = ast.ETInt + // ETReal represents type REAL in evaluation. + ETReal = ast.ETReal + // ETString represents type STRING in evaluation. + ETString = ast.ETString +) diff --git a/types/field_name.go b/types/field_name.go new file mode 100644 index 0000000..04547d1 --- /dev/null +++ b/types/field_name.go @@ -0,0 +1,62 @@ +// Copyright 2019 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package types + +import ( + "strings" + + "github.com/pingcap/tidb/parser/model" +) + +// FieldName records the names used for mysql protocol. +type FieldName struct { + OrigTblName model.CIStr + OrigColName model.CIStr + DBName model.CIStr + TblName model.CIStr + ColName model.CIStr + + Hidden bool +} + +const emptyName = "EMPTY_NAME" + +// String implements Stringer interface. +func (name *FieldName) String() string { + builder := strings.Builder{} + if name.Hidden { + return emptyName + } + if name.DBName.L != "" { + builder.WriteString(name.DBName.L + ".") + } + if name.TblName.L != "" { + builder.WriteString(name.TblName.L + ".") + } + builder.WriteString(name.ColName.L) + return builder.String() +} + +// NameSlice is the slice of the *fieldName +type NameSlice []*FieldName + +// Shallow is a shallow copy, only making a new slice. +func (s NameSlice) Shallow() NameSlice { + ret := make(NameSlice, len(s)) + copy(ret, s) + return ret +} + +// EmptyName is to occupy the position in the name slice. If it's set, that column's name is hidden. +var EmptyName = &FieldName{Hidden: true} diff --git a/types/field_type.go b/types/field_type.go new file mode 100644 index 0000000..75e9bf9 --- /dev/null +++ b/types/field_type.go @@ -0,0 +1,1177 @@ +// Copyright 2015 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package types + +import ( + "strconv" + + "github.com/pingcap/tidb/parser/charset" + "github.com/pingcap/tidb/parser/mysql" + ast "github.com/pingcap/tidb/parser/types" + utilMath "github.com/pingcap/tidb/util/math" +) + +// UnspecifiedLength is unspecified length. +const ( + UnspecifiedLength = -1 +) + +// FieldType records field type information. +type FieldType = ast.FieldType + +// NewFieldType returns a FieldType, +// with a type and other information about field type. +func NewFieldType(tp byte) *FieldType { + return &FieldType{ + Tp: tp, + Flen: UnspecifiedLength, + Decimal: UnspecifiedLength, + } +} + +// AggFieldType aggregates field types for a multi-argument function like `IF`, `IFNULL`, `COALESCE` +// whose return type is determined by the arguments' FieldTypes. +// Aggregation is performed by MergeFieldType function. +func AggFieldType(tps []*FieldType) *FieldType { + var currType FieldType + for i, t := range tps { + if i == 0 && currType.Tp == mysql.TypeUnspecified { + currType = *t + continue + } + mtp := MergeFieldType(currType.Tp, t.Tp) + currType.Tp = mtp + } + + return &currType +} + +// AggregateEvalType aggregates arguments' EvalType of a multi-argument function. +func AggregateEvalType(fts []*FieldType, flag *uint) EvalType { + var ( + aggregatedEvalType = ETString + unsigned bool + gotFirst bool + gotBinString bool + ) + lft := fts[0] + for _, ft := range fts { + if ft.Tp == mysql.TypeNull { + continue + } + et := ft.EvalType() + rft := ft + if (IsTypeBlob(ft.Tp) || IsTypeVarchar(ft.Tp) || IsTypeChar(ft.Tp)) && mysql.HasBinaryFlag(ft.Flag) { + gotBinString = true + } + if !gotFirst { + gotFirst = true + aggregatedEvalType = et + unsigned = mysql.HasUnsignedFlag(ft.Flag) + } else { + aggregatedEvalType = mergeEvalType(aggregatedEvalType, et, lft, rft, unsigned, mysql.HasUnsignedFlag(ft.Flag)) + unsigned = unsigned && mysql.HasUnsignedFlag(ft.Flag) + } + lft = rft + } + setTypeFlag(flag, mysql.UnsignedFlag, unsigned) + setTypeFlag(flag, mysql.BinaryFlag, !aggregatedEvalType.IsStringKind() || gotBinString) + return aggregatedEvalType +} + +func mergeEvalType(lhs, rhs EvalType, lft, rft *FieldType, isLHSUnsigned, isRHSUnsigned bool) EvalType { + if lft.Tp == mysql.TypeUnspecified || rft.Tp == mysql.TypeUnspecified { + if lft.Tp == rft.Tp { + return ETString + } + if lft.Tp == mysql.TypeUnspecified { + lhs = rhs + } else { + rhs = lhs + } + } + if lhs.IsStringKind() || rhs.IsStringKind() { + return ETString + } else if lhs == ETReal || rhs == ETReal { + return ETReal + } + return ETInt +} + +func setTypeFlag(flag *uint, flagItem uint, on bool) { + if on { + *flag |= flagItem + } else { + *flag &= ^flagItem + } +} + +// DefaultParamTypeForValue returns the default FieldType for the parameterized value. 
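+// A nil value maps to VarString with unspecified length and decimal; any other
+// value is typed by DefaultTypeForValue, after which variant-length types get
+// an unspecified Flen and a still-unspecified type falls back to VarString.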
+func DefaultParamTypeForValue(value interface{}, tp *FieldType) { + switch value.(type) { + case nil: + tp.Tp = mysql.TypeVarString + tp.Flen = UnspecifiedLength + tp.Decimal = UnspecifiedLength + default: + DefaultTypeForValue(value, tp) + if hasVariantFieldLength(tp) { + tp.Flen = UnspecifiedLength + } + if tp.Tp == mysql.TypeUnspecified { + tp.Tp = mysql.TypeVarString + } + } +} + +func hasVariantFieldLength(tp *FieldType) bool { + switch tp.Tp { + case mysql.TypeLonglong, mysql.TypeVarString, mysql.TypeDouble, mysql.TypeBlob, + mysql.TypeBit, mysql.TypeDuration, mysql.TypeNewDecimal, mysql.TypeEnum, mysql.TypeSet: + return true + } + return false +} + +// DefaultTypeForValue returns the default FieldType for the value. +func DefaultTypeForValue(value interface{}, tp *FieldType) { + switch x := value.(type) { + case nil: + tp.Tp = mysql.TypeNull + tp.Flen = 0 + tp.Decimal = 0 + SetBinChsClnFlag(tp) + case bool: + tp.Tp = mysql.TypeLonglong + tp.Flen = 1 + tp.Decimal = 0 + tp.Flag |= mysql.IsBooleanFlag + SetBinChsClnFlag(tp) + case int: + tp.Tp = mysql.TypeLonglong + tp.Flen = utilMath.StrLenOfInt64Fast(int64(x)) + tp.Decimal = 0 + SetBinChsClnFlag(tp) + case int64: + tp.Tp = mysql.TypeLonglong + tp.Flen = utilMath.StrLenOfInt64Fast(x) + tp.Decimal = 0 + SetBinChsClnFlag(tp) + case uint64: + tp.Tp = mysql.TypeLonglong + tp.Flag |= mysql.UnsignedFlag + tp.Flen = utilMath.StrLenOfUint64Fast(x) + tp.Decimal = 0 + SetBinChsClnFlag(tp) + case string: + tp.Tp = mysql.TypeVarString + // TODO: tp.Flen should be len(x) * 3 (max bytes length of CharsetUTF8) + tp.Flen = len(x) + tp.Decimal = UnspecifiedLength + tp.Charset, tp.Collate = charset.GetDefaultCharsetAndCollate() + case float32: + tp.Tp = mysql.TypeFloat + s := strconv.FormatFloat(float64(x), 'f', -1, 32) + tp.Flen = len(s) + tp.Decimal = UnspecifiedLength + SetBinChsClnFlag(tp) + case float64: + tp.Tp = mysql.TypeDouble + s := strconv.FormatFloat(x, 'f', -1, 64) + tp.Flen = len(s) + tp.Decimal = UnspecifiedLength + SetBinChsClnFlag(tp) + case []byte: + tp.Tp = mysql.TypeBlob + tp.Flen = len(x) + tp.Decimal = UnspecifiedLength + SetBinChsClnFlag(tp) + default: + tp.Tp = mysql.TypeUnspecified + tp.Flen = UnspecifiedLength + tp.Decimal = UnspecifiedLength + } +} + +// DefaultCharsetForType returns the default charset/collation for mysql type. +func DefaultCharsetForType(tp byte) (string, string) { + switch tp { + case mysql.TypeVarString, mysql.TypeString, mysql.TypeVarchar: + // Default charset for string types is utf8. + return mysql.DefaultCharset, mysql.DefaultCollationName + } + return charset.CharsetBin, charset.CollationBin +} + +// MergeFieldType merges two MySQL type to a new type. +// This is used in hybrid field type expression. +// For example "select case c when 1 then 2 when 2 then 'tidb' from t;" +// The result field type of the case expression is the merged type of the two when clause. 
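+// For example, merging TypeTiny with TypeLong gives TypeLong, while merging
+// TypeLonglong with TypeVarchar falls back to TypeVarchar.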
+// See https://github.com/mysql/mysql-server/blob/5.7/sql/field.cc#L1042 +func MergeFieldType(a byte, b byte) byte { + ia := getFieldTypeIndex(a) + ib := getFieldTypeIndex(b) + return fieldTypeMergeRules[ia][ib] +} + +func getFieldTypeIndex(tp byte) int { + itp := int(tp) + if itp < fieldTypeTearFrom { + return itp + } + return fieldTypeTearFrom + itp - fieldTypeTearTo - 1 +} + +const ( + fieldTypeTearFrom = int(mysql.TypeBit) + 1 + fieldTypeTearTo = int(mysql.TypeJSON) - 1 + fieldTypeNum = fieldTypeTearFrom + (255 - fieldTypeTearTo) +) + +var fieldTypeMergeRules = [fieldTypeNum][fieldTypeNum]byte{ + /* mysql.TypeDecimal -> */ + { + //mysql.TypeDecimal mysql.TypeTiny + mysql.TypeNewDecimal, mysql.TypeNewDecimal, + //mysql.TypeShort mysql.TypeLong + mysql.TypeNewDecimal, mysql.TypeNewDecimal, + //mysql.TypeFloat mysql.TypeDouble + mysql.TypeDouble, mysql.TypeDouble, + //mysql.TypeNull mysql.TypeTimestamp + mysql.TypeNewDecimal, mysql.TypeVarchar, + //mysql.TypeLonglong mysql.TypeInt24 + mysql.TypeDecimal, mysql.TypeDecimal, + //mysql.TypeDate mysql.TypeTime + mysql.TypeVarchar, mysql.TypeVarchar, + //mysql.TypeDatetime mysql.TypeYear + mysql.TypeVarchar, mysql.TypeVarchar, + //mysql.TypeNewDate mysql.TypeVarchar + mysql.TypeVarchar, mysql.TypeVarchar, + //mysql.TypeBit <16>-<244> + mysql.TypeVarchar, + //mysql.TypeJSON + mysql.TypeVarchar, + //mysql.TypeNewDecimal mysql.TypeEnum + mysql.TypeNewDecimal, mysql.TypeVarchar, + //mysql.TypeSet mysql.TypeTinyBlob + mysql.TypeVarchar, mysql.TypeTinyBlob, + //mysql.TypeMediumBlob mysql.TypeLongBlob + mysql.TypeMediumBlob, mysql.TypeLongBlob, + //mysql.TypeBlob mysql.TypeVarString + mysql.TypeBlob, mysql.TypeVarchar, + //mysql.TypeString mysql.TypeGeometry + mysql.TypeString, mysql.TypeVarchar, + }, + /* mysql.TypeTiny -> */ + { + //mysql.TypeDecimal mysql.TypeTiny + mysql.TypeNewDecimal, mysql.TypeTiny, + //mysql.TypeShort mysql.TypeLong + mysql.TypeShort, mysql.TypeLong, + //mysql.TypeFloat mysql.TypeDouble + mysql.TypeFloat, mysql.TypeDouble, + //mysql.TypeNull mysql.TypeTimestamp + mysql.TypeTiny, mysql.TypeVarchar, + //mysql.TypeLonglong mysql.TypeInt24 + mysql.TypeLonglong, mysql.TypeInt24, + //mysql.TypeDate mysql.TypeTime + mysql.TypeVarchar, mysql.TypeVarchar, + //mysql.TypeDatetime mysql.TypeYear + mysql.TypeVarchar, mysql.TypeTiny, + //mysql.TypeNewDate mysql.TypeVarchar + mysql.TypeVarchar, mysql.TypeVarchar, + //mysql.TypeBit <16>-<244> + mysql.TypeVarchar, + //mysql.TypeJSON + mysql.TypeVarchar, + //mysql.TypeNewDecimal mysql.TypeEnum + mysql.TypeNewDecimal, mysql.TypeVarchar, + //mysql.TypeSet mysql.TypeTinyBlob + mysql.TypeVarchar, mysql.TypeTinyBlob, + //mysql.TypeMediumBlob mysql.TypeLongBlob + mysql.TypeMediumBlob, mysql.TypeLongBlob, + //mysql.TypeBlob mysql.TypeVarString + mysql.TypeBlob, mysql.TypeVarchar, + //mysql.TypeString mysql.TypeGeometry + mysql.TypeString, mysql.TypeVarchar, + }, + /* mysql.TypeShort -> */ + { + //mysql.TypeDecimal mysql.TypeTiny + mysql.TypeNewDecimal, mysql.TypeShort, + //mysql.TypeShort mysql.TypeLong + mysql.TypeShort, mysql.TypeLong, + //mysql.TypeFloat mysql.TypeDouble + mysql.TypeFloat, mysql.TypeDouble, + //mysql.TypeNull mysql.TypeTimestamp + mysql.TypeShort, mysql.TypeVarchar, + //mysql.TypeLonglong mysql.TypeInt24 + mysql.TypeLonglong, mysql.TypeInt24, + //mysql.TypeDate mysql.TypeTime + mysql.TypeVarchar, mysql.TypeVarchar, + //mysql.TypeDatetime mysql.TypeYear + mysql.TypeVarchar, mysql.TypeShort, + //mysql.TypeNewDate mysql.TypeVarchar + mysql.TypeVarchar, mysql.TypeVarchar, + 
//mysql.TypeBit <16>-<244> + mysql.TypeVarchar, + //mysql.TypeJSON + mysql.TypeVarchar, + //mysql.TypeNewDecimal mysql.TypeEnum + mysql.TypeNewDecimal, mysql.TypeVarchar, + //mysql.TypeSet mysql.TypeTinyBlob + mysql.TypeVarchar, mysql.TypeTinyBlob, + //mysql.TypeMediumBlob mysql.TypeLongBlob + mysql.TypeMediumBlob, mysql.TypeLongBlob, + //mysql.TypeBlob mysql.TypeVarString + mysql.TypeBlob, mysql.TypeVarchar, + //mysql.TypeString mysql.TypeGeometry + mysql.TypeString, mysql.TypeVarchar, + }, + /* mysql.TypeLong -> */ + { + //mysql.TypeDecimal mysql.TypeTiny + mysql.TypeNewDecimal, mysql.TypeLong, + //mysql.TypeShort mysql.TypeLong + mysql.TypeLong, mysql.TypeLong, + //mysql.TypeFloat mysql.TypeDouble + mysql.TypeDouble, mysql.TypeDouble, + //mysql.TypeNull mysql.TypeTimestamp + mysql.TypeLong, mysql.TypeVarchar, + //mysql.TypeLonglong mysql.TypeInt24 + mysql.TypeLonglong, mysql.TypeLong, + //mysql.TypeDate mysql.TypeTime + mysql.TypeVarchar, mysql.TypeVarchar, + //mysql.TypeDatetime mysql.TypeYear + mysql.TypeVarchar, mysql.TypeLong, + //mysql.TypeNewDate mysql.TypeVarchar + mysql.TypeVarchar, mysql.TypeVarchar, + //mysql.TypeBit <16>-<244> + mysql.TypeVarchar, + //mysql.TypeJSON + mysql.TypeVarchar, + //mysql.TypeNewDecimal mysql.TypeEnum + mysql.TypeNewDecimal, mysql.TypeVarchar, + //mysql.TypeSet mysql.TypeTinyBlob + mysql.TypeVarchar, mysql.TypeTinyBlob, + //mysql.TypeMediumBlob mysql.TypeLongBlob + mysql.TypeMediumBlob, mysql.TypeLongBlob, + //mysql.TypeBlob mysql.TypeVarString + mysql.TypeBlob, mysql.TypeVarchar, + //mysql.TypeString mysql.TypeGeometry + mysql.TypeString, mysql.TypeVarchar, + }, + /* mysql.TypeFloat -> */ + { + //mysql.TypeDecimal mysql.TypeTiny + mysql.TypeDouble, mysql.TypeFloat, + //mysql.TypeShort mysql.TypeLong + mysql.TypeFloat, mysql.TypeDouble, + //mysql.TypeFloat mysql.TypeDouble + mysql.TypeFloat, mysql.TypeDouble, + //mysql.TypeNull mysql.TypeTimestamp + mysql.TypeFloat, mysql.TypeVarchar, + //mysql.TypeLonglong mysql.TypeInt24 + mysql.TypeFloat, mysql.TypeFloat, + //mysql.TypeDate mysql.TypeTime + mysql.TypeVarchar, mysql.TypeVarchar, + //mysql.TypeDatetime mysql.TypeYear + mysql.TypeVarchar, mysql.TypeFloat, + //mysql.TypeNewDate mysql.TypeVarchar + mysql.TypeVarchar, mysql.TypeVarchar, + //mysql.TypeBit <16>-<244> + mysql.TypeVarchar, + //mysql.TypeJSON + mysql.TypeVarchar, + //mysql.TypeNewDecimal mysql.TypeEnum + mysql.TypeDouble, mysql.TypeVarchar, + //mysql.TypeSet mysql.TypeTinyBlob + mysql.TypeVarchar, mysql.TypeTinyBlob, + //mysql.TypeMediumBlob mysql.TypeLongBlob + mysql.TypeMediumBlob, mysql.TypeLongBlob, + //mysql.TypeBlob mysql.TypeVarString + mysql.TypeBlob, mysql.TypeVarchar, + //mysql.TypeString mysql.TypeGeometry + mysql.TypeString, mysql.TypeVarchar, + }, + /* mysql.TypeDouble -> */ + { + //mysql.TypeDecimal mysql.TypeTiny + mysql.TypeDouble, mysql.TypeDouble, + //mysql.TypeShort mysql.TypeLong + mysql.TypeDouble, mysql.TypeDouble, + //mysql.TypeFloat mysql.TypeDouble + mysql.TypeDouble, mysql.TypeDouble, + //mysql.TypeNull mysql.TypeTimestamp + mysql.TypeDouble, mysql.TypeVarchar, + //mysql.TypeLonglong mysql.TypeInt24 + mysql.TypeDouble, mysql.TypeDouble, + //mysql.TypeDate mysql.TypeTime + mysql.TypeVarchar, mysql.TypeVarchar, + //mysql.TypeDatetime mysql.TypeYear + mysql.TypeVarchar, mysql.TypeDouble, + //mysql.TypeNewDate mysql.TypeVarchar + mysql.TypeVarchar, mysql.TypeVarchar, + //mysql.TypeBit <16>-<244> + mysql.TypeVarchar, + //mysql.TypeJSON + mysql.TypeVarchar, + //mysql.TypeNewDecimal mysql.TypeEnum + mysql.TypeDouble, 
mysql.TypeVarchar, + //mysql.TypeSet mysql.TypeTinyBlob + mysql.TypeVarchar, mysql.TypeTinyBlob, + //mysql.TypeMediumBlob mysql.TypeLongBlob + mysql.TypeMediumBlob, mysql.TypeLongBlob, + //mysql.TypeBlob mysql.TypeVarString + mysql.TypeBlob, mysql.TypeVarchar, + //mysql.TypeString mysql.TypeGeometry + mysql.TypeString, mysql.TypeVarchar, + }, + /* mysql.TypeNull -> */ + { + //mysql.TypeDecimal mysql.TypeTiny + mysql.TypeNewDecimal, mysql.TypeTiny, + //mysql.TypeShort mysql.TypeLong + mysql.TypeShort, mysql.TypeLong, + //mysql.TypeFloat mysql.TypeDouble + mysql.TypeFloat, mysql.TypeDouble, + //mysql.TypeNull mysql.TypeTimestamp + mysql.TypeNull, mysql.TypeTimestamp, + //mysql.TypeLonglong mysql.TypeInt24 + mysql.TypeLonglong, mysql.TypeLonglong, + //mysql.TypeDate mysql.TypeTime + mysql.TypeDate, mysql.TypeDuration, + //mysql.TypeDatetime mysql.TypeYear + mysql.TypeDatetime, mysql.TypeYear, + //mysql.TypeNewDate mysql.TypeVarchar + mysql.TypeNewDate, mysql.TypeVarchar, + //mysql.TypeBit <16>-<244> + mysql.TypeBit, + //mysql.TypeJSON + mysql.TypeJSON, + //mysql.TypeNewDecimal mysql.TypeEnum + mysql.TypeNewDecimal, mysql.TypeEnum, + //mysql.TypeSet mysql.TypeTinyBlob + mysql.TypeSet, mysql.TypeTinyBlob, + //mysql.TypeMediumBlob mysql.TypeLongBlob + mysql.TypeMediumBlob, mysql.TypeLongBlob, + //mysql.TypeBlob mysql.TypeVarString + mysql.TypeBlob, mysql.TypeVarchar, + //mysql.TypeString mysql.TypeGeometry + mysql.TypeString, mysql.TypeGeometry, + }, + /* mysql.TypeTimestamp -> */ + { + //mysql.TypeDecimal mysql.TypeTiny + mysql.TypeVarchar, mysql.TypeVarchar, + //mysql.TypeShort mysql.TypeLong + mysql.TypeVarchar, mysql.TypeVarchar, + //mysql.TypeFloat mysql.TypeDouble + mysql.TypeVarchar, mysql.TypeVarchar, + //mysql.TypeNull mysql.TypeTimestamp + mysql.TypeTimestamp, mysql.TypeTimestamp, + //mysql.TypeLonglong mysql.TypeInt24 + mysql.TypeVarchar, mysql.TypeVarchar, + //mysql.TypeDate mysql.TypeTime + mysql.TypeDatetime, mysql.TypeDatetime, + //mysql.TypeDatetime mysql.TypeYear + mysql.TypeDatetime, mysql.TypeVarchar, + //mysql.TypeNewDate mysql.TypeVarchar + mysql.TypeNewDate, mysql.TypeVarchar, + //mysql.TypeBit <16>-<244> + mysql.TypeVarchar, + //mysql.TypeJSON + mysql.TypeVarchar, + //mysql.TypeNewDecimal mysql.TypeEnum + mysql.TypeVarchar, mysql.TypeVarchar, + //mysql.TypeSet mysql.TypeTinyBlob + mysql.TypeVarchar, mysql.TypeTinyBlob, + //mysql.TypeMediumBlob mysql.TypeLongBlob + mysql.TypeMediumBlob, mysql.TypeLongBlob, + //mysql.TypeBlob mysql.TypeVarString + mysql.TypeBlob, mysql.TypeVarchar, + //mysql.TypeString mysql.TypeGeometry + mysql.TypeString, mysql.TypeVarchar, + }, + /* mysql.TypeLonglong -> */ + { + //mysql.TypeDecimal mysql.TypeTiny + mysql.TypeNewDecimal, mysql.TypeLonglong, + //mysql.TypeShort mysql.TypeLong + mysql.TypeLonglong, mysql.TypeLonglong, + //mysql.TypeFloat mysql.TypeDouble + mysql.TypeDouble, mysql.TypeDouble, + //mysql.TypeNull mysql.TypeTimestamp + mysql.TypeLonglong, mysql.TypeVarchar, + //mysql.TypeLonglong mysql.TypeInt24 + mysql.TypeLonglong, mysql.TypeLong, + //mysql.TypeDate mysql.TypeTime + mysql.TypeVarchar, mysql.TypeVarchar, + //mysql.TypeDatetime mysql.TypeYear + mysql.TypeVarchar, mysql.TypeLonglong, + //mysql.TypeNewDate mysql.TypeVarchar + mysql.TypeNewDate, mysql.TypeVarchar, + //mysql.TypeBit <16>-<244> + mysql.TypeVarchar, + //mysql.TypeJSON + mysql.TypeVarchar, + //mysql.TypeNewDecimal mysql.TypeEnum + mysql.TypeNewDecimal, mysql.TypeVarchar, + //mysql.TypeSet mysql.TypeTinyBlob + mysql.TypeVarchar, mysql.TypeTinyBlob, + 
//mysql.TypeMediumBlob mysql.TypeLongBlob + mysql.TypeMediumBlob, mysql.TypeLongBlob, + //mysql.TypeBlob mysql.TypeVarString + mysql.TypeBlob, mysql.TypeVarchar, + //mysql.TypeString mysql.TypeGeometry + mysql.TypeString, mysql.TypeVarchar, + }, + /* mysql.TypeInt24 -> */ + { + //mysql.TypeDecimal mysql.TypeTiny + mysql.TypeNewDecimal, mysql.TypeInt24, + //mysql.TypeShort mysql.TypeLong + mysql.TypeInt24, mysql.TypeLong, + //mysql.TypeFloat mysql.TypeDouble + mysql.TypeFloat, mysql.TypeDouble, + //mysql.TypeNull mysql.TypeTimestamp + mysql.TypeInt24, mysql.TypeVarchar, + //mysql.TypeLonglong mysql.TypeInt24 + mysql.TypeLonglong, mysql.TypeInt24, + //mysql.TypeDate mysql.TypeTime + mysql.TypeVarchar, mysql.TypeVarchar, + //mysql.TypeDatetime mysql.TypeYear + mysql.TypeVarchar, mysql.TypeInt24, + //mysql.TypeNewDate mysql.TypeVarchar + mysql.TypeNewDate, mysql.TypeVarchar, + //mysql.TypeBit <16>-<244> + mysql.TypeVarchar, + //mysql.TypeJSON + mysql.TypeVarchar, + //mysql.TypeNewDecimal mysql.TypeEnum + mysql.TypeNewDecimal, mysql.TypeVarchar, + //mysql.TypeSet mysql.TypeTinyBlob + mysql.TypeVarchar, mysql.TypeTinyBlob, + //mysql.TypeMediumBlob mysql.TypeLongBlob + mysql.TypeMediumBlob, mysql.TypeLongBlob, + //mysql.TypeBlob mysql.TypeVarString + mysql.TypeBlob, mysql.TypeVarchar, + //mysql.TypeString mysql.TypeGeometry + mysql.TypeString, mysql.TypeVarchar, + }, + /* mysql.TypeDate -> */ + { + //mysql.TypeDecimal mysql.TypeTiny + mysql.TypeVarchar, mysql.TypeVarchar, + //mysql.TypeShort mysql.TypeLong + mysql.TypeVarchar, mysql.TypeVarchar, + //mysql.TypeFloat mysql.TypeDouble + mysql.TypeVarchar, mysql.TypeVarchar, + //mysql.TypeNull mysql.TypeTimestamp + mysql.TypeDate, mysql.TypeDatetime, + //mysql.TypeLonglong mysql.TypeInt24 + mysql.TypeVarchar, mysql.TypeVarchar, + //mysql.TypeDate mysql.TypeTime + mysql.TypeDate, mysql.TypeDatetime, + //mysql.TypeDatetime mysql.TypeYear + mysql.TypeDatetime, mysql.TypeVarchar, + //mysql.TypeNewDate mysql.TypeVarchar + mysql.TypeNewDate, mysql.TypeVarchar, + //mysql.TypeBit <16>-<244> + mysql.TypeVarchar, + //mysql.TypeJSON + mysql.TypeVarchar, + //mysql.TypeNewDecimal mysql.TypeEnum + mysql.TypeVarchar, mysql.TypeVarchar, + //mysql.TypeSet mysql.TypeTinyBlob + mysql.TypeVarchar, mysql.TypeTinyBlob, + //mysql.TypeMediumBlob mysql.TypeLongBlob + mysql.TypeMediumBlob, mysql.TypeLongBlob, + //mysql.TypeBlob mysql.TypeVarString + mysql.TypeBlob, mysql.TypeVarchar, + //mysql.TypeString mysql.TypeGeometry + mysql.TypeString, mysql.TypeVarchar, + }, + /* mysql.TypeTime -> */ + { + //mysql.TypeDecimal mysql.TypeTiny + mysql.TypeVarchar, mysql.TypeVarchar, + //mysql.TypeShort mysql.TypeLong + mysql.TypeVarchar, mysql.TypeVarchar, + //mysql.TypeFloat mysql.TypeDouble + mysql.TypeVarchar, mysql.TypeVarchar, + //mysql.TypeNull mysql.TypeTimestamp + mysql.TypeDuration, mysql.TypeDatetime, + //mysql.TypeLonglong mysql.TypeInt24 + mysql.TypeVarchar, mysql.TypeVarchar, + //mysql.TypeDate mysql.TypeTime + mysql.TypeDatetime, mysql.TypeDuration, + //mysql.TypeDatetime mysql.TypeYear + mysql.TypeDatetime, mysql.TypeVarchar, + //mysql.TypeNewDate mysql.TypeVarchar + mysql.TypeNewDate, mysql.TypeVarchar, + //mysql.TypeBit <16>-<244> + mysql.TypeVarchar, + //mysql.TypeJSON + mysql.TypeVarchar, + //mysql.TypeNewDecimal mysql.TypeEnum + mysql.TypeVarchar, mysql.TypeVarchar, + //mysql.TypeSet mysql.TypeTinyBlob + mysql.TypeVarchar, mysql.TypeTinyBlob, + //mysql.TypeMediumBlob mysql.TypeLongBlob + mysql.TypeMediumBlob, mysql.TypeLongBlob, + //mysql.TypeBlob mysql.TypeVarString 
+ mysql.TypeBlob, mysql.TypeVarchar, + //mysql.TypeString mysql.TypeGeometry + mysql.TypeString, mysql.TypeVarchar, + }, + /* mysql.TypeDatetime -> */ + { + //mysql.TypeDecimal mysql.TypeTiny + mysql.TypeVarchar, mysql.TypeVarchar, + //mysql.TypeShort mysql.TypeLong + mysql.TypeVarchar, mysql.TypeVarchar, + //mysql.TypeFloat mysql.TypeDouble + mysql.TypeVarchar, mysql.TypeVarchar, + //mysql.TypeNull mysql.TypeTimestamp + mysql.TypeDatetime, mysql.TypeDatetime, + //mysql.TypeLonglong mysql.TypeInt24 + mysql.TypeVarchar, mysql.TypeVarchar, + //mysql.TypeDate mysql.TypeTime + mysql.TypeDatetime, mysql.TypeDatetime, + //mysql.TypeDatetime mysql.TypeYear + mysql.TypeDatetime, mysql.TypeVarchar, + //mysql.TypeNewDate mysql.TypeVarchar + mysql.TypeNewDate, mysql.TypeVarchar, + //mysql.TypeBit <16>-<244> + mysql.TypeVarchar, + //mysql.TypeJSON + mysql.TypeVarchar, + //mysql.TypeNewDecimal mysql.TypeEnum + mysql.TypeVarchar, mysql.TypeVarchar, + //mysql.TypeSet mysql.TypeTinyBlob + mysql.TypeVarchar, mysql.TypeTinyBlob, + //mysql.TypeMediumBlob mysql.TypeLongBlob + mysql.TypeMediumBlob, mysql.TypeLongBlob, + //mysql.TypeBlob mysql.TypeVarString + mysql.TypeBlob, mysql.TypeVarchar, + //mysql.TypeString mysql.TypeGeometry + mysql.TypeString, mysql.TypeVarchar, + }, + /* mysql.TypeYear -> */ + { + //mysql.TypeDecimal mysql.TypeTiny + mysql.TypeDecimal, mysql.TypeTiny, + //mysql.TypeShort mysql.TypeLong + mysql.TypeShort, mysql.TypeLong, + //mysql.TypeFloat mysql.TypeDouble + mysql.TypeFloat, mysql.TypeDouble, + //mysql.TypeNull mysql.TypeTimestamp + mysql.TypeYear, mysql.TypeVarchar, + //mysql.TypeLonglong mysql.TypeInt24 + mysql.TypeLonglong, mysql.TypeInt24, + //mysql.TypeDate mysql.TypeTime + mysql.TypeVarchar, mysql.TypeVarchar, + //mysql.TypeDatetime mysql.TypeYear + mysql.TypeVarchar, mysql.TypeYear, + //mysql.TypeNewDate mysql.TypeVarchar + mysql.TypeVarchar, mysql.TypeVarchar, + //mysql.TypeBit <16>-<244> + mysql.TypeVarchar, + //mysql.TypeJSON + mysql.TypeVarchar, + //mysql.TypeNewDecimal mysql.TypeEnum + mysql.TypeNewDecimal, mysql.TypeVarchar, + //mysql.TypeSet mysql.TypeTinyBlob + mysql.TypeVarchar, mysql.TypeTinyBlob, + //mysql.TypeMediumBlob mysql.TypeLongBlob + mysql.TypeMediumBlob, mysql.TypeLongBlob, + //mysql.TypeBlob mysql.TypeVarString + mysql.TypeBlob, mysql.TypeVarchar, + //mysql.TypeString mysql.TypeGeometry + mysql.TypeString, mysql.TypeVarchar, + }, + /* mysql.TypeNewDate -> */ + { + //mysql.TypeDecimal mysql.TypeTiny + mysql.TypeVarchar, mysql.TypeVarchar, + //mysql.TypeShort mysql.TypeLong + mysql.TypeVarchar, mysql.TypeVarchar, + //mysql.TypeFloat mysql.TypeDouble + mysql.TypeVarchar, mysql.TypeVarchar, + //mysql.TypeNull mysql.TypeTimestamp + mysql.TypeNewDate, mysql.TypeDatetime, + //mysql.TypeLonglong mysql.TypeInt24 + mysql.TypeVarchar, mysql.TypeVarchar, + //mysql.TypeDate mysql.TypeTime + mysql.TypeNewDate, mysql.TypeDatetime, + //mysql.TypeDatetime mysql.TypeYear + mysql.TypeDatetime, mysql.TypeVarchar, + //mysql.TypeNewDate mysql.TypeVarchar + mysql.TypeNewDate, mysql.TypeVarchar, + //mysql.TypeBit <16>-<244> + mysql.TypeVarchar, + //mysql.TypeJSON + mysql.TypeVarchar, + //mysql.TypeNewDecimal mysql.TypeEnum + mysql.TypeVarchar, mysql.TypeVarchar, + //mysql.TypeSet mysql.TypeTinyBlob + mysql.TypeVarchar, mysql.TypeTinyBlob, + //mysql.TypeMediumBlob mysql.TypeLongBlob + mysql.TypeMediumBlob, mysql.TypeLongBlob, + //mysql.TypeBlob mysql.TypeVarString + mysql.TypeBlob, mysql.TypeVarchar, + //mysql.TypeString mysql.TypeGeometry + mysql.TypeString, mysql.TypeVarchar, + 
}, + /* mysql.TypeVarchar -> */ + { + //mysql.TypeDecimal mysql.TypeTiny + mysql.TypeVarchar, mysql.TypeVarchar, + //mysql.TypeShort mysql.TypeLong + mysql.TypeVarchar, mysql.TypeVarchar, + //mysql.TypeFloat mysql.TypeDouble + mysql.TypeVarchar, mysql.TypeVarchar, + //mysql.TypeNull mysql.TypeTimestamp + mysql.TypeVarchar, mysql.TypeVarchar, + //mysql.TypeLonglong mysql.TypeInt24 + mysql.TypeVarchar, mysql.TypeVarchar, + //mysql.TypeDate mysql.TypeTime + mysql.TypeVarchar, mysql.TypeVarchar, + //mysql.TypeDatetime mysql.TypeYear + mysql.TypeVarchar, mysql.TypeVarchar, + //mysql.TypeNewDate mysql.TypeVarchar + mysql.TypeVarchar, mysql.TypeVarchar, + //mysql.TypeBit <16>-<244> + mysql.TypeVarchar, + //mysql.TypeJSON + mysql.TypeVarchar, + //mysql.TypeNewDecimal mysql.TypeEnum + mysql.TypeVarchar, mysql.TypeVarchar, + //mysql.TypeSet mysql.TypeTinyBlob + mysql.TypeVarchar, mysql.TypeTinyBlob, + //mysql.TypeMediumBlob mysql.TypeLongBlob + mysql.TypeMediumBlob, mysql.TypeLongBlob, + //mysql.TypeBlob mysql.TypeVarString + mysql.TypeBlob, mysql.TypeVarchar, + //mysql.TypeString mysql.TypeGeometry + mysql.TypeVarchar, mysql.TypeVarchar, + }, + /* mysql.TypeBit -> */ + { + //mysql.TypeDecimal mysql.TypeTiny + mysql.TypeVarchar, mysql.TypeVarchar, + //mysql.TypeShort mysql.TypeLong + mysql.TypeVarchar, mysql.TypeVarchar, + //mysql.TypeFloat mysql.TypeDouble + mysql.TypeVarchar, mysql.TypeVarchar, + //mysql.TypeNull mysql.TypeTimestamp + mysql.TypeBit, mysql.TypeVarchar, + //mysql.TypeLonglong mysql.TypeInt24 + mysql.TypeVarchar, mysql.TypeVarchar, + //mysql.TypeDate mysql.TypeTime + mysql.TypeVarchar, mysql.TypeVarchar, + //mysql.TypeDatetime mysql.TypeYear + mysql.TypeVarchar, mysql.TypeVarchar, + //mysql.TypeNewDate mysql.TypeVarchar + mysql.TypeVarchar, mysql.TypeVarchar, + //mysql.TypeBit <16>-<244> + mysql.TypeBit, + //mysql.TypeJSON + mysql.TypeVarchar, + //mysql.TypeNewDecimal mysql.TypeEnum + mysql.TypeVarchar, mysql.TypeVarchar, + //mysql.TypeSet mysql.TypeTinyBlob + mysql.TypeVarchar, mysql.TypeTinyBlob, + //mysql.TypeMediumBlob mysql.TypeLongBlob + mysql.TypeMediumBlob, mysql.TypeLongBlob, + //mysql.TypeBlob mysql.TypeVarString + mysql.TypeBlob, mysql.TypeVarchar, + //mysql.TypeString mysql.TypeGeometry + mysql.TypeString, mysql.TypeVarchar, + }, + /* mysql.TypeJSON -> */ + { + //mysql.TypeDecimal mysql.TypeTiny + mysql.TypeVarchar, mysql.TypeVarchar, + //mysql.TypeShort mysql.TypeLong + mysql.TypeVarchar, mysql.TypeVarchar, + //mysql.TypeNewFloat mysql.TypeDouble + mysql.TypeVarchar, mysql.TypeVarchar, + //mysql.TypeNull mysql.TypeTimestamp + mysql.TypeJSON, mysql.TypeVarchar, + //mysql.TypeLongLONG mysql.TypeInt24 + mysql.TypeVarchar, mysql.TypeVarchar, + //mysql.TypeDate MYSQL_TYPE_TIME + mysql.TypeVarchar, mysql.TypeVarchar, + //mysql.TypeDatetime MYSQL_TYPE_YEAR + mysql.TypeVarchar, mysql.TypeVarchar, + //mysql.TypeNewDate mysql.TypeVarchar + mysql.TypeVarchar, mysql.TypeVarchar, + //mysql.TypeBit <16>-<244> + mysql.TypeVarchar, + //mysql.TypeJSON + mysql.TypeJSON, + //mysql.TypeNewDecimal MYSQL_TYPE_ENUM + mysql.TypeVarchar, mysql.TypeVarchar, + //mysql.TypeSet mysql.TypeTinyBlob + mysql.TypeVarchar, mysql.TypeLongBlob, + //mysql.TypeMediumBlob mysql.TypeLongBlob + mysql.TypeLongBlob, mysql.TypeLongBlob, + //mysql.TypeBlob mysql.TypeVarString + mysql.TypeLongBlob, mysql.TypeVarchar, + //mysql.TypeString MYSQL_TYPE_GEOMETRY + mysql.TypeString, mysql.TypeVarchar, + }, + /* mysql.TypeNewDecimal -> */ + { + //mysql.TypeDecimal mysql.TypeTiny + mysql.TypeNewDecimal, mysql.TypeNewDecimal, 
+ //mysql.TypeShort mysql.TypeLong + mysql.TypeNewDecimal, mysql.TypeNewDecimal, + //mysql.TypeFloat mysql.TypeDouble + mysql.TypeDouble, mysql.TypeDouble, + //mysql.TypeNull mysql.TypeTimestamp + mysql.TypeNewDecimal, mysql.TypeVarchar, + //mysql.TypeLonglong mysql.TypeInt24 + mysql.TypeNewDecimal, mysql.TypeNewDecimal, + //mysql.TypeDate mysql.TypeTime + mysql.TypeVarchar, mysql.TypeVarchar, + //mysql.TypeDatetime mysql.TypeYear + mysql.TypeVarchar, mysql.TypeNewDecimal, + //mysql.TypeNewDate mysql.TypeVarchar + mysql.TypeVarchar, mysql.TypeVarchar, + //mysql.TypeBit <16>-<244> + mysql.TypeVarchar, + //mysql.TypeJSON + mysql.TypeVarchar, + //mysql.TypeNewDecimal mysql.TypeEnum + mysql.TypeNewDecimal, mysql.TypeVarchar, + //mysql.TypeSet mysql.TypeTinyBlob + mysql.TypeVarchar, mysql.TypeTinyBlob, + //mysql.TypeMediumBlob mysql.TypeLongBlob + mysql.TypeMediumBlob, mysql.TypeLongBlob, + //mysql.TypeBlob mysql.TypeVarString + mysql.TypeBlob, mysql.TypeVarchar, + //mysql.TypeString mysql.TypeGeometry + mysql.TypeString, mysql.TypeVarchar, + }, + /* mysql.TypeEnum -> */ + { + //mysql.TypeDecimal mysql.TypeTiny + mysql.TypeVarchar, mysql.TypeVarchar, + //mysql.TypeShort mysql.TypeLong + mysql.TypeVarchar, mysql.TypeVarchar, + //mysql.TypeFloat mysql.TypeDouble + mysql.TypeVarchar, mysql.TypeVarchar, + //mysql.TypeNull mysql.TypeTimestamp + mysql.TypeEnum, mysql.TypeVarchar, + //mysql.TypeLonglong mysql.TypeInt24 + mysql.TypeVarchar, mysql.TypeVarchar, + //mysql.TypeDate mysql.TypeTime + mysql.TypeVarchar, mysql.TypeVarchar, + //mysql.TypeDatetime mysql.TypeYear + mysql.TypeVarchar, mysql.TypeVarchar, + //mysql.TypeNewDate mysql.TypeVarchar + mysql.TypeVarchar, mysql.TypeVarchar, + //mysql.TypeBit <16>-<244> + mysql.TypeVarchar, + //mysql.TypeJSON + mysql.TypeVarchar, + //mysql.TypeNewDecimal mysql.TypeEnum + mysql.TypeVarchar, mysql.TypeVarchar, + //mysql.TypeSet mysql.TypeTinyBlob + mysql.TypeVarchar, mysql.TypeTinyBlob, + //mysql.TypeMediumBlob mysql.TypeLongBlob + mysql.TypeMediumBlob, mysql.TypeLongBlob, + //mysql.TypeBlob mysql.TypeVarString + mysql.TypeBlob, mysql.TypeVarchar, + //mysql.TypeString mysql.TypeGeometry + mysql.TypeString, mysql.TypeVarchar, + }, + /* mysql.TypeSet -> */ + { + //mysql.TypeDecimal mysql.TypeTiny + mysql.TypeVarchar, mysql.TypeVarchar, + //mysql.TypeShort mysql.TypeLong + mysql.TypeVarchar, mysql.TypeVarchar, + //mysql.TypeFloat mysql.TypeDouble + mysql.TypeVarchar, mysql.TypeVarchar, + //mysql.TypeNull mysql.TypeTimestamp + mysql.TypeSet, mysql.TypeVarchar, + //mysql.TypeLonglong mysql.TypeInt24 + mysql.TypeVarchar, mysql.TypeVarchar, + //mysql.TypeDate mysql.TypeTime + mysql.TypeVarchar, mysql.TypeVarchar, + //mysql.TypeDatetime mysql.TypeYear + mysql.TypeVarchar, mysql.TypeVarchar, + //mysql.TypeNewDate mysql.TypeVarchar + mysql.TypeVarchar, mysql.TypeVarchar, + //mysql.TypeBit <16>-<244> + mysql.TypeVarchar, + //mysql.TypeJSON + mysql.TypeVarchar, + //mysql.TypeNewDecimal mysql.TypeEnum + mysql.TypeVarchar, mysql.TypeVarchar, + //mysql.TypeSet mysql.TypeTinyBlob + mysql.TypeVarchar, mysql.TypeTinyBlob, + //mysql.TypeMediumBlob mysql.TypeLongBlob + mysql.TypeMediumBlob, mysql.TypeLongBlob, + //mysql.TypeBlob mysql.TypeVarString + mysql.TypeBlob, mysql.TypeVarchar, + //mysql.TypeString mysql.TypeGeometry + mysql.TypeString, mysql.TypeVarchar, + }, + /* mysql.TypeTinyBlob -> */ + { + //mysql.TypeDecimal mysql.TypeTiny + mysql.TypeTinyBlob, mysql.TypeTinyBlob, + //mysql.TypeShort mysql.TypeLong + mysql.TypeTinyBlob, mysql.TypeTinyBlob, + //mysql.TypeFloat 
mysql.TypeDouble + mysql.TypeTinyBlob, mysql.TypeTinyBlob, + //mysql.TypeNull mysql.TypeTimestamp + mysql.TypeTinyBlob, mysql.TypeTinyBlob, + //mysql.TypeLonglong mysql.TypeInt24 + mysql.TypeTinyBlob, mysql.TypeTinyBlob, + //mysql.TypeDate mysql.TypeTime + mysql.TypeTinyBlob, mysql.TypeTinyBlob, + //mysql.TypeDatetime mysql.TypeYear + mysql.TypeTinyBlob, mysql.TypeTinyBlob, + //mysql.TypeNewDate mysql.TypeVarchar + mysql.TypeTinyBlob, mysql.TypeTinyBlob, + //mysql.TypeBit <16>-<244> + mysql.TypeTinyBlob, + //mysql.TypeJSON + mysql.TypeLongBlob, + //mysql.TypeNewDecimal mysql.TypeEnum + mysql.TypeTinyBlob, mysql.TypeTinyBlob, + //mysql.TypeSet mysql.TypeTinyBlob + mysql.TypeTinyBlob, mysql.TypeTinyBlob, + //mysql.TypeMediumBlob mysql.TypeLongBlob + mysql.TypeMediumBlob, mysql.TypeLongBlob, + //mysql.TypeBlob mysql.TypeVarString + mysql.TypeBlob, mysql.TypeTinyBlob, + //mysql.TypeString mysql.TypeGeometry + mysql.TypeTinyBlob, mysql.TypeTinyBlob, + }, + /* mysql.TypeMediumBlob -> */ + { + //mysql.TypeDecimal mysql.TypeTiny + mysql.TypeMediumBlob, mysql.TypeMediumBlob, + //mysql.TypeShort mysql.TypeLong + mysql.TypeMediumBlob, mysql.TypeMediumBlob, + //mysql.TypeFloat mysql.TypeDouble + mysql.TypeMediumBlob, mysql.TypeMediumBlob, + //mysql.TypeNull mysql.TypeTimestamp + mysql.TypeMediumBlob, mysql.TypeMediumBlob, + //mysql.TypeLonglong mysql.TypeInt24 + mysql.TypeMediumBlob, mysql.TypeMediumBlob, + //mysql.TypeDate mysql.TypeTime + mysql.TypeMediumBlob, mysql.TypeMediumBlob, + //mysql.TypeDatetime mysql.TypeYear + mysql.TypeMediumBlob, mysql.TypeMediumBlob, + //mysql.TypeNewDate mysql.TypeVarchar + mysql.TypeMediumBlob, mysql.TypeMediumBlob, + //mysql.TypeBit <16>-<244> + mysql.TypeMediumBlob, + //mysql.TypeJSON + mysql.TypeLongBlob, + //mysql.TypeNewDecimal mysql.TypeEnum + mysql.TypeMediumBlob, mysql.TypeMediumBlob, + //mysql.TypeSet mysql.TypeTinyBlob + mysql.TypeMediumBlob, mysql.TypeMediumBlob, + //mysql.TypeMediumBlob mysql.TypeLongBlob + mysql.TypeMediumBlob, mysql.TypeLongBlob, + //mysql.TypeBlob mysql.TypeVarString + mysql.TypeMediumBlob, mysql.TypeMediumBlob, + //mysql.TypeString mysql.TypeGeometry + mysql.TypeMediumBlob, mysql.TypeMediumBlob, + }, + /* mysql.TypeLongBlob -> */ + { + //mysql.TypeDecimal mysql.TypeTiny + mysql.TypeLongBlob, mysql.TypeLongBlob, + //mysql.TypeShort mysql.TypeLong + mysql.TypeLongBlob, mysql.TypeLongBlob, + //mysql.TypeFloat mysql.TypeDouble + mysql.TypeLongBlob, mysql.TypeLongBlob, + //mysql.TypeNull mysql.TypeTimestamp + mysql.TypeLongBlob, mysql.TypeLongBlob, + //mysql.TypeLonglong mysql.TypeInt24 + mysql.TypeLongBlob, mysql.TypeLongBlob, + //mysql.TypeDate mysql.TypeTime + mysql.TypeLongBlob, mysql.TypeLongBlob, + //mysql.TypeDatetime mysql.TypeYear + mysql.TypeLongBlob, mysql.TypeLongBlob, + //mysql.TypeNewDate mysql.TypeVarchar + mysql.TypeLongBlob, mysql.TypeLongBlob, + //mysql.TypeBit <16>-<244> + mysql.TypeLongBlob, + //mysql.TypeJSON + mysql.TypeLongBlob, + //mysql.TypeNewDecimal mysql.TypeEnum + mysql.TypeLongBlob, mysql.TypeLongBlob, + //mysql.TypeSet mysql.TypeTinyBlob + mysql.TypeLongBlob, mysql.TypeLongBlob, + //mysql.TypeMediumBlob mysql.TypeLongBlob + mysql.TypeLongBlob, mysql.TypeLongBlob, + //mysql.TypeBlob mysql.TypeVarString + mysql.TypeLongBlob, mysql.TypeLongBlob, + //mysql.TypeString mysql.TypeGeometry + mysql.TypeLongBlob, mysql.TypeLongBlob, + }, + /* mysql.TypeBlob -> */ + { + //mysql.TypeDecimal mysql.TypeTiny + mysql.TypeBlob, mysql.TypeBlob, + //mysql.TypeShort mysql.TypeLong + mysql.TypeBlob, mysql.TypeBlob, + 
//mysql.TypeFloat mysql.TypeDouble + mysql.TypeBlob, mysql.TypeBlob, + //mysql.TypeNull mysql.TypeTimestamp + mysql.TypeBlob, mysql.TypeBlob, + //mysql.TypeLonglong mysql.TypeInt24 + mysql.TypeBlob, mysql.TypeBlob, + //mysql.TypeDate mysql.TypeTime + mysql.TypeBlob, mysql.TypeBlob, + //mysql.TypeDatetime mysql.TypeYear + mysql.TypeBlob, mysql.TypeBlob, + //mysql.TypeNewDate mysql.TypeVarchar + mysql.TypeBlob, mysql.TypeBlob, + //mysql.TypeBit <16>-<244> + mysql.TypeBlob, + //mysql.TypeJSON + mysql.TypeLongBlob, + //mysql.TypeNewDecimal mysql.TypeEnum + mysql.TypeBlob, mysql.TypeBlob, + //mysql.TypeSet mysql.TypeTinyBlob + mysql.TypeBlob, mysql.TypeBlob, + //mysql.TypeMediumBlob mysql.TypeLongBlob + mysql.TypeMediumBlob, mysql.TypeLongBlob, + //mysql.TypeBlob mysql.TypeVarString + mysql.TypeBlob, mysql.TypeBlob, + //mysql.TypeString mysql.TypeGeometry + mysql.TypeBlob, mysql.TypeBlob, + }, + /* mysql.TypeVarString -> */ + { + //mysql.TypeDecimal mysql.TypeTiny + mysql.TypeVarchar, mysql.TypeVarchar, + //mysql.TypeShort mysql.TypeLong + mysql.TypeVarchar, mysql.TypeVarchar, + //mysql.TypeFloat mysql.TypeDouble + mysql.TypeVarchar, mysql.TypeVarchar, + //mysql.TypeNull mysql.TypeTimestamp + mysql.TypeVarchar, mysql.TypeVarchar, + //mysql.TypeLonglong mysql.TypeInt24 + mysql.TypeVarchar, mysql.TypeVarchar, + //mysql.TypeDate mysql.TypeTime + mysql.TypeVarchar, mysql.TypeVarchar, + //mysql.TypeDatetime mysql.TypeYear + mysql.TypeVarchar, mysql.TypeVarchar, + //mysql.TypeNewDate mysql.TypeVarchar + mysql.TypeVarchar, mysql.TypeVarchar, + //mysql.TypeBit <16>-<244> + mysql.TypeVarchar, + //mysql.TypeJSON + mysql.TypeVarchar, + //mysql.TypeNewDecimal mysql.TypeEnum + mysql.TypeVarchar, mysql.TypeVarchar, + //mysql.TypeSet mysql.TypeTinyBlob + mysql.TypeVarchar, mysql.TypeTinyBlob, + //mysql.TypeMediumBlob mysql.TypeLongBlob + mysql.TypeMediumBlob, mysql.TypeLongBlob, + //mysql.TypeBlob mysql.TypeVarString + mysql.TypeBlob, mysql.TypeVarchar, + //mysql.TypeString mysql.TypeGeometry + mysql.TypeVarchar, mysql.TypeVarchar, + }, + /* mysql.TypeString -> */ + { + //mysql.TypeDecimal mysql.TypeTiny + mysql.TypeString, mysql.TypeString, + //mysql.TypeShort mysql.TypeLong + mysql.TypeString, mysql.TypeString, + //mysql.TypeFloat mysql.TypeDouble + mysql.TypeString, mysql.TypeString, + //mysql.TypeNull mysql.TypeTimestamp + mysql.TypeString, mysql.TypeString, + //mysql.TypeLonglong mysql.TypeInt24 + mysql.TypeString, mysql.TypeString, + //mysql.TypeDate mysql.TypeTime + mysql.TypeString, mysql.TypeString, + //mysql.TypeDatetime mysql.TypeYear + mysql.TypeString, mysql.TypeString, + //mysql.TypeNewDate mysql.TypeVarchar + mysql.TypeString, mysql.TypeVarchar, + //mysql.TypeBit <16>-<244> + mysql.TypeString, + //mysql.TypeJSON + mysql.TypeString, + //mysql.TypeNewDecimal mysql.TypeEnum + mysql.TypeString, mysql.TypeString, + //mysql.TypeSet mysql.TypeTinyBlob + mysql.TypeString, mysql.TypeTinyBlob, + //mysql.TypeMediumBlob mysql.TypeLongBlob + mysql.TypeMediumBlob, mysql.TypeLongBlob, + //mysql.TypeBlob mysql.TypeVarString + mysql.TypeBlob, mysql.TypeVarchar, + //mysql.TypeString mysql.TypeGeometry + mysql.TypeString, mysql.TypeString, + }, + /* mysql.TypeGeometry -> */ + { + //mysql.TypeDecimal mysql.TypeTiny + mysql.TypeVarchar, mysql.TypeVarchar, + //mysql.TypeShort mysql.TypeLong + mysql.TypeVarchar, mysql.TypeVarchar, + //mysql.TypeFloat mysql.TypeDouble + mysql.TypeVarchar, mysql.TypeVarchar, + //mysql.TypeNull mysql.TypeTimestamp + mysql.TypeGeometry, mysql.TypeVarchar, + //mysql.TypeLonglong 
mysql.TypeInt24 + mysql.TypeVarchar, mysql.TypeVarchar, + //mysql.TypeDate mysql.TypeTime + mysql.TypeVarchar, mysql.TypeVarchar, + //mysql.TypeDatetime mysql.TypeYear + mysql.TypeVarchar, mysql.TypeVarchar, + //mysql.TypeNewDate mysql.TypeVarchar + mysql.TypeVarchar, mysql.TypeVarchar, + //mysql.TypeBit <16>-<244> + mysql.TypeVarchar, + //mysql.TypeJSON + mysql.TypeVarchar, + //mysql.TypeNewDecimal mysql.TypeEnum + mysql.TypeVarchar, mysql.TypeVarchar, + //mysql.TypeSet mysql.TypeTinyBlob + mysql.TypeVarchar, mysql.TypeTinyBlob, + //mysql.TypeMediumBlob mysql.TypeLongBlob + mysql.TypeMediumBlob, mysql.TypeLongBlob, + //mysql.TypeBlob mysql.TypeVarString + mysql.TypeBlob, mysql.TypeVarchar, + //mysql.TypeString mysql.TypeGeometry + mysql.TypeString, mysql.TypeGeometry, + }, +} + +// SetBinChsClnFlag sets charset, collation as 'binary' and adds binaryFlag to FieldType. +func SetBinChsClnFlag(ft *FieldType) { + ft.Charset = charset.CharsetBin + ft.Collate = charset.CollationBin + ft.Flag |= mysql.BinaryFlag +} + +// VarStorageLen indicates this column is a variable length column. +const VarStorageLen = ast.VarStorageLen diff --git a/types/field_type_test.go b/types/field_type_test.go new file mode 100644 index 0000000..996645f --- /dev/null +++ b/types/field_type_test.go @@ -0,0 +1,370 @@ +// Copyright 2015 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package types + +import ( + . 
"github.com/pingcap/check" + "github.com/pingcap/tidb/parser/charset" + "github.com/pingcap/tidb/parser/mysql" + "github.com/pingcap/tidb/util/testleak" +) + +var _ = Suite(&testFieldTypeSuite{}) + +type testFieldTypeSuite struct { +} + +func (s *testFieldTypeSuite) TestFieldType(c *C) { + defer testleak.AfterTest(c)() + ft := NewFieldType(mysql.TypeDuration) + c.Assert(ft.Flen, Equals, UnspecifiedLength) + c.Assert(ft.Decimal, Equals, UnspecifiedLength) + ft.Decimal = 5 + c.Assert(ft.String(), Equals, "time(5)") + + ft = NewFieldType(mysql.TypeLong) + ft.Flen = 5 + ft.Flag = mysql.UnsignedFlag | mysql.ZerofillFlag + c.Assert(ft.String(), Equals, "int(5) UNSIGNED ZEROFILL") + c.Assert(ft.InfoSchemaStr(), Equals, "int(5) unsigned") + + ft = NewFieldType(mysql.TypeFloat) + ft.Flen = 12 // Default + ft.Decimal = 3 // Not Default + c.Assert(ft.String(), Equals, "float(12,3)") + ft = NewFieldType(mysql.TypeFloat) + ft.Flen = 12 // Default + ft.Decimal = -1 // Default + c.Assert(ft.String(), Equals, "float") + ft = NewFieldType(mysql.TypeFloat) + ft.Flen = 5 // Not Default + ft.Decimal = -1 // Default + c.Assert(ft.String(), Equals, "float") + ft = NewFieldType(mysql.TypeFloat) + ft.Flen = 7 // Not Default + ft.Decimal = 3 // Not Default + c.Assert(ft.String(), Equals, "float(7,3)") + + ft = NewFieldType(mysql.TypeDouble) + ft.Flen = 22 // Default + ft.Decimal = 3 // Not Default + c.Assert(ft.String(), Equals, "double(22,3)") + ft = NewFieldType(mysql.TypeDouble) + ft.Flen = 22 // Default + ft.Decimal = -1 // Default + c.Assert(ft.String(), Equals, "double") + ft = NewFieldType(mysql.TypeDouble) + ft.Flen = 5 // Not Default + ft.Decimal = -1 // Default + c.Assert(ft.String(), Equals, "double") + ft = NewFieldType(mysql.TypeDouble) + ft.Flen = 7 // Not Default + ft.Decimal = 3 // Not Default + c.Assert(ft.String(), Equals, "double(7,3)") + + ft = NewFieldType(mysql.TypeBlob) + ft.Flen = 10 + ft.Charset = "UTF8" + ft.Collate = "UTF8_UNICODE_GI" + c.Assert(ft.String(), Equals, "text CHARACTER SET UTF8 COLLATE UTF8_UNICODE_GI") + + ft = NewFieldType(mysql.TypeVarchar) + ft.Flen = 10 + ft.Flag |= mysql.BinaryFlag + c.Assert(ft.String(), Equals, "varchar(10) BINARY") + + ft = NewFieldType(mysql.TypeString) + ft.Charset = charset.CollationBin + ft.Flag |= mysql.BinaryFlag + c.Assert(ft.String(), Equals, "binary(1)") + + ft = NewFieldType(mysql.TypeEnum) + ft.Elems = []string{"a", "b"} + c.Assert(ft.String(), Equals, "enum('a','b')") + + ft = NewFieldType(mysql.TypeEnum) + ft.Elems = []string{"'a'", "'b'"} + c.Assert(ft.String(), Equals, "enum('''a''','''b''')") + + ft = NewFieldType(mysql.TypeEnum) + ft.Elems = []string{"a\nb", "a\tb", "a\rb"} + c.Assert(ft.String(), Equals, "enum('a\\nb','a\tb','a\\rb')") + + ft = NewFieldType(mysql.TypeEnum) + ft.Elems = []string{"a\nb", "a'\t\r\nb", "a\rb"} + c.Assert(ft.String(), Equals, "enum('a\\nb','a'' \\r\\nb','a\\rb')") + + ft = NewFieldType(mysql.TypeSet) + ft.Elems = []string{"a", "b"} + c.Assert(ft.String(), Equals, "set('a','b')") + + ft = NewFieldType(mysql.TypeSet) + ft.Elems = []string{"'a'", "'b'"} + c.Assert(ft.String(), Equals, "set('''a''','''b''')") + + ft = NewFieldType(mysql.TypeSet) + ft.Elems = []string{"a\nb", "a'\t\r\nb", "a\rb"} + c.Assert(ft.String(), Equals, "set('a\\nb','a'' \\r\\nb','a\\rb')") + + ft = NewFieldType(mysql.TypeSet) + ft.Elems = []string{"a'\nb", "a'b\tc"} + c.Assert(ft.String(), Equals, "set('a''\\nb','a''b c')") + + ft = NewFieldType(mysql.TypeTimestamp) + ft.Flen = 8 + ft.Decimal = 2 + c.Assert(ft.String(), Equals, 
"timestamp(2)") + ft = NewFieldType(mysql.TypeTimestamp) + ft.Flen = 8 + ft.Decimal = 0 + c.Assert(ft.String(), Equals, "timestamp") + + ft = NewFieldType(mysql.TypeDatetime) + ft.Flen = 8 + ft.Decimal = 2 + c.Assert(ft.String(), Equals, "datetime(2)") + ft = NewFieldType(mysql.TypeDatetime) + ft.Flen = 8 + ft.Decimal = 0 + c.Assert(ft.String(), Equals, "datetime") + + ft = NewFieldType(mysql.TypeDate) + ft.Flen = 8 + ft.Decimal = 2 + c.Assert(ft.String(), Equals, "date") + ft = NewFieldType(mysql.TypeDate) + ft.Flen = 8 + ft.Decimal = 0 + c.Assert(ft.String(), Equals, "date") + + ft = NewFieldType(mysql.TypeYear) + ft.Flen = 4 + ft.Decimal = 0 + c.Assert(ft.String(), Equals, "year(4)") + ft = NewFieldType(mysql.TypeYear) + ft.Flen = 2 + ft.Decimal = 2 + c.Assert(ft.String(), Equals, "year(2)") // Note: Invalid year. +} + +func (s *testFieldTypeSuite) TestDefaultTypeForValue(c *C) { + defer testleak.AfterTest(c)() + tests := []struct { + value interface{} + tp byte + flen int + decimal int + charset string + collation string + flag uint + }{ + {nil, mysql.TypeNull, 0, 0, charset.CharsetBin, charset.CharsetBin, mysql.BinaryFlag}, + {1, mysql.TypeLonglong, 1, 0, charset.CharsetBin, charset.CharsetBin, mysql.BinaryFlag}, + {0, mysql.TypeLonglong, 1, 0, charset.CharsetBin, charset.CharsetBin, mysql.BinaryFlag}, + {432, mysql.TypeLonglong, 3, 0, charset.CharsetBin, charset.CharsetBin, mysql.BinaryFlag}, + {4321, mysql.TypeLonglong, 4, 0, charset.CharsetBin, charset.CharsetBin, mysql.BinaryFlag}, + {1234567, mysql.TypeLonglong, 7, 0, charset.CharsetBin, charset.CharsetBin, mysql.BinaryFlag}, + {12345678, mysql.TypeLonglong, 8, 0, charset.CharsetBin, charset.CharsetBin, mysql.BinaryFlag}, + {12345678901234567, mysql.TypeLonglong, 17, 0, charset.CharsetBin, charset.CharsetBin, mysql.BinaryFlag}, + {-42, mysql.TypeLonglong, 3, 0, charset.CharsetBin, charset.CharsetBin, mysql.BinaryFlag}, + {uint64(1), mysql.TypeLonglong, 1, 0, charset.CharsetBin, charset.CharsetBin, mysql.BinaryFlag | mysql.UnsignedFlag}, + {uint64(123), mysql.TypeLonglong, 3, 0, charset.CharsetBin, charset.CharsetBin, mysql.BinaryFlag | mysql.UnsignedFlag}, + {uint64(1234), mysql.TypeLonglong, 4, 0, charset.CharsetBin, charset.CharsetBin, mysql.BinaryFlag | mysql.UnsignedFlag}, + {uint64(1234567), mysql.TypeLonglong, 7, 0, charset.CharsetBin, charset.CharsetBin, mysql.BinaryFlag | mysql.UnsignedFlag}, + {uint64(12345678), mysql.TypeLonglong, 8, 0, charset.CharsetBin, charset.CharsetBin, mysql.BinaryFlag | mysql.UnsignedFlag}, + {uint64(12345678901234567), mysql.TypeLonglong, 17, 0, charset.CharsetBin, charset.CharsetBin, mysql.BinaryFlag | mysql.UnsignedFlag}, + {"abc", mysql.TypeVarString, 3, UnspecifiedLength, charset.CharsetUTF8MB4, charset.CollationUTF8MB4, 0}, + {1.1, mysql.TypeDouble, 3, -1, charset.CharsetBin, charset.CharsetBin, mysql.BinaryFlag}, + {[]byte("abc"), mysql.TypeBlob, 3, UnspecifiedLength, charset.CharsetBin, charset.CharsetBin, mysql.BinaryFlag}, + } + for _, tt := range tests { + var ft FieldType + DefaultTypeForValue(tt.value, &ft) + c.Assert(ft.Tp, Equals, tt.tp, Commentf("%v %v", ft.Tp, tt.tp)) + c.Assert(ft.Flen, Equals, tt.flen, Commentf("%v %v", ft.Flen, tt.flen)) + c.Assert(ft.Charset, Equals, tt.charset, Commentf("%v %v", ft.Charset, tt.charset)) + c.Assert(ft.Decimal, Equals, tt.decimal, Commentf("%v %v", ft.Decimal, tt.decimal)) + c.Assert(ft.Collate, Equals, tt.collation, Commentf("%v %v", ft.Collate, tt.collation)) + c.Assert(ft.Flag, Equals, tt.flag, Commentf("%v %v", ft.Flag, tt.flag)) + } +} + 
+func (s *testFieldTypeSuite) TestAggFieldType(c *C) { + defer testleak.AfterTest(c)() + fts := []*FieldType{ + NewFieldType(mysql.TypeDecimal), + NewFieldType(mysql.TypeTiny), + NewFieldType(mysql.TypeShort), + NewFieldType(mysql.TypeLong), + NewFieldType(mysql.TypeFloat), + NewFieldType(mysql.TypeDouble), + NewFieldType(mysql.TypeNull), + NewFieldType(mysql.TypeTimestamp), + NewFieldType(mysql.TypeLonglong), + NewFieldType(mysql.TypeInt24), + NewFieldType(mysql.TypeDate), + NewFieldType(mysql.TypeDuration), + NewFieldType(mysql.TypeDatetime), + NewFieldType(mysql.TypeYear), + NewFieldType(mysql.TypeNewDate), + NewFieldType(mysql.TypeVarchar), + NewFieldType(mysql.TypeBit), + NewFieldType(mysql.TypeNewDecimal), + NewFieldType(mysql.TypeEnum), + NewFieldType(mysql.TypeSet), + NewFieldType(mysql.TypeTinyBlob), + NewFieldType(mysql.TypeMediumBlob), + NewFieldType(mysql.TypeLongBlob), + NewFieldType(mysql.TypeBlob), + NewFieldType(mysql.TypeVarString), + NewFieldType(mysql.TypeString), + NewFieldType(mysql.TypeGeometry), + } + + for i := range fts { + aggTp := AggFieldType(fts[i : i+1]) + c.Assert(aggTp.Tp, Equals, fts[i].Tp) + + aggTp = AggFieldType([]*FieldType{fts[i], fts[i]}) + switch fts[i].Tp { + case mysql.TypeDate: + c.Assert(aggTp.Tp, Equals, mysql.TypeDate) + case mysql.TypeEnum, mysql.TypeSet, mysql.TypeVarString: + c.Assert(aggTp.Tp, Equals, mysql.TypeVarchar) + case mysql.TypeDecimal: + c.Assert(aggTp.Tp, Equals, mysql.TypeNewDecimal) + default: + c.Assert(aggTp.Tp, Equals, fts[i].Tp) + } + + aggTp = AggFieldType([]*FieldType{fts[i], NewFieldType(mysql.TypeLong)}) + switch fts[i].Tp { + case mysql.TypeTiny, mysql.TypeShort, mysql.TypeLong, + mysql.TypeYear, mysql.TypeInt24, mysql.TypeNull: + c.Assert(aggTp.Tp, Equals, mysql.TypeLong) + case mysql.TypeLonglong: + c.Assert(aggTp.Tp, Equals, mysql.TypeLonglong) + case mysql.TypeFloat, mysql.TypeDouble: + c.Assert(aggTp.Tp, Equals, mysql.TypeDouble) + case mysql.TypeTimestamp, mysql.TypeDate, mysql.TypeDuration, + mysql.TypeDatetime, mysql.TypeNewDate, mysql.TypeVarchar, + mysql.TypeBit, mysql.TypeEnum, mysql.TypeSet, + mysql.TypeVarString, mysql.TypeGeometry: + c.Assert(aggTp.Tp, Equals, mysql.TypeVarchar) + case mysql.TypeString: + c.Assert(aggTp.Tp, Equals, mysql.TypeString) + case mysql.TypeDecimal, mysql.TypeNewDecimal: + c.Assert(aggTp.Tp, Equals, mysql.TypeNewDecimal) + case mysql.TypeTinyBlob: + c.Assert(aggTp.Tp, Equals, mysql.TypeTinyBlob) + case mysql.TypeBlob: + c.Assert(aggTp.Tp, Equals, mysql.TypeBlob) + case mysql.TypeMediumBlob: + c.Assert(aggTp.Tp, Equals, mysql.TypeMediumBlob) + case mysql.TypeLongBlob: + c.Assert(aggTp.Tp, Equals, mysql.TypeLongBlob) + } + } +} + +func (s *testFieldTypeSuite) TestAggregateEvalType(c *C) { + defer testleak.AfterTest(c)() + fts := []*FieldType{ + NewFieldType(mysql.TypeDecimal), + NewFieldType(mysql.TypeTiny), + NewFieldType(mysql.TypeShort), + NewFieldType(mysql.TypeLong), + NewFieldType(mysql.TypeFloat), + NewFieldType(mysql.TypeDouble), + NewFieldType(mysql.TypeNull), + NewFieldType(mysql.TypeTimestamp), + NewFieldType(mysql.TypeLonglong), + NewFieldType(mysql.TypeInt24), + NewFieldType(mysql.TypeDate), + NewFieldType(mysql.TypeDuration), + NewFieldType(mysql.TypeDatetime), + NewFieldType(mysql.TypeYear), + NewFieldType(mysql.TypeNewDate), + NewFieldType(mysql.TypeVarchar), + NewFieldType(mysql.TypeBit), + NewFieldType(mysql.TypeNewDecimal), + NewFieldType(mysql.TypeEnum), + NewFieldType(mysql.TypeSet), + NewFieldType(mysql.TypeTinyBlob), + NewFieldType(mysql.TypeMediumBlob), + 
NewFieldType(mysql.TypeLongBlob), + NewFieldType(mysql.TypeBlob), + NewFieldType(mysql.TypeVarString), + NewFieldType(mysql.TypeString), + NewFieldType(mysql.TypeGeometry), + } + + for i := range fts { + var flag uint + aggregatedEvalType := AggregateEvalType(fts[i:i+1], &flag) + switch fts[i].Tp { + case mysql.TypeDecimal, mysql.TypeNull, mysql.TypeTimestamp, mysql.TypeDate, + mysql.TypeDuration, mysql.TypeDatetime, mysql.TypeNewDate, mysql.TypeVarchar, + mysql.TypeEnum, mysql.TypeSet, mysql.TypeTinyBlob, + mysql.TypeMediumBlob, mysql.TypeLongBlob, mysql.TypeBlob, + mysql.TypeVarString, mysql.TypeString, mysql.TypeGeometry: + c.Assert(aggregatedEvalType.IsStringKind(), IsTrue) + c.Assert(flag, Equals, uint(0)) + case mysql.TypeTiny, mysql.TypeShort, mysql.TypeLong, mysql.TypeLonglong, mysql.TypeBit, + mysql.TypeInt24, mysql.TypeYear: + c.Assert(aggregatedEvalType, Equals, ETInt) + c.Assert(flag, Equals, uint(mysql.BinaryFlag)) + case mysql.TypeFloat, mysql.TypeDouble: + c.Assert(aggregatedEvalType, Equals, ETReal) + c.Assert(flag, Equals, uint(mysql.BinaryFlag)) + } + + flag = 0 + aggregatedEvalType = AggregateEvalType([]*FieldType{fts[i], fts[i]}, &flag) + switch fts[i].Tp { + case mysql.TypeDecimal, mysql.TypeNull, mysql.TypeTimestamp, mysql.TypeDate, + mysql.TypeDuration, mysql.TypeDatetime, mysql.TypeNewDate, mysql.TypeVarchar, + mysql.TypeEnum, mysql.TypeSet, mysql.TypeTinyBlob, + mysql.TypeMediumBlob, mysql.TypeLongBlob, mysql.TypeBlob, + mysql.TypeVarString, mysql.TypeString, mysql.TypeGeometry: + c.Assert(aggregatedEvalType.IsStringKind(), IsTrue) + c.Assert(flag, Equals, uint(0)) + case mysql.TypeTiny, mysql.TypeShort, mysql.TypeLong, mysql.TypeLonglong, mysql.TypeBit, + mysql.TypeInt24, mysql.TypeYear: + c.Assert(aggregatedEvalType, Equals, ETInt) + c.Assert(flag, Equals, uint(mysql.BinaryFlag)) + case mysql.TypeFloat, mysql.TypeDouble: + c.Assert(aggregatedEvalType, Equals, ETReal) + c.Assert(flag, Equals, uint(mysql.BinaryFlag)) + } + flag = 0 + aggregatedEvalType = AggregateEvalType([]*FieldType{fts[i], NewFieldType(mysql.TypeLong)}, &flag) + switch fts[i].Tp { + case mysql.TypeTimestamp, mysql.TypeDate, mysql.TypeDuration, + mysql.TypeDatetime, mysql.TypeNewDate, mysql.TypeVarchar, + mysql.TypeEnum, mysql.TypeSet, mysql.TypeTinyBlob, mysql.TypeMediumBlob, + mysql.TypeLongBlob, mysql.TypeBlob, mysql.TypeVarString, + mysql.TypeString, mysql.TypeGeometry: + c.Assert(aggregatedEvalType.IsStringKind(), IsTrue) + c.Assert(flag, Equals, uint(0)) + case mysql.TypeDecimal, mysql.TypeTiny, mysql.TypeShort, mysql.TypeLong, mysql.TypeNull, mysql.TypeBit, + mysql.TypeLonglong, mysql.TypeYear, mysql.TypeInt24: + c.Assert(aggregatedEvalType, Equals, ETInt) + c.Assert(flag, Equals, uint(mysql.BinaryFlag)) + case mysql.TypeFloat, mysql.TypeDouble: + c.Assert(aggregatedEvalType, Equals, ETReal) + c.Assert(flag, Equals, uint(mysql.BinaryFlag)) + } + } +} diff --git a/types/fsp.go b/types/fsp.go new file mode 100644 index 0000000..1059c9d --- /dev/null +++ b/types/fsp.go @@ -0,0 +1,100 @@ +// Copyright 2015 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package types + +import ( + "math" + "strconv" + "strings" + + "github.com/pingcap/errors" +) + +const ( + // UnspecifiedFsp is the unspecified fractional seconds part. + UnspecifiedFsp = int8(-1) + // MaxFsp is the maximum digit of fractional seconds part. + MaxFsp = int8(6) + // MinFsp is the minimum digit of fractional seconds part. + MinFsp = int8(0) + // DefaultFsp is the default digit of fractional seconds part. + // MySQL use 0 as the default Fsp. + DefaultFsp = int8(0) +) + +// CheckFsp checks whether fsp is in valid range. +func CheckFsp(fsp int) (int8, error) { + if fsp == int(UnspecifiedFsp) { + return DefaultFsp, nil + } + if fsp < int(MinFsp) || fsp > int(MaxFsp) { + return DefaultFsp, errors.Errorf("Invalid fsp %d", fsp) + } + return int8(fsp), nil +} + +// ParseFrac parses the input string according to fsp, returns the microsecond, +// and also a bool value to indice overflow. eg: +// "999" fsp=2 will overflow. +func ParseFrac(s string, fsp int8) (v int, overflow bool, err error) { + if len(s) == 0 { + return 0, false, nil + } + + fsp, err = CheckFsp(int(fsp)) + if err != nil { + return 0, false, errors.Trace(err) + } + + if int(fsp) >= len(s) { + tmp, e := strconv.ParseInt(s, 10, 64) + if e != nil { + return 0, false, errors.Trace(e) + } + v = int(float64(tmp) * math.Pow10(int(MaxFsp)-len(s))) + return + } + + // Round when fsp < string length. + tmp, e := strconv.ParseInt(s[:fsp+1], 10, 64) + if e != nil { + return 0, false, errors.Trace(e) + } + tmp = (tmp + 5) / 10 + + if float64(tmp) >= math.Pow10(int(fsp)) { + // overflow + return 0, true, nil + } + + // Get the final frac, with 6 digit number + // 1236 round 3 -> 124 -> 124000 + // 0312 round 2 -> 3 -> 30000 + // 999 round 2 -> 100 -> overflow + v = int(float64(tmp) * math.Pow10(int(MaxFsp-fsp))) + return +} + +// alignFrac is used to generate alignment frac, like `100` -> `100000` ,`-100` -> `-100000` +func alignFrac(s string, fsp int) string { + sl := len(s) + if sl > 0 && s[0] == '-' { + sl = sl - 1 + } + if sl < fsp { + return s + strings.Repeat("0", fsp-sl) + } + + return s +} diff --git a/types/fsp_test.go b/types/fsp_test.go new file mode 100644 index 0000000..1dc02cc --- /dev/null +++ b/types/fsp_test.go @@ -0,0 +1,123 @@ +// Copyright 2019 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package types + +import ( + "strconv" + + . 
"github.com/pingcap/check" +) + +var _ = Suite(&FspTest{}) + +type FspTest struct{} + +func (s *FspTest) TestCheckFsp(c *C) { + c.Parallel() + obtained, err := CheckFsp(int(UnspecifiedFsp)) + c.Assert(obtained, Equals, DefaultFsp) + c.Assert(err, IsNil) + + obtained, err = CheckFsp(-2019) + c.Assert(obtained, Equals, DefaultFsp) + c.Assert(err, ErrorMatches, "Invalid fsp -2019") + + obtained, err = CheckFsp(int(MinFsp) - 4294967296) + c.Assert(obtained, Equals, DefaultFsp) + c.Assert(err, ErrorMatches, "Invalid fsp "+strconv.Itoa(int(MinFsp)-4294967296)) + + // UnspecifiedFsp + obtained, err = CheckFsp(-1) + c.Assert(obtained, Equals, DefaultFsp) + c.Assert(err, IsNil) + + obtained, err = CheckFsp(int(MaxFsp) + 1) + c.Assert(obtained, Equals, DefaultFsp) + c.Assert(err, ErrorMatches, "Invalid fsp "+strconv.Itoa(int(MaxFsp)+1)) + + obtained, err = CheckFsp(int(MaxFsp) + 2019) + c.Assert(obtained, Equals, DefaultFsp) + c.Assert(err, ErrorMatches, "Invalid fsp "+strconv.Itoa(int(MaxFsp)+2019)) + + obtained, err = CheckFsp(int(MaxFsp) + 4294967296) + c.Assert(obtained, Equals, DefaultFsp) + c.Assert(err, ErrorMatches, "Invalid fsp "+strconv.Itoa(int(MaxFsp)+4294967296)) + + obtained, err = CheckFsp(int(MaxFsp+MinFsp) / 2) + c.Assert(obtained, Equals, (MaxFsp+MinFsp)/2) + c.Assert(err, IsNil) + + obtained, err = CheckFsp(5) + c.Assert(obtained, Equals, int8(5)) + c.Assert(err, IsNil) +} + +func (s *FspTest) TestParseFrac(c *C) { + c.Parallel() + obtained, overflow, err := ParseFrac("", 5) + c.Assert(obtained, Equals, 0) + c.Assert(overflow, Equals, false) + c.Assert(err, IsNil) + + a := 200 + obtained, overflow, err = ParseFrac("999", int8(a)) + c.Assert(obtained, Equals, 0) + c.Assert(overflow, Equals, false) + c.Assert(err, ErrorMatches, "Invalid fsp .*") + + obtained, overflow, err = ParseFrac("NotNum", MaxFsp) + c.Assert(obtained, Equals, 0) + c.Assert(overflow, Equals, false) + c.Assert(err, ErrorMatches, "strconv.ParseInt:.*") + + obtained, overflow, err = ParseFrac("1235", 6) + c.Assert(obtained, Equals, 123500) + c.Assert(overflow, Equals, false) + c.Assert(err, IsNil) + + obtained, overflow, err = ParseFrac("123456", 4) + c.Assert(obtained, Equals, 123500) + c.Assert(overflow, Equals, false) + c.Assert(err, IsNil) + + // 1236 round 3 -> 124 -> 124000 + obtained, overflow, err = ParseFrac("1236", 3) + c.Assert(obtained, Equals, 124000) + c.Assert(overflow, Equals, false) + c.Assert(err, IsNil) + + // 03123 round 2 -> 3 -> 30000 + obtained, overflow, err = ParseFrac("0312", 2) + c.Assert(obtained, Equals, 30000) + c.Assert(overflow, Equals, false) + c.Assert(err, IsNil) + + // 999 round 2 -> 100 -> overflow + obtained, overflow, err = ParseFrac("999", 2) + c.Assert(obtained, Equals, 0) + c.Assert(overflow, Equals, true) + c.Assert(err, IsNil) +} + +func (s *FspTest) TestAlignFrac(c *C) { + c.Parallel() + obtained := alignFrac("100", 6) + c.Assert(obtained, Equals, "100000") + obtained = alignFrac("10000000000", 6) + c.Assert(obtained, Equals, "10000000000") + obtained = alignFrac("-100", 6) + c.Assert(obtained, Equals, "-100000") + obtained = alignFrac("-10000000000", 6) + c.Assert(obtained, Equals, "-10000000000") +} diff --git a/types/helper.go b/types/helper.go new file mode 100644 index 0000000..124f377 --- /dev/null +++ b/types/helper.go @@ -0,0 +1,163 @@ +// Copyright 2015 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package types + +import ( + "math" + "strings" + "unicode" + + "github.com/pingcap/errors" +) + +// RoundFloat rounds float val to the nearest integer value with float64 format, like MySQL Round function. +// RoundFloat uses default rounding mode, see https://dev.mysql.com/doc/refman/5.7/en/precision-math-rounding.html +// so rounding use "round half away from zero". +// e.g, 1.5 -> 2, -1.5 -> -2. +func RoundFloat(f float64) float64 { + if math.Abs(f) < 0.5 { + return 0 + } + + return math.Trunc(f + math.Copysign(0.5, f)) +} + +// Round rounds the argument f to dec decimal places. +// dec defaults to 0 if not specified. dec can be negative +// to cause dec digits left of the decimal point of the +// value f to become zero. +func Round(f float64, dec int) float64 { + shift := math.Pow10(dec) + tmp := f * shift + if math.IsInf(tmp, 0) { + return f + } + return RoundFloat(tmp) / shift +} + +// Truncate truncates the argument f to dec decimal places. +// dec defaults to 0 if not specified. dec can be negative +// to cause dec digits left of the decimal point of the +// value f to become zero. +func Truncate(f float64, dec int) float64 { + shift := math.Pow10(dec) + tmp := f * shift + if math.IsInf(tmp, 0) { + return f + } + return math.Trunc(tmp) / shift +} + +// GetMaxFloat gets the max float for given flen and decimal. +func GetMaxFloat(flen int, decimal int) float64 { + intPartLen := flen - decimal + f := math.Pow10(intPartLen) + f -= math.Pow10(-decimal) + return f +} + +// TruncateFloat tries to truncate f. +// If the result exceeds the max/min float that flen/decimal allowed, returns the max/min float allowed. +func TruncateFloat(f float64, flen int, decimal int) (float64, error) { + if math.IsNaN(f) { + // nan returns 0 + return 0, ErrOverflow.GenWithStackByArgs("DOUBLE", "") + } + + maxF := GetMaxFloat(flen, decimal) + + if !math.IsInf(f, 0) { + f = Round(f, decimal) + } + + var err error + if f > maxF { + f = maxF + err = ErrOverflow.GenWithStackByArgs("DOUBLE", "") + } else if f < -maxF { + f = -maxF + err = ErrOverflow.GenWithStackByArgs("DOUBLE", "") + } + + return f, errors.Trace(err) +} + +func isDigit(c byte) bool { + return c >= '0' && c <= '9' +} + +const ( + maxUint = uint64(math.MaxUint64) + uintCutOff = maxUint/uint64(10) + 1 + intCutOff = uint64(math.MaxInt64) + 1 +) + +// strToInt converts a string to an integer in best effort. 
+func strToInt(str string) (int64, error) { + str = strings.TrimSpace(str) + if len(str) == 0 { + return 0, ErrTruncated + } + negative := false + i := 0 + if str[i] == '-' { + negative = true + i++ + } else if str[i] == '+' { + i++ + } + + var ( + err error + hasNum = false + ) + r := uint64(0) + for ; i < len(str); i++ { + if !unicode.IsDigit(rune(str[i])) { + err = ErrTruncated + break + } + hasNum = true + if r >= uintCutOff { + r = 0 + err = errors.Trace(ErrBadNumber) + break + } + r = r * uint64(10) + + r1 := r + uint64(str[i]-'0') + if r1 < r || r1 > maxUint { + r = 0 + err = errors.Trace(ErrBadNumber) + break + } + r = r1 + } + if !hasNum { + err = ErrTruncated + } + + if !negative && r >= intCutOff { + return math.MaxInt64, errors.Trace(ErrBadNumber) + } + + if negative && r > intCutOff { + return math.MinInt64, errors.Trace(ErrBadNumber) + } + + if negative { + r = -r + } + return int64(r), err +} diff --git a/types/helper_test.go b/types/helper_test.go new file mode 100644 index 0000000..434a88c --- /dev/null +++ b/types/helper_test.go @@ -0,0 +1,65 @@ +// Copyright 2018 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package types + +import ( + "strconv" + + . "github.com/pingcap/check" + "github.com/pingcap/errors" +) + +var _ = Suite(&testTypeHelperSuite{}) + +type testTypeHelperSuite struct { +} + +func (s *testTypeHelperSuite) TestStrToInt(c *C) { + c.Parallel() + tests := []struct { + input string + output string + err error + }{ + {"9223372036854775806", "9223372036854775806", nil}, + {"9223372036854775807", "9223372036854775807", nil}, + {"9223372036854775808", "9223372036854775807", ErrBadNumber}, + {"-9223372036854775807", "-9223372036854775807", nil}, + {"-9223372036854775808", "-9223372036854775808", nil}, + {"-9223372036854775809", "-9223372036854775808", ErrBadNumber}, + } + for _, tt := range tests { + output, err := strToInt(tt.input) + c.Assert(errors.Cause(err), Equals, tt.err) + c.Check(strconv.FormatInt(output, 10), Equals, tt.output) + } +} + +func (s *testTypeHelperSuite) TestTruncate(c *C) { + c.Parallel() + tests := []struct { + f float64 + dec int + expected float64 + }{ + {123.45, 0, 123}, + {123.45, 1, 123.4}, + {123.45, 2, 123.45}, + {123.45, 3, 123.450}, + } + for _, tt := range tests { + res := Truncate(tt.f, tt.dec) + c.Assert(res, Equals, tt.expected) + } +} diff --git a/types/overflow.go b/types/overflow.go new file mode 100644 index 0000000..07bdc78 --- /dev/null +++ b/types/overflow.go @@ -0,0 +1,200 @@ +// Copyright 2015 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. 
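// Editor's note: a minimal, self-contained sketch (not part of the diff) of the
// overflow-check pattern that types/overflow.go below applies to BIGINT arithmetic.
// Signed addition can only overflow when both operands share a sign and the headroom
// left to math.MaxInt64 (or math.MinInt64) is smaller than the other operand.
// checkedAdd is a hypothetical name; only "errors" and "math" from the standard
// library are assumed, with a plain error standing in for ErrOverflow:
//
//	func checkedAdd(a, b int64) (int64, error) {
//		if (a > 0 && b > 0 && math.MaxInt64-a < b) ||
//			(a < 0 && b < 0 && math.MinInt64-a > b) {
//			return 0, errors.New("BIGINT value is out of range") // stand-in for ErrOverflow
//		}
//		return a + b, nil
//	}
//
//	// checkedAdd(math.MaxInt64, 1) and checkedAdd(-1, math.MinInt64) report overflow;
//	// checkedAdd(math.MaxInt64, math.MinInt64) returns -1 with no error.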
+ +package types + +import ( + "fmt" + "math" + "time" + + "github.com/pingcap/errors" +) + +// AddUint64 adds uint64 a and b if no overflow, else returns error. +func AddUint64(a uint64, b uint64) (uint64, error) { + if math.MaxUint64-a < b { + return 0, ErrOverflow.GenWithStackByArgs("BIGINT UNSIGNED", fmt.Sprintf("(%d, %d)", a, b)) + } + return a + b, nil +} + +// AddInt64 adds int64 a and b if no overflow, otherwise returns error. +func AddInt64(a int64, b int64) (int64, error) { + if (a > 0 && b > 0 && math.MaxInt64-a < b) || + (a < 0 && b < 0 && math.MinInt64-a > b) { + return 0, ErrOverflow.GenWithStackByArgs("BIGINT", fmt.Sprintf("(%d, %d)", a, b)) + } + + return a + b, nil +} + +// AddDuration adds time.Duration a and b if no overflow, otherwise returns error. +func AddDuration(a time.Duration, b time.Duration) (time.Duration, error) { + if (a > 0 && b > 0 && math.MaxInt64-a < b) || + (a < 0 && b < 0 && math.MinInt64-a > b) { + return 0, ErrOverflow.GenWithStackByArgs("BIGINT", fmt.Sprintf("(%d, %d)", int64(a), int64(b))) + } + + return a + b, nil +} + +// AddInteger adds uint64 a and int64 b and returns uint64 if no overflow error. +func AddInteger(a uint64, b int64) (uint64, error) { + if b >= 0 { + return AddUint64(a, uint64(b)) + } + + if uint64(-b) > a { + return 0, ErrOverflow.GenWithStackByArgs("BIGINT UNSIGNED", fmt.Sprintf("(%d, %d)", a, b)) + } + return a - uint64(-b), nil +} + +// SubUint64 subtracts uint64 a with b and returns uint64 if no overflow error. +func SubUint64(a uint64, b uint64) (uint64, error) { + if a < b { + return 0, ErrOverflow.GenWithStackByArgs("BIGINT UNSIGNED", fmt.Sprintf("(%d, %d)", a, b)) + } + return a - b, nil +} + +// SubInt64 subtracts int64 a with b and returns int64 if no overflow error. +func SubInt64(a int64, b int64) (int64, error) { + if (a > 0 && b < 0 && math.MaxInt64-a < -b) || + (a < 0 && b > 0 && math.MinInt64-a > -b) || + (a == 0 && b == math.MinInt64) { + return 0, ErrOverflow.GenWithStackByArgs("BIGINT", fmt.Sprintf("(%d, %d)", a, b)) + } + return a - b, nil +} + +// SubUintWithInt subtracts uint64 a with int64 b and returns uint64 if no overflow error. +func SubUintWithInt(a uint64, b int64) (uint64, error) { + if b < 0 { + return AddUint64(a, uint64(-b)) + } + return SubUint64(a, uint64(b)) +} + +// SubIntWithUint subtracts int64 a with uint64 b and returns uint64 if no overflow error. +func SubIntWithUint(a int64, b uint64) (uint64, error) { + if a < 0 || uint64(a) < b { + return 0, ErrOverflow.GenWithStackByArgs("BIGINT UNSIGNED", fmt.Sprintf("(%d, %d)", a, b)) + } + return uint64(a) - b, nil +} + +// MulUint64 multiplies uint64 a and b and returns uint64 if no overflow error. +func MulUint64(a uint64, b uint64) (uint64, error) { + if b > 0 && a > math.MaxUint64/b { + return 0, ErrOverflow.GenWithStackByArgs("BIGINT UNSIGNED", fmt.Sprintf("(%d, %d)", a, b)) + } + return a * b, nil +} + +// MulInt64 multiplies int64 a and b and returns int64 if no overflow error. 
+func MulInt64(a int64, b int64) (int64, error) { + if a == 0 || b == 0 { + return 0, nil + } + + var ( + res uint64 + err error + negative = false + ) + + if a > 0 && b > 0 { + res, err = MulUint64(uint64(a), uint64(b)) + } else if a < 0 && b < 0 { + res, err = MulUint64(uint64(-a), uint64(-b)) + } else if a < 0 && b > 0 { + negative = true + res, err = MulUint64(uint64(-a), uint64(b)) + } else { + negative = true + res, err = MulUint64(uint64(a), uint64(-b)) + } + + if err != nil { + return 0, errors.Trace(err) + } + + if negative { + // negative result + if res > math.MaxInt64+1 { + return 0, ErrOverflow.GenWithStackByArgs("BIGINT", fmt.Sprintf("(%d, %d)", a, b)) + } + + return -int64(res), nil + } + + // positive result + if res > math.MaxInt64 { + return 0, ErrOverflow.GenWithStackByArgs("BIGINT", fmt.Sprintf("(%d, %d)", a, b)) + } + + return int64(res), nil +} + +// MulInteger multiplies uint64 a and int64 b, and returns uint64 if no overflow error. +func MulInteger(a uint64, b int64) (uint64, error) { + if a == 0 || b == 0 { + return 0, nil + } + + if b < 0 { + return 0, ErrOverflow.GenWithStackByArgs("BIGINT UNSIGNED", fmt.Sprintf("(%d, %d)", a, b)) + } + + return MulUint64(a, uint64(b)) +} + +// DivInt64 divides int64 a with b, returns int64 if no overflow error. +// It just checks overflow, if b is zero, a "divide by zero" panic throws. +func DivInt64(a int64, b int64) (int64, error) { + if a == math.MinInt64 && b == -1 { + return 0, ErrOverflow.GenWithStackByArgs("BIGINT", fmt.Sprintf("(%d, %d)", a, b)) + } + + return a / b, nil +} + +// DivUintWithInt divides uint64 a with int64 b, returns uint64 if no overflow error. +// It just checks overflow, if b is zero, a "divide by zero" panic throws. +func DivUintWithInt(a uint64, b int64) (uint64, error) { + if b < 0 { + if a != 0 && uint64(-b) <= a { + return 0, ErrOverflow.GenWithStackByArgs("BIGINT UNSIGNED", fmt.Sprintf("(%d, %d)", a, b)) + } + + return 0, nil + } + + return a / uint64(b), nil +} + +// DivIntWithUint divides int64 a with uint64 b, returns uint64 if no overflow error. +// It just checks overflow, if b is zero, a "divide by zero" panic throws. +func DivIntWithUint(a int64, b uint64) (uint64, error) { + if a < 0 { + if uint64(-a) >= b { + return 0, ErrOverflow.GenWithStackByArgs("BIGINT", fmt.Sprintf("(%d, %d)", a, b)) + } + + return 0, nil + } + + return uint64(a) / b, nil +} diff --git a/types/overflow_test.go b/types/overflow_test.go new file mode 100644 index 0000000..27b7c2f --- /dev/null +++ b/types/overflow_test.go @@ -0,0 +1,346 @@ +// Copyright 2015 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package types + +import ( + "math" + "time" + + . 
"github.com/pingcap/check" + "github.com/pingcap/tidb/util/testleak" +) + +var _ = Suite(&testOverflowSuite{}) + +type testOverflowSuite struct { +} + +func (s *testOverflowSuite) TestAdd(c *C) { + defer testleak.AfterTest(c)() + tblUint64 := []struct { + lsh uint64 + rsh uint64 + ret uint64 + overflow bool + }{ + {math.MaxUint64, 1, 0, true}, + {math.MaxUint64, 0, math.MaxUint64, false}, + {1, 1, 2, false}, + } + + for _, t := range tblUint64 { + ret, err := AddUint64(t.lsh, t.rsh) + if t.overflow { + c.Assert(err, NotNil) + } else { + c.Assert(ret, Equals, t.ret) + } + } + + tblInt64 := []struct { + lsh int64 + rsh int64 + ret int64 + overflow bool + }{ + {math.MaxInt64, 1, 0, true}, + {math.MaxInt64, 0, math.MaxInt64, false}, + {0, math.MinInt64, math.MinInt64, false}, + {-1, math.MinInt64, 0, true}, + {math.MaxInt64, math.MinInt64, -1, false}, + {1, 1, 2, false}, + {1, -1, 0, false}, + } + + for _, t := range tblInt64 { + ret, err := AddInt64(t.lsh, t.rsh) + if t.overflow { + c.Assert(err, NotNil) + } else { + c.Assert(ret, Equals, t.ret) + } + ret2, err := AddDuration(time.Duration(t.lsh), time.Duration(t.rsh)) + if t.overflow { + c.Assert(err, NotNil) + } else { + c.Assert(ret2, Equals, time.Duration(t.ret)) + } + } + + tblInt := []struct { + lsh uint64 + rsh int64 + ret uint64 + overflow bool + }{ + {math.MaxUint64, math.MinInt64, math.MaxUint64 + math.MinInt64, false}, + {math.MaxInt64, math.MinInt64, 0, true}, + {0, -1, 0, true}, + {1, -1, 0, false}, + {0, 1, 1, false}, + {1, 1, 2, false}, + } + + for _, t := range tblInt { + ret, err := AddInteger(t.lsh, t.rsh) + if t.overflow { + c.Assert(err, NotNil) + } else { + c.Assert(ret, Equals, t.ret) + } + } +} + +func (s *testOverflowSuite) TestSub(c *C) { + defer testleak.AfterTest(c)() + tblUint64 := []struct { + lsh uint64 + rsh uint64 + ret uint64 + overflow bool + }{ + {math.MaxUint64, 1, math.MaxUint64 - 1, false}, + {math.MaxUint64, 0, math.MaxUint64, false}, + {0, math.MaxUint64, 0, true}, + {0, 1, 0, true}, + {1, math.MaxUint64, 0, true}, + {1, 1, 0, false}, + } + + for _, t := range tblUint64 { + ret, err := SubUint64(t.lsh, t.rsh) + if t.overflow { + c.Assert(err, NotNil) + } else { + c.Assert(ret, Equals, t.ret) + } + } + + tblInt64 := []struct { + lsh int64 + rsh int64 + ret int64 + overflow bool + }{ + {math.MinInt64, 0, math.MinInt64, false}, + {math.MinInt64, 1, 0, true}, + {math.MaxInt64, -1, 0, true}, + {0, math.MinInt64, 0, true}, + {-1, math.MinInt64, math.MaxInt64, false}, + {math.MinInt64, math.MaxInt64, 0, true}, + {math.MinInt64, math.MinInt64, 0, false}, + {math.MinInt64, -math.MaxInt64, -1, false}, + {1, 1, 0, false}, + } + + for _, t := range tblInt64 { + ret, err := SubInt64(t.lsh, t.rsh) + if t.overflow { + c.Assert(err, NotNil) + } else { + c.Assert(ret, Equals, t.ret) + } + } + + tblInt := []struct { + lsh uint64 + rsh int64 + ret uint64 + overflow bool + }{ + {0, math.MinInt64, -math.MinInt64, false}, + {0, 1, 0, true}, + {math.MaxUint64, math.MinInt64, 0, true}, + {math.MaxInt64, math.MinInt64, 2*math.MaxInt64 + 1, false}, + {math.MaxUint64, -1, 0, true}, + {0, -1, 1, false}, + {1, 1, 0, false}, + } + + for _, t := range tblInt { + ret, err := SubUintWithInt(t.lsh, t.rsh) + if t.overflow { + c.Assert(err, NotNil) + } else { + c.Assert(ret, Equals, t.ret) + } + } + + tblInt2 := []struct { + lsh int64 + rsh uint64 + ret uint64 + overflow bool + }{ + {math.MinInt64, 0, 0, true}, + {math.MaxInt64, 0, math.MaxInt64, false}, + {math.MaxInt64, math.MaxUint64, 0, true}, + {math.MaxInt64, -math.MinInt64, 0, 
true}, + {-1, 0, 0, true}, + {1, 1, 0, false}, + } + + for _, t := range tblInt2 { + ret, err := SubIntWithUint(t.lsh, t.rsh) + if t.overflow { + c.Assert(err, NotNil) + } else { + c.Assert(ret, Equals, t.ret) + } + } +} + +func (s *testOverflowSuite) TestMul(c *C) { + defer testleak.AfterTest(c)() + tblUint64 := []struct { + lsh uint64 + rsh uint64 + ret uint64 + overflow bool + }{ + {math.MaxUint64, 1, math.MaxUint64, false}, + {math.MaxUint64, 0, 0, false}, + {math.MaxUint64, 2, 0, true}, + {1, 1, 1, false}, + } + + for _, t := range tblUint64 { + ret, err := MulUint64(t.lsh, t.rsh) + if t.overflow { + c.Assert(err, NotNil) + } else { + c.Assert(ret, Equals, t.ret) + } + } + + tblInt64 := []struct { + lsh int64 + rsh int64 + ret int64 + overflow bool + }{ + {math.MaxInt64, 1, math.MaxInt64, false}, + {math.MinInt64, 1, math.MinInt64, false}, + {math.MaxInt64, -1, -math.MaxInt64, false}, + {math.MinInt64, -1, 0, true}, + {math.MinInt64, 0, 0, false}, + {math.MaxInt64, 0, 0, false}, + {math.MaxInt64, math.MaxInt64, 0, true}, + {math.MaxInt64, math.MinInt64, 0, true}, + {math.MinInt64 / 10, 11, 0, true}, + {1, 1, 1, false}, + } + + for _, t := range tblInt64 { + ret, err := MulInt64(t.lsh, t.rsh) + if t.overflow { + c.Assert(err, NotNil) + } else { + c.Assert(ret, Equals, t.ret) + } + } + + tblInt := []struct { + lsh uint64 + rsh int64 + ret uint64 + overflow bool + }{ + {math.MaxUint64, 0, 0, false}, + {0, -1, 0, false}, + {1, -1, 0, true}, + {math.MaxUint64, -1, 0, true}, + {math.MaxUint64, 10, 0, true}, + {1, 1, 1, false}, + } + + for _, t := range tblInt { + ret, err := MulInteger(t.lsh, t.rsh) + if t.overflow { + c.Assert(err, NotNil) + } else { + c.Assert(ret, Equals, t.ret) + } + } +} + +func (s *testOverflowSuite) TestDiv(c *C) { + defer testleak.AfterTest(c)() + tblInt64 := []struct { + lsh int64 + rsh int64 + ret int64 + overflow bool + }{ + {math.MaxInt64, 1, math.MaxInt64, false}, + {math.MinInt64, 1, math.MinInt64, false}, + {math.MinInt64, -1, 0, true}, + {math.MaxInt64, -1, -math.MaxInt64, false}, + {1, -1, -1, false}, + {-1, 1, -1, false}, + {-1, 2, 0, false}, + {math.MinInt64, 2, math.MinInt64 / 2, false}, + } + + for _, t := range tblInt64 { + ret, err := DivInt64(t.lsh, t.rsh) + if t.overflow { + c.Assert(err, NotNil) + } else { + c.Assert(ret, Equals, t.ret) + } + } + + tblInt := []struct { + lsh uint64 + rsh int64 + ret uint64 + overflow bool + }{ + {0, -1, 0, false}, + {1, -1, 0, true}, + {math.MaxInt64, math.MinInt64, 0, false}, + {math.MaxInt64, -1, 0, true}, + {100, 20, 5, false}, + } + + for _, t := range tblInt { + ret, err := DivUintWithInt(t.lsh, t.rsh) + if t.overflow { + c.Assert(err, NotNil) + } else { + c.Assert(ret, Equals, t.ret) + } + } + + tblInt2 := []struct { + lsh int64 + rsh uint64 + ret uint64 + overflow bool + }{ + {math.MinInt64, math.MaxInt64, 0, true}, + {0, 1, 0, false}, + {-1, math.MaxInt64, 0, false}, + } + + for _, t := range tblInt2 { + ret, err := DivIntWithUint(t.lsh, t.rsh) + if t.overflow { + c.Assert(err, NotNil) + } else { + c.Assert(ret, Equals, t.ret) + } + } +} diff --git a/types/parser_driver/value_expr.go b/types/parser_driver/value_expr.go new file mode 100644 index 0000000..5fe8bd4 --- /dev/null +++ b/types/parser_driver/value_expr.go @@ -0,0 +1,131 @@ +// Copyright 2018 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package driver + +import ( + "fmt" + "io" + "strconv" + + "github.com/pingcap/tidb/parser/ast" + "github.com/pingcap/tidb/parser/mysql" + "github.com/pingcap/tidb/types" +) + +// The purpose of driver package is to decompose the dependency of the parser and +// types package. +// It provides the NewValueExpr function for the ast package, so the ast package +// do not depends on the concrete definition of `types.Datum`, thus get rid of +// the dependency of the types package. +// The parser package depends on the ast package, but not the types package. +// The whole relationship: +// ast imports [] +// tidb/types imports [parser/types] +// parser imports [ast, parser/types] +// driver imports [ast, tidb/types] +// tidb imports [parser, driver] + +func init() { + ast.NewValueExpr = newValueExpr + ast.NewDecimal = func(str string) (interface{}, error) { + n, err := strconv.ParseFloat(str, 64) + return n, err + } + ast.NewHexLiteral = func(str string) (interface{}, error) { + h, err := types.NewHexLiteral(str) + return h, err + } + ast.NewBitLiteral = func(str string) (interface{}, error) { + b, err := types.NewBitLiteral(str) + return b, err + } +} + +var ( + _ ast.ValueExpr = &ValueExpr{} +) + +// ValueExpr is the simple value expression. +type ValueExpr struct { + ast.TexprNode + types.Datum + projectionOffset int +} + +// GetDatumString implements the ast.ValueExpr interface. +func (n *ValueExpr) GetDatumString() string { + return n.GetString() +} + +// Format the ExprNode into a Writer. +func (n *ValueExpr) Format(w io.Writer) { + var s string + switch n.Kind() { + case types.KindNull: + s = "NULL" + case types.KindInt64: + if n.Type.Flag&mysql.IsBooleanFlag != 0 { + if n.GetInt64() > 0 { + s = "TRUE" + } else { + s = "FALSE" + } + } else { + s = strconv.FormatInt(n.GetInt64(), 10) + } + case types.KindUint64: + s = strconv.FormatUint(n.GetUint64(), 10) + case types.KindFloat32: + s = strconv.FormatFloat(n.GetFloat64(), 'e', -1, 32) + case types.KindFloat64: + s = strconv.FormatFloat(n.GetFloat64(), 'e', -1, 64) + case types.KindString, types.KindBytes: + s = strconv.Quote(n.GetString()) + default: + panic("Can't format to string") + } + fmt.Fprint(w, s) +} + +// newValueExpr creates a ValueExpr with value, and sets default field type. +func newValueExpr(value interface{}) ast.ValueExpr { + if ve, ok := value.(*ValueExpr); ok { + return ve + } + ve := &ValueExpr{} + ve.SetValue(value) + types.DefaultTypeForValue(value, &ve.Type) + ve.projectionOffset = -1 + return ve +} + +// SetProjectionOffset sets ValueExpr.projectionOffset for logical plan builder. +func (n *ValueExpr) SetProjectionOffset(offset int) { + n.projectionOffset = offset +} + +// GetProjectionOffset returns ValueExpr.projectionOffset. +func (n *ValueExpr) GetProjectionOffset() int { + return n.projectionOffset +} + +// Accept implements Node interface. 
+func (n *ValueExpr) Accept(v ast.Visitor) (ast.Node, bool) { + newNode, skipChildren := v.Enter(n) + if skipChildren { + return v.Leave(newNode) + } + n = newNode.(*ValueExpr) + return v.Leave(n) +} diff --git a/util/admin/admin.go b/util/admin/admin.go new file mode 100644 index 0000000..a69bab0 --- /dev/null +++ b/util/admin/admin.go @@ -0,0 +1,264 @@ +// Copyright 2015 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package admin + +import ( + "github.com/pingcap/errors" + "github.com/pingcap/tidb/kv" + "github.com/pingcap/tidb/meta" + "github.com/pingcap/tidb/parser/model" + "github.com/pingcap/tidb/parser/mysql" + "github.com/pingcap/tidb/parser/terror" + "github.com/pingcap/tidb/types" + "github.com/pingcap/tidb/util/logutil" + "go.uber.org/zap" + "sort" +) + +// DDLInfo is for DDL information. +type DDLInfo struct { + SchemaVer int64 + ReorgHandle int64 // It's only used for DDL information. + Jobs []*model.Job // It's the currently running jobs. +} + +// GetDDLInfo returns DDL information. +func GetDDLInfo(txn kv.Transaction) (*DDLInfo, error) { + var err error + info := &DDLInfo{} + t := meta.NewMeta(txn) + + info.Jobs = make([]*model.Job, 0, 2) + job, err := t.GetDDLJobByIdx(0) + if err != nil { + return nil, errors.Trace(err) + } + if job != nil { + info.Jobs = append(info.Jobs, job) + } + addIdxJob, err := t.GetDDLJobByIdx(0, meta.AddIndexJobListKey) + if err != nil { + return nil, errors.Trace(err) + } + if addIdxJob != nil { + info.Jobs = append(info.Jobs, addIdxJob) + } + + info.SchemaVer, err = t.GetSchemaVersion() + if err != nil { + return nil, errors.Trace(err) + } + if addIdxJob == nil { + return info, nil + } + + info.ReorgHandle, _, _, err = t.GetDDLReorgHandle(addIdxJob) + if err != nil { + return nil, errors.Trace(err) + } + + return info, nil +} + +// IsJobRollbackable checks whether the job can be rollback. +func IsJobRollbackable(job *model.Job) bool { + switch job.Type { + case model.ActionDropIndex, model.ActionDropPrimaryKey: + // We can't cancel if index current state is in StateDeleteOnly or StateDeleteReorganization, otherwise will cause inconsistent between record and index. + if job.SchemaState == model.StateDeleteOnly || + job.SchemaState == model.StateDeleteReorganization { + return false + } + case model.ActionDropSchema, model.ActionDropTable: + // To simplify the rollback logic, cannot be canceled in the following states. + if job.SchemaState == model.StateWriteOnly || + job.SchemaState == model.StateDeleteOnly { + return false + } + case model.ActionDropColumn, model.ActionModifyColumn, + model.ActionDropTablePartition, model.ActionAddTablePartition, + model.ActionRebaseAutoID, model.ActionShardRowID, + model.ActionModifyTableCharsetAndCollate, + model.ActionModifySchemaCharsetAndCollate: + return job.SchemaState == model.StateNone + } + return true +} + +// CancelJobs cancels the DDL jobs. 
+func CancelJobs(txn kv.Transaction, ids []int64) ([]error, error) { + if len(ids) == 0 { + return nil, nil + } + + errs := make([]error, len(ids)) + t := meta.NewMeta(txn) + generalJobs, err := getDDLJobsInQueue(t, meta.DefaultJobListKey) + if err != nil { + return nil, errors.Trace(err) + } + addIdxJobs, err := getDDLJobsInQueue(t, meta.AddIndexJobListKey) + if err != nil { + return nil, errors.Trace(err) + } + jobs := append(generalJobs, addIdxJobs...) + + for i, id := range ids { + found := false + for j, job := range jobs { + if id != job.ID { + logutil.BgLogger().Debug("the job that needs to be canceled isn't equal to current job", + zap.Int64("need to canceled job ID", id), + zap.Int64("current job ID", job.ID)) + continue + } + found = true + // These states can't be cancelled. + if job.IsDone() || job.IsSynced() { + errs[i] = ErrCancelFinishedDDLJob.GenWithStackByArgs(id) + continue + } + // If the state is rolling back, it means the work is cleaning the data after cancelling the job. + if job.IsCancelled() || job.IsRollingback() || job.IsRollbackDone() { + continue + } + if !IsJobRollbackable(job) { + errs[i] = ErrCannotCancelDDLJob.GenWithStackByArgs(job.ID) + continue + } + + job.State = model.JobStateCancelling + // Make sure RawArgs isn't overwritten. + err := job.DecodeArgs(job.RawArgs) + if err != nil { + errs[i] = errors.Trace(err) + continue + } + if job.Type == model.ActionAddIndex || job.Type == model.ActionAddPrimaryKey { + offset := int64(j - len(generalJobs)) + err = t.UpdateDDLJob(offset, job, true, meta.AddIndexJobListKey) + } else { + err = t.UpdateDDLJob(int64(j), job, true) + } + if err != nil { + errs[i] = errors.Trace(err) + } + } + if !found { + errs[i] = ErrDDLJobNotFound.GenWithStackByArgs(id) + } + } + return errs, nil +} + +func getDDLJobsInQueue(t *meta.Meta, jobListKey meta.JobListKeyType) ([]*model.Job, error) { + cnt, err := t.DDLJobQueueLen(jobListKey) + if err != nil { + return nil, errors.Trace(err) + } + jobs := make([]*model.Job, cnt) + for i := range jobs { + jobs[i], err = t.GetDDLJobByIdx(int64(i), jobListKey) + if err != nil { + return nil, errors.Trace(err) + } + } + return jobs, nil +} + +// GetDDLJobs get all DDL jobs and sorts jobs by job.ID. +func GetDDLJobs(txn kv.Transaction) ([]*model.Job, error) { + t := meta.NewMeta(txn) + generalJobs, err := getDDLJobsInQueue(t, meta.DefaultJobListKey) + if err != nil { + return nil, errors.Trace(err) + } + addIdxJobs, err := getDDLJobsInQueue(t, meta.AddIndexJobListKey) + if err != nil { + return nil, errors.Trace(err) + } + jobs := append(generalJobs, addIdxJobs...) + sort.Sort(jobArray(jobs)) + return jobs, nil +} + +type jobArray []*model.Job + +func (v jobArray) Len() int { + return len(v) +} + +func (v jobArray) Less(i, j int) bool { + return v[i].ID < v[j].ID +} + +func (v jobArray) Swap(i, j int) { + v[i], v[j] = v[j], v[i] +} + +// MaxHistoryJobs is exported for testing. +const MaxHistoryJobs = 10 + +// DefNumHistoryJobs is default value of the default number of history job +const DefNumHistoryJobs = 10 + +// GetHistoryDDLJobs returns the DDL history jobs and an error. +// The maximum count of history jobs is num. 
+func GetHistoryDDLJobs(txn kv.Transaction, maxNumJobs int) ([]*model.Job, error) { + t := meta.NewMeta(txn) + jobs, err := t.GetLastNHistoryDDLJobs(maxNumJobs) + if err != nil { + return nil, errors.Trace(err) + } + + jobsLen := len(jobs) + if jobsLen > maxNumJobs { + start := jobsLen - maxNumJobs + jobs = jobs[start:] + } + jobsLen = len(jobs) + ret := make([]*model.Job, 0, jobsLen) + for i := jobsLen - 1; i >= 0; i-- { + ret = append(ret, jobs[i]) + } + return ret, nil +} + +// RecordData is the record data composed of a handle and values. +type RecordData struct { + Handle int64 + Values []types.Datum +} + +var ( + // ErrDataInConsistent indicates that inconsistent data was found. + ErrDataInConsistent = terror.ClassAdmin.New(mysql.ErrDataInConsistent, mysql.MySQLErrName[mysql.ErrDataInConsistent]) + // ErrDDLJobNotFound indicates the job ID was not found. + ErrDDLJobNotFound = terror.ClassAdmin.New(mysql.ErrDDLJobNotFound, mysql.MySQLErrName[mysql.ErrDDLJobNotFound]) + // ErrCancelFinishedDDLJob is returned when canceling a finished DDL job. + ErrCancelFinishedDDLJob = terror.ClassAdmin.New(mysql.ErrCancelFinishedDDLJob, mysql.MySQLErrName[mysql.ErrCancelFinishedDDLJob]) + // ErrCannotCancelDDLJob is returned when canceling an almost finished DDL job, because canceling it now may cause data inconsistency. + ErrCannotCancelDDLJob = terror.ClassAdmin.New(mysql.ErrCannotCancelDDLJob, mysql.MySQLErrName[mysql.ErrCannotCancelDDLJob]) +) + +func init() { + // Register terror to mysql error map. + mySQLErrCodes := map[terror.ErrCode]uint16{ + mysql.ErrDataInConsistent: mysql.ErrDataInConsistent, + mysql.ErrDDLJobNotFound: mysql.ErrDDLJobNotFound, + mysql.ErrCancelFinishedDDLJob: mysql.ErrCancelFinishedDDLJob, + mysql.ErrCannotCancelDDLJob: mysql.ErrCannotCancelDDLJob, + } + terror.ErrClassToMySQLCodes[terror.ClassAdmin] = mySQLErrCodes +} diff --git a/util/admin/admin_test.go b/util/admin/admin_test.go new file mode 100644 index 0000000..fe69236 --- /dev/null +++ b/util/admin/admin_test.go @@ -0,0 +1,352 @@ +// Copyright 2015 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package admin_test + +import ( + "testing" + + . "github.com/pingcap/check" + "github.com/pingcap/tidb/kv" + "github.com/pingcap/tidb/meta" + "github.com/pingcap/tidb/parser/model" + "github.com/pingcap/tidb/parser/mysql" + "github.com/pingcap/tidb/parser/terror" + "github.com/pingcap/tidb/store/mockstore" + . 
"github.com/pingcap/tidb/util/admin" + "github.com/pingcap/tidb/util/mock" + "github.com/pingcap/tidb/util/testleak" +) + +func TestT(t *testing.T) { + CustomVerboseFlag = true + TestingT(t) +} + +var _ = Suite(&testSuite{}) + +type testSuite struct { + store kv.Storage + ctx *mock.Context +} + +func (s *testSuite) SetUpSuite(c *C) { + testleak.BeforeTest() + var err error + s.store, err = mockstore.NewMockTikvStore() + c.Assert(err, IsNil) + s.ctx = mock.NewContext() + s.ctx.Store = s.store +} + +func (s *testSuite) TearDownSuite(c *C) { + err := s.store.Close() + c.Assert(err, IsNil) + testleak.AfterTest(c)() +} + +func (s *testSuite) TestGetDDLInfo(c *C) { + txn, err := s.store.Begin() + c.Assert(err, IsNil) + t := meta.NewMeta(txn) + + dbInfo2 := &model.DBInfo{ + ID: 2, + Name: model.NewCIStr("b"), + State: model.StateNone, + } + job := &model.Job{ + SchemaID: dbInfo2.ID, + Type: model.ActionCreateSchema, + RowCount: 0, + } + job1 := &model.Job{ + SchemaID: dbInfo2.ID, + Type: model.ActionAddIndex, + RowCount: 0, + } + err = t.EnQueueDDLJob(job) + c.Assert(err, IsNil) + info, err := GetDDLInfo(txn) + c.Assert(err, IsNil) + c.Assert(info.Jobs, HasLen, 1) + c.Assert(info.Jobs[0], DeepEquals, job) + c.Assert(info.ReorgHandle, Equals, int64(0)) + // Two jobs. + t = meta.NewMeta(txn, meta.AddIndexJobListKey) + err = t.EnQueueDDLJob(job1) + c.Assert(err, IsNil) + info, err = GetDDLInfo(txn) + c.Assert(err, IsNil) + c.Assert(info.Jobs, HasLen, 2) + c.Assert(info.Jobs[0], DeepEquals, job) + c.Assert(info.Jobs[1], DeepEquals, job1) + c.Assert(info.ReorgHandle, Equals, int64(0)) + err = txn.Rollback() + c.Assert(err, IsNil) +} + +func (s *testSuite) TestGetDDLJobs(c *C) { + txn, err := s.store.Begin() + c.Assert(err, IsNil) + t := meta.NewMeta(txn) + cnt := 10 + jobs := make([]*model.Job, cnt) + for i := 0; i < cnt; i++ { + jobs[i] = &model.Job{ + ID: int64(i), + SchemaID: 1, + Type: model.ActionCreateTable, + } + err = t.EnQueueDDLJob(jobs[i]) + c.Assert(err, IsNil) + currJobs, err1 := GetDDLJobs(txn) + c.Assert(err1, IsNil) + c.Assert(currJobs, HasLen, i+1) + } + + currJobs, err := GetDDLJobs(txn) + c.Assert(err, IsNil) + for i, job := range jobs { + c.Assert(job.ID, Equals, currJobs[i].ID) + c.Assert(job.SchemaID, Equals, int64(1)) + c.Assert(job.Type, Equals, model.ActionCreateTable) + } + + err = txn.Rollback() + c.Assert(err, IsNil) +} + +func isJobsSorted(jobs []*model.Job) bool { + if len(jobs) <= 1 { + return true + } + for i := 1; i < len(jobs); i++ { + if jobs[i].ID <= jobs[i-1].ID { + return false + } + } + return true +} + +func enQueueDDLJobs(c *C, t *meta.Meta, jobType model.ActionType, start, end int) { + for i := start; i < end; i++ { + job := &model.Job{ + ID: int64(i), + SchemaID: 1, + Type: jobType, + } + err := t.EnQueueDDLJob(job) + c.Assert(err, IsNil) + } +} + +func (s *testSuite) TestGetDDLJobsIsSort(c *C) { + txn, err := s.store.Begin() + c.Assert(err, IsNil) + + // insert 5 drop table jobs to DefaultJobListKey queue + t := meta.NewMeta(txn) + enQueueDDLJobs(c, t, model.ActionDropTable, 10, 15) + + // insert 5 create table jobs to DefaultJobListKey queue + enQueueDDLJobs(c, t, model.ActionCreateTable, 0, 5) + + // insert add index jobs to AddIndexJobListKey queue + t = meta.NewMeta(txn, meta.AddIndexJobListKey) + enQueueDDLJobs(c, t, model.ActionAddIndex, 5, 10) + + currJobs, err := GetDDLJobs(txn) + c.Assert(err, IsNil) + c.Assert(currJobs, HasLen, 15) + + isSort := isJobsSorted(currJobs) + c.Assert(isSort, Equals, true) + + err = txn.Rollback() + c.Assert(err, IsNil) 
+} + +func (s *testSuite) TestCancelJobs(c *C) { + txn, err := s.store.Begin() + c.Assert(err, IsNil) + t := meta.NewMeta(txn) + cnt := 10 + ids := make([]int64, cnt) + for i := 0; i < cnt; i++ { + job := &model.Job{ + ID: int64(i), + SchemaID: 1, + Type: model.ActionCreateTable, + } + if i == 0 { + job.State = model.JobStateDone + } + if i == 1 { + job.State = model.JobStateCancelled + } + ids[i] = int64(i) + err = t.EnQueueDDLJob(job) + c.Assert(err, IsNil) + } + + errs, err := CancelJobs(txn, ids) + c.Assert(err, IsNil) + for i, err := range errs { + if i == 0 { + c.Assert(err, NotNil) + continue + } + c.Assert(err, IsNil) + } + + errs, err = CancelJobs(txn, []int64{}) + c.Assert(err, IsNil) + c.Assert(errs, IsNil) + errs, err = CancelJobs(txn, []int64{-1}) + c.Assert(err, IsNil) + c.Assert(errs[0], NotNil) + c.Assert(errs[0].Error(), Matches, "*DDL Job:-1 not found") + + // test cancel finish job. + job := &model.Job{ + ID: 100, + SchemaID: 1, + Type: model.ActionCreateTable, + State: model.JobStateDone, + } + err = t.EnQueueDDLJob(job) + c.Assert(err, IsNil) + errs, err = CancelJobs(txn, []int64{100}) + c.Assert(err, IsNil) + c.Assert(errs[0], NotNil) + c.Assert(errs[0].Error(), Matches, "*This job:100 is finished, so can't be cancelled") + + // test can't cancelable job. + job.Type = model.ActionDropIndex + job.SchemaState = model.StateDeleteOnly + job.State = model.JobStateRunning + job.ID = 101 + err = t.EnQueueDDLJob(job) + c.Assert(err, IsNil) + errs, err = CancelJobs(txn, []int64{101}) + c.Assert(err, IsNil) + c.Assert(errs[0], NotNil) + c.Assert(errs[0].Error(), Matches, "*This job:101 is almost finished, can't be cancelled now") + + // When both types of jobs exist in the DDL queue, + // we first cancel the job with a larger ID. + job = &model.Job{ + ID: 1000, + SchemaID: 1, + TableID: 2, + Type: model.ActionAddIndex, + } + job1 := &model.Job{ + ID: 1001, + SchemaID: 1, + TableID: 2, + Type: model.ActionAddColumn, + } + job2 := &model.Job{ + ID: 1002, + SchemaID: 1, + TableID: 2, + Type: model.ActionAddIndex, + } + err = t.EnQueueDDLJob(job, meta.AddIndexJobListKey) + c.Assert(err, IsNil) + err = t.EnQueueDDLJob(job1) + c.Assert(err, IsNil) + err = t.EnQueueDDLJob(job2, meta.AddIndexJobListKey) + c.Assert(err, IsNil) + errs, err = CancelJobs(txn, []int64{job1.ID, job.ID, job2.ID}) + c.Assert(err, IsNil) + for _, err := range errs { + c.Assert(err, IsNil) + } + + err = txn.Rollback() + c.Assert(err, IsNil) +} + +func (s *testSuite) TestGetHistoryDDLJobs(c *C) { + txn, err := s.store.Begin() + c.Assert(err, IsNil) + t := meta.NewMeta(txn) + cnt := 11 + jobs := make([]*model.Job, cnt) + for i := 0; i < cnt; i++ { + jobs[i] = &model.Job{ + ID: int64(i), + SchemaID: 1, + Type: model.ActionCreateTable, + } + err = t.AddHistoryDDLJob(jobs[i], true) + c.Assert(err, IsNil) + historyJobs, err1 := GetHistoryDDLJobs(txn, DefNumHistoryJobs) + c.Assert(err1, IsNil) + if i+1 > MaxHistoryJobs { + c.Assert(historyJobs, HasLen, MaxHistoryJobs) + } else { + c.Assert(historyJobs, HasLen, i+1) + } + } + + delta := cnt - MaxHistoryJobs + historyJobs, err := GetHistoryDDLJobs(txn, DefNumHistoryJobs) + c.Assert(err, IsNil) + c.Assert(historyJobs, HasLen, MaxHistoryJobs) + l := len(historyJobs) - 1 + for i, job := range historyJobs { + c.Assert(job.ID, Equals, jobs[delta+l-i].ID) + c.Assert(job.SchemaID, Equals, int64(1)) + c.Assert(job.Type, Equals, model.ActionCreateTable) + } + + err = txn.Rollback() + c.Assert(err, IsNil) +} + +func (s *testSuite) TestIsJobRollbackable(c *C) { + cases := []struct 
{ + tp model.ActionType + state model.SchemaState + result bool + }{ + {model.ActionDropIndex, model.StateNone, true}, + {model.ActionDropIndex, model.StateDeleteOnly, false}, + {model.ActionDropSchema, model.StateDeleteOnly, false}, + {model.ActionDropColumn, model.StateDeleteOnly, false}, + } + job := &model.Job{} + for _, ca := range cases { + job.Type = ca.tp + job.SchemaState = ca.state + re := IsJobRollbackable(job) + c.Assert(re == ca.result, IsTrue) + } +} + +func (s *testSuite) TestError(c *C) { + kvErrs := []*terror.Error{ + ErrDataInConsistent, + ErrDDLJobNotFound, + ErrCancelFinishedDDLJob, + ErrCannotCancelDDLJob, + } + for _, err := range kvErrs { + code := err.ToSQLError().Code + c.Assert(code != mysql.ErrUnknown && code == uint16(err.Code()), IsTrue, Commentf("err: %v", err)) + } +} diff --git a/util/arena/arena.go b/util/arena/arena.go new file mode 100644 index 0000000..a8f8e1a --- /dev/null +++ b/util/arena/arena.go @@ -0,0 +1,80 @@ +// Copyright 2015 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package arena + +// Allocator pre-allocates memory to reduce memory allocation cost. +// It is not thread-safe. +type Allocator interface { + // Alloc allocates memory with 0 len and capacity cap. + Alloc(capacity int) []byte + + // AllocWithLen allocates memory with length and capacity. + AllocWithLen(length int, capacity int) []byte + + // Reset resets the arena offset. + // Make sure all the allocated memory is no longer in use. + Reset() +} + +// SimpleAllocator is a simple implementation of Allocator. +type SimpleAllocator struct { + arena []byte + off int +} + +type stdAllocator struct { +} + +func (a *stdAllocator) Alloc(capacity int) []byte { + return make([]byte, 0, capacity) +} + +func (a *stdAllocator) AllocWithLen(length int, capacity int) []byte { + return make([]byte, length, capacity) +} + +func (a *stdAllocator) Reset() { +} + +var _ Allocator = &stdAllocator{} + +// StdAllocator implements Allocator but does not pre-allocate memory. +var StdAllocator = &stdAllocator{} + +// NewAllocator creates an Allocator with a specified capacity. +func NewAllocator(capacity int) *SimpleAllocator { + return &SimpleAllocator{arena: make([]byte, 0, capacity)} +} + +// Alloc implements the Allocator.Alloc interface. +func (s *SimpleAllocator) Alloc(capacity int) []byte { + if s.off+capacity < cap(s.arena) { + slice := s.arena[s.off : s.off : s.off+capacity] + s.off += capacity + return slice + } + + return make([]byte, 0, capacity) +} + +// AllocWithLen implements the Allocator.AllocWithLen interface. +func (s *SimpleAllocator) AllocWithLen(length int, capacity int) []byte { + slice := s.Alloc(capacity) + return slice[:length:capacity] +} + +// Reset implements the Allocator.Reset interface. +func (s *SimpleAllocator) Reset() { + s.off = 0 +} diff --git a/util/arena/arena_test.go b/util/arena/arena_test.go new file mode 100644 index 0000000..649903b --- /dev/null +++ b/util/arena/arena_test.go @@ -0,0 +1,88 @@ +// Copyright 2015 PingCAP, Inc.
+// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package arena + +import ( + "testing" + + . "github.com/pingcap/check" +) + +func TestT(t *testing.T) { + TestingT(t) +} + +func TestSimpleArenaAllocator(t *testing.T) { + arena := NewAllocator(1000) + slice := arena.Alloc(10) + if arena.off != 10 { + t.Error("off not match, expect 10 bug got", arena.off) + } + + if len(slice) != 0 || cap(slice) != 10 { + t.Error("slice length or cap not match") + } + + slice = arena.Alloc(20) + if arena.off != 30 { + t.Error("off not match, expect 30 bug got", arena.off) + } + + if len(slice) != 0 || cap(slice) != 20 { + t.Error("slice length or cap not match") + } + + slice = arena.Alloc(1024) + if arena.off != 30 { + t.Error("off not match, expect 30 bug got", arena.off) + } + + if len(slice) != 0 || cap(slice) != 1024 { + t.Error("slice length or cap not match") + } + + slice = arena.AllocWithLen(2, 10) + if arena.off != 40 { + t.Error("off not match, expect 40 bug got", arena.off) + } + + if len(slice) != 2 || cap(slice) != 10 { + t.Error("slice length or cap not match") + } + + arena.Reset() + if arena.off != 0 || cap(arena.arena) != 1000 { + t.Error("off or cap not match") + } +} + +func TestStdAllocator(t *testing.T) { + slice := StdAllocator.Alloc(20) + if len(slice) != 0 { + t.Error("length not match") + } + + if cap(slice) != 20 { + t.Error("cap not match") + } + + slice = StdAllocator.AllocWithLen(10, 20) + if len(slice) != 10 { + t.Error("length not match") + } + + if cap(slice) != 20 { + t.Error("cap not match") + } +} diff --git a/util/chunk/chunk.go b/util/chunk/chunk.go new file mode 100644 index 0000000..be60028 --- /dev/null +++ b/util/chunk/chunk.go @@ -0,0 +1,592 @@ +// Copyright 2017 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package chunk + +import ( + "reflect" + "unsafe" + + "github.com/cznic/mathutil" + "github.com/pingcap/errors" + "github.com/pingcap/tidb/types" +) + +var msgErrSelNotNil = "The selection vector of Chunk is not nil. Please file a bug to the TiDB Team" + +// Chunk stores multiple rows of data in Apache Arrow format. +// See https://arrow.apache.org/docs/memory_layout.html +// Values are appended in compact format and can be directly accessed without decoding. +// When the chunk is done processing, we can reuse the allocated memory by resetting it. +type Chunk struct { + // sel indicates which rows are selected. + // If it is nil, all rows are selected. + sel []int + + columns []*Column + // numVirtualRows indicates the number of virtual rows, which have zero Column. + // It is used only when this Chunk doesn't hold any data, i.e. "len(columns)==0". 
+ numVirtualRows int + // capacity indicates the max number of rows this chunk can hold. + // TODO: replace all usages of capacity to requiredRows and remove this field + capacity int + + // requiredRows indicates how many rows the parent executor want. + requiredRows int +} + +// Capacity constants. +const ( + InitialCapacity = 32 + ZeroCapacity = 0 +) + +// NewChunkWithCapacity creates a new chunk with field types and capacity. +func NewChunkWithCapacity(fields []*types.FieldType, cap int) *Chunk { + return New(fields, cap, cap) //FIXME: in following PR. +} + +// New creates a new chunk. +// cap: the limit for the max number of rows. +// maxChunkSize: the max limit for the number of rows. +func New(fields []*types.FieldType, cap, maxChunkSize int) *Chunk { + chk := &Chunk{ + columns: make([]*Column, 0, len(fields)), + capacity: mathutil.Min(cap, maxChunkSize), + // set the default value of requiredRows to maxChunkSize to let chk.IsFull() behave + // like how we judge whether a chunk is full now, then the statement + // "chk.NumRows() < maxChunkSize" + // equals to "!chk.IsFull()". + requiredRows: maxChunkSize, + } + + for _, f := range fields { + chk.columns = append(chk.columns, NewColumn(f, chk.capacity)) + } + + return chk +} + +// renewWithCapacity creates a new Chunk based on an existing Chunk with capacity. The newly +// created Chunk has the same data schema with the old Chunk. +func renewWithCapacity(chk *Chunk, cap, maxChunkSize int) *Chunk { + newChk := new(Chunk) + if chk.columns == nil { + return newChk + } + newChk.columns = renewColumns(chk.columns, cap) + newChk.numVirtualRows = 0 + newChk.capacity = cap + newChk.requiredRows = maxChunkSize + return newChk +} + +// Renew creates a new Chunk based on an existing Chunk. The newly created Chunk +// has the same data schema with the old Chunk. The capacity of the new Chunk +// might be doubled based on the capacity of the old Chunk and the maxChunkSize. +// chk: old chunk(often used in previous call). +// maxChunkSize: the limit for the max number of rows. +func Renew(chk *Chunk, maxChunkSize int) *Chunk { + newCap := reCalcCapacity(chk, maxChunkSize) + return renewWithCapacity(chk, newCap, maxChunkSize) +} + +// renewColumns creates the columns of a Chunk. The capacity of the newly +// created columns is equal to cap. +func renewColumns(oldCol []*Column, cap int) []*Column { + columns := make([]*Column, 0, len(oldCol)) + for _, col := range oldCol { + columns = append(columns, newColumn(col.typeSize(), cap)) + } + return columns +} + +// MemoryUsage returns the total memory usage of a Chunk in B. +// We ignore the size of Column.length and Column.nullCount +// since they have little effect of the total memory usage. +func (c *Chunk) MemoryUsage() (sum int64) { + for _, col := range c.columns { + curColMemUsage := int64(unsafe.Sizeof(*col)) + int64(cap(col.nullBitmap)) + int64(cap(col.offsets)*4) + int64(cap(col.data)) + int64(cap(col.elemBuf)) + sum += curColMemUsage + } + return +} + +// newFixedLenColumn creates a fixed length Column with elemLen and initial data capacity. +func newFixedLenColumn(elemLen, cap int) *Column { + return &Column{ + elemBuf: make([]byte, elemLen), + data: make([]byte, 0, cap*elemLen), + nullBitmap: make([]byte, 0, (cap+7)>>3), + } +} + +// newVarLenColumn creates a variable length Column with initial data capacity. +func newVarLenColumn(cap int, old *Column) *Column { + estimatedElemLen := 8 + // For varLenColumn (e.g. varchar), the accurate length of an element is unknown. 
+ // Therefore, in the first executor.Next we use an experience value -- 8 (so it may make runtime.growslice) + // but in the following Next call we estimate the length as AVG x 1.125 elemLen of the previous call. + if old != nil && old.length != 0 { + estimatedElemLen = (len(old.data) + len(old.data)/8) / old.length + } + return &Column{ + offsets: make([]int64, 1, cap+1), + data: make([]byte, 0, cap*estimatedElemLen), + nullBitmap: make([]byte, 0, (cap+7)>>3), + } +} + +// RequiredRows returns how many rows is considered full. +func (c *Chunk) RequiredRows() int { + return c.requiredRows +} + +// SetRequiredRows sets the number of required rows. +func (c *Chunk) SetRequiredRows(requiredRows, maxChunkSize int) *Chunk { + if requiredRows <= 0 || requiredRows > maxChunkSize { + requiredRows = maxChunkSize + } + c.requiredRows = requiredRows + return c +} + +// IsFull returns if this chunk is considered full. +func (c *Chunk) IsFull() bool { + return c.NumRows() >= c.requiredRows +} + +// MakeRef makes Column in "dstColIdx" reference to Column in "srcColIdx". +func (c *Chunk) MakeRef(srcColIdx, dstColIdx int) { + c.columns[dstColIdx] = c.columns[srcColIdx] +} + +// MakeRefTo copies columns `src.columns[srcColIdx]` to `c.columns[dstColIdx]`. +func (c *Chunk) MakeRefTo(dstColIdx int, src *Chunk, srcColIdx int) error { + if c.sel != nil || src.sel != nil { + return errors.New(msgErrSelNotNil) + } + c.columns[dstColIdx] = src.columns[srcColIdx] + return nil +} + +// SwapColumn swaps Column "c.columns[colIdx]" with Column +// "other.columns[otherIdx]". If there exists columns refer to the Column to be +// swapped, we need to re-build the reference. +func (c *Chunk) SwapColumn(colIdx int, other *Chunk, otherIdx int) error { + if c.sel != nil || other.sel != nil { + return errors.New(msgErrSelNotNil) + } + // Find the leftmost Column of the reference which is the actual Column to + // be swapped. + for i := 0; i < colIdx; i++ { + if c.columns[i] == c.columns[colIdx] { + colIdx = i + } + } + for i := 0; i < otherIdx; i++ { + if other.columns[i] == other.columns[otherIdx] { + otherIdx = i + } + } + + // Find the columns which refer to the actual Column to be swapped. + refColsIdx := make([]int, 0, len(c.columns)-colIdx) + for i := colIdx; i < len(c.columns); i++ { + if c.columns[i] == c.columns[colIdx] { + refColsIdx = append(refColsIdx, i) + } + } + refColsIdx4Other := make([]int, 0, len(other.columns)-otherIdx) + for i := otherIdx; i < len(other.columns); i++ { + if other.columns[i] == other.columns[otherIdx] { + refColsIdx4Other = append(refColsIdx4Other, i) + } + } + + // Swap columns from two chunks. + c.columns[colIdx], other.columns[otherIdx] = other.columns[otherIdx], c.columns[colIdx] + + // Rebuild the reference. + for _, i := range refColsIdx { + c.MakeRef(colIdx, i) + } + for _, i := range refColsIdx4Other { + other.MakeRef(otherIdx, i) + } + return nil +} + +// SwapColumns swaps columns with another Chunk. +func (c *Chunk) SwapColumns(other *Chunk) { + c.sel, other.sel = other.sel, c.sel + c.columns, other.columns = other.columns, c.columns + c.numVirtualRows, other.numVirtualRows = other.numVirtualRows, c.numVirtualRows +} + +// SetNumVirtualRows sets the virtual row number for a Chunk. +// It should only be used when there exists no Column in the Chunk. +func (c *Chunk) SetNumVirtualRows(numVirtualRows int) { + c.numVirtualRows = numVirtualRows +} + +// Reset resets the chunk, so the memory it allocated can be reused. 
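A quick sketch of how requiredRows and IsFull interact, using New, SetRequiredRows and IsFull from above plus the AppendInt64 helper defined later in this file (assuming the github.com/pingcap/tidb/util/chunk, .../types and .../parser/mysql packages from this diff are imported): the parent executor states how many rows it needs, and the producer stops filling the chunk once that many rows are present.

fields := []*types.FieldType{{Tp: mysql.TypeLonglong}}
chk := chunk.New(fields, 32, 1024) // capacity 32, maxChunkSize 1024

chk.SetRequiredRows(2, 1024) // the parent only needs 2 rows this round
chk.AppendInt64(0, 1)
fmt.Println(chk.IsFull()) // false: 1 row < 2 required rows
chk.AppendInt64(0, 2)
fmt.Println(chk.IsFull()) // true: the producer can stop filling this chunk

chk.SetRequiredRows(0, 1024)    // out-of-range values fall back to maxChunkSize
fmt.Println(chk.RequiredRows()) // 1024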
+// Make sure all the data in the chunk is not used anymore before you reuse this chunk. +func (c *Chunk) Reset() { + c.sel = nil + if c.columns == nil { + return + } + for _, col := range c.columns { + col.reset() + } + c.numVirtualRows = 0 +} + +// CopyConstruct creates a new chunk and copies this chunk's data into it. +func (c *Chunk) CopyConstruct() *Chunk { + newChk := &Chunk{numVirtualRows: c.numVirtualRows, capacity: c.capacity, columns: make([]*Column, len(c.columns))} + for i := range c.columns { + newChk.columns[i] = c.columns[i].CopyConstruct(nil) + } + if c.sel != nil { + newChk.sel = make([]int, len(c.sel)) + copy(newChk.sel, c.sel) + } + return newChk +} + +// GrowAndReset resets the Chunk and doubles the capacity of the Chunk. +// The doubled capacity should not be larger than maxChunkSize. +// TODO: this method will be used in following PR. +func (c *Chunk) GrowAndReset(maxChunkSize int) { + c.sel = nil + if c.columns == nil { + return + } + newCap := reCalcCapacity(c, maxChunkSize) + if newCap <= c.capacity { + c.Reset() + return + } + c.capacity = newCap + c.columns = renewColumns(c.columns, newCap) + c.numVirtualRows = 0 + c.requiredRows = maxChunkSize +} + +// reCalcCapacity calculates the capacity for another Chunk based on the current +// Chunk. The new capacity is doubled only when the current Chunk is full. +func reCalcCapacity(c *Chunk, maxChunkSize int) int { + if c.NumRows() < c.capacity { + return c.capacity + } + return mathutil.Min(c.capacity*2, maxChunkSize) +} + +// Capacity returns the capacity of the Chunk. +func (c *Chunk) Capacity() int { + return c.capacity +} + +// NumCols returns the number of columns in the chunk. +func (c *Chunk) NumCols() int { + return len(c.columns) +} + +// NumRows returns the number of rows in the chunk. +func (c *Chunk) NumRows() int { + if c.sel != nil { + return len(c.sel) + } + if c.NumCols() == 0 { + return c.numVirtualRows + } + return c.columns[0].length +} + +// GetRow gets the Row in the chunk with the row index. +func (c *Chunk) GetRow(idx int) Row { + if c.sel != nil { + // mapping the logical RowIdx to the actual physical RowIdx; + // for example, if the Sel is [1, 5, 6], then + // logical 0 -> physical 1, + // logical 1 -> physical 5, + // logical 2 -> physical 6. + // Then when we iterate this Chunk according to Row, only selected rows will be + // accessed while all filtered rows will be ignored. + return Row{c: c, idx: int(c.sel[idx])} + } + return Row{c: c, idx: idx} +} + +// AppendRow appends a row to the chunk. +func (c *Chunk) AppendRow(row Row) { + c.AppendPartialRow(0, row) + c.numVirtualRows++ +} + +// AppendPartialRow appends a row to the chunk. +func (c *Chunk) AppendPartialRow(colIdx int, row Row) { + c.appendSel(colIdx) + for i, rowCol := range row.c.columns { + chkCol := c.columns[colIdx+i] + chkCol.appendNullBitmap(!rowCol.IsNull(row.idx)) + if rowCol.isFixed() { + elemLen := len(rowCol.elemBuf) + offset := row.idx * elemLen + chkCol.data = append(chkCol.data, rowCol.data[offset:offset+elemLen]...) + } else { + start, end := rowCol.offsets[row.idx], rowCol.offsets[row.idx+1] + chkCol.data = append(chkCol.data, rowCol.data[start:end]...) + chkCol.offsets = append(chkCol.offsets, int64(len(chkCol.data))) + } + chkCol.length++ + } +} + +// preAlloc pre-allocates the memory space in a Chunk to store the Row. +// NOTE: only used in test. +// 1. The Chunk must be empty or holds no useful data. +// 2. The schema of the Row must be the same with the Chunk. +// 3. 
This API is paired with the `Insert()` function, which inserts all the +// rows data into the Chunk after the pre-allocation. +// 4. We set the null bitmap here instead of in the Insert() function because +// when the Insert() function is called parallelly, the data race on a byte +// can not be avoided although the manipulated bits are different inside a +// byte. +func (c *Chunk) preAlloc(row Row) (rowIdx uint32) { + rowIdx = uint32(c.NumRows()) + for i, srcCol := range row.c.columns { + dstCol := c.columns[i] + dstCol.appendNullBitmap(!srcCol.IsNull(row.idx)) + elemLen := len(srcCol.elemBuf) + if !srcCol.isFixed() { + elemLen = int(srcCol.offsets[row.idx+1] - srcCol.offsets[row.idx]) + dstCol.offsets = append(dstCol.offsets, int64(len(dstCol.data)+elemLen)) + } + dstCol.length++ + needCap := len(dstCol.data) + elemLen + if needCap <= cap(dstCol.data) { + (*reflect.SliceHeader)(unsafe.Pointer(&dstCol.data)).Len = len(dstCol.data) + elemLen + continue + } + // Grow the capacity according to golang.growslice. + // Implementation differences with golang: + // 1. We double the capacity when `dstCol.data < 1024*elemLen bytes` but + // not `1024 bytes`. + // 2. We expand the capacity to 1.5*originCap rather than 1.25*originCap + // during the slow-increasing phase. + newCap := cap(dstCol.data) + doubleCap := newCap << 1 + if needCap > doubleCap { + newCap = needCap + } else { + avgElemLen := elemLen + if !srcCol.isFixed() { + avgElemLen = len(dstCol.data) / len(dstCol.offsets) + } + // slowIncThreshold indicates the threshold exceeding which the + // dstCol.data capacity increase fold decreases from 2 to 1.5. + slowIncThreshold := 1024 * avgElemLen + if len(dstCol.data) < slowIncThreshold { + newCap = doubleCap + } else { + for 0 < newCap && newCap < needCap { + newCap += newCap / 2 + } + if newCap <= 0 { + newCap = needCap + } + } + } + dstCol.data = make([]byte, len(dstCol.data)+elemLen, newCap) + } + return +} + +// insert inserts `row` on the position specified by `rowIdx`. +// NOTE: only used in test. +// Note: Insert will cover the origin data, it should be called after +// PreAlloc. +func (c *Chunk) insert(rowIdx int, row Row) { + for i, srcCol := range row.c.columns { + if row.IsNull(i) { + continue + } + dstCol := c.columns[i] + var srcStart, srcEnd, destStart, destEnd int + if srcCol.isFixed() { + srcElemLen, destElemLen := len(srcCol.elemBuf), len(dstCol.elemBuf) + srcStart, destStart = row.idx*srcElemLen, rowIdx*destElemLen + srcEnd, destEnd = srcStart+srcElemLen, destStart+destElemLen + } else { + srcStart, srcEnd = int(srcCol.offsets[row.idx]), int(srcCol.offsets[row.idx+1]) + destStart, destEnd = int(dstCol.offsets[rowIdx]), int(dstCol.offsets[rowIdx+1]) + } + copy(dstCol.data[destStart:destEnd], srcCol.data[srcStart:srcEnd]) + } +} + +// Append appends rows in [begin, end) in another Chunk to a Chunk. +func (c *Chunk) Append(other *Chunk, begin, end int) { + for colID, src := range other.columns { + dst := c.columns[colID] + if src.isFixed() { + elemLen := len(src.elemBuf) + dst.data = append(dst.data, src.data[begin*elemLen:end*elemLen]...) + } else { + beginOffset, endOffset := src.offsets[begin], src.offsets[end] + dst.data = append(dst.data, src.data[beginOffset:endOffset]...) 
+ for i := begin; i < end; i++ { + dst.offsets = append(dst.offsets, dst.offsets[len(dst.offsets)-1]+src.offsets[i+1]-src.offsets[i]) + } + } + for i := begin; i < end; i++ { + c.appendSel(colID) + dst.appendNullBitmap(!src.IsNull(i)) + dst.length++ + } + } + c.numVirtualRows += end - begin +} + +// TruncateTo truncates rows from tail to head in a Chunk to "numRows" rows. +func (c *Chunk) TruncateTo(numRows int) { + c.Reconstruct() + for _, col := range c.columns { + if col.isFixed() { + elemLen := len(col.elemBuf) + col.data = col.data[:numRows*elemLen] + } else { + col.data = col.data[:col.offsets[numRows]] + col.offsets = col.offsets[:numRows+1] + } + col.length = numRows + bitmapLen := (col.length + 7) / 8 + col.nullBitmap = col.nullBitmap[:bitmapLen] + if col.length%8 != 0 { + // When we append null, we simply increment the nullCount, + // so we need to clear the unused bits in the last bitmap byte. + lastByte := col.nullBitmap[bitmapLen-1] + unusedBitsLen := 8 - uint(col.length%8) + lastByte <<= unusedBitsLen + lastByte >>= unusedBitsLen + col.nullBitmap[bitmapLen-1] = lastByte + } + } + c.numVirtualRows = numRows +} + +// AppendNull appends a null value to the chunk. +func (c *Chunk) AppendNull(colIdx int) { + c.appendSel(colIdx) + c.columns[colIdx].AppendNull() +} + +// AppendInt64 appends a int64 value to the chunk. +func (c *Chunk) AppendInt64(colIdx int, i int64) { + c.appendSel(colIdx) + c.columns[colIdx].AppendInt64(i) +} + +// AppendUint64 appends a uint64 value to the chunk. +func (c *Chunk) AppendUint64(colIdx int, u uint64) { + c.appendSel(colIdx) + c.columns[colIdx].AppendUint64(u) +} + +// AppendFloat32 appends a float32 value to the chunk. +func (c *Chunk) AppendFloat32(colIdx int, f float32) { + c.appendSel(colIdx) + c.columns[colIdx].AppendFloat32(f) +} + +// AppendFloat64 appends a float64 value to the chunk. +func (c *Chunk) AppendFloat64(colIdx int, f float64) { + c.appendSel(colIdx) + c.columns[colIdx].AppendFloat64(f) +} + +// AppendString appends a string value to the chunk. +func (c *Chunk) AppendString(colIdx int, str string) { + c.appendSel(colIdx) + c.columns[colIdx].AppendString(str) +} + +// AppendBytes appends a bytes value to the chunk. +func (c *Chunk) AppendBytes(colIdx int, b []byte) { + c.appendSel(colIdx) + c.columns[colIdx].AppendBytes(b) +} + +func (c *Chunk) appendSel(colIdx int) { + if colIdx == 0 && c.sel != nil { // use column 0 as standard + c.sel = append(c.sel, c.columns[0].length) + } +} + +// AppendDatum appends a datum into the chunk. +func (c *Chunk) AppendDatum(colIdx int, d *types.Datum) { + switch d.Kind() { + case types.KindNull: + c.AppendNull(colIdx) + case types.KindInt64: + c.AppendInt64(colIdx, d.GetInt64()) + case types.KindUint64: + c.AppendUint64(colIdx, d.GetUint64()) + case types.KindFloat32: + c.AppendFloat32(colIdx, d.GetFloat32()) + case types.KindFloat64: + c.AppendFloat64(colIdx, d.GetFloat64()) + case types.KindString, types.KindBytes: + c.AppendBytes(colIdx, d.GetBytes()) + } +} + +// Column returns the specific column. +func (c *Chunk) Column(colIdx int) *Column { + return c.columns[colIdx] +} + +// SetCol sets the colIdx Column to col and returns the old Column. +func (c *Chunk) SetCol(colIdx int, col *Column) *Column { + if col == c.columns[colIdx] { + return nil + } + old := c.columns[colIdx] + c.columns[colIdx] = col + return old +} + +// Sel returns Sel of this Chunk. +func (c *Chunk) Sel() []int { + return c.sel +} + +// SetSel sets a Sel for this Chunk. 
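Putting the append helpers together, a minimal sketch (same imports as the previous sketch; Row's getters such as GetString and IsNull come from the package's row.go, which is not part of this hunk):

fields := []*types.FieldType{
	{Tp: mysql.TypeLonglong},
	{Tp: mysql.TypeVarchar},
}
chk := chunk.NewChunkWithCapacity(fields, 4)

chk.AppendInt64(0, 42)
chk.AppendString(1, "foo")
chk.AppendNull(0)
chk.AppendString(1, "bar")

for i := 0; i < chk.NumRows(); i++ {
	row := chk.GetRow(i)
	fmt.Println(row.IsNull(0), row.GetString(1)) // "false foo", then "true bar"
}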
+func (c *Chunk) SetSel(sel []int) { + c.sel = sel +} + +// Reconstruct removes all filtered rows in this Chunk. +func (c *Chunk) Reconstruct() { + if c.sel == nil { + return + } + for _, col := range c.columns { + col.reconstruct(c.sel) + } + c.numVirtualRows = len(c.sel) + c.sel = nil +} diff --git a/util/chunk/chunk_util.go b/util/chunk/chunk_util.go new file mode 100644 index 0000000..688bfb4 --- /dev/null +++ b/util/chunk/chunk_util.go @@ -0,0 +1,112 @@ +// Copyright 2018 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package chunk + +import "github.com/pingcap/errors" + +// CopySelectedJoinRows copies the selected joined rows from the source Chunk +// to the destination Chunk. +// Return true if at least one joined row was selected. +// +// NOTE: All the outer rows in the source Chunk should be the same. +func CopySelectedJoinRows(src *Chunk, innerColOffset, outerColOffset int, selected []bool, dst *Chunk) (bool, error) { + if src.NumRows() == 0 { + return false, nil + } + if src.sel != nil || dst.sel != nil { + return false, errors.New(msgErrSelNotNil) + } + + numSelected := copySelectedInnerRows(innerColOffset, outerColOffset, src, selected, dst) + copyOuterRows(innerColOffset, outerColOffset, src, numSelected, dst) + dst.numVirtualRows += numSelected + return numSelected > 0, nil +} + +// copySelectedInnerRows copies the selected inner rows from the source Chunk +// to the destination Chunk. +// return the number of rows which is selected. +func copySelectedInnerRows(innerColOffset, outerColOffset int, src *Chunk, selected []bool, dst *Chunk) int { + oldLen := dst.columns[innerColOffset].length + var srcCols []*Column + if innerColOffset == 0 { + srcCols = src.columns[:outerColOffset] + } else { + srcCols = src.columns[innerColOffset:] + } + for j, srcCol := range srcCols { + dstCol := dst.columns[innerColOffset+j] + if srcCol.isFixed() { + for i := 0; i < len(selected); i++ { + if !selected[i] { + continue + } + dstCol.appendNullBitmap(!srcCol.IsNull(i)) + dstCol.length++ + + elemLen := len(srcCol.elemBuf) + offset := i * elemLen + dstCol.data = append(dstCol.data, srcCol.data[offset:offset+elemLen]...) + } + } else { + for i := 0; i < len(selected); i++ { + if !selected[i] { + continue + } + dstCol.appendNullBitmap(!srcCol.IsNull(i)) + dstCol.length++ + + start, end := srcCol.offsets[i], srcCol.offsets[i+1] + dstCol.data = append(dstCol.data, srcCol.data[start:end]...) + dstCol.offsets = append(dstCol.offsets, int64(len(dstCol.data))) + } + } + } + return dst.columns[innerColOffset].length - oldLen +} + +// copyOuterRows copies the continuous 'numRows' outer rows in the source Chunk +// to the destination Chunk. 
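How the selection vector behaves in practice, sketched with the SetSel, Sel and Reconstruct methods defined above (same imports as before): sel only changes which physical rows are visible, while Reconstruct compacts the data and clears the vector.

chk := chunk.NewChunkWithCapacity([]*types.FieldType{{Tp: mysql.TypeLonglong}}, 8)
for i := 0; i < 5; i++ {
	chk.AppendInt64(0, int64(i)) // physical rows hold 0 1 2 3 4
}

chk.SetSel([]int{1, 3})                // logically keep physical rows 1 and 3
fmt.Println(chk.NumRows())             // 2
fmt.Println(chk.GetRow(0).GetInt64(0)) // 1: logical row 0 maps to physical row 1

chk.Reconstruct() // physically drop the filtered rows
fmt.Println(chk.NumRows(), chk.Sel() == nil) // 2 true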
+func copyOuterRows(innerColOffset, outerColOffset int, src *Chunk, numRows int, dst *Chunk) { + if numRows <= 0 { + return + } + row := src.GetRow(0) + var srcCols []*Column + if innerColOffset == 0 { + srcCols = src.columns[outerColOffset:] + } else { + srcCols = src.columns[:innerColOffset] + } + for i, srcCol := range srcCols { + dstCol := dst.columns[outerColOffset+i] + dstCol.appendMultiSameNullBitmap(!srcCol.IsNull(row.idx), numRows) + dstCol.length += numRows + if srcCol.isFixed() { + elemLen := len(srcCol.elemBuf) + start := row.idx * elemLen + end := start + numRows*elemLen + dstCol.data = append(dstCol.data, srcCol.data[start:end]...) + } else { + start, end := srcCol.offsets[row.idx], srcCol.offsets[row.idx+numRows] + dstCol.data = append(dstCol.data, srcCol.data[start:end]...) + offsets := dstCol.offsets + elemLen := srcCol.offsets[row.idx+1] - srcCol.offsets[row.idx] + for j := 0; j < numRows; j++ { + offsets = append(offsets, int64(offsets[len(offsets)-1]+elemLen)) + } + dstCol.offsets = offsets + } + } +} diff --git a/util/chunk/codec.go b/util/chunk/codec.go new file mode 100644 index 0000000..913b170 --- /dev/null +++ b/util/chunk/codec.go @@ -0,0 +1,353 @@ +// Copyright 2018 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package chunk + +import ( + "encoding/binary" + "reflect" + "unsafe" + + "github.com/cznic/mathutil" + "github.com/pingcap/tidb/parser/mysql" + "github.com/pingcap/tidb/types" +) + +// Codec is used to: +// 1. encode a Chunk to a byte slice. +// 2. decode a Chunk from a byte slice. +type Codec struct { + // colTypes is used to check whether a Column is fixed sized and what the + // fixed size for every element. + // NOTE: It's only used for decoding. + colTypes []*types.FieldType +} + +// NewCodec creates a new Codec object for encode or decode a Chunk. +func NewCodec(colTypes []*types.FieldType) *Codec { + return &Codec{colTypes} +} + +// Encode encodes a Chunk to a byte slice. +func (c *Codec) Encode(chk *Chunk) []byte { + buffer := make([]byte, 0, chk.MemoryUsage()) + for _, col := range chk.columns { + buffer = c.encodeColumn(buffer, col) + } + return buffer +} + +func (c *Codec) encodeColumn(buffer []byte, col *Column) []byte { + var lenBuffer [4]byte + // encode length. + binary.LittleEndian.PutUint32(lenBuffer[:], uint32(col.length)) + buffer = append(buffer, lenBuffer[:4]...) + + // encode nullCount. + binary.LittleEndian.PutUint32(lenBuffer[:], uint32(col.nullCount())) + buffer = append(buffer, lenBuffer[:4]...) + + // encode nullBitmap. + if col.nullCount() > 0 { + numNullBitmapBytes := (col.length + 7) / 8 + buffer = append(buffer, col.nullBitmap[:numNullBitmapBytes]...) + } + + // encode offsets. + if !col.isFixed() { + numOffsetBytes := (col.length + 1) * 8 + offsetBytes := i64SliceToBytes(col.offsets) + buffer = append(buffer, offsetBytes[:numOffsetBytes]...) + } + + // encode data. + buffer = append(buffer, col.data...) 
+ return buffer +} + +func i64SliceToBytes(i64s []int64) (b []byte) { + if len(i64s) == 0 { + return nil + } + hdr := (*reflect.SliceHeader)(unsafe.Pointer(&b)) + hdr.Len = len(i64s) * 8 + hdr.Cap = hdr.Len + hdr.Data = uintptr(unsafe.Pointer(&i64s[0])) + return b +} + +// Decode decodes a Chunk from a byte slice, return the remained unused bytes. +func (c *Codec) Decode(buffer []byte) (*Chunk, []byte) { + chk := &Chunk{} + for ordinal := 0; len(buffer) > 0; ordinal++ { + col := &Column{} + buffer = c.decodeColumn(buffer, col, ordinal) + chk.columns = append(chk.columns, col) + } + return chk, buffer +} + +// DecodeToChunk decodes a Chunk from a byte slice, return the remained unused bytes. +func (c *Codec) DecodeToChunk(buffer []byte, chk *Chunk) (remained []byte) { + for i := 0; i < len(chk.columns); i++ { + buffer = c.decodeColumn(buffer, chk.columns[i], i) + } + return buffer +} + +// decodeColumn decodes a Column from a byte slice, return the remained unused bytes. +func (c *Codec) decodeColumn(buffer []byte, col *Column, ordinal int) (remained []byte) { + // Todo(Shenghui Wu): Optimize all data is null. + // decode length. + col.length = int(binary.LittleEndian.Uint32(buffer)) + buffer = buffer[4:] + + // decode nullCount. + nullCount := int(binary.LittleEndian.Uint32(buffer)) + buffer = buffer[4:] + + // decode nullBitmap. + if nullCount > 0 { + numNullBitmapBytes := (col.length + 7) / 8 + col.nullBitmap = buffer[:numNullBitmapBytes:numNullBitmapBytes] + buffer = buffer[numNullBitmapBytes:] + } else { + c.setAllNotNull(col) + } + + // decode offsets. + numFixedBytes := getFixedLen(c.colTypes[ordinal]) + numDataBytes := int64(numFixedBytes * col.length) + if numFixedBytes == -1 { + numOffsetBytes := (col.length + 1) * 8 + col.offsets = bytesToI64Slice(buffer[:numOffsetBytes:numOffsetBytes]) + buffer = buffer[numOffsetBytes:] + numDataBytes = col.offsets[col.length] + } else if cap(col.elemBuf) < numFixedBytes { + col.elemBuf = make([]byte, numFixedBytes) + } + + // decode data. + col.data = buffer[:numDataBytes:numDataBytes] + return buffer[numDataBytes:] +} + +var allNotNullBitmap [128]byte + +func (c *Codec) setAllNotNull(col *Column) { + numNullBitmapBytes := (col.length + 7) / 8 + col.nullBitmap = col.nullBitmap[:0] + for i := 0; i < numNullBitmapBytes; { + numAppendBytes := mathutil.Min(numNullBitmapBytes-i, cap(allNotNullBitmap)) + col.nullBitmap = append(col.nullBitmap, allNotNullBitmap[:numAppendBytes]...) + i += numAppendBytes + } +} + +func bytesToI64Slice(b []byte) (i64s []int64) { + if len(b) == 0 { + return nil + } + hdr := (*reflect.SliceHeader)(unsafe.Pointer(&i64s)) + hdr.Len = len(b) / 8 + hdr.Cap = hdr.Len + hdr.Data = uintptr(unsafe.Pointer(&b[0])) + return i64s +} + +// varElemLen indicates this Column is a variable length Column. +const varElemLen = -1 + +func getFixedLen(colType *types.FieldType) int { + switch colType.Tp { + case mysql.TypeFloat: + return 4 + case mysql.TypeTiny, mysql.TypeShort, mysql.TypeInt24, mysql.TypeLong, + mysql.TypeLonglong, mysql.TypeDouble, mysql.TypeYear, mysql.TypeDuration: + return 8 + default: + return varElemLen + } +} + +// GetFixedLen get the memory size of a fixed-length type. +// if colType is not fixed-length, it returns varElemLen, aka -1. +func GetFixedLen(colType *types.FieldType) int { + return getFixedLen(colType) +} + +// EstimateTypeWidth estimates the average width of values of the type. 
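A round trip through the Codec defined above, the same pattern the codec tests later in this diff exercise (same imports as before): Encode flattens every column into the length/nullCount/nullBitmap/offsets/data layout, and DecodeToChunk fills a chunk that already has the matching schema.

colTypes := []*types.FieldType{
	{Tp: mysql.TypeLonglong},
	{Tp: mysql.TypeVarchar},
}
src := chunk.NewChunkWithCapacity(colTypes, 4)
src.AppendInt64(0, 7)
src.AppendString(1, "hello")

codec := chunk.NewCodec(colTypes)
buf := codec.Encode(src)

dst := chunk.NewChunkWithCapacity(colTypes, 4)
remained := codec.DecodeToChunk(buf, dst)
fmt.Println(len(remained), dst.GetRow(0).GetString(1)) // 0 hello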
+// This is used by the planner, which doesn't require absolutely correct results; +// it's OK (and expected) to guess if we don't know for sure. +// +// mostly study from https://github.com/postgres/postgres/blob/REL_12_STABLE/src/backend/utils/cache/lsyscache.c#L2356 +func EstimateTypeWidth(padChar bool, colType *types.FieldType) int { + colLen := getFixedLen(colType) + // Easy if it's a fixed-width type + if colLen != varElemLen { + return colLen + } + + colLen = colType.Flen + if colLen > 0 { + /* + * If PAD_CHAR_TO_FULL_LENGTH is enabled, and type is CHAR, + * the colType.Flen is also the only width. + */ + if padChar && colType.Tp == mysql.TypeString { + return colLen + } + if colLen <= 32 { + return colLen + } + if colLen < 1000 { + return 32 + (colLen-32)/2 // assume 50% + } + /* + * Beyond 1000, assume we're looking at something like + * "varchar(10000)" where the limit isn't actually reached often, and + * use a fixed estimate. + */ + return 32 + (1000-32)/2 + } + // Oops, we have no idea ... wild guess time. + return 32 +} + +func init() { + for i := 0; i < 128; i++ { + allNotNullBitmap[i] = 0xFF + } +} + +// Decoder decodes the data returned from the coprocessor and stores the result in Chunk. +// How Decoder works: +// 1. Initialization phase: Decode a whole input byte slice to Decoder.intermChk(intermediate chunk) using Codec.Decode. +// intermChk is introduced to simplify the implementation of decode phase. This phase uses pointer operations with +// less CPU and memory cost. +// 2. Decode phase: +// 2.1 Set the number of rows to be decoded to a value that is a multiple of 8 and greater than +// `chk.RequiredRows() - chk.NumRows()`. This reduces the overhead of copying the srcCol.nullBitMap into +// destCol.nullBitMap. +// 2.2 Append srcCol.offsets to destCol.offsets when the elements is of var-length type. And further adjust the +// offsets according to descCol.offsets[destCol.length]-srcCol.offsets[0]. +// 2.3 Append srcCol.nullBitMap to destCol.nullBitMap. +// 3. Go to step 1 when the input byte slice is consumed. +type Decoder struct { + intermChk *Chunk + codec *Codec + remainedRows int +} + +// NewDecoder creates a new Decoder object for decode a Chunk. +func NewDecoder(chk *Chunk, colTypes []*types.FieldType) *Decoder { + return &Decoder{intermChk: chk, codec: NewCodec(colTypes), remainedRows: 0} +} + +// Decode decodes multiple rows of Decoder.intermChk and stores the result in chk. +func (c *Decoder) Decode(chk *Chunk) { + requiredRows := chk.RequiredRows() - chk.NumRows() + // Set the requiredRows to a multiple of 8. + requiredRows = (requiredRows + 7) >> 3 << 3 + if requiredRows > c.remainedRows { + requiredRows = c.remainedRows + } + for i := 0; i < chk.NumCols(); i++ { + c.decodeColumn(chk, i, requiredRows) + } + c.remainedRows -= requiredRows +} + +// Reset decodes data and store the result in Decoder.intermChk. This decode phase uses pointer operations with less +// CPU and memory costs. +func (c *Decoder) Reset(data []byte) { + c.codec.DecodeToChunk(data, c.intermChk) + c.remainedRows = c.intermChk.NumRows() +} + +// IsFinished indicates whether Decoder.intermChk has been dried up. +func (c *Decoder) IsFinished() bool { + return c.remainedRows == 0 +} + +// RemainedRows indicates Decoder.intermChk has remained rows. +func (c *Decoder) RemainedRows() int { + return c.remainedRows +} + +// ReuseIntermChk swaps `Decoder.intermChk` with `chk` directly when `Decoder.intermChk.NumRows()` is no less +// than `chk.requiredRows * factor` where `factor` is 0.8 now. 
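The three-step Decoder workflow documented above, condensed into a sketch (the function name drainCoprocessorPayload and its arguments are illustrative, not part of this package; same imports as before):

// drainCoprocessorPayload decodes one payload and feeds it to the caller's
// chunk in RequiredRows-sized pieces.
func drainCoprocessorPayload(data []byte, colTypes []*types.FieldType, chk *chunk.Chunk) {
	intermChk := chunk.NewChunkWithCapacity(colTypes, 1024)
	dec := chunk.NewDecoder(intermChk, colTypes)

	dec.Reset(data) // step 1: decode the whole payload into the intermediate chunk
	for !dec.IsFinished() {
		chk.Reset()
		dec.Decode(chk) // step 2: move up to chk.RequiredRows() rows into chk
		// ... hand chk to the parent executor here ...
	}
	// step 3: Reset with the next payload and repeat
}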
This can avoid the overhead of appending the +// data from `Decoder.intermChk` to `chk`. Moreover, the column.offsets needs to be further adjusted +// according to column.offset[0]. +func (c *Decoder) ReuseIntermChk(chk *Chunk) { + for i, col := range c.intermChk.columns { + col.length = c.remainedRows + elemLen := getFixedLen(c.codec.colTypes[i]) + if elemLen == varElemLen { + // For var-length types, we need to adjust the offsets before reuse. + if deltaOffset := col.offsets[0]; deltaOffset != 0 { + for j := 0; j < len(col.offsets); j++ { + col.offsets[j] -= deltaOffset + } + } + } + } + chk.SwapColumns(c.intermChk) + c.remainedRows = 0 +} + +func (c *Decoder) decodeColumn(chk *Chunk, ordinal int, requiredRows int) { + elemLen := getFixedLen(c.codec.colTypes[ordinal]) + numDataBytes := int64(elemLen * requiredRows) + srcCol := c.intermChk.columns[ordinal] + destCol := chk.columns[ordinal] + + if elemLen == varElemLen { + // For var-length types, we need to adjust the offsets after appending to destCol. + numDataBytes = srcCol.offsets[requiredRows] - srcCol.offsets[0] + deltaOffset := destCol.offsets[destCol.length] - srcCol.offsets[0] + destCol.offsets = append(destCol.offsets, srcCol.offsets[1:requiredRows+1]...) + for i := destCol.length + 1; i <= destCol.length+requiredRows; i++ { + destCol.offsets[i] = destCol.offsets[i] + deltaOffset + } + srcCol.offsets = srcCol.offsets[requiredRows:] + } + + numNullBitmapBytes := (requiredRows + 7) >> 3 + if destCol.length%8 == 0 { + destCol.nullBitmap = append(destCol.nullBitmap, srcCol.nullBitmap[:numNullBitmapBytes]...) + } else { + destCol.appendMultiSameNullBitmap(false, requiredRows) + bitMapLen := len(destCol.nullBitmap) + // bitOffset indicates the number of valid bits in destCol.nullBitmap's last byte. + bitOffset := destCol.length % 8 + startIdx := (destCol.length - 1) >> 3 + for i := 0; i < numNullBitmapBytes; i++ { + destCol.nullBitmap[startIdx+i] |= srcCol.nullBitmap[i] << bitOffset + // The high order 8-bitOffset bits in `srcCol.nullBitmap[i]` should be appended to the low order of the next slot. + if startIdx+i+1 < bitMapLen { + destCol.nullBitmap[startIdx+i+1] |= srcCol.nullBitmap[i] >> (8 - bitOffset) + } + } + } + // Set all the redundant bits in the last slot of destCol.nullBitmap to 0. + numRedundantBits := uint(len(destCol.nullBitmap)*8 - destCol.length - requiredRows) + bitMask := byte(1<<(8-numRedundantBits)) - 1 + destCol.nullBitmap[len(destCol.nullBitmap)-1] &= bitMask + + srcCol.nullBitmap = srcCol.nullBitmap[numNullBitmapBytes:] + destCol.length += requiredRows + + destCol.data = append(destCol.data, srcCol.data[:numDataBytes]...) + srcCol.data = srcCol.data[numDataBytes:] +} diff --git a/util/chunk/codec_test.go b/util/chunk/codec_test.go new file mode 100644 index 0000000..686e1d6 --- /dev/null +++ b/util/chunk/codec_test.go @@ -0,0 +1,194 @@ +// Copyright 2018 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package chunk + +import ( + "fmt" + "testing" + + "github.com/pingcap/check" + "github.com/pingcap/tidb/parser/mysql" + "github.com/pingcap/tidb/types" +) + +var _ = check.Suite(&testCodecSuite{}) + +type testCodecSuite struct{} + +func (s *testCodecSuite) TestCodec(c *check.C) { + numCols := 4 + numRows := 10 + + colTypes := make([]*types.FieldType, 0, numCols) + colTypes = append(colTypes, &types.FieldType{Tp: mysql.TypeLonglong}) + colTypes = append(colTypes, &types.FieldType{Tp: mysql.TypeLonglong}) + colTypes = append(colTypes, &types.FieldType{Tp: mysql.TypeVarchar}) + colTypes = append(colTypes, &types.FieldType{Tp: mysql.TypeVarchar}) + + oldChk := NewChunkWithCapacity(colTypes, numRows) + for i := 0; i < numRows; i++ { + str := fmt.Sprintf("%d.12345", i) + oldChk.AppendNull(0) + oldChk.AppendInt64(1, int64(i)) + oldChk.AppendString(2, str) + oldChk.AppendString(3, str) + } + + codec := NewCodec(colTypes) + buffer := codec.Encode(oldChk) + + newChk := NewChunkWithCapacity(colTypes, numRows) + remained := codec.DecodeToChunk(buffer, newChk) + + c.Assert(len(remained), check.Equals, 0) + c.Assert(newChk.NumCols(), check.Equals, numCols) + c.Assert(newChk.NumRows(), check.Equals, numRows) + for i := 0; i < numRows; i++ { + row := newChk.GetRow(i) + str := fmt.Sprintf("%d.12345", i) + c.Assert(row.IsNull(0), check.IsTrue) + c.Assert(row.IsNull(1), check.IsFalse) + c.Assert(row.IsNull(2), check.IsFalse) + c.Assert(row.IsNull(3), check.IsFalse) + + c.Assert(row.GetInt64(1), check.Equals, int64(i)) + c.Assert(row.GetString(2), check.Equals, str) + c.Assert(row.GetString(3), check.Equals, str) + } +} + +func (s *testCodecSuite) TestEstimateTypeWidth(c *check.C) { + var colType *types.FieldType + + colType = &types.FieldType{Tp: mysql.TypeLonglong} + c.Assert(EstimateTypeWidth(false, colType), check.Equals, 8) // fixed-witch type + + colType = &types.FieldType{Tp: mysql.TypeString, Flen: 100000} + c.Assert(EstimateTypeWidth(true, colType), check.Equals, 100000) // PAD_CHAR_TO_FULL_LENGTH + + colType = &types.FieldType{Tp: mysql.TypeString, Flen: 31} + c.Assert(EstimateTypeWidth(false, colType), check.Equals, 31) // colLen <= 32 + + colType = &types.FieldType{Tp: mysql.TypeString, Flen: 999} + c.Assert(EstimateTypeWidth(false, colType), check.Equals, 515) // colLen < 1000 + + colType = &types.FieldType{Tp: mysql.TypeString, Flen: 2000} + c.Assert(EstimateTypeWidth(false, colType), check.Equals, 516) // colLen < 1000 + + colType = &types.FieldType{Tp: mysql.TypeString} + c.Assert(EstimateTypeWidth(false, colType), check.Equals, 32) // value after guessing +} + +func BenchmarkEncodeChunk(b *testing.B) { + numCols := 4 + numRows := 1024 + + chk := &Chunk{columns: make([]*Column, numCols)} + for i := 0; i < numCols; i++ { + chk.columns[i] = &Column{ + length: numRows, + nullBitmap: make([]byte, numRows/8+1), + data: make([]byte, numRows*8), + } + } + + codec := &Codec{} + + b.ResetTimer() + for i := 0; i < b.N; i++ { + codec.Encode(chk) + } +} + +func BenchmarkDecode(b *testing.B) { + numCols := 4 + numRows := 1024 + + colTypes := make([]*types.FieldType, numCols) + chk := &Chunk{columns: make([]*Column, numCols)} + for i := 0; i < numCols; i++ { + chk.columns[i] = &Column{ + length: numRows, + nullBitmap: make([]byte, numRows/8+1), + data: make([]byte, numRows*8), + } + colTypes[i] = &types.FieldType{ + Tp: mysql.TypeLonglong, + } + } + codec := &Codec{colTypes} + buffer := codec.Encode(chk) + + b.ResetTimer() + for i := 0; i < b.N; i++ { + codec.Decode(buffer) + } +} + +func 
BenchmarkDecodeToChunk(b *testing.B) { + numCols := 4 + numRows := 1024 + + colTypes := make([]*types.FieldType, numCols) + chk := &Chunk{ + columns: make([]*Column, numCols), + } + for i := 0; i < numCols; i++ { + chk.columns[i] = &Column{ + length: numRows, + nullBitmap: make([]byte, numRows/8+1), + data: make([]byte, numRows*8), + elemBuf: make([]byte, 8), + } + colTypes[i] = &types.FieldType{ + Tp: mysql.TypeLonglong, + } + } + codec := &Codec{colTypes} + buffer := codec.Encode(chk) + + b.ResetTimer() + for i := 0; i < b.N; i++ { + codec.DecodeToChunk(buffer, chk) + } +} + +func BenchmarkDecodeToChunkWithVariableType(b *testing.B) { + numCols := 5 + numRows := 1024 + + colTypes := make([]*types.FieldType, 0, numCols) + colTypes = append(colTypes, &types.FieldType{Tp: mysql.TypeLonglong}) + colTypes = append(colTypes, &types.FieldType{Tp: mysql.TypeLonglong}) + colTypes = append(colTypes, &types.FieldType{Tp: mysql.TypeVarchar}) + colTypes = append(colTypes, &types.FieldType{Tp: mysql.TypeVarchar}) + + chk := NewChunkWithCapacity(colTypes, numRows) + for i := 0; i < numRows; i++ { + str := fmt.Sprintf("%d.12345", i) + chk.AppendNull(0) + chk.AppendInt64(1, int64(i)) + chk.AppendString(2, str) + chk.AppendString(3, str) + } + codec := &Codec{colTypes} + buffer := codec.Encode(chk) + + chk.Reset() + + b.ResetTimer() + for i := 0; i < b.N; i++ { + codec.DecodeToChunk(buffer, chk) + } +} diff --git a/util/chunk/column.go b/util/chunk/column.go new file mode 100644 index 0000000..8aa5d27 --- /dev/null +++ b/util/chunk/column.go @@ -0,0 +1,574 @@ +// Copyright 2018 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package chunk + +import ( + "fmt" + "math/bits" + "reflect" + "unsafe" + + "github.com/pingcap/tidb/types" + "github.com/pingcap/tidb/util/hack" +) + +// Column stores one column of data in Apache Arrow format. +// See https://arrow.apache.org/docs/memory_layout.html +type Column struct { + length int + nullBitmap []byte // bit 0 is null, 1 is not null + offsets []int64 + data []byte + elemBuf []byte +} + +// NewColumn creates a new column with the specific length and capacity. +func NewColumn(ft *types.FieldType, cap int) *Column { + return newColumn(getFixedLen(ft), cap) +} + +func newColumn(typeSize, cap int) *Column { + var col *Column + if typeSize == varElemLen { + col = newVarLenColumn(cap, nil) + } else { + col = newFixedLenColumn(typeSize, cap) + } + return col +} + +func (c *Column) typeSize() int { + if len(c.elemBuf) > 0 { + return len(c.elemBuf) + } + return varElemLen +} + +func (c *Column) isFixed() bool { + return c.elemBuf != nil +} + +// Reset resets this Column according to the EvalType. +// Different from reset, Reset will reset the elemBuf. +func (c *Column) Reset(eType types.EvalType) { + switch eType { + case types.ETInt: + c.ResizeInt64(0, false) + case types.ETReal: + c.ResizeFloat64(0, false) + case types.ETString: + c.ReserveString(0) + default: + panic(fmt.Sprintf("invalid EvalType %v", eType)) + } +} + +// reset resets the underlying data of this Column but doesn't modify its data type. 
+func (c *Column) reset() { + c.length = 0 + c.nullBitmap = c.nullBitmap[:0] + if len(c.offsets) > 0 { + // The first offset is always 0, it makes slicing the data easier, we need to keep it. + c.offsets = c.offsets[:1] + } + c.data = c.data[:0] +} + +// IsNull returns if this row is null. +func (c *Column) IsNull(rowIdx int) bool { + nullByte := c.nullBitmap[rowIdx/8] + return nullByte&(1<<(uint(rowIdx)&7)) == 0 +} + +// CopyConstruct copies this Column to dst. +// If dst is nil, it creates a new Column and returns it. +func (c *Column) CopyConstruct(dst *Column) *Column { + if dst != nil { + dst.length = c.length + dst.nullBitmap = append(dst.nullBitmap[:0], c.nullBitmap...) + dst.offsets = append(dst.offsets[:0], c.offsets...) + dst.data = append(dst.data[:0], c.data...) + dst.elemBuf = append(dst.elemBuf[:0], c.elemBuf...) + return dst + } + newCol := &Column{length: c.length} + newCol.nullBitmap = append(newCol.nullBitmap, c.nullBitmap...) + newCol.offsets = append(newCol.offsets, c.offsets...) + newCol.data = append(newCol.data, c.data...) + newCol.elemBuf = append(newCol.elemBuf, c.elemBuf...) + return newCol +} + +func (c *Column) appendNullBitmap(notNull bool) { + idx := c.length >> 3 + if idx >= len(c.nullBitmap) { + c.nullBitmap = append(c.nullBitmap, 0) + } + if notNull { + pos := uint(c.length) & 7 + c.nullBitmap[idx] |= byte(1 << pos) + } +} + +// appendMultiSameNullBitmap appends multiple same bit value to `nullBitMap`. +// notNull means not null. +// num means the number of bits that should be appended. +func (c *Column) appendMultiSameNullBitmap(notNull bool, num int) { + numNewBytes := ((c.length + num + 7) >> 3) - len(c.nullBitmap) + b := byte(0) + if notNull { + b = 0xff + } + for i := 0; i < numNewBytes; i++ { + c.nullBitmap = append(c.nullBitmap, b) + } + if !notNull { + return + } + // 1. Set all the remaining bits in the last slot of old c.numBitMap to 1. + numRemainingBits := uint(c.length % 8) + bitMask := byte(^((1 << numRemainingBits) - 1)) + c.nullBitmap[c.length/8] |= bitMask + // 2. Set all the redundant bits in the last slot of new c.numBitMap to 0. + numRedundantBits := uint(len(c.nullBitmap)*8 - c.length - num) + bitMask = byte(1<<(8-numRedundantBits)) - 1 + c.nullBitmap[len(c.nullBitmap)-1] &= bitMask +} + +// AppendNull appends a null value into this Column. +func (c *Column) AppendNull() { + c.appendNullBitmap(false) + if c.isFixed() { + c.data = append(c.data, c.elemBuf...) + } else { + c.offsets = append(c.offsets, c.offsets[c.length]) + } + c.length++ +} + +func (c *Column) finishAppendFixed() { + c.data = append(c.data, c.elemBuf...) + c.appendNullBitmap(true) + c.length++ +} + +// AppendInt64 appends an int64 value into this Column. +func (c *Column) AppendInt64(i int64) { + *(*int64)(unsafe.Pointer(&c.elemBuf[0])) = i + c.finishAppendFixed() +} + +// AppendUint64 appends a uint64 value into this Column. +func (c *Column) AppendUint64(u uint64) { + *(*uint64)(unsafe.Pointer(&c.elemBuf[0])) = u + c.finishAppendFixed() +} + +// AppendFloat32 appends a float32 value into this Column. +func (c *Column) AppendFloat32(f float32) { + *(*float32)(unsafe.Pointer(&c.elemBuf[0])) = f + c.finishAppendFixed() +} + +// AppendFloat64 appends a float64 value into this Column. 
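The null bitmap convention used throughout Column, shown as a standalone sketch of the arithmetic in IsNull and appendNullBitmap: one bit per row, packed least-significant-bit first, where a 0 bit means NULL and a 1 bit means not NULL.

package main

import "fmt"

func main() {
	// rows 0 and 2 are not null, row 1 is null (bits 0000_0101)
	nullBitmap := []byte{0x05}

	isNull := func(rowIdx int) bool {
		return nullBitmap[rowIdx/8]&(1<<(uint(rowIdx)&7)) == 0
	}
	fmt.Println(isNull(0), isNull(1), isNull(2)) // false true false
}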
+func (c *Column) AppendFloat64(f float64) { + *(*float64)(unsafe.Pointer(&c.elemBuf[0])) = f + c.finishAppendFixed() +} + +func (c *Column) finishAppendVar() { + c.appendNullBitmap(true) + c.offsets = append(c.offsets, int64(len(c.data))) + c.length++ +} + +// AppendString appends a string value into this Column. +func (c *Column) AppendString(str string) { + c.data = append(c.data, str...) + c.finishAppendVar() +} + +// AppendBytes appends a byte slice into this Column. +func (c *Column) AppendBytes(b []byte) { + c.data = append(c.data, b...) + c.finishAppendVar() +} + +const ( + sizeInt64 = int(unsafe.Sizeof(int64(0))) + sizeUint64 = int(unsafe.Sizeof(uint64(0))) + sizeFloat32 = int(unsafe.Sizeof(float32(0))) + sizeFloat64 = int(unsafe.Sizeof(float64(0))) +) + +var ( + emptyBuf = make([]byte, 4*1024) +) + +// resize resizes the column so that it contains n elements, only valid for fixed-length types. +func (c *Column) resize(n, typeSize int, isNull bool) { + sizeData := n * typeSize + if cap(c.data) >= sizeData { + (*reflect.SliceHeader)(unsafe.Pointer(&c.data)).Len = sizeData + } else { + c.data = make([]byte, sizeData) + } + if !isNull { + for j := 0; j < sizeData; j += len(emptyBuf) { + copy(c.data[j:], emptyBuf) + } + } + + newNulls := false + sizeNulls := (n + 7) >> 3 + if cap(c.nullBitmap) >= sizeNulls { + (*reflect.SliceHeader)(unsafe.Pointer(&c.nullBitmap)).Len = sizeNulls + } else { + c.nullBitmap = make([]byte, sizeNulls) + newNulls = true + } + if !isNull || !newNulls { + var nullVal byte + if !isNull { + nullVal = 0xFF + } + for i := range c.nullBitmap { + c.nullBitmap[i] = nullVal + } + } + + if cap(c.elemBuf) >= typeSize { + (*reflect.SliceHeader)(unsafe.Pointer(&c.elemBuf)).Len = typeSize + } else { + c.elemBuf = make([]byte, typeSize) + } + + c.length = n +} + +// reserve makes the column capacity be at least enough to contain n elements. +// this method is only valid for var-length types and estElemSize is the estimated size of this type. +func (c *Column) reserve(n, estElemSize int) { + sizeData := n * estElemSize + if cap(c.data) >= sizeData { + c.data = c.data[:0] + } else { + c.data = make([]byte, 0, sizeData) + } + + sizeNulls := (n + 7) >> 3 + if cap(c.nullBitmap) >= sizeNulls { + c.nullBitmap = c.nullBitmap[:0] + } else { + c.nullBitmap = make([]byte, 0, sizeNulls) + } + + sizeOffs := n + 1 + if cap(c.offsets) >= sizeOffs { + c.offsets = c.offsets[:1] + } else { + c.offsets = make([]int64, 1, sizeOffs) + } + + c.elemBuf = nil + c.length = 0 +} + +// SetNull sets the rowIdx to null. +func (c *Column) SetNull(rowIdx int, isNull bool) { + if isNull { + c.nullBitmap[rowIdx>>3] &= ^(1 << uint(rowIdx&7)) + } else { + c.nullBitmap[rowIdx>>3] |= 1 << uint(rowIdx&7) + } +} + +// SetNulls sets rows in [begin, end) to null. +func (c *Column) SetNulls(begin, end int, isNull bool) { + i := ((begin + 7) >> 3) << 3 + for ; begin < i && begin < end; begin++ { + c.SetNull(begin, isNull) + } + var v uint8 + if !isNull { + v = (1 << 8) - 1 + } + for ; begin+8 <= end; begin += 8 { + c.nullBitmap[begin>>3] = v + } + for ; begin < end; begin++ { + c.SetNull(begin, isNull) + } +} + +// nullCount returns the number of nulls in this Column. +func (c *Column) nullCount() int { + var cnt, i int + for ; i+8 <= c.length; i += 8 { + // 0 is null and 1 is not null + cnt += 8 - bits.OnesCount8(uint8(c.nullBitmap[i>>3])) + } + for ; i < c.length; i++ { + if c.IsNull(i) { + cnt++ + } + } + return cnt +} + +// ResizeInt64 resizes the column so that it contains n int64 elements. 
+func (c *Column) ResizeInt64(n int, isNull bool) { + c.resize(n, sizeInt64, isNull) +} + +// ResizeUint64 resizes the column so that it contains n uint64 elements. +func (c *Column) ResizeUint64(n int, isNull bool) { + c.resize(n, sizeUint64, isNull) +} + +// ResizeFloat32 resizes the column so that it contains n float32 elements. +func (c *Column) ResizeFloat32(n int, isNull bool) { + c.resize(n, sizeFloat32, isNull) +} + +// ResizeFloat64 resizes the column so that it contains n float64 elements. +func (c *Column) ResizeFloat64(n int, isNull bool) { + c.resize(n, sizeFloat64, isNull) +} + +// ReserveString changes the column capacity to store n string elements and set the length to zero. +func (c *Column) ReserveString(n int) { + c.reserve(n, 8) +} + +// ReserveBytes changes the column capacity to store n bytes elements and set the length to zero. +func (c *Column) ReserveBytes(n int) { + c.reserve(n, 8) +} + +// ReserveSet changes the column capacity to store n set elements and set the length to zero. +func (c *Column) ReserveSet(n int) { + c.reserve(n, 8) +} + +// ReserveEnum changes the column capacity to store n enum elements and set the length to zero. +func (c *Column) ReserveEnum(n int) { + c.reserve(n, 8) +} + +func (c *Column) castSliceHeader(header *reflect.SliceHeader, typeSize int) { + header.Data = (*reflect.SliceHeader)(unsafe.Pointer(&c.data)).Data + header.Len = c.length + header.Cap = cap(c.data) / typeSize +} + +// Int64s returns an int64 slice stored in this Column. +func (c *Column) Int64s() []int64 { + var res []int64 + c.castSliceHeader((*reflect.SliceHeader)(unsafe.Pointer(&res)), sizeInt64) + return res +} + +// Uint64s returns a uint64 slice stored in this Column. +func (c *Column) Uint64s() []uint64 { + var res []uint64 + c.castSliceHeader((*reflect.SliceHeader)(unsafe.Pointer(&res)), sizeUint64) + return res +} + +// Float32s returns a float32 slice stored in this Column. +func (c *Column) Float32s() []float32 { + var res []float32 + c.castSliceHeader((*reflect.SliceHeader)(unsafe.Pointer(&res)), sizeFloat32) + return res +} + +// Float64s returns a float64 slice stored in this Column. +func (c *Column) Float64s() []float64 { + var res []float64 + c.castSliceHeader((*reflect.SliceHeader)(unsafe.Pointer(&res)), sizeFloat64) + return res +} + +// GetInt64 returns the int64 in the specific row. +func (c *Column) GetInt64(rowID int) int64 { + return *(*int64)(unsafe.Pointer(&c.data[rowID*8])) +} + +// GetUint64 returns the uint64 in the specific row. +func (c *Column) GetUint64(rowID int) uint64 { + return *(*uint64)(unsafe.Pointer(&c.data[rowID*8])) +} + +// GetFloat32 returns the float32 in the specific row. +func (c *Column) GetFloat32(rowID int) float32 { + return *(*float32)(unsafe.Pointer(&c.data[rowID*4])) +} + +// GetFloat64 returns the float64 in the specific row. +func (c *Column) GetFloat64(rowID int) float64 { + return *(*float64)(unsafe.Pointer(&c.data[rowID*8])) +} + +// GetString returns the string in the specific row. +func (c *Column) GetString(rowID int) string { + return string(hack.String(c.data[c.offsets[rowID]:c.offsets[rowID+1]])) +} + +// GetBytes returns the byte slice in the specific row. +func (c *Column) GetBytes(rowID int) []byte { + return c.data[c.offsets[rowID]:c.offsets[rowID+1]] +} + +// GetRaw returns the underlying raw bytes in the specific row. 
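ResizeInt64 and Int64s together enable vectorized access: the column is sized up front and the fixed-length data is exposed as a zero-copy []int64 view that callers write into directly, with NULLs tracked separately in the bitmap. A minimal sketch (same imports as before):

col := chunk.NewColumn(&types.FieldType{Tp: mysql.TypeLonglong}, 4)
col.ResizeInt64(4, false) // 4 elements, all marked not-null

i64s := col.Int64s() // zero-copy view over col.data
for i := range i64s {
	i64s[i] = int64(i * i)
}
col.SetNull(2, true) // mark row 2 NULL afterwards

fmt.Println(col.GetInt64(3), col.IsNull(2)) // 9 true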
+func (c *Column) GetRaw(rowID int) []byte { + var data []byte + if c.isFixed() { + elemLen := len(c.elemBuf) + data = c.data[rowID*elemLen : rowID*elemLen+elemLen] + } else { + data = c.data[c.offsets[rowID]:c.offsets[rowID+1]] + } + return data +} + +// SetRaw sets the raw bytes for the rowIdx-th element. +// NOTE: Two conditions must be satisfied before calling this function: +// 1. The column should be stored with variable-length elements. +// 2. The length of the new element should be exactly the same as the old one. +func (c *Column) SetRaw(rowID int, bs []byte) { + copy(c.data[c.offsets[rowID]:c.offsets[rowID+1]], bs) +} + +// reconstruct reconstructs this Column by removing all filtered rows in it according to sel. +func (c *Column) reconstruct(sel []int) { + if sel == nil { + return + } + if c.isFixed() { + elemLen := len(c.elemBuf) + for dst, src := range sel { + idx := dst >> 3 + pos := uint16(dst & 7) + if c.IsNull(src) { + c.nullBitmap[idx] &= ^byte(1 << pos) + } else { + copy(c.data[dst*elemLen:dst*elemLen+elemLen], c.data[src*elemLen:src*elemLen+elemLen]) + c.nullBitmap[idx] |= byte(1 << pos) + } + } + c.data = c.data[:len(sel)*elemLen] + } else { + tail := 0 + for dst, src := range sel { + idx := dst >> 3 + pos := uint(dst & 7) + if c.IsNull(src) { + c.nullBitmap[idx] &= ^byte(1 << pos) + c.offsets[dst+1] = int64(tail) + } else { + start, end := c.offsets[src], c.offsets[src+1] + copy(c.data[tail:], c.data[start:end]) + tail += int(end - start) + c.offsets[dst+1] = int64(tail) + c.nullBitmap[idx] |= byte(1 << pos) + } + } + c.data = c.data[:tail] + c.offsets = c.offsets[:len(sel)+1] + } + c.length = len(sel) + + // clean nullBitmap + c.nullBitmap = c.nullBitmap[:(len(sel)+7)>>3] + idx := len(sel) >> 3 + if idx < len(c.nullBitmap) { + pos := uint16(len(sel) & 7) + c.nullBitmap[idx] &= byte((1 << pos) - 1) + } +} + +// CopyReconstruct copies this Column to dst and removes unselected rows. +// If dst is nil, it creates a new Column and returns it. +func (c *Column) CopyReconstruct(sel []int, dst *Column) *Column { + if sel == nil { + return c.CopyConstruct(dst) + } + + selLength := len(sel) + if selLength == c.length { + // The variable 'ascend' is used to check if the sel array is in ascending order + ascend := true + for i := 1; i < selLength; i++ { + if sel[i] < sel[i-1] { + ascend = false + break + } + } + if ascend { + return c.CopyConstruct(dst) + } + } + + if dst == nil { + dst = newColumn(c.typeSize(), len(sel)) + } else { + dst.reset() + } + + if c.isFixed() { + elemLen := len(c.elemBuf) + dst.elemBuf = make([]byte, elemLen) + for _, i := range sel { + dst.appendNullBitmap(!c.IsNull(i)) + dst.data = append(dst.data, c.data[i*elemLen:i*elemLen+elemLen]...) + dst.length++ + } + } else { + dst.elemBuf = nil + if len(dst.offsets) == 0 { + dst.offsets = append(dst.offsets, 0) + } + for _, i := range sel { + dst.appendNullBitmap(!c.IsNull(i)) + start, end := c.offsets[i], c.offsets[i+1] + dst.data = append(dst.data, c.data[start:end]...) + dst.offsets = append(dst.offsets, int64(len(dst.data))) + dst.length++ + } + } + return dst +} + +// MergeNulls merges these columns' null bitmaps. +// For a row, if any column of it is null, the result is null. +// It works like: if col1.IsNull || col2.IsNull || col3.IsNull. +// The caller should ensure that all these columns have the same +// length, and data stored in the result column is fixed-length type. 
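A typical caller of MergeNulls (documented above, with its body following) is a vectorized builtin: compute over the raw data unconditionally, then AND the argument bitmaps into the result so a row is NULL whenever any input is NULL. A sketch under the assumption that both inputs already hold exactly n rows each; the function name vecAddInt64 is illustrative:

// vecAddInt64 writes a[i]+b[i] into result and propagates NULLs.
func vecAddInt64(a, b, result *chunk.Column, n int) {
	result.ResizeInt64(n, false)
	x, y, z := a.Int64s(), b.Int64s(), result.Int64s()
	for i := 0; i < n; i++ {
		z[i] = x[i] + y[i] // NULL slots may hold garbage; the bitmap masks them out
	}
	result.MergeNulls(a, b) // row i becomes NULL if it is NULL in a or in b
}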
+func (c *Column) MergeNulls(cols ...*Column) { + if !c.isFixed() { + panic("result column should be fixed-length type") + } + for _, col := range cols { + if c.length != col.length { + panic("should ensure all columns have the same length") + } + } + for _, col := range cols { + for i := range c.nullBitmap { + // bit 0 is null, 1 is not null, so do AND operations here. + c.nullBitmap[i] &= col.nullBitmap[i] + } + } +} diff --git a/util/chunk/compare.go b/util/chunk/compare.go new file mode 100644 index 0000000..7dbbef7 --- /dev/null +++ b/util/chunk/compare.go @@ -0,0 +1,144 @@ +// Copyright 2017 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package chunk + +import ( + "sort" + + "github.com/pingcap/tidb/parser/mysql" + "github.com/pingcap/tidb/types" +) + +// CompareFunc is a function to compare the two values in Row, the two columns must have the same type. +type CompareFunc = func(l Row, lCol int, r Row, rCol int) int + +// GetCompareFunc gets a compare function for the field type. +func GetCompareFunc(tp *types.FieldType) CompareFunc { + switch tp.Tp { + case mysql.TypeTiny, mysql.TypeShort, mysql.TypeInt24, mysql.TypeLong, mysql.TypeLonglong, mysql.TypeYear: + if mysql.HasUnsignedFlag(tp.Flag) { + return cmpUint64 + } + return cmpInt64 + case mysql.TypeFloat: + return cmpFloat32 + case mysql.TypeDouble: + return cmpFloat64 + case mysql.TypeString, mysql.TypeVarString, mysql.TypeVarchar, + mysql.TypeBlob, mysql.TypeTinyBlob, mysql.TypeMediumBlob, mysql.TypeLongBlob: + return cmpString + } + return nil +} + +func cmpNull(lNull, rNull bool) int { + if lNull && rNull { + return 0 + } + if lNull { + return -1 + } + return 1 +} + +func cmpInt64(l Row, lCol int, r Row, rCol int) int { + lNull, rNull := l.IsNull(lCol), r.IsNull(rCol) + if lNull || rNull { + return cmpNull(lNull, rNull) + } + return types.CompareInt64(l.GetInt64(lCol), r.GetInt64(rCol)) +} + +func cmpUint64(l Row, lCol int, r Row, rCol int) int { + lNull, rNull := l.IsNull(lCol), r.IsNull(rCol) + if lNull || rNull { + return cmpNull(lNull, rNull) + } + return types.CompareUint64(l.GetUint64(lCol), r.GetUint64(rCol)) +} + +func cmpString(l Row, lCol int, r Row, rCol int) int { + lNull, rNull := l.IsNull(lCol), r.IsNull(rCol) + if lNull || rNull { + return cmpNull(lNull, rNull) + } + return types.CompareString(l.GetString(lCol), r.GetString(rCol)) +} + +func cmpFloat32(l Row, lCol int, r Row, rCol int) int { + lNull, rNull := l.IsNull(lCol), r.IsNull(rCol) + if lNull || rNull { + return cmpNull(lNull, rNull) + } + return types.CompareFloat64(float64(l.GetFloat32(lCol)), float64(r.GetFloat32(rCol))) +} + +func cmpFloat64(l Row, lCol int, r Row, rCol int) int { + lNull, rNull := l.IsNull(lCol), r.IsNull(rCol) + if lNull || rNull { + return cmpNull(lNull, rNull) + } + return types.CompareFloat64(l.GetFloat64(lCol), r.GetFloat64(rCol)) +} + +// Compare compares the value with ad. 
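GetCompareFunc resolves the comparison function once per column type, so sort and merge operators avoid a per-row type switch. A small sketch (same imports as before); as cmpNull above shows, NULL sorts before any non-NULL value:

tp := &types.FieldType{Tp: mysql.TypeLonglong}
cmp := chunk.GetCompareFunc(tp)

chk := chunk.NewChunkWithCapacity([]*types.FieldType{tp}, 4)
chk.AppendInt64(0, 7)
chk.AppendNull(0)
chk.AppendInt64(0, 3)

fmt.Println(cmp(chk.GetRow(0), 0, chk.GetRow(2), 0)) // 1: 7 > 3
fmt.Println(cmp(chk.GetRow(1), 0, chk.GetRow(0), 0)) // -1: NULL < 7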
+func Compare(row Row, colIdx int, ad *types.Datum) int { + switch ad.Kind() { + case types.KindNull: + if row.IsNull(colIdx) { + return 0 + } + return 1 + case types.KindMinNotNull: + if row.IsNull(colIdx) { + return -1 + } + return 1 + case types.KindMaxValue: + return -1 + case types.KindInt64: + return types.CompareInt64(row.GetInt64(colIdx), ad.GetInt64()) + case types.KindUint64: + return types.CompareUint64(row.GetUint64(colIdx), ad.GetUint64()) + case types.KindFloat32: + return types.CompareFloat64(float64(row.GetFloat32(colIdx)), float64(ad.GetFloat32())) + case types.KindFloat64: + return types.CompareFloat64(row.GetFloat64(colIdx), ad.GetFloat64()) + case types.KindString, types.KindBytes: + return types.CompareString(row.GetString(colIdx), ad.GetString()) + default: + return 0 + } +} + +// LowerBound searches on the non-decreasing Column colIdx, +// returns the smallest index i such that the value at row i is not less than `d`. +func (c *Chunk) LowerBound(colIdx int, d *types.Datum) (index int, match bool) { + index = sort.Search(c.NumRows(), func(i int) bool { + cmp := Compare(c.GetRow(i), colIdx, d) + if cmp == 0 { + match = true + } + return cmp >= 0 + }) + return +} + +// UpperBound searches on the non-decreasing Column colIdx, +// returns the smallest index i such that the value at row i is larger than `d`. +func (c *Chunk) UpperBound(colIdx int, d *types.Datum) int { + return sort.Search(c.NumRows(), func(i int) bool { + return Compare(c.GetRow(i), colIdx, d) > 0 + }) +} diff --git a/util/chunk/iterator.go b/util/chunk/iterator.go new file mode 100644 index 0000000..c6a3bc5 --- /dev/null +++ b/util/chunk/iterator.go @@ -0,0 +1,286 @@ +// Copyright 2017 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package chunk + +var ( + _ Iterator = (*Iterator4Chunk)(nil) + _ Iterator = (*iterator4RowPtr)(nil) + _ Iterator = (*iterator4List)(nil) + _ Iterator = (*iterator4Slice)(nil) +) + +// Iterator is used to iterate a number of rows. +// +// for row := it.Begin(); row != it.End(); row = it.Next() { +// ... +// } +type Iterator interface { + // Begin resets the cursor of the iterator and returns the first Row. + Begin() Row + + // Next returns the next Row. + Next() Row + + // End returns the invalid end Row. + End() Row + + // Len returns the length. + Len() int + + // Current returns the current Row. + Current() Row + + // ReachEnd reaches the end of iterator. + ReachEnd() +} + +// NewIterator4Slice returns a Iterator for Row slice. +func NewIterator4Slice(rows []Row) Iterator { + return &iterator4Slice{rows: rows} +} + +type iterator4Slice struct { + rows []Row + cursor int +} + +// Begin implements the Iterator interface. +func (it *iterator4Slice) Begin() Row { + if it.Len() == 0 { + return it.End() + } + it.cursor = 1 + return it.rows[0] +} + +// Next implements the Iterator interface. +func (it *iterator4Slice) Next() Row { + if len := it.Len(); it.cursor >= len { + it.cursor = len + 1 + return it.End() + } + row := it.rows[it.cursor] + it.cursor++ + return row +} + +// Current implements the Iterator interface. 
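+// It returns the row most recently produced by Begin or Next; before Begin is
+// called, or once the cursor has moved past the end, it returns End().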
+func (it *iterator4Slice) Current() Row { + if it.cursor == 0 || it.cursor > it.Len() { + return it.End() + } + return it.rows[it.cursor-1] +} + +// End implements the Iterator interface. +func (it *iterator4Slice) End() Row { + return Row{} +} + +// ReachEnd implements the Iterator interface. +func (it *iterator4Slice) ReachEnd() { + it.cursor = it.Len() + 1 +} + +// Len implements the Iterator interface. +func (it *iterator4Slice) Len() int { + return len(it.rows) +} + +// NewIterator4Chunk returns a iterator for Chunk. +func NewIterator4Chunk(chk *Chunk) *Iterator4Chunk { + return &Iterator4Chunk{chk: chk} +} + +// Iterator4Chunk is used to iterate rows inside a chunk. +type Iterator4Chunk struct { + chk *Chunk + cursor int32 + numRows int32 +} + +// Begin implements the Iterator interface. +func (it *Iterator4Chunk) Begin() Row { + it.numRows = int32(it.chk.NumRows()) + if it.numRows == 0 { + return it.End() + } + it.cursor = 1 + return it.chk.GetRow(0) +} + +// Next implements the Iterator interface. +func (it *Iterator4Chunk) Next() Row { + if it.cursor >= it.numRows { + it.cursor = it.numRows + 1 + return it.End() + } + row := it.chk.GetRow(int(it.cursor)) + it.cursor++ + return row +} + +// Current implements the Iterator interface. +func (it *Iterator4Chunk) Current() Row { + if it.cursor == 0 || int(it.cursor) > it.Len() { + return it.End() + } + return it.chk.GetRow(int(it.cursor) - 1) +} + +// End implements the Iterator interface. +func (it *Iterator4Chunk) End() Row { + return Row{} +} + +// ReachEnd implements the Iterator interface. +func (it *Iterator4Chunk) ReachEnd() { + it.cursor = int32(it.Len() + 1) +} + +// Len implements the Iterator interface +func (it *Iterator4Chunk) Len() int { + return it.chk.NumRows() +} + +// GetChunk returns the chunk stored in the Iterator4Chunk +func (it *Iterator4Chunk) GetChunk() *Chunk { + return it.chk +} + +// NewIterator4List returns a Iterator for List. +func NewIterator4List(li *List) Iterator { + return &iterator4List{li: li} +} + +type iterator4List struct { + li *List + chkCursor int + rowCursor int +} + +// Begin implements the Iterator interface. +func (it *iterator4List) Begin() Row { + if it.li.NumChunks() == 0 { + return it.End() + } + chk := it.li.GetChunk(0) + row := chk.GetRow(0) + if chk.NumRows() == 1 { + it.chkCursor = 1 + it.rowCursor = 0 + } else { + it.chkCursor = 0 + it.rowCursor = 1 + } + return row +} + +// Next implements the Iterator interface. +func (it *iterator4List) Next() Row { + if it.chkCursor >= it.li.NumChunks() { + it.chkCursor = it.li.NumChunks() + 1 + return it.End() + } + chk := it.li.GetChunk(it.chkCursor) + row := chk.GetRow(it.rowCursor) + it.rowCursor++ + if it.rowCursor == chk.NumRows() { + it.rowCursor = 0 + it.chkCursor++ + } + return row +} + +// Current implements the Iterator interface. +func (it *iterator4List) Current() Row { + if (it.chkCursor == 0 && it.rowCursor == 0) || it.chkCursor > it.li.NumChunks() { + return it.End() + } + if it.rowCursor == 0 { + curChk := it.li.GetChunk(it.chkCursor - 1) + return curChk.GetRow(curChk.NumRows() - 1) + } + curChk := it.li.GetChunk(it.chkCursor) + return curChk.GetRow(it.rowCursor - 1) +} + +// End implements the Iterator interface. +func (it *iterator4List) End() Row { + return Row{} +} + +// ReachEnd implements the Iterator interface. +func (it *iterator4List) ReachEnd() { + it.chkCursor = it.li.NumChunks() + 1 +} + +// Len implements the Iterator interface. 
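+// For the list iterator this is the total number of rows stored in the
+// underlying List across all of its chunks, not the number of chunks.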
+func (it *iterator4List) Len() int { + return it.li.Len() +} + +// NewIterator4RowPtr returns a Iterator for RowPtrs. +func NewIterator4RowPtr(li *List, ptrs []RowPtr) Iterator { + return &iterator4RowPtr{li: li, ptrs: ptrs} +} + +type iterator4RowPtr struct { + li *List + ptrs []RowPtr + cursor int +} + +// Begin implements the Iterator interface. +func (it *iterator4RowPtr) Begin() Row { + if it.Len() == 0 { + return it.End() + } + it.cursor = 1 + return it.li.GetRow(it.ptrs[0]) +} + +// Next implements the Iterator interface. +func (it *iterator4RowPtr) Next() Row { + if len := it.Len(); it.cursor >= len { + it.cursor = len + 1 + return it.End() + } + row := it.li.GetRow(it.ptrs[it.cursor]) + it.cursor++ + return row +} + +// Current implements the Iterator interface. +func (it *iterator4RowPtr) Current() Row { + if it.cursor == 0 || it.cursor > it.Len() { + return it.End() + } + return it.li.GetRow(it.ptrs[it.cursor-1]) +} + +// End implements the Iterator interface. +func (it *iterator4RowPtr) End() Row { + return Row{} +} + +// ReachEnd implements the Iterator interface. +func (it *iterator4RowPtr) ReachEnd() { + it.cursor = it.Len() + 1 +} + +// Len implements the Iterator interface. +func (it *iterator4RowPtr) Len() int { + return len(it.ptrs) +} diff --git a/util/chunk/iterator_test.go b/util/chunk/iterator_test.go new file mode 100644 index 0000000..c55b43f --- /dev/null +++ b/util/chunk/iterator_test.go @@ -0,0 +1,144 @@ +// Copyright 2017 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package chunk + +import ( + "github.com/pingcap/check" + "github.com/pingcap/tidb/parser/mysql" + "github.com/pingcap/tidb/types" + "testing" +) + +func TestT(t *testing.T) { + check.TestingT(t) +} + +var _ = check.Suite(&testChunkSuite{}) + +type testChunkSuite struct{} + +func (s *testChunkSuite) TestIteratorOnSel(c *check.C) { + fields := []*types.FieldType{types.NewFieldType(mysql.TypeLonglong)} + chk := New(fields, 32, 1024) + sel := make([]int, 0, 1024) + for i := 0; i < 1024; i++ { + chk.AppendInt64(0, int64(i)) + if i%2 == 0 { + sel = append(sel, i) + } + } + chk.SetSel(sel) + it := NewIterator4Chunk(chk) + cnt := 0 + for row := it.Begin(); row != it.End(); row = it.Next() { + c.Assert(row.GetInt64(0)%2, check.Equals, int64(0)) + cnt++ + } + c.Assert(cnt, check.Equals, 1024/2) +} + +func (s *testChunkSuite) TestIterator(c *check.C) { + fields := []*types.FieldType{types.NewFieldType(mysql.TypeLonglong)} + chk := New(fields, 32, 1024) + n := 10 + var expected []int64 + for i := 0; i < n; i++ { + chk.AppendInt64(0, int64(i)) + expected = append(expected, int64(i)) + } + var rows []Row + li := NewList(fields, 1, 2) + li2 := NewList(fields, 8, 16) + var ptrs []RowPtr + var ptrs2 []RowPtr + for i := 0; i < n; i++ { + rows = append(rows, chk.GetRow(i)) + ptr := li.AppendRow(chk.GetRow(i)) + ptrs = append(ptrs, ptr) + ptr2 := li2.AppendRow(chk.GetRow(i)) + ptrs2 = append(ptrs2, ptr2) + } + + it := NewIterator4Slice(rows) + checkIterator(c, it, expected) + it.Begin() + for i := 0; i < 5; i++ { + c.Assert(it.Current(), check.Equals, rows[i]) + it.Next() + } + it.ReachEnd() + c.Assert(it.Current(), check.Equals, it.End()) + c.Assert(it.Begin(), check.Equals, rows[0]) + + it = NewIterator4Chunk(chk) + checkIterator(c, it, expected) + it.Begin() + for i := 0; i < 5; i++ { + c.Assert(it.Current(), check.Equals, chk.GetRow(i)) + it.Next() + } + it.ReachEnd() + c.Assert(it.Current(), check.Equals, it.End()) + c.Assert(it.Begin(), check.Equals, chk.GetRow(0)) + + it = NewIterator4List(li) + checkIterator(c, it, expected) + it.Begin() + for i := 0; i < 5; i++ { + c.Assert(it.Current(), check.Equals, li.GetRow(ptrs[i])) + it.Next() + } + it.ReachEnd() + c.Assert(it.Current(), check.Equals, it.End()) + c.Assert(it.Begin(), check.Equals, li.GetRow(ptrs[0])) + + it = NewIterator4RowPtr(li, ptrs) + checkIterator(c, it, expected) + it.Begin() + for i := 0; i < 5; i++ { + c.Assert(it.Current(), check.Equals, li.GetRow(ptrs[i])) + it.Next() + } + it.ReachEnd() + c.Assert(it.Current(), check.Equals, it.End()) + c.Assert(it.Begin(), check.Equals, li.GetRow(ptrs[0])) + + it = NewIterator4RowPtr(li2, ptrs2) + checkIterator(c, it, expected) + it.Begin() + for i := 0; i < 5; i++ { + c.Assert(it.Current(), check.Equals, li2.GetRow(ptrs2[i])) + it.Next() + } + it.ReachEnd() + c.Assert(it.Current(), check.Equals, it.End()) + c.Assert(it.Begin(), check.Equals, li2.GetRow(ptrs2[0])) + + it = NewIterator4Slice(nil) + c.Assert(it.Begin(), check.Equals, it.End()) + it = NewIterator4Chunk(new(Chunk)) + c.Assert(it.Begin(), check.Equals, it.End()) + it = NewIterator4List(new(List)) + c.Assert(it.Begin(), check.Equals, it.End()) + it = NewIterator4RowPtr(li, nil) + c.Assert(it.Begin(), check.Equals, it.End()) +} + +func checkIterator(c *check.C, it Iterator, expected []int64) { + var got []int64 + for row := it.Begin(); row != it.End(); row = it.Next() { + got = append(got, row.GetInt64(0)) + } + c.Assert(got, check.DeepEquals, expected) +} diff --git a/util/chunk/list.go b/util/chunk/list.go new file mode 100644 index 
0000000..d3e4a59 --- /dev/null +++ b/util/chunk/list.go @@ -0,0 +1,173 @@ +// Copyright 2017 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package chunk + +import ( + "github.com/pingcap/errors" + "github.com/pingcap/tidb/types" +) + +// List holds a slice of chunks, use to append rows with max chunk size properly handled. +type List struct { + fieldTypes []*types.FieldType + initChunkSize int + maxChunkSize int + length int + chunks []*Chunk + freelist []*Chunk + + consumedIdx int // chunk index in "chunks", has been consumed. +} + +// RowPtr is used to get a row from a list. +// It is only valid for the list that returns it. +type RowPtr struct { + ChkIdx uint32 + RowIdx uint32 +} + +// NewList creates a new List with field types, init chunk size and max chunk size. +func NewList(fieldTypes []*types.FieldType, initChunkSize, maxChunkSize int) *List { + l := &List{ + fieldTypes: fieldTypes, + initChunkSize: initChunkSize, + maxChunkSize: maxChunkSize, + consumedIdx: -1, + } + return l +} + +// Len returns the length of the List. +func (l *List) Len() int { + return l.length +} + +// NumChunks returns the number of chunks in the List. +func (l *List) NumChunks() int { + return len(l.chunks) +} + +// GetChunk gets the Chunk by ChkIdx. +func (l *List) GetChunk(chkIdx int) *Chunk { + return l.chunks[chkIdx] +} + +// AppendRow appends a row to the List, the row is copied to the List. +func (l *List) AppendRow(row Row) RowPtr { + chkIdx := len(l.chunks) - 1 + if chkIdx == -1 || l.chunks[chkIdx].NumRows() >= l.chunks[chkIdx].Capacity() || chkIdx == l.consumedIdx { + newChk := l.allocChunk() + l.chunks = append(l.chunks, newChk) + if chkIdx != l.consumedIdx { + l.consumedIdx = chkIdx + } + chkIdx++ + } + chk := l.chunks[chkIdx] + rowIdx := chk.NumRows() + chk.AppendRow(row) + l.length++ + return RowPtr{ChkIdx: uint32(chkIdx), RowIdx: uint32(rowIdx)} +} + +// Add adds a chunk to the List, the chunk may be modified later by the list. +// Caller must make sure the input chk is not empty and not used any more and has the same field types. +func (l *List) Add(chk *Chunk) { + // FixMe: we should avoid add a Chunk that chk.NumRows() > list.maxChunkSize. + if chk.NumRows() == 0 { + // TODO: return error here. + panic("chunk appended to List should have at least 1 row") + } + if chkIdx := len(l.chunks) - 1; l.consumedIdx != chkIdx { + l.consumedIdx = chkIdx + } + l.consumedIdx++ + l.chunks = append(l.chunks, chk) + l.length += chk.NumRows() +} + +func (l *List) allocChunk() (chk *Chunk) { + if len(l.freelist) > 0 { + lastIdx := len(l.freelist) - 1 + chk = l.freelist[lastIdx] + l.freelist = l.freelist[:lastIdx] + chk.Reset() + return + } + if len(l.chunks) > 0 { + return Renew(l.chunks[len(l.chunks)-1], l.maxChunkSize) + } + return New(l.fieldTypes, l.initChunkSize, l.maxChunkSize) +} + +// GetRow gets a Row from the list by RowPtr. +func (l *List) GetRow(ptr RowPtr) Row { + chk := l.chunks[ptr.ChkIdx] + return chk.GetRow(int(ptr.RowIdx)) +} + +// Reset resets the List. +func (l *List) Reset() { + l.freelist = append(l.freelist, l.chunks...) 
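+	// The chunks moved into the freelist above are handed back out by
+	// allocChunk, so a Reset recycles memory; the fields below only reset
+	// the bookkeeping.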
+ l.chunks = l.chunks[:0] + l.length = 0 + l.consumedIdx = -1 +} + +// preAlloc4Row pre-allocates the storage memory for a Row. +// NOTE: only used in test +// 1. The List must be empty or holds no useful data. +// 2. The schema of the Row must be the same with the List. +// 3. This API is paired with the `Insert()` function, which inserts all the +// rows data into the List after the pre-allocation. +func (l *List) preAlloc4Row(row Row) (ptr RowPtr) { + chkIdx := len(l.chunks) - 1 + if chkIdx == -1 || l.chunks[chkIdx].NumRows() >= l.chunks[chkIdx].Capacity() { + newChk := l.allocChunk() + l.chunks = append(l.chunks, newChk) + if chkIdx != l.consumedIdx { + l.consumedIdx = chkIdx + } + chkIdx++ + } + chk := l.chunks[chkIdx] + rowIdx := chk.preAlloc(row) + l.length++ + return RowPtr{ChkIdx: uint32(chkIdx), RowIdx: uint32(rowIdx)} +} + +// Insert inserts `row` on the position specified by `ptr`. +// Note: Insert will cover the origin data, it should be called after +// PreAlloc. +func (l *List) Insert(ptr RowPtr, row Row) { + l.chunks[ptr.ChkIdx].insert(int(ptr.RowIdx), row) +} + +// ListWalkFunc is used to walk the list. +// If error is returned, it will stop walking. +type ListWalkFunc = func(row Row) error + +// Walk iterate the list and call walkFunc for each row. +func (l *List) Walk(walkFunc ListWalkFunc) error { + for i := 0; i < len(l.chunks); i++ { + chk := l.chunks[i] + for j := 0; j < chk.NumRows(); j++ { + err := walkFunc(chk.GetRow(j)) + if err != nil { + return errors.Trace(err) + } + } + } + return nil +} diff --git a/util/chunk/list_test.go b/util/chunk/list_test.go new file mode 100644 index 0000000..e6c795d --- /dev/null +++ b/util/chunk/list_test.go @@ -0,0 +1,151 @@ +// Copyright 2017 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package chunk + +import ( + "github.com/pingcap/check" + "github.com/pingcap/tidb/parser/mysql" + "github.com/pingcap/tidb/types" + "strconv" + "strings" + "testing" +) + +func (s *testChunkSuite) TestList(c *check.C) { + fields := []*types.FieldType{ + types.NewFieldType(mysql.TypeLonglong), + } + l := NewList(fields, 2, 2) + srcChunk := NewChunkWithCapacity(fields, 32) + srcChunk.AppendInt64(0, 1) + srcRow := srcChunk.GetRow(0) + + // Test basic append. + for i := 0; i < 5; i++ { + l.AppendRow(srcRow) + } + c.Assert(l.NumChunks(), check.Equals, 3) + c.Assert(l.Len(), check.Equals, 5) + c.Assert(len(l.freelist), check.Equals, 0) + + // Test chunk reuse. + l.Reset() + c.Assert(len(l.freelist), check.Equals, 3) + + for i := 0; i < 5; i++ { + l.AppendRow(srcRow) + } + c.Assert(len(l.freelist), check.Equals, 0) + + // Test add chunk then append row. + l.Reset() + nChunk := NewChunkWithCapacity(fields, 32) + nChunk.AppendNull(0) + l.Add(nChunk) + ptr := l.AppendRow(srcRow) + c.Assert(l.NumChunks(), check.Equals, 2) + c.Assert(ptr.ChkIdx, check.Equals, uint32(1)) + c.Assert(ptr.RowIdx, check.Equals, uint32(0)) + row := l.GetRow(ptr) + c.Assert(row.GetInt64(0), check.Equals, int64(1)) + + // Test iteration. 
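+	// Walk must visit the five rows appended below in insertion order.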
+ l.Reset() + for i := 0; i < 5; i++ { + tmp := NewChunkWithCapacity(fields, 32) + tmp.AppendInt64(0, int64(i)) + l.AppendRow(tmp.GetRow(0)) + } + expected := []int64{0, 1, 2, 3, 4} + var results []int64 + err := l.Walk(func(r Row) error { + results = append(results, r.GetInt64(0)) + return nil + }) + c.Assert(err, check.IsNil) + c.Assert(results, check.DeepEquals, expected) +} + +func (s *testChunkSuite) TestListPrePreAlloc4RowAndInsert(c *check.C) { + fieldTypes := make([]*types.FieldType, 0, 3) + fieldTypes = append(fieldTypes, &types.FieldType{Tp: mysql.TypeFloat}) + fieldTypes = append(fieldTypes, &types.FieldType{Tp: mysql.TypeLonglong}) + fieldTypes = append(fieldTypes, &types.FieldType{Tp: mysql.TypeVarchar}) + + srcChk := NewChunkWithCapacity(fieldTypes, 10) + for i := int64(0); i < 10; i++ { + srcChk.AppendFloat32(0, float32(i)) + srcChk.AppendInt64(1, i) + srcChk.AppendString(2, strings.Repeat(strconv.FormatInt(i, 10), int(i))) + } + + srcList := NewList(fieldTypes, 3, 3) + destList := NewList(fieldTypes, 5, 5) + destRowPtr := make([]RowPtr, srcChk.NumRows()) + for i := 0; i < srcChk.NumRows(); i++ { + srcList.AppendRow(srcChk.GetRow(i)) + destRowPtr[i] = destList.preAlloc4Row(srcChk.GetRow(i)) + } + + c.Assert(srcList.NumChunks(), check.Equals, 4) + c.Assert(destList.NumChunks(), check.Equals, 2) + + iter4Src := NewIterator4List(srcList) + for row, i := iter4Src.Begin(), 0; row != iter4Src.End(); row, i = iter4Src.Next(), i+1 { + destList.Insert(destRowPtr[i], row) + } + + iter4Dest := NewIterator4List(destList) + srcRow, destRow := iter4Src.Begin(), iter4Dest.Begin() + for ; srcRow != iter4Src.End(); srcRow, destRow = iter4Src.Next(), iter4Dest.Next() { + c.Assert(srcRow.GetFloat32(0), check.Equals, destRow.GetFloat32(0)) + c.Assert(srcRow.GetInt64(1), check.Equals, destRow.GetInt64(1)) + c.Assert(srcRow.GetString(2), check.Equals, destRow.GetString(2)) + } +} + +func BenchmarkPreAllocList(b *testing.B) { + fieldTypes := make([]*types.FieldType, 0, 1) + fieldTypes = append(fieldTypes, &types.FieldType{Tp: mysql.TypeLonglong}) + chk := NewChunkWithCapacity(fieldTypes, 1) + chk.AppendInt64(0, 1) + row := chk.GetRow(0) + + b.ResetTimer() + list := NewList(fieldTypes, 1024, 1024) + for i := 0; i < b.N; i++ { + list.Reset() + // 32768 indicates the number of int64 rows to fill 256KB L2 cache. + for j := 0; j < 32768; j++ { + list.preAlloc4Row(row) + } + } +} + +func BenchmarkPreAllocChunk(b *testing.B) { + fieldTypes := make([]*types.FieldType, 0, 1) + fieldTypes = append(fieldTypes, &types.FieldType{Tp: mysql.TypeLonglong}) + chk := NewChunkWithCapacity(fieldTypes, 1) + chk.AppendInt64(0, 1) + row := chk.GetRow(0) + + b.ResetTimer() + finalChk := New(fieldTypes, 33000, 1024) + for i := 0; i < b.N; i++ { + finalChk.Reset() + for j := 0; j < 32768; j++ { + finalChk.preAlloc(row) + } + } +} diff --git a/util/chunk/mutrow.go b/util/chunk/mutrow.go new file mode 100644 index 0000000..df91b73 --- /dev/null +++ b/util/chunk/mutrow.go @@ -0,0 +1,265 @@ +// Copyright 2017 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. 
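+
+// A minimal MutRow flow, sketched here for orientation (the literal values
+// are illustrative only):
+//
+//	mr := MutRowFromValues("abc", int64(1))
+//	mr.SetValues("abcd", int64(2))
+//	row := mr.ToRow() // row.GetString(0) == "abcd", row.GetInt64(1) == 2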
+ +package chunk + +import ( + "encoding/binary" + "math" + "unsafe" + + "github.com/pingcap/tidb/parser/mysql" + "github.com/pingcap/tidb/types" + "github.com/pingcap/tidb/util/hack" +) + +// MutRow represents a mutable Row. +// The underlying columns only contains one row and not exposed to the user. +type MutRow Row + +// ToRow converts the MutRow to Row, so it can be used to read data. +func (mr MutRow) ToRow() Row { + return Row(mr) +} + +// Len returns the number of columns. +func (mr MutRow) Len() int { + return len(mr.c.columns) +} + +// MutRowFromValues creates a MutRow from a interface slice. +func MutRowFromValues(vals ...interface{}) MutRow { + c := &Chunk{columns: make([]*Column, 0, len(vals))} + for _, val := range vals { + col := makeMutRowColumn(val) + c.columns = append(c.columns, col) + } + return MutRow{c: c} +} + +// MutRowFromDatums creates a MutRow from a datum slice. +func MutRowFromDatums(datums []types.Datum) MutRow { + c := &Chunk{columns: make([]*Column, 0, len(datums))} + for _, d := range datums { + col := makeMutRowColumn(d.GetValue()) + c.columns = append(c.columns, col) + } + return MutRow{c: c, idx: 0} +} + +// MutRowFromTypes creates a MutRow from a FieldType slice, each Column is initialized to zero value. +func MutRowFromTypes(types []*types.FieldType) MutRow { + c := &Chunk{columns: make([]*Column, 0, len(types))} + for _, tp := range types { + col := makeMutRowColumn(zeroValForType(tp)) + c.columns = append(c.columns, col) + } + return MutRow{c: c, idx: 0} +} + +func zeroValForType(tp *types.FieldType) interface{} { + switch tp.Tp { + case mysql.TypeFloat: + return float32(0) + case mysql.TypeDouble: + return float64(0) + case mysql.TypeTiny, mysql.TypeShort, mysql.TypeInt24, mysql.TypeLong, mysql.TypeLonglong, mysql.TypeYear: + if mysql.HasUnsignedFlag(tp.Flag) { + return uint64(0) + } + return int64(0) + case mysql.TypeString, mysql.TypeVarString, mysql.TypeVarchar: + return "" + case mysql.TypeBlob, mysql.TypeTinyBlob, mysql.TypeMediumBlob, mysql.TypeLongBlob: + return []byte{} + default: + return nil + } +} + +func makeMutRowColumn(in interface{}) *Column { + switch x := in.(type) { + case nil: + col := makeMutRowUint64Column(uint64(0)) + col.nullBitmap[0] = 0 + return col + case int: + return makeMutRowUint64Column(uint64(x)) + case int64: + return makeMutRowUint64Column(uint64(x)) + case uint64: + return makeMutRowUint64Column(x) + case float64: + return makeMutRowUint64Column(math.Float64bits(x)) + case float32: + col := newMutRowFixedLenColumn(4) + *(*uint32)(unsafe.Pointer(&col.data[0])) = math.Float32bits(x) + return col + case string: + return makeMutRowBytesColumn(hack.Slice(x)) + case []byte: + return makeMutRowBytesColumn(x) + default: + return nil + } +} + +func newMutRowFixedLenColumn(elemSize int) *Column { + buf := make([]byte, elemSize+1) + col := &Column{ + length: 1, + elemBuf: buf[:elemSize], + data: buf[:elemSize], + nullBitmap: buf[elemSize:], + } + col.nullBitmap[0] = 1 + return col +} + +func newMutRowVarLenColumn(valSize int) *Column { + buf := make([]byte, valSize+1) + col := &Column{ + length: 1, + offsets: []int64{0, int64(valSize)}, + data: buf[:valSize], + nullBitmap: buf[valSize:], + } + col.nullBitmap[0] = 1 + return col +} + +func makeMutRowUint64Column(val uint64) *Column { + col := newMutRowFixedLenColumn(8) + *(*uint64)(unsafe.Pointer(&col.data[0])) = val + return col +} + +func makeMutRowBytesColumn(bin []byte) *Column { + col := newMutRowVarLenColumn(len(bin)) + copy(col.data, bin) + col.nullBitmap[0] = 1 + 
return col +} + +// SetRow sets the MutRow with Row. +func (mr MutRow) SetRow(row Row) { + for colIdx, rCol := range row.c.columns { + mrCol := mr.c.columns[colIdx] + if rCol.IsNull(row.idx) { + mrCol.nullBitmap[0] = 0 + continue + } + elemLen := len(rCol.elemBuf) + if elemLen > 0 { + copy(mrCol.data, rCol.data[row.idx*elemLen:(row.idx+1)*elemLen]) + } else { + setMutRowBytes(mrCol, rCol.data[rCol.offsets[row.idx]:rCol.offsets[row.idx+1]]) + } + mrCol.nullBitmap[0] = 1 + } +} + +// SetValues sets the MutRow with values. +func (mr MutRow) SetValues(vals ...interface{}) { + for i, v := range vals { + mr.SetValue(i, v) + } +} + +// SetValue sets the MutRow with colIdx and value. +func (mr MutRow) SetValue(colIdx int, val interface{}) { + col := mr.c.columns[colIdx] + if val == nil { + col.nullBitmap[0] = 0 + return + } + switch x := val.(type) { + case int: + binary.LittleEndian.PutUint64(col.data, uint64(x)) + case int64: + binary.LittleEndian.PutUint64(col.data, uint64(x)) + case uint64: + binary.LittleEndian.PutUint64(col.data, x) + case float64: + binary.LittleEndian.PutUint64(col.data, math.Float64bits(x)) + case float32: + binary.LittleEndian.PutUint32(col.data, math.Float32bits(x)) + case string: + setMutRowBytes(col, hack.Slice(x)) + case []byte: + setMutRowBytes(col, x) + } + col.nullBitmap[0] = 1 +} + +// SetDatums sets the MutRow with datum slice. +func (mr MutRow) SetDatums(datums ...types.Datum) { + for i, d := range datums { + mr.SetDatum(i, d) + } +} + +// SetDatum sets the MutRow with colIdx and datum. +func (mr MutRow) SetDatum(colIdx int, d types.Datum) { + col := mr.c.columns[colIdx] + if d.IsNull() { + col.nullBitmap[0] = 0 + return + } + switch d.Kind() { + case types.KindInt64, types.KindUint64, types.KindFloat64: + binary.LittleEndian.PutUint64(mr.c.columns[colIdx].data, d.GetUint64()) + case types.KindFloat32: + binary.LittleEndian.PutUint32(mr.c.columns[colIdx].data, math.Float32bits(d.GetFloat32())) + case types.KindString, types.KindBytes: + setMutRowBytes(col, d.GetBytes()) + default: + mr.c.columns[colIdx] = makeMutRowColumn(d.GetValue()) + } + col.nullBitmap[0] = 1 +} + +func setMutRowBytes(col *Column, bin []byte) { + if len(col.data) >= len(bin) { + col.data = col.data[:len(bin)] + } else { + buf := make([]byte, len(bin)+1) + col.data = buf[:len(bin)] + col.nullBitmap = buf[len(bin):] + } + copy(col.data, bin) + col.offsets[1] = int64(len(bin)) +} + +// ShallowCopyPartialRow shallow copies the data of `row` to MutRow. +func (mr MutRow) ShallowCopyPartialRow(colIdx int, row Row) { + for i, srcCol := range row.c.columns { + dstCol := mr.c.columns[colIdx+i] + if !srcCol.IsNull(row.idx) { + // MutRow only contains one row, so we can directly set the whole byte. + dstCol.nullBitmap[0] = 1 + } else { + dstCol.nullBitmap[0] = 0 + } + + if srcCol.isFixed() { + elemLen := len(srcCol.elemBuf) + offset := row.idx * elemLen + dstCol.data = srcCol.data[offset : offset+elemLen] + } else { + start, end := srcCol.offsets[row.idx], srcCol.offsets[row.idx+1] + dstCol.data = srcCol.data[start:end] + dstCol.offsets[1] = int64(len(dstCol.data)) + } + } +} diff --git a/util/chunk/mutrow_test.go b/util/chunk/mutrow_test.go new file mode 100644 index 0000000..61e7414 --- /dev/null +++ b/util/chunk/mutrow_test.go @@ -0,0 +1,195 @@ +// Copyright 2017 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package chunk + +import ( + "github.com/pingcap/check" + "github.com/pingcap/tidb/parser/mysql" + "github.com/pingcap/tidb/sessionctx/stmtctx" + "github.com/pingcap/tidb/types" + "testing" +) + +var allTypes = []*types.FieldType{ + types.NewFieldType(mysql.TypeTiny), + types.NewFieldType(mysql.TypeShort), + types.NewFieldType(mysql.TypeInt24), + types.NewFieldType(mysql.TypeLong), + types.NewFieldType(mysql.TypeLonglong), + { + Tp: mysql.TypeLonglong, + Flen: types.UnspecifiedLength, + Decimal: types.UnspecifiedLength, + Flag: mysql.UnsignedFlag, + }, + types.NewFieldType(mysql.TypeYear), + types.NewFieldType(mysql.TypeFloat), + types.NewFieldType(mysql.TypeDouble), + types.NewFieldType(mysql.TypeString), + types.NewFieldType(mysql.TypeVarString), + types.NewFieldType(mysql.TypeVarchar), + types.NewFieldType(mysql.TypeBlob), + types.NewFieldType(mysql.TypeTinyBlob), + types.NewFieldType(mysql.TypeMediumBlob), + types.NewFieldType(mysql.TypeLongBlob), + types.NewFieldType(mysql.TypeDate), + types.NewFieldType(mysql.TypeDatetime), + types.NewFieldType(mysql.TypeTimestamp), + types.NewFieldType(mysql.TypeDuration), + types.NewFieldType(mysql.TypeNewDecimal), + { + Tp: mysql.TypeSet, + Flen: types.UnspecifiedLength, + Decimal: types.UnspecifiedLength, + Flag: mysql.UnsignedFlag, + Elems: []string{"a", "b"}, + }, + { + Tp: mysql.TypeEnum, + Flen: types.UnspecifiedLength, + Decimal: types.UnspecifiedLength, + Flag: mysql.UnsignedFlag, + Elems: []string{"a", "b"}, + }, + types.NewFieldType(mysql.TypeBit), +} + +func (s *testChunkSuite) TestMutRow(c *check.C) { + mutRow := MutRowFromTypes(allTypes) + row := mutRow.ToRow() + sc := new(stmtctx.StatementContext) + for i := 0; i < row.Len(); i++ { + val := zeroValForType(allTypes[i]) + d := row.GetDatum(i, allTypes[i]) + d2 := types.NewDatum(val) + cmp, err := d.CompareDatum(sc, &d2) + c.Assert(err, check.IsNil) + c.Assert(cmp, check.Equals, 0) + } + + mutRow = MutRowFromValues("abc", 123) + c.Assert(row.IsNull(0), check.IsFalse) + c.Assert(mutRow.ToRow().GetString(0), check.Equals, "abc") + c.Assert(row.IsNull(1), check.IsFalse) + c.Assert(mutRow.ToRow().GetInt64(1), check.Equals, int64(123)) + mutRow.SetValues("abcd", 456) + row = mutRow.ToRow() + c.Assert(row.GetString(0), check.Equals, "abcd") + c.Assert(row.IsNull(0), check.IsFalse) + c.Assert(row.GetInt64(1), check.Equals, int64(456)) + c.Assert(row.IsNull(1), check.IsFalse) + mutRow.SetDatums(types.NewStringDatum("defgh"), types.NewIntDatum(33)) + c.Assert(row.IsNull(0), check.IsFalse) + c.Assert(row.GetString(0), check.Equals, "defgh") + c.Assert(row.IsNull(1), check.IsFalse) + c.Assert(row.GetInt64(1), check.Equals, int64(33)) + + mutRow.SetRow(MutRowFromValues("foobar", nil).ToRow()) + row = mutRow.ToRow() + c.Assert(row.IsNull(0), check.IsFalse) + c.Assert(row.IsNull(1), check.IsTrue) + + nRow := MutRowFromValues(nil, 111).ToRow() + c.Assert(nRow.IsNull(0), check.IsTrue) + c.Assert(nRow.IsNull(1), check.IsFalse) + mutRow.SetRow(nRow) + row = mutRow.ToRow() + c.Assert(row.IsNull(0), check.IsTrue) + c.Assert(row.IsNull(1), check.IsFalse) +} + +func BenchmarkMutRowSetDatums(b *testing.B) { + b.ReportAllocs() + mutRow := MutRowFromValues(1, "abcd") + 
datums := []types.Datum{types.NewDatum(1), types.NewDatum("abcd")} + for i := 0; i < b.N; i++ { + mutRow.SetDatums(datums...) + } +} + +func BenchmarkMutRowSetValues(b *testing.B) { + b.ReportAllocs() + mutRow := MutRowFromValues(1, "abcd") + for i := 0; i < b.N; i++ { + mutRow.SetValues(1, "abcd") + } +} + +func BenchmarkMutRowFromTypes(b *testing.B) { + b.ReportAllocs() + tps := []*types.FieldType{ + types.NewFieldType(mysql.TypeLonglong), + types.NewFieldType(mysql.TypeVarchar), + } + for i := 0; i < b.N; i++ { + MutRowFromTypes(tps) + } +} + +func BenchmarkMutRowFromDatums(b *testing.B) { + b.ReportAllocs() + datums := []types.Datum{types.NewDatum(1), types.NewDatum("abc")} + for i := 0; i < b.N; i++ { + MutRowFromDatums(datums) + } +} + +func BenchmarkMutRowFromValues(b *testing.B) { + b.ReportAllocs() + values := []interface{}{1, "abc"} + for i := 0; i < b.N; i++ { + MutRowFromValues(values) + } +} + +func (s *testChunkSuite) TestMutRowShallowCopyPartialRow(c *check.C) { + colTypes := make([]*types.FieldType, 0, 3) + colTypes = append(colTypes, &types.FieldType{Tp: mysql.TypeVarString}) + colTypes = append(colTypes, &types.FieldType{Tp: mysql.TypeLonglong}) + + mutRow := MutRowFromTypes(colTypes) + row := MutRowFromValues("abc", 123).ToRow() + mutRow.ShallowCopyPartialRow(0, row) + c.Assert(row.GetString(0), check.Equals, mutRow.ToRow().GetString(0)) + c.Assert(row.GetInt64(1), check.Equals, mutRow.ToRow().GetInt64(1)) + + row.c.Reset() + d := types.NewStringDatum("dfg") + row.c.AppendDatum(0, &d) + d = types.NewIntDatum(567) + row.c.AppendDatum(1, &d) + + c.Assert(row.GetString(0), check.Equals, mutRow.ToRow().GetString(0)) + c.Assert(row.GetInt64(1), check.Equals, mutRow.ToRow().GetInt64(1)) +} + +var rowsNum = 1024 + +func BenchmarkMutRowShallowCopyPartialRow(b *testing.B) { + b.ReportAllocs() + colTypes := make([]*types.FieldType, 0, 8) + colTypes = append(colTypes, &types.FieldType{Tp: mysql.TypeVarString}) + colTypes = append(colTypes, &types.FieldType{Tp: mysql.TypeVarString}) + colTypes = append(colTypes, &types.FieldType{Tp: mysql.TypeLonglong}) + colTypes = append(colTypes, &types.FieldType{Tp: mysql.TypeLonglong}) + + mutRow := MutRowFromTypes(colTypes) + row := MutRowFromValues("abc", "abcdefg", 123, 456).ToRow() + b.ResetTimer() + for i := 0; i < b.N; i++ { + for j := 0; j < rowsNum; j++ { + mutRow.ShallowCopyPartialRow(0, row) + } + } +} diff --git a/util/chunk/row.go b/util/chunk/row.go new file mode 100644 index 0000000..490beb2 --- /dev/null +++ b/util/chunk/row.go @@ -0,0 +1,138 @@ +// Copyright 2018 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package chunk + +import ( + "github.com/pingcap/tidb/parser/mysql" + "github.com/pingcap/tidb/types" +) + +// Row represents a row of data, can be used to access values. +type Row struct { + c *Chunk + idx int +} + +// Chunk returns the Chunk which the row belongs to. +func (r Row) Chunk() *Chunk { + return r.c +} + +// IsEmpty returns true if the Row is empty. +func (r Row) IsEmpty() bool { + return r == Row{} +} + +// Idx returns the row index of Chunk. 
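+// Together with Chunk it records which physical row of that chunk this Row
+// refers to; the Row stays valid only while the underlying chunk is unchanged.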
+func (r Row) Idx() int { + return r.idx +} + +// Len returns the number of values in the row. +func (r Row) Len() int { + return r.c.NumCols() +} + +// GetInt64 returns the int64 value with the colIdx. +func (r Row) GetInt64(colIdx int) int64 { + return r.c.columns[colIdx].GetInt64(r.idx) +} + +// GetUint64 returns the uint64 value with the colIdx. +func (r Row) GetUint64(colIdx int) uint64 { + return r.c.columns[colIdx].GetUint64(r.idx) +} + +// GetFloat32 returns the float32 value with the colIdx. +func (r Row) GetFloat32(colIdx int) float32 { + return r.c.columns[colIdx].GetFloat32(r.idx) +} + +// GetFloat64 returns the float64 value with the colIdx. +func (r Row) GetFloat64(colIdx int) float64 { + return r.c.columns[colIdx].GetFloat64(r.idx) +} + +// GetString returns the string value with the colIdx. +func (r Row) GetString(colIdx int) string { + return r.c.columns[colIdx].GetString(r.idx) +} + +// GetBytes returns the bytes value with the colIdx. +func (r Row) GetBytes(colIdx int) []byte { + return r.c.columns[colIdx].GetBytes(r.idx) +} + +// GetDatumRow converts chunk.Row to types.DatumRow. +// Keep in mind that GetDatumRow has a reference to r.c, which is a chunk, +// this function works only if the underlying chunk is valid or unchanged. +func (r Row) GetDatumRow(fields []*types.FieldType) []types.Datum { + datumRow := make([]types.Datum, 0, r.c.NumCols()) + for colIdx := 0; colIdx < r.c.NumCols(); colIdx++ { + datum := r.GetDatum(colIdx, fields[colIdx]) + datumRow = append(datumRow, datum) + } + return datumRow +} + +// GetDatum implements the chunk.Row interface. +func (r Row) GetDatum(colIdx int, tp *types.FieldType) types.Datum { + var d types.Datum + switch tp.Tp { + case mysql.TypeTiny, mysql.TypeShort, mysql.TypeInt24, mysql.TypeLong, mysql.TypeLonglong: + if !r.IsNull(colIdx) { + if mysql.HasUnsignedFlag(tp.Flag) { + d.SetUint64(r.GetUint64(colIdx)) + } else { + d.SetInt64(r.GetInt64(colIdx)) + } + } + case mysql.TypeYear: + // FIXBUG: because insert type of TypeYear is definite int64, so we regardless of the unsigned flag. + if !r.IsNull(colIdx) { + d.SetInt64(r.GetInt64(colIdx)) + } + case mysql.TypeFloat: + if !r.IsNull(colIdx) { + d.SetFloat32(r.GetFloat32(colIdx)) + } + case mysql.TypeDouble: + if !r.IsNull(colIdx) { + d.SetFloat64(r.GetFloat64(colIdx)) + } + case mysql.TypeVarchar, mysql.TypeVarString, mysql.TypeString, + mysql.TypeBlob, mysql.TypeTinyBlob, mysql.TypeMediumBlob, mysql.TypeLongBlob: + if !r.IsNull(colIdx) { + d.SetBytes(r.GetBytes(colIdx)) + } + } + return d +} + +// GetRaw returns the underlying raw bytes with the colIdx. +func (r Row) GetRaw(colIdx int) []byte { + return r.c.columns[colIdx].GetRaw(r.idx) +} + +// IsNull returns if the datum in the chunk.Row is null. +func (r Row) IsNull(colIdx int) bool { + return r.c.columns[colIdx].IsNull(r.idx) +} + +// CopyConstruct creates a new row and copies this row's data into it. +func (r Row) CopyConstruct() Row { + newChk := renewWithCapacity(r.c, 1, 1) + newChk.AppendRow(r) + return newChk.GetRow(0) +} diff --git a/util/codec/bench_test.go b/util/codec/bench_test.go new file mode 100644 index 0000000..d473fe6 --- /dev/null +++ b/util/codec/bench_test.go @@ -0,0 +1,78 @@ +// Copyright 2016 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package codec + +import ( + "testing" + + "github.com/pingcap/tidb/parser/mysql" + "github.com/pingcap/tidb/types" + "github.com/pingcap/tidb/util/chunk" +) + +var valueCnt = 100 + +func composeEncodedData(size int) []byte { + values := make([]types.Datum, 0, size) + for i := 0; i < size; i++ { + values = append(values, types.NewDatum(i)) + } + bs, _ := EncodeValue(nil, nil, values...) + return bs +} + +func BenchmarkDecodeWithSize(b *testing.B) { + b.StopTimer() + bs := composeEncodedData(valueCnt) + b.StartTimer() + for i := 0; i < b.N; i++ { + Decode(bs, valueCnt) + } +} + +func BenchmarkDecodeWithOutSize(b *testing.B) { + b.StopTimer() + bs := composeEncodedData(valueCnt) + b.StartTimer() + for i := 0; i < b.N; i++ { + Decode(bs, 1) + } +} + +func BenchmarkEncodeIntWithSize(b *testing.B) { + for i := 0; i < b.N; i++ { + data := make([]byte, 0, 8) + EncodeInt(data, 10) + } +} + +func BenchmarkEncodeIntWithOutSize(b *testing.B) { + for i := 0; i < b.N; i++ { + EncodeInt(nil, 10) + } +} + +func BenchmarkDecodeOneToChunk(b *testing.B) { + str := new(types.Datum) + *str = types.NewStringDatum("a") + var raw []byte + raw = append(raw, bytesFlag) + raw = EncodeBytes(raw, str.GetBytes()) + intType := types.NewFieldType(mysql.TypeLonglong) + b.ResetTimer() + decoder := NewDecoder(chunk.New([]*types.FieldType{intType}, 32, 32), nil) + for i := 0; i < b.N; i++ { + decoder.DecodeOne(raw, 0, intType) + } +} diff --git a/util/codec/bytes.go b/util/codec/bytes.go new file mode 100644 index 0000000..9ef8519 --- /dev/null +++ b/util/codec/bytes.go @@ -0,0 +1,207 @@ +// Copyright 2015 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package codec + +import ( + "encoding/binary" + "runtime" + "unsafe" + + "github.com/pingcap/errors" +) + +const ( + encGroupSize = 8 + encMarker = byte(0xFF) + encPad = byte(0x0) +) + +var ( + pads = make([]byte, encGroupSize) +) + +// EncodeBytes guarantees the encoded value is in ascending order for comparison, +// encoding with the following rule: +// [group1][marker1]...[groupN][markerN] +// group is 8 bytes slice which is padding with 0. +// marker is `0xFF - padding 0 count` +// For example: +// [] -> [0, 0, 0, 0, 0, 0, 0, 0, 247] +// [1, 2, 3] -> [1, 2, 3, 0, 0, 0, 0, 0, 250] +// [1, 2, 3, 0] -> [1, 2, 3, 0, 0, 0, 0, 0, 251] +// [1, 2, 3, 4, 5, 6, 7, 8] -> [1, 2, 3, 4, 5, 6, 7, 8, 255, 0, 0, 0, 0, 0, 0, 0, 0, 247] +// Refer: https://github.com/facebook/mysql-5.6/wiki/MyRocks-record-format#memcomparable-format +func EncodeBytes(b []byte, data []byte) []byte { + // Allocate more space to avoid unnecessary slice growing. 
+ // Assume that the byte slice size is about `(len(data) / encGroupSize + 1) * (encGroupSize + 1)` bytes, + // that is `(len(data) / 8 + 1) * 9` in our implement. + dLen := len(data) + reallocSize := (dLen/encGroupSize + 1) * (encGroupSize + 1) + result := reallocBytes(b, reallocSize) + for idx := 0; idx <= dLen; idx += encGroupSize { + remain := dLen - idx + padCount := 0 + if remain >= encGroupSize { + result = append(result, data[idx:idx+encGroupSize]...) + } else { + padCount = encGroupSize - remain + result = append(result, data[idx:]...) + result = append(result, pads[:padCount]...) + } + + marker := encMarker - byte(padCount) + result = append(result, marker) + } + + return result +} + +func decodeBytes(b []byte, buf []byte, reverse bool) ([]byte, []byte, error) { + if buf == nil { + buf = make([]byte, 0, len(b)) + } + buf = buf[:0] + for { + if len(b) < encGroupSize+1 { + return nil, nil, errors.New("insufficient bytes to decode value") + } + + groupBytes := b[:encGroupSize+1] + + group := groupBytes[:encGroupSize] + marker := groupBytes[encGroupSize] + + var padCount byte + if reverse { + padCount = marker + } else { + padCount = encMarker - marker + } + if padCount > encGroupSize { + return nil, nil, errors.Errorf("invalid marker byte, group bytes %q", groupBytes) + } + + realGroupSize := encGroupSize - padCount + buf = append(buf, group[:realGroupSize]...) + b = b[encGroupSize+1:] + + if padCount != 0 { + var padByte = encPad + if reverse { + padByte = encMarker + } + // Check validity of padding bytes. + for _, v := range group[realGroupSize:] { + if v != padByte { + return nil, nil, errors.Errorf("invalid padding byte, group bytes %q", groupBytes) + } + } + break + } + } + if reverse { + reverseBytes(buf) + } + return b, buf, nil +} + +// DecodeBytes decodes bytes which is encoded by EncodeBytes before, +// returns the leftover bytes and decoded value if no error. +// `buf` is used to buffer data to avoid the cost of makeslice in decodeBytes when DecodeBytes is called by Decoder.DecodeOne. +func DecodeBytes(b []byte, buf []byte) ([]byte, []byte, error) { + return decodeBytes(b, buf, false) +} + +// EncodeBytesDesc first encodes bytes using EncodeBytes, then bitwise reverses +// encoded value to guarantee the encoded value is in descending order for comparison. +func EncodeBytesDesc(b []byte, data []byte) []byte { + n := len(b) + b = EncodeBytes(b, data) + reverseBytes(b[n:]) + return b +} + +// DecodeBytesDesc decodes bytes which is encoded by EncodeBytesDesc before, +// returns the leftover bytes and decoded value if no error. +func DecodeBytesDesc(b []byte, buf []byte) ([]byte, []byte, error) { + return decodeBytes(b, buf, true) +} + +// EncodeCompactBytes joins bytes with its length into a byte slice. It is more +// efficient in both space and time compare to EncodeBytes. Note that the encoded +// result is not memcomparable. +func EncodeCompactBytes(b []byte, data []byte) []byte { + b = reallocBytes(b, binary.MaxVarintLen64+len(data)) + b = EncodeVarint(b, int64(len(data))) + return append(b, data...) +} + +// DecodeCompactBytes decodes bytes which is encoded by EncodeCompactBytes before. 
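+// It returns the bytes left over after the element, the decoded data, and any
+// error. A round-trip sketch:
+//
+//	enc := EncodeCompactBytes(nil, []byte("ab"))
+//	rest, data, err := DecodeCompactBytes(enc)
+//	// err == nil, len(rest) == 0 and string(data) == "ab"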
+func DecodeCompactBytes(b []byte) ([]byte, []byte, error) { + b, n, err := DecodeVarint(b) + if err != nil { + return nil, nil, errors.Trace(err) + } + if int64(len(b)) < n { + return nil, nil, errors.Errorf("insufficient bytes to decode value, expected length: %v", n) + } + return b[n:], b[:n], nil +} + +// See https://golang.org/src/crypto/cipher/xor.go +const wordSize = int(unsafe.Sizeof(uintptr(0))) +const supportsUnaligned = runtime.GOARCH == "386" || runtime.GOARCH == "amd64" + +func fastReverseBytes(b []byte) { + n := len(b) + w := n / wordSize + if w > 0 { + bw := *(*[]uintptr)(unsafe.Pointer(&b)) + for i := 0; i < w; i++ { + bw[i] = ^bw[i] + } + } + + for i := w * wordSize; i < n; i++ { + b[i] = ^b[i] + } +} + +func safeReverseBytes(b []byte) { + for i := range b { + b[i] = ^b[i] + } +} + +func reverseBytes(b []byte) { + if supportsUnaligned { + fastReverseBytes(b) + return + } + + safeReverseBytes(b) +} + +// reallocBytes is like realloc. +func reallocBytes(b []byte, n int) []byte { + newSize := len(b) + n + if cap(b) < newSize { + bs := make([]byte, len(b), newSize) + copy(bs, b) + return bs + } + + // slice b has capability to store n bytes + return b +} diff --git a/util/codec/bytes_test.go b/util/codec/bytes_test.go new file mode 100644 index 0000000..00b482e --- /dev/null +++ b/util/codec/bytes_test.go @@ -0,0 +1,84 @@ +// Copyright 2015 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package codec + +import ( + . 
"github.com/pingcap/check" + "github.com/pingcap/tidb/util/testleak" +) + +var _ = Suite(&testBytesSuite{}) + +type testBytesSuite struct { +} + +func (s *testBytesSuite) TestBytesCodec(c *C) { + defer testleak.AfterTest(c)() + inputs := []struct { + enc []byte + dec []byte + desc bool + }{ + {[]byte{}, []byte{0, 0, 0, 0, 0, 0, 0, 0, 247}, false}, + {[]byte{}, []byte{255, 255, 255, 255, 255, 255, 255, 255, 8}, true}, + {[]byte{0}, []byte{0, 0, 0, 0, 0, 0, 0, 0, 248}, false}, + {[]byte{0}, []byte{255, 255, 255, 255, 255, 255, 255, 255, 7}, true}, + {[]byte{1, 2, 3}, []byte{1, 2, 3, 0, 0, 0, 0, 0, 250}, false}, + {[]byte{1, 2, 3}, []byte{254, 253, 252, 255, 255, 255, 255, 255, 5}, true}, + {[]byte{1, 2, 3, 0}, []byte{1, 2, 3, 0, 0, 0, 0, 0, 251}, false}, + {[]byte{1, 2, 3, 0}, []byte{254, 253, 252, 255, 255, 255, 255, 255, 4}, true}, + {[]byte{1, 2, 3, 4, 5, 6, 7}, []byte{1, 2, 3, 4, 5, 6, 7, 0, 254}, false}, + {[]byte{1, 2, 3, 4, 5, 6, 7}, []byte{254, 253, 252, 251, 250, 249, 248, 255, 1}, true}, + {[]byte{0, 0, 0, 0, 0, 0, 0, 0}, []byte{0, 0, 0, 0, 0, 0, 0, 0, 255, 0, 0, 0, 0, 0, 0, 0, 0, 247}, false}, + {[]byte{0, 0, 0, 0, 0, 0, 0, 0}, []byte{255, 255, 255, 255, 255, 255, 255, 255, 0, 255, 255, 255, 255, 255, 255, 255, 255, 8}, true}, + {[]byte{1, 2, 3, 4, 5, 6, 7, 8}, []byte{1, 2, 3, 4, 5, 6, 7, 8, 255, 0, 0, 0, 0, 0, 0, 0, 0, 247}, false}, + {[]byte{1, 2, 3, 4, 5, 6, 7, 8}, []byte{254, 253, 252, 251, 250, 249, 248, 247, 0, 255, 255, 255, 255, 255, 255, 255, 255, 8}, true}, + {[]byte{1, 2, 3, 4, 5, 6, 7, 8, 9}, []byte{1, 2, 3, 4, 5, 6, 7, 8, 255, 9, 0, 0, 0, 0, 0, 0, 0, 248}, false}, + {[]byte{1, 2, 3, 4, 5, 6, 7, 8, 9}, []byte{254, 253, 252, 251, 250, 249, 248, 247, 0, 246, 255, 255, 255, 255, 255, 255, 255, 7}, true}, + } + + for _, input := range inputs { + if input.desc { + b := EncodeBytesDesc(nil, input.enc) + c.Assert(b, BytesEquals, input.dec) + _, d, err := DecodeBytesDesc(b, nil) + c.Assert(err, IsNil) + c.Assert(d, BytesEquals, input.enc) + } else { + b := EncodeBytes(nil, input.enc) + c.Assert(b, BytesEquals, input.dec) + _, d, err := DecodeBytes(b, nil) + c.Assert(err, IsNil) + c.Assert(d, BytesEquals, input.enc) + } + } + + // Test error decode. + errInputs := [][]byte{ + {1, 2, 3, 4}, + {0, 0, 0, 0, 0, 0, 0, 247}, + {0, 0, 0, 0, 0, 0, 0, 0, 246}, + {0, 0, 0, 0, 0, 0, 0, 1, 247}, + {1, 2, 3, 4, 5, 6, 7, 8, 0}, + {1, 2, 3, 4, 5, 6, 7, 8, 255, 1}, + {1, 2, 3, 4, 5, 6, 7, 8, 255, 1, 2, 3, 4, 5, 6, 7, 8}, + {1, 2, 3, 4, 5, 6, 7, 8, 255, 1, 2, 3, 4, 5, 6, 7, 8, 255}, + {1, 2, 3, 4, 5, 6, 7, 8, 255, 1, 2, 3, 4, 5, 6, 7, 8, 0}, + } + + for _, input := range errInputs { + _, _, err := DecodeBytes(input, nil) + c.Assert(err, NotNil) + } +} diff --git a/util/codec/codec.go b/util/codec/codec.go new file mode 100644 index 0000000..487124f --- /dev/null +++ b/util/codec/codec.go @@ -0,0 +1,746 @@ +// Copyright 2015 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package codec + +import ( + "bytes" + "encoding/binary" + "fmt" + "hash" + "io" + "time" + "unsafe" + + "github.com/pingcap/errors" + "github.com/pingcap/tidb/parser/mysql" + "github.com/pingcap/tidb/sessionctx/stmtctx" + "github.com/pingcap/tidb/types" + "github.com/pingcap/tidb/util/chunk" +) + +// First byte in the encoded value which specifies the encoding type. +const ( + NilFlag byte = 0 + bytesFlag byte = 1 + compactBytesFlag byte = 2 + intFlag byte = 3 + uintFlag byte = 4 + floatFlag byte = 5 + decimalFlag byte = 6 + durationFlag byte = 7 + varintFlag byte = 8 + uvarintFlag byte = 9 + jsonFlag byte = 10 + maxFlag byte = 250 +) + +const ( + sizeUint64 = unsafe.Sizeof(uint64(0)) + sizeFloat64 = unsafe.Sizeof(float64(0)) +) + +func preRealloc(b []byte, vals []types.Datum, comparable bool) []byte { + var size int + for i := range vals { + switch vals[i].Kind() { + case types.KindInt64, types.KindUint64: + size += sizeInt(comparable) + case types.KindString, types.KindBytes: + size += sizeBytes(vals[i].GetBytes(), comparable) + case types.KindFloat32, types.KindFloat64: + size += 9 + case types.KindNull, types.KindMinNotNull, types.KindMaxValue: + size += 1 + default: + return b + } + } + return reallocBytes(b, size) +} + +// encode will encode a datum and append it to a byte slice. If comparable is true, the encoded bytes can be sorted as it's original order. +// If hash is true, the encoded bytes can be checked equal as it's original value. +func encode(sc *stmtctx.StatementContext, b []byte, vals []types.Datum, comparable bool) (_ []byte, err error) { + b = preRealloc(b, vals, comparable) + for i, length := 0, len(vals); i < length; i++ { + switch vals[i].Kind() { + case types.KindInt64: + b = encodeSignedInt(b, vals[i].GetInt64(), comparable) + case types.KindUint64: + b = encodeUnsignedInt(b, vals[i].GetUint64(), comparable) + case types.KindFloat32, types.KindFloat64: + b = append(b, floatFlag) + b = EncodeFloat(b, vals[i].GetFloat64()) + case types.KindString, types.KindBytes: + b = encodeBytes(b, vals[i].GetBytes(), comparable) + case types.KindNull: + b = append(b, NilFlag) + case types.KindMinNotNull: + b = append(b, bytesFlag) + case types.KindMaxValue: + b = append(b, maxFlag) + default: + return b, errors.Errorf("unsupport encode type %d", vals[i].Kind()) + } + } + + return b, errors.Trace(err) +} + +// EstimateValueSize uses to estimate the value size of the encoded values. 
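+// The estimate is intended to match the length EncodeValue would produce for
+// the same datum; for example, EstimateValueSize(sc, types.NewIntDatum(1))
+// returns 2 (one flag byte plus a one-byte varint) with a nil error.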
+func EstimateValueSize(sc *stmtctx.StatementContext, val types.Datum) (int, error) { + l := 0 + switch val.Kind() { + case types.KindInt64: + l = valueSizeOfSignedInt(val.GetInt64()) + case types.KindUint64: + l = valueSizeOfUnsignedInt(val.GetUint64()) + case types.KindFloat32, types.KindFloat64: + l = 9 + case types.KindString, types.KindBytes: + l = valueSizeOfBytes(val.GetBytes()) + case types.KindNull, types.KindMinNotNull, types.KindMaxValue: + l = 1 + default: + return l, errors.Errorf("unsupported encode type %d", val.Kind()) + } + return l, nil +} + +func encodeBytes(b []byte, v []byte, comparable bool) []byte { + if comparable { + b = append(b, bytesFlag) + b = EncodeBytes(b, v) + } else { + b = append(b, compactBytesFlag) + b = EncodeCompactBytes(b, v) + } + return b +} + +func valueSizeOfBytes(v []byte) int { + return valueSizeOfSignedInt(int64(len(v))) + len(v) +} + +func sizeBytes(v []byte, comparable bool) int { + if comparable { + reallocSize := (len(v)/encGroupSize + 1) * (encGroupSize + 1) + return 1 + reallocSize + } + reallocSize := binary.MaxVarintLen64 + len(v) + return 1 + reallocSize +} + +func encodeSignedInt(b []byte, v int64, comparable bool) []byte { + if comparable { + b = append(b, intFlag) + b = EncodeInt(b, v) + } else { + b = append(b, varintFlag) + b = EncodeVarint(b, v) + } + return b +} + +func valueSizeOfSignedInt(v int64) int { + if v < 0 { + v = 0 - v - 1 + } + // Flag occupy 1 bit and at lease 1 bit. + size := 2 + v = v >> 6 + for v > 0 { + size++ + v = v >> 7 + } + return size +} + +func encodeUnsignedInt(b []byte, v uint64, comparable bool) []byte { + if comparable { + b = append(b, uintFlag) + b = EncodeUint(b, v) + } else { + b = append(b, uvarintFlag) + b = EncodeUvarint(b, v) + } + return b +} + +func valueSizeOfUnsignedInt(v uint64) int { + // Flag occupy 1 bit and at lease 1 bit. + size := 2 + v = v >> 7 + for v > 0 { + size++ + v = v >> 7 + } + return size +} + +func sizeInt(comparable bool) int { + if comparable { + return 9 + } + return 1 + binary.MaxVarintLen64 +} + +// EncodeKey appends the encoded values to byte slice b, returns the appended +// slice. It guarantees the encoded value is in ascending order for comparison. +// For Decimal type, datum must set datum's length and frac. +func EncodeKey(sc *stmtctx.StatementContext, b []byte, v ...types.Datum) ([]byte, error) { + return encode(sc, b, v, true) +} + +// EncodeValue appends the encoded values to byte slice b, returning the appended +// slice. It does not guarantee the order for comparison. 
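+//
+// A minimal round-trip sketch (a nil StatementContext suffices for the plain
+// kinds shown here):
+//
+//	b, err := EncodeValue(nil, nil, types.NewIntDatum(1), types.NewStringDatum("a"))
+//	// on success, Decode(b, 2) turns the bytes back into two datums.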
+func EncodeValue(sc *stmtctx.StatementContext, b []byte, v ...types.Datum) ([]byte, error) { + return encode(sc, b, v, false) +} + +func encodeHashChunkRowIdx(sc *stmtctx.StatementContext, row chunk.Row, tp *types.FieldType, idx int) (flag byte, b []byte, err error) { + if row.IsNull(idx) { + flag = NilFlag + return + } + switch tp.Tp { + case mysql.TypeTiny, mysql.TypeShort, mysql.TypeInt24, mysql.TypeLong, mysql.TypeLonglong, mysql.TypeYear: + flag = varintFlag + if mysql.HasUnsignedFlag(tp.Flag) { + if integer := row.GetInt64(idx); integer < 0 { + flag = uvarintFlag + } + } + b = row.GetRaw(idx) + case mysql.TypeFloat: + flag = floatFlag + f := float64(row.GetFloat32(idx)) + b = (*[unsafe.Sizeof(f)]byte)(unsafe.Pointer(&f))[:] + case mysql.TypeDouble: + flag = floatFlag + b = row.GetRaw(idx) + case mysql.TypeVarchar, mysql.TypeVarString, mysql.TypeString, mysql.TypeBlob, mysql.TypeTinyBlob, mysql.TypeMediumBlob, mysql.TypeLongBlob: + flag = compactBytesFlag + b = row.GetBytes(idx) + default: + return 0, nil, errors.Errorf("unsupport column type for encode %d", tp.Tp) + } + return +} + +// HashChunkColumns writes the encoded value of each row's column, which of index `colIdx`, to h. +func HashChunkColumns(sc *stmtctx.StatementContext, h []hash.Hash64, chk *chunk.Chunk, tp *types.FieldType, colIdx int, buf []byte, isNull []bool) (err error) { + return HashChunkSelected(sc, h, chk, tp, colIdx, buf, isNull, nil) +} + +// HashChunkSelected writes the encoded value of selected row's column, which of index `colIdx`, to h. +// sel indicates which rows are selected. If it is nil, all rows are selected. +func HashChunkSelected(sc *stmtctx.StatementContext, h []hash.Hash64, chk *chunk.Chunk, tp *types.FieldType, colIdx int, buf []byte, + isNull, sel []bool) (err error) { + var b []byte + column := chk.Column(colIdx) + rows := chk.NumRows() + switch tp.Tp { + case mysql.TypeTiny, mysql.TypeShort, mysql.TypeInt24, mysql.TypeLong, mysql.TypeLonglong, mysql.TypeYear: + i64s := column.Int64s() + for i, v := range i64s { + if sel != nil && !sel[i] { + continue + } + if column.IsNull(i) { + buf[0], b = NilFlag, nil + isNull[i] = true + } else { + buf[0] = varintFlag + if mysql.HasUnsignedFlag(tp.Flag) && v < 0 { + buf[0] = uvarintFlag + } + b = column.GetRaw(i) + } + + // As the golang doc described, `Hash.Write` never returns an error. + // See https://golang.org/pkg/hash/#Hash + _, _ = h[i].Write(buf) + _, _ = h[i].Write(b) + } + case mysql.TypeFloat: + f32s := column.Float32s() + for i, f := range f32s { + if sel != nil && !sel[i] { + continue + } + if column.IsNull(i) { + buf[0], b = NilFlag, nil + isNull[i] = true + } else { + buf[0] = floatFlag + d := float64(f) + b = (*[sizeFloat64]byte)(unsafe.Pointer(&d))[:] + } + + // As the golang doc described, `Hash.Write` never returns an error. + // See https://golang.org/pkg/hash/#Hash + _, _ = h[i].Write(buf) + _, _ = h[i].Write(b) + } + case mysql.TypeDouble: + f64s := column.Float64s() + for i, f := range f64s { + if sel != nil && !sel[i] { + continue + } + if column.IsNull(i) { + buf[0], b = NilFlag, nil + isNull[i] = true + } else { + buf[0] = floatFlag + b = (*[sizeFloat64]byte)(unsafe.Pointer(&f))[:] + } + + // As the golang doc described, `Hash.Write` never returns an error. 
+ // See https://golang.org/pkg/hash/#Hash + _, _ = h[i].Write(buf) + _, _ = h[i].Write(b) + } + case mysql.TypeVarchar, mysql.TypeVarString, mysql.TypeString, mysql.TypeBlob, mysql.TypeTinyBlob, mysql.TypeMediumBlob, mysql.TypeLongBlob: + for i := 0; i < rows; i++ { + if sel != nil && !sel[i] { + continue + } + if column.IsNull(i) { + buf[0], b = NilFlag, nil + isNull[i] = true + } else { + buf[0] = compactBytesFlag + b = column.GetBytes(i) + } + + // As the golang doc described, `Hash.Write` never returns an error. + // See https://golang.org/pkg/hash/#Hash + _, _ = h[i].Write(buf) + _, _ = h[i].Write(b) + } + default: + return errors.Errorf("unsupport column type for encode %d", tp.Tp) + } + return +} + +// HashChunkRow writes the encoded values to w. +// If two rows are logically equal, it will generate the same bytes. +func HashChunkRow(sc *stmtctx.StatementContext, w io.Writer, row chunk.Row, allTypes []*types.FieldType, colIdx []int, buf []byte) (err error) { + var b []byte + for _, idx := range colIdx { + buf[0], b, err = encodeHashChunkRowIdx(sc, row, allTypes[idx], idx) + if err != nil { + return errors.Trace(err) + } + _, err = w.Write(buf) + if err != nil { + return + } + _, err = w.Write(b) + if err != nil { + return + } + } + return err +} + +// EqualChunkRow returns a boolean reporting whether row1 and row2 +// with their types and column index are logically equal. +func EqualChunkRow(sc *stmtctx.StatementContext, + row1 chunk.Row, allTypes1 []*types.FieldType, colIdx1 []int, + row2 chunk.Row, allTypes2 []*types.FieldType, colIdx2 []int, +) (bool, error) { + for i := range colIdx1 { + idx1, idx2 := colIdx1[i], colIdx2[i] + flag1, b1, err := encodeHashChunkRowIdx(sc, row1, allTypes1[idx1], idx1) + if err != nil { + return false, errors.Trace(err) + } + flag2, b2, err := encodeHashChunkRowIdx(sc, row2, allTypes2[idx2], idx2) + if err != nil { + return false, errors.Trace(err) + } + if !(flag1 == flag2 && bytes.Equal(b1, b2)) { + return false, nil + } + } + return true, nil +} + +// Decode decodes values from a byte slice generated with EncodeKey or EncodeValue +// before. +// size is the size of decoded datum slice. +func Decode(b []byte, size int) ([]types.Datum, error) { + if len(b) < 1 { + return nil, errors.New("invalid encoded key") + } + + var ( + err error + values = make([]types.Datum, 0, size) + ) + + for len(b) > 0 { + var d types.Datum + b, d, err = DecodeOne(b) + if err != nil { + return nil, errors.Trace(err) + } + + values = append(values, d) + } + + return values, nil +} + +// DecodeRange decodes the range values from a byte slice that generated by EncodeKey. +// It handles some special values like `MinNotNull` and `MaxValueDatum`. 
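A hedged usage sketch of the exported round trip (EncodeKey, then Decode), assuming the import paths used elsewhere in this diff; illustration only, not part of the change:

```go
package main

import (
	"fmt"
	"time"

	"github.com/pingcap/tidb/sessionctx/stmtctx"
	"github.com/pingcap/tidb/types"
	"github.com/pingcap/tidb/util/codec"
)

func main() {
	sc := &stmtctx.StatementContext{TimeZone: time.Local}

	// EncodeKey produces mem-comparable bytes for the given datums.
	key, err := codec.EncodeKey(sc, nil, types.MakeDatums(int64(1), "abc")...)
	if err != nil {
		panic(err)
	}

	// Decode reverses it; the second argument is only a capacity hint.
	datums, err := codec.Decode(key, 2)
	if err != nil {
		panic(err)
	}
	fmt.Println(datums[0].GetInt64(), string(datums[1].GetBytes())) // 1 abc
}
```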
+func DecodeRange(b []byte, size int) ([]types.Datum, []byte, error) { + if len(b) < 1 { + return nil, b, errors.New("invalid encoded key: length of key is zero") + } + + var ( + err error + values = make([]types.Datum, 0, size) + ) + + for len(b) > 1 { + var d types.Datum + b, d, err = DecodeOne(b) + if err != nil { + return values, b, errors.Trace(err) + } + values = append(values, d) + } + + if len(b) == 1 { + switch b[0] { + case NilFlag: + values = append(values, types.Datum{}) + case bytesFlag: + values = append(values, types.MinNotNullDatum()) + // `maxFlag + 1` for PrefixNext + case maxFlag, maxFlag + 1: + values = append(values, types.MaxValueDatum()) + default: + return values, b, errors.Errorf("invalid encoded key flag %v", b[0]) + } + } + return values, nil, nil +} + +// DecodeOne decodes on datum from a byte slice generated with EncodeKey or EncodeValue. +func DecodeOne(b []byte) (remain []byte, d types.Datum, err error) { + if len(b) < 1 { + return nil, d, errors.New("invalid encoded key") + } + flag := b[0] + b = b[1:] + switch flag { + case intFlag: + var v int64 + b, v, err = DecodeInt(b) + d.SetInt64(v) + case uintFlag: + var v uint64 + b, v, err = DecodeUint(b) + d.SetUint64(v) + case varintFlag: + var v int64 + b, v, err = DecodeVarint(b) + d.SetInt64(v) + case uvarintFlag: + var v uint64 + b, v, err = DecodeUvarint(b) + d.SetUint64(v) + case floatFlag: + var v float64 + b, v, err = DecodeFloat(b) + d.SetFloat64(v) + case bytesFlag: + var v []byte + b, v, err = DecodeBytes(b, nil) + d.SetBytes(v) + case compactBytesFlag: + var v []byte + b, v, err = DecodeCompactBytes(b) + d.SetBytes(v) + case NilFlag: + default: + return b, d, errors.Errorf("invalid encoded key flag %v", flag) + } + if err != nil { + return b, d, errors.Trace(err) + } + return b, d, nil +} + +// CutOne cuts the first encoded value from b. +// It will return the first encoded item and the remains as byte slice. +func CutOne(b []byte) (data []byte, remain []byte, err error) { + l, err := peek(b) + if err != nil { + return nil, nil, errors.Trace(err) + } + return b[:l], b[l:], nil +} + +// CutColumnID cuts the column ID from b. +// It will return the remains as byte slice and column ID +func CutColumnID(b []byte) (remain []byte, n int64, err error) { + if len(b) < 1 { + return nil, 0, errors.New("invalid encoded key") + } + // skip the flag + b = b[1:] + return DecodeVarint(b) +} + +// SetRawValues set raw datum values from a row data. +func SetRawValues(data []byte, values []types.Datum) error { + for i := 0; i < len(values); i++ { + l, err := peek(data) + if err != nil { + return errors.Trace(err) + } + values[i].SetRaw(data[:l:l]) + data = data[l:] + } + return nil +} + +// peek peeks the first encoded value from b and returns its length. +func peek(b []byte) (length int, err error) { + if len(b) < 1 { + return 0, errors.New("invalid encoded key") + } + flag := b[0] + length++ + b = b[1:] + var l int + switch flag { + case NilFlag: + case intFlag, uintFlag, floatFlag, durationFlag: + // Those types are stored in 8 bytes. 
+ l = 8 + case bytesFlag: + l, err = peekBytes(b) + case compactBytesFlag: + l, err = peekCompactBytes(b) + case varintFlag: + l, err = peekVarint(b) + case uvarintFlag: + l, err = peekUvarint(b) + default: + return 0, errors.Errorf("invalid encoded key flag %v", flag) + } + if err != nil { + return 0, errors.Trace(err) + } + length += l + return +} + +func peekBytes(b []byte) (int, error) { + offset := 0 + for { + if len(b) < offset+encGroupSize+1 { + return 0, errors.New("insufficient bytes to decode value") + } + // The byte slice is encoded into many groups. + // For each group, there are 8 bytes for data and 1 byte for marker. + marker := b[offset+encGroupSize] + padCount := encMarker - marker + offset += encGroupSize + 1 + // When padCount is not zero, it means we get the end of the byte slice. + if padCount != 0 { + break + } + } + return offset, nil +} + +func peekCompactBytes(b []byte) (int, error) { + // Get length. + v, n := binary.Varint(b) + vi := int(v) + if n < 0 { + return 0, errors.New("value larger than 64 bits") + } else if n == 0 { + return 0, errors.New("insufficient bytes to decode value") + } + if len(b) < vi+n { + return 0, errors.Errorf("insufficient bytes to decode value, expected length: %v", n) + } + return n + vi, nil +} + +func peekVarint(b []byte) (int, error) { + _, n := binary.Varint(b) + if n < 0 { + return 0, errors.New("value larger than 64 bits") + } + return n, nil +} + +func peekUvarint(b []byte) (int, error) { + _, n := binary.Uvarint(b) + if n < 0 { + return 0, errors.New("value larger than 64 bits") + } + return n, nil +} + +// Decoder is used to decode value to chunk. +type Decoder struct { + chk *chunk.Chunk + timezone *time.Location + + // buf is only used for DecodeBytes to avoid the cost of makeslice. + buf []byte +} + +// NewDecoder creates a Decoder. +func NewDecoder(chk *chunk.Chunk, timezone *time.Location) *Decoder { + return &Decoder{ + chk: chk, + timezone: timezone, + } +} + +// DecodeOne decodes one value to chunk and returns the remained bytes. 
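The grouped layout that `peekBytes` above walks can be sketched standalone. This assumes the constants defined elsewhere in this diff (`encGroupSize` = 8, a marker byte of 0xFF minus the pad count, zero padding), so treat it as an illustration rather than the canonical encoder:

```go
package main

import "fmt"

const (
	encGroupSize = 8          // assumed: 8 data bytes per group
	encMarker    = byte(0xFF) // assumed: marker = 0xFF - padCount
	encPad       = byte(0x00)
)

// encodeGroupedBytes pads data into 8-byte groups, each followed by a marker
// byte; only the final group carries a non-zero pad count, which is what lets
// peekBytes know where the encoded bytes end.
func encodeGroupedBytes(data []byte) []byte {
	var out []byte
	for len(data) >= encGroupSize {
		out = append(out, data[:encGroupSize]...)
		out = append(out, encMarker) // full group, no padding
		data = data[encGroupSize:]
	}
	pad := encGroupSize - len(data)
	out = append(out, data...)
	for i := 0; i < pad; i++ {
		out = append(out, encPad)
	}
	return append(out, encMarker-byte(pad))
}

func main() {
	enc := encodeGroupedBytes([]byte("abc"))
	// One 9-byte group: "abc" + 5 pad bytes + marker 0xFF-5 = 250.
	fmt.Println(len(enc), enc[len(enc)-1]) // 9 250
}
```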
+func (decoder *Decoder) DecodeOne(b []byte, colIdx int, ft *types.FieldType) (remain []byte, err error) { + if len(b) < 1 { + return nil, errors.New("invalid encoded key") + } + chk := decoder.chk + flag := b[0] + b = b[1:] + switch flag { + case intFlag: + var v int64 + b, v, err = DecodeInt(b) + if err != nil { + return nil, errors.Trace(err) + } + appendIntToChunk(v, chk, colIdx, ft) + case uintFlag: + var v uint64 + b, v, err = DecodeUint(b) + if err != nil { + return nil, errors.Trace(err) + } + err = appendUintToChunk(v, chk, colIdx, ft, decoder.timezone) + case varintFlag: + var v int64 + b, v, err = DecodeVarint(b) + if err != nil { + return nil, errors.Trace(err) + } + appendIntToChunk(v, chk, colIdx, ft) + case uvarintFlag: + var v uint64 + b, v, err = DecodeUvarint(b) + if err != nil { + return nil, errors.Trace(err) + } + err = appendUintToChunk(v, chk, colIdx, ft, decoder.timezone) + case floatFlag: + var v float64 + b, v, err = DecodeFloat(b) + if err != nil { + return nil, errors.Trace(err) + } + appendFloatToChunk(v, chk, colIdx, ft) + case bytesFlag: + b, decoder.buf, err = DecodeBytes(b, decoder.buf) + if err != nil { + return nil, errors.Trace(err) + } + chk.AppendBytes(colIdx, decoder.buf) + case compactBytesFlag: + var v []byte + b, v, err = DecodeCompactBytes(b) + if err != nil { + return nil, errors.Trace(err) + } + chk.AppendBytes(colIdx, v) + case NilFlag: + chk.AppendNull(colIdx) + default: + return nil, errors.Errorf("invalid encoded key flag %v", flag) + } + if err != nil { + return nil, errors.Trace(err) + } + return b, nil +} + +func appendIntToChunk(val int64, chk *chunk.Chunk, colIdx int, ft *types.FieldType) { + chk.AppendInt64(colIdx, val) +} + +func appendUintToChunk(val uint64, chk *chunk.Chunk, colIdx int, ft *types.FieldType, loc *time.Location) error { + chk.AppendUint64(colIdx, val) + return nil +} + +func appendFloatToChunk(val float64, chk *chunk.Chunk, colIdx int, ft *types.FieldType) { + if ft.Tp == mysql.TypeFloat { + chk.AppendFloat32(colIdx, float32(val)) + } else { + chk.AppendFloat64(colIdx, val) + } +} + +// HashGroupKey encodes each row of this column and append encoded data into buf. +// Only use in the aggregate executor. +func HashGroupKey(sc *stmtctx.StatementContext, n int, col *chunk.Column, buf [][]byte, ft *types.FieldType) ([][]byte, error) { + switch ft.EvalType() { + case types.ETInt: + i64s := col.Int64s() + for i := 0; i < n; i++ { + if col.IsNull(i) { + buf[i] = append(buf[i], NilFlag) + } else { + buf[i] = encodeSignedInt(buf[i], i64s[i], false) + } + } + case types.ETReal: + f64s := col.Float64s() + for i := 0; i < n; i++ { + if col.IsNull(i) { + buf[i] = append(buf[i], NilFlag) + } else { + buf[i] = append(buf[i], floatFlag) + buf[i] = EncodeFloat(buf[i], f64s[i]) + } + } + case types.ETString: + for i := 0; i < n; i++ { + if col.IsNull(i) { + buf[i] = append(buf[i], NilFlag) + } else { + buf[i] = encodeBytes(buf[i], col.GetBytes(i), false) + } + } + default: + return nil, errors.New(fmt.Sprintf("invalid eval type %v", ft.EvalType())) + } + return buf, nil +} diff --git a/util/codec/codec_test.go b/util/codec/codec_test.go new file mode 100644 index 0000000..6581811 --- /dev/null +++ b/util/codec/codec_test.go @@ -0,0 +1,866 @@ +// Copyright 2015 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
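A hedged sketch of feeding EncodeValue output back through the chunk Decoder above (import paths as used elsewhere in this diff); the decoder appends each decoded value into the column of the chunk passed to NewDecoder:

```go
package main

import (
	"fmt"
	"time"

	"github.com/pingcap/tidb/parser/mysql"
	"github.com/pingcap/tidb/sessionctx/stmtctx"
	"github.com/pingcap/tidb/types"
	"github.com/pingcap/tidb/util/chunk"
	"github.com/pingcap/tidb/util/codec"
)

func main() {
	sc := &stmtctx.StatementContext{TimeZone: time.Local}
	ft := types.NewFieldType(mysql.TypeLonglong)
	chk := chunk.New([]*types.FieldType{ft}, 1, 1)

	raw, err := codec.EncodeValue(sc, nil, types.NewDatum(int64(42)))
	if err != nil {
		panic(err)
	}

	decoder := codec.NewDecoder(chk, time.Local)
	rest, err := decoder.DecodeOne(raw, 0, ft)
	if err != nil {
		panic(err)
	}
	fmt.Println(chk.GetRow(0).GetInt64(0), len(rest)) // 42 0
}
```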
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package codec + +import ( + "bytes" + "hash" + "hash/crc32" + "hash/fnv" + "math" + "testing" + "time" + + . "github.com/pingcap/check" + "github.com/pingcap/tidb/parser/mysql" + "github.com/pingcap/tidb/sessionctx/stmtctx" + "github.com/pingcap/tidb/types" + "github.com/pingcap/tidb/util/chunk" + "github.com/pingcap/tidb/util/testleak" +) + +func TestT(t *testing.T) { + CustomVerboseFlag = true + TestingT(t) +} + +var _ = Suite(&testCodecSuite{}) + +type testCodecSuite struct { +} + +func (s *testCodecSuite) TestCodecKey(c *C) { + defer testleak.AfterTest(c)() + table := []struct { + Input []types.Datum + Expect []types.Datum + }{ + { + types.MakeDatums(int64(1)), + types.MakeDatums(int64(1)), + }, + + { + types.MakeDatums(float32(1), float64(3.15), []byte("123"), "123"), + types.MakeDatums(float64(1), float64(3.15), []byte("123"), []byte("123")), + }, + { + types.MakeDatums(uint64(1), float64(3.15), []byte("123"), int64(-1)), + types.MakeDatums(uint64(1), float64(3.15), []byte("123"), int64(-1)), + }, + + { + types.MakeDatums(true, false), + types.MakeDatums(int64(1), int64(0)), + }, + + { + types.MakeDatums(nil), + types.MakeDatums(nil), + }, + } + sc := &stmtctx.StatementContext{TimeZone: time.Local} + for i, t := range table { + comment := Commentf("%d %v", i, t) + b, err := EncodeKey(sc, nil, t.Input...) + c.Assert(err, IsNil, comment) + args, err := Decode(b, 1) + c.Assert(err, IsNil) + c.Assert(args, DeepEquals, t.Expect) + + b, err = EncodeValue(sc, nil, t.Input...) 
+ c.Assert(err, IsNil) + size, err := estimateValuesSize(sc, t.Input) + c.Assert(err, IsNil) + c.Assert(len(b), Equals, size) + args, err = Decode(b, 1) + c.Assert(err, IsNil) + c.Assert(args, DeepEquals, t.Expect) + } +} + +func estimateValuesSize(sc *stmtctx.StatementContext, vals []types.Datum) (int, error) { + size := 0 + for _, val := range vals { + length, err := EstimateValueSize(sc, val) + if err != nil { + return 0, err + } + size += length + } + return size, nil +} + +func (s *testCodecSuite) TestCodecKeyCompare(c *C) { + defer testleak.AfterTest(c)() + table := []struct { + Left []types.Datum + Right []types.Datum + Expect int + }{ + { + types.MakeDatums(1), + types.MakeDatums(1), + 0, + }, + { + types.MakeDatums(-1), + types.MakeDatums(1), + -1, + }, + { + types.MakeDatums(3.15), + types.MakeDatums(3.12), + 1, + }, + { + types.MakeDatums("abc"), + types.MakeDatums("abcd"), + -1, + }, + { + types.MakeDatums("abcdefgh"), + types.MakeDatums("abcdefghi"), + -1, + }, + { + types.MakeDatums(1, "abc"), + types.MakeDatums(1, "abcd"), + -1, + }, + { + types.MakeDatums(1, "abc", "def"), + types.MakeDatums(1, "abcd", "af"), + -1, + }, + { + types.MakeDatums(3.12, "ebc", "def"), + types.MakeDatums(2.12, "abcd", "af"), + 1, + }, + { + types.MakeDatums([]byte{0x01, 0x00}, []byte{0xFF}), + types.MakeDatums([]byte{0x01, 0x00, 0xFF}), + -1, + }, + { + types.MakeDatums([]byte{0x01}, uint64(0xFFFFFFFFFFFFFFF)), + types.MakeDatums([]byte{0x01, 0x10}, 0), + -1, + }, + { + types.MakeDatums(0), + types.MakeDatums(nil), + 1, + }, + { + types.MakeDatums([]byte{0x00}), + types.MakeDatums(nil), + 1, + }, + { + types.MakeDatums(math.SmallestNonzeroFloat64), + types.MakeDatums(nil), + 1, + }, + { + types.MakeDatums(int64(math.MinInt64)), + types.MakeDatums(nil), + 1, + }, + { + types.MakeDatums(1, int64(math.MinInt64), nil), + types.MakeDatums(1, nil, uint64(math.MaxUint64)), + 1, + }, + { + types.MakeDatums(1, []byte{}, nil), + types.MakeDatums(1, nil, 123), + 1, + }, + { + []types.Datum{types.MinNotNullDatum()}, + []types.Datum{types.MaxValueDatum()}, + -1, + }, + } + sc := &stmtctx.StatementContext{TimeZone: time.Local} + for _, t := range table { + b1, err := EncodeKey(sc, nil, t.Left...) + c.Assert(err, IsNil) + + b2, err := EncodeKey(sc, nil, t.Right...) 
+ c.Assert(err, IsNil) + + c.Assert(bytes.Compare(b1, b2), Equals, t.Expect, Commentf("%v - %v - %v - %v - %v", t.Left, t.Right, b1, b2, t.Expect)) + } +} + +func (s *testCodecSuite) TestNumberCodec(c *C) { + defer testleak.AfterTest(c)() + tblInt64 := []int64{ + math.MinInt64, + math.MinInt32, + math.MinInt16, + math.MinInt8, + 0, + math.MaxInt8, + math.MaxInt16, + math.MaxInt32, + math.MaxInt64, + 1<<47 - 1, + -1 << 47, + 1<<23 - 1, + -1 << 23, + 1<<55 - 1, + -1 << 55, + 1, + -1, + } + + for _, t := range tblInt64 { + b := EncodeInt(nil, t) + _, v, err := DecodeInt(b) + c.Assert(err, IsNil) + c.Assert(v, Equals, t) + + b = EncodeIntDesc(nil, t) + _, v, err = DecodeIntDesc(b) + c.Assert(err, IsNil) + c.Assert(v, Equals, t) + + b = EncodeVarint(nil, t) + _, v, err = DecodeVarint(b) + c.Assert(err, IsNil) + c.Assert(v, Equals, t) + + b = EncodeComparableVarint(nil, t) + _, v, err = DecodeComparableVarint(b) + c.Assert(err, IsNil) + c.Assert(v, Equals, t) + } + + tblUint64 := []uint64{ + 0, + math.MaxUint8, + math.MaxUint16, + math.MaxUint32, + math.MaxUint64, + 1<<24 - 1, + 1<<48 - 1, + 1<<56 - 1, + 1, + math.MaxInt16, + math.MaxInt8, + math.MaxInt32, + math.MaxInt64, + } + + for _, t := range tblUint64 { + b := EncodeUint(nil, t) + _, v, err := DecodeUint(b) + c.Assert(err, IsNil) + c.Assert(v, Equals, t) + + b = EncodeUintDesc(nil, t) + _, v, err = DecodeUintDesc(b) + c.Assert(err, IsNil) + c.Assert(v, Equals, t) + + b = EncodeUvarint(nil, t) + _, v, err = DecodeUvarint(b) + c.Assert(err, IsNil) + c.Assert(v, Equals, t) + + b = EncodeComparableUvarint(nil, t) + _, v, err = DecodeComparableUvarint(b) + c.Assert(err, IsNil) + c.Assert(v, Equals, t) + } + var b []byte + b = EncodeComparableVarint(b, -1) + b = EncodeComparableUvarint(b, 1) + b = EncodeComparableVarint(b, 2) + b, i, err := DecodeComparableVarint(b) + c.Assert(err, IsNil) + c.Assert(i, Equals, int64(-1)) + b, u, err := DecodeComparableUvarint(b) + c.Assert(err, IsNil) + c.Assert(u, Equals, uint64(1)) + _, i, err = DecodeComparableVarint(b) + c.Assert(err, IsNil) + c.Assert(i, Equals, int64(2)) +} + +func (s *testCodecSuite) TestNumberOrder(c *C) { + defer testleak.AfterTest(c)() + tblInt64 := []struct { + Arg1 int64 + Arg2 int64 + Ret int + }{ + {-1, 1, -1}, + {math.MaxInt64, math.MinInt64, 1}, + {math.MaxInt64, math.MaxInt32, 1}, + {math.MinInt32, math.MaxInt16, -1}, + {math.MinInt64, math.MaxInt8, -1}, + {0, math.MaxInt8, -1}, + {math.MinInt8, 0, -1}, + {math.MinInt16, math.MaxInt16, -1}, + {1, -1, 1}, + {1, 0, 1}, + {-1, 0, -1}, + {0, 0, 0}, + {math.MaxInt16, math.MaxInt16, 0}, + } + + for _, t := range tblInt64 { + b1 := EncodeInt(nil, t.Arg1) + b2 := EncodeInt(nil, t.Arg2) + + ret := bytes.Compare(b1, b2) + c.Assert(ret, Equals, t.Ret) + + b1 = EncodeIntDesc(nil, t.Arg1) + b2 = EncodeIntDesc(nil, t.Arg2) + + ret = bytes.Compare(b1, b2) + c.Assert(ret, Equals, -t.Ret) + + b1 = EncodeComparableVarint(nil, t.Arg1) + b2 = EncodeComparableVarint(nil, t.Arg2) + ret = bytes.Compare(b1, b2) + c.Assert(ret, Equals, t.Ret) + } + + tblUint64 := []struct { + Arg1 uint64 + Arg2 uint64 + Ret int + }{ + {0, 0, 0}, + {1, 0, 1}, + {0, 1, -1}, + {math.MaxInt8, math.MaxInt16, -1}, + {math.MaxUint32, math.MaxInt32, 1}, + {math.MaxUint8, math.MaxInt8, 1}, + {math.MaxUint16, math.MaxInt32, -1}, + {math.MaxUint64, math.MaxInt64, 1}, + {math.MaxInt64, math.MaxUint32, 1}, + {math.MaxUint64, 0, 1}, + {0, math.MaxUint64, -1}, + } + + for _, t := range tblUint64 { + b1 := EncodeUint(nil, t.Arg1) + b2 := EncodeUint(nil, t.Arg2) + + ret := 
bytes.Compare(b1, b2) + c.Assert(ret, Equals, t.Ret) + + b1 = EncodeUintDesc(nil, t.Arg1) + b2 = EncodeUintDesc(nil, t.Arg2) + + ret = bytes.Compare(b1, b2) + c.Assert(ret, Equals, -t.Ret) + + b1 = EncodeComparableUvarint(nil, t.Arg1) + b2 = EncodeComparableUvarint(nil, t.Arg2) + ret = bytes.Compare(b1, b2) + c.Assert(ret, Equals, t.Ret) + } +} + +func (s *testCodecSuite) TestFloatCodec(c *C) { + defer testleak.AfterTest(c)() + tblFloat := []float64{ + -1, + 0, + 1, + math.MaxFloat64, + math.MaxFloat32, + math.SmallestNonzeroFloat32, + math.SmallestNonzeroFloat64, + math.Inf(-1), + math.Inf(1), + } + + for _, t := range tblFloat { + b := EncodeFloat(nil, t) + _, v, err := DecodeFloat(b) + c.Assert(err, IsNil) + c.Assert(v, Equals, t) + + b = EncodeFloatDesc(nil, t) + _, v, err = DecodeFloatDesc(b) + c.Assert(err, IsNil) + c.Assert(v, Equals, t) + } + + tblCmp := []struct { + Arg1 float64 + Arg2 float64 + Ret int + }{ + {1, -1, 1}, + {1, 0, 1}, + {0, -1, 1}, + {0, 0, 0}, + {math.MaxFloat64, 1, 1}, + {math.MaxFloat32, math.MaxFloat64, -1}, + {math.MaxFloat64, 0, 1}, + {math.MaxFloat64, math.SmallestNonzeroFloat64, 1}, + {math.Inf(-1), 0, -1}, + {math.Inf(1), 0, 1}, + {math.Inf(-1), math.Inf(1), -1}, + } + + for _, t := range tblCmp { + b1 := EncodeFloat(nil, t.Arg1) + b2 := EncodeFloat(nil, t.Arg2) + + ret := bytes.Compare(b1, b2) + c.Assert(ret, Equals, t.Ret) + + b1 = EncodeFloatDesc(nil, t.Arg1) + b2 = EncodeFloatDesc(nil, t.Arg2) + + ret = bytes.Compare(b1, b2) + c.Assert(ret, Equals, -t.Ret) + } +} + +func (s *testCodecSuite) TestBytes(c *C) { + defer testleak.AfterTest(c)() + tblBytes := [][]byte{ + {}, + {0x00, 0x01}, + {0xff, 0xff}, + {0x01, 0x00}, + []byte("abc"), + []byte("hello world"), + } + + for _, t := range tblBytes { + b := EncodeBytes(nil, t) + _, v, err := DecodeBytes(b, nil) + c.Assert(err, IsNil) + c.Assert(t, DeepEquals, v, Commentf("%v - %v - %v", t, b, v)) + + b = EncodeBytesDesc(nil, t) + _, v, err = DecodeBytesDesc(b, nil) + c.Assert(err, IsNil) + c.Assert(t, DeepEquals, v, Commentf("%v - %v - %v", t, b, v)) + + b = EncodeCompactBytes(nil, t) + _, v, err = DecodeCompactBytes(b) + c.Assert(err, IsNil) + c.Assert(t, DeepEquals, v, Commentf("%v - %v - %v", t, b, v)) + } + + tblCmp := []struct { + Arg1 []byte + Arg2 []byte + Ret int + }{ + {[]byte{}, []byte{0x00}, -1}, + {[]byte{0x00}, []byte{0x00}, 0}, + {[]byte{0xFF}, []byte{0x00}, 1}, + {[]byte{0xFF}, []byte{0xFF, 0x00}, -1}, + {[]byte("a"), []byte("b"), -1}, + {[]byte("a"), []byte{0x00}, 1}, + {[]byte{0x00}, []byte{0x01}, -1}, + {[]byte{0x00, 0x01}, []byte{0x00, 0x00}, 1}, + {[]byte{0x00, 0x00, 0x00}, []byte{0x00, 0x00}, 1}, + {[]byte{0x00, 0x00, 0x00}, []byte{0x00, 0x00}, 1}, + {[]byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, []byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, -1}, + {[]byte{0x01, 0x02, 0x03, 0x00}, []byte{0x01, 0x02, 0x03}, 1}, + {[]byte{0x01, 0x03, 0x03, 0x04}, []byte{0x01, 0x03, 0x03, 0x05}, -1}, + {[]byte{0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07}, []byte{0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08}, -1}, + {[]byte{0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09}, []byte{0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08}, 1}, + {[]byte{0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x00}, []byte{0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08}, 1}, + } + + for _, t := range tblCmp { + b1 := EncodeBytes(nil, t.Arg1) + b2 := EncodeBytes(nil, t.Arg2) + + ret := bytes.Compare(b1, b2) + c.Assert(ret, Equals, t.Ret) + + b1 = EncodeBytesDesc(nil, t.Arg1) + b2 = 
EncodeBytesDesc(nil, t.Arg2) + + ret = bytes.Compare(b1, b2) + c.Assert(ret, Equals, -t.Ret) + } +} + +func (s *testCodecSuite) TestCut(c *C) { + defer testleak.AfterTest(c)() + table := []struct { + Input []types.Datum + Expect []types.Datum + }{ + { + types.MakeDatums(int64(1)), + types.MakeDatums(int64(1)), + }, + + { + types.MakeDatums(float32(1), float64(3.15), []byte("123"), "123"), + types.MakeDatums(float64(1), float64(3.15), []byte("123"), []byte("123")), + }, + { + types.MakeDatums(uint64(1), float64(3.15), []byte("123"), int64(-1)), + types.MakeDatums(uint64(1), float64(3.15), []byte("123"), int64(-1)), + }, + + { + types.MakeDatums(true, false), + types.MakeDatums(int64(1), int64(0)), + }, + + { + types.MakeDatums(nil), + types.MakeDatums(nil), + }, + { + types.MakeDatums(float32(1), float64(3.15), []byte("123456789012345")), + types.MakeDatums(float64(1), float64(3.15), []byte("123456789012345")), + }, + } + sc := &stmtctx.StatementContext{TimeZone: time.Local} + for i, t := range table { + comment := Commentf("%d %v", i, t) + b, err := EncodeKey(sc, nil, t.Input...) + c.Assert(err, IsNil, comment) + var d []byte + for j, e := range t.Expect { + d, b, err = CutOne(b) + c.Assert(err, IsNil) + c.Assert(d, NotNil) + ed, err1 := EncodeKey(sc, nil, e) + c.Assert(err1, IsNil) + c.Assert(d, DeepEquals, ed, Commentf("%d:%d %#v", i, j, e)) + } + c.Assert(b, HasLen, 0) + } + for i, t := range table { + comment := Commentf("%d %v", i, t) + b, err := EncodeValue(sc, nil, t.Input...) + c.Assert(err, IsNil, comment) + var d []byte + for j, e := range t.Expect { + d, b, err = CutOne(b) + c.Assert(err, IsNil) + c.Assert(d, NotNil) + ed, err1 := EncodeValue(sc, nil, e) + c.Assert(err1, IsNil) + c.Assert(d, DeepEquals, ed, Commentf("%d:%d %#v", i, j, e)) + } + c.Assert(b, HasLen, 0) + } + + b, err := EncodeValue(sc, nil, types.NewDatum(42)) + c.Assert(err, IsNil) + rem, n, err := CutColumnID(b) + c.Assert(err, IsNil) + c.Assert(rem, HasLen, 0) + c.Assert(n, Equals, int64(42)) +} + +func (s *testCodecSuite) TestSetRawValues(c *C) { + sc := &stmtctx.StatementContext{TimeZone: time.Local} + datums := types.MakeDatums(1, "abc", 1.1, []byte("def")) + rowData, err := EncodeValue(sc, nil, datums...) 
+ c.Assert(err, IsNil) + values := make([]types.Datum, 4) + err = SetRawValues(rowData, values) + c.Assert(err, IsNil) + for i, rawVal := range values { + c.Assert(rawVal.Kind(), Equals, types.KindRaw) + encoded, err1 := EncodeValue(sc, nil, datums[i]) + c.Assert(err1, IsNil) + c.Assert(encoded, BytesEquals, rawVal.GetBytes()) + } +} + +func (s *testCodecSuite) TestDecodeOneToChunk(c *C) { + defer testleak.AfterTest(c)() + sc := &stmtctx.StatementContext{TimeZone: time.Local} + datums, tps := datumsForTest(sc) + rowCount := 3 + chk := chunkForTest(c, sc, datums, tps, rowCount) + for colIdx, tp := range tps { + for rowIdx := 0; rowIdx < rowCount; rowIdx++ { + got := chk.GetRow(rowIdx).GetDatum(colIdx, tp) + expect := datums[colIdx] + if got.IsNull() { + c.Assert(expect.IsNull(), IsTrue) + } else { + cmp, err := got.CompareDatum(sc, &expect) + c.Assert(err, IsNil) + c.Assert(cmp, Equals, 0) + } + } + } +} + +func datumsForTest(sc *stmtctx.StatementContext) ([]types.Datum, []*types.FieldType) { + table := []struct { + value interface{} + tp *types.FieldType + }{ + {nil, types.NewFieldType(mysql.TypeLonglong)}, + {int64(1), types.NewFieldType(mysql.TypeTiny)}, + {int64(1), types.NewFieldType(mysql.TypeShort)}, + {int64(1), types.NewFieldType(mysql.TypeInt24)}, + {int64(1), types.NewFieldType(mysql.TypeLong)}, + {int64(-1), types.NewFieldType(mysql.TypeLong)}, + {int64(1), types.NewFieldType(mysql.TypeLonglong)}, + {uint64(1), types.NewFieldType(mysql.TypeLonglong)}, + {float32(1), types.NewFieldType(mysql.TypeFloat)}, + {float64(1), types.NewFieldType(mysql.TypeDouble)}, + {"abc", types.NewFieldType(mysql.TypeString)}, + {"def", types.NewFieldType(mysql.TypeVarchar)}, + {"ghi", types.NewFieldType(mysql.TypeVarString)}, + {[]byte("abc"), types.NewFieldType(mysql.TypeBlob)}, + {[]byte("abc"), types.NewFieldType(mysql.TypeTinyBlob)}, + {[]byte("abc"), types.NewFieldType(mysql.TypeMediumBlob)}, + {[]byte("abc"), types.NewFieldType(mysql.TypeLongBlob)}, + {int64(1), types.NewFieldType(mysql.TypeYear)}, + } + + datums := make([]types.Datum, 0, len(table)+2) + tps := make([]*types.FieldType, 0, len(table)+2) + for _, t := range table { + tps = append(tps, t.tp) + datums = append(datums, types.NewDatum(t.value)) + } + return datums, tps +} + +func chunkForTest(c *C, sc *stmtctx.StatementContext, datums []types.Datum, tps []*types.FieldType, rowCount int) *chunk.Chunk { + decoder := NewDecoder(chunk.New(tps, 32, 32), sc.TimeZone) + for rowIdx := 0; rowIdx < rowCount; rowIdx++ { + encoded, err := EncodeValue(sc, nil, datums...) + c.Assert(err, IsNil) + decoder.buf = make([]byte, 0, len(encoded)) + for colIdx, tp := range tps { + encoded, err = decoder.DecodeOne(encoded, colIdx, tp) + c.Assert(err, IsNil) + } + } + return decoder.chk +} + +func (s *testCodecSuite) TestDecodeRange(c *C) { + _, _, err := DecodeRange(nil, 0) + c.Assert(err, NotNil) + + datums := types.MakeDatums(1, "abc", 1.1, []byte("def")) + rowData, err := EncodeValue(nil, nil, datums...) 
+ c.Assert(err, IsNil) + + datums1, _, err := DecodeRange(rowData, len(datums)) + c.Assert(err, IsNil) + for i, datum := range datums1 { + cmp, err := datum.CompareDatum(nil, &datums[i]) + c.Assert(err, IsNil) + c.Assert(cmp, Equals, 0) + } + + for _, b := range []byte{NilFlag, bytesFlag, maxFlag, maxFlag + 1} { + newData := append(rowData, b) + _, _, err := DecodeRange(newData, len(datums)+1) + c.Assert(err, IsNil) + } +} + +func testHashChunkRowEqual(c *C, a, b interface{}, equal bool) { + sc := &stmtctx.StatementContext{TimeZone: time.Local} + buf1 := make([]byte, 1) + buf2 := make([]byte, 1) + + tp1 := new(types.FieldType) + types.DefaultTypeForValue(a, tp1) + chk1 := chunk.New([]*types.FieldType{tp1}, 1, 1) + d := types.Datum{} + d.SetValue(a) + chk1.AppendDatum(0, &d) + + tp2 := new(types.FieldType) + types.DefaultTypeForValue(b, tp2) + chk2 := chunk.New([]*types.FieldType{tp2}, 1, 1) + d = types.Datum{} + d.SetValue(b) + chk2.AppendDatum(0, &d) + + h := crc32.NewIEEE() + err1 := HashChunkRow(sc, h, chk1.GetRow(0), []*types.FieldType{tp1}, []int{0}, buf1) + sum1 := h.Sum32() + h.Reset() + err2 := HashChunkRow(sc, h, chk2.GetRow(0), []*types.FieldType{tp2}, []int{0}, buf2) + sum2 := h.Sum32() + c.Assert(err1, IsNil) + c.Assert(err2, IsNil) + if equal { + c.Assert(sum1, Equals, sum2) + } else { + c.Assert(sum1, Not(Equals), sum2) + } + e, err := EqualChunkRow(sc, + chk1.GetRow(0), []*types.FieldType{tp1}, []int{0}, + chk2.GetRow(0), []*types.FieldType{tp2}, []int{0}) + c.Assert(err, IsNil) + if equal { + c.Assert(e, IsTrue) + } else { + c.Assert(e, IsFalse) + } +} + +func (s *testCodecSuite) TestHashChunkRow(c *C) { + sc := &stmtctx.StatementContext{TimeZone: time.Local} + buf := make([]byte, 1) + datums, tps := datumsForTest(sc) + chk := chunkForTest(c, sc, datums, tps, 1) + + colIdx := make([]int, len(tps)) + for i := 0; i < len(tps); i++ { + colIdx[i] = i + } + h := crc32.NewIEEE() + err1 := HashChunkRow(sc, h, chk.GetRow(0), tps, colIdx, buf) + sum1 := h.Sum32() + h.Reset() + err2 := HashChunkRow(sc, h, chk.GetRow(0), tps, colIdx, buf) + sum2 := h.Sum32() + + c.Assert(err1, IsNil) + c.Assert(err2, IsNil) + c.Assert(sum1, Equals, sum2) + e, err := EqualChunkRow(sc, + chk.GetRow(0), tps, colIdx, + chk.GetRow(0), tps, colIdx) + c.Assert(err, IsNil) + c.Assert(e, IsTrue) + + testHashChunkRowEqual(c, uint64(1), int64(1), true) + testHashChunkRowEqual(c, uint64(18446744073709551615), int64(-1), false) + + testHashChunkRowEqual(c, float32(1.0), float64(1.0), true) + testHashChunkRowEqual(c, float32(1.0), float64(1.1), false) + + testHashChunkRowEqual(c, "x", []byte("x"), true) + testHashChunkRowEqual(c, "x", []byte("y"), false) +} + +func (s *testCodecSuite) TestValueSizeOfSignedInt(c *C) { + testCase := []int64{64, 8192, 1048576, 134217728, 17179869184, 2199023255552, 281474976710656, 36028797018963968, 4611686018427387904} + var b []byte + for _, v := range testCase { + b := encodeSignedInt(b[:0], v-10, false) + c.Assert(len(b), Equals, valueSizeOfSignedInt(v-10)) + + b = encodeSignedInt(b[:0], v, false) + c.Assert(len(b), Equals, valueSizeOfSignedInt(v)) + + b = encodeSignedInt(b[:0], v+10, false) + c.Assert(len(b), Equals, valueSizeOfSignedInt(v+10)) + + // Test for negative value. 
+ b = encodeSignedInt(b[:0], 0-v, false) + c.Assert(len(b), Equals, valueSizeOfSignedInt(0-v)) + + b = encodeSignedInt(b[:0], 0-v+10, false) + c.Assert(len(b), Equals, valueSizeOfSignedInt(0-v+10)) + + b = encodeSignedInt(b[:0], 0-v-10, false) + c.Assert(len(b), Equals, valueSizeOfSignedInt(0-v-10)) + } +} + +func (s *testCodecSuite) TestValueSizeOfUnsignedInt(c *C) { + testCase := []uint64{128, 16384, 2097152, 268435456, 34359738368, 4398046511104, 562949953421312, 72057594037927936, 9223372036854775808} + var b []byte + for _, v := range testCase { + b := encodeUnsignedInt(b[:0], v-10, false) + c.Assert(len(b), Equals, valueSizeOfUnsignedInt(v-10)) + + b = encodeUnsignedInt(b[:0], v, false) + c.Assert(len(b), Equals, valueSizeOfUnsignedInt(v)) + + b = encodeUnsignedInt(b[:0], v+10, false) + c.Assert(len(b), Equals, valueSizeOfUnsignedInt(v+10)) + } +} + +func (s *testCodecSuite) TestHashChunkColumns(c *C) { + sc := &stmtctx.StatementContext{TimeZone: time.Local} + buf := make([]byte, 1) + datums, tps := datumsForTest(sc) + chk := chunkForTest(c, sc, datums, tps, 3) + + colIdx := make([]int, len(tps)) + for i := 0; i < len(tps); i++ { + colIdx[i] = i + } + hasNull := []bool{false, false, false} + vecHash := []hash.Hash64{fnv.New64(), fnv.New64(), fnv.New64()} + rowHash := []hash.Hash64{fnv.New64(), fnv.New64(), fnv.New64()} + + // Test hash value of the first `Null` column + c.Assert(chk.GetRow(0).IsNull(0), Equals, true) + err1 := HashChunkColumns(sc, vecHash, chk, tps[0], 0, buf, hasNull) + err2 := HashChunkRow(sc, rowHash[0], chk.GetRow(0), tps, colIdx[0:1], buf) + err3 := HashChunkRow(sc, rowHash[1], chk.GetRow(1), tps, colIdx[0:1], buf) + err4 := HashChunkRow(sc, rowHash[2], chk.GetRow(2), tps, colIdx[0:1], buf) + c.Assert(err1, IsNil) + c.Assert(err2, IsNil) + c.Assert(err3, IsNil) + c.Assert(err4, IsNil) + + c.Assert(hasNull[0], Equals, true) + c.Assert(hasNull[1], Equals, true) + c.Assert(hasNull[2], Equals, true) + c.Assert(vecHash[0].Sum64(), Equals, rowHash[0].Sum64()) + c.Assert(vecHash[1].Sum64(), Equals, rowHash[1].Sum64()) + c.Assert(vecHash[2].Sum64(), Equals, rowHash[2].Sum64()) + + // Test hash value of every single column that is not `Null` + for i := 1; i < len(tps); i++ { + hasNull = []bool{false, false, false} + vecHash = []hash.Hash64{fnv.New64(), fnv.New64(), fnv.New64()} + rowHash = []hash.Hash64{fnv.New64(), fnv.New64(), fnv.New64()} + + c.Assert(chk.GetRow(0).IsNull(i), Equals, false) + err1 = HashChunkColumns(sc, vecHash, chk, tps[i], i, buf, hasNull) + err2 = HashChunkRow(sc, rowHash[0], chk.GetRow(0), tps, colIdx[i:i+1], buf) + err3 = HashChunkRow(sc, rowHash[1], chk.GetRow(1), tps, colIdx[i:i+1], buf) + err4 = HashChunkRow(sc, rowHash[2], chk.GetRow(2), tps, colIdx[i:i+1], buf) + c.Assert(err1, IsNil) + c.Assert(err2, IsNil) + c.Assert(err3, IsNil) + c.Assert(err4, IsNil) + + c.Assert(hasNull[0], Equals, false) + c.Assert(hasNull[1], Equals, false) + c.Assert(hasNull[2], Equals, false) + c.Assert(vecHash[0].Sum64(), Equals, rowHash[0].Sum64()) + c.Assert(vecHash[1].Sum64(), Equals, rowHash[1].Sum64()) + c.Assert(vecHash[2].Sum64(), Equals, rowHash[2].Sum64()) + } +} diff --git a/util/codec/float.go b/util/codec/float.go new file mode 100644 index 0000000..39ce634 --- /dev/null +++ b/util/codec/float.go @@ -0,0 +1,65 @@ +// Copyright 2015 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package codec + +import ( + "math" + + "github.com/pingcap/errors" +) + +func encodeFloatToCmpUint64(f float64) uint64 { + u := math.Float64bits(f) + if f >= 0 { + u |= signMask + } else { + u = ^u + } + return u +} + +func decodeCmpUintToFloat(u uint64) float64 { + if u&signMask > 0 { + u &= ^signMask + } else { + u = ^u + } + return math.Float64frombits(u) +} + +// EncodeFloat encodes a float v into a byte slice which can be sorted lexicographically later. +// EncodeFloat guarantees that the encoded value is in ascending order for comparison. +func EncodeFloat(b []byte, v float64) []byte { + u := encodeFloatToCmpUint64(v) + return EncodeUint(b, u) +} + +// DecodeFloat decodes a float from a byte slice generated with EncodeFloat before. +func DecodeFloat(b []byte) ([]byte, float64, error) { + b, u, err := DecodeUint(b) + return b, decodeCmpUintToFloat(u), errors.Trace(err) +} + +// EncodeFloatDesc encodes a float v into a byte slice which can be sorted lexicographically later. +// EncodeFloatDesc guarantees that the encoded value is in descending order for comparison. +func EncodeFloatDesc(b []byte, v float64) []byte { + u := encodeFloatToCmpUint64(v) + return EncodeUintDesc(b, u) +} + +// DecodeFloatDesc decodes a float from a byte slice generated with EncodeFloatDesc before. +func DecodeFloatDesc(b []byte) ([]byte, float64, error) { + b, u, err := DecodeUintDesc(b) + return b, decodeCmpUintToFloat(u), errors.Trace(err) +} diff --git a/util/codec/number.go b/util/codec/number.go new file mode 100644 index 0000000..47912e8 --- /dev/null +++ b/util/codec/number.go @@ -0,0 +1,284 @@ +// Copyright 2015 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package codec + +import ( + "encoding/binary" + "math" + + "github.com/pingcap/errors" +) + +const signMask uint64 = 0x8000000000000000 + +// EncodeIntToCmpUint make int v to comparable uint type +func EncodeIntToCmpUint(v int64) uint64 { + return uint64(v) ^ signMask +} + +// DecodeCmpUintToInt decodes the u that encoded by EncodeIntToCmpUint +func DecodeCmpUintToInt(u uint64) int64 { + return int64(u ^ signMask) +} + +// EncodeInt appends the encoded value to slice b and returns the appended slice. +// EncodeInt guarantees that the encoded value is in ascending order for comparison. +func EncodeInt(b []byte, v int64) []byte { + var data [8]byte + u := EncodeIntToCmpUint(v) + binary.BigEndian.PutUint64(data[:], u) + return append(b, data[:]...) +} + +// EncodeIntDesc appends the encoded value to slice b and returns the appended slice. +// EncodeIntDesc guarantees that the encoded value is in descending order for comparison. 
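Why the float transform above preserves ordering: positive floats get the sign bit set, negative floats are bit-complemented, so unsigned comparison of the results matches float comparison. A standalone sketch, standard library only:

```go
package main

import (
	"fmt"
	"math"
)

const signMask uint64 = 0x8000000000000000

// toCmpUint64 mirrors encodeFloatToCmpUint64 above.
func toCmpUint64(f float64) uint64 {
	u := math.Float64bits(f)
	if f >= 0 {
		return u | signMask
	}
	return ^u
}

func main() {
	vals := []float64{math.Inf(-1), -2.5, 0, 1.5, math.Inf(1)}
	for i := 1; i < len(vals); i++ {
		// Every encoded value is >= the previous one, mirroring float order.
		fmt.Println(toCmpUint64(vals[i-1]) <= toCmpUint64(vals[i])) // true
	}
}
```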
+func EncodeIntDesc(b []byte, v int64) []byte { + var data [8]byte + u := EncodeIntToCmpUint(v) + binary.BigEndian.PutUint64(data[:], ^u) + return append(b, data[:]...) +} + +// DecodeInt decodes value encoded by EncodeInt before. +// It returns the leftover un-decoded slice, decoded value if no error. +func DecodeInt(b []byte) ([]byte, int64, error) { + if len(b) < 8 { + return nil, 0, errors.New("insufficient bytes to decode value") + } + + u := binary.BigEndian.Uint64(b[:8]) + v := DecodeCmpUintToInt(u) + b = b[8:] + return b, v, nil +} + +// DecodeIntDesc decodes value encoded by EncodeInt before. +// It returns the leftover un-decoded slice, decoded value if no error. +func DecodeIntDesc(b []byte) ([]byte, int64, error) { + if len(b) < 8 { + return nil, 0, errors.New("insufficient bytes to decode value") + } + + u := binary.BigEndian.Uint64(b[:8]) + v := DecodeCmpUintToInt(^u) + b = b[8:] + return b, v, nil +} + +// EncodeUint appends the encoded value to slice b and returns the appended slice. +// EncodeUint guarantees that the encoded value is in ascending order for comparison. +func EncodeUint(b []byte, v uint64) []byte { + var data [8]byte + binary.BigEndian.PutUint64(data[:], v) + return append(b, data[:]...) +} + +// EncodeUintDesc appends the encoded value to slice b and returns the appended slice. +// EncodeUintDesc guarantees that the encoded value is in descending order for comparison. +func EncodeUintDesc(b []byte, v uint64) []byte { + var data [8]byte + binary.BigEndian.PutUint64(data[:], ^v) + return append(b, data[:]...) +} + +// DecodeUint decodes value encoded by EncodeUint before. +// It returns the leftover un-decoded slice, decoded value if no error. +func DecodeUint(b []byte) ([]byte, uint64, error) { + if len(b) < 8 { + return nil, 0, errors.New("insufficient bytes to decode value") + } + + v := binary.BigEndian.Uint64(b[:8]) + b = b[8:] + return b, v, nil +} + +// DecodeUintDesc decodes value encoded by EncodeInt before. +// It returns the leftover un-decoded slice, decoded value if no error. +func DecodeUintDesc(b []byte) ([]byte, uint64, error) { + if len(b) < 8 { + return nil, 0, errors.New("insufficient bytes to decode value") + } + + data := b[:8] + v := binary.BigEndian.Uint64(data) + b = b[8:] + return b, ^v, nil +} + +// EncodeVarint appends the encoded value to slice b and returns the appended slice. +// Note that the encoded result is not memcomparable. +func EncodeVarint(b []byte, v int64) []byte { + var data [binary.MaxVarintLen64]byte + n := binary.PutVarint(data[:], v) + return append(b, data[:n]...) +} + +// DecodeVarint decodes value encoded by EncodeVarint before. +// It returns the leftover un-decoded slice, decoded value if no error. +func DecodeVarint(b []byte) ([]byte, int64, error) { + v, n := binary.Varint(b) + if n > 0 { + return b[n:], v, nil + } + if n < 0 { + return nil, 0, errors.New("value larger than 64 bits") + } + return nil, 0, errors.New("insufficient bytes to decode value") +} + +// EncodeUvarint appends the encoded value to slice b and returns the appended slice. +// Note that the encoded result is not memcomparable. +func EncodeUvarint(b []byte, v uint64) []byte { + var data [binary.MaxVarintLen64]byte + n := binary.PutUvarint(data[:], v) + return append(b, data[:n]...) +} + +// DecodeUvarint decodes value encoded by EncodeUvarint before. +// It returns the leftover un-decoded slice, decoded value if no error. 
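The "not memcomparable" note on EncodeVarint above is easy to see with a standalone comparison against the fixed-width, sign-flipped form:

```go
package main

import (
	"bytes"
	"encoding/binary"
	"fmt"
)

const signMask uint64 = 0x8000000000000000

// encodeInt mirrors EncodeInt above: big-endian with the sign bit flipped.
func encodeInt(v int64) []byte {
	var b [8]byte
	binary.BigEndian.PutUint64(b[:], uint64(v)^signMask)
	return b[:]
}

func encodeVarint(v int64) []byte {
	var b [binary.MaxVarintLen64]byte
	n := binary.PutVarint(b[:], v)
	return b[:n]
}

func main() {
	// 200 > 100, and the fixed-width encoding agrees byte-wise...
	fmt.Println(bytes.Compare(encodeInt(200), encodeInt(100))) // 1
	// ...but the zig-zag varint of 200 starts with a smaller byte (0x90 < 0xC8),
	// so plain byte comparison disagrees with numeric order.
	fmt.Println(bytes.Compare(encodeVarint(200), encodeVarint(100))) // -1
}
```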
+func DecodeUvarint(b []byte) ([]byte, uint64, error) { + v, n := binary.Uvarint(b) + if n > 0 { + return b[n:], v, nil + } + if n < 0 { + return nil, 0, errors.New("value larger than 64 bits") + } + return nil, 0, errors.New("insufficient bytes to decode value") +} + +const ( + negativeTagEnd = 8 // negative tag is (negativeTagEnd - length). + positiveTagStart = 0xff - 8 // Positive tag is (positiveTagStart + length). +) + +// EncodeComparableVarint encodes an int64 to a mem-comparable bytes. +func EncodeComparableVarint(b []byte, v int64) []byte { + if v < 0 { + // All negative value has a tag byte prefix (negativeTagEnd - length). + // Smaller negative value encodes to more bytes, has smaller tag. + if v >= -0xff { + return append(b, negativeTagEnd-1, byte(v)) + } else if v >= -0xffff { + return append(b, negativeTagEnd-2, byte(v>>8), byte(v)) + } else if v >= -0xffffff { + return append(b, negativeTagEnd-3, byte(v>>16), byte(v>>8), byte(v)) + } else if v >= -0xffffffff { + return append(b, negativeTagEnd-4, byte(v>>24), byte(v>>16), byte(v>>8), byte(v)) + } else if v >= -0xffffffffff { + return append(b, negativeTagEnd-5, byte(v>>32), byte(v>>24), byte(v>>16), byte(v>>8), byte(v)) + } else if v >= -0xffffffffffff { + return append(b, negativeTagEnd-6, byte(v>>40), byte(v>>32), byte(v>>24), byte(v>>16), byte(v>>8), + byte(v)) + } else if v >= -0xffffffffffffff { + return append(b, negativeTagEnd-7, byte(v>>48), byte(v>>40), byte(v>>32), byte(v>>24), byte(v>>16), + byte(v>>8), byte(v)) + } + return append(b, negativeTagEnd-8, byte(v>>56), byte(v>>48), byte(v>>40), byte(v>>32), byte(v>>24), + byte(v>>16), byte(v>>8), byte(v)) + } + return EncodeComparableUvarint(b, uint64(v)) +} + +// EncodeComparableUvarint encodes uint64 into mem-comparable bytes. +func EncodeComparableUvarint(b []byte, v uint64) []byte { + // The first byte has 256 values, [0, 7] is reserved for negative tags, + // [248, 255] is reserved for larger positive tags, + // So we can store value [0, 239] in a single byte. + // Values cannot be stored in single byte has a tag byte prefix (positiveTagStart+length). + // Larger value encodes to more bytes, has larger tag. + if v <= positiveTagStart-negativeTagEnd { + return append(b, byte(v)+negativeTagEnd) + } else if v <= 0xff { + return append(b, positiveTagStart+1, byte(v)) + } else if v <= 0xffff { + return append(b, positiveTagStart+2, byte(v>>8), byte(v)) + } else if v <= 0xffffff { + return append(b, positiveTagStart+3, byte(v>>16), byte(v>>8), byte(v)) + } else if v <= 0xffffffff { + return append(b, positiveTagStart+4, byte(v>>24), byte(v>>16), byte(v>>8), byte(v)) + } else if v <= 0xffffffffff { + return append(b, positiveTagStart+5, byte(v>>32), byte(v>>24), byte(v>>16), byte(v>>8), byte(v)) + } else if v <= 0xffffffffffff { + return append(b, positiveTagStart+6, byte(v>>40), byte(v>>32), byte(v>>24), byte(v>>16), byte(v>>8), + byte(v)) + } else if v <= 0xffffffffffffff { + return append(b, positiveTagStart+7, byte(v>>48), byte(v>>40), byte(v>>32), byte(v>>24), byte(v>>16), + byte(v>>8), byte(v)) + } + return append(b, positiveTagStart+8, byte(v>>56), byte(v>>48), byte(v>>40), byte(v>>32), byte(v>>24), + byte(v>>16), byte(v>>8), byte(v)) +} + +var ( + errDecodeInsufficient = errors.New("insufficient bytes to decode value") + errDecodeInvalid = errors.New("invalid bytes to decode value") +) + +// DecodeComparableUvarint decodes mem-comparable uvarint. 
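A standalone sketch of the tag-byte scheme described above for mem-comparable uvarints: values up to 239 are stored in a single byte offset by 8, larger values get a 247+length tag followed by the big-endian payload, so longer (larger) encodings always compare greater. The generic loop below illustrates the idea; it is not the unrolled encoder from the diff:

```go
package main

import (
	"bytes"
	"fmt"
)

const (
	negativeTagEnd   = 8
	positiveTagStart = 0xff - 8
)

func encodeComparableUvarint(b []byte, v uint64) []byte {
	if v <= positiveTagStart-negativeTagEnd {
		return append(b, byte(v)+negativeTagEnd) // single byte, range [8, 247]
	}
	// Length tag first, then the value big-endian with leading zeros trimmed.
	var payload []byte
	for x := v; x > 0; x >>= 8 {
		payload = append([]byte{byte(x)}, payload...)
	}
	return append(append(b, positiveTagStart+byte(len(payload))), payload...)
}

func main() {
	a := encodeComparableUvarint(nil, 200)   // [208]
	b := encodeComparableUvarint(nil, 300)   // [249, 0x01, 0x2C]
	c := encodeComparableUvarint(nil, 70000) // [250, 0x01, 0x11, 0x70]
	fmt.Println(bytes.Compare(a, b) < 0, bytes.Compare(b, c) < 0) // true true
}
```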
+func DecodeComparableUvarint(b []byte) ([]byte, uint64, error) { + if len(b) == 0 { + return nil, 0, errDecodeInsufficient + } + first := b[0] + b = b[1:] + if first < negativeTagEnd { + return nil, 0, errors.Trace(errDecodeInvalid) + } + if first <= positiveTagStart { + return b, uint64(first) - negativeTagEnd, nil + } + length := int(first) - positiveTagStart + if len(b) < length { + return nil, 0, errors.Trace(errDecodeInsufficient) + } + var v uint64 + for _, c := range b[:length] { + v = (v << 8) | uint64(c) + } + return b[length:], v, nil +} + +// DecodeComparableVarint decodes mem-comparable varint. +func DecodeComparableVarint(b []byte) ([]byte, int64, error) { + if len(b) == 0 { + return nil, 0, errors.Trace(errDecodeInsufficient) + } + first := b[0] + if first >= negativeTagEnd && first <= positiveTagStart { + return b, int64(first) - negativeTagEnd, nil + } + b = b[1:] + var length int + var v uint64 + if first < negativeTagEnd { + length = negativeTagEnd - int(first) + v = math.MaxUint64 // negative value has all bits on by default. + } else { + length = int(first) - positiveTagStart + } + if len(b) < length { + return nil, 0, errors.Trace(errDecodeInsufficient) + } + for _, c := range b[:length] { + v = (v << 8) | uint64(c) + } + if first > positiveTagStart && v > math.MaxInt64 { + return nil, 0, errors.Trace(errDecodeInvalid) + } else if first < negativeTagEnd && v <= math.MaxInt64 { + return nil, 0, errors.Trace(errDecodeInvalid) + } + return b[length:], int64(v), nil +} diff --git a/util/disjointset/int_set.go b/util/disjointset/int_set.go new file mode 100644 index 0000000..0881b4a --- /dev/null +++ b/util/disjointset/int_set.go @@ -0,0 +1,42 @@ +// Copyright 2018 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package disjointset + +// IntSet is the int disjoint set. +type IntSet struct { + parent []int +} + +// NewIntSet returns a new int disjoint set. +func NewIntSet(size int) *IntSet { + p := make([]int, size) + for i := range p { + p[i] = i + } + return &IntSet{parent: p} +} + +// Union unions two sets in int disjoint set. +func (m *IntSet) Union(a int, b int) { + m.parent[m.FindRoot(a)] = m.FindRoot(b) +} + +// FindRoot finds the representative element of the set that `a` belongs to. +func (m *IntSet) FindRoot(a int) int { + if a == m.parent[a] { + return a + } + m.parent[a] = m.FindRoot(m.parent[a]) + return m.parent[a] +} diff --git a/util/disjointset/int_set_test.go b/util/disjointset/int_set_test.go new file mode 100644 index 0000000..222c63e --- /dev/null +++ b/util/disjointset/int_set_test.go @@ -0,0 +1,52 @@ +// Copyright 2018 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
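A hedged usage sketch of the IntSet added above (import path as used in this diff): a classic union-find, with FindRoot performing path compression.

```go
package main

import (
	"fmt"

	"github.com/pingcap/tidb/util/disjointset"
)

func main() {
	set := disjointset.NewIntSet(5)
	set.Union(0, 1)
	set.Union(3, 4)
	fmt.Println(set.FindRoot(0) == set.FindRoot(1)) // true
	fmt.Println(set.FindRoot(1) == set.FindRoot(3)) // false

	// Merging the two groups makes all four elements share one root.
	set.Union(1, 3)
	fmt.Println(set.FindRoot(0) == set.FindRoot(4)) // true
}
```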
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package disjointset + +import ( + "testing" + + . "github.com/pingcap/check" +) + +var _ = Suite(&testDisjointSetSuite{}) + +func TestT(t *testing.T) { + CustomVerboseFlag = true + TestingT(t) +} + +type testDisjointSetSuite struct { +} + +func (s *testDisjointSetSuite) TestIntDisjointSet(c *C) { + set := NewIntSet(10) + c.Assert(len(set.parent), Equals, 10) + for i := range set.parent { + c.Assert(set.parent[i], Equals, i) + } + set.Union(0, 1) + set.Union(1, 3) + set.Union(4, 2) + set.Union(2, 6) + set.Union(3, 5) + set.Union(7, 8) + set.Union(9, 6) + c.Assert(set.FindRoot(0), Equals, set.FindRoot(1)) + c.Assert(set.FindRoot(3), Equals, set.FindRoot(1)) + c.Assert(set.FindRoot(5), Equals, set.FindRoot(1)) + c.Assert(set.FindRoot(2), Equals, set.FindRoot(4)) + c.Assert(set.FindRoot(6), Equals, set.FindRoot(4)) + c.Assert(set.FindRoot(9), Equals, set.FindRoot(2)) + c.Assert(set.FindRoot(7), Equals, set.FindRoot(8)) +} diff --git a/util/format/format.go b/util/format/format.go new file mode 100644 index 0000000..0a14a6d --- /dev/null +++ b/util/format/format.go @@ -0,0 +1,195 @@ +// Copyright (c) 2014 The sortutil Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSES/STRUTIL-LICENSE file. + +// Copyright 2015 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package format + +import ( + "bytes" + "fmt" + "io" +) + +const ( + st0 = iota + stBOL + stPERC + stBOLPERC +) + +// Formatter is an io.Writer extended formatter by a fmt.Printf like function Format. +type Formatter interface { + io.Writer + Format(format string, args ...interface{}) (n int, errno error) +} + +type indentFormatter struct { + io.Writer + indent []byte + indentLevel int + state int +} + +var replace = map[rune]string{ + '\000': "\\0", + '\'': "''", + '\n': "\\n", + '\r': "\\r", +} + +// IndentFormatter returns a new Formatter which interprets %i and %u in the +// Format() formats string as indent and unindent commands. The commands can +// nest. The Formatter writes to io.Writer 'w' and inserts one 'indent' +// string per current indent level value. +// Behaviour of commands reaching negative indent levels is undefined. +// IndentFormatter(os.Stdout, "\t").Format("abc%d%%e%i\nx\ny\n%uz\n", 3) +// output: +// abc3%e +// x +// y +// z +// The Go quoted string literal form of the above is: +// "abc%%e\n\tx\n\tx\nz\n" +// The commands can be scattered between separate invocations of Format(), +// i.e. the formatter keeps track of the indent level and knows if it is +// positioned on start of a line and should emit indentation(s). 
+// The same output as above can be produced by e.g.: +// f := IndentFormatter(os.Stdout, " ") +// f.Format("abc%d%%e%i\nx\n", 3) +// f.Format("y\n%uz\n") +func IndentFormatter(w io.Writer, indent string) Formatter { + return &indentFormatter{w, []byte(indent), 0, stBOL} +} + +func (f *indentFormatter) format(flat bool, format string, args ...interface{}) (n int, errno error) { + var buf = make([]byte, 0) + for i := 0; i < len(format); i++ { + c := format[i] + switch f.state { + case st0: + switch c { + case '\n': + cc := c + if flat && f.indentLevel != 0 { + cc = ' ' + } + buf = append(buf, cc) + f.state = stBOL + case '%': + f.state = stPERC + default: + buf = append(buf, c) + } + case stBOL: + switch c { + case '\n': + cc := c + if flat && f.indentLevel != 0 { + cc = ' ' + } + buf = append(buf, cc) + case '%': + f.state = stBOLPERC + default: + if !flat { + for i := 0; i < f.indentLevel; i++ { + buf = append(buf, f.indent...) + } + } + buf = append(buf, c) + f.state = st0 + } + case stBOLPERC: + switch c { + case 'i': + f.indentLevel++ + f.state = stBOL + case 'u': + f.indentLevel-- + f.state = stBOL + default: + if !flat { + for i := 0; i < f.indentLevel; i++ { + buf = append(buf, f.indent...) + } + } + buf = append(buf, '%', c) + f.state = st0 + } + case stPERC: + switch c { + case 'i': + f.indentLevel++ + f.state = st0 + case 'u': + f.indentLevel-- + f.state = st0 + default: + buf = append(buf, '%', c) + f.state = st0 + } + default: + panic("unexpected state") + } + } + switch f.state { + case stPERC, stBOLPERC: + buf = append(buf, '%') + } + return f.Write([]byte(fmt.Sprintf(string(buf), args...))) +} + +// Format implements Format interface. +func (f *indentFormatter) Format(format string, args ...interface{}) (n int, errno error) { + return f.format(false, format, args...) +} + +type flatFormatter indentFormatter + +// FlatFormatter returns a newly created Formatter with the same functionality as the one returned +// by IndentFormatter except it allows a newline in the 'format' string argument of Format +// to pass through if the indent level is current zero. +// +// If the indent level is non-zero then such new lines are changed to a space character. +// There is no indent string, the %i and %u format verbs are used solely to determine the indent level. +// +// The FlatFormatter is intended for flattening of normally nested structure textual representation to +// a one top level structure per line form. +// FlatFormatter(os.Stdout, " ").Format("abc%d%%e%i\nx\ny\n%uz\n", 3) +// output in the form of a Go quoted string literal: +// "abc3%%e x y z\n" +func FlatFormatter(w io.Writer) Formatter { + return (*flatFormatter)(IndentFormatter(w, "").(*indentFormatter)) +} + +// Format implements Format interface. +func (f *flatFormatter) Format(format string, args ...interface{}) (n int, errno error) { + return (*indentFormatter)(f).format(true, format, args...) +} + +// OutputFormat output escape character with backslash. +func OutputFormat(s string) string { + var buf bytes.Buffer + for _, old := range s { + if newVal, ok := replace[old]; ok { + buf.WriteString(newVal) + continue + } + buf.WriteRune(old) + } + + return buf.String() +} diff --git a/util/format/format_test.go b/util/format/format_test.go new file mode 100644 index 0000000..6dd9b4f --- /dev/null +++ b/util/format/format_test.go @@ -0,0 +1,63 @@ +// Copyright 2015 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
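A hedged usage sketch of the indent formatter above (import path as in this diff); %i and %u change the indent level, and the state carries across Format calls, as the test below also exercises:

```go
package main

import (
	"os"

	"github.com/pingcap/tidb/util/format"
)

func main() {
	f := format.IndentFormatter(os.Stdout, "  ")
	f.Format("select%i\n")        // %i: indent the following lines
	f.Format("a,\nb%u\nfrom t\n") // %u: unindent before "from t"
	// Output:
	// select
	//   a,
	//   b
	// from t
}
```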
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package format + +import ( + "bytes" + "io/ioutil" + "testing" + + . "github.com/pingcap/check" + "github.com/pingcap/tidb/util/testleak" +) + +func TestT(t *testing.T) { + CustomVerboseFlag = true + TestingT(t) +} + +var _ = Suite(&testFormatSuite{}) + +type testFormatSuite struct { +} + +func checkFormat(c *C, f Formatter, buf *bytes.Buffer, str, expect string) { + _, err := f.Format(str, 3) + c.Assert(err, IsNil) + b, err := ioutil.ReadAll(buf) + c.Assert(err, IsNil) + c.Assert(string(b), Equals, expect) +} + +func (s *testFormatSuite) TestFormat(c *C) { + defer testleak.AfterTest(c)() + str := "abc%d%%e%i\nx\ny\n%uz\n" + buf := &bytes.Buffer{} + f := IndentFormatter(buf, "\t") + expect := `abc3%e + x + y +z +` + checkFormat(c, f, buf, str, expect) + + str = "abc%d%%e%i\nx\ny\n%uz\n%i\n" + buf = &bytes.Buffer{} + f = FlatFormatter(buf) + expect = "abc3%e x y z\n " + checkFormat(c, f, buf, str, expect) + + str2 := OutputFormat(`\'\000abc\n\rdef`) + c.Assert(str2, Equals, "\\''\\000abc\\n\\rdef") +} diff --git a/util/hack/hack.go b/util/hack/hack.go new file mode 100644 index 0000000..ac59b77 --- /dev/null +++ b/util/hack/hack.go @@ -0,0 +1,47 @@ +// Copyright 2015 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package hack + +import ( + "reflect" + "unsafe" +) + +// MutableString can be used as string via string(MutableString) without performance loss. +type MutableString string + +// String converts slice to MutableString without copy. +// The MutableString can be converts to string without copy. +// Use it at your own risk. +func String(b []byte) (s MutableString) { + if len(b) == 0 { + return "" + } + pbytes := (*reflect.SliceHeader)(unsafe.Pointer(&b)) + pstring := (*reflect.StringHeader)(unsafe.Pointer(&s)) + pstring.Data = pbytes.Data + pstring.Len = pbytes.Len + return +} + +// Slice converts string to slice without copy. +// Use at your own risk. +func Slice(s string) (b []byte) { + pbytes := (*reflect.SliceHeader)(unsafe.Pointer(&b)) + pstring := *(*reflect.StringHeader)(unsafe.Pointer(&s)) + pbytes.Data = pstring.Data + pbytes.Len = pstring.Len + pbytes.Cap = pstring.Len + return +} diff --git a/util/hack/hack_test.go b/util/hack/hack_test.go new file mode 100644 index 0000000..9025a0a --- /dev/null +++ b/util/hack/hack_test.go @@ -0,0 +1,70 @@ +// Copyright 2015 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
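A short sketch of the zero-copy conversions in util/hack and the aliasing caveat they carry; the printed values are what this sketch would produce, not library-documented output:

package main

import (
	"fmt"

	"github.com/pingcap/tidb/util/hack"
)

func main() {
	b := []byte("select 1")

	// String shares b's backing array: no copy, but later writes to b show through.
	s := hack.String(b)
	b[0] = 'S'
	fmt.Println(s) // Select 1

	// Slice gives a []byte view of a string. Treat it as read-only: the bytes
	// may live in immutable memory, so writing to them can crash the process.
	raw := hack.Slice("key_prefix")
	fmt.Println(len(raw), cap(raw)) // 10 10
}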
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package hack + +import ( + "bytes" + "testing" + + . "github.com/pingcap/check" +) + +func TestT(t *testing.T) { + TestingT(t) +} + +func TestString(t *testing.T) { + b := []byte("hello world") + a := String(b) + + if a != "hello world" { + t.Fatal(a) + } + + b[0] = 'a' + + if a != "aello world" { + t.Fatal(a) + } + + b = append(b, "abc"...) + if a != "aello world" { + t.Fatalf("a:%v, b:%v", a, b) + } +} + +func TestByte(t *testing.T) { + a := "hello world" + + b := Slice(a) + + if !bytes.Equal(b, []byte("hello world")) { + t.Fatal(string(b)) + } +} + +func TestMutable(t *testing.T) { + a := []byte{'a', 'b', 'c'} + b := String(a) // b is a mutable string. + c := string(b) // Warn, c is a mutable string + if c != "abc" { + t.Fatalf("assert fail") + } + + // c changed after a is modified + a[0] = 's' + if c != "sbc" { + t.Fatal("test mutable string fail") + } +} diff --git a/util/logutil/hex.go b/util/logutil/hex.go new file mode 100644 index 0000000..e8ca2fa --- /dev/null +++ b/util/logutil/hex.go @@ -0,0 +1,78 @@ +// Copyright 2019 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by aprettyPrintlicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package logutil + +import ( + "bytes" + "encoding/hex" + "fmt" + "io" + "reflect" + "strings" + + "github.com/golang/protobuf/proto" +) + +// Hex defines a fmt.Stringer for proto.Message. +// We can't define the String() method on proto.Message, but we can wrap it. +func Hex(msg proto.Message) fmt.Stringer { + return hexStringer{msg} +} + +type hexStringer struct { + proto.Message +} + +func (h hexStringer) String() string { + val := reflect.ValueOf(h.Message) + var w bytes.Buffer + prettyPrint(&w, val) + return w.String() +} + +func prettyPrint(w io.Writer, val reflect.Value) { + tp := val.Type() + switch val.Kind() { + case reflect.Slice: + elemType := tp.Elem() + if elemType.Kind() == reflect.Uint8 { + fmt.Fprintf(w, "%s", hex.EncodeToString(val.Bytes())) + } else { + fmt.Fprintf(w, "%s", val.Interface()) + } + case reflect.Struct: + fmt.Fprintf(w, "{") + for i := 0; i < val.NumField(); i++ { + fv := val.Field(i) + ft := tp.Field(i) + if strings.HasPrefix(ft.Name, "XXX_") { + continue + } + if i != 0 { + fmt.Fprintf(w, " ") + } + fmt.Fprintf(w, "%s:", ft.Name) + prettyPrint(w, fv) + } + fmt.Fprintf(w, "}") + case reflect.Ptr: + if val.IsNil() { + fmt.Fprintf(w, "%v", val.Interface()) + } else { + prettyPrint(w, reflect.Indirect(val)) + } + default: + fmt.Fprintf(w, "%v", val.Interface()) + } +} diff --git a/util/logutil/hex_test.go b/util/logutil/hex_test.go new file mode 100644 index 0000000..8f8a314 --- /dev/null +++ b/util/logutil/hex_test.go @@ -0,0 +1,64 @@ +// Copyright 2017 PingCAP, Inc. 
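A hedged sketch of how Hex is meant to be used with zap: the returned Stringer defers the reflection walk until the entry is actually written. It borrows the metapb.Region type from the test that follows and BgLogger from util/logutil/log.go later in this patch:

package main

import (
	"github.com/pingcap-incubator/tinykv/proto/pkg/metapb"
	"github.com/pingcap/tidb/util/logutil"
	"go.uber.org/zap"
)

func main() {
	region := &metapb.Region{
		Id:       6662,
		StartKey: []byte{0x74, 0x80, 0x00, 0x01},
		EndKey:   []byte("raw-end-key"),
	}
	// []byte fields come out hex-encoded, other fields via %v, XXX_ fields are skipped.
	logutil.BgLogger().Info("region split",
		zap.Stringer("region", logutil.Hex(region)))
}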
+// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package logutil_test + +import ( + "bytes" + "encoding/hex" + "reflect" + + "github.com/pingcap-incubator/tinykv/proto/pkg/metapb" + . "github.com/pingcap/check" + "github.com/pingcap/tidb/kv" + "github.com/pingcap/tidb/util/logutil" +) + +var _ = Suite(&testHexSuite{}) + +type testHexSuite struct{} + +func (s *testHexSuite) SetUpSuite(c *C) {} + +func (s *testHexSuite) SetUpTest(c *C) {} + +func (s *testHexSuite) TestHex(c *C) { + var region metapb.Region + region.Id = 6662 + region.StartKey = []byte{'t', 200, '\\', 000, 000, 000, '\\', 000, 000, 000, 37, '-', 000, 000, 000, 000, 000, 000, 000, 37} + region.EndKey = []byte("3asg3asd") + + c.Assert(logutil.Hex(®ion).String(), Equals, "{Id:6662 StartKey:74c85c0000005c000000252d0000000000000025 EndKey:3361736733617364 RegionEpoch: Peers:[]}") +} + +func (s *testHexSuite) TestPrettyPrint(c *C) { + var buf bytes.Buffer + + byteSlice := []byte("asd2fsdafs中文3af") + logutil.PrettyPrint(&buf, reflect.ValueOf(byteSlice)) + c.Assert(buf.String(), Equals, "61736432667364616673e4b8ade69687336166") + c.Assert(buf.String(), Equals, hex.EncodeToString(byteSlice)) + buf.Reset() + + // Go reflect can't distinguish uint8 from byte! + intSlice := []uint8{1, 2, 3, uint8('a'), uint8('b'), uint8('c'), uint8('\'')} + logutil.PrettyPrint(&buf, reflect.ValueOf(intSlice)) + c.Assert(buf.String(), Equals, "01020361626327") + buf.Reset() + + var ran kv.KeyRange + ran.StartKey = kv.Key("_txxey23_i263") + ran.EndKey = nil + logutil.PrettyPrint(&buf, reflect.ValueOf(ran)) + c.Assert(buf.String(), Equals, "{StartKey:5f747878657932335f69323633 EndKey:}") +} diff --git a/util/logutil/log.go b/util/logutil/log.go new file mode 100644 index 0000000..b6bac61 --- /dev/null +++ b/util/logutil/log.go @@ -0,0 +1,299 @@ +// Copyright 2017 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package logutil + +import ( + "bytes" + "context" + "fmt" + "os" + "path/filepath" + "runtime" + "sort" + "strings" + + "github.com/pingcap/errors" + zaplog "github.com/pingcap/log" + log "github.com/sirupsen/logrus" + "go.uber.org/zap" + "go.uber.org/zap/zapcore" + "gopkg.in/natefinch/lumberjack.v2" +) + +const ( + defaultLogTimeFormat = "2006/01/02 15:04:05.000" + // DefaultLogMaxSize is the default size of log files. + DefaultLogMaxSize = 300 // MB + // DefaultLogFormat is the default format of the log. + DefaultLogFormat = "text" + defaultLogLevel = log.InfoLevel + // DefaultQueryLogMaxLen is the default max length of the query in the log. + DefaultQueryLogMaxLen = 4096 +) + +// EmptyFileLogConfig is an empty FileLogConfig. 
+var EmptyFileLogConfig = FileLogConfig{} + +// FileLogConfig serializes file log related config in toml/json. +type FileLogConfig struct { + zaplog.FileLogConfig +} + +// NewFileLogConfig creates a FileLogConfig. +func NewFileLogConfig(maxSize uint) FileLogConfig { + return FileLogConfig{FileLogConfig: zaplog.FileLogConfig{ + MaxSize: int(maxSize), + }, + } +} + +// LogConfig serializes log related config in toml/json. +type LogConfig struct { + zaplog.Config + + // SlowQueryFile filename, default to File log config on empty. + SlowQueryFile string +} + +// NewLogConfig creates a LogConfig. +func NewLogConfig(level, format string, fileCfg FileLogConfig, disableTimestamp bool, opts ...func(*zaplog.Config)) *LogConfig { + c := &LogConfig{ + Config: zaplog.Config{ + Level: level, + Format: format, + DisableTimestamp: disableTimestamp, + File: fileCfg.FileLogConfig, + }, + } + for _, opt := range opts { + opt(&c.Config) + } + return c +} + +// isSKippedPackageName tests wether path name is on log library calling stack. +func isSkippedPackageName(name string) bool { + return strings.Contains(name, "github.com/sirupsen/logrus") || + strings.Contains(name, "github.com/coreos/pkg/capnslog") +} + +// modifyHook injects file name and line pos into log entry. +type contextHook struct{} + +// Fire implements logrus.Hook interface +// https://github.com/sirupsen/logrus/issues/63 +func (hook *contextHook) Fire(entry *log.Entry) error { + pc := make([]uintptr, 4) + cnt := runtime.Callers(6, pc) + + for i := 0; i < cnt; i++ { + fu := runtime.FuncForPC(pc[i] - 1) + name := fu.Name() + if !isSkippedPackageName(name) { + file, line := fu.FileLine(pc[i] - 1) + entry.Data["file"] = filepath.Base(file) + entry.Data["line"] = line + break + } + } + return nil +} + +// Levels implements logrus.Hook interface. 
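A small sketch of the functional-option parameter on NewLogConfig: the options run after the named arguments, so they can override any zaplog.Config field (the 128 MB size and the override here are illustrative values):

package main

import (
	"fmt"

	zaplog "github.com/pingcap/log"
	"github.com/pingcap/tidb/util/logutil"
)

func main() {
	conf := logutil.NewLogConfig(
		"info",
		logutil.DefaultLogFormat,      // "text"
		logutil.NewFileLogConfig(128), // rotate the log file at 128 MB
		false,                         // keep timestamps...
		func(c *zaplog.Config) { c.DisableTimestamp = true }, // ...unless an option says otherwise
	)
	fmt.Println(conf.Level, conf.File.MaxSize, conf.DisableTimestamp) // info 128 true
}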
+func (hook *contextHook) Levels() []log.Level { + return log.AllLevels +} + +func stringToLogLevel(level string) log.Level { + switch strings.ToLower(level) { + case "fatal": + return log.FatalLevel + case "error": + return log.ErrorLevel + case "warn", "warning": + return log.WarnLevel + case "debug": + return log.DebugLevel + case "info": + return log.InfoLevel + } + return defaultLogLevel +} + +// textFormatter is for compatibility with ngaut/log +type textFormatter struct { + DisableTimestamp bool + EnableEntryOrder bool +} + +// Format implements logrus.Formatter +func (f *textFormatter) Format(entry *log.Entry) ([]byte, error) { + var b *bytes.Buffer + if entry.Buffer != nil { + b = entry.Buffer + } else { + b = &bytes.Buffer{} + } + + if !f.DisableTimestamp { + fmt.Fprintf(b, "%s ", entry.Time.Format(defaultLogTimeFormat)) + } + if file, ok := entry.Data["file"]; ok { + fmt.Fprintf(b, "%s:%v:", file, entry.Data["line"]) + } + fmt.Fprintf(b, " [%s] %s", entry.Level.String(), entry.Message) + + if f.EnableEntryOrder { + keys := make([]string, 0, len(entry.Data)) + for k := range entry.Data { + if k != "file" && k != "line" { + keys = append(keys, k) + } + } + sort.Strings(keys) + for _, k := range keys { + fmt.Fprintf(b, " %v=%v", k, entry.Data[k]) + } + } else { + for k, v := range entry.Data { + if k != "file" && k != "line" { + fmt.Fprintf(b, " %v=%v", k, v) + } + } + } + + b.WriteByte('\n') + + return b.Bytes(), nil +} + +func stringToLogFormatter(format string, disableTimestamp bool) log.Formatter { + switch strings.ToLower(format) { + case "text": + return &textFormatter{ + DisableTimestamp: disableTimestamp, + } + default: + return &textFormatter{} + } +} + +// initFileLog initializes file based logging options. +func initFileLog(cfg *zaplog.FileLogConfig, logger *log.Logger) error { + if st, err := os.Stat(cfg.Filename); err == nil { + if st.IsDir() { + return errors.New("can't use directory as log file name") + } + } + if cfg.MaxSize == 0 { + cfg.MaxSize = DefaultLogMaxSize + } + + // use lumberjack to logrotate + output := &lumberjack.Logger{ + Filename: cfg.Filename, + MaxSize: int(cfg.MaxSize), + MaxBackups: int(cfg.MaxBackups), + MaxAge: int(cfg.MaxDays), + LocalTime: true, + } + + if logger == nil { + log.SetOutput(output) + } else { + logger.Out = output + } + return nil +} + +// InitLogger initializes PD's logger. +func InitLogger(cfg *LogConfig) error { + log.SetLevel(stringToLogLevel(cfg.Level)) + log.AddHook(&contextHook{}) + + if cfg.Format == "" { + cfg.Format = DefaultLogFormat + } + formatter := stringToLogFormatter(cfg.Format, cfg.DisableTimestamp) + log.SetFormatter(formatter) + + if len(cfg.File.Filename) != 0 { + if err := initFileLog(&cfg.File, nil); err != nil { + return errors.Trace(err) + } + } + + return nil +} + +// InitZapLogger initializes a zap logger with cfg. +func InitZapLogger(cfg *LogConfig) error { + gl, props, err := zaplog.InitLogger(&cfg.Config, zap.AddStacktrace(zapcore.FatalLevel)) + if err != nil { + return errors.Trace(err) + } + zaplog.ReplaceGlobals(gl, props) + + return nil +} + +// SetLevel sets the zap logger's level. +func SetLevel(level string) error { + l := zap.NewAtomicLevel() + if err := l.UnmarshalText([]byte(level)); err != nil { + return errors.Trace(err) + } + zaplog.SetLevel(l.Level()) + return nil +} + +type ctxLogKeyType struct{} + +var ctxLogKey = ctxLogKeyType{} + +// Logger gets a contextual logger from current context. +// contextual logger will output common fields from context. 
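A sketch of initializing the zap-based logger and adjusting its level at runtime; the file name is a placeholder, and leaving it empty falls back to the library's default output:

package main

import (
	zaplog "github.com/pingcap/log"
	"github.com/pingcap/tidb/util/logutil"
	"go.uber.org/zap"
)

func main() {
	fileCfg := logutil.NewFileLogConfig(logutil.DefaultLogMaxSize)
	fileCfg.Filename = "tidb-example.log" // placeholder path
	conf := logutil.NewLogConfig("info", logutil.DefaultLogFormat, fileCfg, false)

	// Replace the global zap logger used by the rest of the codebase.
	if err := logutil.InitZapLogger(conf); err != nil {
		panic(err)
	}
	zaplog.L().Info("server started", zap.String("component", "example"))

	// Level strings are parsed case-insensitively: "warn", "Error", "DEBUG" all work.
	if err := logutil.SetLevel("warn"); err != nil {
		panic(err)
	}
	zaplog.L().Info("dropped: below the warn threshold")
}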
+func Logger(ctx context.Context) *zap.Logger { + if ctxlogger, ok := ctx.Value(ctxLogKey).(*zap.Logger); ok { + return ctxlogger + } + return zaplog.L() +} + +// BgLogger is alias of `logutil.BgLogger()` +func BgLogger() *zap.Logger { + return zaplog.L() +} + +// WithConnID attaches connId to context. +func WithConnID(ctx context.Context, connID uint32) context.Context { + var logger *zap.Logger + if ctxLogger, ok := ctx.Value(ctxLogKey).(*zap.Logger); ok { + logger = ctxLogger + } else { + logger = zaplog.L() + } + return context.WithValue(ctx, ctxLogKey, logger.With(zap.Uint32("conn", connID))) +} + +// WithKeyValue attaches key/value to context. +func WithKeyValue(ctx context.Context, key, value string) context.Context { + var logger *zap.Logger + if ctxLogger, ok := ctx.Value(ctxLogKey).(*zap.Logger); ok { + logger = ctxLogger + } else { + logger = zaplog.L() + } + return context.WithValue(ctx, ctxLogKey, logger.With(zap.String(key, value))) +} diff --git a/util/logutil/log_test.go b/util/logutil/log_test.go new file mode 100644 index 0000000..4996c30 --- /dev/null +++ b/util/logutil/log_test.go @@ -0,0 +1,191 @@ +// Copyright 2017 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package logutil + +import ( + "bufio" + "bytes" + "context" + "fmt" + "io" + "os" + "runtime" + "strings" + "testing" + + . "github.com/pingcap/check" + zaplog "github.com/pingcap/log" + log "github.com/sirupsen/logrus" + "go.uber.org/zap" +) + +const ( + logPattern = `\d\d\d\d/\d\d/\d\d \d\d:\d\d:\d\d\.\d\d\d ([\w_%!$@.,+~-]+|\\.)+:\d+: \[(fatal|error|warning|info|debug)\] .*?\n` + // [2019/02/13 15:56:05.385 +08:00] [INFO] [log_test.go:167] ["info message"] ["str key"=val] ["int key"=123] + zapLogWithConnIDPattern = `\[\d\d\d\d/\d\d/\d\d \d\d:\d\d:\d\d.\d\d\d\ (\+|-)\d\d:\d\d\] \[(FATAL|ERROR|WARN|INFO|DEBUG)\] \[([\w_%!$@.,+~-]+|\\.)+:\d+\] \[.*\] \[conn=.*\] (\[.*=.*\]).*\n` + // [2019/02/13 15:56:05.385 +08:00] [INFO] [log_test.go:167] ["info message"] ["str key"=val] ["int key"=123] + zapLogWithKeyValPattern = `\[\d\d\d\d/\d\d/\d\d \d\d:\d\d:\d\d.\d\d\d\ (\+|-)\d\d:\d\d\] \[(FATAL|ERROR|WARN|INFO|DEBUG)\] \[([\w_%!$@.,+~-]+|\\.)+:\d+\] \[.*\] \[ctxKey=.*\] (\[.*=.*\]).*\n` +) + +var PrettyPrint = prettyPrint + +func Test(t *testing.T) { + TestingT(t) +} + +var _ = Suite(&testLogSuite{}) + +type testLogSuite struct { + buf *bytes.Buffer +} + +func (s *testLogSuite) SetUpSuite(c *C) { + s.buf = &bytes.Buffer{} +} + +func (s *testLogSuite) SetUpTest(c *C) { + s.buf = &bytes.Buffer{} +} + +func (s *testLogSuite) TestStringToLogLevel(c *C) { + c.Assert(stringToLogLevel("fatal"), Equals, log.FatalLevel) + c.Assert(stringToLogLevel("ERROR"), Equals, log.ErrorLevel) + c.Assert(stringToLogLevel("warn"), Equals, log.WarnLevel) + c.Assert(stringToLogLevel("warning"), Equals, log.WarnLevel) + c.Assert(stringToLogLevel("debug"), Equals, log.DebugLevel) + c.Assert(stringToLogLevel("info"), Equals, log.InfoLevel) + c.Assert(stringToLogLevel("whatever"), Equals, log.InfoLevel) +} + +// TestLogging assure log format and log redirection works. 
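A sketch of the contextual helpers above, assuming InitZapLogger has already run as in the previous sketch; the key/value pair is illustrative. Fields attached via WithConnID and WithKeyValue ride along on every Logger(ctx) call:

package main

import (
	"context"

	"github.com/pingcap/tidb/util/logutil"
	"go.uber.org/zap"
)

func main() {
	ctx := logutil.WithConnID(context.Background(), 42)
	ctx = logutil.WithKeyValue(ctx, "module", "example")

	// Every line written through this context carries the conn=42 and module=example fields.
	logutil.Logger(ctx).Info("handling query", zap.String("sql", "SELECT 1"))

	// BgLogger has no per-connection fields attached.
	logutil.BgLogger().Info("background housekeeping")
}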
+func (s *testLogSuite) TestLogging(c *C) { + conf := NewLogConfig("warn", DefaultLogFormat, NewFileLogConfig(0), false) + conf.File.Filename = "log_file" + c.Assert(InitLogger(conf), IsNil) + + log.SetOutput(s.buf) + + log.Infof("[this message should not be sent to buf]") + c.Assert(s.buf.Len(), Equals, 0) + + log.Warningf("[this message should be sent to buf]") + entry, err := s.buf.ReadString('\n') + c.Assert(err, IsNil) + c.Assert(entry, Matches, logPattern) + + log.Warnf("this message comes from logrus") + entry, err = s.buf.ReadString('\n') + c.Assert(err, IsNil) + c.Assert(entry, Matches, logPattern) + fmt.Println(entry, logPattern) + c.Assert(strings.Contains(entry, "log_test.go"), IsTrue) +} + +func (s *testLogSuite) TestLoggerKeepOrder(c *C) { + conf := NewLogConfig("warn", DefaultLogFormat, EmptyFileLogConfig, true) + c.Assert(InitLogger(conf), IsNil) + logger := log.StandardLogger() + ft, ok := logger.Formatter.(*textFormatter) + c.Assert(ok, IsTrue) + ft.EnableEntryOrder = true + logger.Out = s.buf + logEntry := log.NewEntry(logger) + logEntry.Data = log.Fields{ + "connectionId": 1, + "costTime": "1", + "database": "test", + "sql": "select 1", + "txnStartTS": 1, + } + + _, _, line, _ := runtime.Caller(0) + logEntry.WithField("type", "slow-query").WithField("succ", true).Warnf("slow-query") + expectMsg := fmt.Sprintf("log_test.go:%v: [warning] slow-query connectionId=1 costTime=1 database=test sql=select 1 succ=true txnStartTS=1 type=slow-query\n", line+1) + c.Assert(s.buf.String(), Equals, expectMsg) + + s.buf.Reset() + logEntry.Data = log.Fields{ + "a": "a", + "d": "d", + "e": "e", + "b": "b", + "f": "f", + "c": "c", + } + + _, _, line, _ = runtime.Caller(0) + logEntry.Warnf("slow-query") + expectMsg = fmt.Sprintf("log_test.go:%v: [warning] slow-query a=a b=b c=c d=d e=e f=f\n", line+1) + c.Assert(s.buf.String(), Equals, expectMsg) +} + +func (s *testLogSuite) TestZapLoggerWithKeys(c *C) { + fileCfg := FileLogConfig{zaplog.FileLogConfig{Filename: "zap_log", MaxSize: 4096}} + conf := NewLogConfig("info", DefaultLogFormat, fileCfg, false) + err := InitZapLogger(conf) + c.Assert(err, IsNil) + connID := uint32(123) + ctx := WithConnID(context.Background(), connID) + s.testZapLogger(ctx, c, fileCfg.Filename, zapLogWithConnIDPattern) + os.Remove(fileCfg.Filename) + + err = InitZapLogger(conf) + c.Assert(err, IsNil) + key := "ctxKey" + val := "ctxValue" + ctx1 := WithKeyValue(context.Background(), key, val) + s.testZapLogger(ctx1, c, fileCfg.Filename, zapLogWithKeyValPattern) + os.Remove(fileCfg.Filename) +} + +func (s *testLogSuite) testZapLogger(ctx context.Context, c *C, fileName, pattern string) { + Logger(ctx).Debug("debug msg", zap.String("test with key", "true")) + Logger(ctx).Info("info msg", zap.String("test with key", "true")) + Logger(ctx).Warn("warn msg", zap.String("test with key", "true")) + Logger(ctx).Error("error msg", zap.String("test with key", "true")) + + f, err := os.Open(fileName) + c.Assert(err, IsNil) + defer f.Close() + + r := bufio.NewReader(f) + for { + var str string + str, err = r.ReadString('\n') + if err != nil { + break + } + c.Assert(str, Matches, pattern) + c.Assert(strings.Contains(str, "stack"), IsFalse) + c.Assert(strings.Contains(str, "errorVerbose"), IsFalse) + } + c.Assert(err, Equals, io.EOF) +} + +func (s *testLogSuite) TestSetLevel(c *C) { + conf := NewLogConfig("info", DefaultLogFormat, EmptyFileLogConfig, false) + err := InitZapLogger(conf) + c.Assert(err, IsNil) + + c.Assert(zaplog.GetLevel(), Equals, zap.InfoLevel) + err = 
SetLevel("warn") + c.Assert(err, IsNil) + c.Assert(zaplog.GetLevel(), Equals, zap.WarnLevel) + err = SetLevel("Error") + c.Assert(err, IsNil) + c.Assert(zaplog.GetLevel(), Equals, zap.ErrorLevel) + err = SetLevel("DEBUG") + c.Assert(err, IsNil) + c.Assert(zaplog.GetLevel(), Equals, zap.DebugLevel) +} diff --git a/util/math/math.go b/util/math/math.go new file mode 100644 index 0000000..3a25178 --- /dev/null +++ b/util/math/math.go @@ -0,0 +1,50 @@ +// Copyright 2019 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package math + +import "math" + +// Abs implement the abs function according to http://cavaliercoder.com/blog/optimized-abs-for-int64-in-go.html +func Abs(n int64) int64 { + y := n >> 63 + return (n ^ y) - y +} + +// uintSizeTable is used as a table to do comparison to get uint length is faster than doing loop on division with 10 +var uintSizeTable = [21]uint64{ + 0, // redundant 0 here, so to make function StrLenOfUint64Fast to count from 1 and return i directly + 9, 99, 999, 9999, 99999, + 999999, 9999999, 99999999, 999999999, 9999999999, + 99999999999, 999999999999, 9999999999999, 99999999999999, 999999999999999, + 9999999999999999, 99999999999999999, 999999999999999999, 9999999999999999999, + math.MaxUint64, +} // math.MaxUint64 is 18446744073709551615 and it has 20 digits + +// StrLenOfUint64Fast efficiently calculate the string character lengths of an uint64 as input +func StrLenOfUint64Fast(x uint64) int { + for i := 1; ; i++ { + if x <= uintSizeTable[i] { + return i + } + } +} + +// StrLenOfInt64Fast efficiently calculate the string character lengths of an int64 as input +func StrLenOfInt64Fast(x int64) int { + size := 0 + if x < 0 { + size = 1 // add "-" sign on the length count + } + return size + StrLenOfUint64Fast(uint64(Abs(x))) +} diff --git a/util/math/math_test.go b/util/math/math_test.go new file mode 100644 index 0000000..1b34d28 --- /dev/null +++ b/util/math/math_test.go @@ -0,0 +1,56 @@ +// Copyright 2019 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package math + +import ( + "math/rand" + "strconv" + "testing" + + . 
"github.com/pingcap/check" +) + +func TestT(t *testing.T) { + TestingT(t) +} + +var _ = Suite(&testMath{}) + +type testMath struct{} + +func (s *testMath) TestStrLenOfUint64Fast_RandomTestCases(c *C) { + for i := 0; i < 1000000; i++ { + num := rand.Uint64() + expected := len(strconv.FormatUint(num, 10)) + actual := StrLenOfUint64Fast(num) + c.Assert(actual, Equals, expected) + } +} + +func (s *testMath) TestStrLenOfUint64Fast_ManualTestCases(c *C) { + nums := [22]uint64{0, + 1, 12, 123, 1234, 12345, + 123456, 1234567, 12345678, 123456789, 1234567890, + 1234567891, 12345678912, 123456789123, 1234567891234, 12345678912345, + 123456789123456, 1234567891234567, 12345678912345678, 123456789123456789, + 123456789123457890, + ^uint64(0), + } + + for _, num := range nums { + expected := len(strconv.FormatUint(num, 10)) + actual := StrLenOfUint64Fast(num) + c.Assert(actual, Equals, expected) + } +} diff --git a/util/misc.go b/util/misc.go new file mode 100644 index 0000000..2c6cc39 --- /dev/null +++ b/util/misc.go @@ -0,0 +1,149 @@ +// Copyright 2016 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package util + +import ( + "runtime" + "strings" + "time" + + "github.com/pingcap/errors" + "github.com/pingcap/tidb/parser" + "github.com/pingcap/tidb/parser/mysql" + "github.com/pingcap/tidb/parser/terror" + "github.com/pingcap/tidb/util/logutil" + "go.uber.org/zap" +) + +const ( + // DefaultMaxRetries indicates the max retry count. + DefaultMaxRetries = 30 + // RetryInterval indicates retry interval. + RetryInterval uint64 = 500 + // GCTimeFormat is the format that gc_worker used to store times. + GCTimeFormat = "20060102-15:04:05 -0700" +) + +// RunWithRetry will run the f with backoff and retry. +// retryCnt: Max retry count +// backoff: When run f failed, it will sleep backoff * triedCount time.Millisecond. +// Function f should have two return value. The first one is an bool which indicate if the err if retryable. +// The second is if the f meet any error. +func RunWithRetry(retryCnt int, backoff uint64, f func() (bool, error)) (err error) { + for i := 1; i <= retryCnt; i++ { + var retryAble bool + retryAble, err = f() + if err == nil || !retryAble { + return errors.Trace(err) + } + sleepTime := time.Duration(backoff*uint64(i)) * time.Millisecond + time.Sleep(sleepTime) + } + return errors.Trace(err) +} + +// GetStack gets the stacktrace. +func GetStack() []byte { + const size = 4096 + buf := make([]byte, size) + stackSize := runtime.Stack(buf, false) + buf = buf[:stackSize] + return buf +} + +// WithRecovery wraps goroutine startup call with force recovery. +// it will dump current goroutine stack into log if catch any recover result. +// exec: execute logic function. +// recoverFn: handler will be called after recover and before dump stack, passing `nil` means noop. 
+func WithRecovery(exec func(), recoverFn func(r interface{})) { + defer func() { + r := recover() + if recoverFn != nil { + recoverFn(r) + } + if r != nil { + logutil.BgLogger().Error("panic in the recoverable goroutine", + zap.Reflect("r", r), + zap.Stack("stack trace")) + } + }() + exec() +} + +// CompatibleParseGCTime parses a string with `GCTimeFormat` and returns a time.Time. If `value` can't be parsed as that +// format, truncate to last space and try again. This function is only useful when loading times that saved by +// gc_worker. We have changed the format that gc_worker saves time (removed the last field), but when loading times it +// should be compatible with the old format. +func CompatibleParseGCTime(value string) (time.Time, error) { + t, err := time.Parse(GCTimeFormat, value) + + if err != nil { + // Remove the last field that separated by space + parts := strings.Split(value, " ") + prefix := strings.Join(parts[:len(parts)-1], " ") + t, err = time.Parse(GCTimeFormat, prefix) + } + + if err != nil { + err = errors.Errorf("string \"%v\" doesn't has a prefix that matches format \"%v\"", value, GCTimeFormat) + } + return t, err +} + +const ( + // syntaxErrorPrefix is the common prefix for SQL syntax error in TiDB. + syntaxErrorPrefix = "You have an error in your SQL syntax; check the manual that corresponds to your TiDB version for the right syntax to use" +) + +// SyntaxError converts parser error to TiDB's syntax error. +func SyntaxError(err error) error { + if err == nil { + return nil + } + logutil.BgLogger().Error("syntax error", zap.Error(err)) + + // If the error is already a terror with stack, pass it through. + if errors.HasStack(err) { + cause := errors.Cause(err) + if _, ok := cause.(*terror.Error); ok { + return err + } + } + + return parser.ErrParse.GenWithStackByArgs(syntaxErrorPrefix, err.Error()) +} + +// SyntaxWarn converts parser warn to TiDB's syntax warn. +func SyntaxWarn(err error) error { + if err == nil { + return nil + } + return parser.ErrParse.GenWithStackByArgs(syntaxErrorPrefix, err.Error()) +} + +const ( + // InformationSchemaName is the `INFORMATION_SCHEMA` database name. + InformationSchemaName = "INFORMATION_SCHEMA" + // InformationSchemaLowerName is the `INFORMATION_SCHEMA` database lower name. + InformationSchemaLowerName = "information_schema" +) + +// IsMemOrSysDB uses to check whether dbLowerName is memory database or system database. +func IsMemOrSysDB(dbLowerName string) bool { + switch dbLowerName { + case InformationSchemaLowerName, mysql.SystemDB: + return true + } + return false +} diff --git a/util/misc_test.go b/util/misc_test.go new file mode 100644 index 0000000..58c349d --- /dev/null +++ b/util/misc_test.go @@ -0,0 +1,148 @@ +// Copyright 2016 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package util + +import ( + "bytes" + "time" + + . 
"github.com/pingcap/check" + "github.com/pingcap/errors" + "github.com/pingcap/tidb/parser" + "github.com/pingcap/tidb/parser/terror" + "github.com/pingcap/tidb/util/testleak" +) + +var _ = Suite(&testMiscSuite{}) + +type testMiscSuite struct { +} + +func (s *testMiscSuite) SetUpSuite(c *C) { +} + +func (s *testMiscSuite) TearDownSuite(c *C) { +} + +func (s *testMiscSuite) TestRunWithRetry(c *C) { + defer testleak.AfterTest(c)() + // Run succ. + cnt := 0 + err := RunWithRetry(3, 1, func() (bool, error) { + cnt++ + if cnt < 2 { + return true, errors.New("err") + } + return true, nil + }) + c.Assert(err, IsNil) + c.Assert(cnt, Equals, 2) + + // Run failed. + cnt = 0 + err = RunWithRetry(3, 1, func() (bool, error) { + cnt++ + if cnt < 4 { + return true, errors.New("err") + } + return true, nil + }) + c.Assert(err, NotNil) + c.Assert(cnt, Equals, 3) + + // Run failed. + cnt = 0 + err = RunWithRetry(3, 1, func() (bool, error) { + cnt++ + if cnt < 2 { + return false, errors.New("err") + } + return true, nil + }) + c.Assert(err, NotNil) + c.Assert(cnt, Equals, 1) +} + +func (s *testMiscSuite) TestCompatibleParseGCTime(c *C) { + values := []string{ + "20181218-19:53:37 +0800 CST", + "20181218-19:53:37 +0800 MST", + "20181218-19:53:37 +0800 FOO", + "20181218-19:53:37 +0800 +08", + "20181218-19:53:37 +0800", + "20181218-19:53:37 +0800 ", + "20181218-11:53:37 +0000", + } + + invalidValues := []string{ + "", + " ", + "foo", + "20181218-11:53:37", + "20181218-19:53:37 +0800CST", + "20181218-19:53:37 +0800 FOO BAR", + "20181218-19:53:37 +0800FOOOOOOO BAR", + "20181218-19:53:37 ", + } + + expectedTime := time.Date(2018, 12, 18, 11, 53, 37, 0, time.UTC) + expectedTimeFormatted := "20181218-19:53:37 +0800" + + beijing, err := time.LoadLocation("Asia/Shanghai") + c.Assert(err, IsNil) + + for _, value := range values { + t, err := CompatibleParseGCTime(value) + c.Assert(err, IsNil) + c.Assert(t.Equal(expectedTime), Equals, true) + + formatted := t.In(beijing).Format(GCTimeFormat) + c.Assert(formatted, Equals, expectedTimeFormatted) + } + + for _, value := range invalidValues { + _, err := CompatibleParseGCTime(value) + c.Assert(err, NotNil) + } +} + +func (s *testMiscSuite) TestBasicFunc(c *C) { + // Test for GetStack. + b := GetStack() + c.Assert(len(b) < 4096, IsTrue) + + // Test for WithRecovery. + var recover interface{} + WithRecovery(func() { + panic("test") + }, func(r interface{}) { + recover = r + }) + c.Assert(recover, Equals, "test") + + // Test for SyntaxError. + c.Assert(SyntaxError(nil), IsNil) + c.Assert(terror.ErrorEqual(SyntaxError(errors.New("test")), parser.ErrParse), IsTrue) + c.Assert(terror.ErrorEqual(SyntaxError(parser.ErrSyntax.GenWithStackByArgs()), parser.ErrSyntax), IsTrue) + + // Test for SyntaxWarn. + c.Assert(SyntaxWarn(nil), IsNil) + c.Assert(terror.ErrorEqual(SyntaxWarn(errors.New("test")), parser.ErrParse), IsTrue) + + // Test for RandomBuf. + buf := RandomBuf(5) + c.Assert(len(buf), Equals, 5) + c.Assert(bytes.Contains(buf, []byte("$")), IsFalse) + c.Assert(bytes.Contains(buf, []byte{0}), IsFalse) +} diff --git a/util/mock/client.go b/util/mock/client.go new file mode 100644 index 0000000..46496df --- /dev/null +++ b/util/mock/client.go @@ -0,0 +1,32 @@ +// Copyright 2018 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package mock + +import ( + "context" + + "github.com/pingcap/tidb/kv" +) + +// Client implement kv.Client interface, mocked from "CopClient" defined in +// "store/tikv/copprocessor.go". +type Client struct { + kv.RequestTypeSupportedChecker + MockResponse kv.Response +} + +// Send implement kv.Client interface. +func (c *Client) Send(ctx context.Context, req *kv.Request, kv *kv.Variables) kv.Response { + return c.MockResponse +} diff --git a/util/mock/context.go b/util/mock/context.go new file mode 100644 index 0000000..90759f2 --- /dev/null +++ b/util/mock/context.go @@ -0,0 +1,248 @@ +// Copyright 2015 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package mock is just for test only. +package mock + +import ( + "context" + "fmt" + "time" + + "github.com/pingcap/errors" + "github.com/pingcap/tidb/kv" + "github.com/pingcap/tidb/owner" + "github.com/pingcap/tidb/parser/model" + "github.com/pingcap/tidb/sessionctx" + "github.com/pingcap/tidb/sessionctx/variable" + "github.com/pingcap/tidb/util/sqlexec" +) + +var _ sessionctx.Context = (*Context)(nil) +var _ sqlexec.SQLExecutor = (*Context)(nil) + +// Context represents mocked sessionctx.Context. +type Context struct { + values map[fmt.Stringer]interface{} + txn wrapTxn // mock global variable + Store kv.Storage // mock global variable + sessionVars *variable.SessionVars + ctx context.Context + cancel context.CancelFunc +} + +type wrapTxn struct { + kv.Transaction +} + +func (txn *wrapTxn) Valid() bool { + return txn.Transaction != nil && txn.Transaction.Valid() +} + +// Execute implements sqlexec.SQLExecutor Execute interface. +func (c *Context) Execute(ctx context.Context, sql string) ([]sqlexec.RecordSet, error) { + return nil, errors.Errorf("Not Support.") +} + +type mockDDLOwnerChecker struct{} + +func (c *mockDDLOwnerChecker) IsOwner() bool { return true } + +// DDLOwnerChecker returns owner.DDLOwnerChecker. +func (c *Context) DDLOwnerChecker() owner.DDLOwnerChecker { + return &mockDDLOwnerChecker{} +} + +// SetValue implements sessionctx.Context SetValue interface. +func (c *Context) SetValue(key fmt.Stringer, value interface{}) { + c.values[key] = value +} + +// Value implements sessionctx.Context Value interface. +func (c *Context) Value(key fmt.Stringer) interface{} { + value := c.values[key] + return value +} + +// ClearValue implements sessionctx.Context ClearValue interface. +func (c *Context) ClearValue(key fmt.Stringer) { + delete(c.values, key) +} + +// GetSessionVars implements the sessionctx.Context GetSessionVars interface. +func (c *Context) GetSessionVars() *variable.SessionVars { + return c.sessionVars +} + +// Txn implements sessionctx.Context Txn interface. 
+func (c *Context) Txn(bool) (kv.Transaction, error) { + return &c.txn, nil +} + +// GetClient implements sessionctx.Context GetClient interface. +func (c *Context) GetClient() kv.Client { + if c.Store == nil { + return nil + } + return c.Store.GetClient() +} + +// GetGlobalSysVar implements GlobalVarAccessor GetGlobalSysVar interface. +func (c *Context) GetGlobalSysVar(ctx sessionctx.Context, name string) (string, error) { + v := variable.GetSysVar(name) + if v == nil { + return "", variable.ErrUnknownSystemVar.GenWithStackByArgs(name) + } + return v.Value, nil +} + +// SetGlobalSysVar implements GlobalVarAccessor SetGlobalSysVar interface. +func (c *Context) SetGlobalSysVar(ctx sessionctx.Context, name string, value string) error { + v := variable.GetSysVar(name) + if v == nil { + return variable.ErrUnknownSystemVar.GenWithStackByArgs(name) + } + v.Value = value + return nil +} + +// NewTxn implements the sessionctx.Context interface. +func (c *Context) NewTxn(context.Context) error { + if c.Store == nil { + return errors.New("store is not set") + } + if c.txn.Valid() { + err := c.txn.Commit(c.ctx) + if err != nil { + return errors.Trace(err) + } + } + + txn, err := c.Store.Begin() + if err != nil { + return errors.Trace(err) + } + c.txn.Transaction = txn + return nil +} + +// RefreshTxnCtx implements the sessionctx.Context interface. +func (c *Context) RefreshTxnCtx(ctx context.Context) error { + return errors.Trace(c.NewTxn(ctx)) +} + +// InitTxnWithStartTS implements the sessionctx.Context interface with startTS. +func (c *Context) InitTxnWithStartTS(startTS uint64) error { + if c.txn.Valid() { + return nil + } + if c.Store != nil { + txn, err := c.Store.BeginWithStartTS(startTS) + if err != nil { + return errors.Trace(err) + } + txn.SetCap(kv.DefaultTxnMembufCap) + c.txn.Transaction = txn + } + return nil +} + +// GetStore gets the store of session. +func (c *Context) GetStore() kv.Storage { + return c.Store +} + +// Cancel implements the Session interface. +func (c *Context) Cancel() { + c.cancel() +} + +// GoCtx returns standard sessionctx.Context that bind with current transaction. +func (c *Context) GoCtx() context.Context { + return c.ctx +} + +// StmtCommit implements the sessionctx.Context interface. +func (c *Context) StmtCommit() error { + return nil +} + +// StmtRollback implements the sessionctx.Context interface. +func (c *Context) StmtRollback() { +} + +// StmtAddDirtyTableOP implements the sessionctx.Context interface. +func (c *Context) StmtAddDirtyTableOP(op int, tid int64, handle int64) { +} + +// AddTableLock implements the sessionctx.Context interface. +func (c *Context) AddTableLock(_ []model.TableLockTpInfo) { +} + +// ReleaseTableLocks implements the sessionctx.Context interface. +func (c *Context) ReleaseTableLocks(locks []model.TableLockTpInfo) { +} + +// ReleaseTableLockByTableIDs implements the sessionctx.Context interface. +func (c *Context) ReleaseTableLockByTableIDs(tableIDs []int64) { +} + +// CheckTableLocked implements the sessionctx.Context interface. +func (c *Context) CheckTableLocked(_ int64) (bool, model.TableLockType) { + return false, model.TableLockNone +} + +// GetAllTableLocks implements the sessionctx.Context interface. +func (c *Context) GetAllTableLocks() []model.TableLockTpInfo { + return nil +} + +// ReleaseAllTableLocks implements the sessionctx.Context interface. +func (c *Context) ReleaseAllTableLocks() { +} + +// HasLockedTables implements the sessionctx.Context interface. 
+func (c *Context) HasLockedTables() bool { + return false +} + +// PrepareTxnFuture implements the sessionctx.Context interface. +func (c *Context) PrepareTxnFuture(ctx context.Context) { +} + +// Close implements the sessionctx.Context interface. +func (c *Context) Close() { +} + +// NewContext creates a new mocked sessionctx.Context. +func NewContext() *Context { + ctx, cancel := context.WithCancel(context.Background()) + sctx := &Context{ + values: make(map[fmt.Stringer]interface{}), + sessionVars: variable.NewSessionVars(), + ctx: ctx, + cancel: cancel, + } + sctx.sessionVars.InitChunkSize = 2 + sctx.sessionVars.MaxChunkSize = 32 + sctx.sessionVars.StmtCtx.TimeZone = time.UTC + sctx.sessionVars.GlobalVarsAccessor = variable.NewMockGlobalAccessor() + if err := sctx.GetSessionVars().SetSystemVar(variable.MaxAllowedPacket, "67108864"); err != nil { + panic(err) + } + return sctx +} + +// HookKeyForTest is as alias, used by context.WithValue. +// golint forbits using string type as key in context.WithValue. +type HookKeyForTest string diff --git a/util/mock/mock_test.go b/util/mock/mock_test.go new file mode 100644 index 0000000..701c656 --- /dev/null +++ b/util/mock/mock_test.go @@ -0,0 +1,59 @@ +// Copyright 2015 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package mock + +import ( + "testing" + + . "github.com/pingcap/check" + "github.com/pingcap/tidb/util/testleak" +) + +func TestT(t *testing.T) { + CustomVerboseFlag = true + TestingT(t) +} + +var _ = Suite(&testMockSuite{}) + +type testMockSuite struct { +} + +type contextKeyType int + +func (k contextKeyType) String() string { + return "mock_key" +} + +const contextKey contextKeyType = 0 + +func (s *testMockSuite) TestContext(c *C) { + defer testleak.AfterTest(c)() + ctx := NewContext() + + ctx.SetValue(contextKey, 1) + v := ctx.Value(contextKey) + c.Assert(v, Equals, 1) + + ctx.ClearValue(contextKey) + v = ctx.Value(contextKey) + c.Assert(v, IsNil) +} + +func BenchmarkNewContext(b *testing.B) { + b.ReportAllocs() + for i := 0; i < b.N; i++ { + NewContext() + } +} diff --git a/util/mock/store.go b/util/mock/store.go new file mode 100644 index 0000000..0322017 --- /dev/null +++ b/util/mock/store.go @@ -0,0 +1,64 @@ +// Copyright 2018 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package mock + +import ( + "context" + + "github.com/pingcap/tidb/kv" + "github.com/pingcap/tidb/store/tikv/oracle" +) + +// Store implements kv.Storage interface. +type Store struct { + Client kv.Client +} + +// GetClient implements kv.Storage interface. 
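A short sketch of how the mock pieces snap together in a unit test: the Context forwards GetClient to its Store (whose GetClient method is defined just below), which in turn hands back the mock Client:

package main

import (
	"fmt"

	"github.com/pingcap/tidb/util/mock"
)

func main() {
	sctx := mock.NewContext()
	sctx.Store = &mock.Store{Client: &mock.Client{}} // MockResponse can be filled per test

	fmt.Println(sctx.GetClient() != nil)            // true: Context -> Store -> Client
	fmt.Println(sctx.GetSessionVars().MaxChunkSize) // 32, set by NewContext
}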
+func (s *Store) GetClient() kv.Client { return s.Client } + +// GetOracle implements kv.Storage interface. +func (s *Store) GetOracle() oracle.Oracle { return nil } + +// Begin implements kv.Storage interface. +func (s *Store) Begin() (kv.Transaction, error) { return nil, nil } + +// BeginWithStartTS implements kv.Storage interface. +func (s *Store) BeginWithStartTS(startTS uint64) (kv.Transaction, error) { return s.Begin() } + +// GetSnapshot implements kv.Storage interface. +func (s *Store) GetSnapshot(ver kv.Version) (kv.Snapshot, error) { return nil, nil } + +// Close implements kv.Storage interface. +func (s *Store) Close() error { return nil } + +// UUID implements kv.Storage interface. +func (s *Store) UUID() string { return "mock" } + +// CurrentVersion implements kv.Storage interface. +func (s *Store) CurrentVersion() (kv.Version, error) { return kv.Version{}, nil } + +// SupportDeleteRange implements kv.Storage interface. +func (s *Store) SupportDeleteRange() bool { return false } + +// Name implements kv.Storage interface. +func (s *Store) Name() string { return "UtilMockStorage" } + +// Describe implements kv.Storage interface. +func (s *Store) Describe() string { + return "UtilMockStorage is a mock Store implementation, only for unittests in util package" +} + +// ShowStatus implements kv.Storage interface. +func (s *Store) ShowStatus(ctx context.Context, key string) (interface{}, error) { return nil, nil } diff --git a/util/mvmap/fnv.go b/util/mvmap/fnv.go new file mode 100644 index 0000000..d9e27aa --- /dev/null +++ b/util/mvmap/fnv.go @@ -0,0 +1,33 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Copyright 2017 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package mvmap + +const ( + offset64 uint64 = 14695981039346656037 + prime64 uint64 = 1099511628211 +) + +// fnvHash64 is ported from go library, which is thread-safe. +func fnvHash64(data []byte) uint64 { + hash := offset64 + for _, c := range data { + hash *= prime64 + hash ^= uint64(c) + } + return hash +} diff --git a/util/mvmap/mvmap.go b/util/mvmap/mvmap.go new file mode 100644 index 0000000..c26e95a --- /dev/null +++ b/util/mvmap/mvmap.go @@ -0,0 +1,211 @@ +// Copyright 2017 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package mvmap + +import ( + "bytes" +) + +type entry struct { + addr dataAddr + keyLen uint32 + valLen uint32 + next entryAddr +} + +type entryStore struct { + slices [][]entry + sliceIdx uint32 + sliceLen uint32 +} + +type dataStore struct { + slices [][]byte + sliceIdx uint32 + sliceLen uint32 +} + +type entryAddr struct { + sliceIdx uint32 + offset uint32 +} + +type dataAddr struct { + sliceIdx uint32 + offset uint32 +} + +const ( + maxDataSliceLen = 64 * 1024 + maxEntrySliceLen = 8 * 1024 +) + +func (ds *dataStore) put(key, value []byte) dataAddr { + dataLen := uint32(len(key) + len(value)) + if ds.sliceLen != 0 && ds.sliceLen+dataLen > maxDataSliceLen { + ds.slices = append(ds.slices, make([]byte, 0, max(maxDataSliceLen, int(dataLen)))) + ds.sliceLen = 0 + ds.sliceIdx++ + } + addr := dataAddr{sliceIdx: ds.sliceIdx, offset: ds.sliceLen} + slice := ds.slices[ds.sliceIdx] + slice = append(slice, key...) + slice = append(slice, value...) + ds.slices[ds.sliceIdx] = slice + ds.sliceLen += dataLen + return addr +} + +func max(a, b int) int { + if a > b { + return a + } + return b +} + +func (ds *dataStore) get(e entry, key []byte) []byte { + slice := ds.slices[e.addr.sliceIdx] + valOffset := e.addr.offset + e.keyLen + if !bytes.Equal(key, slice[e.addr.offset:valOffset]) { + return nil + } + return slice[valOffset : valOffset+e.valLen] +} + +func (ds *dataStore) getEntryData(e entry) (key, value []byte) { + slice := ds.slices[e.addr.sliceIdx] + keyOffset := e.addr.offset + key = slice[keyOffset : keyOffset+e.keyLen] + valOffset := e.addr.offset + e.keyLen + value = slice[valOffset : valOffset+e.valLen] + return +} + +var nullEntryAddr = entryAddr{} + +func (es *entryStore) put(e entry) entryAddr { + if es.sliceLen == maxEntrySliceLen { + es.slices = append(es.slices, make([]entry, 0, maxEntrySliceLen)) + es.sliceLen = 0 + es.sliceIdx++ + } + addr := entryAddr{sliceIdx: es.sliceIdx, offset: es.sliceLen} + slice := es.slices[es.sliceIdx] + slice = append(slice, e) + es.slices[es.sliceIdx] = slice + es.sliceLen++ + return addr +} + +func (es *entryStore) get(addr entryAddr) entry { + return es.slices[addr.sliceIdx][addr.offset] +} + +// MVMap stores multiple value for a given key with minimum GC overhead. +// A given key can store multiple values. +// It is not thread-safe, should only be used in one goroutine. +type MVMap struct { + entryStore entryStore + dataStore dataStore + hashTable map[uint64]entryAddr + length int +} + +// NewMVMap creates a new multi-value map. +func NewMVMap() *MVMap { + m := new(MVMap) + m.hashTable = make(map[uint64]entryAddr) + m.entryStore.slices = [][]entry{make([]entry, 0, 64)} + // Append the first empty entry, so the zero entryAddr can represent null. + m.entryStore.put(entry{}) + m.dataStore.slices = [][]byte{make([]byte, 0, 1024)} + return m +} + +// Put puts the key/value pairs to the MVMap, if the key already exists, old value will not be overwritten, +// values are stored in a list. +func (m *MVMap) Put(key, value []byte) { + hashKey := fnvHash64(key) + oldEntryAddr := m.hashTable[hashKey] + dataAddr := m.dataStore.put(key, value) + e := entry{ + addr: dataAddr, + keyLen: uint32(len(key)), + valLen: uint32(len(value)), + next: oldEntryAddr, + } + newEntryAddr := m.entryStore.put(e) + m.hashTable[hashKey] = newEntryAddr + m.length++ +} + +// Get gets the values of the "key" and appends them to "values". 
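A usage sketch of the multi-value semantics: Put never overwrites, and Get (defined right below) appends into the caller's slice so a buffer can be reused across lookups:

package main

import (
	"fmt"

	"github.com/pingcap/tidb/util/mvmap"
)

func main() {
	m := mvmap.NewMVMap()
	m.Put([]byte("k1"), []byte("v1"))
	m.Put([]byte("k1"), []byte("v2")) // same key: both values are kept
	m.Put([]byte("k2"), []byte("v3"))

	buf := make([][]byte, 0, 4)
	buf = m.Get([]byte("k1"), buf[:0])
	fmt.Printf("%s\n", buf) // [v1 v2], in insertion order

	fmt.Println(m.Len()) // 3: one per Put, not per distinct key
}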
+func (m *MVMap) Get(key []byte, values [][]byte) [][]byte { + hashKey := fnvHash64(key) + entryAddr := m.hashTable[hashKey] + for entryAddr != nullEntryAddr { + e := m.entryStore.get(entryAddr) + entryAddr = e.next + val := m.dataStore.get(e, key) + if val == nil { + continue + } + values = append(values, val) + } + // Keep the order of input. + for i := 0; i < len(values)/2; i++ { + j := len(values) - 1 - i + values[i], values[j] = values[j], values[i] + } + return values +} + +// Len returns the number of values in th mv map, the number of keys may be less than Len +// if the same key is put more than once. +func (m *MVMap) Len() int { + return m.length +} + +// Iterator is used to iterate the MVMap. +type Iterator struct { + m *MVMap + sliceCur int + entryCur int +} + +// Next returns the next key/value pair of the MVMap. +// It returns (nil, nil) when there is no more entries to iterate. +func (i *Iterator) Next() (key, value []byte) { + for { + if i.sliceCur >= len(i.m.entryStore.slices) { + return nil, nil + } + entrySlice := i.m.entryStore.slices[i.sliceCur] + if i.entryCur >= len(entrySlice) { + i.sliceCur++ + i.entryCur = 0 + continue + } + entry := entrySlice[i.entryCur] + key, value = i.m.dataStore.getEntryData(entry) + i.entryCur++ + return + } +} + +// NewIterator creates a iterator for the MVMap. +func (m *MVMap) NewIterator() *Iterator { + // The first entry is empty, so init entryCur to 1. + return &Iterator{m: m, entryCur: 1} +} diff --git a/util/mvmap/mvmap_test.go b/util/mvmap/mvmap_test.go new file mode 100644 index 0000000..3c273f9 --- /dev/null +++ b/util/mvmap/mvmap_test.go @@ -0,0 +1,101 @@ +// Copyright 2017 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package mvmap + +import ( + "bytes" + "encoding/binary" + "fmt" + "hash/fnv" + "testing" + + . 
"github.com/pingcap/check" +) + +func TestT(t *testing.T) { + TestingT(t) +} + +func TestMVMap(t *testing.T) { + m := NewMVMap() + var vals [][]byte + m.Put([]byte("abc"), []byte("abc1")) + m.Put([]byte("abc"), []byte("abc2")) + m.Put([]byte("def"), []byte("def1")) + m.Put([]byte("def"), []byte("def2")) + vals = m.Get([]byte("abc"), vals[:0]) + if fmt.Sprintf("%s", vals) != "[abc1 abc2]" { + t.FailNow() + } + vals = m.Get([]byte("def"), vals[:0]) + if fmt.Sprintf("%s", vals) != "[def1 def2]" { + t.FailNow() + } + + if m.Len() != 4 { + t.FailNow() + } + + results := []string{"abc abc1", "abc abc2", "def def1", "def def2"} + it := m.NewIterator() + for i := 0; i < 4; i++ { + key, val := it.Next() + if fmt.Sprintf("%s %s", key, val) != results[i] { + t.FailNow() + } + } + key, val := it.Next() + if key != nil || val != nil { + t.FailNow() + } +} + +func BenchmarkMVMapPut(b *testing.B) { + m := NewMVMap() + buffer := make([]byte, 8) + for i := 0; i < b.N; i++ { + binary.BigEndian.PutUint64(buffer, uint64(i)) + m.Put(buffer, buffer) + } +} + +func BenchmarkMVMapGet(b *testing.B) { + m := NewMVMap() + buffer := make([]byte, 8) + for i := 0; i < b.N; i++ { + binary.BigEndian.PutUint64(buffer, uint64(i)) + m.Put(buffer, buffer) + } + val := make([][]byte, 0, 8) + b.ResetTimer() + for i := 0; i < b.N; i++ { + binary.BigEndian.PutUint64(buffer, uint64(i)) + val = m.Get(buffer, val[:0]) + if len(val) != 1 || !bytes.Equal(val[0], buffer) { + b.FailNow() + } + } +} + +func TestFNVHash(t *testing.T) { + b := []byte{0xcb, 0xf2, 0x9c, 0xe4, 0x84, 0x22, 0x23, 0x25} + sum1 := fnvHash64(b) + hash := fnv.New64() + hash.Reset() + hash.Write(b) + sum2 := hash.Sum64() + if sum1 != sum2 { + t.FailNow() + } +} diff --git a/util/prefix_helper.go b/util/prefix_helper.go new file mode 100644 index 0000000..cb01e3b --- /dev/null +++ b/util/prefix_helper.go @@ -0,0 +1,97 @@ +// Copyright 2014 The ql Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSES/QL-LICENSE file. + +// Copyright 2015 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package util + +import ( + "bytes" + + "github.com/pingcap/errors" + "github.com/pingcap/tidb/kv" +) + +// ScanMetaWithPrefix scans metadata with the prefix. +func ScanMetaWithPrefix(retriever kv.Retriever, prefix kv.Key, filter func(kv.Key, []byte) bool) error { + iter, err := retriever.Iter(prefix, prefix.PrefixNext()) + if err != nil { + return errors.Trace(err) + } + defer iter.Close() + + for { + if err != nil { + return errors.Trace(err) + } + + if iter.Valid() && iter.Key().HasPrefix(prefix) { + if !filter(iter.Key(), iter.Value()) { + break + } + err = iter.Next() + if err != nil { + return errors.Trace(err) + } + } else { + break + } + } + + return nil +} + +// DelKeyWithPrefix deletes keys with prefix. 
+func DelKeyWithPrefix(rm kv.RetrieverMutator, prefix kv.Key) error { + var keys []kv.Key + iter, err := rm.Iter(prefix, prefix.PrefixNext()) + if err != nil { + return errors.Trace(err) + } + + defer iter.Close() + for { + if err != nil { + return errors.Trace(err) + } + + if iter.Valid() && iter.Key().HasPrefix(prefix) { + keys = append(keys, iter.Key().Clone()) + err = iter.Next() + if err != nil { + return errors.Trace(err) + } + } else { + break + } + } + + for _, key := range keys { + err := rm.Delete(key) + if err != nil { + return errors.Trace(err) + } + } + + return nil +} + +// RowKeyPrefixFilter returns a function which checks whether currentKey has decoded rowKeyPrefix as prefix. +func RowKeyPrefixFilter(rowKeyPrefix kv.Key) kv.FnKeyCmp { + return func(currentKey kv.Key) bool { + // Next until key without prefix of this record. + return !bytes.HasPrefix(currentKey, rowKeyPrefix) + } +} diff --git a/util/prefix_helper_test.go b/util/prefix_helper_test.go new file mode 100644 index 0000000..b13497a --- /dev/null +++ b/util/prefix_helper_test.go @@ -0,0 +1,159 @@ +// Copyright 2015 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package util_test + +import ( + "context" + "fmt" + "testing" + + . "github.com/pingcap/check" + "github.com/pingcap/tidb/kv" + "github.com/pingcap/tidb/parser/terror" + "github.com/pingcap/tidb/store/mockstore" + "github.com/pingcap/tidb/util" + "github.com/pingcap/tidb/util/testleak" +) + +const ( + startIndex = 0 + testCount = 12 + testPow = 10 +) + +func TestT(t *testing.T) { + CustomVerboseFlag = true + TestingT(t) +} + +var _ = Suite(&testPrefixSuite{}) + +type testPrefixSuite struct { + s kv.Storage +} + +func (s *testPrefixSuite) SetUpSuite(c *C) { + testleak.BeforeTest() + store, err := mockstore.NewMockTikvStore() + c.Assert(err, IsNil) + s.s = store + +} + +func (s *testPrefixSuite) TearDownSuite(c *C) { + err := s.s.Close() + c.Assert(err, IsNil) + testleak.AfterTest(c)() +} + +func encodeInt(n int) []byte { + return []byte(fmt.Sprintf("%d", n)) +} + +type MockContext struct { + prefix int + values map[fmt.Stringer]interface{} + kv.Storage + txn kv.Transaction +} + +func (c *MockContext) SetValue(key fmt.Stringer, value interface{}) { + c.values[key] = value +} + +func (c *MockContext) Value(key fmt.Stringer) interface{} { + value := c.values[key] + return value +} + +func (c *MockContext) ClearValue(key fmt.Stringer) { + delete(c.values, key) +} + +func (c *MockContext) GetTxn(forceNew bool) (kv.Transaction, error) { + var err error + c.txn, err = c.Begin() + if err != nil { + return nil, err + } + + return c.txn, nil +} + +func (c *MockContext) fillTxn() error { + if c.txn == nil { + return nil + } + + var err error + for i := startIndex; i < testCount; i++ { + val := encodeInt(i + (c.prefix * testPow)) + err = c.txn.Set(val, val) + if err != nil { + return err + } + } + + return nil +} + +func (c *MockContext) CommitTxn() error { + if c.txn == nil { + return nil + } + return c.txn.Commit(context.Background()) +} + +func (s *testPrefixSuite) TestPrefix(c *C) { + ctx := &MockContext{10000000, 
make(map[fmt.Stringer]interface{}), s.s, nil} + ctx.fillTxn() + txn, err := ctx.GetTxn(false) + c.Assert(err, IsNil) + err = util.DelKeyWithPrefix(txn, encodeInt(ctx.prefix)) + c.Assert(err, IsNil) + err = ctx.CommitTxn() + c.Assert(err, IsNil) + + txn, err = s.s.Begin() + c.Assert(err, IsNil) + k := []byte("key100jfowi878230") + err = txn.Set(k, []byte(`val32dfaskli384757^*&%^`)) + c.Assert(err, IsNil) + err = util.ScanMetaWithPrefix(txn, k, func(kv.Key, []byte) bool { + return true + }) + c.Assert(err, IsNil) + err = util.ScanMetaWithPrefix(txn, k, func(kv.Key, []byte) bool { + return false + }) + c.Assert(err, IsNil) + err = util.DelKeyWithPrefix(txn, []byte("key")) + c.Assert(err, IsNil) + _, err = txn.Get(context.TODO(), k) + c.Assert(terror.ErrorEqual(kv.ErrNotExist, err), IsTrue) + + err = txn.Commit(context.Background()) + c.Assert(err, IsNil) +} + +func (s *testPrefixSuite) TestPrefixFilter(c *C) { + rowKey := []byte(`test@#$%l(le[0]..prefix) 2uio`) + rowKey[8] = 0x00 + rowKey[9] = 0x00 + f := util.RowKeyPrefixFilter(rowKey) + b := f(append(rowKey, []byte("akjdf3*(34")...)) + c.Assert(b, IsFalse) + buf := f([]byte("sjfkdlsaf")) + c.Assert(buf, IsTrue) +} diff --git a/util/random.go b/util/random.go new file mode 100644 index 0000000..fe8cbdf --- /dev/null +++ b/util/random.go @@ -0,0 +1,31 @@ +// Copyright 2017 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package util + +import ( + "math/rand" +) + +// RandomBuf generates a random string using ASCII characters but avoid separator character. +// See https://github.com/mysql/mysql-server/blob/5.7/mysys_ssl/crypt_genhash_impl.cc#L435 +func RandomBuf(size int) []byte { + buf := make([]byte, size) + for i := 0; i < size; i++ { + buf[i] = byte(rand.Intn(127)) + if buf[i] == 0 || buf[i] == byte('$') { + buf[i]++ + } + } + return buf +} diff --git a/util/ranger/checker.go b/util/ranger/checker.go new file mode 100644 index 0000000..e5acba0 --- /dev/null +++ b/util/ranger/checker.go @@ -0,0 +1,84 @@ +// Copyright 2017 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package ranger + +import ( + "github.com/pingcap/tidb/expression" + "github.com/pingcap/tidb/parser/ast" + "github.com/pingcap/tidb/types" +) + +// conditionChecker checks if this condition can be pushed to index planner. +type conditionChecker struct { + colUniqueID int64 + shouldReserve bool // check if a access condition should be reserved in filter conditions. 
+ length int +} + +func (c *conditionChecker) check(condition expression.Expression) bool { + switch x := condition.(type) { + case *expression.ScalarFunction: + return c.checkScalarFunction(x) + case *expression.Column: + return c.checkColumn(x) + case *expression.Constant: + return true + } + return false +} + +func (c *conditionChecker) checkScalarFunction(scalar *expression.ScalarFunction) bool { + switch scalar.FuncName.L { + case ast.LogicOr, ast.LogicAnd: + return c.check(scalar.GetArgs()[0]) && c.check(scalar.GetArgs()[1]) + case ast.EQ, ast.NE, ast.GE, ast.GT, ast.LE, ast.LT: + if _, ok := scalar.GetArgs()[0].(*expression.Constant); ok { + if c.checkColumn(scalar.GetArgs()[1]) { + return scalar.FuncName.L != ast.NE || c.length == types.UnspecifiedLength + } + } + if _, ok := scalar.GetArgs()[1].(*expression.Constant); ok { + if c.checkColumn(scalar.GetArgs()[0]) { + return scalar.FuncName.L != ast.NE || c.length == types.UnspecifiedLength + } + } + case ast.IsNull: + return c.checkColumn(scalar.GetArgs()[0]) + case ast.UnaryNot: + if _, ok := scalar.GetArgs()[0].(*expression.ScalarFunction); ok { + return c.check(scalar.GetArgs()[0]) + } + // "not column" or "not constant" can't lead to a range. + return false + case ast.In: + if !c.checkColumn(scalar.GetArgs()[0]) { + return false + } + for _, v := range scalar.GetArgs()[1:] { + if _, ok := v.(*expression.Constant); !ok { + return false + } + } + return true + } + return false +} + +func (c *conditionChecker) checkColumn(expr expression.Expression) bool { + col, ok := expr.(*expression.Column) + if !ok { + return false + } + return c.colUniqueID == col.UniqueID +} diff --git a/util/ranger/detacher.go b/util/ranger/detacher.go new file mode 100644 index 0000000..9675eda --- /dev/null +++ b/util/ranger/detacher.go @@ -0,0 +1,402 @@ +// Copyright 2017 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package ranger + +import ( + "github.com/pingcap/errors" + "github.com/pingcap/tidb/expression" + "github.com/pingcap/tidb/parser/ast" + "github.com/pingcap/tidb/sessionctx" + "github.com/pingcap/tidb/types" +) + +// detachColumnCNFConditions detaches the condition for calculating range from the other conditions. +// Please make sure that the top level is CNF form. +func detachColumnCNFConditions(sctx sessionctx.Context, conditions []expression.Expression, checker *conditionChecker) ([]expression.Expression, []expression.Expression) { + var accessConditions, filterConditions []expression.Expression + for _, cond := range conditions { + if sf, ok := cond.(*expression.ScalarFunction); ok && sf.FuncName.L == ast.LogicOr { + dnfItems := expression.FlattenDNFConditions(sf) + colulmnDNFItems, hasResidual := detachColumnDNFConditions(sctx, dnfItems, checker) + // If this CNF has expression that cannot be resolved as access condition, then the total DNF expression + // should be also appended into filter condition. 
+ if hasResidual { + filterConditions = append(filterConditions, cond) + } + if len(colulmnDNFItems) == 0 { + continue + } + rebuildDNF := expression.ComposeDNFCondition(sctx, colulmnDNFItems...) + accessConditions = append(accessConditions, rebuildDNF) + continue + } + if !checker.check(cond) { + filterConditions = append(filterConditions, cond) + continue + } + accessConditions = append(accessConditions, cond) + if checker.shouldReserve { + filterConditions = append(filterConditions, cond) + checker.shouldReserve = checker.length != types.UnspecifiedLength + } + } + return accessConditions, filterConditions +} + +// detachColumnDNFConditions detaches the condition for calculating range from the other conditions. +// Please make sure that the top level is DNF form. +func detachColumnDNFConditions(sctx sessionctx.Context, conditions []expression.Expression, checker *conditionChecker) ([]expression.Expression, bool) { + var ( + hasResidualConditions bool + accessConditions []expression.Expression + ) + for _, cond := range conditions { + if sf, ok := cond.(*expression.ScalarFunction); ok && sf.FuncName.L == ast.LogicAnd { + cnfItems := expression.FlattenCNFConditions(sf) + columnCNFItems, others := detachColumnCNFConditions(sctx, cnfItems, checker) + if len(others) > 0 { + hasResidualConditions = true + } + // If one part of DNF has no access condition. Then this DNF cannot get range. + if len(columnCNFItems) == 0 { + return nil, true + } + rebuildCNF := expression.ComposeCNFCondition(sctx, columnCNFItems...) + accessConditions = append(accessConditions, rebuildCNF) + } else if checker.check(cond) { + accessConditions = append(accessConditions, cond) + if checker.shouldReserve { + hasResidualConditions = true + checker.shouldReserve = checker.length != types.UnspecifiedLength + } + } else { + return nil, true + } + } + return accessConditions, hasResidualConditions +} + +// getEqOrInColOffset checks if the expression is a eq function that one side is constant and another is column or an +// in function which is `column in (constant list)`. +// If so, it will return the offset of this column in the slice, otherwise return -1 for not found. +func getEqOrInColOffset(expr expression.Expression, cols []*expression.Column) int { + f, ok := expr.(*expression.ScalarFunction) + if !ok { + return -1 + } + if f.FuncName.L == ast.EQ { + if c, ok := f.GetArgs()[0].(*expression.Column); ok { + if _, ok := f.GetArgs()[1].(*expression.Constant); ok { + for i, col := range cols { + if col.Equal(nil, c) { + return i + } + } + } + } + if c, ok := f.GetArgs()[1].(*expression.Column); ok { + if _, ok := f.GetArgs()[0].(*expression.Constant); ok { + for i, col := range cols { + if col.Equal(nil, c) { + return i + } + } + } + } + } + if f.FuncName.L == ast.In { + c, ok := f.GetArgs()[0].(*expression.Column) + if !ok { + return -1 + } + for _, arg := range f.GetArgs()[1:] { + if _, ok := arg.(*expression.Constant); !ok { + return -1 + } + } + for i, col := range cols { + if col.Equal(nil, c) { + return i + } + } + } + return -1 +} + +// detachCNFCondAndBuildRangeForIndex will detach the index filters from table filters. These conditions are connected with `and` +// It will first find the point query column and then extract the range query column. +// considerDNF is true means it will try to extract access conditions from the DNF expressions. 
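Editorial aside (illustrative only, hypothetical names): in the CNF case the top level is an AND of items, and each item is either accepted by the condition checker and becomes an access condition, or is kept as a filter condition. Stripped of expression trees, the partition amounts to:

package main

import "fmt"

// detachCNF splits AND-ed conditions into access conditions (accepted by the
// checker) and filter conditions (everything else), echoing the shape of
// detachColumnCNFConditions without real expression trees.
func detachCNF(conds []string, checker func(string) bool) (access, filter []string) {
	for _, c := range conds {
		if checker(c) {
			access = append(access, c)
		} else {
			filter = append(filter, c)
		}
	}
	return
}

func main() {
	conds := []string{"a = 1", "b > 2", "c like '%x'"}
	isIndexable := func(c string) bool { return c != "c like '%x'" } // pretend checker
	access, filter := detachCNF(conds, isIndexable)
	fmt.Println("access:", access) // access: [a = 1 b > 2]
	fmt.Println("filter:", filter) // filter: [c like '%x']
}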
+func detachCNFCondAndBuildRangeForIndex(sctx sessionctx.Context, conditions []expression.Expression, cols []*expression.Column,
+	tpSlice []*types.FieldType, lengths []int, considerDNF bool) (*DetachRangeResult, error) {
+	var (
+		eqCount int
+		ranges  []*Range
+		err     error
+	)
+	res := &DetachRangeResult{}
+
+	accessConds, filterConds, newConditions, emptyRange := ExtractEqAndInCondition(sctx, conditions, cols, lengths)
+	if emptyRange {
+		return res, nil
+	}
+
+	for ; eqCount < len(accessConds); eqCount++ {
+		if accessConds[eqCount].(*expression.ScalarFunction).FuncName.L != ast.EQ {
+			break
+		}
+	}
+	eqOrInCount := len(accessConds)
+	res.EqCondCount = eqCount
+	res.EqOrInCount = eqOrInCount
+	if eqOrInCount == len(cols) {
+		filterConds = append(filterConds, newConditions...)
+		ranges, err = buildCNFIndexRange(sctx.GetSessionVars().StmtCtx, cols, tpSlice, lengths, eqOrInCount, accessConds)
+		if err != nil {
+			return res, err
+		}
+		res.Ranges = ranges
+		res.AccessConds = accessConds
+		res.RemainedConds = filterConds
+		return res, nil
+	}
+	checker := &conditionChecker{
+		colUniqueID:   cols[eqOrInCount].UniqueID,
+		length:        lengths[eqOrInCount],
+		shouldReserve: lengths[eqOrInCount] != types.UnspecifiedLength,
+	}
+	if considerDNF {
+		accesses, filters := detachColumnCNFConditions(sctx, newConditions, checker)
+		accessConds = append(accessConds, accesses...)
+		filterConds = append(filterConds, filters...)
+	} else {
+		for _, cond := range newConditions {
+			if !checker.check(cond) {
+				filterConds = append(filterConds, cond)
+				continue
+			}
+			accessConds = append(accessConds, cond)
+		}
+	}
+	ranges, err = buildCNFIndexRange(sctx.GetSessionVars().StmtCtx, cols, tpSlice, lengths, eqOrInCount, accessConds)
+	res.Ranges = ranges
+	res.AccessConds = accessConds
+	res.RemainedConds = filterConds
+	return res, err
+}
+
+// ExtractEqAndInCondition splits the given conditions into three parts using the information of index columns and their lengths.
+// accesses: the conditions that will be used to build ranges.
+// filters: the access conditions that must be evaluated again, because they only cover the prefix part of a char column.
+// newConditions: the simplified conditions. If there are multiple in/eq conditions on the same column,
+//   e.g. a in (1, 2, 3) and a in (2, 3, 4), they are combined into a in (2, 3) and pushed to newConditions.
+// bool: indicates whether an empty range was found while merging the eq and in conditions. 
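Editorial aside, before the implementation below: the merging described for newConditions is a set intersection, and an empty result signals an always-false condition (the bool return value). A standalone sketch with plain ints, not the real Datum-based code:

package main

import "fmt"

// intersect returns the values present in both IN lists, preserving the
// order of the first list; an empty result means the combined condition
// can never match, i.e. an empty range.
func intersect(a, b []int) []int {
	inB := make(map[int]bool, len(b))
	for _, v := range b {
		inB[v] = true
	}
	var out []int
	for _, v := range a {
		if inB[v] {
			out = append(out, v)
		}
	}
	return out
}

func main() {
	// a in (1, 2, 3) and a in (2, 3, 4)  ->  a in (2, 3)
	merged := intersect([]int{1, 2, 3}, []int{2, 3, 4})
	fmt.Println(merged, len(merged) == 0) // [2 3] false
}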
+func ExtractEqAndInCondition(sctx sessionctx.Context, conditions []expression.Expression, + cols []*expression.Column, lengths []int) ([]expression.Expression, []expression.Expression, []expression.Expression, bool) { + var filters []expression.Expression + rb := builder{sc: sctx.GetSessionVars().StmtCtx} + accesses := make([]expression.Expression, len(cols)) + points := make([][]point, len(cols)) + mergedAccesses := make([]expression.Expression, len(cols)) + newConditions := make([]expression.Expression, 0, len(conditions)) + for _, cond := range conditions { + offset := getEqOrInColOffset(cond, cols) + if offset == -1 { + newConditions = append(newConditions, cond) + continue + } + if accesses[offset] == nil { + accesses[offset] = cond + continue + } + // Multiple Eq/In conditions for one column in CNF, apply intersection on them + // Lazily compute the points for the previously visited Eq/In + if mergedAccesses[offset] == nil { + mergedAccesses[offset] = accesses[offset] + points[offset] = rb.build(accesses[offset]) + } + points[offset] = rb.intersection(points[offset], rb.build(cond)) + // Early termination if false expression found + if len(points[offset]) == 0 { + return nil, nil, nil, true + } + } + for i, ma := range mergedAccesses { + if ma == nil { + if accesses[i] != nil { + newConditions = append(newConditions, accesses[i]) + } + continue + } + accesses[i] = points2EqOrInCond(sctx, points[i], mergedAccesses[i]) + newConditions = append(newConditions, accesses[i]) + } + for i, cond := range accesses { + if cond == nil { + accesses = accesses[:i] + break + } + if lengths[i] != types.UnspecifiedLength { + filters = append(filters, cond) + } + } + // We should remove all accessConds, so that they will not be added to filter conditions. + newConditions = removeAccessConditions(newConditions, accesses) + return accesses, filters, newConditions, false +} + +// detachDNFCondAndBuildRangeForIndex will detach the index filters from table filters when it's a DNF. +// We will detach the conditions of every DNF items, then compose them to a DNF. +func detachDNFCondAndBuildRangeForIndex(sctx sessionctx.Context, condition *expression.ScalarFunction, + cols []*expression.Column, newTpSlice []*types.FieldType, lengths []int) ([]*Range, []expression.Expression, bool, error) { + sc := sctx.GetSessionVars().StmtCtx + firstColumnChecker := &conditionChecker{ + colUniqueID: cols[0].UniqueID, + shouldReserve: lengths[0] != types.UnspecifiedLength, + length: lengths[0], + } + rb := builder{sc: sc} + dnfItems := expression.FlattenDNFConditions(condition) + newAccessItems := make([]expression.Expression, 0, len(dnfItems)) + var totalRanges []*Range + hasResidual := false + for _, item := range dnfItems { + if sf, ok := item.(*expression.ScalarFunction); ok && sf.FuncName.L == ast.LogicAnd { + cnfItems := expression.FlattenCNFConditions(sf) + var accesses, filters []expression.Expression + res, err := detachCNFCondAndBuildRangeForIndex(sctx, cnfItems, cols, newTpSlice, lengths, true) + if err != nil { + return nil, nil, false, nil + } + ranges := res.Ranges + accesses = res.AccessConds + filters = res.RemainedConds + if len(accesses) == 0 { + return FullRange(), nil, true, nil + } + if len(filters) > 0 { + hasResidual = true + } + totalRanges = append(totalRanges, ranges...) 
+			newAccessItems = append(newAccessItems, expression.ComposeCNFCondition(sctx, accesses...))
+		} else if firstColumnChecker.check(item) {
+			if firstColumnChecker.shouldReserve {
+				hasResidual = true
+				firstColumnChecker.shouldReserve = lengths[0] != types.UnspecifiedLength
+			}
+			points := rb.build(item)
+			ranges, err := points2Ranges(sc, points, newTpSlice[0])
+			if err != nil {
+				return nil, nil, false, errors.Trace(err)
+			}
+			totalRanges = append(totalRanges, ranges...)
+			newAccessItems = append(newAccessItems, item)
+		} else {
+			return FullRange(), nil, true, nil
+		}
+	}
+
+	totalRanges, err := unionRanges(sc, totalRanges)
+	if err != nil {
+		return nil, nil, false, errors.Trace(err)
+	}
+
+	return totalRanges, []expression.Expression{expression.ComposeDNFCondition(sctx, newAccessItems...)}, hasResidual, nil
+}
+
+// DetachRangeResult wraps up results when detaching conditions and building ranges.
+type DetachRangeResult struct {
+	// Ranges is the ranges extracted and built from conditions.
+	Ranges []*Range
+	// AccessConds is the extracted conditions for access.
+	AccessConds []expression.Expression
+	// RemainedConds is the filter conditions which should be kept after access.
+	RemainedConds []expression.Expression
+	// EqCondCount is the number of equal conditions extracted.
+	EqCondCount int
+	// EqOrInCount is the number of equal/in conditions extracted.
+	EqOrInCount int
+	// IsDNFCond indicates if the top layer of conditions is in DNF.
+	IsDNFCond bool
+}
+
+// DetachCondAndBuildRangeForIndex will detach the index filters from table filters.
+// The returned values are encapsulated into a struct DetachRangeResult, see its comments for explanation.
+func DetachCondAndBuildRangeForIndex(sctx sessionctx.Context, conditions []expression.Expression, cols []*expression.Column,
+	lengths []int) (*DetachRangeResult, error) {
+	res := &DetachRangeResult{}
+	newTpSlice := make([]*types.FieldType, 0, len(cols))
+	for _, col := range cols {
+		newTpSlice = append(newTpSlice, newFieldType(col.RetType))
+	}
+	if len(conditions) == 1 {
+		if sf, ok := conditions[0].(*expression.ScalarFunction); ok && sf.FuncName.L == ast.LogicOr {
+			ranges, accesses, hasResidual, err := detachDNFCondAndBuildRangeForIndex(sctx, sf, cols, newTpSlice, lengths)
+			if err != nil {
+				return res, errors.Trace(err)
+			}
+			res.Ranges = ranges
+			res.AccessConds = accesses
+			res.IsDNFCond = true
+			// If any item of this DNF cannot be used to calculate a range, the whole DNF should be pushed down as a filter condition.
+			if hasResidual {
+				res.RemainedConds = conditions
+				return res, nil
+			}
+			return res, nil
+		}
+	}
+	return detachCNFCondAndBuildRangeForIndex(sctx, conditions, cols, newTpSlice, lengths, true)
+}
+
+// DetachSimpleCondAndBuildRangeForIndex will detach the index filters from table filters.
+// It will first find the point query column and then extract the range query column. 
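Editorial aside on the DetachRangeResult struct defined a few lines up: a hand-built stand-in showing the shape of the result for a hypothetical index (a, b) and the conditions `a = 1 and b > 2 and c < 3`. The values are written by hand for illustration, not produced by running this package:

package main

import "fmt"

// fakeResult mirrors the fields of DetachRangeResult with plain strings.
type fakeResult struct {
	Ranges        []string
	AccessConds   []string
	RemainedConds []string
	EqCondCount   int
	EqOrInCount   int
}

func main() {
	res := fakeResult{
		Ranges:        []string{"(1 2, 1 +inf]"}, // a pinned to 1, b ranged over (2, +inf]
		AccessConds:   []string{"a = 1", "b > 2"},
		RemainedConds: []string{"c < 3"}, // c is not an index column, so it is evaluated after the scan
		EqCondCount:   1,                 // one leading equality column: a
		EqOrInCount:   1,
	}
	fmt.Printf("%+v\n", res)
}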
+func DetachSimpleCondAndBuildRangeForIndex(sctx sessionctx.Context, conditions []expression.Expression, + cols []*expression.Column, lengths []int) ([]*Range, []expression.Expression, error) { + newTpSlice := make([]*types.FieldType, 0, len(cols)) + for _, col := range cols { + newTpSlice = append(newTpSlice, newFieldType(col.RetType)) + } + res, err := detachCNFCondAndBuildRangeForIndex(sctx, conditions, cols, newTpSlice, lengths, false) + return res.Ranges, res.AccessConds, err +} + +func removeAccessConditions(conditions, accessConds []expression.Expression) []expression.Expression { + filterConds := make([]expression.Expression, 0, len(conditions)) + for _, cond := range conditions { + if !expression.Contains(accessConds, cond) { + filterConds = append(filterConds, cond) + } + } + return filterConds +} + +// ExtractAccessConditionsForColumn extracts the access conditions used for range calculation. Since +// we don't need to return the remained filter conditions, it is much simpler than DetachCondsForColumn. +func ExtractAccessConditionsForColumn(conds []expression.Expression, uniqueID int64) []expression.Expression { + checker := conditionChecker{ + colUniqueID: uniqueID, + length: types.UnspecifiedLength, + } + accessConds := make([]expression.Expression, 0, 8) + return expression.Filter(accessConds, conds, checker.check) +} + +// DetachCondsForColumn detaches access conditions for specified column from other filter conditions. +func DetachCondsForColumn(sctx sessionctx.Context, conds []expression.Expression, col *expression.Column) (accessConditions, otherConditions []expression.Expression) { + checker := &conditionChecker{ + colUniqueID: col.UniqueID, + length: types.UnspecifiedLength, + } + return detachColumnCNFConditions(sctx, conds, checker) +} diff --git a/util/ranger/points.go b/util/ranger/points.go new file mode 100644 index 0000000..144d21c --- /dev/null +++ b/util/ranger/points.go @@ -0,0 +1,541 @@ +// Copyright 2015 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package ranger + +import ( + "fmt" + "math" + "sort" + "strings" + + "github.com/pingcap/errors" + "github.com/pingcap/tidb/expression" + "github.com/pingcap/tidb/kv" + "github.com/pingcap/tidb/parser/ast" + "github.com/pingcap/tidb/parser/charset" + "github.com/pingcap/tidb/parser/mysql" + "github.com/pingcap/tidb/parser/terror" + "github.com/pingcap/tidb/sessionctx/stmtctx" + "github.com/pingcap/tidb/types" + "github.com/pingcap/tidb/util/chunk" +) + +// Error instances. +var ( + ErrUnsupportedType = terror.ClassOptimizer.New(mysql.ErrUnsupportedType, mysql.MySQLErrName[mysql.ErrUnsupportedType]) +) + +// RangeType is alias for int. +type RangeType int + +// RangeType constants. +const ( + IntRangeType RangeType = iota + ColumnRangeType + IndexRangeType +) + +// Point is the end point of range interval. 
+type point struct { + value types.Datum + excl bool // exclude + start bool +} + +func (rp point) String() string { + val := rp.value.GetValue() + if rp.value.Kind() == types.KindMinNotNull { + val = "-inf" + } else if rp.value.Kind() == types.KindMaxValue { + val = "+inf" + } + if rp.start { + symbol := "[" + if rp.excl { + symbol = "(" + } + return fmt.Sprintf("%s%v", symbol, val) + } + symbol := "]" + if rp.excl { + symbol = ")" + } + return fmt.Sprintf("%v%s", val, symbol) +} + +type pointSorter struct { + points []point + err error + sc *stmtctx.StatementContext +} + +func (r *pointSorter) Len() int { + return len(r.points) +} + +func (r *pointSorter) Less(i, j int) bool { + a := r.points[i] + b := r.points[j] + less, err := rangePointLess(r.sc, a, b) + if err != nil { + r.err = err + } + return less +} + +func rangePointLess(sc *stmtctx.StatementContext, a, b point) (bool, error) { + cmp, err := a.value.CompareDatum(sc, &b.value) + if cmp != 0 { + return cmp < 0, nil + } + return rangePointEqualValueLess(a, b), errors.Trace(err) +} + +func rangePointEqualValueLess(a, b point) bool { + if a.start && b.start { + return !a.excl && b.excl + } else if a.start { + return !a.excl && !b.excl + } else if b.start { + return a.excl || b.excl + } + return a.excl && !b.excl +} + +func (r *pointSorter) Swap(i, j int) { + r.points[i], r.points[j] = r.points[j], r.points[i] +} + +// fullRange is (-∞, +∞). +var fullRange = []point{ + {start: true}, + {value: types.MaxValueDatum()}, +} + +// FullIntRange is used for table range. Since table range cannot accept MaxValueDatum as the max value. +// So we need to set it to MaxInt64. +func FullIntRange(isUnsigned bool) []*Range { + if isUnsigned { + return []*Range{{LowVal: []types.Datum{types.NewUintDatum(0)}, HighVal: []types.Datum{types.NewUintDatum(math.MaxUint64)}}} + } + return []*Range{{LowVal: []types.Datum{types.NewIntDatum(math.MinInt64)}, HighVal: []types.Datum{types.NewIntDatum(math.MaxInt64)}}} +} + +// FullRange is [null, +∞) for Range. +func FullRange() []*Range { + return []*Range{{LowVal: []types.Datum{{}}, HighVal: []types.Datum{types.MaxValueDatum()}}} +} + +// FullNotNullRange is (-∞, +∞) for Range. +func FullNotNullRange() []*Range { + return []*Range{{LowVal: []types.Datum{types.MinNotNullDatum()}, HighVal: []types.Datum{types.MaxValueDatum()}}} +} + +// NullRange is [null, null] for Range. +func NullRange() []*Range { + return []*Range{{LowVal: []types.Datum{{}}, HighVal: []types.Datum{{}}}} +} + +// builder is the range builder struct. +type builder struct { + err error + sc *stmtctx.StatementContext +} + +func (r *builder) build(expr expression.Expression) []point { + switch x := expr.(type) { + case *expression.Column: + return r.buildFromColumn(x) + case *expression.ScalarFunction: + return r.buildFromScalarFunc(x) + case *expression.Constant: + return r.buildFromConstant(x) + } + + return fullRange +} + +func (r *builder) buildFromConstant(expr *expression.Constant) []point { + dt, err := expr.Eval(chunk.Row{}) + if err != nil { + r.err = err + return nil + } + if dt.IsNull() { + return nil + } + + val, err := dt.ToBool(r.sc) + if err != nil { + r.err = err + return nil + } + + if val == 0 { + return nil + } + return fullRange +} + +func (r *builder) buildFromColumn(expr *expression.Column) []point { + // column name expression is equivalent to column name is true. 
+ startPoint1 := point{value: types.MinNotNullDatum(), start: true} + endPoint1 := point{excl: true} + endPoint1.value.SetInt64(0) + startPoint2 := point{excl: true, start: true} + startPoint2.value.SetInt64(0) + endPoint2 := point{value: types.MaxValueDatum()} + return []point{startPoint1, endPoint1, startPoint2, endPoint2} +} + +func (r *builder) buildFormBinOp(expr *expression.ScalarFunction) []point { + // This has been checked that the binary operation is comparison operation, and one of + // the operand is column name expression. + var ( + op string + value types.Datum + err error + ft *types.FieldType + ) + if col, ok := expr.GetArgs()[0].(*expression.Column); ok { + value, err = expr.GetArgs()[1].Eval(chunk.Row{}) + op = expr.FuncName.L + ft = col.RetType + } else { + col, ok := expr.GetArgs()[1].(*expression.Column) + if !ok { + return nil + } + ft = col.RetType + + value, err = expr.GetArgs()[0].Eval(chunk.Row{}) + switch expr.FuncName.L { + case ast.GE: + op = ast.LE + case ast.GT: + op = ast.LT + case ast.LT: + op = ast.GT + case ast.LE: + op = ast.GE + default: + op = expr.FuncName.L + } + } + if err != nil { + return nil + } + if value.IsNull() { + return nil + } + + value, err = HandlePadCharToFullLength(r.sc, ft, value) + if err != nil { + return nil + } + + value, op, isValidRange := handleUnsignedIntCol(ft, value, op) + if !isValidRange { + return nil + } + + switch op { + case ast.EQ: + startPoint := point{value: value, start: true} + endPoint := point{value: value} + return []point{startPoint, endPoint} + case ast.NE: + startPoint1 := point{value: types.MinNotNullDatum(), start: true} + endPoint1 := point{value: value, excl: true} + startPoint2 := point{value: value, start: true, excl: true} + endPoint2 := point{value: types.MaxValueDatum()} + return []point{startPoint1, endPoint1, startPoint2, endPoint2} + case ast.LT: + startPoint := point{value: types.MinNotNullDatum(), start: true} + endPoint := point{value: value, excl: true} + return []point{startPoint, endPoint} + case ast.LE: + startPoint := point{value: types.MinNotNullDatum(), start: true} + endPoint := point{value: value} + return []point{startPoint, endPoint} + case ast.GT: + startPoint := point{value: value, start: true, excl: true} + endPoint := point{value: types.MaxValueDatum()} + return []point{startPoint, endPoint} + case ast.GE: + startPoint := point{value: value, start: true} + endPoint := point{value: types.MaxValueDatum()} + return []point{startPoint, endPoint} + } + return nil +} + +// HandlePadCharToFullLength handles the "PAD_CHAR_TO_FULL_LENGTH" sql mode for +// CHAR[N] index columns. +// NOTE: kv.ErrNotExist is returned to indicate that this value can not match +// any (key, value) pair in tikv storage. This error should be handled by +// the caller. 
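Editorial aside, before the implementation below: a standalone sketch of the CHAR[N] branch described above, with the sql mode reduced to a plain bool and a stand-in error for kv.ErrNotExist (names are hypothetical, this is not the real function):

package main

import (
	"errors"
	"fmt"
	"strings"
)

var errNoMatch = errors.New("value cannot match any row") // stand-in for kv.ErrNotExist

// padCharValue sketches the PAD_CHAR_TO_FULL_LENGTH handling for CHAR(flen):
// only full-length values can match, and trailing spaces are trimmed to agree
// with how CHAR data is stored.
func padCharValue(val string, flen int, padCharToFullLength bool) (string, error) {
	if !padCharToFullLength {
		return val, nil
	}
	if len(val) != flen {
		return val, errNoMatch
	}
	return strings.TrimRight(val, " "), nil
}

func main() {
	v, err := padCharValue("ab   ", 5, true)
	fmt.Printf("%q %v\n", v, err) // "ab" <nil>
	_, err = padCharValue("ab", 5, true)
	fmt.Println(err) // value cannot match any row
}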
+func HandlePadCharToFullLength(sc *stmtctx.StatementContext, ft *types.FieldType, val types.Datum) (types.Datum, error) { + isChar := (ft.Tp == mysql.TypeString) + isBinary := (isChar && ft.Collate == charset.CollationBin) + isVarchar := (ft.Tp == mysql.TypeVarString || ft.Tp == mysql.TypeVarchar) + isVarBinary := (isVarchar && ft.Collate == charset.CollationBin) + + if !isChar && !isVarchar && !isBinary && !isVarBinary { + return val, nil + } + + hasBinaryFlag := mysql.HasBinaryFlag(ft.Flag) + targetStr, err := val.ToString() + if err != nil { + return val, err + } + + switch { + case isBinary || isVarBinary: + val.SetString(targetStr) + return val, nil + case isVarchar && hasBinaryFlag: + noTrailingSpace := strings.TrimRight(targetStr, " ") + if numSpacesToFill := ft.Flen - len(noTrailingSpace); numSpacesToFill > 0 { + noTrailingSpace += strings.Repeat(" ", numSpacesToFill) + } + val.SetString(noTrailingSpace) + return val, nil + case isVarchar && !hasBinaryFlag: + val.SetString(targetStr) + return val, nil + case isChar && hasBinaryFlag: + noTrailingSpace := strings.TrimRight(targetStr, " ") + val.SetString(noTrailingSpace) + return val, nil + case isChar && !hasBinaryFlag && !sc.PadCharToFullLength: + val.SetString(targetStr) + return val, nil + case isChar && !hasBinaryFlag && sc.PadCharToFullLength: + if len(targetStr) != ft.Flen { + // return kv.ErrNotExist to indicate that this value can not match any + // (key, value) pair in tikv storage. + return val, kv.ErrNotExist + } + // Trailing spaces of data typed "CHAR[N]" is trimed in the storage, we + // need to trim these trailing spaces as well. + noTrailingSpace := strings.TrimRight(targetStr, " ") + val.SetString(noTrailingSpace) + return val, nil + default: + return val, nil + } +} + +// handleUnsignedIntCol handles the case when unsigned column meets negative integer value. +// The three returned values are: fixed constant value, fixed operator, and a boolean +// which indicates whether the range is valid or not. +func handleUnsignedIntCol(ft *types.FieldType, val types.Datum, op string) (types.Datum, string, bool) { + isUnsigned := mysql.HasUnsignedFlag(ft.Flag) + isIntegerType := mysql.IsIntegerType(ft.Tp) + isNegativeInteger := (val.Kind() == types.KindInt64 && val.GetInt64() < 0) + + if !isUnsigned || !isIntegerType || !isNegativeInteger { + return val, op, true + } + + // If the operator is GT, GE or NE, the range should be [0, +inf]. + // Otherwise the value is out of valid range. 
+ if op == ast.GT || op == ast.GE || op == ast.NE { + op = ast.GE + val.SetUint64(0) + return val, op, true + } + + return val, op, false +} + +func (r *builder) buildFromIn(expr *expression.ScalarFunction) ([]point, bool) { + list := expr.GetArgs()[1:] + rangePoints := make([]point, 0, len(list)*2) + hasNull := false + for _, e := range list { + v, ok := e.(*expression.Constant) + if !ok { + r.err = ErrUnsupportedType.GenWithStack("expr:%v is not constant", e) + return fullRange, hasNull + } + dt, err := v.Eval(chunk.Row{}) + if err != nil { + r.err = ErrUnsupportedType.GenWithStack("expr:%v is not evaluated", e) + return fullRange, hasNull + } + if dt.IsNull() { + hasNull = true + continue + } + startPoint := point{value: types.NewDatum(dt.GetValue()), start: true} + endPoint := point{value: types.NewDatum(dt.GetValue())} + rangePoints = append(rangePoints, startPoint, endPoint) + } + sorter := pointSorter{points: rangePoints, sc: r.sc} + sort.Sort(&sorter) + if sorter.err != nil { + r.err = sorter.err + } + // check and remove duplicates + curPos, frontPos := 0, 0 + for frontPos < len(rangePoints) { + if rangePoints[curPos].start == rangePoints[frontPos].start { + frontPos++ + } else { + curPos++ + rangePoints[curPos] = rangePoints[frontPos] + frontPos++ + } + } + if curPos > 0 { + curPos++ + } + return rangePoints[:curPos], hasNull +} + +func (r *builder) buildFromNot(expr *expression.ScalarFunction) []point { + switch n := expr.FuncName.L; n { + case ast.In: + var ( + isUnsignedIntCol bool + nonNegativePos int + ) + rangePoints, hasNull := r.buildFromIn(expr) + if hasNull { + return nil + } + if x, ok := expr.GetArgs()[0].(*expression.Column); ok { + isUnsignedIntCol = mysql.HasUnsignedFlag(x.RetType.Flag) && mysql.IsIntegerType(x.RetType.Tp) + } + // negative ranges can be directly ignored for unsigned int columns. + if isUnsignedIntCol { + for nonNegativePos = 0; nonNegativePos < len(rangePoints); nonNegativePos += 2 { + if rangePoints[nonNegativePos].value.Kind() == types.KindUint64 || rangePoints[nonNegativePos].value.GetInt64() >= 0 { + break + } + } + rangePoints = rangePoints[nonNegativePos:] + } + retRangePoints := make([]point, 0, 2+len(rangePoints)) + previousValue := types.Datum{} + for i := 0; i < len(rangePoints); i += 2 { + retRangePoints = append(retRangePoints, point{value: previousValue, start: true, excl: true}) + retRangePoints = append(retRangePoints, point{value: rangePoints[i].value, excl: true}) + previousValue = rangePoints[i].value + } + // Append the interval (last element, max value]. 
+ retRangePoints = append(retRangePoints, point{value: previousValue, start: true, excl: true}) + retRangePoints = append(retRangePoints, point{value: types.MaxValueDatum()}) + return retRangePoints + case ast.IsNull: + startPoint := point{value: types.MinNotNullDatum(), start: true} + endPoint := point{value: types.MaxValueDatum()} + return []point{startPoint, endPoint} + } + return nil +} + +func (r *builder) buildFromScalarFunc(expr *expression.ScalarFunction) []point { + switch op := expr.FuncName.L; op { + case ast.GE, ast.GT, ast.LT, ast.LE, ast.EQ, ast.NE: + return r.buildFormBinOp(expr) + case ast.LogicAnd: + return r.intersection(r.build(expr.GetArgs()[0]), r.build(expr.GetArgs()[1])) + case ast.LogicOr: + return r.union(r.build(expr.GetArgs()[0]), r.build(expr.GetArgs()[1])) + case ast.In: + retPoints, _ := r.buildFromIn(expr) + return retPoints + case ast.IsNull: + startPoint := point{start: true} + endPoint := point{} + return []point{startPoint, endPoint} + case ast.UnaryNot: + return r.buildFromNot(expr.GetArgs()[0].(*expression.ScalarFunction)) + } + + return nil +} + +func (r *builder) intersection(a, b []point) []point { + return r.merge(a, b, false) +} + +func (r *builder) union(a, b []point) []point { + return r.merge(a, b, true) +} + +func (r *builder) mergeSorted(a, b []point) []point { + ret := make([]point, 0, len(a)+len(b)) + i, j := 0, 0 + for i < len(a) && j < len(b) { + less, err := rangePointLess(r.sc, a[i], b[j]) + if err != nil { + r.err = err + return nil + } + if less { + ret = append(ret, a[i]) + i++ + } else { + ret = append(ret, b[j]) + j++ + } + } + if i < len(a) { + ret = append(ret, a[i:]...) + } else if j < len(b) { + ret = append(ret, b[j:]...) + } + return ret +} + +func (r *builder) merge(a, b []point, union bool) []point { + mergedPoints := r.mergeSorted(a, b) + if r.err != nil { + return nil + } + + var ( + inRangeCount int + requiredInRangeCount int + ) + if union { + requiredInRangeCount = 1 + } else { + requiredInRangeCount = 2 + } + curTail := 0 + for _, val := range mergedPoints { + if val.start { + inRangeCount++ + if inRangeCount == requiredInRangeCount { + // Just reached the required in range count, a new range started. + mergedPoints[curTail] = val + curTail++ + } + } else { + if inRangeCount == requiredInRangeCount { + // Just about to leave the required in range count, the range is ended. + mergedPoints[curTail] = val + curTail++ + } + inRangeCount-- + } + } + return mergedPoints[:curTail] +} diff --git a/util/ranger/ranger.go b/util/ranger/ranger.go new file mode 100644 index 0000000..41bebbd --- /dev/null +++ b/util/ranger/ranger.go @@ -0,0 +1,529 @@ +// Copyright 2017 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package ranger + +import ( + "bytes" + "math" + "sort" + "unicode/utf8" + + "github.com/pingcap/errors" + "github.com/pingcap/tidb/expression" + "github.com/pingcap/tidb/kv" + "github.com/pingcap/tidb/parser/ast" + "github.com/pingcap/tidb/parser/charset" + "github.com/pingcap/tidb/parser/mysql" + "github.com/pingcap/tidb/sessionctx" + "github.com/pingcap/tidb/sessionctx/stmtctx" + "github.com/pingcap/tidb/types" + "github.com/pingcap/tidb/util/codec" +) + +func validInterval(sc *stmtctx.StatementContext, low, high point) (bool, error) { + l, err := codec.EncodeKey(sc, nil, low.value) + if err != nil { + return false, errors.Trace(err) + } + if low.excl { + l = []byte(kv.Key(l).PrefixNext()) + } + r, err := codec.EncodeKey(sc, nil, high.value) + if err != nil { + return false, errors.Trace(err) + } + if !high.excl { + r = []byte(kv.Key(r).PrefixNext()) + } + return bytes.Compare(l, r) < 0, nil +} + +// points2Ranges build index ranges from range points. +// Only one column is built there. If there're multiple columns, use appendPoints2Ranges. +func points2Ranges(sc *stmtctx.StatementContext, rangePoints []point, tp *types.FieldType) ([]*Range, error) { + ranges := make([]*Range, 0, len(rangePoints)/2) + for i := 0; i < len(rangePoints); i += 2 { + startPoint, err := convertPoint(sc, rangePoints[i], tp) + if err != nil { + return nil, errors.Trace(err) + } + endPoint, err := convertPoint(sc, rangePoints[i+1], tp) + if err != nil { + return nil, errors.Trace(err) + } + less, err := validInterval(sc, startPoint, endPoint) + if err != nil { + return nil, errors.Trace(err) + } + if !less { + continue + } + // If column has not null flag, [null, null] should be removed. + if mysql.HasNotNullFlag(tp.Flag) && endPoint.value.Kind() == types.KindNull { + continue + } + + ran := &Range{ + LowVal: []types.Datum{startPoint.value}, + LowExclude: startPoint.excl, + HighVal: []types.Datum{endPoint.value}, + HighExclude: endPoint.excl, + } + ranges = append(ranges, ran) + } + return ranges, nil +} + +func convertPoint(sc *stmtctx.StatementContext, point point, tp *types.FieldType) (point, error) { + switch point.value.Kind() { + case types.KindMaxValue, types.KindMinNotNull: + return point, nil + } + casted, err := point.value.ConvertTo(sc, tp) + if err != nil { + return point, errors.Trace(err) + } + valCmpCasted, err := point.value.CompareDatum(sc, &casted) + if err != nil { + return point, errors.Trace(err) + } + point.value = casted + if valCmpCasted == 0 { + return point, nil + } + if point.start { + if point.excl { + if valCmpCasted < 0 { + // e.g. "a > 1.9" convert to "a >= 2". + point.excl = false + } + } else { + if valCmpCasted > 0 { + // e.g. "a >= 1.1 convert to "a > 1" + point.excl = true + } + } + } else { + if point.excl { + if valCmpCasted > 0 { + // e.g. "a < 1.1" convert to "a <= 1" + point.excl = false + } + } else { + if valCmpCasted < 0 { + // e.g. "a <= 1.9" convert to "a < 2" + point.excl = true + } + } + } + return point, nil +} + +// appendPoints2Ranges appends additional column ranges for multi-column index. +// The additional column ranges can only be appended to point ranges. +// for example we have an index (a, b), if the condition is (a > 1 and b = 2) +// then we can not build a conjunctive ranges for this index. 
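Editorial aside: to see why only point ranges can be extended with the next index column, here is a toy version of the append step with string bounds (hypothetical types, not the real Range):

package main

import "fmt"

type rng struct {
	low, high []string
	lowExcl   bool
	highExcl  bool
}

// isPoint reports whether the range pins every column to a single value.
func (r rng) isPoint() bool {
	return !r.lowExcl && !r.highExcl && fmt.Sprint(r.low) == fmt.Sprint(r.high)
}

// appendColumn extends a range with bounds for the next index column only
// when the existing range is a point, echoing appendPoints2Ranges.
func appendColumn(r rng, low, high string, lowExcl, highExcl bool) rng {
	if !r.isPoint() {
		return r // keep the original range; the new condition stays a filter
	}
	r.low = append(r.low, low)
	r.high = append(r.high, high)
	r.lowExcl, r.highExcl = lowExcl, highExcl
	return r
}

func main() {
	// a = 1 is a point, so b > 2 can be appended: bounds become (1 2, 1 +inf].
	aEq1 := rng{low: []string{"1"}, high: []string{"1"}}
	fmt.Println(appendColumn(aEq1, "2", "+inf", true, false))
	// a > 1 is not a point, so b = 2 cannot be appended and the range stays (1, +inf].
	aGt1 := rng{low: []string{"1"}, high: []string{"+inf"}, lowExcl: true}
	fmt.Println(appendColumn(aGt1, "2", "2", false, false))
}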
+func appendPoints2Ranges(sc *stmtctx.StatementContext, origin []*Range, rangePoints []point, + ft *types.FieldType) ([]*Range, error) { + var newIndexRanges []*Range + for i := 0; i < len(origin); i++ { + oRange := origin[i] + if !oRange.IsPoint(sc) { + newIndexRanges = append(newIndexRanges, oRange) + } else { + newRanges, err := appendPoints2IndexRange(sc, oRange, rangePoints, ft) + if err != nil { + return nil, errors.Trace(err) + } + newIndexRanges = append(newIndexRanges, newRanges...) + } + } + return newIndexRanges, nil +} + +func appendPoints2IndexRange(sc *stmtctx.StatementContext, origin *Range, rangePoints []point, + ft *types.FieldType) ([]*Range, error) { + newRanges := make([]*Range, 0, len(rangePoints)/2) + for i := 0; i < len(rangePoints); i += 2 { + startPoint, err := convertPoint(sc, rangePoints[i], ft) + if err != nil { + return nil, errors.Trace(err) + } + endPoint, err := convertPoint(sc, rangePoints[i+1], ft) + if err != nil { + return nil, errors.Trace(err) + } + less, err := validInterval(sc, startPoint, endPoint) + if err != nil { + return nil, errors.Trace(err) + } + if !less { + continue + } + + lowVal := make([]types.Datum, len(origin.LowVal)+1) + copy(lowVal, origin.LowVal) + lowVal[len(origin.LowVal)] = startPoint.value + + highVal := make([]types.Datum, len(origin.HighVal)+1) + copy(highVal, origin.HighVal) + highVal[len(origin.HighVal)] = endPoint.value + + ir := &Range{ + LowVal: lowVal, + LowExclude: startPoint.excl, + HighVal: highVal, + HighExclude: endPoint.excl, + } + newRanges = append(newRanges, ir) + } + return newRanges, nil +} + +// points2TableRanges build ranges for table scan from range points. +// It will remove the nil and convert MinNotNull and MaxValue to MinInt64 or MinUint64 and MaxInt64 or MaxUint64. +func points2TableRanges(sc *stmtctx.StatementContext, rangePoints []point, tp *types.FieldType) ([]*Range, error) { + ranges := make([]*Range, 0, len(rangePoints)/2) + var minValueDatum, maxValueDatum types.Datum + // Currently, table's kv range cannot accept encoded value of MaxValueDatum. we need to convert it. + if mysql.HasUnsignedFlag(tp.Flag) { + minValueDatum.SetUint64(0) + maxValueDatum.SetUint64(math.MaxUint64) + } else { + minValueDatum.SetInt64(math.MinInt64) + maxValueDatum.SetInt64(math.MaxInt64) + } + for i := 0; i < len(rangePoints); i += 2 { + startPoint, err := convertPoint(sc, rangePoints[i], tp) + if err != nil { + return nil, errors.Trace(err) + } + if startPoint.value.Kind() == types.KindNull { + startPoint.value = minValueDatum + startPoint.excl = false + } else if startPoint.value.Kind() == types.KindMinNotNull { + startPoint.value = minValueDatum + } + endPoint, err := convertPoint(sc, rangePoints[i+1], tp) + if err != nil { + return nil, errors.Trace(err) + } + if endPoint.value.Kind() == types.KindMaxValue { + endPoint.value = maxValueDatum + } else if endPoint.value.Kind() == types.KindNull { + continue + } + less, err := validInterval(sc, startPoint, endPoint) + if err != nil { + return nil, errors.Trace(err) + } + if !less { + continue + } + ran := &Range{ + LowVal: []types.Datum{startPoint.value}, + LowExclude: startPoint.excl, + HighVal: []types.Datum{endPoint.value}, + HighExclude: endPoint.excl, + } + ranges = append(ranges, ran) + } + return ranges, nil +} + +// buildColumnRange builds range from CNF conditions. 
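Editorial aside, before the implementation below: buildColumnRange repeatedly intersects the range points produced by each AND-ed condition. A simplified integer version of a single intersection step, with large finite numbers standing in for the infinities (a sketch, not the point-list algorithm used here):

package main

import "fmt"

// intersectInterval intersects two closed integer intervals; ok is false when
// they do not overlap.
func intersectInterval(lo1, hi1, lo2, hi2 int) (lo, hi int, ok bool) {
	lo, hi = maxInt(lo1, lo2), minInt(hi1, hi2)
	return lo, hi, lo <= hi
}

func maxInt(a, b int) int {
	if a > b {
		return a
	}
	return b
}

func minInt(a, b int) int {
	if a < b {
		return a
	}
	return b
}

func main() {
	// a > 1 and a <= 5 over ints, with +/-1000000 standing in for the infinities.
	lo, hi, ok := intersectInterval(2, 1000000, -1000000, 5)
	fmt.Println(lo, hi, ok) // 2 5 true
}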
+func buildColumnRange(accessConditions []expression.Expression, sc *stmtctx.StatementContext, tp *types.FieldType, tableRange bool, colLen int) (ranges []*Range, err error) { + rb := builder{sc: sc} + rangePoints := fullRange + for _, cond := range accessConditions { + rangePoints = rb.intersection(rangePoints, rb.build(cond)) + if rb.err != nil { + return nil, errors.Trace(rb.err) + } + } + newTp := newFieldType(tp) + if tableRange { + ranges, err = points2TableRanges(sc, rangePoints, newTp) + } else { + ranges, err = points2Ranges(sc, rangePoints, newTp) + } + if err != nil { + return nil, errors.Trace(err) + } + if colLen != types.UnspecifiedLength { + for _, ran := range ranges { + if CutDatumByPrefixLen(&ran.LowVal[0], colLen, tp) { + ran.LowExclude = false + } + if CutDatumByPrefixLen(&ran.HighVal[0], colLen, tp) { + ran.HighExclude = false + } + } + ranges, err = unionRanges(sc, ranges) + if err != nil { + return nil, err + } + } + return ranges, nil +} + +// BuildTableRange builds range of PK column for PhysicalTableScan. +func BuildTableRange(accessConditions []expression.Expression, sc *stmtctx.StatementContext, tp *types.FieldType) ([]*Range, error) { + return buildColumnRange(accessConditions, sc, tp, true, types.UnspecifiedLength) +} + +// BuildColumnRange builds range from access conditions for general columns. +func BuildColumnRange(conds []expression.Expression, sc *stmtctx.StatementContext, tp *types.FieldType, colLen int) ([]*Range, error) { + if len(conds) == 0 { + return []*Range{{LowVal: []types.Datum{{}}, HighVal: []types.Datum{types.MaxValueDatum()}}}, nil + } + return buildColumnRange(conds, sc, tp, false, colLen) +} + +// buildCNFIndexRange builds the range for index where the top layer is CNF. +func buildCNFIndexRange(sc *stmtctx.StatementContext, cols []*expression.Column, newTp []*types.FieldType, lengths []int, + eqAndInCount int, accessCondition []expression.Expression) ([]*Range, error) { + rb := builder{sc: sc} + var ( + ranges []*Range + err error + ) + for _, col := range cols { + newTp = append(newTp, newFieldType(col.RetType)) + } + for i := 0; i < eqAndInCount; i++ { + if sf, ok := accessCondition[i].(*expression.ScalarFunction); !ok || (sf.FuncName.L != ast.EQ && sf.FuncName.L != ast.In) { + break + } + // Build ranges for equal or in access conditions. + point := rb.build(accessCondition[i]) + if rb.err != nil { + return nil, errors.Trace(rb.err) + } + if i == 0 { + ranges, err = points2Ranges(sc, point, newTp[i]) + } else { + ranges, err = appendPoints2Ranges(sc, ranges, point, newTp[i]) + } + if err != nil { + return nil, errors.Trace(err) + } + } + rangePoints := fullRange + // Build rangePoints for non-equal access conditions. + for i := eqAndInCount; i < len(accessCondition); i++ { + rangePoints = rb.intersection(rangePoints, rb.build(accessCondition[i])) + if rb.err != nil { + return nil, errors.Trace(rb.err) + } + } + if eqAndInCount == 0 { + ranges, err = points2Ranges(sc, rangePoints, newTp[0]) + } else if eqAndInCount < len(accessCondition) { + ranges, err = appendPoints2Ranges(sc, ranges, rangePoints, newTp[eqAndInCount]) + } + if err != nil { + return nil, errors.Trace(err) + } + + // Take prefix index into consideration. 
+	if hasPrefix(lengths) {
+		if fixPrefixColRange(ranges, lengths, newTp) {
+			ranges, err = unionRanges(sc, ranges)
+			if err != nil {
+				return nil, errors.Trace(err)
+			}
+		}
+	}
+
+	return ranges, nil
+}
+
+type sortRange struct {
+	originalValue *Range
+	encodedStart  []byte
+	encodedEnd    []byte
+}
+
+func unionRanges(sc *stmtctx.StatementContext, ranges []*Range) ([]*Range, error) {
+	if len(ranges) == 0 {
+		return nil, nil
+	}
+	objects := make([]*sortRange, 0, len(ranges))
+	for _, ran := range ranges {
+		left, err := codec.EncodeKey(sc, nil, ran.LowVal...)
+		if err != nil {
+			return nil, errors.Trace(err)
+		}
+		if ran.LowExclude {
+			left = kv.Key(left).PrefixNext()
+		}
+		right, err := codec.EncodeKey(sc, nil, ran.HighVal...)
+		if err != nil {
+			return nil, errors.Trace(err)
+		}
+		if !ran.HighExclude {
+			right = kv.Key(right).PrefixNext()
+		}
+		objects = append(objects, &sortRange{originalValue: ran, encodedStart: left, encodedEnd: right})
+	}
+	sort.Slice(objects, func(i, j int) bool {
+		return bytes.Compare(objects[i].encodedStart, objects[j].encodedStart) < 0
+	})
+	ranges = ranges[:0]
+	lastRange := objects[0]
+	for i := 1; i < len(objects); i++ {
+		// For two intervals [a, b] and [c, d], the sort above guarantees that a <= c. If b >= c, the two intervals
+		// overlap and can be merged into [a, max(b, d)]. Otherwise they are disjoint.
+		if bytes.Compare(lastRange.encodedEnd, objects[i].encodedStart) >= 0 {
+			if bytes.Compare(lastRange.encodedEnd, objects[i].encodedEnd) < 0 {
+				lastRange.encodedEnd = objects[i].encodedEnd
+				lastRange.originalValue.HighVal = objects[i].originalValue.HighVal
+				lastRange.originalValue.HighExclude = objects[i].originalValue.HighExclude
+			}
+		} else {
+			ranges = append(ranges, lastRange.originalValue)
+			lastRange = objects[i]
+		}
+	}
+	ranges = append(ranges, lastRange.originalValue)
+	return ranges, nil
+}
+
+func hasPrefix(lengths []int) bool {
+	for _, l := range lengths {
+		if l != types.UnspecifiedLength {
+			return true
+		}
+	}
+	return false
+}
+
+// fixPrefixColRange checks whether the range of one column exceeds the prefix length and needs to be cut.
+// It specially handles the last column of each range point. If the last one needs to be cut, it will
+// change the exclude status of that point and return `true` to tell the caller that a range merge is needed,
+// since the cut intervals may intersect.
+// e.g. if the intervals are (-inf -inf, a xxxxx), (a xxxxx, +inf +inf) and the length of the last column is 3,
+// then we'll change them to (-inf -inf, a xxx], [a xxx, +inf +inf). These two intervals intersect,
+// so we need a merge operation.
+// Q: Is it enough to only check the last column when deciding whether the endpoint's exclude status needs to be reset?
+// A: Yes. Suppose that the interval is (-inf -inf, a xxxxx b) and only the second column needs to be cut.
+// The result would be (-inf -inf, a xxx b) if its length is 3. Obviously we only need to care about the data
+// whose first two keys are `a` and `xxx`. It reads all data whose index value begins with `a` and `xxx` and whose third
+// value is less than `b`, covering the values that begin with `a` and `xxxxx` with a third value less than `b` perfectly.
+// So in this case we don't need to reset its exclude status. The right endpoint case can be proved in the same way. 
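Editorial aside, before the implementation below: cutting an endpoint to the prefix length widens the range, which is why the exclude flag is dropped and a re-merge may be needed afterwards. A standalone sketch of the cut on plain strings (the real code works on types.Datum values):

package main

import "fmt"

// cutPrefix truncates v to length runes and reports whether anything was cut,
// mirroring the contract of CutDatumByPrefixLen for plain strings.
func cutPrefix(v string, length int) (string, bool) {
	r := []rune(v)
	if length >= 0 && len(r) > length {
		return string(r[:length]), true
	}
	return v, false
}

func main() {
	// With a prefix length of 3, the exclusive bound "... < 'axxxxx'" has to become
	// the inclusive bound "... <= 'axx'": the cut loses information, so the
	// endpoint must be kept rather than excluded.
	high, cut := cutPrefix("axxxxx", 3)
	fmt.Println(high, cut) // axx true
}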
+func fixPrefixColRange(ranges []*Range, lengths []int, tp []*types.FieldType) bool {
+	var hasCut bool
+	for _, ran := range ranges {
+		lowTail := len(ran.LowVal) - 1
+		for i := 0; i < lowTail; i++ {
+			CutDatumByPrefixLen(&ran.LowVal[i], lengths[i], tp[i])
+		}
+		lowCut := CutDatumByPrefixLen(&ran.LowVal[lowTail], lengths[lowTail], tp[lowTail])
+		if lowCut {
+			ran.LowExclude = false
+		}
+		highTail := len(ran.HighVal) - 1
+		for i := 0; i < highTail; i++ {
+			CutDatumByPrefixLen(&ran.HighVal[i], lengths[i], tp[i])
+		}
+		highCut := CutDatumByPrefixLen(&ran.HighVal[highTail], lengths[highTail], tp[highTail])
+		if highCut {
+			ran.HighExclude = false
+		}
+		// Accumulate across ranges so that a cut in any range triggers the merge.
+		hasCut = hasCut || lowCut || highCut
+	}
+	return hasCut
+}
+
+// CutDatumByPrefixLen cuts the datum according to the prefix length.
+// If it's UTF8 encoded, we will cut it by characters rather than bytes.
+func CutDatumByPrefixLen(v *types.Datum, length int, tp *types.FieldType) bool {
+	if v.Kind() == types.KindString || v.Kind() == types.KindBytes {
+		colCharset := tp.Charset
+		colValue := v.GetBytes()
+		isUTF8Charset := colCharset == charset.CharsetUTF8 || colCharset == charset.CharsetUTF8MB4
+		if isUTF8Charset {
+			if length != types.UnspecifiedLength && utf8.RuneCount(colValue) > length {
+				rs := bytes.Runes(colValue)
+				truncateStr := string(rs[:length])
+				// truncate value and limit its length
+				v.SetString(truncateStr)
+				return true
+			}
+		} else if length != types.UnspecifiedLength && len(colValue) > length {
+			// truncate value and limit its length
+			v.SetBytes(colValue[:length])
+			return true
+		}
+	}
+	return false
+}
+
+// We cannot use the FieldType of the column directly. e.g. the column a is int32 and we have a > 1111111111111111111.
+// Obviously the constant is bigger than MaxInt32, so we will get an overflow error if we use the FieldType of column a.
+func newFieldType(tp *types.FieldType) *types.FieldType {
+	switch tp.Tp {
+	// To avoid overflow error.
+	case mysql.TypeTiny, mysql.TypeShort, mysql.TypeInt24, mysql.TypeLong, mysql.TypeLonglong:
+		newTp := types.NewFieldType(mysql.TypeLonglong)
+		newTp.Flag = tp.Flag
+		newTp.Charset = tp.Charset
+		return newTp
+	// To avoid data truncate error.
+	case mysql.TypeFloat, mysql.TypeDouble, mysql.TypeBlob, mysql.TypeTinyBlob, mysql.TypeMediumBlob, mysql.TypeLongBlob,
+		mysql.TypeString, mysql.TypeVarchar, mysql.TypeVarString:
+		newTp := types.NewFieldType(tp.Tp)
+		newTp.Charset = tp.Charset
+		return newTp
+	default:
+		return tp
+	}
+}
+
+// points2EqOrInCond constructs an 'EQUAL' or 'IN' scalar function based on the
+// 'points'. The target column is extracted from the 'expr'.
+// NOTE:
+// 1. 'expr' must be either an 'EQUAL' or an 'IN' function.
+// 2. 'points' should not be empty. 
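Editorial aside, before the implementation below: each [v, v] point pair contributes one value, and the rebuilt predicate is an equality for a single value or an IN list for several. A string-based sketch, not the real expression API:

package main

import (
	"fmt"
	"strings"
)

// rebuildEqOrIn turns the start values of point pairs back into a readable
// predicate, echoing points2EqOrInCond: one value -> EQ, several -> IN.
func rebuildEqOrIn(col string, pointValues []string) string {
	if len(pointValues) == 1 {
		return fmt.Sprintf("%s = %s", col, pointValues[0])
	}
	return fmt.Sprintf("%s in (%s)", col, strings.Join(pointValues, ", "))
}

func main() {
	fmt.Println(rebuildEqOrIn("a", []string{"2"}))      // a = 2
	fmt.Println(rebuildEqOrIn("a", []string{"2", "3"})) // a in (2, 3)
}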
+func points2EqOrInCond(ctx sessionctx.Context, points []point, expr expression.Expression) expression.Expression { + // len(points) cannot be 0 here, since we impose early termination in ExtractEqAndInCondition + sf, _ := expr.(*expression.ScalarFunction) + // Constant and Column args should have same RetType, simply get from first arg + retType := sf.GetArgs()[0].GetType() + args := make([]expression.Expression, 0, len(points)/2) + if sf.FuncName.L == ast.EQ { + if c, ok := sf.GetArgs()[0].(*expression.Column); ok { + args = append(args, c) + } else if c, ok := sf.GetArgs()[1].(*expression.Column); ok { + args = append(args, c) + } + } else { + args = append(args, sf.GetArgs()[0]) + } + for i := 0; i < len(points); i = i + 2 { + value := &expression.Constant{ + Value: points[i].value, + RetType: retType, + } + args = append(args, value) + } + funcName := ast.EQ + if len(args) > 2 { + funcName = ast.In + } + f := expression.NewFunctionInternal(ctx, funcName, sf.GetType(), args...) + return f +} diff --git a/util/ranger/ranger_test.go b/util/ranger/ranger_test.go new file mode 100644 index 0000000..e0af1c2 --- /dev/null +++ b/util/ranger/ranger_test.go @@ -0,0 +1,913 @@ +// Copyright 2017 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package ranger_test + +import ( + "context" + "fmt" + "testing" + + . 
"github.com/pingcap/check" + "github.com/pingcap/errors" + "github.com/pingcap/tidb/domain" + "github.com/pingcap/tidb/expression" + "github.com/pingcap/tidb/kv" + "github.com/pingcap/tidb/parser" + plannercore "github.com/pingcap/tidb/planner/core" + "github.com/pingcap/tidb/session" + "github.com/pingcap/tidb/sessionctx" + "github.com/pingcap/tidb/sessionctx/stmtctx" + "github.com/pingcap/tidb/store/mockstore" + "github.com/pingcap/tidb/store/mockstore/mocktikv" + "github.com/pingcap/tidb/types" + "github.com/pingcap/tidb/util/ranger" + "github.com/pingcap/tidb/util/testkit" + "github.com/pingcap/tidb/util/testleak" +) + +func TestT(t *testing.T) { + TestingT(t) +} + +var _ = Suite(&testRangerSuite{}) + +type testRangerSuite struct { + *parser.Parser +} + +func (s *testRangerSuite) SetUpSuite(c *C) { + s.Parser = parser.New() +} + +func newDomainStoreWithBootstrap(c *C) (*domain.Domain, kv.Storage, error) { + cluster := mocktikv.NewCluster() + mocktikv.BootstrapWithSingleStore(cluster) + mvccStore := mocktikv.MustNewMVCCStore() + store, err := mockstore.NewMockTikvStore( + mockstore.WithCluster(cluster), + mockstore.WithMVCCStore(mvccStore), + ) + c.Assert(err, IsNil) + session.SetSchemaLease(0) + session.DisableStats4Test() + if err != nil { + return nil, nil, errors.Trace(err) + } + dom, err := session.BootstrapSession(store) + return dom, store, errors.Trace(err) +} + +func (s *testRangerSuite) TestTableRange(c *C) { + defer testleak.AfterTest(c)() + dom, store, err := newDomainStoreWithBootstrap(c) + defer func() { + dom.Close() + store.Close() + }() + c.Assert(err, IsNil) + testKit := testkit.NewTestKit(c, store) + testKit.MustExec("use test") + testKit.MustExec("drop table if exists t") + testKit.MustExec("create table t(a int, b int, c int unsigned)") + + tests := []struct { + exprStr string + accessConds string + filterConds string + resultStr string + }{ + { + exprStr: "a = 1", + accessConds: "[eq(test.t.a, 1)]", + filterConds: "[]", + resultStr: "[[1,1]]", + }, + { + exprStr: "1 = a", + accessConds: "[eq(1, test.t.a)]", + filterConds: "[]", + resultStr: "[[1,1]]", + }, + { + exprStr: "a != 1", + accessConds: "[ne(test.t.a, 1)]", + filterConds: "[]", + resultStr: "[[-inf,1) (1,+inf]]", + }, + { + exprStr: "1 != a", + accessConds: "[ne(1, test.t.a)]", + filterConds: "[]", + resultStr: "[[-inf,1) (1,+inf]]", + }, + { + exprStr: "a > 1", + accessConds: "[gt(test.t.a, 1)]", + filterConds: "[]", + resultStr: "[(1,+inf]]", + }, + { + exprStr: "1 < a", + accessConds: "[lt(1, test.t.a)]", + filterConds: "[]", + resultStr: "[(1,+inf]]", + }, + { + exprStr: "a >= 1", + accessConds: "[ge(test.t.a, 1)]", + filterConds: "[]", + resultStr: "[[1,+inf]]", + }, + { + exprStr: "1 <= a", + accessConds: "[le(1, test.t.a)]", + filterConds: "[]", + resultStr: "[[1,+inf]]", + }, + { + exprStr: "a < 1", + accessConds: "[lt(test.t.a, 1)]", + filterConds: "[]", + resultStr: "[[-inf,1)]", + }, + { + exprStr: "1 > a", + accessConds: "[gt(1, test.t.a)]", + filterConds: "[]", + resultStr: "[[-inf,1)]", + }, + { + exprStr: "a <= 1", + accessConds: "[le(test.t.a, 1)]", + filterConds: "[]", + resultStr: "[[-inf,1]]", + }, + { + exprStr: "1 >= test.t.a", + accessConds: "[ge(1, test.t.a)]", + filterConds: "[]", + resultStr: "[[-inf,1]]", + }, + { + exprStr: "(a)", + accessConds: "[test.t.a]", + filterConds: "[]", + resultStr: "[[-inf,0) (0,+inf]]", + }, + { + exprStr: "a in (1, 3, NULL, 2)", + accessConds: "[in(test.t.a, 1, 3, , 2)]", + filterConds: "[]", + resultStr: "[[1,1] [2,2] [3,3]]", + }, + { + exprStr: `a 
IN (8,8,81,45)`, + accessConds: "[in(test.t.a, 8, 8, 81, 45)]", + filterConds: "[]", + resultStr: `[[8,8] [45,45] [81,81]]`, + }, + { + exprStr: "a between 1 and 2", + accessConds: "[ge(test.t.a, 1) le(test.t.a, 2)]", + filterConds: "[]", + resultStr: "[[1,2]]", + }, + { + exprStr: "a not between 1 and 2", + accessConds: "[or(lt(test.t.a, 1), gt(test.t.a, 2))]", + filterConds: "[]", + resultStr: "[[-inf,1) (2,+inf]]", + }, + { + exprStr: "a between 2 and 1", + accessConds: "[ge(test.t.a, 2) le(test.t.a, 1)]", + filterConds: "[]", + resultStr: "[]", + }, + { + exprStr: "a not between 2 and 1", + accessConds: "[or(lt(test.t.a, 2), gt(test.t.a, 1))]", + filterConds: "[]", + resultStr: "[[-inf,+inf]]", + }, + { + exprStr: "a IS NULL", + accessConds: "[isnull(test.t.a)]", + filterConds: "[]", + resultStr: "[]", + }, + { + exprStr: "a IS NOT NULL", + accessConds: "[not(isnull(test.t.a))]", + filterConds: "[]", + resultStr: "[[-inf,+inf]]", + }, + { + exprStr: "a = 1 or a = 3 or a = 4 or (a > 1 and (a = -1 or a = 5))", + accessConds: "[or(or(eq(test.t.a, 1), eq(test.t.a, 3)), or(eq(test.t.a, 4), and(gt(test.t.a, 1), or(eq(test.t.a, -1), eq(test.t.a, 5)))))]", + filterConds: "[]", + resultStr: "[[1,1] [3,3] [4,4] [5,5]]", + }, + { + exprStr: "(a = 1 and b = 1) or (a = 2 and b = 2)", + accessConds: "[or(eq(test.t.a, 1), eq(test.t.a, 2))]", + filterConds: "[or(and(eq(test.t.a, 1), eq(test.t.b, 1)), and(eq(test.t.a, 2), eq(test.t.b, 2)))]", + resultStr: "[[1,1] [2,2]]", + }, + { + exprStr: "a = 1 or a = 3 or a = 4 or (b > 1 and (a = -1 or a = 5))", + accessConds: "[or(or(eq(test.t.a, 1), eq(test.t.a, 3)), or(eq(test.t.a, 4), or(eq(test.t.a, -1), eq(test.t.a, 5))))]", + filterConds: "[or(or(or(eq(test.t.a, 1), eq(test.t.a, 3)), eq(test.t.a, 4)), and(gt(test.t.b, 1), or(eq(test.t.a, -1), eq(test.t.a, 5))))]", + resultStr: "[[-1,-1] [1,1] [3,3] [4,4] [5,5]]", + }, + { + exprStr: "a in (1, 1, 1, 1, 1, 1, 2, 1, 2, 3, 2, 3, 4, 4, 1, 2)", + accessConds: "[in(test.t.a, 1, 1, 1, 1, 1, 1, 2, 1, 2, 3, 2, 3, 4, 4, 1, 2)]", + filterConds: "[]", + resultStr: "[[1,1] [2,2] [3,3] [4,4]]", + }, + { + exprStr: "a not in (1, 2, 3)", + accessConds: "[not(in(test.t.a, 1, 2, 3))]", + filterConds: "[]", + resultStr: "[[-inf,1) (3,+inf]]", + }, + { + exprStr: "a > 9223372036854775807", + accessConds: "[gt(test.t.a, 9223372036854775807)]", + filterConds: "[]", + resultStr: "[]", + }, + { + exprStr: "a >= 9223372036854775807", + accessConds: "[ge(test.t.a, 9223372036854775807)]", + filterConds: "[]", + resultStr: "[[9223372036854775807,+inf]]", + }, + { + exprStr: "a < -9223372036854775807", + accessConds: "[lt(test.t.a, -9223372036854775807)]", + filterConds: "[]", + resultStr: "[[-inf,-9223372036854775807)]", + }, + { + exprStr: "a < -9223372036854775808", + accessConds: "[lt(test.t.a, -9223372036854775808)]", + filterConds: "[]", + resultStr: "[]", + }, + } + + ctx := context.Background() + for _, tt := range tests { + sql := "select * from t where " + tt.exprStr + sctx := testKit.Se.(sessionctx.Context) + stmts, err := session.Parse(sctx, sql) + c.Assert(err, IsNil, Commentf("error %v, for expr %s", err, tt.exprStr)) + c.Assert(stmts, HasLen, 1) + is := domain.GetDomain(sctx).InfoSchema() + err = plannercore.Preprocess(sctx, stmts[0], is) + c.Assert(err, IsNil, Commentf("error %v, for resolve name, expr %s", err, tt.exprStr)) + p, _, err := plannercore.BuildLogicalPlan(ctx, sctx, stmts[0], is) + c.Assert(err, IsNil, Commentf("error %v, for build plan, expr %s", err, tt.exprStr)) + selection := 
p.(plannercore.LogicalPlan).Children()[0].(*plannercore.LogicalSelection) + conds := make([]expression.Expression, len(selection.Conditions)) + for i, cond := range selection.Conditions { + conds[i] = expression.PushDownNot(sctx, cond) + } + tbl := selection.Children()[0].(*plannercore.DataSource).TableInfo() + col := expression.ColInfo2Col(selection.Schema().Columns, tbl.Columns[0]) + c.Assert(col, NotNil) + var filter []expression.Expression + conds, filter = ranger.DetachCondsForColumn(sctx, conds, col) + c.Assert(fmt.Sprintf("%s", conds), Equals, tt.accessConds, Commentf("wrong access conditions for expr: %s", tt.exprStr)) + c.Assert(fmt.Sprintf("%s", filter), Equals, tt.filterConds, Commentf("wrong filter conditions for expr: %s", tt.exprStr)) + result, err := ranger.BuildTableRange(conds, new(stmtctx.StatementContext), col.RetType) + c.Assert(err, IsNil, Commentf("failed to build table range for expr %s", tt.exprStr)) + got := fmt.Sprintf("%v", result) + c.Assert(got, Equals, tt.resultStr, Commentf("different for expr %s", tt.exprStr)) + } +} + +func (s *testRangerSuite) TestIndexRange(c *C) { + defer testleak.AfterTest(c)() + dom, store, err := newDomainStoreWithBootstrap(c) + defer func() { + dom.Close() + store.Close() + }() + c.Assert(err, IsNil) + testKit := testkit.NewTestKit(c, store) + testKit.MustExec("use test") + testKit.MustExec("drop table if exists t") + testKit.MustExec("create table t(a varchar(50), b int, c double, d varchar(10), e binary(10), index idx_ab(a(50), b), index idx_cb(c, a), index idx_d(d(2)), index idx_e(e(2)))") + + tests := []struct { + indexPos int + exprStr string + accessConds string + filterConds string + resultStr string + }{ + { + indexPos: 0, + exprStr: `a > NULL`, + accessConds: "[gt(test.t.a, )]", + filterConds: "[]", + resultStr: `[]`, + }, + { + indexPos: 0, + exprStr: `a = 'a' and b in (1, 2, 3)`, + accessConds: "[eq(test.t.a, a) in(test.t.b, 1, 2, 3)]", + filterConds: "[]", + resultStr: "[[\"a\" 1,\"a\" 1] [\"a\" 2,\"a\" 2] [\"a\" 3,\"a\" 3]]", + }, + { + indexPos: 0, + exprStr: `a = 'a' and b not in (1, 2, 3)`, + accessConds: "[eq(test.t.a, a) not(in(test.t.b, 1, 2, 3))]", + filterConds: "[]", + resultStr: "[(\"a\" NULL,\"a\" 1) (\"a\" 3,\"a\" +inf]]", + }, + { + indexPos: 1, + exprStr: `c in ('1.1', 1, 1.1) and a in ('1', 'a', NULL)`, + accessConds: "[in(test.t.c, 1.1, 1, 1.1) in(test.t.a, 1, a, )]", + filterConds: "[]", + resultStr: "[[1 \"1\",1 \"1\"] [1 \"a\",1 \"a\"] [1.1 \"1\",1.1 \"1\"] [1.1 \"a\",1.1 \"a\"]]", + }, + { + indexPos: 1, + exprStr: "c in (1, 1, 1, 1, 1, 1, 2, 1, 2, 3, 2, 3, 4, 4, 1, 2)", + accessConds: "[in(test.t.c, 1, 1, 1, 1, 1, 1, 2, 1, 2, 3, 2, 3, 4, 4, 1, 2)]", + filterConds: "[]", + resultStr: "[[1,1] [2,2] [3,3] [4,4]]", + }, + { + indexPos: 1, + exprStr: "c not in (1, 2, 3)", + accessConds: "[not(in(test.t.c, 1, 2, 3))]", + filterConds: "[]", + resultStr: "[(NULL,1) (1,2) (2,3) (3,+inf]]", + }, + { + indexPos: 1, + exprStr: "c in (1, 2) and c in (1, 3)", + accessConds: "[eq(test.t.c, 1)]", + filterConds: "[]", + resultStr: "[[1,1]]", + }, + { + indexPos: 1, + exprStr: "c = 1 and c = 2", + accessConds: "[]", + filterConds: "[]", + resultStr: "[]", + }, + { + indexPos: 0, + exprStr: "a in (NULL)", + accessConds: "[eq(test.t.a, )]", + filterConds: "[]", + resultStr: "[]", + }, + { + indexPos: 0, + exprStr: "a not in (NULL, '1', '2', '3')", + accessConds: "[not(in(test.t.a, , 1, 2, 3))]", + filterConds: "[]", + resultStr: "[]", + }, + { + indexPos: 0, + exprStr: "(a > 'b' and a < 'bbb') or (a < 'cb' and a > 
'a')", + accessConds: "[or(and(gt(test.t.a, b), lt(test.t.a, bbb)), and(lt(test.t.a, cb), gt(test.t.a, a)))]", + filterConds: "[]", + resultStr: "[(\"a\",\"cb\")]", + }, + { + indexPos: 0, + exprStr: "(a > 'a' and a < 'b') or (a >= 'b' and a < 'c')", + accessConds: "[or(and(gt(test.t.a, a), lt(test.t.a, b)), and(ge(test.t.a, b), lt(test.t.a, c)))]", + filterConds: "[]", + resultStr: "[(\"a\",\"c\")]", + }, + { + indexPos: 0, + exprStr: "(a > 'a' and a < 'b' and b < 1) or (a >= 'b' and a < 'c')", + accessConds: "[or(and(gt(test.t.a, a), lt(test.t.a, b)), and(ge(test.t.a, b), lt(test.t.a, c)))]", + filterConds: "[or(and(and(gt(test.t.a, a), lt(test.t.a, b)), lt(test.t.b, 1)), and(ge(test.t.a, b), lt(test.t.a, c)))]", + resultStr: "[(\"a\",\"c\")]", + }, + { + indexPos: 0, + exprStr: "(a in ('a', 'b') and b < 1) or (a >= 'b' and a < 'c')", + accessConds: "[or(and(in(test.t.a, a, b), lt(test.t.b, 1)), and(ge(test.t.a, b), lt(test.t.a, c)))]", + filterConds: "[]", + resultStr: `[["a" -inf,"a" 1) ["b","c")]`, + }, + { + indexPos: 0, + exprStr: "(a > 'a') or (c > 1)", + accessConds: "[]", + filterConds: "[or(gt(test.t.a, a), gt(test.t.c, 1))]", + resultStr: "[[NULL,+inf]]", + }, + { + indexPos: 2, + exprStr: `d = "你好啊"`, + accessConds: "[eq(test.t.d, 你好啊)]", + filterConds: "[eq(test.t.d, 你好啊)]", + resultStr: "[[\"你好\",\"你好\"]]", + }, + { + indexPos: 3, + exprStr: `e = "你好啊"`, + accessConds: "[eq(test.t.e, 你好啊)]", + filterConds: "[eq(test.t.e, 你好啊)]", + resultStr: "[[\"[228 189]\",\"[228 189]\"]]", + }, + { + indexPos: 2, + exprStr: `d in ("你好啊", "再见")`, + accessConds: "[in(test.t.d, 你好啊, 再见)]", + filterConds: "[in(test.t.d, 你好啊, 再见)]", + resultStr: "[[\"你好\",\"你好\"] [\"再见\",\"再见\"]]", + }, + { + indexPos: 2, + exprStr: `d not in ("你好啊")`, + accessConds: "[]", + filterConds: "[ne(test.t.d, 你好啊)]", + resultStr: "[[NULL,+inf]]", + }, + { + indexPos: 2, + exprStr: `d < "你好" || d > "你好"`, + accessConds: "[or(lt(test.t.d, 你好), gt(test.t.d, 你好))]", + filterConds: "[or(lt(test.t.d, 你好), gt(test.t.d, 你好))]", + resultStr: "[[-inf,\"你好\") (\"你好\",+inf]]", + }, + } + + ctx := context.Background() + for _, tt := range tests { + sql := "select * from t where " + tt.exprStr + sctx := testKit.Se.(sessionctx.Context) + stmts, err := session.Parse(sctx, sql) + c.Assert(err, IsNil, Commentf("error %v, for expr %s", err, tt.exprStr)) + c.Assert(stmts, HasLen, 1) + is := domain.GetDomain(sctx).InfoSchema() + err = plannercore.Preprocess(sctx, stmts[0], is) + c.Assert(err, IsNil, Commentf("error %v, for resolve name, expr %s", err, tt.exprStr)) + p, _, err := plannercore.BuildLogicalPlan(ctx, sctx, stmts[0], is) + c.Assert(err, IsNil, Commentf("error %v, for build plan, expr %s", err, tt.exprStr)) + selection := p.(plannercore.LogicalPlan).Children()[0].(*plannercore.LogicalSelection) + tbl := selection.Children()[0].(*plannercore.DataSource).TableInfo() + c.Assert(selection, NotNil, Commentf("expr:%v", tt.exprStr)) + conds := make([]expression.Expression, len(selection.Conditions)) + for i, cond := range selection.Conditions { + conds[i] = expression.PushDownNot(sctx, cond) + } + cols, lengths := expression.IndexInfo2PrefixCols(tbl.Columns, selection.Schema().Columns, tbl.Indices[tt.indexPos]) + c.Assert(cols, NotNil) + res, err := ranger.DetachCondAndBuildRangeForIndex(sctx, conds, cols, lengths) + c.Assert(err, IsNil) + c.Assert(fmt.Sprintf("%s", res.AccessConds), Equals, tt.accessConds, Commentf("wrong access conditions for expr: %s", tt.exprStr)) + c.Assert(fmt.Sprintf("%s", res.RemainedConds), Equals, 
tt.filterConds, Commentf("wrong filter conditions for expr: %s", tt.exprStr)) + got := fmt.Sprintf("%v", res.Ranges) + c.Assert(got, Equals, tt.resultStr, Commentf("different for expr %s", tt.exprStr)) + } +} + +// for issue #6661 +func (s *testRangerSuite) TestIndexRangeForUnsignedInt(c *C) { + defer testleak.AfterTest(c)() + dom, store, err := newDomainStoreWithBootstrap(c) + defer func() { + dom.Close() + store.Close() + }() + c.Assert(err, IsNil) + testKit := testkit.NewTestKit(c, store) + testKit.MustExec("use test") + testKit.MustExec("drop table if exists t") + testKit.MustExec("create table t (a smallint(5) unsigned,key (a) )") + + tests := []struct { + indexPos int + exprStr string + accessConds string + filterConds string + resultStr string + }{ + { + indexPos: 0, + exprStr: `a not in (0, 1, 2)`, + accessConds: "[not(in(test.t.a, 0, 1, 2))]", + filterConds: "[]", + resultStr: `[(NULL,0) (2,+inf]]`, + }, + { + indexPos: 0, + exprStr: `a not in (-1, 1, 2)`, + accessConds: "[not(in(test.t.a, -1, 1, 2))]", + filterConds: "[]", + resultStr: `[(NULL,1) (2,+inf]]`, + }, + { + indexPos: 0, + exprStr: `a not in (-2, -1, 1, 2)`, + accessConds: "[not(in(test.t.a, -2, -1, 1, 2))]", + filterConds: "[]", + resultStr: `[(NULL,1) (2,+inf]]`, + }, + { + indexPos: 0, + exprStr: `a not in (111)`, + accessConds: "[ne(test.t.a, 111)]", + filterConds: "[]", + resultStr: `[[-inf,111) (111,+inf]]`, + }, + { + indexPos: 0, + exprStr: `a not in (1, 2, 9223372036854775810)`, + accessConds: "[not(in(test.t.a, 1, 2, 9223372036854775810))]", + filterConds: "[]", + resultStr: `[(NULL,1) (2,9223372036854775810) (9223372036854775810,+inf]]`, + }, + { + indexPos: 0, + exprStr: `a >= -2147483648`, + accessConds: "[ge(test.t.a, -2147483648)]", + filterConds: "[]", + resultStr: `[[0,+inf]]`, + }, + { + indexPos: 0, + exprStr: `a > -2147483648`, + accessConds: "[gt(test.t.a, -2147483648)]", + filterConds: "[]", + resultStr: `[[0,+inf]]`, + }, + { + indexPos: 0, + exprStr: `a != -2147483648`, + accessConds: "[ne(test.t.a, -2147483648)]", + filterConds: "[]", + resultStr: `[[0,+inf]]`, + }, + { + exprStr: "a < -1 or a < 1", + accessConds: "[or(lt(test.t.a, -1), lt(test.t.a, 1))]", + filterConds: "[]", + resultStr: "[[-inf,1)]", + }, + { + exprStr: "a < -1 and a < 1", + accessConds: "[lt(test.t.a, -1) lt(test.t.a, 1)]", + filterConds: "[]", + resultStr: "[]", + }, + } + + ctx := context.Background() + for _, tt := range tests { + sql := "select * from t where " + tt.exprStr + sctx := testKit.Se.(sessionctx.Context) + stmts, err := session.Parse(sctx, sql) + c.Assert(err, IsNil, Commentf("error %v, for expr %s", err, tt.exprStr)) + c.Assert(stmts, HasLen, 1) + is := domain.GetDomain(sctx).InfoSchema() + err = plannercore.Preprocess(sctx, stmts[0], is) + c.Assert(err, IsNil, Commentf("error %v, for resolve name, expr %s", err, tt.exprStr)) + p, _, err := plannercore.BuildLogicalPlan(ctx, sctx, stmts[0], is) + c.Assert(err, IsNil, Commentf("error %v, for build plan, expr %s", err, tt.exprStr)) + selection := p.(plannercore.LogicalPlan).Children()[0].(*plannercore.LogicalSelection) + tbl := selection.Children()[0].(*plannercore.DataSource).TableInfo() + c.Assert(selection, NotNil, Commentf("expr:%v", tt.exprStr)) + conds := make([]expression.Expression, len(selection.Conditions)) + for i, cond := range selection.Conditions { + conds[i] = expression.PushDownNot(sctx, cond) + } + cols, lengths := expression.IndexInfo2PrefixCols(tbl.Columns, selection.Schema().Columns, tbl.Indices[tt.indexPos]) + c.Assert(cols, NotNil) + res, 
err := ranger.DetachCondAndBuildRangeForIndex(sctx, conds, cols, lengths) + c.Assert(err, IsNil) + c.Assert(fmt.Sprintf("%s", res.AccessConds), Equals, tt.accessConds, Commentf("wrong access conditions for expr: %s", tt.exprStr)) + c.Assert(fmt.Sprintf("%s", res.RemainedConds), Equals, tt.filterConds, Commentf("wrong filter conditions for expr: %s", tt.exprStr)) + got := fmt.Sprintf("%v", res.Ranges) + c.Assert(got, Equals, tt.resultStr, Commentf("different for expr %s", tt.exprStr)) + } +} + +func (s *testRangerSuite) TestColumnRange(c *C) { + defer testleak.AfterTest(c)() + dom, store, err := newDomainStoreWithBootstrap(c) + defer func() { + dom.Close() + store.Close() + }() + c.Assert(err, IsNil) + testKit := testkit.NewTestKit(c, store) + testKit.MustExec("use test") + testKit.MustExec("drop table if exists t") + testKit.MustExec("create table t(a int, b double, c float(3, 2), d varchar(3), e bigint unsigned)") + + tests := []struct { + colPos int + exprStr string + accessConds string + filterConds string + resultStr string + length int + }{ + { + colPos: 0, + exprStr: "a = 1 and b > 1", + accessConds: "[eq(test.t.a, 1)]", + filterConds: "[gt(test.t.b, 1)]", + resultStr: "[[1,1]]", + length: types.UnspecifiedLength, + }, + { + colPos: 1, + exprStr: "b > 1", + accessConds: "[gt(test.t.b, 1)]", + filterConds: "[]", + resultStr: "[(1,+inf]]", + length: types.UnspecifiedLength, + }, + { + colPos: 0, + exprStr: "1 = a", + accessConds: "[eq(1, test.t.a)]", + filterConds: "[]", + resultStr: "[[1,1]]", + length: types.UnspecifiedLength, + }, + { + colPos: 0, + exprStr: "a != 1", + accessConds: "[ne(test.t.a, 1)]", + filterConds: "[]", + resultStr: "[[-inf,1) (1,+inf]]", + length: types.UnspecifiedLength, + }, + { + colPos: 0, + exprStr: "1 != a", + accessConds: "[ne(1, test.t.a)]", + filterConds: "[]", + resultStr: "[[-inf,1) (1,+inf]]", + length: types.UnspecifiedLength, + }, + { + colPos: 0, + exprStr: "a > 1", + accessConds: "[gt(test.t.a, 1)]", + filterConds: "[]", + resultStr: "[(1,+inf]]", + length: types.UnspecifiedLength, + }, + { + colPos: 0, + exprStr: "1 < a", + accessConds: "[lt(1, test.t.a)]", + filterConds: "[]", + resultStr: "[(1,+inf]]", + length: types.UnspecifiedLength, + }, + { + colPos: 0, + exprStr: "a >= 1", + accessConds: "[ge(test.t.a, 1)]", + filterConds: "[]", + resultStr: "[[1,+inf]]", + length: types.UnspecifiedLength, + }, + { + colPos: 0, + exprStr: "1 <= a", + accessConds: "[le(1, test.t.a)]", + filterConds: "[]", + resultStr: "[[1,+inf]]", + length: types.UnspecifiedLength, + }, + { + colPos: 0, + exprStr: "a < 1", + accessConds: "[lt(test.t.a, 1)]", + filterConds: "[]", + resultStr: "[[-inf,1)]", + length: types.UnspecifiedLength, + }, + { + colPos: 0, + exprStr: "1 > a", + accessConds: "[gt(1, test.t.a)]", + filterConds: "[]", + resultStr: "[[-inf,1)]", + length: types.UnspecifiedLength, + }, + { + colPos: 0, + exprStr: "a <= 1", + accessConds: "[le(test.t.a, 1)]", + filterConds: "[]", + resultStr: "[[-inf,1]]", + length: types.UnspecifiedLength, + }, + { + colPos: 0, + exprStr: "1 >= a", + accessConds: "[ge(1, test.t.a)]", + filterConds: "[]", + resultStr: "[[-inf,1]]", + length: types.UnspecifiedLength, + }, + { + colPos: 0, + exprStr: "(a)", + accessConds: "[test.t.a]", + filterConds: "[]", + resultStr: "[[-inf,0) (0,+inf]]", + length: types.UnspecifiedLength, + }, + { + colPos: 0, + exprStr: "a in (1, 3, NULL, 2)", + accessConds: "[in(test.t.a, 1, 3, , 2)]", + filterConds: "[]", + resultStr: "[[1,1] [2,2] [3,3]]", + length: types.UnspecifiedLength, + }, + 
{ + colPos: 0, + exprStr: `a IN (8,8,81,45)`, + accessConds: "[in(test.t.a, 8, 8, 81, 45)]", + filterConds: "[]", + resultStr: `[[8,8] [45,45] [81,81]]`, + length: types.UnspecifiedLength, + }, + { + colPos: 0, + exprStr: "a between 1 and 2", + accessConds: "[ge(test.t.a, 1) le(test.t.a, 2)]", + filterConds: "[]", + resultStr: "[[1,2]]", + length: types.UnspecifiedLength, + }, + { + colPos: 0, + exprStr: "a not between 1 and 2", + accessConds: "[or(lt(test.t.a, 1), gt(test.t.a, 2))]", + filterConds: "[]", + resultStr: "[[-inf,1) (2,+inf]]", + length: types.UnspecifiedLength, + }, + //{ + // `a > null` will be converted to `castAsString(a) > null` which can not be extracted as access condition. + // exprStr: "a not between null and 0", + // resultStr[(0,+inf]] + //}, + { + colPos: 0, + exprStr: "a between 2 and 1", + accessConds: "[ge(test.t.a, 2) le(test.t.a, 1)]", + filterConds: "[]", + resultStr: "[]", + length: types.UnspecifiedLength, + }, + { + colPos: 0, + exprStr: "a not between 2 and 1", + accessConds: "[or(lt(test.t.a, 2), gt(test.t.a, 1))]", + filterConds: "[]", + resultStr: "[[-inf,+inf]]", + length: types.UnspecifiedLength, + }, + { + colPos: 0, + exprStr: "a IS NULL", + accessConds: "[isnull(test.t.a)]", + filterConds: "[]", + resultStr: "[[NULL,NULL]]", + length: types.UnspecifiedLength, + }, + { + colPos: 0, + exprStr: "a IS NOT NULL", + accessConds: "[not(isnull(test.t.a))]", + filterConds: "[]", + resultStr: "[[-inf,+inf]]", + length: types.UnspecifiedLength, + }, + { + colPos: 1, + exprStr: `b in (1, '2.1')`, + accessConds: "[in(test.t.b, 1, 2.1)]", + filterConds: "[]", + resultStr: "[[1,1] [2.1,2.1]]", + length: types.UnspecifiedLength, + }, + { + colPos: 0, + exprStr: `a > 9223372036854775807`, + accessConds: "[gt(test.t.a, 9223372036854775807)]", + filterConds: "[]", + resultStr: "[(9223372036854775807,+inf]]", + length: types.UnspecifiedLength, + }, + { + colPos: 2, + exprStr: `c > 111.11111111`, + accessConds: "[gt(test.t.c, 111.11111111)]", + filterConds: "[]", + resultStr: "[[111.111115,+inf]]", + length: types.UnspecifiedLength, + }, + { + colPos: 3, + exprStr: `d > 'aaaaaaaaaaaaaa'`, + accessConds: "[gt(test.t.d, aaaaaaaaaaaaaa)]", + filterConds: "[]", + resultStr: "[(\"aaaaaaaaaaaaaa\",+inf]]", + length: types.UnspecifiedLength, + }, + { + colPos: 4, + exprStr: `e > 18446744073709500000`, + accessConds: "[gt(test.t.e, 18446744073709500000)]", + filterConds: "[]", + resultStr: "[(18446744073709500000,+inf]]", + length: types.UnspecifiedLength, + }, + { + colPos: 4, + exprStr: `e > -2147483648`, + accessConds: "[gt(test.t.e, -2147483648)]", + filterConds: "[]", + resultStr: "[[0,+inf]]", + length: types.UnspecifiedLength, + }, + { + colPos: 3, + exprStr: "d = 'aab' or d = 'aac'", + accessConds: "[or(eq(test.t.d, aab), eq(test.t.d, aac))]", + filterConds: "[]", + resultStr: "[[\"a\",\"a\"]]", + length: 1, + }, + // This test case cannot be simplified to [1, 3] otherwise the index join will executes wrongly. 
+ { + colPos: 0, + exprStr: "a in (1, 2, 3)", + accessConds: "[in(test.t.a, 1, 2, 3)]", + filterConds: "", + resultStr: "[[1,1] [2,2] [3,3]]", + length: types.UnspecifiedLength, + }, + } + + ctx := context.Background() + for _, tt := range tests { + sql := "select * from t where " + tt.exprStr + sctx := testKit.Se.(sessionctx.Context) + stmts, err := session.Parse(sctx, sql) + c.Assert(err, IsNil, Commentf("error %v, for expr %s", err, tt.exprStr)) + c.Assert(stmts, HasLen, 1) + is := domain.GetDomain(sctx).InfoSchema() + err = plannercore.Preprocess(sctx, stmts[0], is) + c.Assert(err, IsNil, Commentf("error %v, for resolve name, expr %s", err, tt.exprStr)) + p, _, err := plannercore.BuildLogicalPlan(ctx, sctx, stmts[0], is) + c.Assert(err, IsNil, Commentf("error %v, for build plan, expr %s", err, tt.exprStr)) + sel := p.(plannercore.LogicalPlan).Children()[0].(*plannercore.LogicalSelection) + ds, ok := sel.Children()[0].(*plannercore.DataSource) + c.Assert(ok, IsTrue, Commentf("expr:%v", tt.exprStr)) + conds := make([]expression.Expression, len(sel.Conditions)) + for i, cond := range sel.Conditions { + conds[i] = expression.PushDownNot(sctx, cond) + } + col := expression.ColInfo2Col(sel.Schema().Columns, ds.TableInfo().Columns[tt.colPos]) + c.Assert(col, NotNil) + conds = ranger.ExtractAccessConditionsForColumn(conds, col.UniqueID) + c.Assert(fmt.Sprintf("%s", conds), Equals, tt.accessConds, Commentf("wrong access conditions for expr: %s", tt.exprStr)) + result, err := ranger.BuildColumnRange(conds, new(stmtctx.StatementContext), col.RetType, tt.length) + c.Assert(err, IsNil) + got := fmt.Sprintf("%v", result) + c.Assert(got, Equals, tt.resultStr, Commentf("different for expr %s, col: %v", tt.exprStr, col)) + } +} diff --git a/util/ranger/types.go b/util/ranger/types.go new file mode 100644 index 0000000..0f30967 --- /dev/null +++ b/util/ranger/types.go @@ -0,0 +1,163 @@ +// Copyright 2017 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package ranger + +import ( + "fmt" + "math" + "strings" + + "github.com/pingcap/errors" + "github.com/pingcap/tidb/kv" + "github.com/pingcap/tidb/sessionctx/stmtctx" + "github.com/pingcap/tidb/types" + "github.com/pingcap/tidb/util/codec" +) + +// Range represents a range generated in physical plan building phase. +type Range struct { + LowVal []types.Datum + HighVal []types.Datum + + LowExclude bool // Low value is exclusive. + HighExclude bool // High value is exclusive. +} + +// Clone clones a Range. +func (ran *Range) Clone() *Range { + newRange := &Range{ + LowVal: make([]types.Datum, 0, len(ran.LowVal)), + HighVal: make([]types.Datum, 0, len(ran.HighVal)), + LowExclude: ran.LowExclude, + HighExclude: ran.HighExclude, + } + for i, length := 0, len(ran.LowVal); i < length; i++ { + newRange.LowVal = append(newRange.LowVal, ran.LowVal[i]) + } + for i, length := 0, len(ran.HighVal); i < length; i++ { + newRange.HighVal = append(newRange.HighVal, ran.HighVal[i]) + } + return newRange +} + +// IsPoint returns if the range is a point. 
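Editorial note: IsPoint is implemented immediately below, and String and PrefixEqualLen appear later in this file. The Range type is easiest to read with a concrete value in hand, so here is a minimal illustrative sketch (not part of the patch), assuming types.MaxValueDatum() exists for the open "+inf" bound; the multi-column case mirrors the "(1 2 3, 1 2 +inf)" example in the PrefixEqualLen comment.

package main

import (
	"fmt"

	"github.com/pingcap/tidb/sessionctx/stmtctx"
	"github.com/pingcap/tidb/types"
	"github.com/pingcap/tidb/util/ranger"
)

func main() {
	sc := new(stmtctx.StatementContext)

	// A single-column point range: both bounds are 1 and inclusive.
	point := &ranger.Range{
		LowVal:  []types.Datum{types.NewIntDatum(1)},
		HighVal: []types.Datum{types.NewIntDatum(1)},
	}
	fmt.Println(point.String(), point.IsPoint(sc)) // [1,1] true

	// A three-column range fixed on the first two columns only,
	// roughly the index range for "a = 1 and b = 2 and c > 3".
	ran := &ranger.Range{
		LowVal:     []types.Datum{types.NewIntDatum(1), types.NewIntDatum(2), types.NewIntDatum(3)},
		HighVal:    []types.Datum{types.NewIntDatum(1), types.NewIntDatum(2), types.MaxValueDatum()},
		LowExclude: true,
	}
	n, err := ran.PrefixEqualLen(sc)
	fmt.Println(n, err, ran.IsPoint(sc)) // 2 <nil> false
}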
+func (ran *Range) IsPoint(sc *stmtctx.StatementContext) bool { + if len(ran.LowVal) != len(ran.HighVal) { + return false + } + for i := range ran.LowVal { + a := ran.LowVal[i] + b := ran.HighVal[i] + if a.Kind() == types.KindMinNotNull || b.Kind() == types.KindMaxValue { + return false + } + cmp, err := a.CompareDatum(sc, &b) + if err != nil { + return false + } + if cmp != 0 { + return false + } + + if a.IsNull() { + return false + } + } + return !ran.LowExclude && !ran.HighExclude +} + +// String implements the Stringer interface. +func (ran *Range) String() string { + lowStrs := make([]string, 0, len(ran.LowVal)) + for _, d := range ran.LowVal { + lowStrs = append(lowStrs, formatDatum(d, true)) + } + highStrs := make([]string, 0, len(ran.LowVal)) + for _, d := range ran.HighVal { + highStrs = append(highStrs, formatDatum(d, false)) + } + l, r := "[", "]" + if ran.LowExclude { + l = "(" + } + if ran.HighExclude { + r = ")" + } + return l + strings.Join(lowStrs, " ") + "," + strings.Join(highStrs, " ") + r +} + +// Encode encodes the range to its encoded value. +func (ran *Range) Encode(sc *stmtctx.StatementContext, lowBuffer, highBuffer []byte) ([]byte, []byte, error) { + var err error + lowBuffer, err = codec.EncodeKey(sc, lowBuffer[:0], ran.LowVal...) + if err != nil { + return nil, nil, err + } + if ran.LowExclude { + lowBuffer = kv.Key(lowBuffer).PrefixNext() + } + highBuffer, err = codec.EncodeKey(sc, highBuffer[:0], ran.HighVal...) + if err != nil { + return nil, nil, err + } + if !ran.HighExclude { + highBuffer = kv.Key(highBuffer).PrefixNext() + } + return lowBuffer, highBuffer, nil +} + +// PrefixEqualLen tells you how long the prefix of the range is a point. +// e.g. If this range is (1 2 3, 1 2 +inf), then the return value is 2. +func (ran *Range) PrefixEqualLen(sc *stmtctx.StatementContext) (int, error) { + // Here, len(ran.LowVal) always equal to len(ran.HighVal) + for i := 0; i < len(ran.LowVal); i++ { + cmp, err := ran.LowVal[i].CompareDatum(sc, &ran.HighVal[i]) + if err != nil { + return 0, errors.Trace(err) + } + if cmp != 0 { + return i, nil + } + } + return len(ran.LowVal), nil +} + +func formatDatum(d types.Datum, isLeftSide bool) string { + switch d.Kind() { + case types.KindNull: + return "NULL" + case types.KindMinNotNull: + return "-inf" + case types.KindMaxValue: + return "+inf" + case types.KindInt64: + switch d.GetInt64() { + case math.MinInt64: + if isLeftSide { + return "-inf" + } + case math.MaxInt64: + if !isLeftSide { + return "+inf" + } + } + case types.KindUint64: + if d.GetUint64() == math.MaxUint64 && !isLeftSide { + return "+inf" + } + case types.KindString, types.KindBytes: + return fmt.Sprintf("\"%v\"", d.GetValue()) + } + return fmt.Sprintf("%v", d.GetValue()) +} diff --git a/util/ranger/types_test.go b/util/ranger/types_test.go new file mode 100644 index 0000000..b76006f --- /dev/null +++ b/util/ranger/types_test.go @@ -0,0 +1,131 @@ +// Copyright 2017 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package ranger_test + +import ( + . 
"github.com/pingcap/check" + "github.com/pingcap/tidb/sessionctx/stmtctx" + "github.com/pingcap/tidb/types" + "github.com/pingcap/tidb/util/ranger" +) + +var _ = Suite(&testRangeSuite{}) + +type testRangeSuite struct { +} + +func (s *testRangeSuite) TestRange(c *C) { + simpleTests := []struct { + ran ranger.Range + str string + }{ + { + ran: ranger.Range{ + LowVal: []types.Datum{types.NewIntDatum(1)}, + HighVal: []types.Datum{types.NewIntDatum(1)}, + }, + str: "[1,1]", + }, + { + ran: ranger.Range{ + LowVal: []types.Datum{types.NewIntDatum(1)}, + HighVal: []types.Datum{types.NewIntDatum(1)}, + HighExclude: true, + }, + str: "[1,1)", + }, + { + ran: ranger.Range{ + LowVal: []types.Datum{types.NewIntDatum(1)}, + HighVal: []types.Datum{types.NewIntDatum(2)}, + LowExclude: true, + HighExclude: true, + }, + str: "(1,2)", + }, + { + ran: ranger.Range{ + LowVal: []types.Datum{types.NewFloat64Datum(1.1)}, + HighVal: []types.Datum{types.NewFloat64Datum(1.9)}, + HighExclude: true, + }, + str: "[1.1,1.9)", + }, + { + ran: ranger.Range{ + LowVal: []types.Datum{types.MinNotNullDatum()}, + HighVal: []types.Datum{types.NewIntDatum(1)}, + HighExclude: true, + }, + str: "[-inf,1)", + }, + } + for _, t := range simpleTests { + c.Assert(t.ran.String(), Equals, t.str) + } + + isPointTests := []struct { + ran ranger.Range + isPoint bool + }{ + { + ran: ranger.Range{ + LowVal: []types.Datum{types.NewIntDatum(1)}, + HighVal: []types.Datum{types.NewIntDatum(1)}, + }, + isPoint: true, + }, + { + ran: ranger.Range{ + LowVal: []types.Datum{types.NewStringDatum("abc")}, + HighVal: []types.Datum{types.NewStringDatum("abc")}, + }, + isPoint: true, + }, + { + ran: ranger.Range{ + LowVal: []types.Datum{types.NewIntDatum(1)}, + HighVal: []types.Datum{types.NewIntDatum(1), types.NewIntDatum(1)}, + }, + isPoint: false, + }, + { + ran: ranger.Range{ + LowVal: []types.Datum{types.NewIntDatum(1)}, + HighVal: []types.Datum{types.NewIntDatum(1)}, + LowExclude: true, + }, + isPoint: false, + }, + { + ran: ranger.Range{ + LowVal: []types.Datum{types.NewIntDatum(1)}, + HighVal: []types.Datum{types.NewIntDatum(1)}, + HighExclude: true, + }, + isPoint: false, + }, + { + ran: ranger.Range{ + LowVal: []types.Datum{types.NewIntDatum(1)}, + HighVal: []types.Datum{types.NewIntDatum(2)}, + }, + isPoint: false, + }, + } + sc := new(stmtctx.StatementContext) + for _, t := range isPointTests { + c.Assert(t.ran.IsPoint(sc), Equals, t.isPoint) + } +} diff --git a/util/rowDecoder/decoder.go b/util/rowDecoder/decoder.go new file mode 100644 index 0000000..299ed3d --- /dev/null +++ b/util/rowDecoder/decoder.go @@ -0,0 +1,79 @@ +// Copyright 2018 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package decoder + +import ( + "time" + + "github.com/pingcap/tidb/sessionctx" + "github.com/pingcap/tidb/table" + "github.com/pingcap/tidb/tablecodec" + "github.com/pingcap/tidb/types" + "github.com/pingcap/tidb/util/rowcodec" +) + +// Column contains the info and generated expr of column. 
+type Column struct { + Col *table.Column +} + +// RowDecoder decodes a byte slice into datums and eval the generated column value. +type RowDecoder struct { + colTypes map[int64]*types.FieldType +} + +// NewRowDecoder returns a new RowDecoder. +func NewRowDecoder(tbl table.Table, decodeColMap map[int64]Column) *RowDecoder { + colFieldMap := make(map[int64]*types.FieldType, len(decodeColMap)) + for id, col := range decodeColMap { + colFieldMap[id] = &col.Col.ColumnInfo.FieldType + } + return &RowDecoder{ + colTypes: colFieldMap, + } +} + +// DecodeAndEvalRowWithMap decodes a byte slice into datums and evaluates the generated column value. +func (rd *RowDecoder) DecodeAndEvalRowWithMap(ctx sessionctx.Context, handle int64, b []byte, decodeLoc, sysLoc *time.Location, row map[int64]types.Datum) (map[int64]types.Datum, error) { + var err error + if !rowcodec.IsNewFormat(b) { + row, err = tablecodec.DecodeRowWithMap(b, rd.colTypes, decodeLoc, row) + } else { + row, err = tablecodec.DecodeRowWithMapNew(b, rd.colTypes, decodeLoc, row) + } + if err != nil { + return nil, err + } + return row, nil +} + +// BuildFullDecodeColMap build a map that contains [columnID -> struct{*table.Column, expression.Expression}] from +// indexed columns and all of its depending columns. +func BuildFullDecodeColMap(indexedCols []*table.Column) (map[int64]Column, error) { + pendingCols := make([]*table.Column, len(indexedCols)) + copy(pendingCols, indexedCols) + decodeColMap := make(map[int64]Column, len(pendingCols)) + + for i := 0; i < len(pendingCols); i++ { + col := pendingCols[i] + if _, ok := decodeColMap[col.ID]; ok { + continue // already discovered + } + + decodeColMap[col.ID] = Column{ + Col: col, + } + } + return decodeColMap, nil +} diff --git a/util/rowcodec/bench_test.go b/util/rowcodec/bench_test.go new file mode 100644 index 0000000..8f4c142 --- /dev/null +++ b/util/rowcodec/bench_test.go @@ -0,0 +1,75 @@ +// Copyright 2019 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. 
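Editorial aside before the rowcodec benchmark file continues: the RowDecoder and BuildFullDecodeColMap introduced in util/rowDecoder above are typically wired up as in the minimal sketch below. The column IDs and types are hypothetical, the table.Column construction is simplified (real callers get []*table.Column from the table schema), and passing nil for the table argument is only acceptable because this version of NewRowDecoder never touches it.

package main

import (
	"fmt"

	"github.com/pingcap/tidb/parser/model"
	"github.com/pingcap/tidb/parser/mysql"
	"github.com/pingcap/tidb/table"
	"github.com/pingcap/tidb/types"
	decoder "github.com/pingcap/tidb/util/rowDecoder"
)

func main() {
	// Two hypothetical indexed columns; in real code these come from the table schema.
	cols := []*table.Column{
		{ColumnInfo: &model.ColumnInfo{ID: 1, FieldType: *types.NewFieldType(mysql.TypeLong)}},
		{ColumnInfo: &model.ColumnInfo{ID: 2, FieldType: *types.NewFieldType(mysql.TypeVarchar)}},
	}

	colMap, err := decoder.BuildFullDecodeColMap(cols)
	if err != nil {
		panic(err)
	}
	// The table argument is unused by this version of NewRowDecoder, so nil is
	// acceptable for a sketch; pass the real table.Table in production code.
	rd := decoder.NewRowDecoder(nil, colMap)
	fmt.Println(len(colMap), rd != nil) // 2 true

	// A real caller would then decode a raw row value, e.g.:
	//   row, err := rd.DecodeAndEvalRowWithMap(sctx, handle, rawValue, time.UTC, time.UTC, nil)
}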
+ +package rowcodec_test + +import ( + "testing" + "time" + + "github.com/pingcap/tidb/parser/mysql" + "github.com/pingcap/tidb/types" + "github.com/pingcap/tidb/util/chunk" + "github.com/pingcap/tidb/util/rowcodec" +) + +func BenchmarkEncode(b *testing.B) { + b.ReportAllocs() + oldRow := types.MakeDatums(1, "abc", 1.1) + var xb rowcodec.Encoder + var buf []byte + colIDs := []int64{1, 2, 3} + var err error + for i := 0; i < b.N; i++ { + buf, err = xb.Encode(nil, colIDs, oldRow, buf) + if err != nil { + b.Fatal(err) + } + } +} + +func BenchmarkDecode(b *testing.B) { + b.ReportAllocs() + oldRow := types.MakeDatums(1, "abc", 1.1) + colIDs := []int64{-1, 2, 3} + tps := []*types.FieldType{ + types.NewFieldType(mysql.TypeLonglong), + types.NewFieldType(mysql.TypeString), + types.NewFieldType(mysql.TypeDouble), + } + var xb rowcodec.Encoder + xRowData, err := xb.Encode(nil, colIDs, oldRow, nil) + if err != nil { + b.Fatal(err) + } + cols := make([]rowcodec.ColInfo, len(tps)) + for i, tp := range tps { + cols[i] = rowcodec.ColInfo{ + ID: colIDs[i], + Tp: int32(tp.Tp), + Flag: int32(tp.Flag), + Flen: tp.Flen, + Decimal: tp.Decimal, + Elems: tp.Elems, + } + } + decoder := rowcodec.NewChunkDecoder(cols, -1, nil, time.Local) + chk := chunk.NewChunkWithCapacity(tps, 1) + for i := 0; i < b.N; i++ { + chk.Reset() + err = decoder.DecodeToChunk(xRowData, 1, chk) + if err != nil { + b.Fatal(err) + } + } +} diff --git a/util/rowcodec/common.go b/util/rowcodec/common.go new file mode 100644 index 0000000..ef1a20b --- /dev/null +++ b/util/rowcodec/common.go @@ -0,0 +1,230 @@ +// Copyright 2019 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package rowcodec + +import ( + "encoding/binary" + "reflect" + "unsafe" + + "github.com/pingcap/errors" +) + +// CodecVer is the constant number that represent the new row format. +const CodecVer = 128 + +var errInvalidCodecVer = errors.New("invalid codec version") + +// First byte in the encoded value which specifies the encoding type. 
+const ( + NilFlag byte = 0 + BytesFlag byte = 1 + CompactBytesFlag byte = 2 + IntFlag byte = 3 + UintFlag byte = 4 + FloatFlag byte = 5 + DecimalFlag byte = 6 + VarintFlag byte = 8 + VaruintFlag byte = 9 + JSONFlag byte = 10 +) + +func bytesToU32Slice(b []byte) []uint32 { + if len(b) == 0 { + return nil + } + var u32s []uint32 + hdr := (*reflect.SliceHeader)(unsafe.Pointer(&u32s)) + hdr.Len = len(b) / 4 + hdr.Cap = hdr.Len + hdr.Data = uintptr(unsafe.Pointer(&b[0])) + return u32s +} + +func bytes2U16Slice(b []byte) []uint16 { + if len(b) == 0 { + return nil + } + var u16s []uint16 + hdr := (*reflect.SliceHeader)(unsafe.Pointer(&u16s)) + hdr.Len = len(b) / 2 + hdr.Cap = hdr.Len + hdr.Data = uintptr(unsafe.Pointer(&b[0])) + return u16s +} + +func u16SliceToBytes(u16s []uint16) []byte { + if len(u16s) == 0 { + return nil + } + var b []byte + hdr := (*reflect.SliceHeader)(unsafe.Pointer(&b)) + hdr.Len = len(u16s) * 2 + hdr.Cap = hdr.Len + hdr.Data = uintptr(unsafe.Pointer(&u16s[0])) + return b +} + +func u32SliceToBytes(u32s []uint32) []byte { + if len(u32s) == 0 { + return nil + } + var b []byte + hdr := (*reflect.SliceHeader)(unsafe.Pointer(&b)) + hdr.Len = len(u32s) * 4 + hdr.Cap = hdr.Len + hdr.Data = uintptr(unsafe.Pointer(&u32s[0])) + return b +} + +func encodeInt(buf []byte, iVal int64) []byte { + var tmp [8]byte + if int64(int8(iVal)) == iVal { + buf = append(buf, byte(iVal)) + } else if int64(int16(iVal)) == iVal { + binary.LittleEndian.PutUint16(tmp[:], uint16(iVal)) + buf = append(buf, tmp[:2]...) + } else if int64(int32(iVal)) == iVal { + binary.LittleEndian.PutUint32(tmp[:], uint32(iVal)) + buf = append(buf, tmp[:4]...) + } else { + binary.LittleEndian.PutUint64(tmp[:], uint64(iVal)) + buf = append(buf, tmp[:8]...) + } + return buf +} + +func decodeInt(val []byte) int64 { + switch len(val) { + case 1: + return int64(int8(val[0])) + case 2: + return int64(int16(binary.LittleEndian.Uint16(val))) + case 4: + return int64(int32(binary.LittleEndian.Uint32(val))) + default: + return int64(binary.LittleEndian.Uint64(val)) + } +} + +func encodeUint(buf []byte, uVal uint64) []byte { + var tmp [8]byte + if uint64(uint8(uVal)) == uVal { + buf = append(buf, byte(uVal)) + } else if uint64(uint16(uVal)) == uVal { + binary.LittleEndian.PutUint16(tmp[:], uint16(uVal)) + buf = append(buf, tmp[:2]...) + } else if uint64(uint32(uVal)) == uVal { + binary.LittleEndian.PutUint32(tmp[:], uint32(uVal)) + buf = append(buf, tmp[:4]...) + } else { + binary.LittleEndian.PutUint64(tmp[:], uint64(uVal)) + buf = append(buf, tmp[:8]...) 
+ } + return buf +} + +func decodeUint(val []byte) uint64 { + switch len(val) { + case 1: + return uint64(val[0]) + case 2: + return uint64(binary.LittleEndian.Uint16(val)) + case 4: + return uint64(binary.LittleEndian.Uint32(val)) + default: + return binary.LittleEndian.Uint64(val) + } +} + +type largeNotNullSorter Encoder + +func (s *largeNotNullSorter) Less(i, j int) bool { + return s.colIDs32[i] < s.colIDs32[j] +} + +func (s *largeNotNullSorter) Len() int { + return int(s.numNotNullCols) +} + +func (s *largeNotNullSorter) Swap(i, j int) { + s.colIDs32[i], s.colIDs32[j] = s.colIDs32[j], s.colIDs32[i] + s.values[i], s.values[j] = s.values[j], s.values[i] +} + +type smallNotNullSorter Encoder + +func (s *smallNotNullSorter) Less(i, j int) bool { + return s.colIDs[i] < s.colIDs[j] +} + +func (s *smallNotNullSorter) Len() int { + return int(s.numNotNullCols) +} + +func (s *smallNotNullSorter) Swap(i, j int) { + s.colIDs[i], s.colIDs[j] = s.colIDs[j], s.colIDs[i] + s.values[i], s.values[j] = s.values[j], s.values[i] +} + +type smallNullSorter Encoder + +func (s *smallNullSorter) Less(i, j int) bool { + nullCols := s.colIDs[s.numNotNullCols:] + return nullCols[i] < nullCols[j] +} + +func (s *smallNullSorter) Len() int { + return int(s.numNullCols) +} + +func (s *smallNullSorter) Swap(i, j int) { + nullCols := s.colIDs[s.numNotNullCols:] + nullCols[i], nullCols[j] = nullCols[j], nullCols[i] +} + +type largeNullSorter Encoder + +func (s *largeNullSorter) Less(i, j int) bool { + nullCols := s.colIDs32[s.numNotNullCols:] + return nullCols[i] < nullCols[j] +} + +func (s *largeNullSorter) Len() int { + return int(s.numNullCols) +} + +func (s *largeNullSorter) Swap(i, j int) { + nullCols := s.colIDs32[s.numNotNullCols:] + nullCols[i], nullCols[j] = nullCols[j], nullCols[i] +} + +const ( + // Length of rowkey. + rowKeyLen = 19 + // Index of record flag 'r' in rowkey used by master tidb-server. + // The rowkey format is t{8 bytes id}_r{8 bytes handle} + recordPrefixIdx = 10 +) + +// IsRowKey determine whether key is row key. +// this method will be used in unistore. +func IsRowKey(key []byte) bool { + return len(key) == rowKeyLen && key[0] == 't' && key[recordPrefixIdx] == 'r' +} + +// IsNewFormat checks whether row data is in new-format. +func IsNewFormat(rowData []byte) bool { + return rowData[0] == CodecVer +} diff --git a/util/rowcodec/decoder.go b/util/rowcodec/decoder.go new file mode 100644 index 0000000..9b24e54 --- /dev/null +++ b/util/rowcodec/decoder.go @@ -0,0 +1,355 @@ +// Copyright 2019 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package rowcodec + +import ( + "fmt" + "time" + + "github.com/pingcap/errors" + "github.com/pingcap/tidb/parser/model" + "github.com/pingcap/tidb/parser/mysql" + "github.com/pingcap/tidb/types" + "github.com/pingcap/tidb/util/chunk" + "github.com/pingcap/tidb/util/codec" +) + +// decoder contains base util for decode row. +type decoder struct { + row + columns []ColInfo + handleColID int64 + loc *time.Location +} + +// NewDecoder creates a decoder. 
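Editorial aside on the integer encoding defined in common.go above, before NewDecoder below: values are stored in the smallest of 1, 2, 4 or 8 little-endian bytes, and the stored length alone tells decodeInt which width to undo. Since encodeInt and decodeInt are unexported, this standalone sketch mirrors them rather than calling into the package.

package main

import (
	"encoding/binary"
	"fmt"
)

// encodeIntSketch mirrors rowcodec's unexported encodeInt: pick the smallest
// width (1/2/4/8 bytes, little-endian) that round-trips the signed value.
func encodeIntSketch(buf []byte, v int64) []byte {
	var tmp [8]byte
	switch {
	case int64(int8(v)) == v:
		return append(buf, byte(v))
	case int64(int16(v)) == v:
		binary.LittleEndian.PutUint16(tmp[:], uint16(v))
		return append(buf, tmp[:2]...)
	case int64(int32(v)) == v:
		binary.LittleEndian.PutUint32(tmp[:], uint32(v))
		return append(buf, tmp[:4]...)
	default:
		binary.LittleEndian.PutUint64(tmp[:], uint64(v))
		return append(buf, tmp[:8]...)
	}
}

// decodeIntSketch mirrors rowcodec's unexported decodeInt: the slice length
// is the only width information the decoder needs.
func decodeIntSketch(val []byte) int64 {
	switch len(val) {
	case 1:
		return int64(int8(val[0]))
	case 2:
		return int64(int16(binary.LittleEndian.Uint16(val)))
	case 4:
		return int64(int32(binary.LittleEndian.Uint32(val)))
	default:
		return int64(binary.LittleEndian.Uint64(val))
	}
}

func main() {
	for _, v := range []int64{-1, 300, 1 << 20, 1 << 40} {
		enc := encodeIntSketch(nil, v)
		fmt.Println(v, len(enc), decodeIntSketch(enc)) // widths: 1, 2, 4, 8 bytes
	}
}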
+func NewDecoder(columns []ColInfo, handleColID int64, loc *time.Location) *decoder { + return &decoder{ + columns: columns, + handleColID: handleColID, + loc: loc, + } +} + +// ColInfo is used as column meta info for row decoder. +type ColInfo struct { + ID int64 + Tp int32 + Flag int32 + IsPKHandle bool + + Flen int + Decimal int + Elems []string +} + +// DatumMapDecoder decodes the row to datum map. +type DatumMapDecoder struct { + decoder +} + +// NewDatumMapDecoder creates a DatumMapDecoder. +func NewDatumMapDecoder(columns []ColInfo, handleColID int64, loc *time.Location) *DatumMapDecoder { + return &DatumMapDecoder{decoder{ + columns: columns, + handleColID: handleColID, + loc: loc, + }} +} + +// DecodeToDatumMap decodes byte slices to datum map. +func (decoder *DatumMapDecoder) DecodeToDatumMap(rowData []byte, handle int64, row map[int64]types.Datum) (map[int64]types.Datum, error) { + if row == nil { + row = make(map[int64]types.Datum, len(decoder.columns)) + } + err := decoder.fromBytes(rowData) + if err != nil { + return nil, err + } + for _, col := range decoder.columns { + if col.ID == decoder.handleColID { + row[col.ID] = types.NewIntDatum(handle) + continue + } + idx, isNil, notFound := decoder.row.findColID(col.ID) + if !notFound && !isNil { + colData := decoder.getData(idx) + d, err := decoder.decodeColDatum(&col, colData) + if err != nil { + return nil, err + } + row[col.ID] = d + continue + } + + if isNil { + var d types.Datum + d.SetNull() + row[col.ID] = d + continue + } + } + return row, nil +} + +func (decoder *DatumMapDecoder) decodeColDatum(col *ColInfo, colData []byte) (types.Datum, error) { + var d types.Datum + switch byte(col.Tp) { + case mysql.TypeLonglong, mysql.TypeLong, mysql.TypeInt24, mysql.TypeShort, mysql.TypeTiny, mysql.TypeYear: + if mysql.HasUnsignedFlag(uint(col.Flag)) { + d.SetUint64(decodeUint(colData)) + } else { + d.SetInt64(decodeInt(colData)) + } + case mysql.TypeFloat: + _, fVal, err := codec.DecodeFloat(colData) + if err != nil { + return d, err + } + d.SetFloat32(float32(fVal)) + case mysql.TypeDouble: + _, fVal, err := codec.DecodeFloat(colData) + if err != nil { + return d, err + } + d.SetFloat64(fVal) + case mysql.TypeVarString, mysql.TypeVarchar, mysql.TypeString, + mysql.TypeBlob, mysql.TypeTinyBlob, mysql.TypeMediumBlob, mysql.TypeLongBlob: + d.SetBytes(colData) + case mysql.TypeBit: + byteSize := (col.Flen + 7) >> 3 + d.SetMysqlBit(types.NewBinaryLiteralFromUint(decodeUint(colData), byteSize)) + default: + return d, errors.Errorf("unknown type %d", col.Tp) + } + return d, nil +} + +// ChunkDecoder decodes the row to chunk.Chunk. +type ChunkDecoder struct { + decoder + defDatum func(i int) (types.Datum, error) +} + +// NewChunkDecoder creates a NewChunkDecoder. +func NewChunkDecoder(columns []ColInfo, handleColID int64, defDatum func(i int) (types.Datum, error), loc *time.Location) *ChunkDecoder { + return &ChunkDecoder{ + decoder: decoder{ + columns: columns, + handleColID: handleColID, + loc: loc, + }, + defDatum: defDatum, + } +} + +// DecodeToChunk decodes a row to chunk. 
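Editorial note before the DecodeToChunk implementation below: the defDatum callback is what fills in columns that are present in the schema but absent from the encoded row (for example, a column added after the row was written). The sketch below is hypothetical, assuming the usual chunk.Row accessors (GetRow/GetInt64); the column IDs and the pretend DEFAULT 42 value are made up for illustration.

package main

import (
	"fmt"
	"time"

	"github.com/pingcap/tidb/parser/mysql"
	"github.com/pingcap/tidb/types"
	"github.com/pingcap/tidb/util/chunk"
	"github.com/pingcap/tidb/util/rowcodec"
)

func main() {
	// Encode a row that only contains column 1.
	var enc rowcodec.Encoder
	data, err := enc.Encode(nil, []int64{1}, types.MakeDatums(int64(7)), nil)
	if err != nil {
		panic(err)
	}

	// Decode it against a schema with columns 1 and 2; column 2 is missing from
	// the row, so the decoder asks defDatum for its (hypothetical) default value.
	cols := []rowcodec.ColInfo{
		{ID: 1, Tp: int32(mysql.TypeLonglong)},
		{ID: 2, Tp: int32(mysql.TypeLonglong)},
	}
	defDatum := func(i int) (types.Datum, error) {
		return types.NewIntDatum(42), nil // pretend DEFAULT 42 for the added column
	}
	dec := rowcodec.NewChunkDecoder(cols, -1, defDatum, time.Local)

	tps := []*types.FieldType{
		types.NewFieldType(mysql.TypeLonglong),
		types.NewFieldType(mysql.TypeLonglong),
	}
	chk := chunk.NewChunkWithCapacity(tps, 1)
	if err := dec.DecodeToChunk(data, 100, chk); err != nil {
		panic(err)
	}
	row := chk.GetRow(0)
	fmt.Println(row.GetInt64(0), row.GetInt64(1)) // 7 42
}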
+func (decoder *ChunkDecoder) DecodeToChunk(rowData []byte, handle int64, chk *chunk.Chunk) error { + err := decoder.fromBytes(rowData) + if err != nil { + return err + } + + for colIdx, col := range decoder.columns { + if col.ID == decoder.handleColID { + chk.AppendInt64(colIdx, handle) + continue + } + + idx, isNil, notFound := decoder.row.findColID(col.ID) + if !notFound && !isNil { + colData := decoder.getData(idx) + err := decoder.decodeColToChunk(colIdx, &col, colData, chk) + if err != nil { + return err + } + continue + } + + if isNil { + chk.AppendNull(colIdx) + continue + } + + if decoder.defDatum == nil { + chk.AppendNull(colIdx) + continue + } + + defDatum, err := decoder.defDatum(colIdx) + if err != nil { + return err + } + + chk.AppendDatum(colIdx, &defDatum) + } + return nil +} + +func (decoder *ChunkDecoder) decodeColToChunk(colIdx int, col *ColInfo, colData []byte, chk *chunk.Chunk) error { + switch byte(col.Tp) { + case mysql.TypeLonglong, mysql.TypeLong, mysql.TypeInt24, mysql.TypeShort, mysql.TypeTiny, mysql.TypeYear: + if mysql.HasUnsignedFlag(uint(col.Flag)) { + chk.AppendUint64(colIdx, decodeUint(colData)) + } else { + chk.AppendInt64(colIdx, decodeInt(colData)) + } + case mysql.TypeFloat: + _, fVal, err := codec.DecodeFloat(colData) + if err != nil { + return err + } + chk.AppendFloat32(colIdx, float32(fVal)) + case mysql.TypeDouble: + _, fVal, err := codec.DecodeFloat(colData) + if err != nil { + return err + } + chk.AppendFloat64(colIdx, fVal) + case mysql.TypeVarString, mysql.TypeVarchar, mysql.TypeString, + mysql.TypeBlob, mysql.TypeTinyBlob, mysql.TypeMediumBlob, mysql.TypeLongBlob: + chk.AppendBytes(colIdx, colData) + case mysql.TypeBit: + byteSize := (col.Flen + 7) >> 3 + chk.AppendBytes(colIdx, types.NewBinaryLiteralFromUint(decodeUint(colData), byteSize)) + default: + return errors.Errorf("unknown type %d", col.Tp) + } + return nil +} + +// BytesDecoder decodes the row to old datums bytes. +type BytesDecoder struct { + decoder + defBytes func(i int) ([]byte, error) +} + +// NewByteDecoder creates a BytesDecoder. +// defBytes: provided default value bytes in old datum format(flag+colData). +func NewByteDecoder(columns []ColInfo, handleColID int64, defBytes func(i int) ([]byte, error), loc *time.Location) *BytesDecoder { + return &BytesDecoder{ + decoder: decoder{ + columns: columns, + handleColID: handleColID, + loc: loc, + }, + defBytes: defBytes, + } +} + +// DecodeToBytes decodes raw byte slice to row data. 
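Editorial note before the DecodeToBytes implementation below: the BytesDecoder re-encodes each column into old-format datum bytes (a flag byte plus payload), which codec.DecodeOne can then read back. A small hedged sketch, with a hypothetical column ID and offset map:

package main

import (
	"fmt"
	"time"

	"github.com/pingcap/tidb/parser/mysql"
	"github.com/pingcap/tidb/types"
	"github.com/pingcap/tidb/util/codec"
	"github.com/pingcap/tidb/util/rowcodec"
)

func main() {
	// Encode column 1 = "abc" in the new row format.
	var enc rowcodec.Encoder
	data, err := enc.Encode(nil, []int64{1}, types.MakeDatums("abc"), nil)
	if err != nil {
		panic(err)
	}

	cols := []rowcodec.ColInfo{{ID: 1, Tp: int32(mysql.TypeVarchar)}}
	dec := rowcodec.NewByteDecoder(cols, -1, nil, time.Local)

	// outputOffset says where each column's old-format bytes should land.
	vals, err := dec.DecodeToBytes(map[int64]int{1: 0}, 0, data, nil)
	if err != nil {
		panic(err)
	}

	// The result is old-datum encoded (flag byte + payload), so codec.DecodeOne
	// can read it back.
	_, d, err := codec.DecodeOne(vals[0])
	if err != nil {
		panic(err)
	}
	fmt.Println(string(d.GetBytes())) // abc
}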
+func (decoder *BytesDecoder) DecodeToBytes(outputOffset map[int64]int, handle int64, value []byte, cacheBytes []byte) ([][]byte, error) { + var r row + err := r.fromBytes(value) + if err != nil { + return nil, err + } + values := make([][]byte, len(outputOffset)) + for i, col := range decoder.columns { + tp := fieldType2Flag(byte(col.Tp), uint(col.Flag)&mysql.UnsignedFlag == 0) + colID := col.ID + offset := outputOffset[colID] + if col.IsPKHandle || colID == model.ExtraHandleID { + handleData := cacheBytes + if mysql.HasUnsignedFlag(uint(col.Flag)) { + handleData = append(handleData, UintFlag) + handleData = codec.EncodeUint(handleData, uint64(handle)) + } else { + handleData = append(handleData, IntFlag) + handleData = codec.EncodeInt(handleData, handle) + } + values[offset] = handleData + continue + } + + idx, isNil, notFound := r.findColID(colID) + if !notFound && !isNil { + val := r.getData(idx) + values[offset] = decoder.encodeOldDatum(tp, val) + continue + } + + if isNil { + values[offset] = []byte{NilFlag} + continue + } + + if decoder.defBytes != nil { + defVal, err := decoder.defBytes(i) + if err != nil { + return nil, err + } + if len(defVal) > 0 { + values[offset] = defVal + continue + } + } + + values[offset] = []byte{NilFlag} + } + return values, nil +} + +func (decoder *BytesDecoder) encodeOldDatum(tp byte, val []byte) []byte { + var buf []byte + switch tp { + case BytesFlag: + buf = append(buf, CompactBytesFlag) + buf = codec.EncodeCompactBytes(buf, val) + case IntFlag: + buf = append(buf, VarintFlag) + buf = codec.EncodeVarint(buf, decodeInt(val)) + case UintFlag: + buf = append(buf, VaruintFlag) + buf = codec.EncodeUvarint(buf, decodeUint(val)) + default: + buf = append(buf, tp) + buf = append(buf, val...) + } + return buf +} + +// fieldType2Flag transforms field type into kv type flag. +func fieldType2Flag(tp byte, signed bool) (flag byte) { + switch tp { + case mysql.TypeTiny, mysql.TypeShort, mysql.TypeInt24, mysql.TypeLong, mysql.TypeLonglong: + if signed { + flag = IntFlag + } else { + flag = UintFlag + } + case mysql.TypeFloat, mysql.TypeDouble: + flag = FloatFlag + case mysql.TypeBlob, mysql.TypeTinyBlob, mysql.TypeMediumBlob, mysql.TypeLongBlob, + mysql.TypeString, mysql.TypeVarchar, mysql.TypeVarString: + flag = BytesFlag + case mysql.TypeDatetime, mysql.TypeDate, mysql.TypeTimestamp: + flag = UintFlag + case mysql.TypeDuration: + flag = IntFlag + case mysql.TypeNewDecimal: + flag = DecimalFlag + case mysql.TypeYear: + flag = IntFlag + case mysql.TypeEnum, mysql.TypeBit, mysql.TypeSet: + flag = UintFlag + case mysql.TypeJSON: + flag = JSONFlag + case mysql.TypeNull: + flag = NilFlag + default: + panic(fmt.Sprintf("unknown field type %d", tp)) + } + return +} diff --git a/util/rowcodec/encoder.go b/util/rowcodec/encoder.go new file mode 100644 index 0000000..45789dd --- /dev/null +++ b/util/rowcodec/encoder.go @@ -0,0 +1,194 @@ +// Copyright 2019 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. 
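Editorial note before the Encoder implementation that follows: a hedged end-to-end sketch tying the pieces together. Encoder.Encode emits a value whose first byte is CodecVer, so IsNewFormat recognises it, and DatumMapDecoder reads it back into a map keyed by column ID. The column IDs and handle are made up; treating column 1 as the handle column means its value comes from the handle argument rather than from the row bytes.

package main

import (
	"fmt"
	"time"

	"github.com/pingcap/tidb/parser/mysql"
	"github.com/pingcap/tidb/types"
	"github.com/pingcap/tidb/util/rowcodec"
)

func main() {
	var enc rowcodec.Encoder
	colIDs := []int64{1, 2, 3}
	row := types.MakeDatums(int64(10), "abc", 1.5)

	data, err := enc.Encode(nil, colIDs, row, nil)
	if err != nil {
		panic(err)
	}
	fmt.Println(rowcodec.IsNewFormat(data)) // true: first byte is CodecVer (128)

	cols := []rowcodec.ColInfo{
		{ID: 1, Tp: int32(mysql.TypeLonglong)},
		{ID: 2, Tp: int32(mysql.TypeVarchar)},
		{ID: 3, Tp: int32(mysql.TypeDouble)},
	}
	// Column ID 1 is treated as the integer handle here, so its value comes from
	// the handle argument rather than from the encoded bytes.
	dec := rowcodec.NewDatumMapDecoder(cols, 1, time.Local)
	m, err := dec.DecodeToDatumMap(data, 10, nil)
	if err != nil {
		panic(err)
	}
	fmt.Println(m[1].GetInt64(), string(m[2].GetBytes()), m[3].GetFloat64()) // 10 abc 1.5
}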
+ +package rowcodec + +import ( + "math" + "sort" + + "github.com/pingcap/errors" + "github.com/pingcap/tidb/sessionctx/stmtctx" + "github.com/pingcap/tidb/types" + "github.com/pingcap/tidb/util/codec" +) + +// Encoder is used to encode a row. +type Encoder struct { + row + tempColIDs []int64 + values []types.Datum +} + +// Encode encodes a row from a datums slice. +func (encoder *Encoder) Encode(sc *stmtctx.StatementContext, colIDs []int64, values []types.Datum, buf []byte) ([]byte, error) { + encoder.reset() + encoder.appendColVals(colIDs, values) + numCols, notNullIdx := encoder.reformatCols() + err := encoder.encodeRowCols(sc, numCols, notNullIdx) + if err != nil { + return nil, err + } + return encoder.row.toBytes(buf[:0]), nil +} + +func (encoder *Encoder) reset() { + encoder.large = false + encoder.numNotNullCols = 0 + encoder.numNullCols = 0 + encoder.data = encoder.data[:0] + encoder.tempColIDs = encoder.tempColIDs[:0] + encoder.values = encoder.values[:0] +} + +func (encoder *Encoder) appendColVals(colIDs []int64, values []types.Datum) { + for i, colID := range colIDs { + encoder.appendColVal(colID, values[i]) + } +} + +func (encoder *Encoder) appendColVal(colID int64, d types.Datum) { + if colID > 255 { + encoder.large = true + } + if d.IsNull() { + encoder.numNullCols++ + } else { + encoder.numNotNullCols++ + } + encoder.tempColIDs = append(encoder.tempColIDs, colID) + encoder.values = append(encoder.values, d) +} + +func (encoder *Encoder) reformatCols() (numCols, notNullIdx int) { + r := &encoder.row + numCols = len(encoder.tempColIDs) + nullIdx := numCols - int(r.numNullCols) + notNullIdx = 0 + if r.large { + r.initColIDs32() + r.initOffsets32() + } else { + r.initColIDs() + r.initOffsets() + } + for i, colID := range encoder.tempColIDs { + if encoder.values[i].IsNull() { + if r.large { + r.colIDs32[nullIdx] = uint32(colID) + } else { + r.colIDs[nullIdx] = byte(colID) + } + nullIdx++ + } else { + if r.large { + r.colIDs32[notNullIdx] = uint32(colID) + } else { + r.colIDs[notNullIdx] = byte(colID) + } + encoder.values[notNullIdx] = encoder.values[i] + notNullIdx++ + } + } + if r.large { + largeNotNullSorter := (*largeNotNullSorter)(encoder) + sort.Sort(largeNotNullSorter) + if r.numNullCols > 0 { + largeNullSorter := (*largeNullSorter)(encoder) + sort.Sort(largeNullSorter) + } + } else { + smallNotNullSorter := (*smallNotNullSorter)(encoder) + sort.Sort(smallNotNullSorter) + if r.numNullCols > 0 { + smallNullSorter := (*smallNullSorter)(encoder) + sort.Sort(smallNullSorter) + } + } + return +} + +func (encoder *Encoder) encodeRowCols(sc *stmtctx.StatementContext, numCols, notNullIdx int) error { + r := &encoder.row + for i := 0; i < notNullIdx; i++ { + d := encoder.values[i] + var err error + r.data, err = EncodeValueDatum(sc, d, r.data) + if err != nil { + return err + } + // handle convert to large + if len(r.data) > math.MaxUint16 && !r.large { + r.initColIDs32() + for j := 0; j < numCols; j++ { + r.colIDs32[j] = uint32(r.colIDs[j]) + } + r.initOffsets32() + for j := 0; j <= i; j++ { + r.offsets32[j] = uint32(r.offsets[j]) + } + r.large = true + } + if r.large { + r.offsets32[i] = uint32(len(r.data)) + } else { + r.offsets[i] = uint16(len(r.data)) + } + } + // handle convert to large + if !r.large { + if len(r.data) >= math.MaxUint16 { + r.large = true + r.initColIDs32() + for i, val := range r.colIDs { + r.colIDs32[i] = uint32(val) + } + } else { + r.initOffsets() + for i, val := range r.offsets32 { + r.offsets[i] = uint16(val) + } + } + } + return nil +} + +// 
EncodeValueDatum encodes one row datum entry into bytes. +// due to encode as value, this method will flatten value type like tablecodec.flatten +func EncodeValueDatum(sc *stmtctx.StatementContext, d types.Datum, buffer []byte) (nBuffer []byte, err error) { + switch d.Kind() { + case types.KindInt64: + buffer = encodeInt(buffer, d.GetInt64()) + case types.KindUint64: + buffer = encodeUint(buffer, d.GetUint64()) + case types.KindString, types.KindBytes: + buffer = append(buffer, d.GetBytes()...) + case types.KindBinaryLiteral, types.KindMysqlBit: + // We don't need to handle errors here since the literal is ensured to be able to store in uint64 in convertToMysqlBit. + var val uint64 + val, err = d.GetBinaryLiteral().ToInt(sc) + if err != nil { + return + } + buffer = encodeUint(buffer, val) + case types.KindFloat32, types.KindFloat64: + buffer = codec.EncodeFloat(buffer, d.GetFloat64()) + case types.KindNull: + case types.KindMinNotNull: + case types.KindMaxValue: + default: + err = errors.Errorf("unsupport encode type %d", d.Kind()) + } + nBuffer = buffer + return +} diff --git a/util/rowcodec/export_test.go b/util/rowcodec/export_test.go new file mode 100644 index 0000000..323113c --- /dev/null +++ b/util/rowcodec/export_test.go @@ -0,0 +1,49 @@ +// Copyright 2019 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package rowcodec + +import ( + "github.com/pingcap/tidb/sessionctx/stmtctx" + "github.com/pingcap/tidb/types" + "github.com/pingcap/tidb/util/codec" +) + +// EncodeFromOldRow encodes a row from an old-format row. +// this method will be used in test. +func EncodeFromOldRow(encoder *Encoder, sc *stmtctx.StatementContext, oldRow, buf []byte) ([]byte, error) { + if len(oldRow) > 0 && oldRow[0] == CodecVer { + return oldRow, nil + } + encoder.reset() + for len(oldRow) > 1 { + var d types.Datum + var err error + oldRow, d, err = codec.DecodeOne(oldRow) + if err != nil { + return nil, err + } + colID := d.GetInt64() + oldRow, d, err = codec.DecodeOne(oldRow) + if err != nil { + return nil, err + } + encoder.appendColVal(colID, d) + } + numCols, notNullIdx := encoder.reformatCols() + err := encoder.encodeRowCols(sc, numCols, notNullIdx) + if err != nil { + return nil, err + } + return encoder.row.toBytes(buf[:0]), nil +} diff --git a/util/rowcodec/row.go b/util/rowcodec/row.go new file mode 100644 index 0000000..8eb07df --- /dev/null +++ b/util/rowcodec/row.go @@ -0,0 +1,192 @@ +// Copyright 2019 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package rowcodec + +import ( + "encoding/binary" +) + +// row is the struct type used to access the a row. 
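+//
+// Binary layout, as written by toBytes and read by fromBytes below:
+//
+//	byte 0        codec version (CodecVer)
+//	byte 1        flags; bit 0 set means the "large" layout is used
+//	bytes 2..3    number of not-null columns, little-endian uint16
+//	bytes 4..5    number of null columns, little-endian uint16
+//	column IDs    not-null IDs first, then null IDs, each group sorted ascending;
+//	              one byte per ID (small) or one uint32 per ID (large)
+//	offsets       end offset of each not-null value inside data;
+//	              uint16 (small) or uint32 (large)
+//	data          concatenated not-null column values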
+type row struct { + // small: colID []byte, offsets []uint16, optimized for most cases. + // large: colID []uint32, offsets []uint32. + large bool + numNotNullCols uint16 + numNullCols uint16 + colIDs []byte + + offsets []uint16 + data []byte + + // for large row + colIDs32 []uint32 + offsets32 []uint32 +} + +func (r *row) getData(i int) []byte { + var start, end uint32 + if r.large { + if i > 0 { + start = r.offsets32[i-1] + } + end = r.offsets32[i] + } else { + if i > 0 { + start = uint32(r.offsets[i-1]) + } + end = uint32(r.offsets[i]) + } + return r.data[start:end] +} + +func (r *row) fromBytes(rowData []byte) error { + if rowData[0] != CodecVer { + return errInvalidCodecVer + } + r.large = rowData[1]&1 > 0 + r.numNotNullCols = binary.LittleEndian.Uint16(rowData[2:]) + r.numNullCols = binary.LittleEndian.Uint16(rowData[4:]) + cursor := 6 + if r.large { + colIDsLen := int(r.numNotNullCols+r.numNullCols) * 4 + r.colIDs32 = bytesToU32Slice(rowData[cursor : cursor+colIDsLen]) + cursor += colIDsLen + offsetsLen := int(r.numNotNullCols) * 4 + r.offsets32 = bytesToU32Slice(rowData[cursor : cursor+offsetsLen]) + cursor += offsetsLen + } else { + colIDsLen := int(r.numNotNullCols + r.numNullCols) + r.colIDs = rowData[cursor : cursor+colIDsLen] + cursor += colIDsLen + offsetsLen := int(r.numNotNullCols) * 2 + r.offsets = bytes2U16Slice(rowData[cursor : cursor+offsetsLen]) + cursor += offsetsLen + } + r.data = rowData[cursor:] + return nil +} + +func (r *row) toBytes(buf []byte) []byte { + buf = append(buf, CodecVer) + flag := byte(0) + if r.large { + flag = 1 + } + buf = append(buf, flag) + buf = append(buf, byte(r.numNotNullCols), byte(r.numNotNullCols>>8)) + buf = append(buf, byte(r.numNullCols), byte(r.numNullCols>>8)) + if r.large { + buf = append(buf, u32SliceToBytes(r.colIDs32)...) + buf = append(buf, u32SliceToBytes(r.offsets32)...) + } else { + buf = append(buf, r.colIDs...) + buf = append(buf, u16SliceToBytes(r.offsets)...) + } + buf = append(buf, r.data...) + return buf +} + +func (r *row) findColID(colID int64) (idx int, isNil, notFound bool) { + // Search the column in not-null columns array. + i, j := 0, int(r.numNotNullCols) + for i < j { + h := int(uint(i+j) >> 1) // avoid overflow when computing h + // i ≤ h < j + var v int64 + if r.large { + v = int64(r.colIDs32[h]) + } else { + v = int64(r.colIDs[h]) + } + if v < colID { + i = h + 1 + } else if v > colID { + j = h + } else { + idx = h + return + } + } + + // Search the column in null columns array. + i, j = int(r.numNotNullCols), int(r.numNotNullCols+r.numNullCols) + for i < j { + h := int(uint(i+j) >> 1) // avoid overflow when computing h + // i ≤ h < j + var v int64 + if r.large { + v = int64(r.colIDs32[h]) + } else { + v = int64(r.colIDs[h]) + } + if v < colID { + i = h + 1 + } else if v > colID { + j = h + } else { + isNil = true + return + } + } + notFound = true + return +} + +// ColumnIsNull returns if the column value is null. Mainly used for count column aggregation. +// this method will used in unistore. 
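+// If the column is not stored in the row at all, the result falls back to the
+// default value: a nil defaultVal is reported as NULL, a non-nil one as not NULL.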
+func (r *row) ColumnIsNull(rowData []byte, colID int64, defaultVal []byte) (bool, error) { + err := r.fromBytes(rowData) + if err != nil { + return false, err + } + _, isNil, notFound := r.findColID(colID) + if notFound { + return defaultVal == nil, nil + } + return isNil, nil +} + +func (r *row) initColIDs() { + numCols := int(r.numNotNullCols + r.numNullCols) + if cap(r.colIDs) >= numCols { + r.colIDs = r.colIDs[:numCols] + } else { + r.colIDs = make([]byte, numCols) + } +} + +func (r *row) initColIDs32() { + numCols := int(r.numNotNullCols + r.numNullCols) + if cap(r.colIDs32) >= numCols { + r.colIDs32 = r.colIDs32[:numCols] + } else { + r.colIDs32 = make([]uint32, numCols) + } +} + +func (r *row) initOffsets() { + if cap(r.offsets) >= int(r.numNotNullCols) { + r.offsets = r.offsets[:r.numNotNullCols] + } else { + r.offsets = make([]uint16, r.numNotNullCols) + } +} + +func (r *row) initOffsets32() { + if cap(r.offsets32) >= int(r.numNotNullCols) { + r.offsets32 = r.offsets32[:r.numNotNullCols] + } else { + r.offsets32 = make([]uint32, r.numNotNullCols) + } +} diff --git a/util/rowcodec/rowcodec_test.go b/util/rowcodec/rowcodec_test.go new file mode 100644 index 0000000..0116679 --- /dev/null +++ b/util/rowcodec/rowcodec_test.go @@ -0,0 +1,573 @@ +// Copyright 2019 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package rowcodec_test + +import ( + "math" + "strings" + "testing" + "time" + + . "github.com/pingcap/check" + "github.com/pingcap/tidb/parser/mysql" + "github.com/pingcap/tidb/sessionctx/stmtctx" + "github.com/pingcap/tidb/tablecodec" + "github.com/pingcap/tidb/types" + "github.com/pingcap/tidb/util/chunk" + "github.com/pingcap/tidb/util/codec" + "github.com/pingcap/tidb/util/rowcodec" +) + +func TestT(t *testing.T) { + TestingT(t) +} + +var _ = Suite(&testSuite{}) + +type testSuite struct{} + +type testData struct { + id int64 + ft *types.FieldType + dt types.Datum + bt types.Datum + def *types.Datum + handle bool +} + +func (s *testSuite) TestDecodeRowWithHandle(c *C) { + handleID := int64(-1) + handleValue := int64(10000) + + encodeAndDecodeHandle := func(c *C, testData []testData) { + // transform test data into input. + colIDs := make([]int64, 0, len(testData)) + dts := make([]types.Datum, 0, len(testData)) + fts := make([]*types.FieldType, 0, len(testData)) + cols := make([]rowcodec.ColInfo, 0, len(testData)) + for i := range testData { + t := testData[i] + if !t.handle { + colIDs = append(colIDs, t.id) + dts = append(dts, t.dt) + } + fts = append(fts, t.ft) + cols = append(cols, rowcodec.ColInfo{ + ID: t.id, + Tp: int32(t.ft.Tp), + Flag: int32(t.ft.Flag), + IsPKHandle: t.handle, + Flen: t.ft.Flen, + Decimal: t.ft.Decimal, + Elems: t.ft.Elems, + }) + } + + // test encode input. + var encoder rowcodec.Encoder + sc := new(stmtctx.StatementContext) + sc.TimeZone = time.UTC + newRow, err := encoder.Encode(sc, colIDs, dts, nil) + c.Assert(err, IsNil) + + // decode to datum map. 
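+		// Note that the handle column is not part of the encoded row; the decoders
+		// take its value from the handle argument passed below.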
+ mDecoder := rowcodec.NewDatumMapDecoder(cols, -1, sc.TimeZone) + dm, err := mDecoder.DecodeToDatumMap(newRow, handleValue, nil) + c.Assert(err, IsNil) + for _, t := range testData { + d, exists := dm[t.id] + c.Assert(exists, IsTrue) + c.Assert(d, DeepEquals, t.dt) + } + + // decode to chunk. + cDecoder := rowcodec.NewChunkDecoder(cols, -1, nil, sc.TimeZone) + chk := chunk.New(fts, 1, 1) + err = cDecoder.DecodeToChunk(newRow, handleValue, chk) + c.Assert(err, IsNil) + chkRow := chk.GetRow(0) + cdt := chkRow.GetDatumRow(fts) + for i, t := range testData { + d := cdt[i] + c.Assert(d, DeepEquals, t.bt) + } + + // decode to old row bytes. + colOffset := make(map[int64]int) + for i, t := range testData { + colOffset[t.id] = i + } + bDecoder := rowcodec.NewByteDecoder(cols, -1, nil, nil) + oldRow, err := bDecoder.DecodeToBytes(colOffset, handleValue, newRow, nil) + c.Assert(err, IsNil) + for i, t := range testData { + remain, d, err := codec.DecodeOne(oldRow[i]) + c.Assert(err, IsNil) + c.Assert(len(remain), Equals, 0) + c.Assert(d, DeepEquals, t.bt) + } + } + + // encode & decode signed int. + testDataSigned := []testData{ + { + handleID, + types.NewFieldType(mysql.TypeLonglong), + types.NewIntDatum(handleValue), + types.NewIntDatum(handleValue), + nil, + true, + }, + { + 10, + types.NewFieldType(mysql.TypeLonglong), + types.NewIntDatum(1), + types.NewIntDatum(1), + nil, + false, + }, + } + encodeAndDecodeHandle(c, testDataSigned) + + // encode & decode unsigned int. + testDataUnsigned := []testData{ + { + handleID, + withUnsigned(types.NewFieldType(mysql.TypeLonglong)), + types.NewIntDatum(handleValue), // decode as chunk & map, always encode it as int + types.NewUintDatum(uint64(handleValue)), // decode as bytes will uint if unsigned. + nil, + true, + }, + { + 10, + types.NewFieldType(mysql.TypeLonglong), + types.NewIntDatum(1), + types.NewIntDatum(1), + nil, + false, + }, + } + encodeAndDecodeHandle(c, testDataUnsigned) +} + +func (s *testSuite) TestTypesNewRowCodec(c *C) { + encodeAndDecode := func(c *C, testData []testData) { + // transform test data into input. + colIDs := make([]int64, 0, len(testData)) + dts := make([]types.Datum, 0, len(testData)) + fts := make([]*types.FieldType, 0, len(testData)) + cols := make([]rowcodec.ColInfo, 0, len(testData)) + for i := range testData { + t := testData[i] + colIDs = append(colIDs, t.id) + dts = append(dts, t.dt) + fts = append(fts, t.ft) + cols = append(cols, rowcodec.ColInfo{ + ID: t.id, + Tp: int32(t.ft.Tp), + Flag: int32(t.ft.Flag), + IsPKHandle: t.handle, + Flen: t.ft.Flen, + Decimal: t.ft.Decimal, + Elems: t.ft.Elems, + }) + } + + // test encode input. + var encoder rowcodec.Encoder + sc := new(stmtctx.StatementContext) + sc.TimeZone = time.UTC + newRow, err := encoder.Encode(sc, colIDs, dts, nil) + c.Assert(err, IsNil) + + // decode to datum map. + mDecoder := rowcodec.NewDatumMapDecoder(cols, -1, sc.TimeZone) + dm, err := mDecoder.DecodeToDatumMap(newRow, -1, nil) + c.Assert(err, IsNil) + for _, t := range testData { + d, exists := dm[t.id] + c.Assert(exists, IsTrue) + c.Assert(d, DeepEquals, t.dt) + } + + // decode to chunk. + cDecoder := rowcodec.NewChunkDecoder(cols, -1, nil, sc.TimeZone) + chk := chunk.New(fts, 1, 1) + err = cDecoder.DecodeToChunk(newRow, -1, chk) + c.Assert(err, IsNil) + chkRow := chk.GetRow(0) + cdt := chkRow.GetDatumRow(fts) + for i, t := range testData { + d := cdt[i] + c.Assert(d, DeepEquals, t.dt) + } + + // decode to old row bytes. 
+ colOffset := make(map[int64]int) + for i, t := range testData { + colOffset[t.id] = i + } + bDecoder := rowcodec.NewByteDecoder(cols, -1, nil, nil) + oldRow, err := bDecoder.DecodeToBytes(colOffset, -1, newRow, nil) + c.Assert(err, IsNil) + for i, t := range testData { + remain, d, err := codec.DecodeOne(oldRow[i]) + c.Assert(err, IsNil) + c.Assert(len(remain), Equals, 0) + c.Assert(d, DeepEquals, t.bt) + } + } + + testData := []testData{ + { + 1, + types.NewFieldType(mysql.TypeLonglong), + types.NewIntDatum(1), + types.NewIntDatum(1), + nil, + false, + }, + { + 22, + withUnsigned(types.NewFieldType(mysql.TypeShort)), + types.NewUintDatum(1), + types.NewUintDatum(1), + nil, + false, + }, + { + 3, + types.NewFieldType(mysql.TypeDouble), + types.NewFloat64Datum(2), + types.NewFloat64Datum(2), + nil, + false, + }, + { + 24, + types.NewFieldType(mysql.TypeString), + types.NewBytesDatum([]byte("abc")), + types.NewBytesDatum([]byte("abc")), + nil, + false, + }, + { + 12, + types.NewFieldType(mysql.TypeYear), + types.NewIntDatum(1999), + types.NewIntDatum(1999), + nil, + false, + }, + { + 11, + types.NewFieldType(mysql.TypeNull), + types.NewDatum(nil), + types.NewDatum(nil), + nil, + false, + }, + { + 2, + types.NewFieldType(mysql.TypeNull), + types.NewDatum(nil), + types.NewDatum(nil), + nil, + false, + }, + { + 100, + types.NewFieldType(mysql.TypeNull), + types.NewDatum(nil), + types.NewDatum(nil), + nil, + false, + }, + { + 116, + types.NewFieldType(mysql.TypeFloat), + types.NewFloat32Datum(6), + types.NewFloat64Datum(6), + nil, + false, + }, + { + 119, + types.NewFieldType(mysql.TypeVarString), + types.NewBytesDatum([]byte("")), + types.NewBytesDatum([]byte("")), + nil, + false, + }, + } + + // test small + encodeAndDecode(c, testData) + + // test large colID + testData[0].id = 300 + encodeAndDecode(c, testData) + testData[0].id = 1 + + // test large data + testData[3].dt = types.NewBytesDatum([]byte(strings.Repeat("a", math.MaxUint16+1))) + testData[3].bt = types.NewBytesDatum([]byte(strings.Repeat("a", math.MaxUint16+1))) + encodeAndDecode(c, testData) +} + +func (s *testSuite) TestNilAndDefault(c *C) { + encodeAndDecode := func(c *C, testData []testData) { + // transform test data into input. + colIDs := make([]int64, 0, len(testData)) + dts := make([]types.Datum, 0, len(testData)) + cols := make([]rowcodec.ColInfo, 0, len(testData)) + fts := make([]*types.FieldType, 0, len(testData)) + for i := range testData { + t := testData[i] + if t.def == nil { + colIDs = append(colIDs, t.id) + dts = append(dts, t.dt) + } + fts = append(fts, t.ft) + cols = append(cols, rowcodec.ColInfo{ + ID: t.id, + Tp: int32(t.ft.Tp), + Flag: int32(t.ft.Flag), + IsPKHandle: t.handle, + Flen: t.ft.Flen, + Decimal: t.ft.Decimal, + Elems: t.ft.Elems, + }) + } + ddf := func(i int) (types.Datum, error) { + t := testData[i] + if t.def == nil { + var d types.Datum + d.SetNull() + return d, nil + } + return *t.def, nil + } + bdf := func(i int) ([]byte, error) { + t := testData[i] + if t.def == nil { + return nil, nil + } + return getOldDatumByte(*t.def), nil + } + // test encode input. + var encoder rowcodec.Encoder + sc := new(stmtctx.StatementContext) + sc.TimeZone = time.UTC + newRow, err := encoder.Encode(sc, colIDs, dts, nil) + c.Assert(err, IsNil) + + // decode to datum map. 
+ mDecoder := rowcodec.NewDatumMapDecoder(cols, -1, sc.TimeZone) + dm, err := mDecoder.DecodeToDatumMap(newRow, -1, nil) + c.Assert(err, IsNil) + for _, t := range testData { + d, exists := dm[t.id] + if t.def != nil { + // for datum should not fill default value. + c.Assert(exists, IsFalse) + } else { + c.Assert(exists, IsTrue) + c.Assert(d, DeepEquals, t.bt) + } + } + + //decode to chunk. + chk := chunk.New(fts, 1, 1) + cDecoder := rowcodec.NewChunkDecoder(cols, -1, ddf, sc.TimeZone) + err = cDecoder.DecodeToChunk(newRow, -1, chk) + c.Assert(err, IsNil) + chkRow := chk.GetRow(0) + cdt := chkRow.GetDatumRow(fts) + for i, t := range testData { + d := cdt[i] + c.Assert(d, DeepEquals, t.bt) + } + + // decode to old row bytes. + colOffset := make(map[int64]int) + for i, t := range testData { + colOffset[t.id] = i + } + bDecoder := rowcodec.NewByteDecoder(cols, -1, bdf, sc.TimeZone) + oldRow, err := bDecoder.DecodeToBytes(colOffset, -1, newRow, nil) + c.Assert(err, IsNil) + for i, t := range testData { + remain, d, err := codec.DecodeOne(oldRow[i]) + c.Assert(err, IsNil) + c.Assert(len(remain), Equals, 0) + c.Assert(d, DeepEquals, t.bt) + } + } + dtNilData := []testData{ + { + 1, + types.NewFieldType(mysql.TypeLonglong), + types.NewIntDatum(1), + types.NewIntDatum(1), + nil, + false, + }, + { + 2, + withUnsigned(types.NewFieldType(mysql.TypeLonglong)), + types.NewUintDatum(1), + types.NewUintDatum(9), + getDatumPoint(types.NewUintDatum(9)), + false, + }, + } + encodeAndDecode(c, dtNilData) +} + +func (s *testSuite) TestVarintCompatibility(c *C) { + encodeAndDecodeByte := func(c *C, testData []testData) { + // transform test data into input. + colIDs := make([]int64, 0, len(testData)) + dts := make([]types.Datum, 0, len(testData)) + fts := make([]*types.FieldType, 0, len(testData)) + cols := make([]rowcodec.ColInfo, 0, len(testData)) + for i := range testData { + t := testData[i] + colIDs = append(colIDs, t.id) + dts = append(dts, t.dt) + fts = append(fts, t.ft) + cols = append(cols, rowcodec.ColInfo{ + ID: t.id, + Tp: int32(t.ft.Tp), + Flag: int32(t.ft.Flag), + IsPKHandle: t.handle, + Flen: t.ft.Flen, + Decimal: t.ft.Decimal, + Elems: t.ft.Elems, + }) + } + + // test encode input. + var encoder rowcodec.Encoder + sc := new(stmtctx.StatementContext) + sc.TimeZone = time.UTC + newRow, err := encoder.Encode(sc, colIDs, dts, nil) + c.Assert(err, IsNil) + decoder := rowcodec.NewByteDecoder(cols, -1, nil, sc.TimeZone) + // decode to old row bytes. 
+ colOffset := make(map[int64]int) + for i, t := range testData { + colOffset[t.id] = i + } + oldRow, err := decoder.DecodeToBytes(colOffset, 1, newRow, nil) + c.Assert(err, IsNil) + for i, t := range testData { + oldVarint, err := tablecodec.EncodeValue(nil, nil, t.bt) // tablecodec will encode as varint/varuint + c.Assert(err, IsNil) + c.Assert(oldVarint, DeepEquals, oldRow[i]) + } + } + + testDataValue := []testData{ + { + 1, + types.NewFieldType(mysql.TypeLonglong), + types.NewIntDatum(1), + types.NewIntDatum(1), + nil, + false, + }, + { + 2, + withUnsigned(types.NewFieldType(mysql.TypeLonglong)), + types.NewUintDatum(1), + types.NewUintDatum(1), + nil, + false, + }, + } + encodeAndDecodeByte(c, testDataValue) +} + +func (s *testSuite) TestCodecUtil(c *C) { + colIDs := []int64{1, 2, 3, 4} + tps := make([]*types.FieldType, 4) + for i := 0; i < 3; i++ { + tps[i] = types.NewFieldType(mysql.TypeLonglong) + } + tps[3] = types.NewFieldType(mysql.TypeNull) + sc := new(stmtctx.StatementContext) + rd := &rowcodec.Encoder{} + oldRow, err := tablecodec.EncodeRow(sc, types.MakeDatums(1, 2, 3, nil), colIDs, nil, nil, rd) + c.Check(err, IsNil) + var ( + rb rowcodec.Encoder + newRow []byte + ) + newRow, err = rowcodec.EncodeFromOldRow(&rb, nil, oldRow, nil) + c.Assert(err, IsNil) + c.Assert(rowcodec.IsNewFormat(newRow), IsTrue) + + // test stringer for decoder. + var cols []rowcodec.ColInfo + for i, ft := range tps { + cols = append(cols, rowcodec.ColInfo{ + ID: colIDs[i], + Tp: int32(ft.Tp), + Flag: int32(ft.Flag), + IsPKHandle: false, + Flen: ft.Flen, + Decimal: ft.Decimal, + Elems: ft.Elems, + }) + } + d := rowcodec.NewDecoder(cols, -1, nil) + + // test ColumnIsNull + isNil, err := d.ColumnIsNull(newRow, 4, nil) + c.Assert(err, IsNil) + c.Assert(isNil, IsTrue) + isNil, err = d.ColumnIsNull(newRow, 1, nil) + c.Assert(err, IsNil) + c.Assert(isNil, IsFalse) + isNil, err = d.ColumnIsNull(newRow, 5, nil) + c.Assert(err, IsNil) + c.Assert(isNil, IsTrue) + isNil, err = d.ColumnIsNull(newRow, 5, []byte{1}) + c.Assert(err, IsNil) + c.Assert(isNil, IsFalse) + + // test isRowKey + c.Assert(rowcodec.IsRowKey([]byte{'b', 't'}), IsFalse) + c.Assert(rowcodec.IsRowKey([]byte{'t', 'r'}), IsFalse) +} + +var ( + withUnsigned = func(ft *types.FieldType) *types.FieldType { + ft.Flag = ft.Flag | mysql.UnsignedFlag + return ft + } + getOldDatumByte = func(d types.Datum) []byte { + b, err := tablecodec.EncodeValue(nil, nil, d) + if err != nil { + panic(err) + } + return b + } + getDatumPoint = func(d types.Datum) *types.Datum { + return &d + } +) diff --git a/util/set/float64_set.go b/util/set/float64_set.go new file mode 100644 index 0000000..e4711cc --- /dev/null +++ b/util/set/float64_set.go @@ -0,0 +1,33 @@ +// Copyright 2018 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package set + +// Float64Set is a float64 set. +type Float64Set map[float64]struct{} + +// NewFloat64Set builds a float64 set. +func NewFloat64Set() Float64Set { + return make(map[float64]struct{}) +} + +// Exist checks whether `val` exists in `s`. 
+func (s Float64Set) Exist(val float64) bool { + _, ok := s[val] + return ok +} + +// Insert inserts `val` into `s`. +func (s Float64Set) Insert(val float64) { + s[val] = struct{}{} +} diff --git a/util/set/float64_set_test.go b/util/set/float64_set_test.go new file mode 100644 index 0000000..92834bc --- /dev/null +++ b/util/set/float64_set_test.go @@ -0,0 +1,47 @@ +// Copyright 2019 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package set + +import ( + "testing" + + "github.com/pingcap/check" +) + +func TestT(t *testing.T) { + check.TestingT(t) +} + +var _ = check.Suite(&float64SetTestSuite{}) + +type float64SetTestSuite struct{} + +func (s *float64SetTestSuite) TestFloat64Set(c *check.C) { + set := NewFloat64Set() + vals := []float64{1.1, 1.2, 1.3, 1.4, 1.5, 1.6, 1.7, 1.8, 1.9, 2.0} + for i := range vals { + set.Insert(vals[i]) + set.Insert(vals[i]) + set.Insert(vals[i]) + set.Insert(vals[i]) + set.Insert(vals[i]) + } + + c.Assert(len(set), check.Equals, len(vals)) + for i := range vals { + c.Assert(set.Exist(vals[i]), check.IsTrue) + } + + c.Assert(set.Exist(3), check.IsFalse) +} diff --git a/util/set/int_set.go b/util/set/int_set.go new file mode 100644 index 0000000..dc835d7 --- /dev/null +++ b/util/set/int_set.go @@ -0,0 +1,56 @@ +// Copyright 2019 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package set + +// IntSet is a int set. +type IntSet map[int]struct{} + +// NewIntSet builds a IntSet. +func NewIntSet() IntSet { + return make(map[int]struct{}) +} + +// Exist checks whether `val` exists in `s`. +func (s IntSet) Exist(val int) bool { + _, ok := s[val] + return ok +} + +// Insert inserts `val` into `s`. +func (s IntSet) Insert(val int) { + s[val] = struct{}{} +} + +// Int64Set is a int64 set. +type Int64Set map[int64]struct{} + +// NewInt64Set builds a Int64Set. +func NewInt64Set(xs ...int64) Int64Set { + set := make(Int64Set) + for _, x := range xs { + set.Insert(x) + } + return set +} + +// Exist checks whether `val` exists in `s`. +func (s Int64Set) Exist(val int64) bool { + _, ok := s[val] + return ok +} + +// Insert inserts `val` into `s`. +func (s Int64Set) Insert(val int64) { + s[val] = struct{}{} +} diff --git a/util/set/int_set_test.go b/util/set/int_set_test.go new file mode 100644 index 0000000..0fd14e6 --- /dev/null +++ b/util/set/int_set_test.go @@ -0,0 +1,66 @@ +// Copyright 2019 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package set + +import ( + "github.com/pingcap/check" +) + +var _ = check.Suite(&intSetTestSuite{}) + +type intSetTestSuite struct{} + +func (s *intSetTestSuite) TestIntSet(c *check.C) { + set := NewIntSet() + vals := []int{1, 2, 3, 4, 5, 6, 7, 8, 9, 10} + for i := range vals { + set.Insert(vals[i]) + set.Insert(vals[i]) + set.Insert(vals[i]) + set.Insert(vals[i]) + set.Insert(vals[i]) + } + + c.Assert(len(set), check.Equals, len(vals)) + for i := range vals { + c.Assert(set.Exist(vals[i]), check.IsTrue) + } + + c.Assert(set.Exist(11), check.IsFalse) +} + +func (s *intSetTestSuite) TestInt64Set(c *check.C) { + set := NewInt64Set() + vals := []int64{1, 2, 3, 4, 5, 6, 7, 8, 9, 10} + for i := range vals { + set.Insert(vals[i]) + set.Insert(vals[i]) + set.Insert(vals[i]) + set.Insert(vals[i]) + set.Insert(vals[i]) + } + + c.Assert(len(set), check.Equals, len(vals)) + for i := range vals { + c.Assert(set.Exist(vals[i]), check.IsTrue) + } + + c.Assert(set.Exist(11), check.IsFalse) + + set = NewInt64Set(1, 2, 3, 4, 5, 6) + for i := 1; i < 7; i++ { + c.Assert(set.Exist(int64(i)), check.IsTrue) + } + c.Assert(set.Exist(7), check.IsFalse) +} diff --git a/util/set/string_set.go b/util/set/string_set.go new file mode 100644 index 0000000..019c25c --- /dev/null +++ b/util/set/string_set.go @@ -0,0 +1,48 @@ +// Copyright 2018 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package set + +// StringSet is a string set. +type StringSet map[string]struct{} + +// NewStringSet builds a float64 set. +func NewStringSet(ss ...string) StringSet { + set := make(StringSet) + for _, s := range ss { + set.Insert(s) + } + return set +} + +// Exist checks whether `val` exists in `s`. +func (s StringSet) Exist(val string) bool { + _, ok := s[val] + return ok +} + +// Insert inserts `val` into `s`. +func (s StringSet) Insert(val string) { + s[val] = struct{}{} +} + +// Intersection returns the intersection of two sets +func (s StringSet) Intersection(rhs StringSet) StringSet { + newSet := NewStringSet() + for elt := range s { + if rhs.Exist(elt) { + newSet.Insert(elt) + } + } + return newSet +} diff --git a/util/set/string_set_test.go b/util/set/string_set_test.go new file mode 100644 index 0000000..21db3fb --- /dev/null +++ b/util/set/string_set_test.go @@ -0,0 +1,63 @@ +// Copyright 2019 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package set + +import ( + "fmt" + + "github.com/pingcap/check" +) + +var _ = check.Suite(&stringSetTestSuite{}) + +type stringSetTestSuite struct{} + +func (s *stringSetTestSuite) TestStringSet(c *check.C) { + set := NewStringSet() + vals := []string{"1", "2", "3", "4", "5", "6", "7", "8", "9", "10"} + for i := range vals { + set.Insert(vals[i]) + set.Insert(vals[i]) + set.Insert(vals[i]) + set.Insert(vals[i]) + set.Insert(vals[i]) + } + + c.Assert(len(set), check.Equals, len(vals)) + for i := range vals { + c.Assert(set.Exist(vals[i]), check.IsTrue) + } + + c.Assert(set.Exist("11"), check.IsFalse) + + set = NewStringSet("1", "2", "3", "4", "5", "6") + for i := 1; i < 7; i++ { + c.Assert(set.Exist(fmt.Sprintf("%d", i)), check.IsTrue) + } + c.Assert(set.Exist("7"), check.IsFalse) + + s1 := NewStringSet("1", "2", "3") + s2 := NewStringSet("4", "2", "3") + s3 := s1.Intersection(s2) + c.Assert(s3, check.DeepEquals, NewStringSet("2", "3")) + + s4 := NewStringSet("4", "5", "3") + c.Assert(s3.Intersection(s4), check.DeepEquals, NewStringSet("3")) + + s5 := NewStringSet("4", "5") + c.Assert(s3.Intersection(s5), check.DeepEquals, NewStringSet()) + + s6 := NewStringSet() + c.Assert(s3.Intersection(s6), check.DeepEquals, NewStringSet()) +} diff --git a/util/signal/signal_posix.go b/util/signal/signal_posix.go new file mode 100644 index 0000000..6e0be3d --- /dev/null +++ b/util/signal/signal_posix.go @@ -0,0 +1,56 @@ +// Copyright 2018 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. +// +build linux darwin freebsd unix + +package signal + +import ( + "log" + "os" + "os/signal" + "runtime" + "syscall" + + "github.com/pingcap/tidb/util/logutil" + "go.uber.org/zap" +) + +// SetupSignalHandler setup signal handler for TiDB Server +func SetupSignalHandler(shudownFunc func(bool)) { + usrDefSignalChan := make(chan os.Signal, 1) + + signal.Notify(usrDefSignalChan, syscall.SIGUSR1) + go func() { + buf := make([]byte, 1<<16) + for { + sig := <-usrDefSignalChan + if sig == syscall.SIGUSR1 { + stackLen := runtime.Stack(buf, true) + log.Printf("\n=== Got signal [%s] to dump goroutine stack. ===\n%s\n=== Finished dumping goroutine stack. ===\n", sig, buf[:stackLen]) + } + } + }() + + closeSignalChan := make(chan os.Signal, 1) + signal.Notify(closeSignalChan, + syscall.SIGHUP, + syscall.SIGINT, + syscall.SIGTERM, + syscall.SIGQUIT) + + go func() { + sig := <-closeSignalChan + logutil.BgLogger().Info("got signal to exit", zap.Stringer("signal", sig)) + shudownFunc(sig == syscall.SIGQUIT) + }() +} diff --git a/util/signal/signal_windows.go b/util/signal/signal_windows.go new file mode 100644 index 0000000..79134b1 --- /dev/null +++ b/util/signal/signal_windows.go @@ -0,0 +1,41 @@ +// Copyright 2018 PingCAP, Inc. 
+// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. +// +build windows + +package signal + +import ( + "os" + "os/signal" + "syscall" + + "github.com/pingcap/tidb/util/logutil" + "go.uber.org/zap" +) + +// SetupSignalHandler setup signal handler for TiDB Server +func SetupSignalHandler(shudownFunc func(bool)) { + //todo deal with dump goroutine stack on windows + closeSignalChan := make(chan os.Signal, 1) + signal.Notify(closeSignalChan, + syscall.SIGHUP, + syscall.SIGINT, + syscall.SIGTERM, + syscall.SIGQUIT) + + go func() { + sig := <-closeSignalChan + logutil.BgLogger().Info("got signal to exit", zap.Stringer("signal", sig)) + shudownFunc(sig == syscall.SIGQUIT) + }() +} diff --git a/util/sqlexec/restricted_sql_executor.go b/util/sqlexec/restricted_sql_executor.go new file mode 100644 index 0000000..097e3cf --- /dev/null +++ b/util/sqlexec/restricted_sql_executor.go @@ -0,0 +1,98 @@ +// Copyright 2015 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package sqlexec + +import ( + "context" + + "github.com/pingcap/tidb/parser/ast" + "github.com/pingcap/tidb/util/chunk" +) + +// RestrictedSQLExecutor is an interface provides executing restricted sql statement. +// Why we need this interface? +// When we execute some management statements, we need to operate system tables. +// For example when executing create user statement, we need to check if the user already +// exists in the mysql.User table and insert a new row if not exists. In this case, we need +// a convenience way to manipulate system tables. The most simple way is executing sql statement. +// In order to execute sql statement in stmts package, we add this interface to solve dependence problem. +// And in the same time, we do not want this interface becomes a general way to run sql statement. +// We hope this could be used with some restrictions such as only allowing system tables as target, +// do not allowing recursion call. +// For more information please refer to the comments in session.ExecRestrictedSQL(). +// This is implemented in session.go. +type RestrictedSQLExecutor interface { + // ExecRestrictedSQL run sql statement in ctx with some restriction. + ExecRestrictedSQL(sql string) ([]chunk.Row, []*ast.ResultField, error) +} + +// SQLExecutor is an interface provides executing normal sql statement. +// Why we need this interface? To break circle dependence of packages. +// For example, privilege/privileges package need execute SQL, if it use +// session.Session.Execute, then privilege/privileges and tidb would become a circle. 
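+//
+// For example (illustrative only), a package can depend on this interface
+// instead of a concrete session:
+//
+//	func countUsers(ctx context.Context, exec SQLExecutor) ([]RecordSet, error) {
+//		return exec.Execute(ctx, "SELECT count(*) FROM mysql.user")
+//	}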
+type SQLExecutor interface { + Execute(ctx context.Context, sql string) ([]RecordSet, error) +} + +// SQLParser is an interface provides parsing sql statement. +// To parse a sql statement, we could run parser.New() to get a parser object, and then run Parse method on it. +// But a session already has a parser bind in it, so we define this interface and use session as its implementation, +// thus avoid allocating new parser. See session.SQLParser for more information. +type SQLParser interface { + ParseSQL(sql, charset, collation string) ([]ast.StmtNode, error) +} + +// Statement is an interface for SQL execution. +// NOTE: all Statement implementations must be safe for +// concurrent using by multiple goroutines. +// If the Exec method requires any Execution domain local data, +// they must be held out of the implementing instance. +type Statement interface { + // OriginText gets the origin SQL text. + OriginText() string + + // Exec executes SQL and gets a Recordset. + Exec(ctx context.Context) (RecordSet, error) + + // IsReadOnly returns if the statement is read only. For example: SelectStmt without lock. + IsReadOnly() bool +} + +// RecordSet is an abstract result set interface to help get data from Plan. +type RecordSet interface { + // Fields gets result fields. + Fields() []*ast.ResultField + + // Next reads records into chunk. + Next(ctx context.Context, req *chunk.Chunk) error + + // NewChunk create a chunk. + NewChunk() *chunk.Chunk + + // Close closes the underlying iterator, call Next after Close will + // restart the iteration. + Close() error +} + +// MultiQueryNoDelayResult is an interface for one no-delay result for one statement in multi-queries. +type MultiQueryNoDelayResult interface { + // AffectedRows return affected row for one statement in multi-queries. + AffectedRows() uint64 + // WarnCount return warn count for one statement in multi-queries. + WarnCount() uint16 + // Status return status when executing one statement in multi-queries. + Status() uint16 + // LastInsertID return last insert id for one statement in multi-queries. + LastInsertID() uint64 +} diff --git a/util/stringutil/string_util.go b/util/stringutil/string_util.go new file mode 100644 index 0000000..065b739 --- /dev/null +++ b/util/stringutil/string_util.go @@ -0,0 +1,312 @@ +// Copyright 2015 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package stringutil + +import ( + "bytes" + "fmt" + "strings" + "unicode/utf8" + + "github.com/pingcap/errors" + "github.com/pingcap/tidb/util/hack" +) + +// ErrSyntax indicates that a value does not have the right syntax for the target type. +var ErrSyntax = errors.New("invalid syntax") + +// UnquoteChar decodes the first character or byte in the escaped string +// or character literal represented by the string s. 
+// It returns four values: +// +//1) value, the decoded Unicode code point or byte value; +//2) multibyte, a boolean indicating whether the decoded character requires a multibyte UTF-8 representation; +//3) tail, the remainder of the string after the character; and +//4) an error that will be nil if the character is syntactically valid. +// +// The second argument, quote, specifies the type of literal being parsed +// and therefore which escaped quote character is permitted. +// If set to a single quote, it permits the sequence \' and disallows unescaped '. +// If set to a double quote, it permits \" and disallows unescaped ". +// If set to zero, it does not permit either escape and allows both quote characters to appear unescaped. +// Different with strconv.UnquoteChar, it permits unnecessary backslash. +func UnquoteChar(s string, quote byte) (value []byte, tail string, err error) { + // easy cases + switch c := s[0]; { + case c == quote: + err = errors.Trace(ErrSyntax) + return + case c >= utf8.RuneSelf: + r, size := utf8.DecodeRuneInString(s) + if r == utf8.RuneError { + value = append(value, c) + return value, s[1:], nil + } + value = append(value, string(r)...) + return value, s[size:], nil + case c != '\\': + value = append(value, c) + return value, s[1:], nil + } + // hard case: c is backslash + if len(s) <= 1 { + err = errors.Trace(ErrSyntax) + return + } + c := s[1] + s = s[2:] + switch c { + case 'b': + value = append(value, '\b') + case 'n': + value = append(value, '\n') + case 'r': + value = append(value, '\r') + case 't': + value = append(value, '\t') + case 'Z': + value = append(value, '\032') + case '0': + value = append(value, '\000') + case '_', '%': + value = append(value, '\\') + value = append(value, c) + case '\\': + value = append(value, '\\') + case '\'', '"': + value = append(value, c) + default: + value = append(value, c) + } + tail = s + return +} + +// Unquote interprets s as a single-quoted, double-quoted, +// or backquoted Go string literal, returning the string value +// that s quotes. For example: test=`"\"\n"` (hex: 22 5c 22 5c 6e 22) +// should be converted to `"\n` (hex: 22 0a). +func Unquote(s string) (t string, err error) { + n := len(s) + if n < 2 { + return "", errors.Trace(ErrSyntax) + } + quote := s[0] + if quote != s[n-1] { + return "", errors.Trace(ErrSyntax) + } + s = s[1 : n-1] + if quote != '"' && quote != '\'' { + return "", errors.Trace(ErrSyntax) + } + // Avoid allocation. No need to convert if there is no '\' + if strings.IndexByte(s, '\\') == -1 && strings.IndexByte(s, quote) == -1 { + return s, nil + } + buf := make([]byte, 0, 3*len(s)/2) // Try to avoid more allocations. + for len(s) > 0 { + mb, ss, err := UnquoteChar(s, quote) + if err != nil { + return "", errors.Trace(err) + } + s = ss + buf = append(buf, mb...) + } + return string(buf), nil +} + +const ( + patMatch = iota + 1 + patOne + patAny +) + +// CompilePattern handles escapes and wild cards convert pattern characters and +// pattern types. +func CompilePattern(pattern string, escape byte) (patChars, patTypes []byte) { + var lastAny bool + patChars = make([]byte, len(pattern)) + patTypes = make([]byte, len(pattern)) + patLen := 0 + for i := 0; i < len(pattern); i++ { + var tp byte + var c = pattern[i] + switch c { + case escape: + lastAny = false + tp = patMatch + if i < len(pattern)-1 { + i++ + c = pattern[i] + if c == escape || c == '_' || c == '%' { + // Valid escape. + } else { + // Invalid escape, fall back to escape byte. 
+ // mysql will treat escape character as the origin value even + // the escape sequence is invalid in Go or C. + // e.g., \m is invalid in Go, but in MySQL we will get "m" for select '\m'. + // Following case is correct just for escape \, not for others like +. + // TODO: Add more checks for other escapes. + i-- + c = escape + } + } + case '_': + if lastAny { + continue + } + tp = patOne + case '%': + if lastAny { + continue + } + lastAny = true + tp = patAny + default: + lastAny = false + tp = patMatch + } + patChars[patLen] = c + patTypes[patLen] = tp + patLen++ + } + patChars = patChars[:patLen] + patTypes = patTypes[:patLen] + return +} + +// NOTE: Currently tikv's like function is case sensitive, so we keep its behavior here. +func matchByteCI(a, b byte) bool { + return a == b + // We may reuse below code block when like function go back to case insensitive. + /* + if a == b { + return true + } + if a >= 'a' && a <= 'z' && a-caseDiff == b { + return true + } + return a >= 'A' && a <= 'Z' && a+caseDiff == b + */ +} + +// CompileLike2Regexp convert a like `lhs` to a regular expression +func CompileLike2Regexp(str string) string { + patChars, patTypes := CompilePattern(str, '\\') + var result []byte + for i := 0; i < len(patChars); i++ { + switch patTypes[i] { + case patMatch: + result = append(result, patChars[i]) + case patOne: + // .*. == .* + if !bytes.HasSuffix(result, []byte{'.', '*'}) { + result = append(result, '.') + } + case patAny: + // ..* == .* + if bytes.HasSuffix(result, []byte{'.'}) { + result = append(result, '*') + continue + } + // .*.* == .* + if !bytes.HasSuffix(result, []byte{'.', '*'}) { + result = append(result, '.') + result = append(result, '*') + } + } + } + return string(result) +} + +// DoMatch matches the string with patChars and patTypes. +// The algorithm has linear time complexity. +// https://research.swtch.com/glob +func DoMatch(str string, patChars, patTypes []byte) bool { + var sIdx, pIdx, nextSIdx, nextPIdx int + for pIdx < len(patChars) || sIdx < len(str) { + if pIdx < len(patChars) { + switch patTypes[pIdx] { + case patMatch: + if sIdx < len(str) && matchByteCI(str[sIdx], patChars[pIdx]) { + pIdx++ + sIdx++ + continue + } + case patOne: + if sIdx < len(str) { + pIdx++ + sIdx++ + continue + } + case patAny: + // Try to match at sIdx. + // If that doesn't work out, + // restart at sIdx+1 next. + nextPIdx = pIdx + nextSIdx = sIdx + 1 + pIdx++ + continue + } + } + // Mismatch. Maybe restart. + if 0 < nextSIdx && nextSIdx <= len(str) { + pIdx = nextPIdx + sIdx = nextSIdx + continue + } + return false + } + // Matched all of pattern to all of name. Success. + return true +} + +// IsExactMatch return true if no wildcard character +func IsExactMatch(patTypes []byte) bool { + for _, pt := range patTypes { + if pt != patMatch { + return false + } + } + return true +} + +// Copy deep copies a string. +func Copy(src string) string { + return string(hack.Slice(src)) +} + +// StringerFunc defines string func implement fmt.Stringer. +type StringerFunc func() string + +// String implements fmt.Stringer +func (l StringerFunc) String() string { + return l() +} + +// MemoizeStr returns memoized version of stringFunc. +func MemoizeStr(l func() string) fmt.Stringer { + return StringerFunc(func() string { + return l() + }) +} + +// StringerStr defines a alias to normal string. 
+// implement fmt.Stringer +type StringerStr string + +// String implements fmt.Stringer +func (i StringerStr) String() string { + return string(i) +} diff --git a/util/stringutil/string_util_test.go b/util/stringutil/string_util_test.go new file mode 100644 index 0000000..8f9ffa6 --- /dev/null +++ b/util/stringutil/string_util_test.go @@ -0,0 +1,200 @@ +// Copyright 2015 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package stringutil + +import ( + "testing" + + . "github.com/pingcap/check" + "github.com/pingcap/tidb/util/testleak" +) + +func TestT(t *testing.T) { + CustomVerboseFlag = true + TestingT(t) +} + +var _ = Suite(&testStringUtilSuite{}) + +type testStringUtilSuite struct { +} + +func (s *testStringUtilSuite) TestUnquote(c *C) { + defer testleak.AfterTest(c)() + table := []struct { + str string + expect string + ok bool + }{ + {``, ``, false}, + {`'`, ``, false}, + {`'abc"`, ``, false}, + {`abcdea`, ``, false}, + {`'abc'def'`, ``, false}, + {`"abc\"`, ``, false}, + + {`"abcdef"`, `abcdef`, true}, + {`"abc'def"`, `abc'def`, true}, + {`"\a汉字测试"`, `a汉字测试`, true}, + {`"☺"`, `☺`, true}, + {`"\xFF"`, `xFF`, true}, + {`"\U00010111"`, `U00010111`, true}, + {`"\U0001011111"`, `U0001011111`, true}, + {`"\a\b\f\n\r\t\v\\\""`, "a\bf\n\r\tv\\\"", true}, + {`"\Z\%\_"`, "\032" + `\%\_`, true}, + {`"abc\0"`, "abc\000", true}, + {`"abc\"abc"`, `abc"abc`, true}, + + {`'abcdef'`, `abcdef`, true}, + {`'"'`, "\"", true}, + {`'\a\b\f\n\r\t\v\\\''`, "a\bf\n\r\tv\\'", true}, + {`' '`, ` `, true}, + {"'\\a汉字'", "a汉字", true}, + {"'\\a\x90'", "a\x90", true}, + {"\"\\a\x18èàø»\x05\"", "a\x18èàø»\x05", true}, + } + + for _, t := range table { + x, err := Unquote(t.str) + c.Assert(x, Equals, t.expect) + comment := Commentf("source %v", t.str) + if t.ok { + c.Assert(err, IsNil, comment) + } else { + c.Assert(err, NotNil, comment) + } + } +} + +func (s *testStringUtilSuite) TestPatternMatch(c *C) { + defer testleak.AfterTest(c)() + tbl := []struct { + pattern string + input string + escape byte + match bool + }{ + {``, `a`, '\\', false}, + {`a`, `a`, '\\', true}, + {`a`, `b`, '\\', false}, + {`aA`, `aA`, '\\', true}, + {`_`, `a`, '\\', true}, + {`_`, `ab`, '\\', false}, + {`__`, `b`, '\\', false}, + {`%`, `abcd`, '\\', true}, + {`%`, ``, '\\', true}, + {`%b`, `AAA`, '\\', false}, + {`%a%`, `BBB`, '\\', false}, + {`a%`, `BBB`, '\\', false}, + {`\%a`, `%a`, '\\', true}, + {`\%a`, `aa`, '\\', false}, + {`\_a`, `_a`, '\\', true}, + {`\_a`, `aa`, '\\', false}, + {`\\_a`, `\xa`, '\\', true}, + {`\a\b`, `\a\b`, '\\', true}, + {`%%_`, `abc`, '\\', true}, + {`%_%_aA`, "aaaA", '\\', true}, + {`+_a`, `_a`, '+', true}, + {`+%a`, `%a`, '+', true}, + {`\%a`, `%a`, '+', false}, + {`++a`, `+a`, '+', true}, + {`++_a`, `+xa`, '+', true}, + // We may reopen these test when like function go back to case insensitive. 
+ /* + {"_ab", "AAB", '\\', true}, + {"%a%", "BAB", '\\', true}, + {"%a", "AAA", '\\', true}, + {"b%", "BBB", '\\', true}, + */ + } + for _, v := range tbl { + patChars, patTypes := CompilePattern(v.pattern, v.escape) + match := DoMatch(v.input, patChars, patTypes) + c.Assert(match, Equals, v.match, Commentf("%v", v)) + } +} + +func (s *testStringUtilSuite) TestCompileLike2Regexp(c *C) { + defer testleak.AfterTest(c)() + tbl := []struct { + pattern string + regexp string + }{ + {``, ``}, + {`a`, `a`}, + {`aA`, `aA`}, + {`_`, `.`}, + {`__`, `..`}, + {`%`, `.*`}, + {`%b`, `.*b`}, + {`%a%`, `.*a.*`}, + {`a%`, `a.*`}, + {`\%a`, `%a`}, + {`\_a`, `_a`}, + {`\\_a`, `\.a`}, + {`\a\b`, `\a\b`}, + {`%%_`, `.*`}, + {`%_%_aA`, ".*aA"}, + } + for _, v := range tbl { + result := CompileLike2Regexp(v.pattern) + c.Assert(result, Equals, v.regexp, Commentf("%v", v)) + } +} + +func (s *testStringUtilSuite) TestIsExactMatch(c *C) { + defer testleak.AfterTest(c)() + tbl := []struct { + pattern string + escape byte + exactMatch bool + }{ + {``, '\\', true}, + {`_`, '\\', false}, + {`%`, '\\', false}, + {`a`, '\\', true}, + {`a_`, '\\', false}, + {`a%`, '\\', false}, + {`a\_`, '\\', true}, + {`a\%`, '\\', true}, + {`a\\`, '\\', true}, + {`a\\_`, '\\', false}, + {`a+%`, '+', true}, + {`a\%`, '+', false}, + {`a++`, '+', true}, + {`a++_`, '+', false}, + } + for _, v := range tbl { + _, patTypes := CompilePattern(v.pattern, v.escape) + c.Assert(IsExactMatch(patTypes), Equals, v.exactMatch, Commentf("%v", v)) + } +} + +func BenchmarkMatchSpecial(b *testing.B) { + var ( + pattern = `a%a%a%a%a%a%a%a%b` + target = `aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa` + escape = byte('\\') + ) + + patChars, patTypes := CompilePattern(pattern, escape) + + b.ResetTimer() + for i := 0; i < b.N; i++ { + match := DoMatch(target, patChars, patTypes) + if match { + b.Fatal("Unmatch expected.") + } + } +} diff --git a/util/testkit/ctestkit.go b/util/testkit/ctestkit.go new file mode 100644 index 0000000..8c12e98 --- /dev/null +++ b/util/testkit/ctestkit.go @@ -0,0 +1,222 @@ +// Copyright 2019 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +// +build !codes + +package testkit + +import ( + "context" + "math/rand" + "sync/atomic" + + "github.com/pingcap/check" + "github.com/pingcap/errors" + "github.com/pingcap/tidb/kv" + "github.com/pingcap/tidb/session" + "github.com/pingcap/tidb/util" + "github.com/pingcap/tidb/util/sqlexec" +) + +type sessionCtxKeyType struct{} + +var sessionKey = sessionCtxKeyType{} + +func getSession(ctx context.Context) session.Session { + s := ctx.Value(sessionKey) + if s == nil { + return nil + } + return s.(session.Session) +} + +func setSession(ctx context.Context, se session.Session) context.Context { + return context.WithValue(ctx, sessionKey, se) +} + +// CTestKit is a utility to run sql test with concurrent execution support. +type CTestKit struct { + c *check.C + store kv.Storage +} + +// NewCTestKit returns a new *CTestKit. 
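+//
+// Typical usage (illustrative):
+//
+//	tk := NewCTestKit(c, store)
+//	ctx := tk.OpenSessionWithDB(context.Background(), "test")
+//	defer tk.CloseSession(ctx)
+//	tk.MustExec(ctx, "create table t (a int)")
+//	tk.MustQuery(ctx, "select count(*) from t").Check(Rows("0"))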
+func NewCTestKit(c *check.C, store kv.Storage) *CTestKit { + return &CTestKit{ + c: c, + store: store, + } +} + +// OpenSession opens new session ctx if no exists one. +func (tk *CTestKit) OpenSession(ctx context.Context) context.Context { + if getSession(ctx) == nil { + se, err := session.CreateSession4Test(tk.store) + tk.c.Assert(err, check.IsNil) + id := atomic.AddUint64(&connectionID, 1) + se.SetConnectionID(id) + ctx = setSession(ctx, se) + } + return ctx +} + +// OpenSessionWithDB opens new session ctx if no exists one and use db. +func (tk *CTestKit) OpenSessionWithDB(ctx context.Context, db string) context.Context { + ctx = tk.OpenSession(ctx) + tk.MustExec(ctx, "use "+db) + return ctx +} + +// CloseSession closes exists session from ctx. +func (tk *CTestKit) CloseSession(ctx context.Context) { + se := getSession(ctx) + tk.c.Assert(se, check.NotNil) + se.Close() +} + +// Exec executes a sql statement. +func (tk *CTestKit) Exec(ctx context.Context, sql string) (sqlexec.RecordSet, error) { + var err error + tk.c.Assert(getSession(ctx), check.NotNil) + var rss []sqlexec.RecordSet + rss, err = getSession(ctx).Execute(ctx, sql) + if err == nil && len(rss) > 0 { + return rss[0], nil + } + return nil, err +} + +// CheckExecResult checks the affected rows and the insert id after executing MustExec. +func (tk *CTestKit) CheckExecResult(ctx context.Context, affectedRows, insertID int64) { + tk.c.Assert(getSession(ctx), check.NotNil) + tk.c.Assert(affectedRows, check.Equals, int64(getSession(ctx).AffectedRows())) + tk.c.Assert(insertID, check.Equals, int64(getSession(ctx).LastInsertID())) +} + +// MustExec executes a sql statement and asserts nil error. +func (tk *CTestKit) MustExec(ctx context.Context, sql string) { + res, err := tk.Exec(ctx, sql) + tk.c.Assert(err, check.IsNil, check.Commentf("sql:%s, error stack %v", sql, errors.ErrorStack(err))) + if res != nil { + tk.c.Assert(res.Close(), check.IsNil) + } +} + +// MustQuery query the statements and returns result rows. +// If expected result is set it asserts the query result equals expected result. +func (tk *CTestKit) MustQuery(ctx context.Context, sql string) *Result { + comment := check.Commentf("sql:%s", sql) + rs, err := tk.Exec(ctx, sql) + tk.c.Assert(errors.ErrorStack(err), check.Equals, "", comment) + tk.c.Assert(rs, check.NotNil, comment) + return tk.resultSetToResult(ctx, rs, comment) +} + +// resultSetToResult converts ast.RecordSet to testkit.Result. +// It is used to check results of execute statement in binary mode. +func (tk *CTestKit) resultSetToResult(ctx context.Context, rs sqlexec.RecordSet, comment check.CommentInterface) *Result { + rows, err := session.GetRows4Test(context.Background(), getSession(ctx), rs) + tk.c.Assert(errors.ErrorStack(err), check.Equals, "", comment) + err = rs.Close() + tk.c.Assert(errors.ErrorStack(err), check.Equals, "", comment) + sRows := make([][]string, len(rows)) + for i := range rows { + row := rows[i] + iRow := make([]string, row.Len()) + for j := 0; j < row.Len(); j++ { + if row.IsNull(j) { + iRow[j] = "" + } else { + d := row.GetDatum(j, &rs.Fields()[j].Column.FieldType) + iRow[j], err = d.ToString() + tk.c.Assert(err, check.IsNil) + } + } + sRows[i] = iRow + } + return &Result{rows: sRows, c: tk.c, comment: comment} +} + +// ConcurrentRun run test in current. +// - concurrent: controls the concurrent worker count. +// - loops: controls run test how much times. +// - prepareFunc: provide test data and will be called for every loop. 
+// - checkFunc: used to do some check after all workers done. +// works like create table better be put in front of this method calling. +// see more example at TestBatchInsertWithOnDuplicate +func (tk *CTestKit) ConcurrentRun(c *check.C, concurrent int, loops int, + prepareFunc func(ctx context.Context, tk *CTestKit, concurrent int, currentLoop int) [][][]interface{}, + writeFunc func(ctx context.Context, tk *CTestKit, input [][]interface{}), + checkFunc func(ctx context.Context, tk *CTestKit)) { + var ( + channel = make([]chan [][]interface{}, concurrent) + ctxs = make([]context.Context, concurrent) + dones = make([]context.CancelFunc, concurrent) + ) + for i := 0; i < concurrent; i++ { + w := i + channel[w] = make(chan [][]interface{}, 1) + ctxs[w], dones[w] = context.WithCancel(context.Background()) + ctxs[w] = tk.OpenSessionWithDB(ctxs[w], "test") + go func() { + defer func() { + r := recover() + if r != nil { + c.Fatal(r, string(util.GetStack())) + } + dones[w]() + }() + for input := range channel[w] { + writeFunc(ctxs[w], tk, input) + } + }() + } + defer func() { + for i := 0; i < concurrent; i++ { + tk.CloseSession(ctxs[i]) + } + }() + + ctx := tk.OpenSessionWithDB(context.Background(), "test") + defer tk.CloseSession(ctx) + tk.MustExec(ctx, "use test") + + for j := 0; j < loops; j++ { + datas := prepareFunc(ctx, tk, concurrent, j) + for i := 0; i < concurrent; i++ { + channel[i] <- datas[i] + } + } + + for i := 0; i < concurrent; i++ { + close(channel[i]) + } + + for i := 0; i < concurrent; i++ { + <-ctxs[i].Done() + } + checkFunc(ctx, tk) +} + +// PermInt returns, as a slice of n ints, a pseudo-random permutation of the integers [0,n). +func (tk *CTestKit) PermInt(n int) []interface{} { + var v []interface{} + for _, i := range rand.Perm(n) { + v = append(v, i) + } + return v +} + +// IgnoreError ignores error and make errcheck tool happy. +// Deprecated: it's normal to ignore some error in concurrent test, but please don't use this method in other place. +func (tk *CTestKit) IgnoreError(_ error) {} diff --git a/util/testkit/fake.go b/util/testkit/fake.go new file mode 100644 index 0000000..20321b8 --- /dev/null +++ b/util/testkit/fake.go @@ -0,0 +1,3 @@ +// +build codes + +package testkit diff --git a/util/testkit/testkit.go b/util/testkit/testkit.go new file mode 100644 index 0000000..64f9174 --- /dev/null +++ b/util/testkit/testkit.go @@ -0,0 +1,264 @@ +// Copyright 2015 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +// +build !codes + +package testkit + +import ( + "bytes" + "context" + "fmt" + "sort" + "strings" + "sync/atomic" + + "github.com/pingcap/check" + "github.com/pingcap/errors" + "github.com/pingcap/tidb/domain" + "github.com/pingcap/tidb/kv" + "github.com/pingcap/tidb/parser/model" + "github.com/pingcap/tidb/parser/terror" + "github.com/pingcap/tidb/session" + "github.com/pingcap/tidb/util/sqlexec" + "github.com/pingcap/tidb/util/testutil" +) + +// TestKit is a utility to run sql test. +type TestKit struct { + c *check.C + store kv.Storage + Se session.Session +} + +// Result is the result returned by MustQuery. 
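+//
+// Rows are kept in their printed string form, so a check compares strings,
+// e.g. (illustrative):
+//
+//	tk.MustQuery("select 1, 'a'").Check(Rows("1 a"))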
+type Result struct {
+	rows    [][]string
+	comment check.CommentInterface
+	c       *check.C
+}
+
+// Check asserts that the result equals the expected results.
+func (res *Result) Check(expected [][]interface{}) {
+	resBuff := bytes.NewBufferString("")
+	for _, row := range res.rows {
+		fmt.Fprintf(resBuff, "%s\n", row)
+	}
+	needBuff := bytes.NewBufferString("")
+	for _, row := range expected {
+		fmt.Fprintf(needBuff, "%s\n", row)
+	}
+	res.c.Assert(resBuff.String(), check.Equals, needBuff.String(), res.comment)
+}
+
+// CheckAt asserts that the result of the selected columns equals the expected results.
+func (res *Result) CheckAt(cols []int, expected [][]interface{}) {
+	for _, e := range expected {
+		res.c.Assert(len(cols), check.Equals, len(e))
+	}
+
+	rows := make([][]string, 0, len(expected))
+	for i := range res.rows {
+		row := make([]string, 0, len(cols))
+		for _, r := range cols {
+			row = append(row, res.rows[i][r])
+		}
+		rows = append(rows, row)
+	}
+	got := fmt.Sprintf("%s", rows)
+	need := fmt.Sprintf("%s", expected)
+	res.c.Assert(got, check.Equals, need, res.comment)
+}
+
+// Rows returns the result data.
+func (res *Result) Rows() [][]interface{} {
+	ifacesSlice := make([][]interface{}, len(res.rows))
+	for i := range res.rows {
+		ifaces := make([]interface{}, len(res.rows[i]))
+		for j := range res.rows[i] {
+			ifaces[j] = res.rows[i][j]
+		}
+		ifacesSlice[i] = ifaces
+	}
+	return ifacesSlice
+}
+
+// Sort sorts and returns the result.
+func (res *Result) Sort() *Result {
+	sort.Slice(res.rows, func(i, j int) bool {
+		a := res.rows[i]
+		b := res.rows[j]
+		for i := range a {
+			if a[i] < b[i] {
+				return true
+			} else if a[i] > b[i] {
+				return false
+			}
+		}
+		return false
+	})
+	return res
+}
+
+// NewTestKit returns a new *TestKit.
+func NewTestKit(c *check.C, store kv.Storage) *TestKit {
+	return &TestKit{
+		c:     c,
+		store: store,
+	}
+}
+
+// NewTestKitWithInit returns a new *TestKit and creates a session.
+func NewTestKitWithInit(c *check.C, store kv.Storage) *TestKit {
+	tk := NewTestKit(c, store)
+	// Use test and prepare a session.
+	tk.MustExec("use test")
+	return tk
+}
+
+var connectionID uint64
+
+// Exec executes a SQL statement.
+func (tk *TestKit) Exec(sql string) (sqlexec.RecordSet, error) {
+	var err error
+	if tk.Se == nil {
+		tk.Se, err = session.CreateSession4Test(tk.store)
+		tk.c.Assert(err, check.IsNil)
+		id := atomic.AddUint64(&connectionID, 1)
+		tk.Se.SetConnectionID(id)
+	}
+	ctx := context.Background()
+	var rss []sqlexec.RecordSet
+	rss, err = tk.Se.Execute(ctx, sql)
+	if err == nil && len(rss) > 0 {
+		return rss[0], nil
+	}
+	return nil, errors.Trace(err)
+}
+
+// CheckExecResult checks the affected rows and the insert ID after executing MustExec.
+func (tk *TestKit) CheckExecResult(affectedRows, insertID int64) {
+	tk.c.Assert(affectedRows, check.Equals, int64(tk.Se.AffectedRows()))
+	tk.c.Assert(insertID, check.Equals, int64(tk.Se.LastInsertID()))
+}
+
+// MustExec executes a SQL statement and asserts that the error is nil.
+func (tk *TestKit) MustExec(sql string) {
+	res, err := tk.Exec(sql)
+	tk.c.Assert(err, check.IsNil, check.Commentf("sql:%s, error stack %v", sql, errors.ErrorStack(err)))
+	if res != nil {
+		tk.c.Assert(res.Close(), check.IsNil)
+	}
+}
+
+// HasPlan checks if the result execution plan contains a specific plan.
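+//
+// A usage sketch (the query and the expected operator name are illustrative;
+// the exact operator names depend on the optimizer output):
+//
+//	c.Assert(tk.HasPlan("select * from t where a = 1", "TableReader"), check.IsTrue)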
+func (tk *TestKit) HasPlan(sql string, plan string) bool {
+	rs := tk.MustQuery("explain " + sql)
+	for i := range rs.rows {
+		if strings.Contains(rs.rows[i][0], plan) {
+			return true
+		}
+	}
+	return false
+}
+
+// MustUseIndex checks if the result execution plan contains the specific index(es).
+func (tk *TestKit) MustUseIndex(sql string, index string) bool {
+	rs := tk.MustQuery("explain " + sql)
+	for i := range rs.rows {
+		if strings.Contains(rs.rows[i][3], "index:"+index+",") {
+			return true
+		}
+	}
+	return false
+}
+
+// MustIndexLookup checks whether the plan for the SQL statement is IndexLookUp.
+func (tk *TestKit) MustIndexLookup(sql string) *Result {
+	tk.c.Assert(tk.HasPlan(sql, "IndexLookUp"), check.IsTrue)
+	return tk.MustQuery(sql)
+}
+
+// MustTableDual checks whether the plan for the SQL statement is TableDual.
+func (tk *TestKit) MustTableDual(sql string) *Result {
+	tk.c.Assert(tk.HasPlan(sql, "TableDual"), check.IsTrue)
+	return tk.MustQuery(sql)
+}
+
+// MustQuery queries the statement and returns the result rows.
+// Use Result.Check to assert the query result against an expected result.
+func (tk *TestKit) MustQuery(sql string) *Result {
+	comment := check.Commentf("sql:%s", sql)
+	rs, err := tk.Exec(sql)
+	tk.c.Assert(errors.ErrorStack(err), check.Equals, "", comment)
+	tk.c.Assert(rs, check.NotNil, comment)
+	return tk.ResultSetToResult(rs, comment)
+}
+
+// QueryToErr executes a SQL statement, discards the results, and returns the error from fetching the rows.
+func (tk *TestKit) QueryToErr(sql string) error {
+	comment := check.Commentf("sql:%s", sql)
+	res, err := tk.Exec(sql)
+	tk.c.Assert(errors.ErrorStack(err), check.Equals, "", comment)
+	tk.c.Assert(res, check.NotNil, comment)
+	_, resErr := session.GetRows4Test(context.Background(), tk.Se, res)
+	tk.c.Assert(res.Close(), check.IsNil)
+	return resErr
+}
+
+// ExecToErr executes a SQL statement, discards the results, and returns the execution error.
+func (tk *TestKit) ExecToErr(sql string) error {
+	res, err := tk.Exec(sql)
+	if res != nil {
+		tk.c.Assert(res.Close(), check.IsNil)
+	}
+	return err
+}
+
+// MustGetErrCode executes a SQL statement and asserts its error code.
+func (tk *TestKit) MustGetErrCode(sql string, errCode int) {
+	_, err := tk.Exec(sql)
+	tk.c.Assert(err, check.NotNil)
+	originErr := errors.Cause(err)
+	tErr, ok := originErr.(*terror.Error)
+	tk.c.Assert(ok, check.IsTrue, check.Commentf("expect type 'terror.Error', but obtain '%T'", originErr))
+	sqlErr := tErr.ToSQLError()
+	tk.c.Assert(int(sqlErr.Code), check.Equals, errCode, check.Commentf("Assertion failed, origin err:\n %v", sqlErr))
+}
+
+// ResultSetToResult converts sqlexec.RecordSet to testkit.Result.
+// It is used to check the results of statements executed in binary mode.
+func (tk *TestKit) ResultSetToResult(rs sqlexec.RecordSet, comment check.CommentInterface) *Result {
+	return tk.ResultSetToResultWithCtx(context.Background(), rs, comment)
+}
+
+// ResultSetToResultWithCtx converts sqlexec.RecordSet to testkit.Result.
+func (tk *TestKit) ResultSetToResultWithCtx(ctx context.Context, rs sqlexec.RecordSet, comment check.CommentInterface) *Result {
+	sRows, err := session.ResultSetToStringSlice(ctx, tk.Se, rs)
+	tk.c.Check(err, check.IsNil, comment)
+	return &Result{rows: sRows, c: tk.c, comment: comment}
+}
+
+// Rows is similar to RowsWithSep, using white space as the separator string.
+func Rows(args ...string) [][]interface{} {
+	return testutil.RowsWithSep(" ", args...)
+}
+
+// GetTableID gets table ID by name.
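+//
+// A usage sketch (assumes the current test has already created a table named
+// t in the test schema):
+//
+//	tk.MustExec("create table t (a int)")
+//	tblID := tk.GetTableID("t")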
+func (tk *TestKit) GetTableID(tableName string) int64 { + dom := domain.GetDomain(tk.Se) + is := dom.InfoSchema() + tbl, err := is.TableByName(model.NewCIStr("test"), model.NewCIStr(tableName)) + tk.c.Assert(err, check.IsNil) + return tbl.Meta().ID +} diff --git a/util/testkit/testkit_test.go b/util/testkit/testkit_test.go new file mode 100644 index 0000000..f90554d --- /dev/null +++ b/util/testkit/testkit_test.go @@ -0,0 +1,38 @@ +// Copyright 2017 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package testkit + +import ( + "testing" + + "github.com/pingcap/check" +) + +var _ = check.Suite(&testKitSuite{}) + +func TestT(t *testing.T) { + check.TestingT(t) +} + +type testKitSuite struct { +} + +func (s testKitSuite) TestSort(c *check.C) { + result := &Result{ + rows: [][]string{{"1", "1", "", ""}, {"2", "2", "2", "3"}}, + c: c, + comment: check.Commentf(""), + } + result.Sort().Check(Rows("1 1 ", "2 2 2 3")) +} diff --git a/util/testleak/add-leaktest.sh b/util/testleak/add-leaktest.sh new file mode 100644 index 0000000..723553e --- /dev/null +++ b/util/testleak/add-leaktest.sh @@ -0,0 +1,21 @@ +#!/bin/sh +# +# Usage: add-leaktest.sh pkg/*_test.go + +set -eu + +sed -i'~' -e ' + /^func (s \*test.*Suite) Test.*(c \*C) {/ { + n + /testleak.AfterTest/! i\ + defer testleak.AfterTest(c)() + } +' $@ + +for i in $@; do + if ! cmp -s $i $i~ ; then + goimports -w $i + fi +echo $i + rm -f $i~ +done diff --git a/util/testleak/check-leaktest.sh b/util/testleak/check-leaktest.sh new file mode 100644 index 0000000..8df9d57 --- /dev/null +++ b/util/testleak/check-leaktest.sh @@ -0,0 +1,38 @@ +#!/bin/sh +# +# Usage: check-leaktest.sh +# It needs to run under the github.com/pingcap/tidb directory. + +set -e + +pkgs=$(git grep 'Suite' |grep -vE "Godeps|tags" |awk -F: '{print $1}' | xargs -n1 dirname | sort |uniq) +echo $pkgs +for pkg in ${pkgs}; do + if [ -z "$(ls ${pkg}/*_test.go 2>/dev/null)" ]; then + continue + fi + awk -F'[(]' ' +/func \(s .*Suite\) Test.*C\) {/ { + test = $1"("$2 + next +} + +/defer testleak.AfterTest/ { + test = 0 + next +} + +{ + if (test && (FILENAME != "./tidb_test.go")) { + printf "%s: %s: missing defer testleak.AfterTest\n", FILENAME, test + test = 0 + code = 1 + } +} + +END { + exit code +} + +' ${pkg}/*_test.go +done diff --git a/util/testleak/fake.go b/util/testleak/fake.go new file mode 100644 index 0000000..5d4ad57 --- /dev/null +++ b/util/testleak/fake.go @@ -0,0 +1,37 @@ +// Copyright 2017 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. 
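+
+// This file is the no-op counterpart of leaktest.go: it is compiled when the
+// 'leak' build tag is absent, so test code can always write
+//
+//	defer testleak.AfterTest(c)()
+//
+// and only pays for the goroutine checks when the tag is enabled, e.g. (sketch):
+//
+//	go test -tags leak ./...
+//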
+// +build !leak + +package testleak + +import ( + "testing" + + "github.com/pingcap/check" +) + +// BeforeTest is a dummy implementation when build tag 'leak' is not set. +func BeforeTest() { +} + +// AfterTest is a dummy implementation when build tag 'leak' is not set. +func AfterTest(c *check.C) func() { + return func() { + } +} + +// AfterTestT is used after all the test cases is finished. +func AfterTestT(t *testing.T) func() { + return func() { + } +} diff --git a/util/testleak/leaktest.go b/util/testleak/leaktest.go new file mode 100644 index 0000000..c904194 --- /dev/null +++ b/util/testleak/leaktest.go @@ -0,0 +1,134 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Copyright 2016 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. +// +build leak + +package testleak + +import ( + "runtime" + "sort" + "strings" + "testing" + "time" + + "github.com/pingcap/check" +) + +func interestingGoroutines() (gs []string) { + buf := make([]byte, 2<<20) + buf = buf[:runtime.Stack(buf, true)] + for _, g := range strings.Split(string(buf), "\n\n") { + sl := strings.SplitN(g, "\n", 2) + if len(sl) != 2 { + continue + } + stack := strings.TrimSpace(sl[1]) + if stack == "" || + strings.Contains(stack, "created by github.com/pingcap/tidb.init") || + strings.Contains(stack, "testing.RunTests") || + strings.Contains(stack, "check.(*resultTracker).start") || + strings.Contains(stack, "check.(*suiteRunner).runFunc") || + strings.Contains(stack, "check.(*suiteRunner).parallelRun") || + strings.Contains(stack, "localstore.(*dbStore).scheduler") || + strings.Contains(stack, "tikv.(*noGCHandler).Start") || + strings.Contains(stack, "ddl.(*ddl).start") || + strings.Contains(stack, "ddl.(*delRange).startEmulator") || + strings.Contains(stack, "domain.NewDomain") || + strings.Contains(stack, "testing.(*T).Run") || + strings.Contains(stack, "domain.(*Domain).LoadPrivilegeLoop") || + strings.Contains(stack, "domain.(*Domain).UpdateTableStatsLoop") || + strings.Contains(stack, "testing.Main(") || + strings.Contains(stack, "runtime.goexit") || + strings.Contains(stack, "created by runtime.gc") || + strings.Contains(stack, "interestingGoroutines") || + strings.Contains(stack, "runtime.MHeap_Scavenger") { + continue + } + gs = append(gs, stack) + } + sort.Strings(gs) + return +} + +var beforeTestGorountines = map[string]bool{} + +// BeforeTest gets the current goroutines. +// It's used for check.Suite.SetUpSuite() function. +// Now it's only used in the tidb_test.go. 
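+//
+// A suite-level pairing sketch (the suite type name testSuite is illustrative):
+//
+//	func (s *testSuite) SetUpSuite(c *check.C)    { testleak.BeforeTest() }
+//	func (s *testSuite) TearDownSuite(c *check.C) { testleak.AfterTest(c)() }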
+func BeforeTest() {
+	for _, g := range interestingGoroutines() {
+		beforeTestGorountines[g] = true
+	}
+}
+
+const defaultCheckCnt = 50
+
+func checkLeakAfterTest(errorFunc func(cnt int, g string)) func() {
+	if len(beforeTestGorountines) == 0 {
+		for _, g := range interestingGoroutines() {
+			beforeTestGorountines[g] = true
+		}
+	}
+
+	cnt := defaultCheckCnt
+	return func() {
+		defer func() {
+			beforeTestGorountines = map[string]bool{}
+		}()
+
+		var leaked []string
+		for i := 0; i < cnt; i++ {
+			leaked = leaked[:0]
+			for _, g := range interestingGoroutines() {
+				if !beforeTestGorountines[g] {
+					leaked = append(leaked, g)
+				}
+			}
+			// Bad stuff found, but goroutines might just still be
+			// shutting down, so give it some time.
+			if len(leaked) != 0 {
+				time.Sleep(50 * time.Millisecond)
+				continue
+			}
+
+			return
+		}
+		for _, g := range leaked {
+			errorFunc(cnt, g)
+		}
+	}
+}
+
+// AfterTest records the current goroutines and returns a function that compares
+// them with the goroutines present when it runs, to check whether any goroutines leaked.
+// Usage: defer testleak.AfterTest(c)()
+// It can be used together with BeforeTest() at the beginning of check.Suite.TearDownSuite(),
+// or called alone at the beginning of each test.
+func AfterTest(c *check.C) func() {
+	errorFunc := func(cnt int, g string) {
+		c.Errorf("Test %s check-count %d appears to have leaked: %v", c.TestName(), cnt, g)
+	}
+	return checkLeakAfterTest(errorFunc)
+}
+
+// AfterTestT is the testing.T counterpart of AfterTest, used after all the test cases are finished.
+func AfterTestT(t *testing.T) func() {
+	errorFunc := func(cnt int, g string) {
+		t.Errorf("Test %s check-count %d appears to have leaked: %v", t.Name(), cnt, g)
+	}
+	return checkLeakAfterTest(errorFunc)
+}
diff --git a/util/testutil/testutil.go b/util/testutil/testutil.go
new file mode 100644
index 0000000..41d2a05
--- /dev/null
+++ b/util/testutil/testutil.go
@@ -0,0 +1,297 @@
+// Copyright 2015 PingCAP, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// +build !codes
+
+package testutil
+
+import (
+	"bytes"
+	"encoding/json"
+	"flag"
+	"fmt"
+	"io/ioutil"
+	"os"
+	"path/filepath"
+	"reflect"
+	"regexp"
+	"runtime"
+	"strings"
+
+	"github.com/pingcap/check"
+	"github.com/pingcap/errors"
+	"github.com/pingcap/tidb/sessionctx/stmtctx"
+	"github.com/pingcap/tidb/types"
+)
+
+// CompareUnorderedStringSlice compares two string slices.
+// It returns true if a and b contain exactly the same elements regardless of
+// their order, and false otherwise.
+func CompareUnorderedStringSlice(a []string, b []string) bool {
+	if a == nil && b == nil {
+		return true
+	}
+	if a == nil || b == nil {
+		return false
+	}
+	if len(a) != len(b) {
+		return false
+	}
+	m := make(map[string]int, len(a))
+	for _, i := range a {
+		_, ok := m[i]
+		if !ok {
+			m[i] = 1
+		} else {
+			m[i]++
+		}
+	}
+
+	for _, i := range b {
+		_, ok := m[i]
+		if !ok {
+			return false
+		}
+		m[i]--
+		if m[i] == 0 {
+			delete(m, i)
+		}
+	}
+	return len(m) == 0
+}
+
+// datumEqualsChecker is a checker for DatumEquals.
+type datumEqualsChecker struct {
+	*check.CheckerInfo
+}
+
+// DatumEquals checker verifies that the obtained value is equal to
+// the expected value.
+// For example:
+//     c.Assert(value, DatumEquals, NewDatum(42))
+var DatumEquals check.Checker = &datumEqualsChecker{
+	&check.CheckerInfo{Name: "DatumEquals", Params: []string{"obtained", "expected"}},
+}
+
+func (checker *datumEqualsChecker) Check(params []interface{}, names []string) (result bool, error string) {
+	defer func() {
+		if v := recover(); v != nil {
+			result = false
+			error = fmt.Sprint(v)
+		}
+	}()
+	paramFirst, ok := params[0].(types.Datum)
+	if !ok {
+		panic("the first param should be datum")
+	}
+	paramSecond, ok := params[1].(types.Datum)
+	if !ok {
+		panic("the second param should be datum")
+	}
+	sc := new(stmtctx.StatementContext)
+	res, err := paramFirst.CompareDatum(sc, &paramSecond)
+	if err != nil {
+		panic(err)
+	}
+	return res == 0, ""
+}
+
+// RowsWithSep is a convenient function to wrap args into a slice of []interface{}.
+// Each arg represents a row, split by sep.
+func RowsWithSep(sep string, args ...string) [][]interface{} {
+	rows := make([][]interface{}, len(args))
+	for i, v := range args {
+		strs := strings.Split(v, sep)
+		row := make([]interface{}, len(strs))
+		for j, s := range strs {
+			row[j] = s
+		}
+		rows[i] = row
+	}
+	return rows
+}
+
+// record is a flag used to generate test results.
+var record bool
+
+func init() {
+	flag.BoolVar(&record, "record", false, "to generate test result")
+}
+
+type testCases struct {
+	Name       string
+	Cases      *json.RawMessage // For delayed parsing.
+	decodedOut interface{}      // For generating output.
+}
+
+// TestData stores all the data of a test suite.
+type TestData struct {
+	input          []testCases
+	output         []testCases
+	filePathPrefix string
+	funcMap        map[string]int
+}
+
+// LoadTestSuiteData loads test suite data from file.
+func LoadTestSuiteData(dir, suiteName string) (res TestData, err error) {
+	res.filePathPrefix = filepath.Join(dir, suiteName)
+	res.input, err = loadTestSuiteCases(fmt.Sprintf("%s_in.json", res.filePathPrefix))
+	if err != nil {
+		return res, err
+	}
+	if record {
+		res.output = make([]testCases, len(res.input))
+		for i := range res.input {
+			res.output[i].Name = res.input[i].Name
+		}
+	} else {
+		res.output, err = loadTestSuiteCases(fmt.Sprintf("%s_out.json", res.filePathPrefix))
+		if err != nil {
+			return res, err
+		}
+		if len(res.input) != len(res.output) {
+			return res, errors.New(fmt.Sprintf("Number of test input cases %d does not match test output cases %d", len(res.input), len(res.output)))
+		}
+	}
+	res.funcMap = make(map[string]int, len(res.input))
+	for i, test := range res.input {
+		res.funcMap[test.Name] = i
+		if test.Name != res.output[i].Name {
+			return res, errors.New(fmt.Sprintf("Input name of the %d-case %s does not match output %s", i, test.Name, res.output[i].Name))
+		}
+	}
+	return res, nil
+}
+
+func loadTestSuiteCases(filePath string) (res []testCases, err error) {
+	jsonFile, err := os.Open(filePath)
+	if err != nil {
+		return res, err
+	}
+	defer func() {
+		if err1 := jsonFile.Close(); err == nil && err1 != nil {
+			err = err1
+		}
+	}()
+	byteValue, err := ioutil.ReadAll(jsonFile)
+	if err != nil {
+		return res, err
+	}
+	// Remove comments, since they are not allowed in JSON.
+	re := regexp.MustCompile("(?s)//.*?\n")
+	err = json.Unmarshal(re.ReplaceAll(byteValue, nil), &res)
+	return res, err
+}
+
+// GetTestCasesByName gets the test cases for a test function by its name.
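+//
+// A usage sketch (the case name, SQL, and output struct shape are illustrative;
+// they depend on the suite's testdata JSON files):
+//
+//	var input []string
+//	var output []struct{ Plan []string }
+//	s.testData.GetTestCasesByName("TestExplain", c, &input, &output)
+//	for i, sql := range input {
+//		s.testData.OnRecord(func() {
+//			output[i].Plan = s.testData.ConvertRowsToStrings(tk.MustQuery("explain " + sql).Rows())
+//		})
+//		tk.MustQuery("explain " + sql).Check(testkit.Rows(output[i].Plan...))
+//	}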
+func (t *TestData) GetTestCasesByName(caseName string, c *check.C, in interface{}, out interface{}) {
+	casesIdx, ok := t.funcMap[caseName]
+	c.Assert(ok, check.IsTrue, check.Commentf("Must get test %s", caseName))
+	err := json.Unmarshal(*t.input[casesIdx].Cases, in)
+	c.Assert(err, check.IsNil)
+	if !record {
+		err = json.Unmarshal(*t.output[casesIdx].Cases, out)
+		c.Assert(err, check.IsNil)
+	} else {
+		// Init for generating the output file.
+		inputLen := reflect.ValueOf(in).Elem().Len()
+		v := reflect.ValueOf(out).Elem()
+		if v.Kind() == reflect.Slice {
+			v.Set(reflect.MakeSlice(v.Type(), inputLen, inputLen))
+		}
+	}
+	t.output[casesIdx].decodedOut = out
+}
+
+// GetTestCases gets the test cases for a test function.
+func (t *TestData) GetTestCases(c *check.C, in interface{}, out interface{}) {
+	// Extract caller's name.
+	pc, _, _, ok := runtime.Caller(1)
+	c.Assert(ok, check.IsTrue)
+	details := runtime.FuncForPC(pc)
+	funcNameIdx := strings.LastIndex(details.Name(), ".")
+	funcName := details.Name()[funcNameIdx+1:]
+
+	casesIdx, ok := t.funcMap[funcName]
+	c.Assert(ok, check.IsTrue, check.Commentf("Must get test %s", funcName))
+	err := json.Unmarshal(*t.input[casesIdx].Cases, in)
+	c.Assert(err, check.IsNil)
+	if !record {
+		err = json.Unmarshal(*t.output[casesIdx].Cases, out)
+		c.Assert(err, check.IsNil)
+	} else {
+		// Init for generating the output file.
+		inputLen := reflect.ValueOf(in).Elem().Len()
+		v := reflect.ValueOf(out).Elem()
+		if v.Kind() == reflect.Slice {
+			v.Set(reflect.MakeSlice(v.Type(), inputLen, inputLen))
+		}
+	}
+	t.output[casesIdx].decodedOut = out
+}
+
+// OnRecord executes updateFunc to update the result when running with the record flag.
+func (t *TestData) OnRecord(updateFunc func()) {
+	if record {
+		updateFunc()
+	}
+}
+
+// ConvertRowsToStrings converts [][]interface{} to []string.
+func (t *TestData) ConvertRowsToStrings(rows [][]interface{}) (rs []string) {
+	for _, row := range rows {
+		s := fmt.Sprintf("%v", row)
+		// Trim the leftmost `[` and rightmost `]`.
+		s = s[1 : len(s)-1]
+		rs = append(rs, s)
+	}
+	return rs
+}
+
+// GenerateOutputIfNeeded generates the output file when running with the record flag.
+func (t *TestData) GenerateOutputIfNeeded() error {
+	if !record {
+		return nil
+	}
+
+	buf := new(bytes.Buffer)
+	enc := json.NewEncoder(buf)
+	enc.SetEscapeHTML(false)
+	enc.SetIndent("", "  ")
+	for i, test := range t.output {
+		err := enc.Encode(test.decodedOut)
+		if err != nil {
+			return err
+		}
+		res := make([]byte, len(buf.Bytes()))
+		copy(res, buf.Bytes())
+		buf.Reset()
+		rm := json.RawMessage(res)
+		t.output[i].Cases = &rm
+	}
+	err := enc.Encode(t.output)
+	if err != nil {
+		return err
+	}
+	file, err := os.Create(fmt.Sprintf("%s_out.json", t.filePathPrefix))
+	if err != nil {
+		return err
+	}
+	defer func() {
+		if err1 := file.Close(); err == nil && err1 != nil {
+			err = err1
+		}
+	}()
+	_, err = file.Write(buf.Bytes())
+	return err
+}
diff --git a/util/testutil/testutil_test.go b/util/testutil/testutil_test.go
new file mode 100644
index 0000000..db92db0
--- /dev/null
+++ b/util/testutil/testutil_test.go
@@ -0,0 +1,51 @@
+// Copyright 2015 PingCAP, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package testutil + +import ( + "testing" + + . "github.com/pingcap/check" + "github.com/pingcap/tidb/util/testleak" +) + +func TestT(t *testing.T) { + CustomVerboseFlag = true + TestingT(t) +} + +var _ = Suite(&testTestUtilSuite{}) + +type testTestUtilSuite struct { +} + +func (s *testTestUtilSuite) TestCompareUnorderedString(c *C) { + defer testleak.AfterTest(c)() + tbl := []struct { + a []string + b []string + r bool + }{ + {[]string{"1", "1", "2"}, []string{"1", "1", "2"}, true}, + {[]string{"1", "1", "2"}, []string{"1", "2", "1"}, true}, + {[]string{"1", "1"}, []string{"1", "2", "1"}, false}, + {[]string{"1", "1", "2"}, []string{"1", "2", "2"}, false}, + {nil, nil, true}, + {[]string{}, nil, false}, + {nil, []string{}, false}, + } + for _, t := range tbl { + c.Assert(CompareUnorderedStringSlice(t.a, t.b), Equals, t.r) + } +}